query            stringlengths     9 to 3.4k
document         stringlengths     9 to 87.4k
metadata         dict
negatives        sequencelengths   4 to 101
negative_scores  sequencelengths   4 to 101
document_score   stringlengths     3 to 10
document_rank    stringclasses     102 values
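A minimal sketch of reading records with this schema, assuming the data is published as a standard Hugging Face dataset; the repository id below is a placeholder, and only the column names listed above are taken from the dataset itself:

```python
# Placeholder repository id; substitute the actual dataset path.
from datasets import load_dataset

ds = load_dataset("user/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])            # natural-language description of the code
print(row["document"][:200])   # the matching code snippet (up to ~87.4k chars)
print(len(row["negatives"]))   # 4 to 101 negative snippets per record
print(row["negative_scores"][:5])
print(row["document_score"], row["document_rank"])
```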
Move the WordForm closer to the middle of various clouds.
def entrench(self, cloud, paradigms, informativity, categorization, unique_base):
    self.entrench_word(cloud, paradigms, informativity, categorization, unique_base)
    self.entrench_segments(cloud)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_cloud(self):\n self.remove()\n self.min_x -= 1\n self.max_x -= 1\n self.update()", "def delete_words(self):\n self.word_1.delete(0, tk.END)\n self.word_2.delete(0, tk.END)\n self.word_3.delete(0, tk.END)\n self.word_4.delete(0, tk.END)\n self.word_5.delete(0, tk.END)", "def wordcloud(self):\n if 'clean_comments' not in self.comments.columns:\n self.comments['clean_comments'] = self.comments['comment_message'].apply(self.comment_cleaner)\n\n self.comments = self.comments[self.comments['clean_comments'].notna()]\n\n if 'text_lemmatized' not in self.comments.columns:\n self._proc(self.comments)\n\n long_str = ','.join(list(self.comments['text_lemmatized']))\n\n wc = WordCloud(background_color=\"white\", max_words=5000, contour_width=3, contour_color='steelblue',\n width=1600, height=800)\n wc.generate(long_str)\n return wc.to_image()", "def wrap(self):\n if self.center.x > SCREEN_WIDTH:\n self.center.x = 0\n if self.center.y > SCREEN_HEIGHT:\n self.center.y = 0\n if self.center.x < 0:\n self.center.x = SCREEN_WIDTH\n if self.center.y < 0:\n self.center.y = SCREEN_HEIGHT", "def move_center(obj):\n desktop = QApplication.desktop()\n dw = desktop.width()\n dh = desktop.height()\n size = obj.size()\n mw = size.width()\n mh = size.height()\n obj.move(dw/2-mw/2, dh/2-mh/2)", "def wordcloud_maker(df, stopwords = None):\n all_clean = \" \".join(review for review in df.clean)\n wordcloud = WordCloud(stopwords = stopwords, background_color=\"white\").generate(all_clean)\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis(\"off\")\n plt.show()", "def replace_nearest(word): \n nearest = spellcheck.correction(word)\n #When there is no valid word, the nearest word\n #is the same as the original\n if word == nearest:\n #This implies we need to try splitting it\n return split_word(word)\n return nearest", "def update_spaces_threatened(self):\n # The threatened spaces will always be it's corners\n current_row = self.position[0]\n current_column = self.position[1]\n corner1 = (current_row + 1 * self.direction, current_column - 1)\n corner2 = (current_row + 1 * self.direction, current_column + 1)\n current_spaces_threatened = [corner1, corner2]\n self.spaces_threatened = current_spaces_threatened\n update_threatening_king(self)", "def warp(self):\n self.maingui.scene.center_on(self.spin_x.value(), self.spin_y.value())\n self.close()", "def move_to_coc(self):\n coc = scale(self.center_of_charge(), -1.0)\n self.translate(coc)", "def update_center(self): \r\n \r\n self.grfx[0].center = self.center\r\n\r\n self.update_bbox()", "def orientWP(self):\n import DraftGeomUtils\n if hasattr(App, \"DraftWorkingPlane\"):\n if len(self.node) > 1 and self.obj:\n n = DraftGeomUtils.getNormal(self.obj.Shape)\n if not n:\n n = App.DraftWorkingPlane.axis\n p = self.node[-1]\n v = self.node[-2].sub(self.node[-1])\n v = v.negative()\n if not self.oldWP:\n self.oldWP = App.DraftWorkingPlane.copy()\n App.DraftWorkingPlane.alignToPointAndAxis(p, n, upvec=v)\n if hasattr(Gui, \"Snapper\"):\n Gui.Snapper.setGrid()\n Gui.Snapper.restack()\n if self.planetrack:\n self.planetrack.set(self.node[-1])", "def centre(self):\n\n qr = self.frameGeometry()\n cp = QtWidgets.QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())", "def move_back(self):\r\n self.center_x, self.center_y = self.save_pos", "def translate(self, displacement):\n\n self.center = (self.center[0] + displacement[0],\n self.center[1] + displacement[1])", "def get_absolute_window_words(self, pano_windows, window):\n 
words = []\n im, scale_w, scale_h, window_w, window_h = self.__resize(window.im)\n with torch.no_grad():\n # char_bboxes, char_scores, word_instances = ...\n _, _, word_instances = self.charnet(im, scale_w, scale_h, window_w, window_h)\n\n for word in word_instances:\n # To combat google's watermark of street-view messing with the words\n if word.text == 'GOOGLE':\n continue\n old_word_bbox = word.word_bbox.copy()\n # update absolute position\n word.word_bbox[::2] = [x_coord + window.pos_x for x_coord in word.word_bbox[::2]]\n word.word_bbox[1::2] = [y_coord + window.pos_y for y_coord in word.word_bbox[1::2]]\n word_abs = word\n # open a new window for near-border words\n if self.__word_is_near_border(old_word_bbox, 50, window_w, window_h):\n zoom_w = pano_windows.get_window_at_pos(word.word_bbox[0], word.word_bbox[1], 50)\n z_im, z_scale_w, z_scale_h, z_window_w, z_window_h = self.__resize(zoom_w.im)\n with torch.no_grad():\n _, _, z_word_instances = self.charnet(z_im, z_scale_w, z_scale_h,\n z_window_w, z_window_h)\n\n for z_word in z_word_instances: # Swap only the word that intersects\n z_word.word_bbox[::2] = [x_coord + zoom_w.pos_x for\n x_coord in z_word.word_bbox[::2]]\n z_word.word_bbox[1::2] = [y_coord + zoom_w.pos_y for\n y_coord in z_word.word_bbox[1::2]]\n if self._do_words_intersect(word, z_word):\n word_abs = z_word # save only the new word from the window\n break\n\n words.append(word_abs)\n return words", "def center(self):\r\n qr = self.frameGeometry()\r\n cp = QtWidgets.QDesktopWidget().availableGeometry().center()\r\n qr.moveCenter(cp)\r\n self.move(qr.topLeft())", "def resetAlignmentCenter(self):\n cent = self.TiltSeries_._TiltAlignmentParas.cent\n imdimX = self.TiltSeries_._imdimX\n imdimY = self.TiltSeries_._imdimY\n print(imdimX, imdimY)\n if cent[0] != imdimX//2+1 or cent[1] != imdimY//2+1:\n #rint \"Centers do not match: cent=\"+str(cent)+\", imdim=\"+str(imdim)\n self.TiltSeries_._TiltAlignmentParas.cent = [imdimX//2+1, imdimY//2+1]", "def go_right(self):\n self.rect.centerx += 9", "def create_wordcloud(processed_text, filename):\n\n\n pyplot.clf()\n wordcloud = WordCloud(background_color='white', max_font_size=40, relative_scaling=.5).generate(' '.join(processed_text))\n pyplot.imshow(wordcloud)\n pyplot.axis('off')\n\n pyplot.savefig(filename)", "def move_to_refine(self, des_img_pos, act_img_pos, current_world_pos, increment, img_thresh):\n des_img_x = des_img_pos[0]\n des_img_y = des_img_pos[1]\n act_img_x = act_img_pos[0]\n act_img_y = act_img_pos[1]\n cur_wld_x = current_world_pos[0]\n cur_wld_y = current_world_pos[1]\n new_wld_x = cur_wld_x\n new_wld_y = cur_wld_y\n \n #object to the left -> move left (-wld_y)\n if (act_img_x < des_img_x-img_thresh):\n print(' Moving left')\n new_wld_y = cur_wld_y + increment\n #object to the right -> move right (+wld_y)\n elif (act_img_x > des_img_x+img_thresh):\n new_wld_y = cur_wld_y - increment\n print(' Moving right')\n #object to the top -> move forward (+wld_x)\n if (act_img_y < des_img_y-img_thresh):\n new_wld_x = cur_wld_x + increment\n print(' Moving forward')\n #object to the bottom -> move backward (-wld_x)\n elif (act_img_y > des_img_y+img_thresh):\n new_wld_x = cur_wld_x - increment\n print(' Moving backward')\n \n #move arm to new coordinates\n self.move_to(new_wld_x, new_wld_y, self.move_to_height)\n \n #return new arm position\n return [new_wld_x, new_wld_y]", "def entrench_word(self, cloud, paradigms, informativity, categorization,\n unique_base):\n # Entrench within the WordForm's own cloud. 
Iterate over positions in\n # the WordForm (up to three Segments).\n for pos, seg in enumerate(self.segments):\n if pos < 3:\n # Iterate over features.\n for feat in seg.features:\n if uniform(0, 1) < probability_of_analogy:\n # Collect other values of the feature across the cloud.\n # Since this is the WordForm's own cloud, set all the\n # weights to 1.\n wv = [(e.segments[pos].features[feat], 1)\n for e in cloud\n if e.lemma == self.lemma\n and e.case == self.case]\n # Entrench the segment based on these values.\n seg.entrench_feature(feat, wv,\n top_value = self_top_value,\n max_movement = self_max_movement)\n # Entrench within other clouds of the same paradigm.\n if paradigms:\n # Iterate over positions in the WordForm (up to three Segments).\n for pos, seg in enumerate(self.segments):\n if pos < 3:\n # Iterate over features.\n for feat in seg.features:\n if uniform(0, 1) < (probability_of_analogy *\n paradigm_weight):\n # Get the weight for each case.\n weights = dict()\n # If informativity is measured via the entropy\n # method, the weight of a case is proportional to\n # the entropy of the feature across all lemmas of\n # that case.\n if informativity == 'entropy':\n weights = {c: entropy(feat, [e.segments[pos].\\\n features[feat]\n for e in cloud\n if e.case == c])\n for c in cases}\n # If informativity is measured via a classification\n # algorithm, the weight of a case is proportional to\n # the performance of the classifier on lemmas within\n # that case using just the current feature.\n elif informativity == 'classification':\n weights = {c: performance([e\n for e in cloud\n if e.case == c],\n positions = [pos],\n features = [feat],\n method = categorization)\n for c in cases}\n # If informativity is not measured, set the weights\n # of all cases to 1.\n elif informativity == 'none':\n weights = {c: 1\n for c in cases}\n # If paradigms are required to have a unique base,\n # the winner takes all the weight.\n if unique_base:\n max_weight = max(weights.values())\n for c in weights:\n if weights[c] < max_weight:\n weights[c] = 0\n # Collect other values of the feature across the\n # cloud, and pair them with their weights.\n wv = [(e.segments[pos].features[feat],\n weights[e.case])\n for e in cloud\n if e.lemma == self.lemma\n and e.case != self.case]\n # Entrench the segment based on these values.\n seg.entrench_feature(feat, wv,\n top_value = paradigm_top_value,\n max_movement = paradigm_max_movement)", "def _move_ligand_to_lower_corner(self):\n spacing = self._grid[\"spacing\"]\n lower_ligand_corner = np.array([self._crd[:,i].min() for i in range(3)], dtype=float) - 1.5*spacing\n upper_ligand_corner = np.array([self._crd[:,i].max() for i in range(3)], dtype=float) + 1.5*spacing\n #\n ligand_box_lenghts = upper_ligand_corner - lower_ligand_corner\n if np.any(ligand_box_lenghts < 0):\n raise RuntimeError(\"One of the ligand box lenghts are negative\")\n\n max_grid_indices = np.ceil(ligand_box_lenghts / spacing)\n self._max_grid_indices = self._grid[\"counts\"] - np.array(max_grid_indices, dtype=int)\n if np.any(self._max_grid_indices <= 1):\n raise RuntimeError(\"At least one of the max grid indices is <= one\")\n \n displacement = self._origin_crd - lower_ligand_corner\n for atom_ind in range(len(self._crd)):\n self._crd[atom_ind] += displacement\n \n self._initial_com = self._get_molecule_center_of_mass()\n return None", "def updatePos(self):\n self.setPos(self.centerX-self.boundingRect().width()/2.0,\n self.centerY-self.boundingRect().height()/2.0)", "def fix_ui(self):\n x0, 
y0, x1, y1 = win32gui.GetWindowRect(self._handle)\n w = x1 - x0\n h = y1 - y0\n win32gui.MoveWindow(self._handle, x0, y0, w + 1, h + 1, True)", "def show_word_cloud(self):\n\n cloud = WordCloud(str(self.comments))\n cloud.show_cloud()", "def create_wordcloud(self, text):\n text = ' '.join(f\"{word}\" for word in text)\n mask = np.array(Image.open(os.path.join(CURRDIR, \"cloud.png\")))\n wc = WordCloud(background_color=\"white\",\n max_words=200,\n mask=mask)\n wc.generate(text)\n wc.to_file(PATH_TO_SAVE_IMG, \"wordle.png\")", "def center(self):\n qr = self.frameGeometry()\n cp = QtWidgets.QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)", "def show_wordcloud(dictionary, title, min_font = 10):\n wordcloud = WordCloud(min_font_size=min_font).generate_from_frequencies(dictionary)\n plt.figure(figsize = (8, 8), facecolor = None) \n plt.imshow(wordcloud) \n plt.axis(\"off\")\n if title:\n plt.title(title)\n else:\n plt.title(\"Word Cloud\")\n plt.tight_layout(pad = 0) \n\n plt.show()", "def center(self):\n if self.pos != 0.0:\n self.pos = 0.0", "def move_buildings(self):", "def change_loc_coords(self, field_size):\r\n self.top_left_corner = _get_center_writing(self.button) # sets new center\r\n font_size = int(field_size * 2) # resizes font\r\n self.font = pygame.font.SysFont(None, font_size) # updates font\r", "def centerWindow(self):\n framegeo = self.frameGeometry()\n center = QtGui.QDesktopWidget().availableGeometry().center()\n framegeo.moveCenter(center)\n self.move(framegeo.topLeft())", "def center(self):\n # get the compute screen's size\n screen = QDesktopWidget().screenGeometry()\n # get the app windows' size\n size = self.geometry()\n self.move(int((screen.width() - size.width()) / 2), int((screen.height() - size.height()) / 2))", "def correct_errors(page, labels, bboxes, model):\n\n # end_of_word = []\n\n # take every bbox and check the width between them\n # total_bboxes = len(bboxes)\n # for n in range(total_bboxes-1):\n # if (space_between(n,n+1,bboxes)):\n # end_of_word.append(n+1)\n\n # now we can make words by joining the characters where there isn't whitespace\n # so from the start until the index of the first space we should join together the characters\n\n # char_to_word = []\n # for x in range(len(end_of_word)):\n # if (x==0):\n # char_to_word.append(''.join(labels[x:(end_of_word[x])]).upper())\n # else:\n # char_to_word.append(''.join(labels[(end_of_word[x-1]):(end_of_word[x])]).upper())\n\n # We want to remove all the punctuation as it gets in the way of matching words\n # however we still want to store it's index in the labels for later.\n # punctuation_index = []\n # punctuation_value = []\n\n # for x in range(len(labels)):\n # if labels[x] in ('!','.',':',',',';','?','\\''):\n # punctuation_index.append(labels.tolist().index(labels[x]))\n # punctuation_value.append(labels[x])\n # labels[x] = remove_punctuation(labels[x])\n\n # Get the words that don't need changing out of the way (they already exist)\n # no_change_words = set(char_to_word).intersection(model['words'])\n\n # now we want to take all words that don't appear in both lists of words and change those with a few\n # misclassified chars to words that are similar in the list of actual words.\n\n # words_to_correct = [w for w in char_to_word if w not in no_change_words]\n\n # We'll need the index of the words so we can put them back.\n # wtc_index = []\n # for i in range(len(words_to_correct)):\n # wtc_index.append(char_to_word.index(words_to_correct[i]))\n\n # Start looking for similar words of 
the same length for a given word and input the similar word instead.\n # for y in range(len(words_to_correct)):\n # word = words_to_correct[y]\n # # filter words of the same length\n # close_word = filter(lambda x: len(x) == len(word), model['words'])\n # # replace current word with one real word with only a letters difference\n # for x, similar in enumerate(close_word):\n # if (string_difference(similar, word) == 1):\n # words_to_correct[y] = similar\n\n # using the stored index, put the words back where they need to be\n # for i in range(len(wtc_index)):\n # for n, j in enumerate(char_to_word):\n # if (n == wtc_index[i]):\n # char_to_word[n] = words_to_correct[i]\n\n # start splitting the words up again so that they're in the necessary label shape\n # possible_labels = []\n # for i in range(len(char_to_word)):\n # split_words = [x for x in char_to_word[i].lower()]\n # possible_labels.append(split_words)\n # possible_labels1 = list(itertools.chain.from_iterable(possible_labels))\n\n # add the punctuation back in and you should get the full amount of labels to return and test on\n # for i in range(len(punctuation_index)):\n # while len(possible_labels1) != len(labels):\n # possible_labels1.insert(punctuation_index[i],punctuation_value[i])\n\n return labels", "def center(self):\n cp = self.dat.flowsheet.getCenter()\n self.centerOn(cp[0], cp[1])", "def move(self):\n \n self.position = self.wander()", "def WordCloud(dataframe,column):\r\n from wordcloud import WordCloud\r\n import matplotlib.pyplot as plt\r\n import nltk\r\n from nltk.tokenize import word_tokenize\r\n from PIL import Image\r\n from nltk.corpus import stopwords\r\n from nltk.tokenize import sent_tokenize\r\n from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator\r\n text = \" \".join(review for review in dataframe[column])\r\n tokenized_word=word_tokenize(text)\r\n stop_words=set(stopwords.words(\"english\"))\r\n filtered_sent=[]\r\n for w in tokenized_word:\r\n if w not in stop_words:\r\n if w not in [':',',','.',\"'\",'\\\\n','-','@','(',')','and/or','?',\"'s\"]:\r\n filtered_sent.append(w)\r\n wordcloud = WordCloud(margin=0,stopwords=stop_words,\r\n max_words=200,background_color=\"white\", collocations = False).generate(str(filtered_sent))\r\n plt.figure( figsize=(10,10), facecolor = None)\r\n plt.imshow(wordcloud,interpolation='bilinear')\r\n plt.axis(\"off\")\r\n plt.margins(x=0, y=0)\r\n plt.show()", "def move_right_bottom(obj):\n desktop = QApplication.desktop()\n dw = desktop.width()\n dh = desktop.height()\n size = obj.size()\n mw = size.width()\n mh = size.height()\n w = dw - mw\n h = dh - mh\n obj.move(w, h - 40)", "def center(self):\n qr = self.frameGeometry()\n central_p = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(central_p)\n self.move(qr.topLeft())", "def update(self): \n super().update()\n if self.center_x < constants.left_limit:\n self.center_x = self.screen_width + constants.offscreen_space\n if self.center_x > self.screen_width + constants.offscreen_space:\n self.center_x = constants.left_limit\n if self.center_y > self.screen_height + constants.offscreen_space:\n self.center_y = constants.bottom_limit\n if self.center_y < constants.bottom_limit:\n self.center_y = self.screen_height + constants.offscreen_space", "def go_left(self):\n self.rect.centerx -= 9", "def move_target(self, distance_adjustment):\n\t\tself.x = float(self.screen_rect.right - self.width)\n\t\tself.x = self.x * distance_adjustment\n\t\tself.rect.x = self.x", "def correct(point_cloud, axis_order=[0, 1, 2], 
dist_from_center=0):\n \n if type(axis_order) != list:\n axis_order = list(axis_order)\n\n # Adding the fixed distance to x and y.\n x = point_cloud[:, axis_order[0]] + dist_from_center\n y = point_cloud[:, axis_order[1]] + dist_from_center\n z = point_cloud[:, axis_order[2]]\n\n return np.vstack((x, y, z)).T", "def center(self):\r\n frameGm = self.frameGeometry()\r\n screen = QtGui.QApplication.desktop().screenNumber(QtGui.QApplication.desktop().cursor().pos())\r\n centerPoint = QtGui.QApplication.desktop().screenGeometry(screen).center()\r\n frameGm.moveCenter(centerPoint)\r\n self.move(frameGm.topLeft())", "def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())", "def center(self):\n qr = self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())", "def move(self, center):\n\t\t#print \"made it\"\n\t\tself.rect = self.rect.move(center)", "def center_window(self):\n\n\t\tframe_geo = self.frameGeometry()\n\t\tcursor_pos = QtWidgets.QApplication.desktop().cursor().pos()\n\t\tscreen = QtWidgets.QApplication.desktop().screenNumber(cursor_pos)\n\t\tcenter_point = QtWidgets.QApplication.desktop().screenGeometry(screen).center()\n\t\tframe_geo.moveCenter(center_point)\n\t\tself.move(frame_geo.topLeft())", "def adjust_visual(self):\n\n if (self.direction is bs.Direction.LEFT):\n self.rect.x -= 0.5 * CELL_SIZE", "def centerAxis():\n dislin.center()", "def clean_5_words(self):\n self.first_word.configure(text=\"\")\n self.second_word.configure(text=\"\")\n self.third_word.configure(text=\"\")\n self.fourth_word.configure(text=\"\")\n self.fifth_word.configure(text=\"\")", "def move_west(self):\n self.horizontal = (self.horizontal * 2)[1:5]\n self.vertical[0] = self.horizontal[1]\n self.vertical[2] = self.horizontal[3]", "def move_word(self, direction):\n count = len(self._segments) - 1\n for n in range(count, -1, -1):\n segment = self._segments[n]\n segment.set_velocity(direction)\n segment.move_next()", "def move_east(self):\n self.horizontal = (self.horizontal * 2)[3:7]\n self.vertical[0] = self.horizontal[1]\n self.vertical[2] = self.horizontal[3]", "def _positionWindow(self):\n\t\tscreen = QtGui.QDesktopWidget().screenGeometry()\n\t\tself.setGeometry(1050, 275, 375, 350)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter", "def calculate_postions_in_camera_frame(self):\n\n # Get the different distances\n self.calculate_all_distances()\n\n # The only position for this distance is centered in front of the camera\n self.close_positions_camera = [[self.close_distance, 0, 0]]\n\n # Calculate the dimensions of the field of view for the medium distance\n fov_height = self.fov_height_for_distance(self.medium_distance)\n fov_width = 
self.fov_width_for_distance(self.medium_distance)\n\n # Calculate the positions for the first row\n self.medium_positions_camera.append(\n [self.medium_distance, -(fov_width / 2 - self.caltab_width / 2), fov_height / 2 - self.caltab_height / 2])\n self.medium_positions_camera.append(\n [self.medium_distance, fov_width / 2 - self.caltab_width / 2, fov_height / 2 - self.caltab_height / 2])\n\n # Calculate the positions for the second row\n self.medium_positions_camera.append(\n [self.medium_distance, -(fov_width / 2 - self.caltab_width / 2),\n -(fov_height / 2 - self.caltab_height / 2)])\n self.medium_positions_camera.append(\n [self.medium_distance, fov_width / 2 - self.caltab_width / 2, -(fov_height / 2 - self.caltab_height / 2)])\n\n # Now get the dimensions of the field of view for the far distance\n fov_height = self.fov_height_for_distance(self.far_distance)\n fov_width = self.fov_width_for_distance(self.far_distance)\n\n # Calculate the positions for the first row\n self.far_positions_camera.append(\n [self.far_distance, -(fov_width / 2 - self.caltab_width / 2), fov_height / 2 - self.caltab_height / 2])\n self.far_positions_camera.append([self.far_distance, 0, fov_height / 2 - self.caltab_height / 2])\n self.far_positions_camera.append(\n [self.far_distance, fov_width / 2 - self.caltab_width / 2, fov_height / 2 - self.caltab_height / 2])\n\n # Calculate the positions for the second row\n self.far_positions_camera.append(\n [self.far_distance, -(fov_width / 2 - self.caltab_width / 2), 0])\n self.far_positions_camera.append([self.far_distance, 0, 0])\n self.far_positions_camera.append(\n [self.far_distance, fov_width / 2 - self.caltab_width / 2, 0])\n\n # Calculate the positions for the third row\n self.far_positions_camera.append(\n [self.far_distance, -(fov_width / 2 - self.caltab_width / 2), -(fov_height / 2 - self.caltab_height / 2)])\n self.far_positions_camera.append(\n [self.far_distance, 0, -(fov_height / 2 - self.caltab_height / 2)])\n self.far_positions_camera.append(\n [self.far_distance, fov_width / 2 - self.caltab_width / 2, -(fov_height / 2 - self.caltab_height / 2)])", "def center(self):\n \n geometry = self.frameGeometry()\n center_p = QDesktopWidget().availableGeometry().center()\n geometry.moveCenter(center_p)\n self.move(geometry.topLeft())", "def move_up(self):\n if self.center.y < (self.screen_height - (self.height / 2)):\n self.center.y += 5", "def topic_wordcloud(top_model):\n\n cols = [color for name, color in mcolors.TABLEAU_COLORS.items()] # more colors: 'mcolors.XKCD_COLORS'\n\n cloud = WordCloud(stopwords=stop_words,\n background_color='white',\n width=2500,\n height=1800,\n max_words=20,\n colormap='tab10',\n color_func=lambda *args, **kwargs: cols[i],\n prefer_horizontal=1.0)\n\n topics = top_model.show_topics(formatted=False)\n\n fig, axes = plt.subplots(3, 3, figsize=(10,10), sharex=True, sharey=True)\n\n for i, ax in enumerate(axes.flatten()):\n fig.add_subplot(ax)\n topic_words = dict(topics[i][1])\n cloud.generate_from_frequencies(topic_words, max_font_size=300)\n plt.gca().imshow(cloud)\n plt.gca().set_title('Topic ' + str(i), fontdict=dict(size=16))\n plt.gca().axis('off')\n\n\n plt.subplots_adjust(wspace=0, hspace=0)\n plt.axis('off')\n plt.margins(x=0, y=0)\n plt.tight_layout()\n plt.show()", "def MoveCurrentSpace(self):\n if self.facing == 0:\n self.y -= 1\n elif self.facing == 1:\n self.x += 1\n elif self.facing == 2:\n self.y += 1\n elif self.facing == 3:\n self.x -= 1", "def move_down(self):\n if self.center.y > (self.height / 2):\n 
self.center.y -= 5", "def center(self, destination):\n self.move(destination=destination, origin=self.center)", "def updateWidthFromLabel(self):\n prevWidth = self.rect().width()\n width = self.text.boundingRect().width() + \\\n CurrentTheme.VERSION_LABEL_MARGIN[0] - 4\n r = self.rect()\n r.setX(r.x()+(prevWidth-width)/2.0)\n r.setWidth(width)\n self.setRect(r)\n self.update()", "def move_east(self):\r\n self.move(dx=1, dy=0)", "def gonio_axis_align():\n \n # Invert camera image, so dark pin on light image becomes a peak\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # High threshold, so AD centroid doesn't interpret background\n cam_8ThresholdOld = cam_8.stats4.centroid_threshold.get()\n cam_8.stats4.centroid_threshold.put(150)\n cam_7ThresholdOld = cam_7.stats4.centroid_threshold.get()\n cam_7.stats4.centroid_threshold.put(150)\n \n # HiMag\n # Copy ROI2 geometry (HiMag Mag3) to ROI4 and use ROI4 centroid plugin\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get())\n cam_8.roi4.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get())\n cam_8.roi4.size.x.put(cam_8.roi2.size.x.get() * 0.20)\n cam_8.roi4.size.y.put(cam_8.roi2.size.y.get())\n cam_8.roi4.min_xyz.min_x.put(cam_8.roi2.min_xyz.min_x.get() + cam_8.roi2.size.x.get()/2 - cam_8.roi4.size.x.get()/2)\n \n # LoMag\n # Copy ROI2 geometry (LoMag Mag1) to ROI4 and use ROI4 centroid plugin\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get())\n cam_7.roi4.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get())\n cam_7.roi4.size.x.put(cam_7.roi2.size.x.get() * 0.05)\n cam_7.roi4.size.y.put(cam_7.roi2.size.y.get())\n cam_7.roi4.min_xyz.min_x.put(cam_7.roi2.min_xyz.min_x.get() + cam_7.roi2.size.x.get()/2 - cam_7.roi4.size.x.get()/2)\n \n centerPinYHiMag0 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag0 = centroid_avg(cam_7.stats4)[1]\n yield from bps.mvr(gonio.o,180)\n time.sleep(2)\n centerPinYHiMag180 = centroid_avg(cam_8.stats4)[1]\n centerPinYLoMag180 = centroid_avg(cam_7.stats4)[1]\n centerPinYHiMag = (centerPinYHiMag0 + centerPinYHiMag180)/2\n centerPinYLoMag = (centerPinYLoMag0 + centerPinYLoMag180)/2\n\n centerPinOffsYHiMag = centerPinYHiMag - cam_8.roi4.size.y.get() / 2\n centerPinOffsYLoMag = centerPinYLoMag - cam_7.roi4.size.y.get() / 2\n \n # Correct Mag 3 (cam_8 ROI2)\n cam_8.roi2.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + centerPinOffsYHiMag)\n # Correct Mag 4 (cam_8 ROI1)\n cam_8.roi1.min_xyz.min_y.put(cam_8.roi2.min_xyz.min_y.get() + (cam_8.roi2.size.y.get()-cam_8.roi1.size.y.get())/2)\n \n # Correct Mag 1 (cam_7 ROI2)\n cam_7.roi2.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + centerPinOffsYLoMag)\n # Correct Mag 2 (cam_7 ROI3)\n cam_7.roi3.min_xyz.min_y.put(cam_7.roi2.min_xyz.min_y.get() + (cam_7.roi2.size.y.get()-cam_7.roi3.size.y.get())/2)\n\n # De-invert image\n cam_7.proc1.scale.put(-1)\n cam_8.proc1.scale.put(-1)\n \n # Set thresold to previous value\n cam_8.stats4.centroid_threshold.put(cam_8ThresholdOld)\n cam_7.stats4.centroid_threshold.put(cam_7ThresholdOld)\n \n return", "def update_current_word(self):\n self.current_word = self.current_row.pop(0) + \" \"", "def set_center(self,structure):\n for i,b in enumerate(self.bfs):\n b.set_center( structure[ self.LIST1[i] ] ) \n return", "def _align_toplevel_grid(self):\n\n # align origin with nearest multple of 128\n self.mins[0] -= self.mins[0] % 128\n self.mins[1] -= self.mins[1] % 128\n\n width = self.maxs[0] - self.mins[0]\n height = self.maxs[1] - self.mins[1]\n greatest_dim = max(width, height)\n nearest_pow_two = int(2 ** 
np.ceil(np.log2(greatest_dim)))\n width_adjustment = (nearest_pow_two - width)\n height_adjustment = (nearest_pow_two - height)\n\n self.maxs[0] += width_adjustment\n self.maxs[1] += height_adjustment", "def _positionWindow(self):\n\t\tif sys.platform=='win32':\n\t\t\tself.setGeometry(1050, 30, 375, 220)\n\t\telse:\n\t\t\tself.setGeometry(1050, 0, 375, 220)\n\t\t# self.move( (-screen.width()/2)+200, -screen.height()/2 )", "def __show_computed_alignment(self):\n success = False\n try:\n pcd = o3d.io.read_point_cloud(\n self.source_cloud\n )\n pcd.paint_uniform_color([0, 1, 0])\n pcd.transform(self.__compose_transformation())\n pcd.estimate_normals()\n self.computed_alignment_point_cloud_view.load_cloud(pcd)\n success = True\n except (FileNotFoundError, RuntimeError):\n QtWidgets.QMessageBox.warning(self, \"Error\",\n f\"Source point cloud is no longer available\"\n )\n self.source_cloud = \"\"\n self.__update_clickability()\n if success:\n try:\n pcd = o3d.io.read_point_cloud(\n self.target_cloud\n )\n pcd.paint_uniform_color([0, 0, 1])\n pcd.estimate_normals()\n self.computed_alignment_point_cloud_view.load_cloud(pcd)\n try:\n self.computed_alignment_point_cloud_view.show_window()\n except RuntimeError:\n pass\n except(FileNotFoundError, RuntimeError):\n QtWidgets.QMessageBox.warning(self, \"Error\",\n f\"Target point cloud is no longer available\"\n )\n self.source_cloud = \"\"\n self.__update_clickability()\n self.__save_context()", "def __word_is_near_border(bbox, margin, window_w, window_h):\n # [0][1] [2][3]\n #\n # [4][5] [6][7]\n if bbox[2] > window_w - margin or bbox[6] > window_w - margin:\n return True\n if bbox[5] > window_h - margin or bbox[7] > window_h - margin:\n return True\n return False", "def move_to_position2(self):", "def _update_w(self, idx):\n self.w = ((self._w - 0.4) * (self._generations - idx)) /\\\n (self._generations + 0.4)", "def restore_geometry(self):\n pref = SETTINGS.get('waveform_view.geometry')\n self.position_manager.set_geometry_preferences(pref)", "def moveWest(self):\n self._move('w', - Tile.HorizontalDifference)", "def delete(self):\n if not self.selection.isSelection(): return False\n\n # Save the current text\n self.saveText()\n\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx = sm1[1]\n self.edCursor.setPos(w1, cx)\n # Join words before and after selection\n w1.setString(w1.string[:cx] + w2.string[sm2[1]:])\n # Delete all intervening words, and w2\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n if (tl1 == tl2): # only delete from 1 line\n # delete words from wx1+1 to wx2 (incl.)\n for w in tl1.twords[wx1+1:wx2+1]:\n w.delete()\n del(tl1.twords[wx1+1:wx2+1])\n\n else: # deletion block covers >1 line\n # delete words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n w.delete()\n del(tl1.twords[wx1+1:])\n # delete all the intervening lines\n while True:\n tl = self.rsubject.nextLine(tl1)\n if (tl == tl2): break\n self.rsubject.deleteTLine(tl)\n\n # Move remaining words after w2 in tl2 to end of tl1\n for w in tl2.twords[wx2+1:]:\n tl1.insert(w)\n del(tl2.twords[wx2+1:])\n # Delete tl2\n self.rsubject.deleteTLine(tl2)\n\n self.selection.clearSelection()\n\n self.rsubject.renderShortened(w1)\n\n self.edCursor.setPos(w1, cx)\n return True", "def align(self):\n ...", "def wordMoversDistance(model, document1, document2):\n # If pyemd C extension is available, import it.\n # If pyemd is attempted to be used, but isn't 
installed, ImportError will be raised in wmdistance\n from pyemd import emd\n # Remove out-of-vocabulary words.\n len_pre_oov1 = len(document1)\n len_pre_oov2 = len(document2)\n document1 = [token for token in document1 if token in model]\n document2 = [token for token in document2 if token in model]\n diff1 = len_pre_oov1 - len(document1)\n diff2 = len_pre_oov2 - len(document2)\n if diff1 > 0 or diff2 > 0:\n print('Remove ' + str(diff1) + ' and ' + str(diff2) + ' OOV words from document 1 and 2 ('\n 'respectively).')\n return float('inf')\n\n if not document1 or not document2:\n print(\"At least one of the documents had no words that were in the vocabulary. Aborting (returning \"\n \"inf).\")\n return float('inf')\n\n dictionary = Dictionary(documents=[document1, document2])\n vocab_len = len(dictionary)\n\n if vocab_len == 1:\n # Both documents are composed by a single unique token\n return 0.0\n\n # Sets for faster look-up.\n docset1 = set(document1)\n docset2 = set(document2)\n\n # Compute distance matrix.\n distance_matrix = zeros((vocab_len, vocab_len), dtype=double)\n for i, t1 in dictionary.items():\n if t1 not in docset1:\n continue\n\n for j, t2 in dictionary.items():\n if t2 not in docset2 or distance_matrix[i, j] != 0.0:\n continue\n\n # Compute Euclidean distance between word vectors.\n distance_matrix[i, j] = distance_matrix[j, i] = sqrt(np_sum((model[t1] - model[t2]) ** 2))\n\n if np_sum(distance_matrix) == 0.0:\n # `emd` gets stuck if the distance matrix contains only zeros.\n print('The distance matrix is all zeros. Aborting (returning inf).')\n return float('inf')\n\n def nbow(document):\n d = zeros(vocab_len, dtype=double)\n nbow = dictionary.doc2bow(document) # Word frequencies.\n doc_len = len(document)\n for idx, freq in nbow:\n d[idx] = freq / float(doc_len) # Normalized word frequencies.\n return d\n\n # Compute nBOW representation of documents.\n d1 = nbow(document1)\n d2 = nbow(document2)\n\n # Compute WMD.\n return emd(d1, d2, distance_matrix)", "def delete_forward():\r\n point().delete_right_char()", "def center(self):\n return self.centralizer(self)", "def _place_elements(self, dt):\n self.root.size = Window.size\n center = Window.center\n self.rect.pos = center[0] + 100, center[1] + 100\n self.circle.pos = center[0] - 100, center[1] - 100", "def middleUp(self):", "def update(self):\n super().update()\n if self.center_y > TOP_LIMIT:\n self.center_y = BOTTOM_LIMIT\n if self.center_y < BOTTOM_LIMIT:\n self.center_y = TOP_LIMIT\n\n if self.center_x < 250:\n self.change_x = (0.2) * OBJECTS_SPEED\n elif self.center_x > SCREEN_WIDTH - 250:\n self.change_x = (-0.2) * OBJECTS_SPEED", "def set_grid(self, rows, columns):\n self.get_words()\n self.word_search_grid = []\n for r in range(rows):\n rlist = []\n for c in range(columns):\n rlist += [LetterField(self.centrepiece)]\n rlist[-1].entry.grid(row = r, column = c)\n self.word_search_grid += [rlist]\n self.customiser.destroy()\n \n self.action_button.config(text = \"Solve Word Search\", command = self.solve, state = NORMAL)", "def reprojectQcew(overwrite=False):\n\n\tif exists(qcew_2913) and not overwrite:\n\t\tprint '\\nstate plane qcew already exists, if you wish to'\n\t\tprint 'overwrite the existing file use the \"overwrite\" flag\\n'\n\t\treturn\n\n\tgeom_type = 'POINT'\n\ttemplate = src_qcew\n\tospn = arcpy.SpatialReference(2913)\n\tmanagement.CreateFeatureclass(dirname(qcew_2913),\n\t\tbasename(qcew_2913), geom_type, template, spatial_reference=ospn)\n\n\ti_cursor = da.InsertCursor(qcew_2913, '*')\n\n\ts_fields = 
['Shape@', '*']\n\twith da.SearchCursor(src_qcew, s_fields) as s_cursor:\n\t\t# replace point coordinates with geometry object in field\n\t\t# definition\n\t\tfields = list(s_cursor.fields)\n\t\tfields[1] = fields.pop(0)\n\n\t\tfor row in s_cursor:\n\t\t\tlist_row = list(row)\n\t\t\tlist_row[1] = list_row.pop(0)\n\t\t\td = OrderedDict(zip(fields, list_row))\n\n\t\t\tgeom = d['Shape@']\n\t\t\tgeom_2913 = geom.projectAs(ospn) \n\t\t\td['Shape@'] = geom_2913\n\t\t\td['POINT_X'] = geom_2913.firstPoint.X\n\t\t\td['POINT_Y'] = geom_2913.firstPoint.Y\n\n\t\t\twrite_row = [v for v in d.values()]\n\t\t\ti_cursor.insertRow(write_row)\n\n\tdel i_cursor", "def moveBackward(self):\n if self.onGround:\n self.vx = -4", "def move_west(self):\r\n self.move(dx=-1, dy=0)", "def delete_backward():\r\n point().delete_left_char()\r\n set_point(point().offset(-1))", "def to_center(self):\n self.ids.edit_area.to_center()", "def advance(self): \n self.center.x = self.center.x + self.velocity.dx\n self.center.y = self.center.y + self.velocity.dy", "def update_knot_spacing(self):\n knot_spacing = self.knot_spacing.text()\n if knot_spacing:\n self._cache[\"input\"][\"knot_spacing\"] = float(knot_spacing)\n self.reset_input_style_defaults()\n self.fit_continuum(True, sender=self.knot_spacing)\n self.draw_continuum(True)\n \n return None", "def align_with_image(self, other, inplace=False, truncate=False, margin=0):\n out = self if inplace else self.copy()\n rot = other.wcs.get_rot() - self.img.wcs.get_rot()\n if np.abs(rot) > 1.e-3:\n out.img = self.img.rotate(rot, reshape=True, regrid=True,\n flux=False, order=0, inplace=inplace)\n\n if truncate:\n y0 = margin - 1\n y1 = other.shape[0] - margin\n x0 = margin - 1\n x1 = other.shape[1] - margin\n pixsky = other.wcs.pix2sky([[y0, x0],\n [y1, x0],\n [y0, x1],\n [y1, x1]],\n unit=u.deg)\n pixcrd = out.img.wcs.sky2pix(pixsky)\n ymin, xmin = pixcrd.min(axis=0)\n ymax, xmax = pixcrd.max(axis=0)\n out.img.truncate(ymin, ymax, xmin, xmax, mask=False,\n unit=None, inplace=True)\n\n out.img._data = np.around(out.img._data).astype(int)\n # FIXME: temporary workaround to make sure that the data_header is\n # up-to-date when pickling the segmap. 
This should be detected direclty\n # in MPDAF.\n out.img.data_header = out.img.get_wcs_header()\n return out", "def center_on_screen(self):\n window_frame = self.frameGeometry()\n screen_center = QtGui.QDesktopWidget().availableGeometry().center()\n window_frame.moveCenter(screen_center)\n self.move(window_frame.topLeft())", "def middlemakevisible(self, pos):\n pass", "def word_form(self):\n if self.guessed[-1] in self.word:\n for index, letter in enumerate(self.word_underscored):\n if self.word[index] == self.guessed[-1]:\n self.word_underscored[index] = self.word[index]\n self.word_blank.set(str(self.word_underscored))", "def positioning(self):\n pass", "def forward(self, center, context):\n return self.word_embeds(center), self.word_embeds(context)", "def gridalign(self):\n self.position.x = int(round(self.position.x))\n self.position.y = int(round(self.position.y))\n self.position.z = int(round(self.position.z))\n\n if self.fan:\n self.fan = (int(round(self.fan[0])),int(round(self.fan[1])),int(round(self.fan[2])))\n\n bestDist = 2*9\n bestMatrix = makeMatrix(0,0,0)\n\n for compass in [0, 90, 180, 270]:\n for pitch in [0, 90, 180, 270]:\n for roll in [0, 90, 180, 270]:\n m = makeMatrix(compass,pitch,roll)\n dist = matrixDistanceSquared(self.matrix, m)\n if dist < bestDist:\n bestMatrix = m\n bestDist = dist\n\n self.matrix = bestMatrix\n self.positionOut()\n self.directionOut()", "def move_to_position1(self):" ]
[ "0.59176266", "0.5135103", "0.5036837", "0.50123316", "0.49908572", "0.49366313", "0.49278474", "0.4916772", "0.49037313", "0.4899014", "0.48803365", "0.48684108", "0.48680285", "0.48149422", "0.48149082", "0.47981972", "0.47772795", "0.47746402", "0.47744653", "0.47741532", "0.47708645", "0.47679976", "0.4761665", "0.47489038", "0.47456852", "0.47440505", "0.47393903", "0.4738984", "0.47277004", "0.471555", "0.4701432", "0.47013232", "0.46907085", "0.46724597", "0.46721345", "0.4667545", "0.46618962", "0.464002", "0.46339205", "0.46196368", "0.46047843", "0.4601607", "0.46013033", "0.4597009", "0.45961455", "0.4594426", "0.4594426", "0.459361", "0.456226", "0.45381138", "0.45242012", "0.4519119", "0.45168108", "0.4505568", "0.45023632", "0.449936", "0.44980246", "0.4497166", "0.44921425", "0.4488171", "0.448666", "0.44801056", "0.44693345", "0.44660097", "0.445583", "0.4453647", "0.44532302", "0.44442344", "0.44437087", "0.44391474", "0.44359377", "0.44348216", "0.44292927", "0.44244847", "0.44234017", "0.44096953", "0.4405594", "0.44037715", "0.4403004", "0.44014984", "0.43964627", "0.43888244", "0.4388073", "0.43801606", "0.4378273", "0.4377609", "0.4377538", "0.43759623", "0.43736145", "0.43690017", "0.43619806", "0.43575332", "0.43492913", "0.43472326", "0.4342388", "0.433874", "0.43365043", "0.43321356", "0.4328122", "0.43263945", "0.43242273" ]
0.0
-1
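The `metadata` field marks every record for a triplet objective over (`query`, `document`, `negatives`). Below is a minimal sketch, assuming a PyTorch setup with a user-supplied text encoder, of expanding one record into (anchor, positive, negative) triplets and scoring them with a margin-based triplet loss; the helper names and the margin value are illustrative and do not come from the dataset:

```python
import torch
import torch.nn.functional as F

def triplets_from_record(record):
    """Expand one record into (anchor, positive, negative) text triplets."""
    return [(record["query"], record["document"], neg)
            for neg in record["negatives"]]

def triplet_loss(encode, record, margin=0.2):
    """Margin-based triplet loss; `encode` maps a list of strings to a 2D
    tensor of embeddings. The margin value here is an arbitrary example."""
    anchors, positives, negatives = zip(*triplets_from_record(record))
    a = encode(list(anchors))
    p = encode(list(positives))
    n = encode(list(negatives))
    pos_sim = F.cosine_similarity(a, p)
    neg_sim = F.cosine_similarity(a, n)
    return torch.clamp(margin - pos_sim + neg_sim, min=0.0).mean()
```

If the `negative_scores` column holds similarity scores for the mined negatives, it could also be used to keep only the hardest negatives before building the triplets.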
Entrench at the level of the WordForm.
def entrench_word(self, cloud, paradigms, informativity, categorization,
                  unique_base):
    # Entrench within the WordForm's own cloud. Iterate over positions in
    # the WordForm (up to three Segments).
    for pos, seg in enumerate(self.segments):
        if pos < 3:
            # Iterate over features.
            for feat in seg.features:
                if uniform(0, 1) < probability_of_analogy:
                    # Collect other values of the feature across the cloud.
                    # Since this is the WordForm's own cloud, set all the
                    # weights to 1.
                    wv = [(e.segments[pos].features[feat], 1)
                          for e in cloud
                          if e.lemma == self.lemma
                          and e.case == self.case]
                    # Entrench the segment based on these values.
                    seg.entrench_feature(feat, wv,
                                         top_value = self_top_value,
                                         max_movement = self_max_movement)
    # Entrench within other clouds of the same paradigm.
    if paradigms:
        # Iterate over positions in the WordForm (up to three Segments).
        for pos, seg in enumerate(self.segments):
            if pos < 3:
                # Iterate over features.
                for feat in seg.features:
                    if uniform(0, 1) < (probability_of_analogy *
                                        paradigm_weight):
                        # Get the weight for each case.
                        weights = dict()
                        # If informativity is measured via the entropy
                        # method, the weight of a case is proportional to
                        # the entropy of the feature across all lemmas of
                        # that case.
                        if informativity == 'entropy':
                            weights = {c: entropy(feat, [e.segments[pos].\
                                                         features[feat]
                                                         for e in cloud
                                                         if e.case == c])
                                       for c in cases}
                        # If informativity is measured via a classification
                        # algorithm, the weight of a case is proportional to
                        # the performance of the classifier on lemmas within
                        # that case using just the current feature.
                        elif informativity == 'classification':
                            weights = {c: performance([e
                                                       for e in cloud
                                                       if e.case == c],
                                                      positions = [pos],
                                                      features = [feat],
                                                      method = categorization)
                                       for c in cases}
                        # If informativity is not measured, set the weights
                        # of all cases to 1.
                        elif informativity == 'none':
                            weights = {c: 1
                                       for c in cases}
                        # If paradigms are required to have a unique base,
                        # the winner takes all the weight.
                        if unique_base:
                            max_weight = max(weights.values())
                            for c in weights:
                                if weights[c] < max_weight:
                                    weights[c] = 0
                        # Collect other values of the feature across the
                        # cloud, and pair them with their weights.
                        wv = [(e.segments[pos].features[feat],
                               weights[e.case])
                              for e in cloud
                              if e.lemma == self.lemma
                              and e.case != self.case]
                        # Entrench the segment based on these values.
                        seg.entrench_feature(feat, wv,
                                             top_value = paradigm_top_value,
                                             max_movement = paradigm_max_movement)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _commit_level(self):\n assert self.current_level is not None, \"Cannot write a level with an empty name\"\n # Create a new level descriptor in the lump directory\n self.wad.add_lump(self.current_level, None)\n # Add the lumps to WAD file\n self.wad.add_lump('THINGS', self.lumps['THINGS'])\n self.wad.add_lump('LINEDEFS', self.lumps['LINEDEFS'])\n self.wad.add_lump('SIDEDEFS', self.lumps['SIDEDEFS'])\n self.wad.add_lump('VERTEXES', self.lumps['VERTEXES'])\n self.wad.add_lump('SECTORS', self.lumps['SECTORS'])\n self.lumps = {'THINGS':Things(), 'LINEDEFS':Linedefs(), 'VERTEXES':Vertexes(),'SIDEDEFS': Sidedefs(), 'SECTORS':Sectors()}", "def Wraith_Form(self):\t\t\n\t\tprint(self.name.Title() + \"Wraith\")", "def __change_level(self, level):\n self.level = level", "def clean_level_(self):\n try:\n # Get the verb categories of the taxonomy\n verb_cats = VerbCategory.objects.filter(taxonomy=self.taxonomy)\n except Taxonomy.DoesNotExist:\n raise Http404('The taxonomy does not exist!')\n else:\n\n # Check categories for the entered level value\n submitted_level = self.cleaned_data.get('level', None)\n\n # if updating, need to allow the original level value to be re-entered\n old_level = None if not self.old_category else self.old_category.level\n\n if submitted_level in [cat.level for cat in verb_cats.all()\\\n if cat.level != old_level]:\n culprit = verb_cats.get(level=submitted_level)\n raise forms.ValidationError(f'The verb category \"{culprit.title}\" \\\n already has this value!')\n\n return submitted_level", "def resetWordLevel(self, ID):\n\t\tcommand = \"UPDATE words SET level=0 WHERE ID=?\"\n\t\tparams = (ID,)\n\n\t\tself._run_command(command, params)", "def addLevel(self):\n pass", "def setLevel(self, level):\n self.lvl = level", "def change_level(self):\r\n error = False\r\n\r\n try:\r\n char_lvl = int(self.__char_lvl.get())\r\n except ValueError:\r\n error = True\r\n\r\n if error or char_lvl <= 0:\r\n self.__skill_points_indicator.configure(\r\n text=\"Level must be a positive whole number\")\r\n for skill_string in self.__skills:\r\n self.skill_up_disable(skill_string)\r\n self.skill_down_disable(skill_string)\r\n\r\n else:\r\n self.reset_all();\r\n self.__skill_points = 10 + 20 * (char_lvl - 1)\r\n self.__skill_points_indicator.configure(\r\n text=\"Available skillpoints: \" + str(\r\n self.__skill_points))\r\n for skill in self.__skills:\r\n self.check_skill_requirements(skill)", "def setLevel( self, lvl ):\n if isinstance( lvl, str ):\n return super().setLevel( lvl.upper() )\n else:\n return super().setLevel( lvl )", "def __editUnindent(self):\n self.activeWindow().unindentLineOrSelection()", "def addOtherForm(documentName, word, unique):\r\n formRef = \":form_\" + replace_form(word.word)\r\n if word.transliteration and word.transliteration.word != \"\" and word.transliteration.word != \" \":\r\n formRef += \"_\" + word.transliteration.word\r\n formRef += \"_\" + unique\r\n\r\n formRef += \" a ontolex:Form;\\n\"\r\n\r\n writtenRepRef = \" ontolex:writtenRep \\\"\"\r\n writtenRepRef += word.word + \"\\\"\" + word.writingLanguage\r\n\r\n if word.transliteration and word.transliteration.word != \"\":\r\n writtenRepRef += \", \\\"\" + word.transliteration.word + \"\\\"\" + word.transliteration.writingLanguage\r\n writtenRepRef += \" .\"\r\n\r\n frequencyRef = \"\"\r\n if word.frequencyDict:\r\n frequencyRef = \"\\n\"\r\n for corpus,frequency in word.frequencyDict.items():\r\n if frequency != 0:\r\n frequencyRef +=' frac:frequency [a e2model:' + corpus +'; rdf:value \"' + 
str(frequency) + '\" ] ;\\n'\r\n frequencyRef = frequencyRef[:len(frequencyRef) -2]\r\n frequencyRef += \".\"\r\n formEntry = formRef + writtenRepRef\r\n if frequencyRef != \".\":\r\n formEntry = formEntry[:len(formEntry) -1]\r\n formEntry += \";\"\r\n formEntry += frequencyRef\r\n\r\n with open(documentName, 'a') as f:\r\n f.write(formEntry)\r\n f.write(\"\\n\\n\")\r\n return", "def setWL(self, dn, w, l):\r\n # productive #frequent #onDrag\r\n if frequent: profprint();\r\n dn.SetWindow(w)\r\n dn.SetLevel(l)", "def print_level():\n print(\"\")\n\n def show_hide_word(word):\n \"\"\"show/hide finished/unfinished words\"\"\"\n if word not in current_level.finished_words:\n return \"*\" * len(word)\n return word\n\n current_level.layout.print_layout(\n show_hide_word,\n # Print unfinished words first with '*'\n set(current_level.words) - set(current_level.finished_words),\n )\n\n # level state\n print(\"\")\n print(\"Level: %d/%d\" % (current_level_index + 1, len(all_levels)))\n if current_level.bonus_words:\n bonus_words_status = \"Bonus words: %d/%d\" % (\n len(current_level.finished_bonus_words),\n len(current_level.bonus_words)\n )\n bonus_words_status += \" %s\" % \" \".join(\n change_case(word)\n if word in current_level.finished_bonus_words\n else \"*\" * len(word)\n for word in current_level.bonus_words\n )\n print(bonus_words_status)\n\n # characters\n print(\"\")\n print(\"Chars: %s\" % \" \".join(change_case(char) for char in current_level.chars))\n print(\"\")", "def __set_level(self,L):\n assert isinstance(L,level)\n self.__level = L", "def setLevel(self, level):\n self.level = level", "def reset_level(self, format_level):\n assert(format_level in pos_levels)\n self.level = format_level\n self.reset_format()", "def edit_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jd.Page(self.session, self.source)", "def edit(self):\n self.toplevel = tk.Toplevel()\n # ============================= Frame Setup\n # Get Frames for each side of the editor\n self.leftSide = tk.LabelFrame(self.toplevel, text=\"Leftside\")\n self.rightSide = tk.LabelFrame(self.toplevel, text=\"Rightside\")\n self.leftSide.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n self.rightSide.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n #### Build the leftside\n # Frame for controlling the title of node\n self.titleFrame = tk.LabelFrame(self.leftSide, text=\"Title\")\n self.titleFrame.pack(side=tk.TOP, fill=tk.X, expand=False)\n self.titleEntry = tk.Entry(self.titleFrame)\n self.titleEntry.pack(side=tk.LEFT, fill=tk.X, expand=True)\n self.titleUpdateButton = tk.Button(self.titleFrame, text=\"Update\", command=self.update_title_from_entry)\n self.titleUpdateButton.pack(side=tk.LEFT)\n # ============================= EditorFrame\n self.editorFrame = tk.Frame(self.leftSide)\n self.editorFrame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n self.textWidget = tk.Text(self.editorFrame)\n self.textWidget.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n # ============================= Status Bar\n self.statusFrame = tk.LabelFrame(self.leftSide, text=\"Status\", relief=tk.SUNKEN)\n self.statusFrame.pack(side=tk.TOP, fill=tk.X, expand=False)\n self.wordWrapStatus = tk.Menubutton(self.statusFrame)\n self.wordWrapStatus.pack()\n # ============================== Buttons on the right side of the editor\n self.buttonFrame = tk.Frame(self.rightSide)\n self.buttonFrame.pack(side=tk.TOP)\n self.saveButton = tk.Button(self.buttonFrame, text=\"save\", command=self.on_editor_save, bg=\"green\")\n self.exitButton = 
tk.Button(self.buttonFrame, text=\"exit\", command=self.on_editor_exit, bg=\"red\")\n self.saveButton.pack(side=tk.LEFT, fill=tk.X, expand=True)\n self.exitButton.pack(side=tk.LEFT, fill=tk.X, expand=True)\n # insert title of node into title entry\n self.titleEntry.insert(tk.END, self.title)\n # insert contents of node into textwidget\n self.textWidget.insert(tk.END, self.text)", "def __editProjectPEL(self):\n pel = e5App().getObject(\"Project\").getProjectDictionaries()[1]\n self.__editSpellingDictionary(pel)", "def incrementWordLevel(self, ID):\n\t\tcommand = \"UPDATE words SET level=level+1 WHERE ID=?\"\n\t\tparams = (ID,)\n\n\t\tself._run_command(command, params)", "def level_up(self):\n pass", "def level_down(self):\n if self.level > 1:\n self.level = self.level - 1\n self.update_level_buttons()", "def level_up(self):\n if self.level < self.max_level:\n self.level = self.level + 1\n self.update_level_buttons()", "def setWL(self,dn,w,l):\n #productive #frequent #onDrag\n if frequent: profprint();\n dn.SetWindow(w)\n dn.SetLevel(l)", "def level_upgrade(self, lvl):\n\t\tpass", "def update(self, event, level):\n\t\tDialog.update(self, event, level)\n\t\tif(self.index/SCROLL_CONSTANT >= len(self.text)):\n\t\t\tself.choosing = True", "def clean(self):\n return super(CharacterSkillForm, self).clean()", "def normal_form(self, w):\n return self.element_class(self, self._normalize_word(w))", "def level(self, level):\n\n self._level = level", "def level(self, level):\n\n self._level = level", "def level(self, level):\n\n self._level = level", "def level(self, L):\n assert isinstance(L, level)\n self.__level = L", "def disable_depth_of_field(self):\n self._render_passes.disable_depth_of_field_pass()", "def save_level(self):\n if self.project is not None:\n self.project.save_level()", "def beforeEditing(self):\n infotext, self.journal, self.parentApp.tmpTransC = (\n ledgeradd.check_trans_in_journal(\n settings=self.parentApp.S,\n transaction=self.parentApp.tmpTransC\n )\n )\n\n # set form title, color and infotext\n self.name = self.parentApp.S.gen_ledger_filename(\n absolute=True,\n year=self.parentApp.tmpTransC.get_date().year\n ) + ' - CHECK'\n\n if 'transaction is already cleared' in infotext:\n self.color = 'DANGER'\n else:\n self.color = 'WARNING'\n\n self.check_me.values = infotext.split('\\n')", "def setLevel(newLevel):\n Verbose.__level = max(-1, newLevel)", "def getTrueSentences(self, form):", "def addForms(documentName, lexWord):\r\n addCanonicalForm(documentName, lexWord)\r\n num = 1\r\n for otherForm in lexWord.otherFormsList:\r\n addOtherForm(documentName, otherForm, lexWord.unique_name)\r\n num += 1\r\n return", "def _higlightWord(self, bOn=True): #$NON-NLS-1$\r\n if self.currRange:\r\n if bOn:\r\n self.currRange.scrollIntoView()\r\n self.currRange.select()\r\n else:\r\n self.mshtmlEditControl.selectNone()", "def form_tweaks(self):\n pass", "def render_form():", "def word_form(self):\n if self.guessed[-1] in self.word:\n for index, letter in enumerate(self.word_underscored):\n if self.word[index] == self.guessed[-1]:\n self.word_underscored[index] = self.word[index]\n self.word_blank.set(str(self.word_underscored))", "def dummy():\n\t\t\tself.edit = True", "def _reject_rendering(self):\n\n curItem = self.tree.focus()\n parent = self.tree.parent(curItem)\n\n categories = ['approved', 'conflicts', 'suggestions', 'unknown', \\\n 'cldr',]\n if parent is '':\n #skip it\n pass\n else:\n if parent not in categories:\n curTerm = parent\n category = self.tree.parent(parent)\n else:\n curTerm 
= curItem\n category = parent\n if category == 'approved':\n #move from approved to unknown, with rendering deleted\n self.tree.item(curTerm, \\\n values=[self.tree.item(curTerm)['values'][0], ''])\n self.tree.move(curTerm, 'unknown', 'end')\n pass\n elif category == 'sugestions':\n if curTerm != curItem:\n self.tree.delete(curItem)\n if len(self.tree.get_children(curTerm)) < 1:\n self.tree.move(curTerm, 'unknown', 'end')\n # move curTrem from suggestions to unknown\n else: #if curTerm == curItem:\n self.tree.delete(*self.tree.get_children(curTerm))\n self.tree.move(curTerm, 'unknown', 'end')\n pass\n elif category == 'conflicts':\n if curTerm != curItem:\n self.tree.delete(curItem)\n if len(self.tree.get_children(curTerm)) == 1:\n curItem = self.tree.get_children(curTerm)[0]\n va = self.tree.item(curTerm)['values']\n vb = self.tree.item(curItem)['values']\n self.tree.item(curTerm, values=[va[0], vb[1]])\n self.tree.item(curTerm, tags='approved')\n self.tree.move(curTerm, 'approved', 'end')\n pass\n elif category == 'unknown':\n #ignore\n pass\n elif category == 'cldr':\n #ignore\n pass\n else:\n messagebox.showerror('_reject_rendering', \\\n 'Unknown category {}.'.format(category))\n\n self._make_suggestions()\n \n self.tree.tag_configure('approved', background='palegreen')\n self.tree.tag_configure('conflict', background='bisque')\n self.tree.tag_configure('suggestions', background='lightblue')\n self.tree.tag_configure('unknown', background='whitesmoke')\n self.tree.tag_configure('cldr', background='violet')\n self.update()", "def __level(self, *args, **kwargs):\n pass", "def name(self) -> Text:\n\n return \"elicitation_form\"", "def normalize_patch_names(self):\n # possibly busted -- don't use unless you know what you're doing\n self.set_all_levels(self.levels.copy())", "def mistake_enter_fields():\n win_mistake_enter_fields = Toplevel(root)\n win_mistake_enter_fields.title(\"Ошибка\")\n win_mistake_enter_fields.geometry('200x40')\n message = Label(win_mistake_enter_fields, text=\"Сначала заполните все поля!\", width=30, height=2)\n message.place(x=-5, y=0)", "def __editProjectPWL(self):\n pwl = e5App().getObject(\"Project\").getProjectDictionaries()[0]\n self.__editSpellingDictionary(pwl)", "def add_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n je.Editor(self.session, self.source.tbl, self.source)", "def m_open_form(self):\n return self._gl", "def setlevel(self, lvl):\n self.logger.setLevel(lvl)", "def toggle_word_window_mode(self):\n self.__sentence_mode = False", "def __showEditSpellingMenu(self):\n proj = e5App().getObject(\"Project\")\n projetOpen = proj.isOpen()\n pwl = e5App().getObject(\"Project\").getProjectDictionaries()[0]\n self.__editProjectPwlAct.setEnabled(projetOpen and bool(pwl))\n pel = e5App().getObject(\"Project\").getProjectDictionaries()[1]\n self.__editProjectPelAct.setEnabled(projetOpen and bool(pel))\n \n from QScintilla.SpellChecker import SpellChecker\n pwl = SpellChecker.getUserDictionaryPath()\n self.__editUserPwlAct.setEnabled(bool(pwl))\n pel = SpellChecker.getUserDictionaryPath(True)\n self.__editUserPelAct.setEnabled(bool(pel))", "def update_level_buttons(self):\n self.level_buttons[0].set_text(\"Level \" + str(self.level))\n if self.level <= 1:\n self.level_buttons[1].disable()\n else:\n self.level_buttons[1].enable()\n if self.level >= self.max_level:\n self.level_buttons[2].disable()\n else:\n self.level_buttons[2].enable()", "def setLevel(self, level):\n self._autoLevelFunction = None\n level = float(level)\n if 
level != self._level:\n self._level = level\n self._updateScenePrimitive()\n self._updated(Item3DChangedType.ISO_LEVEL)", "def set_indent_level(self, indent_level):\n self.indent_level = indent_level", "def add_level(self, level):\n return", "def __editIndent(self):\n self.activeWindow().indentLineOrSelection()", "def addIndents(self, prevLevel, nextLevel):\n for num in range(self.level - prevLevel):\n self.textLines[0] = u'<div>%s' % self.textLines[0]\n for num in range(self.level - nextLevel):\n self.textLines[-1] = u'%s</div>' % self.textLines[-1]\n return self.level", "def __editUndo(self):\n self.activeWindow().undo()", "def edit_document():", "def Deflect(self):\t\t\n\t\tprint(self.name.Title() + \"Deflect!\")", "def reset_step(self):\n # reset all levels\n for l in self.levels:\n l.reset_level()", "def __init__(self, parent=None):\n super(ShortAdj, self).__init__(parent)\n self.setupUi(self)\n self.parent = parent\n self.morphs = self.parent.morphs\n self.setGeometry(self.parent.geometry[0], self.parent.geometry[1], self.parent.geometry[2], self.parent.geometry[3])\n self.rustr = self.parent.rustr\n self.enstr = self.parent.enstr\n self.stem = self.rustr[:-2]\n self.labeltext = self.titleLbl.text()\n self.labeltext += \" \" + self.rustr.capitalize()\n self.titleLbl.setText(self.labeltext)\n self.shortCheck.clicked.connect(self.enableform)\n self.quitBttn.clicked.connect(self.cancel)\n self.backBttn.clicked.connect(self.backpage)\n self.acceptBttn.clicked.connect(self.accept)\n self.helpBttn.clicked.connect(self.displayhelp)\n self.stemEdit.firereturn.triggered.connect(self.updatestem)\n self.stemEdit.firefocus.triggered.connect(self.updatestem)\n self.enEdit.firereturn.triggered.connect(self.updateeng)\n self.enEdit.firefocus.triggered.connect(self.updateeng)\n self.rumascEdit.setText(self.rustr)\n self.stemEdit.setText(self.stem)\n self.enEdit.setText(self.enstr)\n if self.parent.adjpages[1]:\n self.adjdict.clear()\n self.adjdict = self.parent.adjdict.copy()\n if self.adjdict[\"disabled\"]:\n self.shortCheck.setChecked(False)\n self.stemEdit.setText(\"\")\n self.stemEdit.setEnabled(False)\n self.rumascEdit.setText(\"\")\n self.rumascEdit.setEnabled(False)\n self.rufemEdit.setText(\"\")\n self.rufemEdit.setEnabled(False)\n self.runuetEdit.setText(\"\")\n self.runuetEdit.setEnabled(False)\n self.ruplurEdit.setText(\"\")\n self.ruplurEdit.setEnabled(False)\n self.enEdit.setText(\"\")\n self.enEdit.setEnabled(False)\n else:\n self.shortCheck.setChecked(True)\n self.stemEdit.setText(self.adjdict[\"stem\"])\n self.rumascEdit.setText(self.adjdict[\"masc\"])\n self.rufemEdit.setText(self.adjdict[\"fem\"])\n self.runuetEdit.setText(self.adjdict[\"nuet\"])\n self.ruplurEdit.setText(self.adjdict[\"plur\"])\n self.enEdit.setText(self.adjdict[\"eng\"])\n return\n self.setdata()", "def set_level(self, level_name):\n\n self.current_level = level_name", "def reveal(self):\n self.root.deiconify()", "def validate(self, context):\n _logger.info(\"SpellDictionary EN validated\")\n self.dictionary = {\"hello\" , \"world\", \"welcome\", \"to\", \"the\", \"ipopo\", \"tutorial\"}", "def notify_wizard(self):\n if (self._wfield != None):\n self._wfield.update(self._conds or None)", "def handle_sentence(self, sentence, ctxinfo):\n global vocab\n global lower_attr\n prev_key = \"\"\n for w_i, w in enumerate(sentence):\n key = getattr(w, lower_attr)\n low_key = key.lower()\n forms = vocab.get( low_key, {} )\n form_entry = forms.get( key, [ 0, 0 ] )\n # a form entry has two counters, one for the occurrences and 
one for\n # the number of times it occurred at the beginning of a sentence. \n # Therefore, form_entry[0] >= form_entry[1]\n form_entry[ 0 ] = form_entry[ 0 ] + 1 \n # This form occurrs at the first position of the sentence or after a\n # period (semicolon, colon, exclamation or question mark). Count it\n if w_i == 0 or re.match( \"[:\\.\\?!;]\", prev_key ) :\n form_entry[ 1 ] = form_entry[ 1 ] + 1 \n forms[ key ] = form_entry\n vocab[ low_key ] = forms\n prev_key = key", "def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)", "def draw_level(self, DISP, level:int):\r\n windowsize = DISP.get_size()\r\n Level_Text_Obj = self.FontObj.render(\"LEVEL: \" + str(level), True, Colors.colors['WHITE'])\r\n Level_Text_rec = Level_Text_Obj.get_rect()\r\n Level_Text_rec.top = windowsize[1] - Level_Text_rec.height\r\n Level_Text_rec.left = windowsize[0] - Level_Text_rec.width\r\n DISP.blit(Level_Text_Obj, Level_Text_rec)", "def set_level(self, level: LogLevel):\n pass", "def unindent(self):\r\n editor = self.get_current_editor()\r\n if editor is not None:\r\n editor.unindent()", "def umrechnen(self):\n\n self.setHTML(\n self.strat.change(self.ui.betragInput.value(), self.ui.waehrungInput.text(), self.ui.zielInput.text()))\n\n self.ui.statusLabel.setText(\"OK\")", "def _update_model(self, idx):\n self._wfield.update(self._choices[idx][0])", "def inserir(self):\n self.new_window = tk.Toplevel(self.menu)\n Inserir(self.new_window)", "def SetPrintLevel(self, print_lvl):\n return _hypre.HypreADS_SetPrintLevel(self, print_lvl)", "def updateeng(self):\n self.enstr = self.enEdit.text()", "def fixFormLanguage( self, REQUEST ):\n if REQUEST is None:\n return\n\n lang = REQUEST.get( 'LOCALIZER_LANGUAGE' )\n map = Config.LanguageEntitiesMap.get( lang )\n if map is None:\n return\n\n for key, value in REQUEST.form.items():\n if type(value) in ( StringType, UnicodeType, ):\n for entity, char in map.items():\n value = value.replace( entity, char )\n REQUEST.form[ key ] = value\n\n if REQUEST.REQUEST_METHOD == 'PUT':\n value = REQUEST.other.get('BODY')\n if value is not None:\n for entity, char in map.items():\n value = value.replace( entity, char )\n REQUEST.other['BODY'] = value", "def setExpanded(self):", "def make_form(self):", "def create_negated_fenode(t_id):\n # Create focus <fenode>\n negated_fenode = chapter_input.new_tag('fenode')\n negated_fenode['idref'] = t_id\n negated.insert(0, negated_fenode)", "def lemmatize_fun(self):\n 
tokens = str(self.doc).split()\n cleaned_tokens = None\n if self.lemmatize_method == 'wordnet':\n cleaned_tokens = [self.lemmatizer.lemmatize(token) for token in tokens]\n else:\n cleaned_tokens = [self.lemmatizer.stem(token) for token in tokens]\n \n self.doc = ' '.join(cleaned_tokens)", "def this_word(self):\n self.append = self.add_to_current_word", "def modify_doc(doc):\n # Create Input controls\n topic_options = [\"topic_{0}\".format(x) for x in data.list_known_jsonschemas()]\n proto_options = [\"protobuf_{0}\".format(x) for x in data.list_known_protobufs()]\n modelselec = Select(title=\"Model Selection\", value=DEFAULT_UNSELECTED, options=topic_options + proto_options + [DEFAULT_UNSELECTED], name=MODEL_SELECTION)\n\n # Add a handler for input changes\n modelselec.on_change('value', lambda attr, old, new: modelselec_change())\n\n # construct what the user will see\n selec = widgetbox([modelselec])\n doc.add_root(selec)\n doc.theme = Theme(filename=\"theme.yaml\")", "def SetPrintLevel(self, print_lvl):\n return _hypre.HypreAMS_SetPrintLevel(self, print_lvl)", "def factor_select_initial(self):\n indicators = set()\n factor_levels = self.words.keys()\n for level, words in self.words.items():\n other_levels = set(factor_levels) - {level}\n w1 = set(words) # words associated with this factor level\n w2 = set() # words associated with the other factor levels\n for other_level in other_levels:\n w2.update(self.words.get(other_level))\n specific_words = w1 - w2\n indicators.update(specific_words)\n self.indicator_words = list(indicators)", "def text_level_normalizer(self, sentence: str, *args: Any, **kwargs: Any) -> str:\n text = sentence\n return text", "def writexml(self, writer, indent=\"\", addindent=\"\", newl=\"\"):\n if self.childNodes and len(self.childNodes) == 1 and\\\n self.childNodes[0].nodeType == xml.dom.minidom.Node.TEXT_NODE:\n writer.write(indent)\n KmlElement._original_element.writexml(self, writer)\n writer.write(newl)\n else:\n KmlElement._original_element.writexml(self, writer, indent, addindent, newl)", "def update(self, *_):\n if not self.input_main.edit_modified():\n return\n\n analyze_text = self.create_analysis()\n self.output_main[\"state\"] = tk.NORMAL\n self.output_main.delete(\"1.0\", tk.END)\n self.output_main.insert(\"1.0\", analyze_text)\n self.output_main[\"state\"] = tk.DISABLED\n self.input_main.edit_modified(False)", "def activate(widg, self):\n widg.set_sensitive(True)", "def entrench(self, cloud, paradigms, informativity, categorization,\n unique_base):\n self.entrench_word(cloud, paradigms, informativity, categorization,\n unique_base)\n self.entrench_segments(cloud)", "def __editRevert(self):\n self.activeWindow().revertToUnmodified()", "def set_indent_level(self, indent_level):\n super(NestedProtocol, self).set_indent_level(indent_level)\n self.proto.set_indent_level(indent_level)", "def _SetIndentation(self, level, bullet=False):\n if self._level < level:\n # Level increases are strictly 1 at a time.\n if level >= len(self._indent):\n self._indent.append(0)\n indent = self._INDENT\n if bullet and level > 1:\n # Nested bullet indentation is less than normal indent for aesthetics.\n indent -= self._BULLET_DEDENT\n self._indent[level] = self._indent[level - 1] + indent\n self._level = level", "def getLevel(self):\n return self.level", "def upgrage_level(self):\n print('level is upgraded on one point')\n self.level += 1", "def get_nd_form(e,text):\r\n if e.is_container():\r\n return NdForm.x\r\n if e.check_sc(WP_punct):\r\n if text == '.' 
or \\\r\n text == '?' or \\\r\n text == '!' or \\\r\n text == ':' or \\\r\n text == ';':\r\n return NdForm.terminator\r\n elif text == ',':\r\n return NdForm.comma\r\n return NdForm.x\r\n if e.is_verb():\r\n # \"sub\": set of terms in subject clause\r\n sub = e.get_subnodes([SR_agent,SR_topic,SR_exper])\r\n if e.v_iso_sub is not None:\r\n # this is subject-verb\r\n return NdForm.verbclause\r\n elif e.check_vp(VP_query):\r\n # explicitly marked as query\r\n return NdForm.queryclause\r\n elif len(sub) == 0:\r\n if e.check_vp(VP_gerund|VP_inf|VP_root):\r\n return NdForm.action\r\n # default is \"verb-clause\"\r\n return NdForm.verbclause\r\n if len(e.wrds) == 1:\r\n # a word. Default is \"X\", but look for useful cases.\r\n wrd = e.get_wrd(0)\r\n if vcb.check_prop(wrd,WP_query):\r\n return NdForm.queryword\r\n if vcb.check_prop(wrd,WP_n|WP_detw):\r\n return NdForm.n\r\n if vcb.check_prop(wrd,WP_conj):\r\n return NdForm.conjword\r\n if vcb.check_prop(wrd,WP_mod):\r\n return NdForm.mod\r\n # use default\r\n return NdForm.x\r\n # a phrase. possessive? (\"John's cat\")\r\n poss_contract = vcb.lkup(\"'s\",False)\r\n if poss_contract in e.wrds:\r\n return NdForm.n\r\n # compound modifier? (\"very happy\", \"sad and miserable\")\r\n is_mod = True\r\n for wrd in e.wrds:\r\n if not vcb.check_prop(wrd,WP_mod|WP_conj):\r\n is_mod = False\r\n break\r\n if is_mod:\r\n return NdForm.mod\r\n # conjunction phrase? (\"boys and girls\")\r\n for wrd in e.wrds:\r\n if vcb.check_prop(wrd,WP_conj):\r\n return NdForm.conjphrase\r\n break\r\n # remaining tests based on first word\r\n wrd = e.get_wrd(0)\r\n if vcb.check_prop(wrd,WP_query):\r\n # \"how many\", \"what time\"\r\n return NdForm.queryphrase\r\n if vcb.check_prop(wrd,WP_dets|WP_detw):\r\n return NdForm.n\r\n # default\r\n return NdForm.x", "def set_level(self, level):\n if self._level_fixed:\n raise NameError(\"set_level() can be called only once !\")\n\n try:\n Level(level)\n except ValueError:\n raise ValueError(\"LEVEL parameter must be a Level\")\n\n self._level = Level(level)\n self._level_fixed = True" ]
[ "0.51781356", "0.5135534", "0.51090235", "0.50966406", "0.5095478", "0.5094889", "0.5074766", "0.5069701", "0.49998125", "0.49493456", "0.4920468", "0.49131694", "0.4900337", "0.48817414", "0.48688623", "0.48541382", "0.48305783", "0.48229364", "0.4820413", "0.47837245", "0.47664204", "0.47626385", "0.47601262", "0.47592494", "0.47491702", "0.47484893", "0.47252023", "0.47114256", "0.4710655", "0.4710655", "0.4710655", "0.46999893", "0.46926138", "0.46908873", "0.46598247", "0.46583754", "0.4636882", "0.46365005", "0.4620651", "0.46168467", "0.46073818", "0.460141", "0.46013197", "0.4599708", "0.4596635", "0.45871633", "0.4573658", "0.45623907", "0.45602193", "0.45472473", "0.45401138", "0.45320717", "0.45270017", "0.4522435", "0.45143998", "0.4510027", "0.45029774", "0.44972354", "0.4494148", "0.44914928", "0.4465307", "0.44628152", "0.44610125", "0.44518396", "0.44462436", "0.44443515", "0.44332775", "0.4424836", "0.4424232", "0.4419413", "0.44125596", "0.44072598", "0.4399333", "0.43976733", "0.4397331", "0.4396916", "0.43941745", "0.43834662", "0.43830073", "0.43802628", "0.43770394", "0.4364449", "0.43457305", "0.4338046", "0.43351594", "0.43351385", "0.43348655", "0.4333952", "0.43328828", "0.43282384", "0.43258694", "0.43255988", "0.43146998", "0.43145707", "0.43122548", "0.43078637", "0.43075332", "0.43069327", "0.4306095", "0.43060932" ]
0.5143527
1
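
The metadata dict carried by each row above declares a triplet objective over the (query, document, negatives) fields. As a rough illustration of how one such row could be consumed, the sketch below pairs the query with its positive document and each negative in turn. The loader function, the field access, and the placeholder values are assumptions for illustration only, not part of the dump.

# Hypothetical consumer for one row of this dump; the field names follow the
# row layout (query, document, negatives), everything else is an assumed sketch.
def row_to_triplets(row):
    """Yield (anchor, positive, negative) triples as declared in the row metadata."""
    anchor = row["query"]          # natural-language docstring
    positive = row["document"]     # the matching function body
    for negative in row["negatives"]:   # non-matching function bodies
        yield anchor, positive, negative

# Minimal usage with generic placeholders standing in for real row content:
demo_row = {
    "query": "Docstring describing the target function.",
    "document": "def target(self): ...",
    "negatives": ["def other_a(self): ...", "def other_b(self): ..."],
}
for anchor, positive, negative in row_to_triplets(demo_row):
    print(anchor, "->", positive, "|", negative)
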
Entrench at the level of the Segment.
def entrench_segments(self, cloud): # Iterate over features. for feat in all_features: if feature_type(feat) == 'continuous': # Collect all values of the feature across the cloud. values = [(s.features[feat], 1) for e in cloud for p, s in enumerate(e.segments) if p < 3 and feat in s.features] # Iterate over Segments. for pos, seg in enumerate(self.segments): if pos < 3: if uniform(0, 1) < probability_of_feat_analogy: # Entrench the feature of the Segment based on # values across the cloud. seg.entrench_feature(feat, values, top_value = segment_top_value, max_movement = segment_max_movement)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __change_level(self, level):\n self.level = level", "def upgrage_level(self):\n print('level is upgraded on one point')\n self.level += 1", "def resetSagittalSegment(self):\r\n #research\r\n profprint()\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeYellow\")\r\n if sYellow == None :\r\n sYellow = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode2\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n sYellow.SetSliceVisible(0)\r\n sYellow.SetOrientationToSagittal()\r\n sw = slicer.app.layoutManager().sliceWidget(\"Yellow\")\r\n sw.fitSliceToBackground()\r\n sYellow.Modified()", "def setLevel(self, level):\n self.lvl = level", "def level(self, level):\n\n self._level = level", "def level(self, level):\n\n self._level = level", "def level(self, level):\n\n self._level = level", "def setLevel(self, level):\n self.level = level", "def setLevel(self, level):\n self._autoLevelFunction = None\n level = float(level)\n if level != self._level:\n self._level = level\n self._updateScenePrimitive()\n self._updated(Item3DChangedType.ISO_LEVEL)", "def addLevel(self):\n pass", "def __set_level(self,L):\n assert isinstance(L,level)\n self.__level = L", "def fix_elevation(self):\n selected_segment = \\\n self.controller.shared_data.obj_track.selected_segment_idx\n segment_idx = selected_segment[0]\n segment = \\\n self.controller.shared_data.obj_track.get_segment(segment_idx)\n\n if len(selected_segment) == 1:\n if segment.shape[0] > constants.fix_thr:\n try:\n self.controller.shared_data.obj_track.fix_elevation(\n segment_idx)\n except ValueError:\n self.controller.shared_data.obj_track.smooth_elevation(\n segment_idx)\n else:\n self.controller.shared_data.obj_track.smooth_elevation(\n segment_idx)\n\n plots.update_plots(\n self.controller.shared_data.obj_track,\n self.controller.shared_data.ax_track,\n self.controller.shared_data.ax_ele,\n self.controller.shared_data.ax_track_info,\n canvas=self.controller.shared_data.canvas)\n\n elif len(selected_segment) > 1:\n messagebox.showerror('Warning',\n 'More than one segment is selected')\n elif len(selected_segment) == 0:\n messagebox.showerror('Warning',\n 'No segment is selected')", "def setLevel(newLevel):\n Verbose.__level = max(-1, newLevel)", "def set(self, start, end, level):\n\n # Check errors, no-ops\n if start >= end:\n return\n\n # Determine levels at start (and before start)\n start_ix = self._trace.bisect_right(start) - 1\n prev_lvl = lvl = 0\n if start_ix >= 0:\n (t, lvl) = self._trace.peekitem(start_ix)\n # If we have no entry exactly at our start point, the\n # level was constant at this point before\n if start > t:\n prev_lvl = lvl\n # Otherwise look up previous level. 
Default 0 (see above)\n elif start_ix > 0:\n (_, prev_lvl) = self._trace.peekitem(start_ix-1)\n\n # Prepare start\n if prev_lvl == level:\n if start in self._trace:\n del self._trace[start]\n else:\n self._trace[start] = level\n\n # Remove all in-between states\n for time in list(self._trace.irange(start, end, inclusive=(False, False))):\n lvl = self._trace[time]\n del self._trace[time]\n\n # Add or remove end, if necessary\n if end not in self._trace:\n if lvl != level:\n self._trace[end] = lvl\n elif level == self._trace[end]:\n del self._trace[end]", "def level_up(self):\n pass", "def level(self, L):\n assert isinstance(L, level)\n self.__level = L", "def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)", "def set_level(self, x, level):\n return x * 10 ** ((level - self.ref_level) / 20)", "def __level(self, *args, **kwargs):\n pass", "def set_level(self):\n queue = []\n for node in self.node:\n if distance.euclidean(node.location, para.base) < node.com_ran:\n node.level = 1\n queue.append(node.id)\n while queue:\n for neighbor_id in self.node[queue[0]].neighbor:\n if not self.node[neighbor_id].level:\n self.node[neighbor_id].level = self.node[queue[0]].level + 1\n queue.append(neighbor_id)\n queue.pop(0)", "def atender(self):\n\n if self.enfila>0:\n \n self.enfila-=1\n self.fila.pop(0)", "def saving_throw_bonus_on_level(self, level):\n raise NotImplementedError", "def level_upgrade(self, lvl):\n\t\tpass", "def level_data(self):\n self.level(self.data)", "def _levelChanged(self, event):\n if event == items.Item3DChangedType.ISO_LEVEL:\n model = self.model()\n if model is not None:\n index = self.index(column=1)\n model.dataChanged.emit(index, index)", "def addLevel(self, Level_object):\n\n #### TODO: this part can be improved\n self.annotationLvl = Level_object.energy\n\n while self.annotationLvl < self._lastAnnotationPointHeight:\n self.annotationLvl += self._annotationBoxHeight\n\n self._lastAnnotationPointHeight = self.annotationLvl + self._annotationBoxHeight\n\n\n\n ######## TODO ^^^\n\n def addLevelLine(energy):\n if Level_object.highlighted == False:\n plt.plot([self._levelLineStartingPoint, self._levelLineEndingPoint], [energy, energy], 'k-', lw=Level_object.level_linewidth, color=Level_object.color, linestyle=Level_object.getLineStyle())\n else:\n plt.plot([self._levelLineStartingPoint, self._levelLineEndingPoint], [energy, energy], 'k-',\n lw=Level_object.highlight_linewidth, 
color=Level_object.color)\n\n def addSpin(spinValue, parityValue, energy):\n # h is additional height of splitted part of level line\n #TODO: annotation width should be scalled by using self.energyAnnotationStartingPoint etc. (!!!)\n\n if parityValue=='-' or parityValue=='+':\n spinAnnotationString = r'${}^{}$'.format(spinValue, parityValue)\n else:\n spinAnnotationString = ''\n\n plt.plot([self._spinAnnotationStartingPoint,self._spinAnnotationEndingPoint], [self.annotationLvl, self.annotationLvl], 'k-', lw=Level_object.level_linewidth)\n plt.plot([self._spinAnnotationStartingPoint,self._spinAnnotationEndingPoint], [self.annotationLvl, self.annotationLvl], 'k-', lw=Level_object.level_linewidth)\n plt.plot([self._spinAnnotationEndingPoint,self._levelLineStartingPoint], [self.annotationLvl, energy], 'k-', lw=Level_object.level_linewidth)\n plt.text(x=self._spinAnnotationTextPoint, y=(self.annotationLvl)+0.01*self.schemeHeight, s=spinAnnotationString, size=self.fontSize, horizontalalignment='center')\n\n\n def addEnergy(energyValue, energy):\n # h is additional height of splitted part of level line\n plt.plot([self._energyAnnotationStartingPoint, self._energyAnnotationEndingPoint], [self.annotationLvl, self.annotationLvl], 'k-', lw=Level_object.level_linewidth)\n plt.plot([self._levelLineEndingPoint,self._energyAnnotationStartingPoint], [energy, self.annotationLvl], 'k-', lw=Level_object.level_linewidth)\n plt.text(x=self._energyAnnotationTextPoint, y=self.annotationLvl+0.01*self.schemeHeight, s=energyValue, size=self.fontSize, horizontalalignment='center')\n\n addLevelLine(energy=Level_object.energy)\n addSpin(spinValue=Level_object.spinValue, parityValue=Level_object.parity, energy=Level_object.energy)\n addEnergy(energyValue=str(Level_object.energy), energy=Level_object.energy)", "def segment(data):", "def add_level(self, level):\n return", "def level_up(self):\n if self.level < self.max_level:\n self.level = self.level + 1\n self.update_level_buttons()", "def entrench(self, cloud, paradigms, informativity, categorization,\n unique_base):\n self.entrench_word(cloud, paradigms, informativity, categorization,\n unique_base)\n self.entrench_segments(cloud)", "def get_level(self, level):\n return", "def setLevel( self, lvl ):\n if isinstance( lvl, str ):\n return super().setLevel( lvl.upper() )\n else:\n return super().setLevel( lvl )", "def setEventLevel(self, Level, stringOnly=0):\n\n msg = \"EVENt:LEVel \" + str(Level)\n\n if stringOnly==0:\n self.sendMessage(msg)\n else:\n return msg", "def CalcStopLevel(self,entryLevel,tradeSignal):\r\n pass", "def segment_counter(self, _):\n raise NotImplementedError(\n \"We do not support externally altering the segment counter\")", "def change_level(self):\r\n error = False\r\n\r\n try:\r\n char_lvl = int(self.__char_lvl.get())\r\n except ValueError:\r\n error = True\r\n\r\n if error or char_lvl <= 0:\r\n self.__skill_points_indicator.configure(\r\n text=\"Level must be a positive whole number\")\r\n for skill_string in self.__skills:\r\n self.skill_up_disable(skill_string)\r\n self.skill_down_disable(skill_string)\r\n\r\n else:\r\n self.reset_all();\r\n self.__skill_points = 10 + 20 * (char_lvl - 1)\r\n self.__skill_points_indicator.configure(\r\n text=\"Available skillpoints: \" + str(\r\n self.__skill_points))\r\n for skill in self.__skills:\r\n self.check_skill_requirements(skill)", "def _depth_to_segment(self, depth):\r\n segment = depth.clone()\r\n segment[segment > 0] = 1\r\n return segment", "def _commit_level(self):\n assert self.current_level 
is not None, \"Cannot write a level with an empty name\"\n # Create a new level descriptor in the lump directory\n self.wad.add_lump(self.current_level, None)\n # Add the lumps to WAD file\n self.wad.add_lump('THINGS', self.lumps['THINGS'])\n self.wad.add_lump('LINEDEFS', self.lumps['LINEDEFS'])\n self.wad.add_lump('SIDEDEFS', self.lumps['SIDEDEFS'])\n self.wad.add_lump('VERTEXES', self.lumps['VERTEXES'])\n self.wad.add_lump('SECTORS', self.lumps['SECTORS'])\n self.lumps = {'THINGS':Things(), 'LINEDEFS':Linedefs(), 'VERTEXES':Vertexes(),'SIDEDEFS': Sidedefs(), 'SECTORS':Sectors()}", "def add_point(self):\n self.total_score = self.total_score + 1\n if self.total_score // 10 == 0:\n self.level = self.total_score / 10", "def __drawSegment(self, p1, p2, color):\n pygame.draw.aaline(self.screen, color, p1, p2)", "def Refine(self, level=2):\n\n from scipy.spatial import Delaunay\n try:\n from Florence.QuadratureRules.EquallySpacedPoints import EquallySpacedPoints, EquallySpacedPointsTri, EquallySpacedPointsTet\n from Florence.FunctionSpace import Line, Tri, Quad, Tet, Hex\n from Florence.FunctionSpace.OneDimensional.Line import Lagrange\n from Florence.Tensor import remove_duplicates_2D\n except ImportError:\n raise ImportError(\"This functionality requires florence's support\")\n\n\n # WE NEED AN ACTUAL NDIM\n # ndim = self.InferSpatialDimension()\n if self.element_type == \"line\":\n ndim = 1\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n ndim = 2\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n ndim = 3\n\n mesh = deepcopy(self)\n if mesh.InferPolynomialDegree() > 1:\n mesh = mesh.GetLinearMesh(remap=True)\n\n C = level - 1\n p = C+1\n # CActual = self.InferPolynomialDegree() - 1\n CActual = 0 # MUST BE ALWAYS ZERO\n if self.element_type == \"line\":\n nsize = int(C+2)\n nsize_2 = int(CActual+2)\n elif self.element_type == \"tri\":\n nsize = int((p+1)*(p+2)/2.)\n nsize_2 = int((CActual+2)*(CActual+3)/2.)\n elif self.element_type == \"quad\":\n nsize = int((C+2)**2)\n nsize_2 = int((CActual+2)**2)\n elif self.element_type == \"tet\":\n nsize = int((p+1)*(p+2)*(p+3)/6.)\n nsize_2 = int((CActual+2)*(CActual+3)*(CActual+4)/6.)\n elif self.element_type == \"hex\":\n nsize = int((C+2)**3)\n nsize_2 = int((CActual+2)**3)\n else:\n raise ValueError(\"Element type not undersood\")\n\n if self.element_type == \"line\":\n SingleElementPoints = EquallySpacedPoints(ndim+1,C).ravel()\n\n elif self.element_type == \"quad\" or self.element_type == \"hex\":\n SingleElementPoints = EquallySpacedPoints(ndim+1,C)\n # RE-ARANGE NODES PROVIDED BY EquallySpacedPoints\n if ndim == 2:\n node_aranger = np.lexsort((SingleElementPoints[:,0],SingleElementPoints[:,1]))\n elif ndim == 3:\n node_aranger = np.lexsort((SingleElementPoints[:,0],SingleElementPoints[:,1],SingleElementPoints[:,2]))\n SingleElementPoints = SingleElementPoints[node_aranger,:]\n\n elif self.element_type == \"tri\":\n SingleElementPoints = EquallySpacedPointsTri(C)\n simplices = Delaunay(SingleElementPoints).simplices.copy()\n nsimplices = simplices.shape[0]\n\n elif self.element_type == \"tet\":\n SingleElementPoints = EquallySpacedPointsTet(C)\n simplices = Delaunay(SingleElementPoints).simplices.copy()\n nsimplices = simplices.shape[0]\n\n\n Bases = np.zeros((nsize_2,SingleElementPoints.shape[0]),dtype=np.float64)\n\n if mesh.element_type == \"line\":\n smesh = Mesh()\n smesh.Line(n=level)\n simplices = smesh.elements\n nsimplices = smesh.nelem\n\n hpBases = Line.Lagrange\n for i in 
range(SingleElementPoints.shape[0]):\n Bases[:,i] = hpBases(CActual,SingleElementPoints[i])[0]\n\n elif mesh.element_type == \"tri\":\n hpBases = Tri.hpNodal.hpBases\n for i in range(SingleElementPoints.shape[0]):\n Bases[:,i] = hpBases(CActual,SingleElementPoints[i,0],SingleElementPoints[i,1],\n EvalOpt=1,equally_spaced=True,Transform=1)[0]\n\n elif mesh.element_type == \"quad\":\n smesh = Mesh()\n smesh.Rectangle(element_type=\"quad\", nx=level, ny=level)\n simplices = smesh.elements\n nsimplices = smesh.nelem\n\n hpBases = Quad.LagrangeGaussLobatto\n for i in range(SingleElementPoints.shape[0]):\n Bases[:,i] = hpBases(CActual,SingleElementPoints[i,0],SingleElementPoints[i,1])[:,0]\n\n elif mesh.element_type == \"tet\":\n hpBases = Tet.hpNodal.hpBases\n for i in range(SingleElementPoints.shape[0]):\n Bases[:,i] = hpBases(CActual,SingleElementPoints[i,0],SingleElementPoints[i,1],\n SingleElementPoints[i,2],EvalOpt=1,equally_spaced=True,Transform=1)[0]\n\n elif mesh.element_type == \"hex\":\n smesh = Mesh()\n smesh.Parallelepiped(element_type=\"hex\", nx=level, ny=level, nz=level)\n simplices = smesh.elements\n nsimplices = smesh.nelem\n\n hpBases = Hex.LagrangeGaussLobatto\n for i in range(SingleElementPoints.shape[0]):\n Bases[:,i] = hpBases(CActual,SingleElementPoints[i,0],SingleElementPoints[i,1],SingleElementPoints[i,2])[:,0]\n\n\n nnode = nsize*mesh.nelem\n nelem = nsimplices*mesh.nelem\n X = np.zeros((nnode,mesh.points.shape[1]),dtype=np.float64)\n T = np.zeros((nelem,mesh.elements.shape[1]),dtype=np.int64)\n\n for ielem in range(mesh.nelem):\n X[ielem*nsize:(ielem+1)*nsize,:] = np.dot(Bases.T, mesh.points[mesh.elements[ielem,:],:])\n T[ielem*nsimplices:(ielem+1)*nsimplices,:] = simplices + ielem*nsize\n\n # REMOVE DUPLICATES\n repoints, idx_repoints, inv_repoints = remove_duplicates_2D(X, decimals=10)\n unique_reelements, inv_reelements = np.unique(T,return_inverse=True)\n unique_reelements = unique_reelements[inv_repoints]\n reelements = unique_reelements[inv_reelements]\n reelements = reelements.reshape(nelem,mesh.elements.shape[1])\n\n self.__reset__()\n self.elements = np.ascontiguousarray(reelements)\n self.points = np.ascontiguousarray(repoints)\n self.element_type = mesh.element_type\n self.nelem = self.elements.shape[0]\n self.nnode = self.points.shape[0]\n\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n self.GetEdges()\n self.GetBoundaryEdges()\n elif self.element_type == \"tet\" or self.element_type == \"hex\":\n self.GetFaces()\n self.GetBoundaryFaces()\n self.GetBoundaryEdges()\n\n if CActual > 0:\n sys.stdout = open(os.devnull, \"w\")\n self.GetHighOrderMesh(p=CActual+1)\n # self.GetHighOrderMesh(p=CActual+1, equally_spaced=equally_spaced, check_duplicates=False)\n sys.stdout = sys.__stdout__", "def set_level(self, level: LogLevel):\n pass", "def SetTriacLevel(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def draw_level(self, DISP, level:int):\r\n windowsize = DISP.get_size()\r\n Level_Text_Obj = self.FontObj.render(\"LEVEL: \" + str(level), True, Colors.colors['WHITE'])\r\n Level_Text_rec = Level_Text_Obj.get_rect()\r\n Level_Text_rec.top = windowsize[1] - Level_Text_rec.height\r\n Level_Text_rec.left = windowsize[0] - Level_Text_rec.width\r\n DISP.blit(Level_Text_Obj, Level_Text_rec)", "def print_level(self, node , level):\n if node is None and level == 1: \n self.level.append(None)\n elif node != None:\n # set 
the root level as the base case\n if level == 1: \n self.level.append(node)\n elif level > 1 : \n self.print_level(node.left , level - 1) \n self.print_level(node.right , level - 1) \n return self.level", "def level(levelValue):\n def _decoration(fcn):\n fcn.level = levelValue\n return fcn\n return _decoration", "def postSI(self):\n # for cell in self.cells:\n # cell.resetTotOrdFlux()\n self.depth = 0", "def extend(self):\n # -1 in the segments means that starts counting in the end of the list\n self.add_segment(self.segments[-1].position())", "def depth(self, v):\n # method here", "async def loglevel(self, ctx, level):\n level = level.lower()\n assert level in LEVELS\n await self.bot.log.change_level(LEVELS[level], ctx.author.name)\n await ctx.send(f\"Set log level to {level.upper()}\")", "def level_down(self):\n if self.level > 1:\n self.level = self.level - 1\n self.update_level_buttons()", "def increment_level(self):\n self.level += 1\n styled_set_label_text(self.level_display, \"Level: \"+str(self.level))\n glib.timeout_add(2000//(self.level+3), self.make_timer(self.level))", "def set_low(self,chip,line):\n self.output(chip,line, LOW)", "def level(self, level: int):\n if level is None:\n raise ValueError(\"Invalid value for `level`, must not be `None`\")\n\n self._level = level", "def unindent(self):\n self.x_pos -= 10", "def set_contrast(level):\n send_command(0x81)\n send_command(level)", "def __assign_level(vertex: \"Vertex\", level, already_assigned: \"List[Vertex]\"):\n vertex.level = level\n already_assigned.append(vertex)\n for neighbour in vertex.neighbours:\n if neighbour not in already_assigned:\n __assign_level(neighbour, level + 1, already_assigned)", "def set_level(self, level):\n if self._level_fixed:\n raise NameError(\"set_level() can be called only once !\")\n\n try:\n Level(level)\n except ValueError:\n raise ValueError(\"LEVEL parameter must be a Level\")\n\n self._level = Level(level)\n self._level_fixed = True", "def reset_level(self, format_level):\n assert(format_level in pos_levels)\n self.level = format_level\n self.reset_format()", "def level(self, lev):\n if self.is_disposed:\n raise ObjectDisposedException(\"DimmableLightComponent\")\n\n if lev < self.__min:\n raise IndexError(\"Level cannot be less than min_level.\")\n\n if lev > self.__max:\n raise IndexError(\"Level cannot be greater than max_level.\")\n\n on_before_change = self.is_on\n self.__pin.pwm = lev\n on_after_change = self.is_on\n evt = LightLevelChangeEvent(lev)\n self.on_light_level_changed(evt)\n if on_before_change != on_after_change:\n evt2 = LightStateChangeEvent(on_after_change)\n self.on_light_state_changed(evt2)", "def addIndents(self, prevLevel, nextLevel):\n for num in range(self.level - prevLevel):\n self.textLines[0] = u'<div>%s' % self.textLines[0]\n for num in range(self.level - nextLevel):\n self.textLines[-1] = u'%s</div>' % self.textLines[-1]\n return self.level", "def set_level(self, level):\n\n self.sh.setLevel(level)\n\n if self.fh:\n self.fh.setLevel(level)", "def set_level(self, level_name):\n\n self.current_level = level_name", "def prep_level(self):\r\n\t\tlevel_str=\"Level: \"+format(self.stats.level)\r\n\t\tself.level_image=self.font.render(level_str, True,\r\n\t\t\tself.text_color, self.ai_settings.bg_color)\r\n\r\n\t\t#Position the level below the score.\r\n\t\tself.level_rect=self.level_image.get_rect()\r\n\t\tself.level_rect.centerx=self.screen_rect.centerx*1.5\r\n\t\tself.level_rect.top=self.score_rect.top", "def level(self, value):\n self._level = 
mdraid.RAID_levels.raidLevel(value) # pylint: disable=attribute-defined-outside-init", "def raise_sealevel(self, perc=35):\n maxheight = self.elevation.max()\n self.elevation -= np.percentile(self.elevation, perc)\n self.elevation *= maxheight / self.elevation.max()\n self.elevation[-1] = 0", "def update(self):\n self.setVector(0.15, 0.0)", "def lower_covers(self, x):", "def volume_up(self):\n if self.volume_level < 1:\n self.set_volume_level(min(1, self.volume_level + 0.1))", "def spine(self):", "def update_fuel_level(self, new_level):\n if new_level <= self.fuel_capacity:\n self.fuel_level = new_level\n else:\n print(\"The tank can't hold that much!\")", "def subAgility(self):\n\t\tself.agility -= 1\n\t\tif self.agility < -10:\n\t\t\tself.agility = -10", "def chase_laser(self):\r\n print(\"Meeeeow\\n\")", "def update_fuel_level(self, new_level):\r\n if new_level <= self.fuel_capacity:\r\n self.fuel_level = new_level\r\n else:\r\n print(\"The tank can't hold that much!\")", "def setLevelReached(self, level):\n \n if(0 < level and level < 6 and self.__levelReached < level):\n self.__levelReached = level\n self.savePlayerInfo()\n return True\n else:\n return False\n print\"level reached: \" + self.__levelReached", "def rectChangeLevel(rect, oldlev, newlev):\n return (2**(oldlev - newlev)) * rect", "def check_points_and_level_up(self):\n if self.points > 20 * self.level:\n self.level += 1\n self.refresh_rate = self.refresh_rate * 0.75", "def dump_step(self,status):\n super(vanderpol_output,self).dump_step(status)\n\n L = self.level\n\n oldcol = self.sframe\n # self.sframe = self.ax.scatter(L.uend.pos.values[0],L.uend.pos.values[1],L.uend.pos.values[2])\n self.sframe = self.ax.scatter(L.uend.values[0],L.uend.values[1])\n # Remove old line collection before drawing\n # if oldcol is not None:\n # self.ax.collections.remove(oldcol)\n plt.pause(0.00001)\n\n return None", "def setTriggerLevel(self, Level, stringOnly=0):\n\n msg = \"TRIGger:SEQuence:LEVel \" + str(Level)\n\n if stringOnly==0:\n self.sendMessage(msg)\n else:\n return msg", "def _SetIndentation(self, level, bullet=False):\n if self._level < level:\n # Level increases are strictly 1 at a time.\n if level >= len(self._indent):\n self._indent.append(0)\n indent = self._INDENT\n if bullet and level > 1:\n # Nested bullet indentation is less than normal indent for aesthetics.\n indent -= self._BULLET_DEDENT\n self._indent[level] = self._indent[level - 1] + indent\n self._level = level", "def analyze_tier(self, d, level, lump=False):\n # print(d)\n constraint = d[\"constraint\"]\n code = \"x\"\n if constraint in (\"Symbolic_Subdivision\", \"Symbolic Subdivision\"):\n code = \"s\"\n elif constraint in (\"Symbolic_Association\", \"Symbolic Association\"):\n code = \"a\"\n elif constraint in (\"Time_Subdivision\", \"Time Subdivision\"):\n if lump:\n code = \"s\"\n else:\n code = \"t\"\n elif constraint == \"Included_In\":\n if lump:\n code = \"s\"\n else:\n code = \"i\"\n elif constraint == \"root\":\n code = \"R\"\n elif constraint == \"\":\n code = \"x\"\n elif constraint is None:\n code = \"x\"\n else:\n print(repr(constraint))\n 0 / 0\n self.fingerprint += code\n children = self.tier_hierarchy[d[\"id\"]]\n if children == []:\n return\n self.fingerprint += \"[\"\n for child in children:\n self.analyze_tier(child, level + 1, lump=lump)\n self.fingerprint += \"]\"", "def DrawSegment(self, p1, p2, color):\r\n pygame.draw.aaline(self.surface, color.bytes, p1, p2)", "def svn_info_t_depth_set(svn_info_t_self, svn_depth_t_depth): # real signature 
unknown; restored from __doc__\n pass", "async def volume(self, ctx, level:int):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n if 0 <= level <= 200:\n voice.source.volume = level / 100\n await ctx.send(f\"Adjusted volume to {level}%.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not playing anything right now.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")", "def enter_function(_):\n current = self.current_val()\n if current != \"..\":\n self.down(current)\n else:\n self.up()", "def setTraceLevel (self,level):\n if (type(level) == type(\"\") or type(level) == type(u\"\")):\n if (level):\n level = self._coerceLevel(level)\n self.traceLevel = level\n #endIf\n elif (type(level) == type(0)):\n if (self._isTraceLevel(level)):\n self.traceLevel = level\n else:\n # level is a number but not in the range of a trace level.\n raise TraceLevelException(\"Invalid trace level: %s Valid trace levels are defined by the Level class.\" % level)\n #endIf\n else:\n # Odd case where level is unexpected type\n raise TraceLevelException(\"Trace level must be either a string or an integer. Use levels defined by the Level class.\")\n #endIf", "def draw(self, base, level):\n\n a = base.a\n b = base.b\n\n if level > 0:\n delta = base.b - base.a\n px = a.x + delta.x / 3\n py = a.y + delta.y / 3\n rx = a.x + 2 * delta.x / 3\n ry = a.y + 2 * delta.y / 3\n p = Point(px, py)\n r = Point(rx, ry)\n q = Point(rx, ry)\n q.rotate_deg(60, p)\n self.draw(Line(a,p), level-1)\n self.draw(Line(p,q), level-1)\n self.draw(Line(q,r), level-1)\n self.draw(Line(r,b), level-1)\n else:\n self.container.window.create_line(a.x, a.y, b.x, b.y)", "def prep_level(self):\n level_str = f\"Level {self.game.level}\"\n self.level_image = self.font.render(level_str, True, self.text_color)\n\n # Position level below the score.\n self.level_rect = self.level_image.get_rect()\n self.level_rect.right = self.score_rect.right\n self.level_rect.top = self.score_rect.bottom + 10", "def measureUnfoldedLevel(ds, verbose = False):\n points = getIndexedTraces(ds)\n from sklearn.cluster import KMeans\n x = points[points[:,0] > 150, 1].reshape((-1,1))\n # remove outliers \n std = np.std(x)\n mean = np.mean(x)\n x = x[x > mean - 4*std].reshape((-1,1)) \n # ML clustering\n kmeans = KMeans(n_clusters=3, random_state=0).fit(x)\n x_cluster = kmeans.predict(x)\n means = [ np.mean(x[x_cluster == i]) for i in range(3)]\n means = sorted(means) \n level_one = means[1]\n if np.abs(level_one) > 0.35 or np.abs(level_one) < 0.1:\n print(\"Warning! 
Unfolded level detector in unexpected range: \",leven_one)\n if verbose: #feedback\n pyplot.figure()\n pyplot.hist2d(points[:,0], points[:,1], \n bins=(70*2, 50*2),\n range = [[0, 700], [-0.45, 0.05]],\n cmax = 100000/4 # clip max\n )\n pyplot.plot([0,700], [level_one]*2, 'r--')\n return level_one", "def async_set_level(self, value: int) -> None:\n value = max(0, min(255, value))\n self._position = int(value * 100 / 255)\n self.async_write_ha_state()", "def upgrade(self):\n if self.level < len(self.tower_images):\n self.level_up_animation = True\n self.level += 1\n self.base_damage += 3\n self.damage = self.base_damage\n\n #Since level does not upgrade in menu we have to manually do it here\n self.menu.tower_level += 1", "def setInfo(self, reason=\"10\"):\n self.segmentA.setInfo(reason)", "def upper_covers(self, x):", "def change_level(self):\n new_level = GameLevel[self.scoreboard.current_level]\n self.greeterboard.reset(level=new_level, msg='')\n self.end_game(i18n.OUT_MSG_NEW_GAME)\n self.init_game_metrics()", "def level(self, level):\n allowed_values = [\"INFO\", \"WARNING\", \"SEVERE\", \"FINE\", \"FINER\", \"FINEST\"]\n if level not in allowed_values:\n raise ValueError(\n \"Invalid value for `level` ({0}), must be one of {1}\"\n .format(level, allowed_values)\n )\n\n self._level = level", "def level(self):\n return self.init_v[2]", "def _on_lane_invasion(self, event):\n self.lanes_invaded = event.crossed_lane_markings", "def update_level(self):\n level = 1\n assigned_levels = set([])\n just_assigned = set([])\n for root in self.roots:\n for child in root.children:\n if child in just_assigned:\n continue\n child.level = level\n if len(child.children) == 0:\n continue\n just_assigned.add(child)\n assigned_levels = assigned_levels.union(just_assigned)\n\n level += 1\n leaves = [c for c in self.collectors if len(c.children) == 0]\n len_non_leaves = len(self.collectors) - len(leaves)\n self.update_level_for_non_leaves(\n level, assigned_levels, just_assigned, len_non_leaves\n )", "def setlevel(self, lvl):\n self.logger.setLevel(lvl)", "def undo(self, event=None):\n if not self.segs == []:\n self.requestSegByDct((self.segs[-1].getDct() + 2) % 4)" ]
[ "0.5834273", "0.5602306", "0.5602119", "0.55198884", "0.5512972", "0.5512972", "0.5512972", "0.5404156", "0.53947735", "0.5348047", "0.5295594", "0.52906436", "0.52568555", "0.52522296", "0.5250421", "0.5206884", "0.52009", "0.5166423", "0.5164569", "0.51151156", "0.5082658", "0.5078474", "0.50743186", "0.5059792", "0.505793", "0.5055135", "0.5053367", "0.5044026", "0.50432426", "0.5021192", "0.50092757", "0.4965863", "0.49642593", "0.49604988", "0.4944869", "0.49392453", "0.49243882", "0.4876908", "0.48762393", "0.4867039", "0.48662627", "0.4861796", "0.4858936", "0.48588884", "0.48576564", "0.4851709", "0.48412398", "0.48403078", "0.48302037", "0.48301128", "0.4827817", "0.48216656", "0.4800035", "0.4798759", "0.47895443", "0.47876108", "0.4772298", "0.47595012", "0.47572604", "0.47450426", "0.4733354", "0.4726521", "0.47239608", "0.472248", "0.4722434", "0.47159204", "0.47061747", "0.47053087", "0.47005162", "0.4698198", "0.46933213", "0.46757463", "0.46656528", "0.46644455", "0.46591997", "0.4658053", "0.46552357", "0.46434182", "0.46398115", "0.46390584", "0.4631531", "0.46193394", "0.46173644", "0.46134365", "0.4611358", "0.45932043", "0.45905718", "0.45900783", "0.4589665", "0.45850387", "0.45835644", "0.45821872", "0.45813298", "0.4573165", "0.45658696", "0.4556664", "0.4555954", "0.45557347", "0.45503068", "0.45486903" ]
0.52542055
13
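
Nothing in the dump states how the negative_scores or document_score values were produced. Scores in this 0.4-0.6 range are typical of cosine similarity between a query embedding and candidate embeddings, so the sketch below assumes that convention purely for illustration; the embedding model and vector size are likewise invented stand-ins.

import numpy as np

# Assumed illustration: cosine similarity between one query embedding and a
# batch of candidate embeddings, producing one score per candidate.
def cosine_scores(query_vec, candidate_vecs):
    q = query_vec / np.linalg.norm(query_vec)
    c = candidate_vecs / np.linalg.norm(candidate_vecs, axis=1, keepdims=True)
    return c @ q

rng = np.random.default_rng(0)
query_vec = rng.normal(size=384)         # stand-in for an embedded query
candidates = rng.normal(size=(5, 384))   # stand-ins for embedded functions
print(np.round(cosine_scores(query_vec, candidates), 4))

Normalizing both vectors keeps every score in [-1, 1], which matches the magnitude of the values listed above.
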
Add noise to the nonsuffix segments in the WordForm.
def add_noise(self): self.segments = deepcopy(self.segments) # Iterate through each of the first three Segments in the WordForm. for i in range(3): # Add noise to each Segment. self.segments[i].add_noise()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_noise(self, words, lengths):\n words, lengths = self.word_shuffle(words, lengths)\n words, lengths = self.word_dropout(words, lengths)\n # words, lengths = self.word_blank(words, lengths)\n return words, lengths", "def add_noise(self, data):", "def remove_noise(text):\n\n text = text.split()\n word = [word for word in text if word not in [\n 'pertain',\n 'estimate',\n 'link',\n 'and',\n 'more',\n 'fetch',\n 'be',\n 'there',\n 'do',\n 'you',\n 'have',\n 'any',\n 'is',\n 'my',\n 'on',\n 'can',\n 'i',\n 'get',\n 'some',\n 'am',\n 'look',\n 'for',\n 'the',\n 'to',\n 'share',\n 'me',\n 'of',\n 'please',\n 'a',\n 'very',\n 'at',\n 'with',\n 'relate',\n 'sorry'\n ]]\n return ' '.join(word)", "def add_noise(self, noise):\n if noise > 0.0:\n for key in self.counts:\n self.counts[key] *= 1.0 + noise * np.random.random_sample()", "def noiseAtten(atten) :\n s.noiseAtten(atten)", "def noise(self, freq: int, /) -> None:", "def add_suffix(self, suffix):\n # Append the suffix vowel to this WordForm.\n self.segments.append(Segment.new_segment(suffix))", "def noise(self, stddev):\n #add noise to weights\n pass", "def addNoise(self, sigma=1.0):\n noise = numpy.random.normal(loc=0, scale=sigma, size=(self.ny, self.nx))\n self.image += noise\n return", "def add_noise(self, words, lengths, lang_id):\n words, lengths = self.word_shuffle(words, lengths, lang_id)\n words, lengths = self.word_dropout(words, lengths, lang_id)\n words, lengths = self.word_blank(words, lengths, lang_id)\n return words, lengths", "def addNoise(pure,snr):\r\n watts = pure**2\r\n # Calculate signal power and convert to dB \r\n sig_avg_watts = np.mean(watts)\r\n sig_avg_db = 10 * np.log10(sig_avg_watts)\r\n # Calculate noise according to [2] then convert to watts\r\n noise_avg_db = sig_avg_db - snr\r\n noise_avg_watts = 10 ** (noise_avg_db / 10)\r\n # Generate an sample of white noise\r\n mean_noise = 0\r\n noise = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(watts))\r\n \r\n return pure+noise", "def make_noise(self, num):\n return np.random.randn(num, self.seq_length + 2 * self.seq_pad,\n self.noise_dim)", "def add_noise(Y, sigma):\r\n return Y + np.random.normal(0, sigma, Y.shape)", "def _make_noisy(x, the_noise):\n noise_sample = the_noise[np.random.choice(the_noise.shape[0],\n x.shape[0],\n replace=False)]\n return x + noise_sample", "def build_unigram_noise(freq):\n total = freq.sum()\n noise = freq / total\n assert abs(noise.sum() - 1) < 0.001\n return noise", "def transform_audio(self, segment: Union[AudioSegment, SpeechSegment]) -> None:\n noise_data = self._rng.sample(self._noise_data, 1)[0]\n if noise_data[\"duration\"] < segment.duration:\n raise RuntimeError(\"The duration of sampled noise audio is smaller than the audio segment.\")\n diff_duration = noise_data[\"duration\"] - segment.duration\n start = self._rng.uniform(0, diff_duration)\n end = start + segment.duration\n noise_seg = AudioSegment.from_slice_file(noise_data[\"src\"], start=start, end=end)\n snr_dB = self._rng.uniform(self._min_snr_dB, self._max_snr_dB)\n segment.add_noise(noise_seg, snr_dB=snr_dB, allow_downsampling=True, rng=self._rng)", "def add_noise(image):\n image += 10e-10 * np.random.randn(image.shape[0], image.shape[1], 1)\n \n return image", "def remove_noise(text):\n text1 = re.sub(\"[\\t\\r\\s]\", \" \",text)\n text1 = \" \" + text1\n text2 = re.sub(r\"([ \" + string.punctuation + \"]+)[^a-zA-Z ]+\", \"\\g<1> \", text1)\n return text2", "def add_noise(self, snr, unit=None):\n return self.from_time(self.fs, 
noisify(self.in_time, snr, unit=unit))", "def make_noise(self, num):\n return np.random.randn(num, self.seq_length, self.noise_dim)", "def add_noise(self):\n self.noise = torch.normal(0.5, .2, self.state.shape).double()\n self.noise *= torch.sqrt(2 *\n self.vars['T']*torch.tensor(self.vars['dt']))", "def noise(self, noise):\n\n self._noise = noise", "def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.image.shape)\n self.image += self.noise\n return", "def _addNoise(self):\n self.dispNoise = self.dispRaw.copy()\n self.dispNoise[:, 0] += self.sigmaEast * numpy.random.randn(self.numStations)\n self.dispNoise[:, 1] += self.sigmaNorth * numpy.random.randn(self.numStations)\n self.dispNoise[:, 2] += self.sigmaUp * numpy.random.randn(self.numStations)\n return", "def add_weight_noise(self, std):\n with torch.no_grad():\n param_vector = parameters_to_vector(self.parameters())\n normal_dist = torch.distributions.Normal(loc=torch.tensor([0.0]), scale=torch.tensor([std]))\n noise = normal_dist.sample(param_vector.size())\n if self.device_id >= 0:\n noise = noise\n param_vector.add_(noise[0])\n vector_to_parameters(param_vector, self.parameters())", "def noiseReduction(self):\n pass", "def addNormalizing(self, name, seq):\n\n for i in xrange(len(seq) - self.kmer_size + 1):\n s = strandless(seq[i:i + self.kmer_size].upper())\n if \"N\" in s:\n continue\n self.normalizingKmers.add(s)", "def add_noise(emg):\n MAX_AMPLITUDE = 32767\n\n # Sampling\n # 1 second of data requires 600 frames. And 600 fps is 600 Hz, sampling rate of EMG.\n Ts = 1/EMG_F_SAMPLE\n\n # Time vector\n t = np.arange(0, len(emg)/EMG_F_SAMPLE, Ts) # each unit of t is a second\n\n # Noise\n randAmplitudeScale = np.random.random()*0.1\n randOffset = np.random.random() * 2*np.pi\n \n fNoise = 50; # Frequency [Hz]\n aNoise = randAmplitudeScale*MAX_AMPLITUDE # Amplitude\n noise = aNoise * np.sin(2 * np.pi * t * fNoise + randOffset)\n\n # Add noise to signal\n for channel in [\"emg1\", \"emg2\", \"emg3\", \"emg4\", \"emg5\", \"emg6\"]:\n emg[channel] += noise\n return emg", "def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.im.shape)\n self.im += self.noise\n return", "def generate_noise_vector(self, ):\n self.noise.resize_(\n self.batch_size, int(self.opt.nz), 1, 1).normal_(0, 1)\n self.noisev = Variable(self.noise) # TODO: Add volatile=True???", "def _add_random_noise_and_flatten(x):\n # Random noise path indexes and random snr levels\n rand_noise = [\n (noise_type,\n tf.random.uniform([], 0, tf.size(type2paths[noise_type]), tf.int32),\n tf.random.uniform([], snr_low, snr_high, tf.float32))\n for noise_type, snr_low, snr_high in snr_list]\n # Select random noise signals by drawn indexes and read contents from files\n rand_noise = [\n (audio_features.read_wav(type2paths[noise_type][rand_index]), snr)\n for noise_type, rand_index, snr in rand_noise]\n\n # Assert sample rates\n # TODO maybe add inline resampling of noise signals so they match the speech sr\n for (noise, sample_rate), snr in rand_noise:\n tf.debugging.assert_equal(sample_rate, x[\"sample_rate\"], message=\"Invalid noise signals are being used, all noise signals must have same sample rate as speech signals that are being augmented\")\n\n # Fix noise signal length to match x[\"signal\"] by repeating the noise signal if it is too short and then slicing it\n rand_noise = [\n # How many multiples of `noise` fits in x[\"signal\"]\n (tf.cast(tf.size(x[\"signal\"]) / tf.size(noise), tf.int32), noise, snr)\n for (noise, _), snr in 
rand_noise]\n rand_noise = [\n # Repeat noise and slice\n (tf.tile(noise, [1 + noise_length_ratio])[:tf.size(x[\"signal\"])], snr)\n for noise_length_ratio, noise, snr in rand_noise]\n\n # Mix x[\"signal\"] and chosen noise signals\n mixed_signals = [audio_features.snr_mixer(x[\"signal\"], noise, snr)[2] for noise, snr in rand_noise]\n # Create new utterance ids that contain the mixed noise type and SNR level\n new_ids = [\n tf.strings.join((\n \"augmented\",\n x[\"id\"],\n noise_type,\n tf.strings.join((\"snr\", tf.strings.as_string(snr, precision=2)))),\n separator=\"-\")\n for (noise_type, _, _), (_, snr) in zip(snr_list, rand_noise)]\n\n # Create new elements from the mixed signals and return as dataset\n return (tf.data.Dataset\n .zip((tf.data.Dataset.from_tensor_slices(new_ids),\n tf.data.Dataset.from_tensor_slices(mixed_signals),\n tf.data.Dataset.from_tensors(x).repeat(len(mixed_signals))))\n .map(_update_element_meta))", "def noise(self, xs, ys):\n raise NotImplementedError", "def mask_disc_markers(self, text: str) -> str:\n punctuations = \".?!;:-()'\\\"[]\"\n for elem in punctuations:\n text = text.replace(elem, \" \" + elem + \" \")\n text = \" \" + text + \" \"\n for dm in self.dms:\n text.replace(\" \" + dm + \" \", \" <mask> \" * len(dm.split()))\n return text", "def remove_noise_terms(terms: List[str], noise_terms: List[str]) -> List[str]:\n if '' in terms:\n terms.remove('')\n\n cp_terms = terms.copy()\n for term in cp_terms:\n for noise in noise_terms:\n if noise in term.lower():\n terms.remove(term)\n break\n\n return terms", "def strip_silence(self):\n start_idx = 0\n end_idx = -1\n # class position\n for i, tone in enumerate(self.tone_list):\n if not 'silence' in tone:\n start_idx = i\n break\n\n for i, tone in reversed(list(enumerate(self.tone_list))):\n if not 'silence' in tone:\n end_idx = i - 1\n break\n\n self.tone_list = self.tone_list[start_idx:end_idx]", "def add_noise(self, mdct_norm, masking_threshold):\n return self.psychoacoustic.add_noise(mdct_norm, masking_threshold)", "def remove_words_and_ngrams(self, document):\n for w in self.words_and_ngrams_exceptions:\n document = re.sub(w, '', document)\n return document", "def FindSuffix(self):\n self.numSuffixes = 0\n self.forceStress = 0\n resultslist = []\n for f in self.suffixes.finditer(self.wd):\n resultslist.append((f.group(), f.start()))\n if not resultslist: return\n # make sure *end* of word is in list! otherwise, 'DESP erate'\n if resultslist[-1][1] + len(resultslist[-1][0]) < len(self.wd):\n return\n resultslist.reverse()\n for res in resultslist:\n # if no vowel left before, false suffix ('singing')\n # n.b.: will choke on 'quest' etc! put in dictionary, I guess\n if not sre.search('[aeiouy]', self.wd[:res[1]]): break\n if res[0] == 'ing' and self.wd[res[1]-1] == self.wd[res[1]-2]:\n self.sylBounds.append(res[1] - 1) # freq special case\n else: self.sylBounds.append(res[1]) # sorted later\n self.wd = self.wd[:res[1]]\n self.numSuffixes += 1\n if res[0] in STRESSSUFFIX:\n self.forceStress = 0 - len(self.sylBounds)\n if res[0] in MULTISUFFIX:\n # tricky bit! 
it *happens* that secondary division in all these\n # comes after its first character; NOT inevitable!\n # also does not allow for 3-syl: 'ically' (which are reliable!)\n self.sylBounds.append(res[1]+1)\n self.numSuffixes += 1", "def make_noise(self, signal_only):\n\n #print >> sys.stdout, \"generating noise...\"\n\n if signal_only:\n\n # the noise is just a time series of zeros\n \n self.td_noise = pycbc.types.timeseries.TimeSeries(\n initial_array=np.zeros(self.duration/self.delta_t),\n delta_t=self.delta_t, epoch=self.epoch)\n\n else:\n # Generate noise \n self.assign_noise_curve()\n\n # Generate time-domain noise\n # XXX: minimum duration seems to be 1 second. I'll hack around this by\n # reducing the 1 second to the desired duration\n tmplen=max(self.duration,1.0)/self.delta_t\n self.td_noise = pycbc.noise.noise_from_psd(int(tmplen), self.delta_t,\n self.psd, seed=self.seed)\n\n self.td_noise = \\\n pycbc.types.TimeSeries(self.td_noise.data[:self.duration/self.delta_t],\n delta_t=self.delta_t)\n\n # XXX not sure if this is a good idea...\n self.td_noise.start_time = float(self.epoch)\n\n self.fd_noise = self.td_noise.to_frequencyseries()", "def add_noise(self,fing):\r\n\t\tfingerprint = copy.deepcopy(fing)\r\n\t\tl = len(fingerprint)\r\n\t\tnp.random.seed()\t\r\n\t\tif self.noise_type == 'SNR':\r\n\t\t\tnoise = np.random.normal(0, 1, l)\r\n\t\t\tsignal_Power = np.linalg.norm(fingerprint)\r\n\t\t\tnoise_Power = np.linalg.norm(noise)\r\n\t\t\tcst = signal_Power / (noise_Power * self.noise_level)\r\n\t\t\tnoise = noise * cst\r\n\t\telif self.noise_type == 'Standard':\r\n\t\t\tnoise = np.random.normal(0, self.noise_level, l)\r\n\t\tfingerprint += noise\r\n\t\tif self.trparas.normalization == 'Noisy_input':\r\n\t\t\treturn fingerprint / np.linalg.norm(fingerprint)\r\n\t\telse:\r\n\t\t\treturn fingerprint", "def add_noise(a: np.ndarray, noise: float) -> np.ndarray:\n return np.clip(a + np.random.normal(0, noise, len(a)), -1, 1)", "def _get_noise(self, shape, dtype=None):", "def make_silence_phones_txt(self):\n raise NotImplementedError", "def sample_sonnet(hmm, obs_map, n_words):\n sonnetLines = []\n sonnet = ''\n\n for numLines in range(14):\n line = sample_sentence(hmm, obs_map, n_words)\n sonnetLines.append(''.join(line).capitalize() + '\\n')\n\n for line in sonnetLines:\n sonnet += line\n\n return sonnet", "def add_noise(arr, sigma):\n dims = arr.shape\n arr += sigma * noise(*dims)", "def noiseoff(subarray=DEFAULT) :\n multiSubarray('noiseSource', subarray, False, False)\n multiSubarray('rfPower', subarray, True)", "def noisy_data(filename, split='train', clf='gender'):\n\n filepath = 'data/{}/{}/{}o.wav'.format(clf, split, filename)\n audio, sr = librosa.load(filepath, sr=16000)\n \n # Add noise\n noisy = add_noise(audio)\n # Write noise to file\n sf.write('data/{}/{}/{}n.wav'.format(clf, split, filename), noisy, sr)\n #print(\"Noise added to {}\".format(filename))", "def add_noise(pointcloud, stddev=0.01):\n result = pointcloud\n result[0:2, :] = np.random.normal(pointcloud[0:2, :], stddev)\n return result", "def random_text(self, n=100):\n # choose a random prefix (not weighted by frequency)\n start = random.choice(list(self.suffix_map.keys()))\n #print(\">>DEBUG | start is\", start)\n \n for i in range(n):\n #print(\">> DEBUG | i is\", n)\n suffixes = self.suffix_map.get(start, None)\n #print(\">> DEBUG | suffixes is\", suffixes)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n #print(\">> DEBUG | start 
isn't in map\")\n random_text(n-i)\n return\n\n # choose a random suffix\n word = random.choice(suffixes)\n #print(\">> DEBUG | word is\", word)\n print(word, end=' ')\n start = self.shift(start, word)", "def add_gaussian_noise(self, samples):\n\n if 'sigma' in self.gaussian_component:\n sigma = self.gaussian_component['sigma']\n return samples + self.random_state.normal(size=samples.shape) * sigma\n if 'sigmas' in self.gaussian_component:\n sigmas = self.gaussian_component['sigmas']\n return samples + self.random_state.normal(size=samples.shape) * sigmas\n\n return samples", "def noise_removal(text):\n # Remove HTML\n text = BeautifulSoup(text, \"html.parser\").get_text()\n\n # Remove non-letters\n text = re.sub(\"[^a-zA-Z]\", \" \", text)\n\n # remove letters that are used more than three times in a row\n # sources: https://en.oxforddictionaries.com/explore/words-with-same-letter-three-times-in-a-row/\n # https://stackoverflow.com/questions/4574509/remove-duplicate-chars-using-regex\n text = re.sub(r'([\\w])\\1{2,}', r'\\1', text)\n\n\n word_list = text.split()\n return word_list", "def add_noise(self, portion, amplitude):\n # TODO: Implement the add_noise function. Remember to record the\n # boolean value is_add_noise. You can try uniform noise or Gaussian\n # noise or others ones that you think appropriate.\n self.is_add_noise = True\n noise = np.random.normal(0, 1, size = self.x.shape)\n noNoiseIndices = np.random.choice(self.x.shape[0], size = int(np.round(self.x.shape[0]*(1 - portion))), replace = False)\n noise = (noise*amplitude).astype(np.uint8)\n noise[noNoiseIndices,:] = 0\n self.x += noise\n self.x = np.clip(self.x, 0, 255, out=self.x)\n \n \n \n \n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################", "def reset_noise(self):\n self.advantage_hidden_layer.reset_noise()\n self.advantage_layer.reset_noise()\n self.value_hidden_layer.reset_noise()\n self.value_layer.reset_noise()", "def add_unknown_words(self, word_vecs, vocab, min_df=3, k=300):\n for word in vocab:\n if word not in word_vecs and vocab[word] >= min_df:\n word_vecs[word] = np.random.uniform(-0.25,0.25,k)", "def addNoise(x,y,noise):\n n = len(x)\n theta = np.random.random(n)*(2*np.pi) # random angle in [0,2*pi[\n d = np.random.random(n)*noise # random amplitude in [0,noise[\n x += np.cos(theta)*d\n y += np.sin(theta)*d", "def skip_and_replace_phrases(text):\n\n # For each text in [], replace it with '' with probability 0.5.\n matches = re.findall('(\\[[ \\w]*\\])', text)\n for match in matches:\n if random.uniform(0, 1) > 0.5:\n text = text.replace(match, '')\n else:\n text = text.replace(match, match[1:-1])\n\n # Remove empty spaces, if any.\n text = re.sub(' +', ' ', text)\n # Search for synonyms, replace at uniformly random.\n text = text.lower()\n for key, values in gvars.METAINFO['synonym_keys']:\n if key in text:\n text = text.replace(key, random.choice(values))\n return text", "def addNoise(img, sigma=2.0, mean=0):\n img2 = np.random.normal(mean, sigma, size=img.shape)\n\n img2 += img\n img2 = np.uint8(img2.clip(0, 255))\n return img2", "def _sample_new_noise(self, *, tf_sess=None):\n if self.framework == \"tf\":\n tf_sess.run(self.tf_sample_new_noise_op)\n elif self.framework == \"tf2\":\n self._tf_sample_new_noise_op()\n else:\n for i in range(len(self.noise)):\n self.noise[i] = torch.normal(\n mean=torch.zeros(self.noise[i].size()), 
std=self.stddev\n ).to(self.device)", "def random_text(n):\n start = random.choice(suffix_map.keys())\n for i in range(n):\n suffixes = suffix_map.get(start, None)\n if suffixes == None:\n # if the start isn't in map, we got to the end of the\n # original text, so we have to start again.\n random_text(n-i)\n return\n # choose a random suffix\n word = random.choice(suffixes)\n # Jodesty: *Need to learn how to format text output to fit on terminal screen\n output_words.append(word)\n # Jodesty: *what I have for now\n print word,\n start = shift(start, word)", "def addNoise( bmp, noise, savefile = '' ):\n w_count = 0 # count number of non-black pixels\n non_signal = [] # holds coordinates of w_counts\n for h in range(bmp.height):\n for w in range(bmp.width):\n # Add coordinates to non_signal\n if( intensity(bmp.pixels[h][w]) != 0 ):\n w_count += 1\n non_signal.append((h,w))\n\n #print(\"> Counted\", w_count, \"non-signal pixels; length signal\",len(non_signal))\n\n # If noise is greater than number of white pixels, then set noise ceiling\n if( noise > w_count ):\n noise = w_count\n\n # Now add noise\n noise_index = [] # holds random indicies for non_signal manipulation\n \n # we will fill up noise_index with random indicies\n while( len(noise_index) < noise ):\n # randomize based on size of non-black pixels\n add = int(random()*w_count)\n if( add not in noise_index ):\n noise_index.append(add)\n\n for n in noise_index:\n # --------- pixel height ---- pixel width ------------\n bmp.pixels[non_signal[n][0]][non_signal[n][1]] = (0,0,0)\n\n # Save image to a new file if prompted\n if( savefile != '' ):\n bmp.save(savefile)\n \n return bmp", "def white_noise():\n return random.randint(-32767, 32767)", "def sample_sonnet_syllables_only(hmm, obs_map, n_syl = 10):\n sonnetLines = []\n sonnet = ''\n sonnet_length = 14\n count = 0\n syl_counts = syllable_dict()\n \n while count < sonnet_length:\n line = sample_sentence_syl_only(hmm, obs_map, n_syl)\n (worked, nline) = make_line_syl_only(line, n_syl, syl_counts)\n if worked:\n sonnetLines.append(nline)\n count += 1\n for line in sonnetLines:\n sonnet += line\n return sonnet", "def create_synthetic_noise_dataset(cfg):\n from colorednoise import powerlaw_psd_gaussian\n\n betas = np.linspace(cfg['data.mix_synthetic_noise.min_beta'],\n cfg['data.mix_synthetic_noise.max_beta'],\n num=cfg['data.mix_synthetic_noise.num_samples'])\n sample_rate = cfg['data.sample_rate']\n segment_length = 2 * cfg['data.len_min']\n wavs = [powerlaw_psd_gaussian(beta, sample_rate * segment_length)\n for beta in betas]\n wavs = [audio.normalize(wav, low=-1, high=1) for wav in wavs]\n return NoiseDataset(wavs)", "def masked_word(self):\n for i in range(0,len(self._word)):\n if self._word[i] == ' ':\n self.new_string.append(' ')\n else:\n self.new_string.append('__ ')\n\n return self.print_new_word(self.new_string)", "def test_noise(self, lang):\n\n lang_id = self.params.lang2id[lang]\n sent1, len1 = self.get_batch('encdec', lang, None)\n sent1 = sent1.transpose_(0, 1)\n print(sent1.shape)\n print(\"sent1 before noise is \")\n print(sent1)\n print(\"len1 before noise is \")\n print(len1)\n\n sent1, len1 = self.add_noise(sent1, len1, lang_id)\n\n print('sent1 after noise for ' + lang + ' is')\n print(sent1)\n print('len1 for ' + lang + \" is \")\n print(len1)", "def _add_noise(signal: np.array, noise_power: float) -> np.array:\n noise = np.sqrt(noise_power / 2) * np.random.randn(signal.size)\n return signal + noise", "def _add_noise(signal: np.array, noise_power: float) -> 
np.array:\n noise = np.sqrt(noise_power / 2) * np.random.randn(signal.size)\n return signal + noise", "def add_noise(self, data):\n noise = self._get_noise(data.shape, data.dtype)\n return np.clip(data + noise, self._clipping_lower_bound,\n self._clipping_upper_bound)", "def add_noise(self, data):\n noise = self._get_noise(data.shape, data.dtype)\n return np.clip(data + noise, self._clipping_lower_bound,\n self._clipping_upper_bound)", "def remove_noise(emg):\n def butter_bandstop_filter(data, lowcut, highcut, fs, order=2):\n def butter_bandstop(lowcut, highcut, fs, order=2):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='bandstop')\n return b, a\n \n b, a = butter_bandstop(lowcut, highcut, fs, order=order)\n y = lfilter(b, a, data)\n return y\n \n # Remove noise from signal\n for channel in [\"emg1\", \"emg2\", \"emg3\", \"emg4\", \"emg5\", \"emg6\"]:\n emg[channel] = butter_bandstop_filter(emg[channel], 49., 51., EMG_F_SAMPLE, order=2)\n return emg", "def noise(self) -> Sequence:\n\n return self._noise", "def add_noise(spectra: np.ndarray, maxLevel: float = 0.1, seed: int = 42) -> np.ndarray:\n np.random.seed(seed)\n spectra = spectra.copy()\n spectra[:, 1:] *= (1-maxLevel/2) + np.random.rand(spectra.shape[0], spectra.shape[1]-1) * maxLevel\n return spectra", "def makeNoisy(alogSrc, alogTgt, meas, mag):\n import os\n from numpy.random import normal\n # if meas[0] != 'z':\n # error()\n curdir = os.getcwd()\n os.chdir('/')\n src = open(alogSrc, 'rU')\n tgt = open(alogTgt, 'w')\n os.chdir(curdir)\n\n for msg in src:\n if (\"%%\" in msg):\n msg = msg[0:-2] # get rid of \\n at end for printing later\n else:\n msg = msg.split()\n for des in range(len(meas)):\n if msg[1] == meas[des]:\n noise = normal(float(msg[3]), mag[des], 1)\n msg[3] = str(noise[0]) # center deviation about measurement\n msg = reconstructLine(msg)\n # print(msg)\n\n tgt.write(msg + '\\n')", "def add_image_noise(\n self, mu=0, sigma=0.005, only_positive=False, random_seed=None):\n if random_seed is not None:\n np.random.seed(random_seed)\n shape = self.signal.axes_manager.shape\n noise = normal(mu, sigma, shape)\n if only_positive:\n self._image_noise = np.absolute(noise)\n else:\n self._image_noise = noise", "def __call__(self, wav):\n beg_i = 0\n end_i = wav.shape[0]\n sel_noise = self.load_noise(self.sample_noise())\n if len(sel_noise) < len(wav):\n # pad noise\n P = len(wav) - len(sel_noise)\n sel_noise = np.pad(sel_noise, (0, P))\n # mode='reflect').view(-1).data.numpy()\n T = end_i - beg_i\n # TODO: not pre-loading noises from files?\n if len(sel_noise) > T:\n n_beg_i = np.random.randint(0, len(sel_noise) - T)\n else:\n n_beg_i = 0\n noise = sel_noise[n_beg_i:n_beg_i + T]\n # randomly sample the SNR level\n snr = random.choice(self.snr_levels)\n K, Ex, En = self.compute_SNR_K(wav, noise, snr)\n scaled_noise = K * noise\n if En > 0:\n noisy_wav = wav + scaled_noise\n noisy_wav = self.norm_energy(noisy_wav, Ex)\n else:\n noisy_wav = wav\n return noisy_wav", "def add_gaussian_noise(self, stdev, iteration, method = 'absolute', normalize = \"on\"):\n for time in self.mdvtc.keys():\n self.mdvtc[time].add_gaussian_noise(stdev, iteration, method, normalize)", "def reset_noise(self):\n try:\n self.head.reset_noise()\n except:\n pass\n\n try:\n for m in self.vhead.children():\n try:\n m.reset_noise()\n except:\n pass\n except:\n pass\n\n try:\n for m in self.ahead.children():\n try:\n m.reset_noise()\n except:\n pass\n except:\n pass", "def noisy_seismogram(t, seismogram, 
noise_amp=5):\n\n # Noise\n signoise = 2 * np.sqrt(3)\n\n # Create filter to take out high frequency noise\n filtgauss = gaussian(t, 75, signoise, 1.)\n filtgauss = filtgauss / sum(filtgauss)\n\n # Amplitude for the noise\n amp = noise_amp\n noise = 2 * (np.random.uniform(size=t.shape) - 0.5) * amp\n\n # Compute filtered noise\n filtnoise = np.real(ifft(fft(noise) * fft(filtgauss)))\n\n # Add noise to original seismogram\n noisemogram = seismogram + filtnoise\n\n return noisemogram", "def model_noise(self, model, model_res=None, num_observations=1):\n\n raise NotImplementedError", "def negative_sampling(data: pd.DataFrame,\n vocab: np.ndarray,\n noise_distribution: list,\n neg_sample_size: int\n ) -> pd.DataFrame:\n \n def samples_generator(word: str\n ) -> List[str]:\n while True:\n samples = np.random.choice(\n vocab, neg_sample_size, p=noise_distribution\n )\n if word not in samples:\n return samples\n \n data['negative_samples'] = data['centre_word'].apply(samples_generator)\n return data", "def add_unknown_words(word_vecs, vocab, min_df=5000, k=300):\n for word in vocab:\n if word not in word_vecs and vocab[word] >= min_df:\n #print word\n word_vecs[word] = np.random.uniform(-0.25,0.25,k)", "def add_signal_to_noise(self):\n\n # noise\n noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n noise.data.data = self.td_noise.data\n\n # signal\n signal = lal.CreateREAL8TimeSeries('blah',\n self.ext_params.geocent_peak_time, 0, self.td_signal.delta_t,\n lal.StrainUnit, int(self.td_signal.duration /\n self.td_signal.delta_t))\n signal.data.data = self.td_signal.data\n\n win = lal.CreateTukeyREAL8Window(len(signal.data.data),0.1)\n win.data.data[len(signal.data.data):] = 1.0\n #signal.data.data *= win.data.data\n\n # --- Scale to a target snr\n print '---'\n if self.target_snr is not None:\n\n tmp_sig = pycbc.types.TimeSeries(signal.data.data,\n delta_t=self.td_signal.delta_t)\n\n current_snr = pycbc.filter.sigma(tmp_sig, psd=self.psd,\n low_frequency_cutoff=self.f_low,\n high_frequency_cutoff=0.5/self.delta_t)\n\n signal.data.data *= self.target_snr / current_snr\n # ----\n\n # sum\n noise_plus_signal = lal.AddREAL8TimeSeries(noise, signal)\n\n self.td_response = \\\n pycbc.types.timeseries.TimeSeries(\\\n initial_array=np.copy(noise_plus_signal.data.data),\n delta_t=noise_plus_signal.deltaT,\n epoch=noise_plus_signal.epoch)\n\n # Finally, zero-pad the signal vector to have the same length as the actual data\n # vector\n no_noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n\n no_noise.data.data = np.zeros(\\\n int(self.td_noise.duration / self.td_noise.delta_t))\n\n signal = lal.AddREAL8TimeSeries(no_noise, signal)\n\n self.td_signal = \\\n pycbc.types.timeseries.TimeSeries(initial_array=np.copy(signal.data.data),\n delta_t=signal.deltaT, epoch=noise_plus_signal.epoch)\n\n del noise, signal, noise_plus_signal", "def add_noise_at_snr(channel_in, snr):\n\n rms_channel = np.sqrt(np.mean(channel_in ** 2.0))\n noise_std = rms_channel / np.sqrt(10.0 ** (snr/10.0))\n\n return channel_in + np.random.normal(size=channel_in.shape, scale=noise_std)", "def noisePreset() :\n s.noisePreset()", "def addNoise (image,noise_type=\"gauss\",var = .01):\n row,col,ch= image.shape\n if noise_type == \"gauss\": \n mean = 0.0\n #var = 0.001\n sigma = var**0.5\n gauss = np.array(image.shape)\n gauss = 
np.random.normal(mean,sigma,(row,col,ch))\n gauss = gauss.reshape(row,col,ch)\n #print(gauss)\n noisy = image + gauss*255\n return noisy.astype('uint8')\n elif noise_type == \"s&p\":\n s_vs_p = 0.5\n amount = 0.09\n out = image\n # Generate Salt '1' noise\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = [np.random.randint(0, i - 1, int(num_salt))\n for i in image.shape]\n out[coords] = 255\n # Generate Pepper '0' noise\n num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))\n coords = [np.random.randint(0, i - 1, int(num_pepper))\n for i in image.shape]\n out[coords] = 0\n return out\n elif noise_type == \"poisson\":\n vals = len(np.unique(image))\n vals = 2 ** np.ceil(np.log2(vals))\n noisy = np.random.poisson(image * vals) / float(vals)\n return noisy\n elif noise_type ==\"speckle\":\n gauss = np.random.randn(row,col,ch)\n gauss = gauss.reshape(row,col,ch) \n noisy = image + image * gauss\n return noisy\n else:\n return image", "def add_noise(image, noise, rate=0.05):\n\n if noise == \"gaussian\":\n row, col = image.shape\n var = ndimage.laplace(image).var()\n sigma = (var*rate) ** 0.5\n print(var, sigma)\n gauss = np.random.normal(loc=0, scale=sigma, size=(row, col)) * rate\n noisy = image + gauss\n # noisy = image + gauss\n return noisy\n\n elif noise == \"salt_pepper\":\n output = image.copy()\n black = 0\n white = 255\n probs = np.random.random(image.shape[:2])\n output[probs < (rate / 2)] = black\n output[probs > 1 - (rate / 2)] = white\n\n return output\n\n else:\n return image", "def add_noise(self, u):\n noise = torch.randn_like(u)\n noise[:, :, :3] = noise[:, :, :3] * self.imu_std[0]\n noise[:, :, 3:6] = noise[:, :, 3:6] * self.imu_std[1]\n\n # bias repeatability (without in run bias stability)\n b0 = self.uni.sample(u[:, 0].shape).cuda()\n b0[:, :, :3] = b0[:, :, :3] * self.imu_b0[0]\n b0[:, :, 3:6] = b0[:, :, 3:6] * self.imu_b0[1]\n u = u + noise + b0.transpose(1, 2)\n return u", "def remove_non_narration_strings(transcription_row):\n sentence = transcription_row[\"text\"]\n # filter out (CAPITALIZED WORD) and \"CAPITALIZED WORD\". These are not enunciated in the voiceover, but rather\n # indicate noise/words from the original audio track that get interspersed into the voice\n # Might contain special characters\n # Update: Capitalization etc are inconsistent. But all follow the pattern \"text\" and (text). 
Remove these instead\n crosstalk_pattern = '\\(.*?\\)|\\\".*?\\\"'\n # crosstalk_findings = re.findall(crosstalk_pattern, sentence)\n # print(\"Crosstalk: \"+str(crosstalk_findings))\n sentence = re.sub(crosstalk_pattern, \" \", sentence)\n # filter out ' s ' ' Ss ' etc\n s_pattern = r'\\b[sS]+\\b'\n s_pattern_findings = re.findall(s_pattern, sentence)\n # if len(s_pattern_findings) > 0:\n # print(\"S-pattern: \"+str(s_pattern_findings))\n sentence = re.sub(s_pattern, \" \", sentence)\n transcription_row[\"text\"] = sentence\n return transcription_row", "def add_bb_noise(bb):\n\n center = bb.centre()\n center_4_times = np.array(list(center) * 4)\n bb_v = bb.as_vector()\n bb_center = bb_v - center_4_times\n scale_factor_x = np.random.uniform(1, 1.1)\n scale_factor_y = np.random.uniform(1, 1.2)\n scale_vector = np.array([scale_factor_x, scale_factor_y] * 4)\n scaled_bb_center = np.multiply(bb_center, scale_vector)\n bb_width = bb_v[5] - bb_v[1]\n bb_height = bb_v[2] - bb_v[0]\n #translation up to translation_factor of the new width and height\n translation_factor = 0.2\n\n t_y = np.random.uniform(-(translation_factor * bb_height), translation_factor * bb_height)\n t_x = np.random.uniform(-(translation_factor * bb_width), translation_factor * bb_width)\n t = np.array([t_x, t_y] * 4)\n scaled_bb_center = scaled_bb_center + t\n back_to_image = scaled_bb_center + center_4_times\n return back_to_image", "def predict_w_noise(self, xs, stochastic=True, **kwargs):\n raise NotImplementedError", "def add_silent_values(noiseValues):\n for i in range(0, groupSize):\n noiseRecord = random.choice(noiseValues)\n noiseValue = noiseRecord.audio\n newSample = AudioData(\n noiseValue,\n \"W6\",\n 1,\n \"Washing Room\"\n )\n newSample.processedValue = noiseValue\n newSample.noiseValue = noiseValue\n db.session.add(newSample)\n db.session.commit()", "def perform_noise_removal(mask):\n trans1 = cv.dilate(mask, KERNEL, iterations=4)\n trans1 = cv.erode(trans1, KERNEL, iterations=5)\n return cv.dilate(trans1, KERNEL, iterations=7)", "def make_optional_silence_txt(self):\n raise NotImplementedError", "def addNoise(array,counts):\r\n if array.dtype == 'complex' :\r\n arrayout = addNoise(np.real(array),counts) + 1.0J * addNoise(np.imag(array),counts)\r\n else :\r\n if np.float64(counts) == 0.0e0 :\r\n arrayout = np.zeros(array.shape, dtype=arrayout.dtype)\r\n elif np.float64(counts) < 0.0e0 :\r\n print 'bg.addNoise : warning counts < 0'\r\n elif np.float64(counts) > 1.0e9 :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = normaliseInt(array)\r\n arrayout = np.random.normal(arrayout*np.float64(counts),np.sqrt(arrayout*np.float64(counts)))/np.float64(counts)\r\n tot = np.sum(array)\r\n arrayout = normaliseInt(arrayout,tot)\r\n else :\r\n arrayout = np.zeros(array.shape)\r\n arrayout = normaliseInt(array)\r\n arrayout = np.random.poisson(arrayout*np.float64(counts))/np.float64(counts)\r\n tot = np.sum(array)\r\n arrayout = normaliseInt(arrayout,tot)\r\n return arrayout", "def corrupt_example(self, e):\n import random\n import copy\n e = copy.copy(e)\n last = e[-1]\n cnt = 0\n while e[-1] == last:\n e[-1] = random.randint(0, self.parameters.vocab_size-1)\n pr = 1./self.parameters.vocab_size\n cnt += 1\n # Backoff to 0gram smoothing if we fail 10 times to get noise.\n if cnt > 10: e[-1] = random.randint(0, self.parameters.vocab_size-1)\n weight = 1./pr\n return e, weight", "def create_splitword_negated(word_part):\n\n split_word = sentence.find('splitword', {'idref' : t_id})\n wordpart_idref = split_word.find('part', 
{'word' : word_part})\n\n last_frame = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})[-1]\n\n # Create negated\n negated = chapter_input.new_tag('fe')\n negated['name'] = NEGATED_TAG_NAME\n negated['id'] = last_frame.get('id')+'_e2'\n last_frame.insert(2, negated)\n\n # Create negated <fenode>\n negated_fenode = chapter_input.new_tag('fenode')\n negated_fenode['idref'] = wordpart_idref.get('id')\n negated_fenode['is_split'] = 'yes'\n negated.insert(0, negated_fenode)", "def add_gaussian_noise(self, stdev, iteration = 1, method = 'absolute', normalize = \"on\"):\n\n self.number_of_replicate = iteration\n for fragment in self.fragments_for_mdv_calculation:\n number_of_mass_data = max(self.mdv[fragment].keys()) + 1\n noise = numpy.zeros((number_of_mass_data, iteration))\n for i in range(iteration):\n for number in range(number_of_mass_data):\n if method == 'relative':\n noise[number, i] = (numpy.random.randn() * stdev + 1) * self.mdv[fragment][number]['ratio']\n else:\n noise[number, i] = (numpy.random.randn() * stdev) + self.mdv[fragment][number]['ratio']\n if noise[number, i] < 0.0:\n noise[number, i] = 0.0\n #各フラグメント毎に総和を1にする。\n if normalize == \"on\":\n sumvalue = sum(noise[:,i])\n noise[:,i] = noise[:,i] / sumvalue\n for number in range(number_of_mass_data):\n self.mdv[fragment][number]['ratio']= sum(noise[number,:])/iteration\n #self.mdv[fragment][number]['std'] = numpy.std(noise[number,:])\n self.mdv[fragment][number]['data'] = numpy.array(noise[number,:])", "def createSentence(self, n=0, v=0, o=0, p=0,prep=True):\n sentence = ''\n if not n:\n n = np.random.randint(1, 5)\n if not v:\n v = np.random.randint(1, 5)\n if not o:\n o = np.random.randint(1, 5)\n sentence += self.createPhrase(nwords=n) + ' '\n if sentence[:-1] not in ('mi', 'sina'):\n sentence += 'li '\n sentence += self.createPhrase(nwords=v) + ' e '\n sentence += self.createPhrase(nwords=o)\n if prep:\n if not p:\n p = np.random.randint(1, 5)\n sentence += ' ' + np.random.choice(self.prepositions) + ' ' + self.createPhrase(nwords=p)\n return sentence", "def noise(self):\n return self._noise", "def noise_sentences_typos(sentences, typos : dict, prob = 1.0):\n\n from copy import deepcopy\n noised_sentences = deepcopy(sentences)\n \n cnt_noised_tokens = 0\n for sentence in noised_sentences: \n for token in sentence:\n token.text, noised = induce_noise_typos(token.text, typos, prob) \n if noised: \n cnt_noised_tokens += 1\n\n return noised_sentences, cnt_noised_tokens" ]
[ "0.61349237", "0.6130484", "0.60680324", "0.604623", "0.5941968", "0.58597803", "0.58269954", "0.58108455", "0.56916386", "0.56686974", "0.56611174", "0.55890733", "0.55856615", "0.55645496", "0.556166", "0.55571675", "0.5511801", "0.55111974", "0.5508282", "0.5461142", "0.54018235", "0.5400243", "0.5397725", "0.5388273", "0.5380952", "0.5362307", "0.53568375", "0.5338852", "0.5334128", "0.5333122", "0.53087014", "0.5300805", "0.52990687", "0.5295106", "0.52505845", "0.52410764", "0.5240549", "0.52364653", "0.5234559", "0.5225761", "0.5197349", "0.5193574", "0.5167727", "0.5166227", "0.515503", "0.5149329", "0.5148982", "0.5140569", "0.5134603", "0.5106346", "0.5105391", "0.5105233", "0.509723", "0.50959206", "0.5092999", "0.5089864", "0.50860965", "0.50727355", "0.50685227", "0.506076", "0.50598085", "0.5056332", "0.5049166", "0.5046318", "0.5044997", "0.5030535", "0.5030535", "0.50267464", "0.50267464", "0.5023032", "0.50190836", "0.5006555", "0.49961036", "0.49959698", "0.4995233", "0.4992862", "0.49709955", "0.49623933", "0.49553266", "0.49520102", "0.4949268", "0.49467388", "0.4939219", "0.49349526", "0.4919325", "0.49178138", "0.49139416", "0.49040106", "0.49027097", "0.4899452", "0.48935837", "0.4891863", "0.48820987", "0.48817378", "0.48773", "0.48697025", "0.48669064", "0.48634318", "0.48582238", "0.48542958" ]
0.8073021
0
Add articulatory bias to the nonsuffix segments in the WordForm.
def add_bias(self, bias_types):
    self.segments = deepcopy(self.segments)
    # Add the biases specified in the argument.
    if bias_types['final'] and len(self.segments) == 3:
        self.segments[2].add_bias('voiceless')
    if bias_types['medial'] and len(self.segments) == 4:
        self.segments[2].add_bias('voiced')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_suffix(self, suffix):\n # Append the suffix vowel to this WordForm.\n self.segments.append(Segment.new_segment(suffix))", "def add_noise(self):\n self.segments = deepcopy(self.segments)\n # Iterate through each of the first three Segments in the WordForm.\n for i in range(3):\n # Add noise to each Segment.\n self.segments[i].add_noise()", "def addNormalizing(self, name, seq):\n\n for i in xrange(len(seq) - self.kmer_size + 1):\n s = strandless(seq[i:i + self.kmer_size].upper())\n if \"N\" in s:\n continue\n self.normalizingKmers.add(s)", "def FindSuffix(self):\n self.numSuffixes = 0\n self.forceStress = 0\n resultslist = []\n for f in self.suffixes.finditer(self.wd):\n resultslist.append((f.group(), f.start()))\n if not resultslist: return\n # make sure *end* of word is in list! otherwise, 'DESP erate'\n if resultslist[-1][1] + len(resultslist[-1][0]) < len(self.wd):\n return\n resultslist.reverse()\n for res in resultslist:\n # if no vowel left before, false suffix ('singing')\n # n.b.: will choke on 'quest' etc! put in dictionary, I guess\n if not sre.search('[aeiouy]', self.wd[:res[1]]): break\n if res[0] == 'ing' and self.wd[res[1]-1] == self.wd[res[1]-2]:\n self.sylBounds.append(res[1] - 1) # freq special case\n else: self.sylBounds.append(res[1]) # sorted later\n self.wd = self.wd[:res[1]]\n self.numSuffixes += 1\n if res[0] in STRESSSUFFIX:\n self.forceStress = 0 - len(self.sylBounds)\n if res[0] in MULTISUFFIX:\n # tricky bit! it *happens* that secondary division in all these\n # comes after its first character; NOT inevitable!\n # also does not allow for 3-syl: 'ically' (which are reliable!)\n self.sylBounds.append(res[1]+1)\n self.numSuffixes += 1", "def set_bias(self, bias):\n assert 0x10 <= bias <= 0x17, \"Bias must be one of BIAS_1_4..BIAS_1_11.\"\n assert self.instr == self.INSTR_EXT, \"Please switch to extended instruction set first.\"\n self.bias = bias\n self.command([bias])", "def _fix_bias(self, op, attrs, num_inputs):\n if op not in [mx.sym.Convolution, mx.sym.Deconvolution, mx.sym.FullyConnected]:\n return attrs\n if num_inputs == 3:\n attrs['no_bias'] = False\n elif num_inputs == 2:\n attrs['no_bias'] = True\n else:\n raise ValueError(\"Unexpected number of inputs for: {}\".format(op))\n return attrs", "def add_possessive(results, form, poss):\n if not poss:\n return results\n\n # Add possessive suffix\n suffixes = nounspecs.possessive_suffixes[poss]\n if isinstance(suffixes, str):\n suffixes = [suffixes]\n results2 = []\n for suffix in suffixes:\n for v in results:\n parts = list(x for x in v)\n if suffix[0] != \"@\":\n for x in suffix:\n if x == \"A\":\n p = \"\".join(parts)\n m = re.search(\"([aouAOU])[^yäöYÄÖ]*$\", p)\n if m:\n parts.append(\"a\")\n else:\n parts.append(\"ä\")\n else:\n parts.append(x)\n v = \"\".join(parts)\n else:\n if form not in (\n \"ine-sg\", \"ine-pl\", \"ela-sg\", \"ela-pl\",\n \"all-sg\", \"all-pl\", \"ade-sg\", \"ade-pl\",\n \"abl-sg\", \"abl-pl\", \"tra-sg\", \"tra-pl\",\n \"ess-sg\", \"ess-pl\", \"abe-sg\", \"abe-pl\",\n \"ptv-sg\", \"ptv-pl\", \"cmt\",\n \"inf1-long\", \"inf2\", \"inf3\", \"inf4\", \"inf5\"):\n continue\n if len(v) < 2 or v[-1] not in \"aeiouyäö\":\n continue\n if v[-2] == v[-1]:\n continue\n v += v[-1]\n v += suffix[1:]\n if v:\n results2.append(v)\n return results2", "def possible_negation_suffix(text: str) -> bool:\n suffixes = (\"less\",)\n # length is mentioned so it doesn't consider \"less\" as containing the suffix\n return text.endswith(suffixes) and len(text) >= 5", "def 
_DisableSuffixIsRelevant(suffix: str, removal_type: str) -> bool:\n if suffix == FINDER_COMMENT_SUFFIX_GENERAL:\n return True\n if suffix == removal_type:\n return True\n return False", "def init_emb(self):\r\n initrange = 0.5 / self.embedding_dim\r\n self.embeddings.weight.data.uniform_(-initrange, initrange)\r\n self.affine.weight.data.uniform_(-0, 0)\r\n self.affine.bias.data.zero_()", "def _add_to_existing_flowable(self, fb, new_terms):\n biog = ('124-38-9' in fb)\n for term in new_terms:\n self._fm.add_synonym(fb, term)\n if biog and bool(biogenic.search(term)):\n self._bio_co2.add_term(term) # ensure that bio term is a biogenic synonym", "def add_suffix(in_image,\n suffix_str):\n bandnames = in_image.bandNames().map(lambda elem: ee.String(elem).toLowerCase().cat('_').cat(suffix_str))\n nb = bandnames.length()\n return in_image.select(ee.List.sequence(0, ee.Number(nb).subtract(1)), bandnames)", "def addOtherForm(documentName, word, unique):\r\n formRef = \":form_\" + replace_form(word.word)\r\n if word.transliteration and word.transliteration.word != \"\" and word.transliteration.word != \" \":\r\n formRef += \"_\" + word.transliteration.word\r\n formRef += \"_\" + unique\r\n\r\n formRef += \" a ontolex:Form;\\n\"\r\n\r\n writtenRepRef = \" ontolex:writtenRep \\\"\"\r\n writtenRepRef += word.word + \"\\\"\" + word.writingLanguage\r\n\r\n if word.transliteration and word.transliteration.word != \"\":\r\n writtenRepRef += \", \\\"\" + word.transliteration.word + \"\\\"\" + word.transliteration.writingLanguage\r\n writtenRepRef += \" .\"\r\n\r\n frequencyRef = \"\"\r\n if word.frequencyDict:\r\n frequencyRef = \"\\n\"\r\n for corpus,frequency in word.frequencyDict.items():\r\n if frequency != 0:\r\n frequencyRef +=' frac:frequency [a e2model:' + corpus +'; rdf:value \"' + str(frequency) + '\" ] ;\\n'\r\n frequencyRef = frequencyRef[:len(frequencyRef) -2]\r\n frequencyRef += \".\"\r\n formEntry = formRef + writtenRepRef\r\n if frequencyRef != \".\":\r\n formEntry = formEntry[:len(formEntry) -1]\r\n formEntry += \";\"\r\n formEntry += frequencyRef\r\n\r\n with open(documentName, 'a') as f:\r\n f.write(formEntry)\r\n f.write(\"\\n\\n\")\r\n return", "def rule_nnp_vbz_rb_vb(words):\n original_len = len(words)\n words = mutate_tag_seq(\n words,\n ['NNP','VBZ','RB','VB'],\n ['NNP','VB','RB']\n )\n if words is not None:\n if len(words) < original_len:\n i = index_tag_seq(words, ['NNP', 'VB', 'RB'])\n words[i+1].text += 's'\n words[i+1].tag = 'VBZ'\n return words", "def ner_tag_advertise(self, advertise: Dict[str, Any]):\n tmp_ad = advertise.copy()\n full_str: str = sc.debug_print(\n self.splitting_marking(text_input=tmp_ad[\"clean_text\"],\n ner_map=self.non_measure_map,\n measure_map=self.measure_map), self.debug)\n\n terms_input: List[str] = sc.debug_print(\n [self.reg_rules[\"ngram_clear_rgx\"].sub(\"\", word[0])\n for word in self.reg_rules[\"ngram_rgx\"].findall(full_str)], self.debug)\n\n model_input: [(str, (str, ...))] = sc.debug_print(self.get_tagged_sequence(terms_input), self.debug)\n\n # Build conflict dictionary\n clean_inputs: [(str, str)] = []\n conflict_words = dict()\n for word, ne in model_input:\n if len(ne) > 1:\n if word in conflict_words.keys():\n for tag in ne:\n conflict_words[word].add(tag)\n else:\n conflict_words[word] = {*ne}\n clean_inputs.append((word, random.choice(ne)))\n else:\n clean_inputs.append((word, ne[0]))\n\n tmp_ad[\"NER\"] = clean_inputs\n return tmp_ad", "def add_bias_level(self, bias):\n\n self.bias += bias\n self.signal += bias", "def 
add_superbias(self, gain=GAIN, biasfile=None):\n\n if biasfile is None:\n biasfile = resource_filename('detector', 'files/jwst_niriss_superbias_0137.fits')\n\n # Read the super bias from file (in science coordinates).\n with fits.open(biasfile) as hdu:\n superbias = hdu[1].data # [ADU]\n\n superbias = superbias*gain # [electrons]\n\n # Select the appropriate subarray.\n if self.subarray == 'SUBSTRIP96':\n slc = slice(1792, 1888)\n elif self.subarray == 'SUBSTRIP256':\n slc = slice(1792, 2048)\n elif self.subarray == 'FULL':\n slc = slice(0, 2048)\n else:\n raise ValueError('SUBARRAY must be one of SUBSTRIP96, SUBSTRIP256 or FULL')\n\n subbias = superbias[slc, :]\n\n # Add the bias level to the simulation.\n self.data = self.data + subbias\n\n self.modif_str = self.modif_str + '_bias'", "def add_emb(self, emb):\n self.embs.append(emb)", "def _energy_bias(self,vBz):\n return np.clip(0.1*np.log10(vBz)+0.6,0,1)", "def standardize_excl(self) -> None:\n # attempt to protect against augeas error in 0.10.0 - ubuntu\n # *.augsave -> /*.augsave upon augeas.load()\n # Try to avoid bad httpd files\n # There has to be a better way... but after a day and a half of testing\n # I had no luck\n # This is a hack... work around... submit to augeas if still not fixed\n\n excl = [\"*.augnew\", \"*.augsave\", \"*.dpkg-dist\", \"*.dpkg-bak\",\n \"*.dpkg-new\", \"*.dpkg-old\", \"*.rpmsave\", \"*.rpmnew\",\n \"*~\",\n self.root + \"/*.augsave\",\n self.root + \"/*~\",\n self.root + \"/*/*augsave\",\n self.root + \"/*/*~\",\n self.root + \"/*/*/*.augsave\",\n self.root + \"/*/*/*~\"]\n\n for i, excluded in enumerate(excl, 1):\n self.aug.set(\"/augeas/load/Httpd/excl[%d]\" % i, excluded)\n\n self.aug.load()", "def _swap_negation(self):\n\n if self.text.startswith('no '):\n self.text = self.text[3:]\n else:\n self.text = 'no ' + self.text\n return self", "def create_splitword_negated(word_part):\n\n split_word = sentence.find('splitword', {'idref' : t_id})\n wordpart_idref = split_word.find('part', {'word' : word_part})\n\n last_frame = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})[-1]\n\n # Create negated\n negated = chapter_input.new_tag('fe')\n negated['name'] = NEGATED_TAG_NAME\n negated['id'] = last_frame.get('id')+'_e2'\n last_frame.insert(2, negated)\n\n # Create negated <fenode>\n negated_fenode = chapter_input.new_tag('fenode')\n negated_fenode['idref'] = wordpart_idref.get('id')\n negated_fenode['is_split'] = 'yes'\n negated.insert(0, negated_fenode)", "def addCanonicalForm(documentName, lexWord):\r\n formRef = \":form_\" + replace_form(lexWord.word) + \"_\" + lexWord.unique_name + \" a ontolex:Form;\\n\"\r\n\r\n writtenRepRef = \" ontolex:writtenRep \\\"\"\r\n writtenRepRef += lexWord.word + \"\\\"\" + lexWord.writingLanguage\r\n\r\n if lexWord.transliteration.word != \" \" and lexWord.transliteration.word != \"\" :\r\n writtenRepRef += \", \\\"\" + lexWord.transliteration.word + \"\\\"\" + lexWord.transliteration.writingLanguage\r\n writtenRepRef += \" .\"\r\n\r\n frequencyRef = \"\"\r\n if lexWord.canonicalFrequencyDict:\r\n frequencyRef = \"\\n\"\r\n for corpus,frequency in lexWord.canonicalFrequencyDict.items():\r\n if frequency != 0:\r\n frequencyRef +=' frac:frequency [a e2model:' + corpus +'; rdf:value \"' + str(frequency) + '\" ] ;\\n'\r\n frequencyRef = frequencyRef[:len(frequencyRef) -2]\r\n frequencyRef += \".\"\r\n formEntry = formRef + writtenRepRef\r\n if frequencyRef != \".\":\r\n formEntry = formEntry[:len(formEntry) -1]\r\n formEntry += \"; \"\r\n formEntry += 
frequencyRef\r\n\r\n with open(documentName, 'a') as f:\r\n f.write(formEntry)\r\n f.write(\"\\n\\n\")\r\n return", "def _append_target_attention(df, eos, is_reverse):\n def _len_no_eos(s):\n return len([el for el in s.split() if el != eos])\n\n df = df.to_frame()\n df[\"taget attention\"] = [\" \".join(str(i) for i in range(_len_no_eos(inp))) for inp in df.index]\n if is_reverse:\n df[\"taget attention\"] = [ta[::-1] for ta in df[\"taget attention\"]]\n if eos != \"\":\n df[\"taget attention\"] = [ta + \" \" + str(len(ta.split())) for ta in df[\"taget attention\"]]\n return df", "def _bell_annotate(self, mfccs, bell_start_sample):\n if self._ipa_full_vector is None:\n raise ValueError(\"Full ipa not set. Call end_with_ipa() prior.\")\n mfcc_len = mfccs.shape[1]\n sample_ann = [[0]*len(self._ipa_full_vector)] * mfcc_len\n\n for i in range(bell_start_sample, mfcc_len):\n sample_ann[i] = self._ipa_full_vector\n self.annotated_samples = sample_ann", "def add_mention_mask(self, sample):\n mention_mask = []\n for token in sample['text_tokenized']:\n mention_mask.append([1. if token in sample['item_name_tokenized'] else 0.] * self._embedding_size)\n # Padding\n while len(mention_mask) < self._max_text_length:\n mention_mask.append(np.zeros(self._embedding_size))\n sample['text_mention_mask'] = mention_mask", "def fk_ease(self, doc):\n num_sentences = _get_num_sentences(doc)\n num_words = _get_num_words(doc)\n num_syllables = _get_num_syllables(doc)\n if num_sentences == 0 or num_words == 0 or num_syllables == 0:\n return 0\n words_per_sent = num_words / num_sentences\n syllables_per_word = num_syllables / num_words\n return 206.835 - (1.015 * words_per_sent) - (84.6 * syllables_per_word)", "def _replace_suffix(self, word, suffix, replacement):\n assert word.endswith(suffix), \"Given word doesn't end with given suffix\"\n if suffix == \"\":\n return word + replacement\n else:\n return word[: -len(suffix)] + replacement", "def leggTilSang(self, nySang):\r\n self._sanger.append(nySang)", "def generate_semisupervized_label(self, idx_known, idx_unknown):\n tmp_df = self.data_info.set_index(['patientID','body_part'])\n # associate semi-supervized settings\n if len(idx_known) > 0:\n df_known = tmp_df.loc[idx_known,:]\n df_known['semi_label'] = df_known.abnormal_XR.apply(lambda x: -1 if x==1 else 1)\n df_unknown = tmp_df.loc[idx_unknown,:]\n df_unknown['semi_label'] = 0\n return pd.concat([df_known, df_unknown], axis=0).reset_index()\n else:\n df_unknown = tmp_df.loc[idx_unknown,:]\n df_unknown['semi_label'] = 0\n return df_unknown.reset_index()", "def DocumentInlineBlipInsertAfterElement(self):\n raise NotImplementedError()", "def generateSequenceBias(self, bias):\n\n if bias < 0 or bias > 1:\n raise ValueError(\"Bias must be a value between 0 and 1.\")\n else:\n for i in range(self.length):\n self.sequence.append(0 if random.random() < bias else 1)\n self.biasSeq = 1\n self.bias = bias", "def getSuffixesForWord(self, word):\n suffixes = self.word_suffixes.get(word, False)\n if suffixes is not False:\n return suffixes\n suffixes = []\n if word.isalpha():\n boundary = min(5, len(word))\n for i in range(1, boundary):\n suffixes.append(word[-i:])\n suffixes = tuple(suffixes)\n self.word_suffixes[word] = suffixes\n return suffixes", "def tag_stem_magnetic_ends(graph = None):\n\tmagnetic_ends = find_stem_magnetic_ends(graph = graph)\n\tfor magnetic_end in magnetic_ends:\n\t\tid1, id2 = magnetic_end\n\t\tgraph.edge[id1][id2]['label']= '$'\n\t\tgraph.edge[id1][id2]['type']='breakpoint'\n\treturn", "def 
set_trailing_sl(self, n_atr: float = 6):\n self.__n_atr = n_atr", "def hideAnnotations(self):\r\n widget = slicer.modules.NeedleFinderWidget\r\n nodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLAnnotationTextDisplayNode')\r\n for i in range(nodes.GetNumberOfItems()):\r\n node = nodes.GetItemAsObject(i)\r\n if widget.hideAnnotationTextButton.checked:\r\n node.SetTextScale(0)\r\n else:\r\n node.SetTextScale(3)", "def ApplyDemorgans(clause):\n from FuXi.DLP import breadth_first, breadth_first_replace\n replacementMap = {}\n for negDisj in [i for i in breadth_first(clause.body)\n if isinstance(i,Or) and i.naf]:\n replacementList = []\n for innerTerm in negDisj:\n assert isinstance(negDisj,Uniterm)\n innerTerm.naf = not innerTerm.naf\n replacementList.append(innerTerm)\n replacementMap[negDisj] = And(replacementList)\n for old,new in list(replacementMap.items()):\n list(breadth_first_replace(clause.body,candidate=old,replacement=new))", "def addbias(self, bias):\n daskD.wait(self.client.map(_call_addbias, self.vecDask, bias=bias, pure=False))\n return self", "def normQnA(dfin):\n\n update_log(er='norming Q',upload=False)\n dfin['normQ']= dfin.query_text_raw.apply(normalize_text, args=(True,True))\n dfin = dfin[dfin.normQ != 'dud_drop_me']\n\n #update_log(er='norming A',upload=True)\n #dfin['normA']= dfin.kcc_answer_raw.apply(normalize_text , args=(False,False))\n #dfin = dfin[dfin.normA != 'dud_drop_me']\n return(dfin)", "def addSuffixes(self, alist):\n for i, (word, filename) in enumerate(alist):\n withsuffix = self._findVideoFile(filename)\n alist[i] = (word, withsuffix)\n return alist", "def addNucleiName(self, nucleiName=r'$^{63}$Ni'):\n plt.text(0.48, 0.05, nucleiName, fontsize=48, transform=plt.gcf().transFigure)", "def retransform(self, bwt_seq: str, suffixes: List) -> str:\n # create empty list with same length as input sequence\n transformed_seq = [\"\"] * len(bwt_seq)\n if self.debug: print(f\"retransform function: bwt sequence: {bwt_seq}, suffixes: {suffixes}\") \n for idx, s in enumerate(suffixes):\n # rebuild original sequence by inplace inserts\n transformed_seq[(s-1 % len(bwt_seq))] = bwt_seq[idx]\n if self.debug: print(f\"retransform function: step: {idx}, letter: {bwt_seq[idx]}\") \n # build string from sequence list to restore original sequence\n if self.debug: print(f\"retransform function: transformed sequence: {''.join([c for c in transformed_seq])}\\n\") \n return \"\".join([c for c in transformed_seq])", "def setup_b_instance(self,norm,add_ps_mask=True):\n inst_tag = self.tag + '_'+str(self.flux_array_ebin)\n b = bsm.bayesian_scan_NPTF(tag=inst_tag,nside=self.nside,work_dir='/tmp/'+self.tag+'/',psf_dir=psf_dir,nlive=700)\n # Input the data, using the external data if provided\n if self.use_external_data:\n b.load_external_data(self.f1.CTB_en_bins,[self.external_data[self.flux_array_ebin]],self.f1.CTB_exposure_maps)\n else:\n b.load_external_data(self.f1.CTB_en_bins,self.f1.CTB_count_maps,self.f1.CTB_exposure_maps)\n\n if add_ps_mask:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False,ps_mask_array = self.f1.ps_mask_array)\n else:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False)\n\n b.add_new_template(self.f1.template_dict)\n b.rebin_external_data(1)\n\n b.add_poiss_model('ps_model','$A_{ps}$',[0.0,3.0],False)\n b.add_poiss_model('p7','$A_{p7}$',[0.0,2.0],False)\n b.add_poiss_model('bubs','$A_{bubs}$',[0.0,2.0],False)\n b.add_poiss_model('iso','$A_{iso}$',[0.0,3.0],False)\n # Add in a fixed 
J_map template\n b.add_fixed_templates({'J_map':[norm*self.J_map_arr[self.flux_array_ebin]/np.mean(self.J_map_arr[self.flux_array_ebin])]})\n\n b.initiate_poissonian_edep()\n return b", "def add_bb_noise(bb):\n\n center = bb.centre()\n center_4_times = np.array(list(center) * 4)\n bb_v = bb.as_vector()\n bb_center = bb_v - center_4_times\n scale_factor_x = np.random.uniform(1, 1.1)\n scale_factor_y = np.random.uniform(1, 1.2)\n scale_vector = np.array([scale_factor_x, scale_factor_y] * 4)\n scaled_bb_center = np.multiply(bb_center, scale_vector)\n bb_width = bb_v[5] - bb_v[1]\n bb_height = bb_v[2] - bb_v[0]\n #translation up to translation_factor of the new width and height\n translation_factor = 0.2\n\n t_y = np.random.uniform(-(translation_factor * bb_height), translation_factor * bb_height)\n t_x = np.random.uniform(-(translation_factor * bb_width), translation_factor * bb_width)\n t = np.array([t_x, t_y] * 4)\n scaled_bb_center = scaled_bb_center + t\n back_to_image = scaled_bb_center + center_4_times\n return back_to_image", "def to_adverb(self):\n\n text = self.text\n ending = text[-1]\n if ending == \"e\":\n text = text[0:-1]+\"ly\"\n else:\n text = text+\"ly\"\n\n return self.copy_with(typ=\"AdverbPhrase\",\n text=text)\n\n # return AdverbPhrase(**self.locals(skip=[\"text\", \"typ\", \"variants\"]),\n # text=text,\n # **self.variants)", "def augment_sentence(self):\n # Initialize a tracker to ensure we don't make more than the desired number of changes\n changes = 0\n # Make a queue for later\n queue = [self.sentence]\n\n # While we haven't made too many changes and we still have stuff to change, do work!\n while changes < self.max_changes and len(queue) > 0:\n # Take a sentence from the queue and blast it apart into a POS-tagged list\n current_sentence = queue.pop(0)\n tokenized_sentence = nltk_methods.string_to_pos_tagged_list(current_sentence)\n sentence_length = len(tokenized_sentence)\n # Now modify it according to the variation rate\n for i in range(self.variation_rate):\n # Set variable for tracking a change\n has_changed = False\n attempts = 0\n # Keep trying to make a change until either:\n # 1) You've made a change, OR\n # 2) You've tried to make a change for half the words in the sentence with no success\n while has_changed is not True and attempts <= sentence_length/2:\n syn_sent = tokenized_sentence\n swap_sent = tokenized_sentence\n insert_sent = tokenized_sentence\n del_sent = tokenized_sentence\n successful_changes = 0\n # Hand the sentence off to the specific augmentation methods\n # Note that these methods can all return empty strings, so make sure to handle that\n synonym_replaced_sentence = self.__synonym_replacement(syn_sent)\n if synonym_replaced_sentence is not \"\":\n queue.append(synonym_replaced_sentence)\n self.augmented_sentence_list.append(synonym_replaced_sentence)\n successful_changes += 1\n\n swapped_sentence = self.__swap_two_random_words(swap_sent)\n if swapped_sentence is not \"\":\n queue.append(swapped_sentence)\n self.augmented_sentence_list.append(swapped_sentence)\n successful_changes += 1\n\n inserted_sentence = self.__insert_random_synonym(insert_sent)\n if inserted_sentence is not \"\":\n queue.append(inserted_sentence)\n self.augmented_sentence_list.append(inserted_sentence)\n successful_changes +=1\n\n # We don't want to delete the sentence into oblivion, so have a threshold for smallest possible sentence\n if len(del_sent) >= 15:\n deleted_word_sentence = self.__delete_random_word(del_sent)\n if deleted_word_sentence is not \"\":\n 
queue.append(deleted_word_sentence)\n self.augmented_sentence_list.append(deleted_word_sentence)\n successful_changes += 1\n \n # Now update the while loop flags\n if successful_changes >= 4:\n has_changed = True\n attempts += 1\n changes += 2", "def update_bias(self):\n self._bias = self._bias + self.update_bias_value\n self.bias_clipping()", "def _define_no_nb_system(self,\n system,\n neglected_angle_terms,\n atom_proposal_order):\n import copy\n from simtk import unit\n no_nb_system = copy.deepcopy(system)\n _logger.info(\"\\tbeginning construction of no_nonbonded final system...\")\n _logger.info(f\"\\tinitial no-nonbonded final system forces {[force.__class__.__name__ for force in list(no_nb_system.getForces())]}\")\n\n num_forces = no_nb_system.getNumForces()\n for index in reversed(range(num_forces)):\n force = no_nb_system.getForce(index)\n if force.__class__.__name__ == 'NonbondedForce' or force.__class__.__name__ == 'MonteCarloBarostat':\n if self._use_14_nonbondeds and force.__class__.__name__ == 'NonbondedForce':\n for particle_index in range(force.getNumParticles()):\n [charge, sigma, epsilon] = force.getParticleParameters(particle_index)\n force.setParticleParameters(particle_index, charge*0.0, sigma, epsilon*0.0)\n\n for exception_index in range(force.getNumExceptions()):\n p1, p2, chargeprod, sigma, epsilon = force.getExceptionParameters(exception_index)\n if len(set(atom_proposal_order).intersection(set([p1, p2]))) == 0: #there is no growth index in this exception, so we\n force.setExceptionParameters(exception_index, p1, p2, chargeProd = chargeprod * 0.0, sigma = sigma, epsilon = epsilon * 0.0)\n\n else:\n no_nb_system.removeForce(index)\n\n elif force.__class__.__name__ == 'HarmonicAngleForce':\n num_angles = force.getNumAngles()\n for angle_idx in neglected_angle_terms:\n p1, p2, p3, theta0, K = force.getAngleParameters(angle_idx)\n force.setAngleParameters(angle_idx, p1, p2, p3, theta0, unit.Quantity(value=0.0, unit=unit.kilojoule/(unit.mole*unit.radian**2)))\n\n # #the last thing to do for bookkeeping is to delete the torsion force associated with the extra ring-closing and chirality restraints\n #\n # #first, we see if there are two CustomTorsionForce objects...\n # custom_torsion_forces = [force_index for force_index in range(no_nb_system.getNumForces()) if no_nb_system.getForce(force_index).__class__.__name__ == 'CustomTorsionForce']\n # if len(custom_torsion_forces) == 2:\n # #then the first one is the normal growth torsion force object and the second is the added torsion force object used to handle chirality and ring-closing constraints\n # no_nb_system.removeForce(max(custom_torsion_forces))\n\n forces = { no_nb_system.getForce(index).__class__.__name__ : no_nb_system.getForce(index) for index in range(no_nb_system.getNumForces()) }\n _logger.info(f\"\\tfinal no-nonbonded final system forces {forces.keys()}\")\n\n #bonds\n bond_forces = forces['HarmonicBondForce']\n _logger.info(f\"\\tthere are {bond_forces.getNumBonds()} bond forces in the no-nonbonded final system\")\n\n #angles\n angle_forces = forces['HarmonicAngleForce']\n _logger.info(f\"\\tthere are {angle_forces.getNumAngles()} angle forces in the no-nonbonded final system\")\n\n #torsions\n torsion_forces = forces['PeriodicTorsionForce']\n _logger.info(f\"\\tthere are {torsion_forces.getNumTorsions()} torsion forces in the no-nonbonded final system\")\n\n\n return no_nb_system", "def initialize_forget_bias(self, bias_value):\n self.tree_lstm.initialize_forget_bias(bias_value)", "def gibber(self): \n for 
x in self.consonants:\n if (x in self.sentence):\n \t self.sentence = self.sentence.replace(x, x+'o'+unicode(x).lower())", "def remove_abbr(self, ):\n if self.AttributeNames.ABBR in self.attrs:\n del self.attrs[self.AttributeNames.ABBR]\n return self", "def append_rephrase(self, qn):\n # TODO:\n pass", "def negations(self) -> str:", "def actualizar_biases(self):\n\t\tfor i in range(len(self.neuronas)):\n\t\t\tself.neuronas[i].bias += (self.neuronas[i].alpha * self.delthas[i])", "def _make_guided_attention_mask(ilen, olen, sigma):\n grid_x, grid_y = paddle.meshgrid(\n paddle.arange(olen), paddle.arange(ilen))\n grid_x = grid_x.cast(dtype=paddle.float32)\n grid_y = grid_y.cast(dtype=paddle.float32)\n return 1.0 - paddle.exp(-(\n (grid_y / ilen - grid_x / olen)**2) / (2 * (sigma**2)))", "def update_emccd_bias(self,emccd_bias):\n if (emccd_bias != None):\n self.emccd_bias = emccd_bias\n self.signal_status_message.emit('Set EMCCD bias to {}'.format(self.emccd_bias))\n self.signal_emccd_bias.emit(self.emccd_bias)", "def bias(self, value):\n self.mbmod.bias = value", "def negation_check(self,sentence):", "def set_redundant_linker_to_avergae(self, modified_matrix):\n modified_matrix[:, 0:self.selex_predict_str_adaptor, :] = 0.25\n modified_matrix[:, self.selex_str_len - self.selex_predict_str_adaptor:self.selex_str_len, :] = 0.25\n return modified_matrix", "def NBIAS(self):\n return len(self.STARS[\"dist\"])", "def update_bias(self):\n self._bias = self._bias + self.update_bias_value", "def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.word.bias.data.fill_(0)\n self.word.weight.data.uniform_(-0.1, 0.1)", "def test_forbidden_words(self) -> None:\n pad_open: bool = False\n words: List[Word] = self.report.get_words()\n forbidden_words: List[Word] = []\n last_error: bool = False\n\n for word in words:\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n if (word.text in self.rules.forbidden_words) or any(\n [b in self.rules.forbidden_words for b in word.baseform]\n ):\n forbidden_words.append(word)\n last_error = True\n continue\n if last_error:\n last_error = False\n combo = \" \".join([w.text for w in forbidden_words])\n start, _ = self.report.get_word_postion(forbidden_words[0])\n _, end = self.report.get_word_postion(forbidden_words[-1])\n self.add_error(\n f\"Ordet {combo} får endast förekomma i citat.\", position=(start,end)\n )", "def _maskhg19(self):\n if len(self._current_block) > 2:\n self._current_block[0].text = self._current_block[1].text\n self._current_block[0].size = self._current_block[1].size\n self._current_block[0].setstring()\n self._current_block.remove(self._current_block[1])\n else:\n self._current_block = []", "def test_unwanted_words(self) -> None:\n pad_open: bool = False\n for word in self.report.get_words():\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n for u_word in self.rules.unwanted_words:\n if word.text == u_word[\"word\"]:\n self.add_error(\n f\"Ordet {word.text} är inte tillåtet, \"\n f\"använd {u_word['alternative']} istället.\",\n word=word,\n )\n break", "def postprocess_segments(self):\n # make segs a list of mask arrays, it's easier to store\n # as there is a hdf5 equivalent\n for iseg, seg in enumerate(self.segs):\n mask = np.zeros(self._adata.shape[0], dtype=bool)\n mask[seg] = True\n self.segs[iseg] = mask\n # convert to arrays\n self.segs = np.array(self.segs)\n self.segs_tips = 
np.array(self.segs_tips)", "def insert_suffix(self, prefix, idx):\n parent_pos = self.path_to_matching_prefix(prefix)[-1]\n\n has_inserted = False\n for child_pos in self.children(parent_pos):\n if child_pos.element()._label[0] == prefix[0]:\n # Intermediate node is added between parent and child.\n j = 0\n while j < len(child_pos.element()._label) and \\\n child_pos.element()._label[j] == prefix[j]:\n j += 1\n\n # Update tree structure\n intermediate_pos = self._add(parent_pos, self._SuffixNode(prefix[:j], -1))\n intermediate_node = self._validate(intermediate_pos)\n\n child_node = self._validate(child_pos)\n child_node._parent = intermediate_node\n intermediate_node._children[child_node] = child_node\n parent_node = self._validate(parent_pos)\n del parent_node._children[child_node]\n\n # Set label of child node to be unmatched part of child label.\n child_pos.element()._label = child_pos.element()._label[j:]\n # create new leaf node containing unmatched part of suffix.\n self._add(intermediate_pos, self._SuffixNode(prefix[j:], idx))\n # break from for loop.\n has_inserted = True\n break\n\n # New node is inserted as child of parent.\n if not has_inserted:\n self._add(parent_pos, self._SuffixNode(prefix, idx))", "def remove_words_and_ngrams(self, document):\n for w in self.words_and_ngrams_exceptions:\n document = re.sub(w, '', document)\n return document", "def test_upper_sphere_neg_bias(self):\n weight = np.array([-3, 4]) # |weight| = 5\n bias = -1\n normed_weight = np.array([3 / 5, -4 / 5])\n normed_bias = 5\n # noinspection PyTypeChecker\n emb = ConceptEmbedding(normal_vec=weight, support_factor=bias)\n normed_emb = emb.unique_upper_sphere()\n\n # Format checks\n assert normed_emb.normal_vec.shape == emb.normal_vec.shape\n assert np.array(normed_emb.support_factor).shape == np.array(\n emb.support_factor).shape\n\n # Value checks\n assert np.allclose(normed_emb.normal_vec, normed_weight), \\\n (\"Wrong normalized weight: expected {}, but was {}\"\n .format(normed_weight, normed_emb.normal_vec))\n assert np.allclose(normed_emb.support_factor, normed_bias), \\\n (\"Wrong normalized bias: expected {}, but was {}\"\n .format(normed_bias, normed_emb.support_factor))", "def needs_suffix_link(self, node):\n self.link_to(node)\n self.node_missing_suffix_link = node", "def add_notice_to_docstring(\n doc, instructions, no_doc_str, suffix_str, notice):\n if not doc:\n lines = [no_doc_str]\n else:\n lines = doc.splitlines()\n lines[0] += ' ' + suffix_str\n\n notice = [''] + notice + [instructions]\n\n if len(lines) > 1:\n # Make sure that we keep our distance from the main body\n if lines[1].strip():\n notice.append('')\n\n lines[1:1] = notice\n else:\n lines += notice\n\n return '\\n'.join(lines)", "def word_analogy(self):\n data = open(\"data/word_analogy_subset.en.ar.txt\").read().split('\\n')\n data = [x for x in data if len(x.split()) == 4]\n cnt = 0\n keys = list(self.embeddings_index.keys())\n vectors = np.array(list(self.embeddings_index.values()))\n norms = np.linalg.norm(vectors, axis=1)\n for i in data:\n i = self.preprocessor(i).split()\n try:\n v = self.embeddings_index[i[0]] - self.embeddings_index[i[1]] + self.embeddings_index[i[2]]\n except:\n continue\n unit = v / np.linalg.norm(v)\n dists = np.dot(vectors, unit) / norms\n best = np.argpartition(-dists, 10)[:10 + 1]\n best = best.take(np.argsort((-dists).take(best)))\n result = [(keys[sim], float(dists[sim]))\n for sim in best]\n sbv = result[:10]\n for j in sbv:\n if j[0] == i[3]:\n cnt += 1\n return cnt/ len(data)", "def 
exclude_bias_and_norm(path: Tuple[Any], val: jnp.ndarray) -> jnp.ndarray:\n del val\n if path[-1] == \"bias\" or path[-1] == 'scale':\n return False\n return True", "def add_sentence(self, sentence):\n cleaned = self.clean_string(sentence)\n stemmed = self.stem(cleaned)\n self.texts.append(stemmed)", "def addtosigma_dfa(Din, addition):\n for symb in addition:\n assert(type(symb)==str and len(symb)==1\n ),(\"Adding non-string or longer than 1 string \" +\n symb + \" in addtosigma_dfa.\")\n assert(Din[\"Sigma\"] & addition == set()\n ),(\"Din[Sigma] already has these symbols: \"+\n str(Din[\"Sigma\"] & addition))\n D = Din.copy()\n D[\"Sigma\"] = Din[\"Sigma\"] | addition\n return D", "def _update_directives(self, **dirs):\n this_node_dirs = {'usett' : False,\n 'outputname_1' : 'loc',\n 'outputname_2' : 'invscaled',\n 'outputname_3' : 'invscaleoffd'}\n this_node_dirs.update(dirs)\n super(MergeSeqsNormalwNormalEv, self)._update_directives(**this_node_dirs)", "def hide_correctness(self):\n self.hiddens.add('correct')\n self.hiddens.add('score')", "def add_mea_labels(self):\n # TODO call signalalign if not called\n mea_alignment = self.get_signalalign_events(mea=True)\n # rna reference positions are on 5' edge aka right side of kmer\n if self.rna:\n mea_alignment[\"reference_index\"] -= self.kmer_index\n else:\n mea_alignment[\"reference_index\"] += self.kmer_index\n\n self.aligned_signal.add_label(mea_alignment, name=\"mea_signalalign\", label_type='label')\n return True", "def get_extras(self, text=None):\n if text is None:\n text = self.nltk_text(self.text)\n # Tag parts of speech\n tagged = nltk.pos_tag(text)\n # Try for composed NNP / NNPS\n is_proper_noun = False\n text = []\n proper_noun = \"\"\n for (word, tag) in tagged:\n if not is_proper_noun and (tag == 'NNP' or tag == 'NNPS'):\n # Start building a proper noun\n proper_noun = word\n # Set it true\n is_proper_noun = True\n # Add it to annotations anyway\n text.append(word)\n elif tag == 'NNP' or tag == 'NNPS':\n # Previous was proper noun. So it may be combined\n proper_noun += \" \" + word\n # Add the single word to annotations anyway\n text.append(word)\n elif is_proper_noun and tag == 'IN':\n # Add what we have by now to the text\n text.append(proper_noun)\n # Previous was proper noun. 
So it may be composed\n proper_noun += \" \" + word\n elif is_proper_noun:\n # Add what we have by now to the text\n text.append(proper_noun)\n # Finished with proper noun, so set it false\n is_proper_noun = False\n # Remove duplicates\n seen = {}\n result = []\n for w in text:\n if w in seen:\n continue\n seen[w] = 1\n result.append(w)\n # Eliminate common\n result = [w for w in result if w.lower() not in self.common_words and\n w.lower() not in stopwords.words('english')]\n return result", "def __wordsToSuffixes__(self):\n suffixes = defaultdict(int)\n for word, tag in self.getWordTagDict():\n for suffix in self.getSuffixesForWord(word):\n suffixes[(suffix, tag)] += 1\n return suffixes", "def bias(self) -> Optional[str]:\n return pulumi.get(self, \"bias\")", "def show_hidden_word(secret_word, old_letters_guessed):\n word = \"\"\n for i in secret_word:\n if i in old_letters_guessed:\n word = word + \" \" +(i)\n else:\n word = word + \" _\"\n return word", "def corrupt_example(self, e):\n import random\n import copy\n e = copy.copy(e)\n last = e[-1]\n cnt = 0\n while e[-1] == last:\n e[-1] = random.randint(0, self.parameters.vocab_size-1)\n pr = 1./self.parameters.vocab_size\n cnt += 1\n # Backoff to 0gram smoothing if we fail 10 times to get noise.\n if cnt > 10: e[-1] = random.randint(0, self.parameters.vocab_size-1)\n weight = 1./pr\n return e, weight", "def add_excl_parts(db, usernames):\n desc = \"Replicating the effect \" + \\\n \"of priming with common vs rare ideas in individual \" + \\\n \"brainstorming with revised interface\"\n exp_id= 'tN33ATDiCukWfj5G7'\n # exps = db.experiments.find()\n exp = db.experiments.find_one({'_id': exp_id})\n\n db.experiments.update({'_id': exp_id},\n {'$set': {'excludeUsers': list(usernames), 'description': desc}})\n # exp['excludeUsers'] = list(usernames)\n exp = db.experiments.find_one({'_id': exp_id})\n print exp['excludeUsers']\n print exp['description']", "def keep_words(self, idx):\n print('{} words have been removed'.format(self.data.shape[1] - len(idx)))\n self.data = self.data[:, idx]\n self.vocab = [self.vocab[i] for i in idx]", "def calc_idf(self, nd):\n # collect idf sum to calculate an average idf for epsilon value\n idf_sum = 0\n # collect words with negative idf to set them a special epsilon value.\n # idf can be negative if word is contained in more than half of documents\n negative_idfs = []\n for word, freq in nd.items():\n idf = math.log(self.corpus_size - freq + 0.5) - math.log(freq + 0.5)\n self.idf[word] = idf\n idf_sum += idf\n if idf < 0:\n negative_idfs.append(word)\n self.average_idf = idf_sum / len(self.idf)\n\n eps = self.epsilon * self.average_idf\n for word in negative_idfs:\n self.idf[word] = eps", "def gerundify(verb):\n if verb.endswith(\"e\"):\n verb = verb[:-1]\n\n if random() < 0.4:\n if (\n not verb.startswith(\"a\")\n and not verb.startswith(\"e\")\n and not verb.startswith(\"i\")\n and not verb.startswith(\"o\")\n and not verb.startswith(\"u\")\n ):\n verb = \"a-\" + verb\n\n return verb + \"ing\"", "def augment(self):\n for n in self.notes:\n n.augment()", "def add_tex_to_binning(self, binning_dict):\n if 'reco' in binning_dict['name']:\n sub_string = 'reco'\n elif 'true' in binning_dict['name']:\n sub_string = 'true'\n else:\n sub_string = None\n if 'energy' in binning_dict['name']:\n binning_dict['tex'] = r'$E_{%s}$'%sub_string\n elif 'coszen' in binning_dict['name']:\n binning_dict['tex'] = r'$\\cos\\theta_{Z,%s}$'%sub_string", "def get_nd_form(e,text):\r\n if e.is_container():\r\n return NdForm.x\r\n if 
e.check_sc(WP_punct):\r\n if text == '.' or \\\r\n text == '?' or \\\r\n text == '!' or \\\r\n text == ':' or \\\r\n text == ';':\r\n return NdForm.terminator\r\n elif text == ',':\r\n return NdForm.comma\r\n return NdForm.x\r\n if e.is_verb():\r\n # \"sub\": set of terms in subject clause\r\n sub = e.get_subnodes([SR_agent,SR_topic,SR_exper])\r\n if e.v_iso_sub is not None:\r\n # this is subject-verb\r\n return NdForm.verbclause\r\n elif e.check_vp(VP_query):\r\n # explicitly marked as query\r\n return NdForm.queryclause\r\n elif len(sub) == 0:\r\n if e.check_vp(VP_gerund|VP_inf|VP_root):\r\n return NdForm.action\r\n # default is \"verb-clause\"\r\n return NdForm.verbclause\r\n if len(e.wrds) == 1:\r\n # a word. Default is \"X\", but look for useful cases.\r\n wrd = e.get_wrd(0)\r\n if vcb.check_prop(wrd,WP_query):\r\n return NdForm.queryword\r\n if vcb.check_prop(wrd,WP_n|WP_detw):\r\n return NdForm.n\r\n if vcb.check_prop(wrd,WP_conj):\r\n return NdForm.conjword\r\n if vcb.check_prop(wrd,WP_mod):\r\n return NdForm.mod\r\n # use default\r\n return NdForm.x\r\n # a phrase. possessive? (\"John's cat\")\r\n poss_contract = vcb.lkup(\"'s\",False)\r\n if poss_contract in e.wrds:\r\n return NdForm.n\r\n # compound modifier? (\"very happy\", \"sad and miserable\")\r\n is_mod = True\r\n for wrd in e.wrds:\r\n if not vcb.check_prop(wrd,WP_mod|WP_conj):\r\n is_mod = False\r\n break\r\n if is_mod:\r\n return NdForm.mod\r\n # conjunction phrase? (\"boys and girls\")\r\n for wrd in e.wrds:\r\n if vcb.check_prop(wrd,WP_conj):\r\n return NdForm.conjphrase\r\n break\r\n # remaining tests based on first word\r\n wrd = e.get_wrd(0)\r\n if vcb.check_prop(wrd,WP_query):\r\n # \"how many\", \"what time\"\r\n return NdForm.queryphrase\r\n if vcb.check_prop(wrd,WP_dets|WP_detw):\r\n return NdForm.n\r\n # default\r\n return NdForm.x", "def hamming_distance_to_true_naive(self, true_line, line, query_name, restrict_to_region='', normalize=False, padfo=None, debug=False):\n\n true_naive_seq = utils.get_full_naive_seq(self.germlines, true_line)\n inferred_naive_seq = utils.get_full_naive_seq(self.germlines, line)\n\n left_hack_add_on = ''\n right_hack_add_on = ''\n if len(true_line['seq']) > len(line['seq']): # ihhhmmm doesn't report the bits of the sequence it erodes off the ends, so we have to add them back on\n # if len(true_naive_seq) > len(inferred_naive_seq): # hm, now why did I use line['seq'] stuff before?\n start = true_line['seq'].find(line['seq'])\n assert start >= 0\n end = len(line['seq']) + start\n left_hack_add_on = true_line['seq'][: start]\n right_hack_add_on = true_line['seq'][ end :]\n # extra_penalty = len(left_hack_add_on) + len(right_hack_add_on)\n inferred_naive_seq = 'N'*len(left_hack_add_on) + inferred_naive_seq + 'N'*len(right_hack_add_on)\n if debug:\n print ' adding to inferred naive seq'\n\n # if restrict_to_region == '':\n # print ' before', inferred_naive_seq\n if padfo is not None: # remove N padding from the inferred sequence\n inferred_naive_seq = inferred_naive_seq[padfo['padleft'] : ]\n if padfo['padright'] > 0:\n inferred_naive_seq = inferred_naive_seq[ : -padfo['padright']]\n # if restrict_to_region == '':\n # print ' after ', inferred_naive_seq\n\n bounds = None\n if restrict_to_region != '':\n bounds = utils.get_regional_naive_seq_bounds(restrict_to_region, self.germlines, true_line) # get the bounds of this *true* region\n true_naive_seq = true_naive_seq[bounds[0] : bounds[1]]\n inferred_naive_seq = inferred_naive_seq[bounds[0] : bounds[1]]\n\n if debug:\n print 
restrict_to_region, 'region, bounds', bounds\n print ' true ', true_naive_seq\n print ' infer', inferred_naive_seq\n\n if len(true_naive_seq) != len(inferred_naive_seq):\n raise Exception('still not the same lengths for %s\\n %s\\n %s' % (query_name, true_naive_seq, inferred_naive_seq))\n fraction, len_excluding_ambig = utils.hamming_fraction(true_naive_seq, inferred_naive_seq, return_len_excluding_ambig=True)\n total_distance = int(fraction * len_excluding_ambig)\n if len(true_naive_seq) == 0:\n print 'WARNING zero length sequence in hamming_distance_to_true_naive'\n return 0\n if normalize:\n return int(100 * (float(total_distance) / len(true_naive_seq)))\n else:\n return total_distance", "def mask_disc_markers(self, text: str) -> str:\n punctuations = \".?!;:-()'\\\"[]\"\n for elem in punctuations:\n text = text.replace(elem, \" \" + elem + \" \")\n text = \" \" + text + \" \"\n for dm in self.dms:\n text.replace(\" \" + dm + \" \", \" <mask> \" * len(dm.split()))\n return text", "def add_unknown_words(self, word_vecs, vocab, min_df=3, k=300):\n for word in vocab:\n if word not in word_vecs and vocab[word] >= min_df:\n word_vecs[word] = np.random.uniform(-0.25,0.25,k)", "def addNoise(self, sigma=1.0):\n noise = numpy.random.normal(loc=0, scale=sigma, size=(self.ny, self.nx))\n self.image += noise\n return", "def attention_bias_ignore_padding(memory_padding):\n\tret = tf.multiply(memory_padding, -1e18)\n\treturn tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1)", "def newInternal(word, deflike):\n\n avedeflike = 0\n denom = len(word)\n for indices, f in fqs(word, 0.5):\n context, numer = internalContext(indices, word)\n if deflike[context]:\n avedeflike += numer/denom * f * deflike[context]\n return avedeflike", "def with_final_word_out(self, letters, allow_non_final=True):\n from copy import deepcopy\n\n new = deepcopy(self)\n new.construct_final_word_out(letters, allow_non_final)\n return new", "def set_gains(self, gain, flag_normalized=True):\n self.normalize_gains(gain)\n return super().set_gains(gain, flag_normalized=False)", "def neutralize(word, g, word_to_vec_map):\n\n ### START CODE HERE ###\n # Select word vector representation of \"word\". Use word_to_vec_map. (≈ 1 line)\n e = word_to_vec_map[word]\n\n # Compute e_biascomponent using the formula given above. (≈ 1 line)\n e_biascomponent = np.dot(e, g) / np.sum(np.dot(g, g)) * g\n # e_biascomponent = np.sqrt(np.sum(np.dot(e,e))) * cosine_similarity(e, g) * g/np.sqrt(np.sum(np.dot(g,g)))\n # Neutralize e by subtracting e_biascomponent from it\n # e_debiased should be equal to its orthogonal projection. (≈ 1 line)\n e_debiased = e - e_biascomponent\n ### END CODE HERE ###\n\n return e_debiased", "def add_guide_alignment(self):\n test_sam = self.get_signalalign_events(sam=True)\n events = self.get_resegment_basecall()\n cigar_labels = create_labels_from_guide_alignment(events=events, sam_string=test_sam,\n kmer_index=self.kmer_index)\n for i, block in enumerate(cigar_labels):\n # print(block)\n self.aligned_signal.add_label(block, name=\"guide_alignment{}\".format(i), label_type='guide')\n return True" ]
[ "0.6260935", "0.6102724", "0.51479083", "0.49983943", "0.48730773", "0.48502025", "0.4801285", "0.47731313", "0.4733862", "0.47119424", "0.4682679", "0.46698153", "0.4657512", "0.46404636", "0.4629202", "0.46024075", "0.45967445", "0.45915237", "0.4586859", "0.4568321", "0.45674026", "0.45450968", "0.45085713", "0.44973442", "0.4485545", "0.4484374", "0.4447037", "0.4438654", "0.4436774", "0.44347775", "0.4414686", "0.44080383", "0.43973878", "0.4396198", "0.43937153", "0.4390447", "0.43878815", "0.4386588", "0.43854642", "0.43849695", "0.4383158", "0.43786633", "0.43754044", "0.43723446", "0.43718064", "0.43647", "0.43590447", "0.43588978", "0.4352386", "0.4349645", "0.4349329", "0.43490082", "0.4344696", "0.4336392", "0.43301108", "0.4315847", "0.43140486", "0.4313569", "0.4307822", "0.42991382", "0.42985094", "0.42971852", "0.42962557", "0.42787108", "0.4273209", "0.42660362", "0.42614862", "0.42609125", "0.42578292", "0.42479178", "0.42458537", "0.4241677", "0.42397305", "0.42333773", "0.42324266", "0.4229966", "0.42284492", "0.42265862", "0.4226397", "0.42260092", "0.4225893", "0.42245778", "0.42192662", "0.4216149", "0.42007324", "0.4198188", "0.41933513", "0.4192792", "0.41892877", "0.41836724", "0.41830093", "0.41794643", "0.4177011", "0.41750214", "0.41744405", "0.4173335", "0.41680396", "0.4167615", "0.41672435", "0.41653052" ]
0.5587445
2
Return the distance between this WordForm and the one provided.
def distance(self, wf, positions = None, features = None):
    dist = 0
    # Iterate through positions in the WordForm.
    for position in range(3):
        if positions == None or position in positions:
            if self.segments[position].seg_type == wf.segments[position].seg_type:
                # Use the WordForm's Segment to determine the possible
                # features of Segments in this position.
                for feature in self.segments[position].possible_features():
                    if features == None or feature in features:
                        my_value = self.segments[position].get_feature_value(feature)
                        comp_value = wf.segments[position].get_feature_value(feature)
                        dist += abs(feature_distance(feature, my_value, comp_value))
            else:
                return 100
    return dist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance(self):\n return self.value * len(self.alignment.query)", "def distance(self):\n return self._distance", "def distance(self) -> int:\n return 0", "def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre", "def get_distance(self) -> int:\n return self.get_measurement_data().distance", "def distance(self):\n return Distance(length_of(self.position.au))", "def __distance_to(self, other: Any) -> float:\n return np.linalg.norm(self.pos - other.pos)", "def getDistance(self):\n return sqrt(self.state[0] * self.state[0] + self.state[2] * self.state[2])", "async def distance(self):\n return round(await self._rpc.distance(), 2)", "def distance_from_origin(self) -> float:\n return self._distance_from_origin", "def edit_distance(self, other):\n union = len(self) + len(other)\n return 1.0 - 2.0*(self.intersection(other)/union)", "def distance(self, other: PointOrIterable = None) -> float:\n return (self.distance_squared(other or Point())) ** 0.5", "def edit_distance(self, other):\r\n union = len(self) + len(other)\r\n return 1.0 - 2.0*(self.intersection(other)/union)", "def distance(self, other):\n # only used in triangle.__str__\n return hypot(self.x - other.x, self.y - other.y)", "def Distance(self, *args):\n return _Bnd.Bnd_Box_Distance(self, *args)", "def distance_factor(self):\n return self._distancefactor", "def distance_to(self, p):\n return (self - p).length()", "def distance_to(self, p):\n return (self - p).length()", "def distance_to(self, other):\n p_self, p_other = self.closest_points(other)\n return np.linalg.norm(p_self - p_other)", "def distance(self, other):\n # distance = math.sqrt((self.position.x - other.position.x) ** 2 +\n # (self.position.y - other.position.y) ** 2)\n distance = math.sqrt(sum((self.position - other.position) ** 2))\n return distance", "def render_distance(self) -> int:\n return self._render_distance", "def distance(self, word1, word2):\n\n return scipy.spatial.distance.cosine(self.vectors.get(word1), self.vectors.get(word2))", "def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)", "def distanceTo(self, other):\n return self.position.distanceTo(other.position)", "def gram_edit_distance(self, gram1, gram2):\n distance = 0.0\n if gram1 == gram2:\n distance = 1.0\n return distance", "def distance(self):\n\t\tif len(self._scores < 2):\n\t\t\treturn 0.0\n\n\t\treturn self[-1] - self[-2]", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) **0.5", "def distance(self, other_pt, is_lla=True):\n return 0.0", "def gram_edit_distance(self, gram1, gram2):\r\n distance = 0.0\r\n if gram1 == gram2:\r\n distance = 1.0\r\n return distance", "def distance(self, other: \"Point\") -> float:\n if not isinstance(other, self.__class__):\n raise TypeError(\"Expected `other` to be an instance of `{}`\"\\\n .format(self.__class__))\n dx = self.x - other.x\n dy = self.y - other.y\n return sqrt((dx ** 2) + (dy ** 2))", "def distance_to(self, other):\n ox, oy = other\n return math.hypot(self[0] - ox, self[1] - oy)", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return 
((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance(self, other):\n\n return hypot(self.x - other.x, self.y - other.y)", "def __get_distance(self, game_object):\n obj_x, obj_y = game_object.get_coordinates()\n self_x, self_y = self._coordinates\n\n inner = (obj_x-self_x)**2 + (obj_y-self_y)**2\n return math.sqrt(inner)", "def distance(cls, atom_1, atom_2):\n\t\t\n\t\treturn np.linalg.norm((atom_1-atom_2).atom_loc)", "def compute_distance(self):\n loc = np.extend_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n pos_diff = m-m.transpose(0, 2, 1)\n self.distance = np.linalg.norm(pos_diff, axis=0)", "def distance(self, other):\n xd, yd = self.x-other.x, self.y-other.y\n return math.sqrt(xd**2 + yd**2)", "def distance(self, point_1=(0, 0), point_2=(0, 0)):\n\t\treturn math.sqrt((point_1[0]-point_2[0])**2+(point_1[1]-point_2[1])**2)", "def distance(self, other_room):\n return self.p[0].distanceSquare(other_room.p[0])", "def distance(self, other):\n dx = self.x - other.x\n dy = self.y - other.y\n return math.sqrt(dx*dx + dy*dy)", "def distance_to(self, other):\n if type(other) == GeoPoint:\n other = other.to_cartesian()\n d0 = self.x - other.x\n d1 = self.y - other.y\n d2 = self.z - other.z\n\n return math.sqrt(d0 * d0 + d1 * d1 + d2 * d2)", "def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance", "def distanceFromOrigin(self):\n return ((self.x)**2+(self.y)**2)**0.5", "def distance(self, wn1, wn2):\n return abs(self.chunk_map[wn1] - self.chunk_map[wn2])", "def manhatam_distance(self) -> int:\n raise NotImplementedError", "def distanceTo(self,other):\n if not isinstance(other,Point):\n return \n return math.sqrt((self.longitude - other.getLongitude())**2 +(self.latitude - other.getLatitude())**2)", "def distance(self, other):\n return _binary_op(arctern.ST_Distance, self, other)", "def distance(self, position):\n s, r = self.local_coordinates(position)\n return abs(r) + max(s - self.length, 0) + max(0 - s, 0)", "def distance(self, other):\n ...", "def distance(self, location):\n return numpy.linalg.norm(self.vector_to(location))", "def get_distance(self, other):\n return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)", "def distance(self, other: \"Location\") -> float:\n return haversine(self.latitude, self.longitude, other.latitude, other.longitude)", "def calcDistance(self, left, right):\n\n return math.fabs(right-left)", "def distance_to(self, other: Geometry[Scalar]) -> Scalar:\n return (self._distance_to_point(other)\n if isinstance(other, Point)\n else (non_negative_min(self._distance_to_point(point)\n for point in other.points)\n if isinstance(other, Multipoint)\n else other.distance_to(self)))", "def distance(self, p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def euclidean_distance(self,):\n return sqrt(pow((self.pose1.x - self.pose2.x), 2) +\n pow((self.pose1.y - self.pose2.y), 2))", "def distance(self, word_a, word_b):\n word_a, word_b = word_a.upper(), word_b.upper()\n s_a = self.word_lookup[word_a]\n s_b = self.word_lookup[word_b]\n j = 1\n max_len = min(len(s_a), len(s_b))\n while j <= 
max_len:\n if s_a[-j] != s_b[-j]:\n break\n j += 1\n return j", "def _get_distance_diff(self, input):\n nbatch = input.shape[0]\n in1 = input.unsqueeze(1).expand(\n nbatch, self.nelec, self.nelec, self.ndim)\n in2 = input.unsqueeze(2).expand(\n nbatch, self.nelec, self.nelec, self.ndim)\n dist = torch.pow(in1 - in2, 2).sum(3)\n return dist", "def length(self) -> float:\n return pos.distance(self.start, self.end)", "def getDistance(self):\n taBox = (self.thor * self.tvert)/(720*960) #box area as percentage of whole\n if(taBox==None or taBox<=0): return -1\n const = 4 * math.tan(0.471)*math.tan(0.3576)\n return math.sqrt((self.abox)/(const*taBox))", "def distance_to(self, x, y):\n\t\tdx = x - self.x\n\t\tdy = y - self.y\n\t\treturn math.sqrt((dx**2)+(dy**2))", "def distance(self,other):\n return math.sqrt((self.x - other.x)**2 +(self.y - other.y)**2)", "def DistanceFromOrigin(self):\r\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def manhatam_distance(self) -> int:\n return abs(self.position[0]) + abs(self.position[1])", "def distance(self, other):\n return math.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)", "def distance_to(self, other):\n x0,y0 = self.x, self.y\n x1,y1 = other.x, other.y\n dist = math.sqrt((x1-x0) ** 2 + (y1-y0) ** 2)\n return int(dist)", "def computeDistance(self):\n distance = 0.0\n height = self.heightField.getNumber()\n ratio = self.indexField.getNumber()\n numBounces = self.bouncesField.getNumber()\n\n for bounce in range(numBounces):\n bounceHeight = height * ratio\n distance += height + bounceHeight\n height = bounceHeight\n\n self.distanceField.setNumber(distance)", "def distance(self,case):\r\n return max(abs(self.ligne-case.ligne),abs(self.colonne-case.colonne))", "def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d", "def distance(self, right: GeoSpatialValue) -> ir.FloatingValue:\n return ops.GeoDistance(self, right).to_expr()", "def query_distance(self, instance1=(), instance2=()):\n distance = sum([pow((a - b), 2) for a, b in zip(instance1, instance2)])\n return distance", "def distance(self, to: \"Position\") -> int:\n return abs(self.x - to.x) + abs(self.y - to.y)", "def get_positional_distance(self, other):\n a = self.array_form\n b = other.array_form\n if len(a) != len(b):\n raise ValueError(\"The permutations must be of the same size.\")\n return sum([abs(a[i] - b[i]) for i in xrange(len(a))])", "def get_distance(self):\n values = self.speakers.values()\n values.sort(reverse=True)\n try:\n return abs(values[1]) - abs(values[0])\n except (IndexError, ValueError):\n return -1", "def distance_to(self, p):\n closest_pt = self.closest_point_to(p)\n return np.linalg.norm(p - closest_pt)", "def diff(self, word1, word2):\n v = self._vecs[self._index[word1]] - self._vecs[self._index[word2]]\n return v / np.linalg.norm(v)", "def distance_to(self, obj):\n\t\tx, y = self.position\n\t\tobj_x, obj_y = obj.position\n\t\treturn hypot(x - obj_x, y - obj_y)", "def distance(v: Vector, w: Vector) -> float:\n return magnitude(subtract(v, w))", "def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, 
d2)) - self.thickness/2", "def distorted_distance(self):\n return self._distance", "def get_distance_between(self, p1, p2):\n\t\treturn math.sqrt(math.pow((p1.x - p2.x), 2) + math.pow((p1.y - p2.y), 2))", "def distance(self):\n _, _, costs = self.calculate_costs()\n return np.sum(costs)", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def embedding_distance(embedding_1: Embedding,\n embedding_2: Embedding,\n distance_metric: DistanceMetric) -> float:\n distance = embedding_distance_bulk(embedding_1.reshape(\n 1, -1), embedding_2.reshape(1, -1), distance_metric=distance_metric)[0]\n return distance", "def distance(self,pose1, pose2):\n return math.sqrt((pose1[0] - pose2[0]) ** 2 + (pose1[1] - pose2[1]) ** 2) + 0.001", "def distance(self, other):\n return float(abs(self.x - other.x) + abs(self.y - other.y))", "def displacement(self):\n return self[0].distance(self[-1])", "def norm_distance(self):\n graph_size = self.N + self.M\n return self.distance() / (1. * graph_size)", "def distance_to(self, v: Vector) -> float:\n return math.fabs(self.signed_distance_to(v))", "def distance_to_origin(self):\n return np.sqrt(self.x ** 2 + self.y ** 2)", "def DistanceDimension(\n self,\n entity1: ConstrainedSketchVertex,\n entity2: ConstrainedSketchVertex,\n textPoint: tuple[float],\n value: float = None,\n reference: Boolean = OFF,\n ):\n pass", "def _distance(self, a, b):\n\n return self.distance_matrix[a - 1][b - 1]", "def length_dist(self,synset_1, synset_2):\n\t l_dist = sys.maxsize\n\t if synset_1 is None or synset_2 is None: \n\t return 0.0\n\t if synset_1 == synset_2:\n\t # if synset_1 and synset_2 are the same synset return 0\n\t l_dist = 0.0\n\t else:\n\t wset_1 = set([str(x.name()) for x in synset_1.lemmas()]) \n\t wset_2 = set([str(x.name()) for x in synset_2.lemmas()])\n\t if len(wset_1.intersection(wset_2)) > 0:\n\t # if synset_1 != synset_2 but there is word overlap, return 1.0\n\t l_dist = 1.0\n\t else:\n\t # just compute the shortest path between the two\n\t l_dist = synset_1.shortest_path_distance(synset_2)\n\t if l_dist is None:\n\t l_dist = 0.0\n\t # normalize path length to the range [0,1]\n\t return math.exp(-self.ALPHA * l_dist)" ]
[ "0.7310629", "0.7117631", "0.7048731", "0.7045804", "0.6998764", "0.69940305", "0.68218845", "0.66140974", "0.66028076", "0.65679866", "0.6541728", "0.6533054", "0.65226877", "0.65203345", "0.6491885", "0.6491787", "0.6487429", "0.6487429", "0.6478075", "0.6459302", "0.64487356", "0.6437967", "0.6393049", "0.6391465", "0.6379372", "0.63694936", "0.6365933", "0.6361173", "0.63596314", "0.63510793", "0.63466465", "0.6341612", "0.63399446", "0.63399446", "0.63399446", "0.63399446", "0.63399446", "0.63399446", "0.63399446", "0.6334119", "0.63321", "0.63310176", "0.63221014", "0.63118106", "0.6309151", "0.63029283", "0.6298458", "0.62966", "0.6286358", "0.62851787", "0.6279452", "0.6258125", "0.62562174", "0.62553096", "0.62537366", "0.6248529", "0.62455297", "0.62441576", "0.6237877", "0.62350124", "0.6234596", "0.6226944", "0.622562", "0.6225406", "0.6219549", "0.62103254", "0.62100935", "0.6203698", "0.62024915", "0.6191894", "0.6190424", "0.61849356", "0.61767584", "0.6173053", "0.61706924", "0.616501", "0.6152728", "0.61525035", "0.6133323", "0.613156", "0.6128748", "0.61280674", "0.612041", "0.61177856", "0.611451", "0.6111033", "0.61107916", "0.6110479", "0.60933393", "0.60860854", "0.6069566", "0.6062833", "0.6060269", "0.60555524", "0.6055495", "0.60546625", "0.60524124", "0.6050603", "0.6047115", "0.60459673" ]
0.6702924
7
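For readers who want to exercise the two WordForm methods carried by the neighboring records (the distance method above and the similarity method in the next record), the sketch below is a minimal, self-contained illustration. The Segment class, its feature dictionary, and the feature_distance() helper are hypothetical stand-ins invented here for demonstration; they are not part of the dataset's source corpus, which only guarantees the method bodies shown in the document fields.

# Hypothetical stubs (assumptions, not from the dataset) plus the two
# WordForm methods exactly as they appear in the surrounding records.

def feature_distance(feature, a, b):
    # Assume numeric feature values; the real helper may weight features.
    return a - b

class Segment:
    def __init__(self, seg_type, features):
        self.seg_type = seg_type
        self._features = features              # e.g. {"voice": 0, "height": 3}

    def possible_features(self):
        return self._features.keys()

    def get_feature_value(self, feature):
        return self._features[feature]

class WordForm:
    def __init__(self, segments):
        self.segments = segments               # expected to hold 3 Segments

    def distance(self, wf, positions = None, features = None):
        dist = 0
        for position in range(3):
            if positions == None or position in positions:
                if self.segments[position].seg_type == wf.segments[position].seg_type:
                    for feature in self.segments[position].possible_features():
                        if features == None or feature in features:
                            my_value = self.segments[position].get_feature_value(feature)
                            comp_value = wf.segments[position].get_feature_value(feature)
                            dist += abs(feature_distance(feature, my_value, comp_value))
                else:
                    return 100
        return dist

    def similarity(self, wf, positions = None, features = None):
        dist = self.distance(wf, positions = positions, features = features)
        if dist < .1:
            dist = .1
        return 1 / (dist ** 2)

# Usage: two three-segment forms that differ in a single feature value.
a = WordForm([Segment("C", {"voice": 0}), Segment("V", {"height": 1}), Segment("C", {"voice": 1})])
b = WordForm([Segment("C", {"voice": 0}), Segment("V", {"height": 3}), Segment("C", {"voice": 1})])
print(a.distance(b))       # 2
print(a.similarity(b))     # 0.25  (= 1 / 2**2)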
Return the similarity between this WordForm and the one provided.
def similarity(self, wf, positions = None, features = None):
    # The similarity is the inverse square of the distance between the two
    # WordForms. Impose a minimum on distances (to deal with zero).
    dist = self.distance(wf, positions = positions, features = features)
    if dist < .1:
        dist = .1
    sim = 1 / (dist ** 2)
    return sim
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wordSimilarityRatio(sent_1,sent_2):", "def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))", "def similarity(self, w1, w2):\r\n return self.represent(w1).dot(self.represent(w2))", "def similarity(self, word1, word2):\n common_vect = +np.ones(self.nEmbed) * 10000\n if word1 not in self.vocab and word2 in self.vocab:\n id_word_2 = self.w2id[word2]\n w1 = common_vect\n w2 = self.U[id_word_2]\n elif word1 in self.vocab and word2 not in self.vocab:\n id_word_1 = self.w2id[word1]\n w1 = self.U[id_word_1]\n w2 = common_vect\n elif word1 not in self.vocab and word2 not in self.vocab:\n w1 = common_vect\n w2 = common_vect\n else:\n id_word_1 = self.w2id[word1]\n id_word_2 = self.w2id[word2]\n w1 = self.U[id_word_1]\n w2 = self.U[id_word_2]\n\n # scalair = w1.dot(w2)/np.linalg.norm(w1,w2)\n similarity = w1.dot(w2) / (np.linalg.norm(w1) * np.linalg.norm(w2))\n # similarity = 1 / (1 + np.exp(-scalair))\n # similarity = scalair / (np.linalg.norm(w1) * np.linalg.norm(w2))\n return similarity", "def similarity(self, other):\n part = self.__part_converter(self.part)\n if part != self.__part_converter(other.part):\n return 0\n tresh = 0.2\n sss = wn.synsets(self.string, part)\n sso = wn.synsets(other.string, part)\n best_sim = 0\n for ss in sss:\n # if not match('^' + self.string + '\\..+', ss.name()):\n # continue\n for so in sso:\n # if not match('^' + other.string + '\\..+', so.name()):\n # continue\n sim = ss.wup_similarity(so)\n if (tresh < sim) and (best_sim < sim):\n best_sim = sim\n return best_sim", "def text_similarity(self, text_1: str, text_2: str):\n txt1 = self._pre_process(text_1)\n txt2 = self._pre_process(text_2)\n\n sim = self.model.wmdistance(txt1, txt2)\n\n if sim == inf:\n sim = INF_SIMILIARITY\n return sim", "def similarity(self, w1, w2):\r\n sim = self.represent(w1).dot(self.represent(w2))\r\n return sim", "def similarity(self, word1: str, word2: str, metric='cosine') -> float:\n if 0 == self.word2idx.get(word1, 0) or 0 == self.word2idx.get(word2, 0):\n return 0.\n\n return self.similarity_vec(self[word1], self[word2], metric=metric)\n # vec1 = self.__getitem__(word1).reshape((1, -1))\n # vec2 = self.__getitem__(word2).reshape((1, -1))\n # return 1 - distance.cdist(vec1, vec2, metric=metric).reshape(-1)", "def similarity(self, new_sentence):\n cleaned = self.clean_string(new_sentence)\n stemmed = self.stem(cleaned, train=False)\n\n if not set(stemmed).intersection(set(self.vocabulary.keys())):\n return None\n\n else:\n difference = set(stemmed) - set(self.vocabulary.keys())\n to_append = np.zeros((self.matrix.shape[0], len(difference)))\n matrix = np.append(self.matrix, to_append, axis=1)\n\n new_voc = copy.deepcopy(self.vocabulary)\n for word in difference:\n if word not in new_voc:\n new_voc[word] = len(new_voc)\n\n question_vector = self.stem2vec(stemmed, new_voc)\n result = np.matmul(matrix, question_vector)\n return np.argmax(result)", "def similarity(self, token1, token2):\n vec1 = self.get_vector(token1)\n vec2 = self.get_vector(token2)\n assert vec1 is not None and vec2 is not None, \"Cannot compute similarity between None type vectors.\"\n if not self.normalize:\n # if model not loaded as normalized embeddings \n vec1 = vec1 / np.linalg.norm(vec1)\n vec2 = vec2 / np.linalg.norm(vec2)\n return np.dot(vec1, vec2)", "def word_order_similarity(self,sentence_1, sentence_2):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = list(set(words_1).union(set(words_2)))\n\t windex = {x[1]: x[0] 
for x in enumerate(joint_words)}\n\t r1 = self.word_order_vector(words_1, joint_words, windex)\n\t r2 = self.word_order_vector(words_2, joint_words, windex)\n\t return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))", "def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)", "def similarity(self, wSet1, wSet2, idf): \n if len(wSet1) == 0 or len(wSet2) == 0:\n return 0.0\n else:\n defaultIDF = idf['unknownToken']\n intersection = wSet1.intersection(wSet2)\n# intersection = self.synonymIntersection(wSet1, wSet2, idf)\n if len(intersection) == 0:\n return 0\n sum1 = 0\n sum2 = 0\n intersectionSum = 0\n for word in wSet1:\n sum1 += (idf.get(word, defaultIDF))**2\n for word in wSet2:\n sum2 += (idf.get(word, defaultIDF))**2\n for word in intersection:\n intersectionSum += (idf.get(word, defaultIDF))**2\n \n if sum1 == 0 or sum2 == 0:\n return 0.0\n else:\n return intersectionSum/(math.sqrt(sum1) * math.sqrt(sum2))", "def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))", "def similarity(self, x, y, keyboard_weight=None):\r\n dist = self.distance(x, y, keyboard_weight)\r\n max_len = max(len(x), len(y))\r\n max_dissimilarity = max_len * self.scale_coef\r\n similarity = 1 - dist / max_dissimilarity\r\n return similarity", "def similar_text(word1, word2) -> float:\n\n return textdistance.overlap.similarity(word1, word2)", "def similarity(self, char1, char2, weights=(1.0, 0.0, 0.0), as_tree=False):\n\n assert char1 in self.char_dict\n assert char2 in self.char_dict\n shape_w, sound_w, freq_w = weights\n\n if char1 in self.char_dict and char2 in self.char_dict:\n\n shape_sim = self.shape_similarity(char1, char2, as_tree=as_tree)\n sound_sim = self.pronunciation_similarity(char1, char2)\n freq_sim = 1.0 - self.char_dict[char2] / len(self.char_dict)\n\n return shape_sim * shape_w + sound_sim * sound_w + freq_sim * freq_w\n else:\n return 0.0", "def content_similarity(self, movie1, movie2):\n v1, v2 = self.get_tfidf(movie1), self.get_tfidf(movie2)\n return self.cosine_similarity(v1, v2)", "def similarity(self, e1, e2):\n\t\tpass", "def similarity_function_old(feature1, feature2):\n f1Magnitude = feature1.dot(feature1)\n f2Magnitude = feature2.dot(feature2)\n return 1 - feature1.dot(feature2) / (f1Magnitude * f2Magnitude)", "def similarity(a, b):\n distance = Levenshtein.distance(a, b)\n return 1 - (distance / max((len(a), len(b))))", "def distance(self, word1, word2):\n\n return scipy.spatial.distance.cosine(self.vectors.get(word1), self.vectors.get(word2))", "def similarity_with(self, other_text_analyzer):\n pass", "def compute_similarity(self, text1, text2):\n\n text1_dist = self.predict(text1)[0]\n text2_dist = 
self.predict(text2)[0]\n return jensenshannon(text1_dist, text2_dist)", "def compare(self) -> float:\n if not self._hadith_text1 or not self._hadith_text2:\n raise Exception('Hadith texts to compare not set. Use setHadithTexts() to set the texts...')\n\n text1 = self._hadith_text1_cleaned\n text2 = self._hadith_text2_cleaned\n\n if self._ignore_diacritics:\n text1 = self._remove_diacritics(self._hadith_text1_cleaned)\n text2 = self._remove_diacritics(self._hadith_text2_cleaned)\n\n sm = difflib.SequenceMatcher(None, text1, text2)\n return sm.ratio()", "def sentence_similarity(self,sentence1, sentence2):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n\n # Get the synsets for the tagged words\n synsets1 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n\n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n\n score, count = 0.0, 0\n\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n vals = [synset.path_similarity(ss) if synset.path_similarity(ss) is not None else 0 for ss in synsets2]\n best_score = max(vals,default=0)\n\n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n count += 1\n\n # Average the values\n if count == 0:\n return 0\n score /= count\n return score", "def calc_similarity_between_words(word1, word2):\n # pos = wn.Noun is mandatory otherwise the lowest common hypernym cant be found because of part of speach\n word1_synsets = wn.synsets(word1, pos=wn.NOUN)\n word2_synsets = wn.synsets(word2, pos=wn.NOUN)\n\n w1 = get_words_from_sysets(word1_synsets)\n w2 = get_words_from_sysets(word2_synsets)\n\n sim_matrix = np.zeros((len(w1), len(w2)))\n\n for i in range(len(w1)):\n for j in range(len(w2)):\n try:\n sim_matrix[i, j] = embeddings.distances(w1[i], [w2[j]])\n except KeyError:\n sim_matrix[i, j] = 1000\n continue\n\n w1_ind, w2_ind = np.unravel_index(np.nanargmin(sim_matrix, axis=None), sim_matrix.shape)\n lowest_common_hyp = (word1_synsets[w1_ind]).lowest_common_hypernyms(word2_synsets[w2_ind])\n return (sim_matrix[w1_ind, w2_ind], lowest_common_hyp)", "def similarities (self, listOfWords):\n \n # building the query dictionary\n queryDict = collections.defaultdict(int)\n for w in listOfWords:\n queryDict [w] += + 1.0\n \n # normalizing the query\n length = float (len (listOfWords))\n for k in queryDict:\n queryDict [k] /= length\n \n # computing the list of similarities\n sims = []\n for doc in self.documents:\n score = 0.0\n docDict = doc [1]\n for k in queryDict:\n if docDict.has_key (k):\n score += (queryDict [k] / self.corpusDict [k]) + (docDict [k] / self.corpusDict [k])\n sims.append ([doc [0], score])\n \n return sims", "def similarity(query,word_dict,dictionary,number_of_docs,id):\n similarity = 0.0\n scalar_leng = 0.0\n for term in query:\n if term in dictionary:\n similarity += word_dict[term][1]*imp(term,word_dict,number_of_docs,id)\n\n for term in dictionary:\n scalar_leng += imp(term, word_dict, number_of_docs, id) ** 2\n\n final_scalar_leng = math.sqrt(scalar_leng)\n similarity = similarity / final_scalar_leng\n #print(similarity)\n return similarity", "def get_score(self,sentence_1, sentence_2):\n\t return self.DELTA * self.semantic_similarity(sentence_1, sentence_2, True) + (1.0 - 
self.DELTA) * self.word_order_similarity(sentence_1, sentence_2)", "def get_similarity(df):\n count = CountVectorizer()\n count_matrix = count.fit_transform(df[\"bag_of_words\"])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n return cosine_sim", "def cosine_similarity(self, v1: np.ndarray, v2: np.ndarray) -> float:\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))\n # return cosine_similarity(v1, v2)[0][0]", "def doc_doc_similarity(matrix_a, matrix_b):\n assert matrix_a.shape[1] == matrix_b.shape[0], \"Mismatched shape between matrix A and matrix B\"\n numerator = np.dot(matrix_a, matrix_b)\n assert numerator.shape == (matrix_a.shape[0], matrix_b.shape[1]), numerator.shape\n denominator = np.sqrt(np.sum(matrix_a ** 2, axis=1))[:, np.newaxis] * np.sqrt(\n np.sum(matrix_b.T ** 2, axis=1))[:, np.newaxis].T\n assert (denominator > 0).all(), \"Denominator is zero {}\".format(denominator)\n similarity_matrix = np.multiply(numerator, 1 / denominator)\n return similarity_matrix", "def mm_similarity(s1, s2):\n if filter(str.isalpha, s1) == filter(str.isalpha, s2):\n if len(s1) < len(s2):\n return float(len(s1)) / len(s2)\n else:\n return float(len(s2)) / len(s1)\n else:\n return 0.", "def similarity(self, query, documents):\n\n bow_query = self.dictionary.doc2bow(query)\n bow_docs = [self.dictionary.doc2bow(document) for document in documents]\n\n index = SoftCosineSimilarity(bow_docs, self.matrix)\n similarities = index[bow_query]\n\n return similarities", "def symmetric_sentence_similarity(self, sentence1, sentence2):\n return (self.sentence_similarity(sentence1, sentence2) + self.sentence_similarity(sentence2, sentence1)) / 2", "def similarity(self, other, ignore_abundance=False, downsample=False):\n return self._methodcall(lib.kmerminhash_similarity,\n other._get_objptr(),\n ignore_abundance, downsample)", "def synSimilarity(self, wSet1, wSet2): \n nW1 = len(wSet1)\n nW2 = len(wSet2)\n if nW1 == 0 or nW2 == 0:\n return 0.0\n synonyms1 = self.getSynonyms(wSet1)\n synonyms2 = self.getSynonyms(wSet2)\n \n # easy bit: find the number of identical words in each mention\n intersection = wSet1.intersection(wSet2)\n # now remove these words and look for synonyms between those left\n w1 = wSet1 - intersection\n w2 = wSet2 - intersection\n while len(w1) > 0:\n word1 = w1.pop()\n if word1 not in synonyms1:\n continue # no synonyms for this word\n \n for word2 in w2:\n if word2 not in synonyms2:\n continue # no synonyms for this word\n sharedSynsets = synonyms1[word1].intersection(synonyms2[word2])\n if len(sharedSynsets) > 0:\n # the two have at least one synset in common, consider them synonyms\n w2.remove(word2)\n intersection.add(word1)\n \n break\n return float(2*len(intersection)) / (nW1 + nW2)", "def similarity_score(self, lhs, rhs):\n pass", "def similarity(text1, text2):\n\n clean1 = clean(text1)\n clean2 = clean(text2)\n count_meas = src.utils.nlp.prompt_similarity(clean1, clean2, vectorizer=CountVectorizer)\n tfidt_meas = src.utils.nlp.prompt_similarity(clean1, clean2, vectorizer=TfidfVectorizer)\n similarity_dict = {'count': count_meas, 'tfidf': tfidt_meas}\n return similarity_dict", "def most_similar_word(self,word, word_set):\n\t max_sim = -1.0\n\t sim_word = \"\"\n\t for ref_word in word_set:\n\t sim = self.word_similarity(word, ref_word)\n\t if sim > max_sim:\n\t max_sim = sim\n\t sim_word = ref_word\n\t return sim_word, max_sim", "def term_similarity(node_a, node_b, embeddings):\n try:\n similarity = embeddings.similarity(node_a.__str__(), 
node_b.__str__())\n except Exception as e:\n # If term(s) does not occur in embedding similarity is always 0.\n similarity = 0\n logging.info(e)\n return similarity", "def word_rotator_similarity(x, y):\n return 1 - word_rotator_distance(x, y)", "def sentence_similarity_asym(sentence1, sentence2):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n \n # Get the synsets for the tagged words\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n score, count = 0.0, 0\n\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n pathsim = [synset.path_similarity(ss) for ss in synsets2]\n if len(pathsim) == 0:\n #print sentence1, sentence2\n pathsim = [0]\n best_score = max(pathsim)\n \n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n count += 1\n\n if count == 0:\n return 0\n # Average the values\n score /= count\n return score", "def get_simlilarity(self, text_to_query):\n\n texts = []\n\n for path in self.path_list:\n with open(path) as file:\n texts.append(file.read())\n texts.append(text_to_query)\n\n tfidf = TfidfVectorizer(tokenizer=clean_tonkenize)\n tfidf_matrix = tfidf.fit_transform(texts)\n\n return cosine_similarity(tfidf_matrix)", "def calc_similarity(lhs, rhs):\n lhs_decomp = decompose(lhs)\n rhs_decomp = decompose(rhs)\n dist = editdistance.eval(lhs_decomp, rhs_decomp)\n max_len = max(len(lhs_decomp), len(rhs_decomp))\n sim = float(max_len - dist) / float(max_len)\n logging.debug('SIM: [%s] vs [%s] ==> %d / %d = %f', lhs.encode('UTF-8'), rhs.encode('UTF-8'),\n max_len - dist, max_len, sim)\n return sim", "def sentence_similarity(sentence1, sentence2):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n\n # Get the synsets for the tagged words\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n\n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n\n score, count = 0.0, 0\n\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n best_score = max([synset.path_similarity(ss) or 0 for ss in synsets2])\n\n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n count += 1\n\n if count == 0:\n return 0\n\n # Average the values\n score /= count\n return score", "def string_similarity(item_1, item_2):\n return SequenceMatcher(None, item_1.lower(), item_2.lower()).ratio()", "def distance(self, u, v):\n numerator = np.dot(u,v)\n denominator = np.linalg.norm(u) * np.linalg.norm(v)\n similarity = numerator/(denominator +1e-7)\n return similarity", "def get_similarity(user1: Rating, user2: Rating) -> float:\n shared = 0.0\n for m_id in user1:\n if m_id in user2:\n shared += user1[m_id] * user2[m_id]\n norm1 = 0.0\n for m_id in user1:\n norm1 = norm1 + user1[m_id] ** 2\n norm2 = 0.0\n for m_id in user2:\n norm2 = norm2 + user2[m_id] ** 2\n return (shared * shared) / (norm1 * norm2)", "def similarity(query,id):\n 
similarity = 0.0\n for term in query:\n if term in dictionary:\n similarity += inverse_document_frequency(term)*imp(term,id)\n similarity = similarity / length[id]\n return similarity", "def string_similarity(a, b):\n return SequenceMatcher(a=a, b=b).ratio()", "def calculate_similarity(self, tweets):\r\n if (len(tweets) == 1):\r\n return 0\r\n vectors = self.vectorizer.vectorize_data(tweets, False)\r\n\r\n temp = cosine_similarity(vectors[0:-1], vectors)\r\n temp = [item for sublist in temp for item in sublist]\r\n sim = sum(temp) / len(temp)\r\n return sim", "def similarity(pair: Tuple[Text, Text]) -> float:\n (a, b) = pair\n missing = (\n True\n if any(symbol not in Metrics.realine.feature_matrix for symbol in pair)\n else False\n )\n return 0.0 if missing else 1 - Metrics.realine.delta(a, b)", "def similarity_scores(self, other):\n word_score = compare_dictionaries(other.words, self.words)\n word_length_score = compare_dictionaries(other.word_lengths, self.words)\n stem_score = compare_dictionaries(other.stems, self.stems)\n sentence_length_score = compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n common_word_score = compare_lists(other.common_word, self.common_word)\n\n return [word_score, word_length_score, stem_score, sentence_length_score, common_word_score]", "def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))", "def semanticSimilarity(self, text1, text2, distanceMeasure = \"cosine\"):\n return self._er.jsonRequestAnalytics(\"/api/v1/semanticSimilarity\", { \"text1\": text1, \"text2\": text2, \"distanceMeasure\": distanceMeasure })", "def similarity_function(feature1, feature2):\n # 256 HOG, 18 HSV, 512 Encoder\n # weight color more if using the full vector\n if len(feature1) > 785:\n salient1 = feature1[256:256 + 18].copy() # be careful not to modify feature vector in place\n salient2 = feature2[256:256 + 18].copy()\n feature1 = feature1.copy()\n feature2 = feature2.copy()\n feature1[256:256 + 18] = salient1 * 10\n feature2[256:256 + 18] = salient2 * 10\n\n abs_distance = np.abs(feature1 - feature2)\n return np.sum(abs_distance)", "def sentence_similarity(sentence1, sentence2):\n sentence1 = sentence1.tags\n sentence2 = sentence2.tags\n \n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n score, count = 0.0, 0\n \n for synset in synsets1:\n \n li=[synset.path_similarity(ss) for ss in synsets2]\n m=0\n for i in range(len(li)):\n if li[i] is not None and m<li[i]:\n m=li[i]\n if m != 0:\n score += m\n count += 1\n\n if count is 0:\n score = 0\n else:\n score /= count\n return score", "def get_similarity(s1, s2):\n t0 = sorted(list(set(s1.split(' ')).intersection(set(s2.split(' ')))))\n t1 = sorted(list(set(t0 + s1.split(' '))))\n t2 = sorted(list(set(t0 + s2.split(' '))))\n\n r01 = SequenceMatcher(None, t0, t1).ratio()\n r02 = SequenceMatcher(None, t0, t2).ratio()\n r12 = SequenceMatcher(None, t1, t2).ratio()\n return max(r01, r02, r12)", "def diff(self, word1, word2):\n v = self._vecs[self._index[word1]] - self._vecs[self._index[word2]]\n return v / np.linalg.norm(v)", "def similarity(self, mass_function):\n return round(0.5 * (math.cos(math.pi * self.distance(mass_function)) + 1), 6)", "def get_similarity_score(self, 
reviewer1: Any, reviewer2: Any) -> float:\n v1 = self._vertices[reviewer1]\n v2 = self._vertices[reviewer2]\n return v1.reviewer_similarity_score(v2)", "def WordSim(self,testDF,listCourse,inCourse):\r\n #Obtain a single vector embedding for each course description (calculated by taking an average of each word \r\n #embedding that makes up each description)\r\n \r\n #Get the embedding from the dictionary for the list (reference) course\r\n aVec = self.VDF[\"Word\"][listCourse]\r\n #Calculate the embedding with the doc2Vec model.\r\n bVec = self._WordSimAveVec(testDF,inCourse)\r\n #Convert vectors to column vectors to be fed into the cosine_similarity function.\r\n A = np.expand_dims(aVec,0)\r\n B = np.expand_dims(bVec,0)\r\n #Calculate the cosine similarity between the two vectors.\r\n sim = cosine_similarity(A,B)\r\n return float(sim)", "def get_similar_score(a, b):\n\n # Count the amount of words that A and B have in common\n commons = get_common_words_count(a, b)\n\n # Compute the amount of common words, divided by the log\n # the length of sentence 1 plus the length of sentence 2.\n # This means that higher similarity weights will be given\n # to longer sentences up to the asymptote of log10\n\n if len(a) > 0 and len(b) > 0:\n log_denom = log10(len(a) * len(b))\n else:\n return 0\n\n # Avoid division by zero\n if log_denom == 0:\n return 0\n\n return commons / log_denom", "def get_cosine_sim(self):\r\n return CosineSimilarity().calculate_similarity(self.tweets)", "def cosine_similarity(self, x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))", "def damerau_levenshtein_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(damerau_levenshtein_distance(s1, s2)) / max_cost", "def cosine_similarity(self, source_doc, input_doc):\n vectorizer = self.vectorizer or TfidfVectorizer(tokenizer=PlagiarismDetector.tokenize_and_stem, stop_words='english')\n tfidf = vectorizer.fit_transform([source_doc, input_doc])\n return ((tfidf * tfidf.T).A)[0, 1]", "def get_fuzz_ratio(first_word, second_word):\n return fuzz.ratio(first_word, second_word), first_word, second_word", "def sentence_similarity(self,wnsimilarity,sentence1, sentence2,icneed=False):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n \n # Get the synsets for the tagged words\n synsets1 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [self.tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n \n score, count = 0.0, 0\n # For each word in the first sentence\n for synset in synsets1:\n \n # Get the similarity value of the most similar word in the other sentence\n score_list=[]\n if icneed == True :\n for ss in synsets2:\n try:\n temp=wnsimilarity(synset,ss,self.brown_ic)\n score_list.append(temp)\n except:\n continue\n \n else:\n for ss in synsets2:\n try:\n temp=wnsimilarity(synset,ss)\n score_list.append(temp)\n except:\n continue\n \n \n score_list = np.array(score_list, dtype=np.float64)\n score_list = np.nan_to_num(score_list)\n# print(score_list)\n if len(score_list)>0:\n best_score = np.nanmax(score_list)\n else:\n best_score=0.0\n# print(best_score)\n# print(type(best_score))\n \n # Check that the similarity could have been computed\n if best_score is not None:\n score =score + best_score\n# print(score)\n count = count+ 1\n \n \n# 
print(\"one sentence over\")\n # Average the values\n score /= count\n return score", "def compute_similarity(site_a, site_b):\n return np.linalg.norm(site_a - site_b)", "def text_similarity(this_text, other_text, shingle_length=5, minhash_size=200, random_seed=5):\n this_shingles = ShingledText(this_text, random_seed=random_seed, shingle_length=shingle_length, minhash_size=minhash_size)\n other_shingles = ShingledText(other_text, random_seed=random_seed, shingle_length=shingle_length, minhash_size=minhash_size)\n return this_shingles.similarity(other_shingles)", "def distance_unigrams_same(t1, t2):\n t1_terms = make_terms_from_string(t1)\n t2_terms = make_terms_from_string(t2)\n terms1 = set(t1_terms)\n terms2 = set(t2_terms)\n shared_terms = terms1.intersection(terms2)\n #print(shared_terms)\n all_terms = terms1.union(terms2)\n #print(all_terms)\n dist = 1.0 - (len(shared_terms) / float(len(all_terms)))\n return dist", "def get_similarity(self, ):\r\n customer_cos_similarity = cosine_similarity(self.rating_matrix, self.rating_matrix)\r\n customer_cos_similarity = pd.DataFrame(customer_cos_similarity,\r\n index=self.customer_vendor_matrix.index,\r\n columns=self.customer_vendor_matrix.index)\r\n # customer_pearson_similarity = np.corrcoef(self.rating_matrix,\r\n # self.rating_matrix,)\r\n # customer_pearson_similarity = pd.DataFrame(customer_pearson_similarity,\r\n # index=self.customer_vendor_matrix.index,\r\n # columns=self.customer_vendor_matrix.index)\r\n return customer_cos_similarity,\r\n # return customer_pearson_similarity run too slowly\r", "def gram_edit_distance(self, gram1, gram2):\n distance = 0.0\n if gram1 == gram2:\n distance = 1.0\n return distance", "def gram_edit_distance(self, gram1, gram2):\r\n distance = 0.0\r\n if gram1 == gram2:\r\n distance = 1.0\r\n return distance", "def cosine_similarity_tensorflow(tf_word_representation_A, tf_words_representation_B):\n a_normalized = tf.nn.l2_normalize(tf_word_representation_A, axis=-1)\n b_normalized = tf.nn.l2_normalize(tf_words_representation_B, axis=-1)\n similarity = tf.reduce_sum(\n tf.multiply(a_normalized, b_normalized), \n axis=-1\n )\n \n return similarity", "def wup_measure(self,a, b, similarity_threshold = 0.925, debug = False):\n if debug: print('Original', a, b)\n #if word_pair_dict.has_key(a+','+b):\n if a+','+b in self.word_pair_dict.keys():\n return self.word_pair_dict[a+','+b]\n\n def get_semantic_field(a):\n return wn.synsets(a, pos=wn.NOUN)\n\n if a == b: return 1.0\n\n interp_a = get_semantic_field(a)\n interp_b = get_semantic_field(b)\n if debug: print(interp_a)\n\n if interp_a == [] or interp_b == []:\n return 0.0\n\n if debug: print('Stem', a, b)\n global_max=0.0\n for x in interp_a:\n for y in interp_b:\n local_score=x.wup_similarity(y)\n if debug: print('Local', local_score)\n if local_score > global_max:\n global_max=local_score\n if debug: print('Global', global_max)\n\n # we need to use the semantic fields and therefore we downweight\n # unless the score is high which indicates both are synonyms\n if global_max < similarity_threshold:\n interp_weight = 0.1\n else:\n interp_weight = 1.0\n\n final_score = global_max * interp_weight\n self.word_pair_dict[a+','+b] = final_score\n return final_score", "def __getSimilarityScore(expected, actual):\n return SequenceMatcher(None, expected, actual).ratio()", "def cosine_similarity(v1, v2):\n return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))", "def cosine_similarity(v1, v2):\n # Cosine Sim:\n # Get the words that both have in common\n\n 
v1words = set(v1.keys())\n v2words = set(v2.keys())\n\n numerator_words = v1words.intersection(v2words)\n\n # Multiply and sum those counts\n numerator = 0.0\n for word in numerator_words:\n numerator += v1[word] * v2[word]\n\n\n # Divide by the sqrt of the product of the sum of the squares of the counts\n denominator = math.sqrt(math.magnitude(list(v1.values())) * math.magnitude(list(v2.values())))\n\n return numerator/denominator", "def reviewer_similarity_score(self, other: _Vertex) -> float:\n if self.degree() == 0 or other.degree == 0:\n return 0.0\n else:\n neighbours = self.neighbours\n other_neighbours = other.neighbours\n same_neighbours = neighbours.keys() & other_neighbours.keys()\n union = len(self.neighbours) + len(other.neighbours)\n sim_score_so_far = 0\n\n for vertex in same_neighbours:\n # 'bothered reviewing' bonus:\n sim_score_so_far += 1\n # 'love' bonus\n if self.neighbours[vertex] >= 9 and other.neighbours[vertex] >= 9:\n sim_score_so_far += 2\n # 'like' bonus\n elif self.neighbours[vertex] >= 7 and other.neighbours[vertex] >= 7:\n sim_score_so_far += 1\n\n return sim_score_so_far / union", "def similarity(self, q1, q2, casing=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'similarity')\r\n\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json", "def compute_similarity(self, input_doc) -> Dict:\n most_similar_so_far = dict(similarity_score=-1, doc=None)\n\n for doc_info in self.plag_dao.yield_docs():\n docs: List[Document] = doc_info['data']\n\n for doc in docs:\n similarity_score = self.cosine_similarity(doc.content, input_doc)\n if similarity_score > most_similar_so_far['similarity_score']:\n most_similar_so_far['similarity_score'] = similarity_score\n most_similar_so_far['doc'] = doc\n return most_similar_so_far", "def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()", "def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()", "def pair(self, reference: Spectrum, query: Spectrum) -> float:\n binned_reference = self.model.spectrum_binner.transform([reference])[0]\n binned_query = self.model.spectrum_binner.transform([query])[0]\n reference_vector = self.model.base.predict(self._create_input_vector(binned_reference))\n query_vector = self.model.base.predict(self._create_input_vector(binned_query))\n\n return cosine_similarity(reference_vector[0, :], query_vector[0, :])", "def GetSuggesteeSimilarity(suggestee1_tags, suggestee2_tags):\n total_size = len(suggestee1_tags) + len(suggestee2_tags)\n if total_size == 0:\n return 1.\n size_of_intersection = 0\n elems_in_first = {}\n for tag in suggestee1_tags:\n elems_in_first[tag] = True\n for tag in suggestee2_tags:\n if tag in elems_in_first:\n size_of_intersection += 1\n return size_of_intersection / (total_size - size_of_intersection)", "def name_similarity(name_1, name_2, nickname_2=None):\n name_similarity = string_similarity(name_1, name_2)\n nickname_similarity = 0\n if nickname_2:\n nickname_similarity = string_similarity(name_1, nickname_2)\n return max(name_similarity, nickname_similarity)", "def _compute_user_similarity(self, user1, user2):\n return self._compute_pearson(user1, user2)", "def w2v_sim(self, s1, s2):\n v1 = self.word2vec.get_centroid_vector(s1)\n v2 = self.word2vec.get_centroid_vector(s2)\n return self.__cos_sim(v1, v2)", "def distance_bigrams_same(t1, t2):\n t1_terms = make_terms_from_string(t1)\n t2_terms = make_terms_from_string(t2)\n terms1 = set(ngrams(t1_terms, 2)) # was using nltk.bigrams\n 
terms2 = set(ngrams(t2_terms, 2))\n shared_terms = terms1.intersection(terms2)\n #print(shared_terms)\n all_terms = terms1.union(terms2)\n #print(all_terms)\n dist = 1.0\n if len(all_terms) > 0:\n dist = 1.0 - (len(shared_terms) / float(len(all_terms)))\n return dist", "def get_genre_similarity(self):\n genre_words = []\n for w in self.target_movie.genres.split('|'):\n w = w.strip('- ,:(){}[]')\n genre_words.append(w)\n\n print(genre_words)\n\n res = self.db.query(Movie).filter(\n Movie.movie_id != self.target_movie.movie_id).filter(\n Movie.movie_id.in_(self.recommendation_pool.keys())\n ).filter(or_(\n Movie.genres.ilike(r'%' + gw + r'%') for gw in genre_words\n )).all()\n\n print(\"%i records from partial genres match\" % len(res))\n GSW = self.GENRES_SIMILARITY_WEIGHT\n for rec in res:\n smid = rec.movie_id\n self.recommendation_pool[smid]['genres_similarity'] = \\\n jaccard_index(self.target_movie.genres, rec.genres, '|') * GSW", "def compute_similarity(self, seq_node, **kwargs):\n pass", "def cosine_similarity(cls, vec_a, vec_b):\n return np.dot(vec_a, vec_b) / \\\n (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))", "def name_similarity_metric(self, name, **kwargs):\n return rapidfuzz.fuzz.QRatio(self.infer_name(**kwargs), name)", "def similarity_scores(self, other):\n results = []\n\n words_score=compare_dictionaries(other.words, self.words)\n wordl_score=compare_dictionaries(other.word_lengths, self.word_lengths)\n stems_score=compare_dictionaries(other.stems, self.stems)\n sentl_score=compare_dictionaries(other.sentence_lengths, self.sentence_lengths)\n endings_score=compare_dictionaries(other.endings, self.endings)\n results+= [words_score]\n results+= [wordl_score]\n results+= [stems_score]\n results+= [sentl_score]\n results+= [endings_score]\n return results", "def total_char_similarity(a,b):\n\ta_words, b_words = map(norm.set_clean_tokens, [a,b])\n\n\ttotal_score = 0\n\tfor ai in a_words:\n\t\tfor bi in b_words:\n\t\t\ttotal_score += similar(ai, bi)\n\treturn total_score", "def plot_similarity(self) -> None:\n if isinstance(self.model, FastTextWrapper):\n self.valid_data[\"vector\"] = self.valid_data[\"text\"].apply(\n lambda x: self.model.inference(word_tokenize(x), sentence_level=True))\n else:\n self.valid_data[\"vector\"] = self.valid_data[\"text\"].apply(\n lambda x: self.model.inference(word_tokenize(x))[0])\n messages = list(self.valid_data[\"label\"])\n vectors = list(self.valid_data[\"vector\"])\n similarity_matrix(messages=messages, vectors=vectors, name=self.folder, save_path=self.base_path)" ]
[ "0.7596557", "0.747573", "0.747573", "0.7371653", "0.73133916", "0.7294227", "0.72550523", "0.71882695", "0.71096104", "0.70324767", "0.7010185", "0.6944519", "0.692075", "0.6890939", "0.6874697", "0.68725497", "0.67834336", "0.67505664", "0.6722828", "0.6527661", "0.6500944", "0.6483019", "0.6470009", "0.6463275", "0.6459873", "0.64470315", "0.64300597", "0.63874346", "0.63811654", "0.6362673", "0.6337989", "0.63230664", "0.63139427", "0.630117", "0.62932265", "0.6284852", "0.6280344", "0.6280302", "0.62775207", "0.6268797", "0.62649816", "0.6264387", "0.62492955", "0.62380695", "0.62284887", "0.62108135", "0.62066936", "0.6205662", "0.61993414", "0.61976165", "0.6184087", "0.6183606", "0.6171635", "0.61694664", "0.61616266", "0.6156815", "0.6126507", "0.61034256", "0.61027527", "0.6099131", "0.6055282", "0.60549897", "0.6051767", "0.60427934", "0.60404265", "0.6033297", "0.6022231", "0.5996503", "0.5993449", "0.59895384", "0.59725857", "0.5969811", "0.59585714", "0.59525406", "0.5951372", "0.5947389", "0.5936856", "0.5925327", "0.5913626", "0.5908093", "0.5906043", "0.5893747", "0.58904403", "0.5871885", "0.58714217", "0.5864666", "0.5864666", "0.5859819", "0.5850601", "0.5850422", "0.5847688", "0.5841119", "0.58407503", "0.5839982", "0.5839094", "0.5831104", "0.58304095", "0.5829912", "0.58268714", "0.58248717" ]
0.76069194
0
Return just the surface representation of the WordForm.
def sr(self): return ''.join([str(seg) for seg in self.segments])
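The record above stores only the sr method itself; the class it belongs to is not part of the row. As a rough orientation, a minimal sketch of the assumed surrounding context — a WordForm holding an ordered list of segment objects whose str() is their surface symbol — could look like the following (the Segment class and its field are assumptions for illustration, not taken from the dataset):

class Segment:
    # Hypothetical minimal segment: str() yields its surface symbol.
    def __init__(self, symbol):
        self.symbol = symbol

    def __str__(self):
        return self.symbol


class WordForm:
    # Hypothetical minimal word form: an ordered list of segments.
    def __init__(self, segments):
        self.segments = list(segments)

    def sr(self):
        # Surface representation: concatenation of the segments' symbols.
        return ''.join([str(seg) for seg in self.segments])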
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def surface(self):\n return BRep_Tool_Surface(self.topods_shape())", "def get_preview(self, obj):\n\n return obj.cwr", "def get_preview(self, obj):\n\n return obj.cwr", "def exposedSurf(self):\n if self.precision:\n h = self.evaluations.exposedWing.edges[1].point1.x # height of trapezoid\n B = self.chordRootW # major base of trapezoid\n b = self.evaluations.chordIntersected.edges[1].length # minor base of trapezoid\n internalS = 2 * (0.5 * (b + B) * h) # wing surface internal at fuselage\n return self.surfaceW - internalS\n else:\n return self.surfaceW - self.fuselageDiameter * self.cMACW # first guess for a faster evaluation", "def surface_one_forms(self, point):\n vertex_0, vertex_1, vertex_2 = self._vertices(point)\n one_forms = gs.stack([vertex_1 - vertex_0, vertex_2 - vertex_0], axis=-2)\n return one_forms", "def goify(self, layout=None):\n xx,yy,zz = self.getXYZ(layout)\n surf = dict(\n type='surface',\n x=xx,\n y=yy,\n z=zz\n )\n return surf", "def surface(self):\n return self._surface", "def get_standard_form(self, frame):\n if self.standardised:\n return frame\n else:\n return self.crop( self.undistort(frame) )", "def normal_form(self, w):\n return self.element_class(self, self._normalize_word(w))", "def document(self):\n return self._modelPart.document()", "def project_embedding(self):\n w = self.feature_embedding.weight.data\n d = w.size(-1) - 1\n narrowed = w.narrow(-1, 1, d)\n tmp = 1 + torch.sum(torch.pow(narrowed, 2), dim=-1, keepdim=True)\n tmp.sqrt_()\n w.narrow(-1, 0, 1).copy_(tmp)\n return w # can be delete?", "def exposedWing(self):\n return SubtractedSolid(shape_in=self.wing, tool=self.fuselage,\n hidden=True)", "def get(self):\n # check if the post request has the file part\n words = request.args.get('words')\n words = ast.literal_eval(words)\n neighbors = request.args.get('neighbors')\n neighbors = ast.literal_eval(neighbors)\n\n bytes_io = BytesIO()\n visualize_embedding_results(words, neighbors, self.reverse_dictionary, self.embeddings, bytes_io)\n bytes_io.seek(0)\n return send_file(bytes_io, mimetype='image/png')", "def filter(self, word):\n \n word = word.lower()\n try:\n self.engine.fetch(word)\n except socket.error:\n raise LemmaAPIError\n part_of_speeches = self.engine.part_of_speeches\n\n \n self.basic_form = word\n for part in part_of_speeches:\n if part == 'verb':\n if self.engine.is_verb_conjugated():\n if not self.conEngine.is_verb_regular(word, self.engine.get_basic_verb()):\n self.basic_form = self.engine.get_basic_verb()\n return word\n else:\n self.basic_form = self.engine.get_basic_verb()\n\n elif part == 'noun':\n if self.engine.is_noun_plural():\n if not self.conEngine.is_noun_regular(word, self.engine.get_singular_noun()):\n self.basic_form = self.engine.get_singular_noun() \n return word\n else:\n self.basic_form = self.engine.get_singular_noun()\n\n return self.basic_form", "def writeGroundPlane(self,obj,renderer):\n\n result = \"\"\n bbox = FreeCAD.BoundBox()\n for view in obj.Group:\n if view.Source and hasattr(view.Source,\"Shape\") and hasattr(view.Source.Shape,\"BoundBox\"):\n bbox.add(view.Source.Shape.BoundBox)\n if bbox.isValid():\n import Part\n margin = bbox.DiagonalLength/2\n p1 = FreeCAD.Vector(bbox.XMin-margin,bbox.YMin-margin,0)\n p2 = FreeCAD.Vector(bbox.XMax+margin,bbox.YMin-margin,0)\n p3 = FreeCAD.Vector(bbox.XMax+margin,bbox.YMax+margin,0)\n p4 = FreeCAD.Vector(bbox.XMin-margin,bbox.YMax+margin,0)\n\n # create temporary object. 
We do this to keep the renderers code as simple as possible:\n # they only need to deal with one type of object: RenderView objects\n dummy1 = FreeCAD.ActiveDocument.addObject(\"Part::Feature\",\"renderdummy1\")\n dummy1.Shape = Part.Face(Part.makePolygon([p1,p2,p3,p4,p1]))\n dummy2 = FreeCAD.ActiveDocument.addObject(\"App::FeaturePython\",\"renderdummy2\")\n View(dummy2)\n dummy2.Source = dummy1\n ViewProviderView(dummy2.ViewObject)\n FreeCAD.ActiveDocument.recompute()\n\n result = self.writeObject(dummy2,renderer)\n\n # remove temp objects\n FreeCAD.ActiveDocument.removeObject(dummy2.Name)\n FreeCAD.ActiveDocument.removeObject(dummy1.Name)\n FreeCAD.ActiveDocument.recompute()\n\n return result", "def representation(self) -> DrawingObjects:\n pass", "def restore_geometry(self):\n return stools.SETTINGS.get(\"waveformWidget/geometry\")", "def envelop(self):\n return self.box", "def render_form():", "def _get_surface( self, lemma_morph, pos, ctxinfo ) : \n affix = \"\"\n parts = lemma_morph.rsplit(\"+\",1)\n if len(parts) == 1 or lemma_morph == \"+\": # No inflection, e.g. lemma_morph=\"+\"\n lemma = surface = lemma_morph\n elif len(parts) == 2 and \"+\" not in parts[0]: # Standard inflected unit, e.g. lemma_morph=\"be+s\"\n lemma, affix = parts\n if self.morphg_file is not None : \n lemma_morph = lemma_morph.replace(\"\\\"\",\"\\\\\\\"\") \n cmd = \"echo \\\"%s_%s\\\" | ${morphg_res:-./%s -t}\" % \\\n ( lemma_morph, pos, self.morphg_file )\n p = Popen(cmd, shell=True, stdout=PIPE).stdout\n #generates the surface form using morphg\n surface = str(p.readline(), self.encoding).split(\"_\")[0]\n p.close()\n else:\n ctxinfo.warn_once(\"Not using morphg, using lemma+affix instead of surface\")\n surface = lemma_morph\n else: # the token contains one or several '+', e.g. 
lemma_morph=\"C+++\"\n lemma = surface = parts[0]\n affix = parts[1]\n return ( surface, lemma, affix )", "def Wraith_Form(self):\t\t\n\t\tprint(self.name.Title() + \"Wraith\")", "def print_words_structure(self):\n print(\"Dictionary words:\")\n for _, word in kc_util.gen_db(self.dictionary_db.cursor()):\n print(word) \n print(\"\")", "def reversed_face(self):\n return Face(self.topods_shape().Reversed())", "def _all_word_forms(self):\n all_word_forms = []\n for word in self.cursor.execute(f\"\"\"\n SELECT word_form\n FROM {self.table_name}\n WHERE lemma='{self.word}'\n \"\"\"):\n all_word_forms.append(word[0])\n if all_word_forms == []:\n return None\n return all_word_forms", "def _repr_(self):\n return \"Projective hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())", "def get_flashcard(self):\n return f'{self.word}; \"{self.definition}\"'", "def get_flashcard(self):\n return f'{self.word}; \"{self.definition}\"'", "def wireframe_only(self):\n return self._wireframe_only", "def regular_representation(self, side=\"left\"):\n S = self.basis().keys()\n return S.regular_representation(self.base_ring(), side)", "def w(self):\n return self._w", "def draw(self):\n rendered_string = \"\"\n\n # extract the wire labels as strings and get their maximum length\n wire_names = []\n padding = 0\n for i in range(self.full_representation_grid.num_wires):\n wire_name = str(self.active_wires.labels[i])\n padding = max(padding, len(wire_name))\n wire_names.append(wire_name)\n\n for i in range(self.full_representation_grid.num_wires):\n # format wire name nicely\n wire = self.full_representation_grid.wire(i)\n s = \" {:>\" + str(padding) + \"}: {}\"\n\n rendered_string += s.format(wire_names[i], 2 * self.charset.WIRE)\n\n for s in wire:\n rendered_string += s\n\n rendered_string += \"\\n\"\n\n for symbol, cache in [\n (\"U\", self.representation_resolver.unitary_matrix_cache),\n (\"H\", self.representation_resolver.hermitian_matrix_cache),\n (\"M\", self.representation_resolver.matrix_cache),\n ]:\n for idx, matrix in enumerate(cache):\n rendered_string += \"{}{} =\\n{}\\n\".format(symbol, idx, matrix)\n\n return rendered_string", "def render_wireframe(self, **kwds):\n proj = self.projection()\n if self.ambient_dim()==3:\n return proj.render_wireframe_3d(**kwds)\n if self.ambient_dim()==2:\n return proj.render_outline_2d(**kwds)\n raise ValueError, \"render_wireframe is only defined for 2 and 3 dimensional polyhedra.\"", "def get_word_boxes(self):\n word_boxes = self.lang.tool.image_to_string(\n self.image,\n lang=\"eng\",\n builder=pyocr.builders.WordBoxBuilder()\n )\n return word_boxes", "def getDefaultDisplayMode(self):\n return \"Wireframe\"", "def get_subsurface(self):\n w, h = self.rect.w, self.rect.h\n surface = pg.Surface((w, h))\n surface.set_colorkey((0, 0, 0))\n return pg.Surface((w, h))", "def show_word_cloud(self):\n\n cloud = WordCloud(str(self.comments))\n cloud.show_cloud()", "def render(self, mode='human'):", "def view(self):\r\n\t\t\r\n\t\t# add zero term\r\n\t\tt = self\r\n\t\tif t == []:\r\n\t\t\tt = [Te(0)]\r\n\t\t\r\n\t\t# display\r\n\t\tfor i in t:\r\n\t\t\ti.view()\r\n\t\t\t\r\n\t\t# spacer\r\n\t\tprint(' ')\r\n\t\t\t\r\n\t\treturn None", "def __repr__(self):\r\n s = 'Words:\\n' + str(self.words) + '\\n\\n'\r\n s += 'Word lengths:\\n' + str(self.wordlengths) + '\\n\\n'\r\n s += 'Stems:\\n' + str(self.stems) + '\\n\\n'\r\n s += 'Sentence lengths:\\n' + str(self.sentencelengths) + '\\n\\n'\r\n s += 'Gerunds:\\n' + str(self.gerund)\r\n return s", "def 
render(self):\n if self.viz_count == 0:\n return None\n if self.bbox is None or self.true is None:\n return None\n raise NotImplementedError # TODO", "def get_basic_form(self):\n return self.basic_form", "def definition(self, definition):\n result = Words()\n result.words = DefinitionHelper.words_for_definition(definition)\n return result", "def get_frame(self):\n if abs(dot_vectors(self.up_vector, self.mesh_normal)) < 1.0: # if the normalized vectors are not co-linear\n c = cross_vectors(self.up_vector, self.mesh_normal)\n if norm_vector(c) == 0:\n c = Vector(1, 0, 0)\n if norm_vector(self.mesh_normal) == 0:\n self.mesh_normal = Vector(0, 1, 0)\n return Frame(self.pt, c, self.mesh_normal)\n else: # in horizontal surfaces the vectors happen to be co-linear\n return Frame(self.pt, Vector(1, 0, 0), Vector(0, 1, 0))", "def get_model(self):\n return Doc()", "def focused_surface(self) -> Surface | None:\n focused_surface = self._ptr.focused_surface\n if focused_surface == ffi.NULL:\n return None\n return Surface(focused_surface)", "def focused_surface(self) -> Surface | None:\n focused_surface = self._ptr.focused_surface\n if focused_surface == ffi.NULL:\n return None\n return Surface(focused_surface)", "def doc(self):\n return {'_id': self._id,\n 'text': self.text}", "def repr(space, w_object):\n return space.repr(w_object)", "def surface():\n \"\"\"\n Get surface for plotting.\n\n :return fsaverage: surface locations as in nilearn\n :return surf: surface for plotting\n \"\"\"\n\n fsaverage = fetch_surf_fsaverage('fsaverage')\n surf = {}\n\n for key in [t + '_' + h for t in ['pial', 'infl'] for h in ['left', 'right']]:\n\n surf = load_surf_mesh(fsaverage[key])\n x, y, z = np.asarray(surf[0].T, dtype='<f4')\n i, j, k = np.asarray(surf[1].T, dtype='<i4')\n\n surf[key] = dict(x=x, y=y, z=z, i=i, j=j, k=k)\n\n return fsaverage, surf", "def docExtract(self):\n\n self.fv = []\n for doc in self.documents:\n self.fv.append(self.featureSet.extract(doc))\n\n # Convert to a numpy matrix.\n return np.array(np.asmatrix(self.fv))\n # return self.fv", "def _repr_(self):\n return \"Affine hypersurface defined by %s in %s\"%(\n self.defining_polynomial(), self.ambient_space())", "def sketch_und_part(self):\n if (self.dimension == '3D'):\n #Sketch Wuerfel zeichnen\n self.sketch_Wuerfel = self.model.ConstrainedSketch(\n name='Seitenansicht_Wuerfel',\n sheetSize=200.0)\n self.sketch_Wuerfel.rectangle(\n point1=(-self.laenge_x/2.0, -self.laenge_y/2.0),\n point2=(self.laenge_x/2.0, self.laenge_y/2.0))\n #Part Wuerfel generieren\n self.part_Wuerfel = self.model.Part(\n name=self.name+'_Wuerfel',\n dimensionality=THREE_D,\n type=DEFORMABLE_BODY)\n self.part_Wuerfel.BaseSolidExtrude(\n sketch=self.sketch_Wuerfel,\n depth=self.laenge_z/2.0) #z-Symmetrie\n #Sketch Pore zeichnen (fuer Quader und Zylinder)\n self.sketch_Pore = self.model.ConstrainedSketch(\n name='Seitenansicht_Pore',\n sheetSize=200.0)\n if (self.typ_Pore == 'Quader'):\n self.sketch_Pore.rectangle(\n point1=(-self.porenparameter_x/2.0, -self.porenparameter_y/2.0),\n point2=(self.porenparameter_x/2.0, self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Zylinder'):\n self.sketch_Pore.EllipseByCenterPerimeter(\n center=(0.0, 0.0),\n axisPoint1=(self.porenparameter_x/2.0, 0.0),\n axisPoint2=(0.0, self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Ellipsoid' ):\n matlab.ellipsoidIgesOut(\n self.porenparameter_x,\n self.porenparameter_y,\n self.porenparameter_z,\n 'Ellipsoid')\n # if (self.porenparameter_x == self.porenparameter_z):\n # 
self.sketch_Pore.ConstructionLine(\n # point1=(0.0, -100.0),\n # point2=(0.0, 100.0))\n # self.sketch_Pore.EllipseByCenterPerimeter(\n # center=(0.0, 0.0),\n # axisPoint1=(self.porenparameter_x/2.0, 0.0),\n # axisPoint2=(0.0, self.porenparameter_y/2.0))\n # self.sketch_Pore.autoTrimCurve(\n # curve1=self.sketch_Pore.geometry[3],\n # point1=(-self.porenparameter_x/2.0, 0.0))\n # self.sketch_Pore.Line(\n # point1=(0.0, self.porenparameter_y/2.0),\n # point2=(0.0, -self.porenparameter_y/2.0))\n else:\n print('typ_Pore Error!')\n #Part Pore generieren\n if (self.typ_Pore == 'Ellipsoid' ):\n # if (self.porenparameter_x == self.porenparameter_z):\n # self.part_Pore.BaseSolidRevolve(\n # sketch=self.sketch_Pore,\n # angle=360.0,\n # flipRevolveDirection=OFF)\n self.iges_Datei = mdb.openIges(\n 'Ellipsoid.igs',\n msbo=False,\n trimCurve=DEFAULT,\n scaleFromFile=OFF)\n self.model.PartFromGeometryFile(\n name=self.name+'_Pore',\n geometryFile=self.iges_Datei,\n combine=False,\n stitchTolerance=1.0,\n dimensionality=THREE_D,\n type=DEFORMABLE_BODY,\n convertToAnalytical=1,\n stitchEdges=1,\n scale=1) # Skalierung\n self.part_Pore = self.model.parts[self.name+'_Pore']\n self.part_Pore.AddCells(\n faceList = self.part_Pore.faces,\n flipped=False)\n del self.iges_Datei\n os.remove('abaqus_read_iges0.log') #Arbeitsordner aufraeumen\n os.remove('temp-Ellipsoid-new.sat')\n os.remove('Ellipsoid.igs')\n elif (self.typ_Pore == 'Quader' or 'Zylinder'):\n self.part_Pore = self.model.Part(\n name=self.name+'_Pore',\n dimensionality=THREE_D,\n type=DEFORMABLE_BODY)\n self.part_Pore.BaseSolidExtrude(\n sketch=self.sketch_Pore,\n depth=self.porenparameter_z)\n #Assemble\n self.assembly = self.model.rootAssembly\n self.assembly.DatumCsysByDefault(CARTESIAN)\n self.assembly.Instance(\n name=self.name+'_Pore',\n part=self.part_Pore,\n dependent=ON)\n self.assembly.Instance(\n name=self.name+'_Wuerfel',\n part=self.part_Wuerfel,\n dependent=ON)\n #Translation\n self.assembly.translate(\n instanceList=(self.name+'_Wuerfel', ),\n vector=(0.0, 0.0, -self.laenge_z/2.0))\n if (self.typ_Pore == 'Ellipsoid'):\n self.assembly.translate(\n instanceList=(self.name+'_Pore', ),\n vector=(0.0, 0.0, 0.0))\n elif (self.typ_Pore == 'Quader' or 'Zylinder'):\n self.assembly.translate(\n instanceList=(self.name+'_Pore', ),\n vector=(0.0, 0.0, -self.porenparameter_z/2.0))\n #Rotation\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, 0.0),\n axisDirection=(1.0, 0.0, 0.0),\n angle=self.porenparameter_rx)\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, 0.0),\n axisDirection=(0.0, 1.0, 0.0),\n angle=self.porenparameter_ry)\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, 0.0),\n axisDirection=(0.0, 0.0,1.0),\n angle=self.porenparameter_rz)\n #Schneiden\n self.assembly.InstanceFromBooleanCut(\n name='RVE',\n instanceToBeCut=self.assembly.instances[self.name+'_Wuerfel'],\n cuttingInstances=(self.assembly.instances[self.name+'_Pore'], ),\n originalInstances=SUPPRESS)\n self.assembly.deleteFeatures((self.name+'_Wuerfel', self.name+'_Pore', ))\n # del self.model.parts[self.name+'_Wuerfel']\n # del self.model.parts[self.name+'_Pore']\n self.part_RVE = self.model.parts[self.name]\n elif (self.dimension == '2D'):\n #Sketch Wuerfel zeichnen\n self.sketch_Wuerfel = self.model.ConstrainedSketch(\n name='Seitenansicht_Wuerfel',\n sheetSize=200.0)\n self.sketch_Wuerfel.rectangle(\n point1=(0.0, 0.0),\n point2=(self.laenge_x/2.0, self.laenge_y/2.0)) 
#x- und y-Symmetrie\n #Part Wuerfel generieren\n self.part_Wuerfel = self.model.Part(\n name=self.name+'_Wuerfel',\n dimensionality=TWO_D_PLANAR,\n type=DEFORMABLE_BODY)\n self.part_Wuerfel.BaseShell(sketch=self.sketch_Wuerfel)\n #Sketch Pore zeichnen\n self.sketch_Pore = self.model.ConstrainedSketch(\n name='Seitenansicht_Pore',\n sheetSize=200.0)\n if (self.typ_Pore == 'Ellipsoid'):\n self.sketch_Pore.ConstructionLine(\n point1=(0.0, -100.0),\n point2=(0.0, 100.0))\n self.sketch_Pore.EllipseByCenterPerimeter(\n center=(0.0, 0.0),\n axisPoint1=(self.porenparameter_x/2.0, 0.0),\n axisPoint2=(0.0, self.porenparameter_y/2.0))\n self.sketch_Pore.autoTrimCurve(\n curve1=self.sketch_Pore.geometry[3],\n point1=(-self.porenparameter_x/2.0, 0.0))\n self.sketch_Pore.Line(\n point1=(0.0, self.porenparameter_y/2.0),\n point2=(0.0, -self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Quader'):\n self.sketch_Pore.rectangle(\n point1=(-self.porenparameter_x/2.0, -self.porenparameter_y/2.0),\n point2=(self.porenparameter_x/2.0, self.porenparameter_y/2.0))\n elif (self.typ_Pore == 'Zylinder'):\n self.sketch_Pore.EllipseByCenterPerimeter(\n center=(0.0, 0.0),\n axisPoint1=(self.porenparameter_x/2.0, 0.0),\n axisPoint2=(0.0, self.porenparameter_y/2.0))\n else:\n print('typ_Pore Error!')\n #Part Pore generieren\n self.part_Pore = self.model.Part(\n name=self.name+'_Pore',\n dimensionality=TWO_D_PLANAR,\n type=DEFORMABLE_BODY)\n self.part_Pore.BaseShell(sketch=self.sketch_Pore)\n #Assemble\n self.assembly = self.model.rootAssembly\n self.assembly.DatumCsysByDefault(CARTESIAN)\n self.assembly.Instance(\n name=self.name+'_Wuerfel',\n part=self.part_Wuerfel,\n dependent=ON)\n self.assembly.Instance(\n name=self.name+'_Pore',\n part=self.part_Pore,\n dependent=ON)\n self.assembly.rotate(\n instanceList=(self.name+'_Pore', ),\n axisPoint=(0.0, 0.0, self.laenge_z/2.0),\n axisDirection=(0.0, 0.0, self.laenge_z/2.0+1),\n angle=self.porenparameter_rz)\n self.assembly.InstanceFromBooleanCut(\n name='RVE',\n instanceToBeCut=self.assembly.instances[self.name+'_Wuerfel'],\n cuttingInstances=(self.assembly.instances[self.name+'_Pore'], ),\n originalInstances=SUPPRESS)\n self.assembly.deleteFeatures((self.name+'_Wuerfel', self.name+'_Pore', ))\n del self.model.parts[self.name+'_Wuerfel']\n #del self.model.parts[self.name+'_Pore']\n self.part_RVE = self.model.parts[self.name]\n else:\n print('dimension Error!')", "def render(self, mode='human', close=False):\n return None", "def preview(self):\n if self._preview is None:\n self._preview = self.build_preview()\n return self._preview", "def full(self):\n # conveniently, all these calls return unicode if they're passed in unicode, so we\n # won't mangle unicode docstrings at this point.\n out = re.sub(r'\\n\\n', '\\v', self.raw)\n return out.replace('\\n', ' ').replace('\\v', '\\n\\n')", "def _repr_(self):\n desc = ''\n if self.n_vertices()==0:\n desc += 'The empty polyhedron'\n else:\n desc += 'A ' + repr(self.dim()) + '-dimensional polyhedron'\n desc += ' in '\n if self.field()==QQ: desc += 'QQ'\n else: desc += 'RDF'\n desc += '^' + repr(self.ambient_dim()) \n\n if self.n_vertices()>0:\n desc += ' defined as the convex hull of ' \n desc += repr(self.n_vertices())\n if self.n_vertices()==1: desc += ' vertex'\n else: desc += ' vertices'\n \n if self.n_rays()>0:\n if self.n_lines()>0: desc += \", \"\n else: desc += \" and \"\n desc += repr(self.n_rays())\n if self.n_rays()==1: desc += ' ray'\n else: desc += ' rays'\n \n if self.n_lines()>0:\n if self.n_rays()>0: desc += \", 
\"\n else: desc += \" and \"\n desc += repr(self.n_lines())\n if self.n_lines()==1: desc +=' line'\n else: desc +=' lines'\n\n return desc + \".\\n\";", "def toSurface(self):\n surface = pygame.Surface((self.getWidth(), self.getHeight()), 0, 8)\n surface.fill((255, 255, 255))\n black = surface.map_rgb((0, 0, 0))\n mapdata = pygame.PixelArray(surface)\n for pos in self._invalidPositions:\n try:\n mapdata[pos[0]][pos[1]] = black\n except:\n pass\n return surface", "def _get_and_build_text_structure(self):\n return Text_structure(self.filename, self)", "def fielddict(self):\n return self.fields.rectangledict", "def get_w(self):\n return self.w", "def setDisplayMode(self, mode):\n return \"Wireframe\"", "def word_forms(self, word):\n result = set()\n for dic_name in self.dictionaries.keys():\n for vector in self.dictionaries[dic_name].word_forms(word):\n result.add(tuple(vector))\n return filter(lambda x: len(x), result)", "def specific_surface(self):\n srf = BRepAdaptor_Surface(self.topods_shape())\n surf_type = self.surface_type()\n if surf_type == \"plane\":\n return srf.Plane()\n if surf_type == \"cylinder\":\n return srf.Cylinder()\n if surf_type == \"cone\":\n return srf.Cone()\n if surf_type == \"sphere\":\n return srf.Sphere()\n if surf_type == \"torus\":\n return srf.Torus()\n if surf_type == \"bezier\":\n return srf.Bezier()\n if surf_type == \"bspline\":\n return srf.BSpline()\n raise ValueError(\"Unknown surface type: \", surf_type)", "def plane(self):\n return Plane(Point(0, self.evaluations.exposedWing.edges[2].point1.y, 0), Vector(0, 1, 0),\n hidden=True)", "def trivial_representation(self, side=\"twosided\"):\n S = self.basis().keys()\n return S.trivial_representation(self.base_ring())", "def _repr_(self):\n return \"Hyperbolic plane\"", "def generateWordCloud(self):\n d = path.dirname(__file__) if '__file__' in locals() else os.getcwd()\n \n text = open(path.join(d, 'wc.txt')).read()\n \n wordcloud = WordCloud().generate(text)\n \n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis('off')\n \n wordcloud = WordCloud(max_font_size=40).generate(text)\n plt.figure()\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis('off')\n image = wordcloud.to_file('wc.png')\n\n with open('wc.png', 'rb') as img_file:\n b64_string = base64.b64encode(img_file.read())\n\n wordcloud_img = str(b64_string.decode('utf-8'))\n\n if not wordcloud_img:\n raise Exception(\"No base64 string encoded\")\n\n return wordcloud_img", "def w(self): # pylint: disable=invalid-name\n return self._w", "def _get_full_vocabulary_string(self, html):\n # The kana represntation of the Jisho entry is contained in this div\n text_markup = html.select_one(\".concept_light-representation\")\n\n upper_furigana = text_markup.select_one(\".furigana\").find_all('span')\n\n # inset_furigana needs more formatting due to potential bits of kanji sticking together\n inset_furigana_list = []\n # For some reason, creating the iterator \"inset_furigana\" and then accessing it here\n # causes it to change, like observing it causes it to change. 
I feel like Schrodinger\n for f in text_markup.select_one(\".text\").children:\n cleaned_text = f.string.replace(\"\\n\", \"\").replace(\" \", \"\")\n if cleaned_text == \"\":\n continue\n elif len(cleaned_text) > 1:\n for s in cleaned_text:\n inset_furigana_list.append(s)\n else:\n inset_furigana_list.append(cleaned_text)\n\n children = zip_longest(upper_furigana, inset_furigana_list)\n\n full_word = []\n for c in children:\n if c[0].text != '':\n full_word.append(c[0].text)\n elif c[0].text == '' and contains_kana(c[1]):\n full_word.append(c[1])\n else:\n continue\n\n # print(''.join(full_word))\n # print(\"====\")\n return ''.join(full_word)", "def get_wordcloud():\n params = request.args\n result = None\n\n def set_result(x):\n nonlocal result # This is ugly, ew, gotta fix this\n result = x\n\n pipeline_zoo.get_full_word_distribution(set_result).feed_data((params, None))\n return jsonify({\n 'word_distribution': result\n })", "def _repr_(self):\n description = \"1-form \"\n if self._name is not None:\n description += \"'%s' \" % self._name\n return self._final_repr(description)", "def _repr_(self):\n description = \"1-form \"\n if self._name is not None:\n description += \"'%s' \" % self._name\n return self._final_repr(description)", "def __str__(self):\n return str(self.preview)", "def form(self):\n\t\treturn self._form", "def addOtherForm(documentName, word, unique):\r\n formRef = \":form_\" + replace_form(word.word)\r\n if word.transliteration and word.transliteration.word != \"\" and word.transliteration.word != \" \":\r\n formRef += \"_\" + word.transliteration.word\r\n formRef += \"_\" + unique\r\n\r\n formRef += \" a ontolex:Form;\\n\"\r\n\r\n writtenRepRef = \" ontolex:writtenRep \\\"\"\r\n writtenRepRef += word.word + \"\\\"\" + word.writingLanguage\r\n\r\n if word.transliteration and word.transliteration.word != \"\":\r\n writtenRepRef += \", \\\"\" + word.transliteration.word + \"\\\"\" + word.transliteration.writingLanguage\r\n writtenRepRef += \" .\"\r\n\r\n frequencyRef = \"\"\r\n if word.frequencyDict:\r\n frequencyRef = \"\\n\"\r\n for corpus,frequency in word.frequencyDict.items():\r\n if frequency != 0:\r\n frequencyRef +=' frac:frequency [a e2model:' + corpus +'; rdf:value \"' + str(frequency) + '\" ] ;\\n'\r\n frequencyRef = frequencyRef[:len(frequencyRef) -2]\r\n frequencyRef += \".\"\r\n formEntry = formRef + writtenRepRef\r\n if frequencyRef != \".\":\r\n formEntry = formEntry[:len(formEntry) -1]\r\n formEntry += \";\"\r\n formEntry += frequencyRef\r\n\r\n with open(documentName, 'a') as f:\r\n f.write(formEntry)\r\n f.write(\"\\n\\n\")\r\n return", "def __repr__(self):\n\n return \"<Terms term=%s>\" % (self.word)", "def __call__(self):\n return self._representation_vector", "def set_und_surface(self):\n if (self.dimension == '3D'):\n self.part_RVE.Set(\n cells=self.part_RVE.cells.getSequenceFromMask(mask=('[#1 ]', ), ),\n name='Set_RVE')\n elif (self.dimension == '2D'):\n self.part_RVE.Set(\n faces=self.part_RVE.faces.getSequenceFromMask(mask=('[#1 ]', ), ),\n name='Set_RVE')\n else:\n print('dimension Error!')", "def __repr__(self):\n return self.textual_representation().encode(\"utf-8\")", "def show(self, morph, direction='LR->'):\n parse = self(morph.form, direction)\n parse = {key: val[0] for key, val in parse.items()}\n form_mark = morph.form_str[0].split()\n for i in range(len(form_mark)):\n seg = form_mark[i]\n if parse['C1'][i] or parse['V1'][i]:\n seg = seg + ' •'\n if parse['(syll1'][i] or parse['(syll2'][i]:\n seg = '[' + seg\n if parse['syll1)'][i] 
or parse['syll2)'][i]:\n seg = seg + ']'\n if parse['(foot1'][i]:\n seg = '(' + seg\n if parse['foot1)'][i]:\n seg = seg + ')'\n form_mark[i] = seg\n return ' '.join(form_mark)", "def design(self):\n return self[self.design_columns]", "def design(self):\n return self[self.design_columns]", "def _get_document_representation(self, doc_id, index):\n vec = np.zeros(shape=(index.num_terms,), dtype=np.float64)\n for i, term in enumerate(sorted(index.get_index_terms())):\n vec[i] = self._tfidf(\n index.get_term_frequency(term, doc_id),\n index.get_document_frequency(term),\n index.get_document_count()\n )\n return vec", "def render_text_surfaces(self):\n self.images = [] # The text surfaces.\n line_width = 0\n line = []\n space_width = self.font.size(' ')[0]\n\n # Put the words one after the other into a list if they still\n # fit on the same line, otherwise render the line and append\n # the resulting surface to the self.images list.\n for word in self.text:\n line_width += self.font.size(word)[0] + space_width\n # Render a line if the line width is greater than the rect width.\n if line_width > self.rect.w:\n surf = self.font.render(' '.join(line), True, self.text_color)\n self.images.append(surf)\n line = []\n line_width = self.font.size(word)[0] + space_width\n\n line.append(word)\n\n # Need to render the last line as well.\n surf = self.font.render(' '.join(line), True, self.text_color)\n self.images.append(surf)", "def get_structure(self):\n return self.beta_sheet.model.structure", "def get_embedding_output(self):\n return self.embedding_output", "def _repr_(self):\n return \"Newform abelian subvariety %s of dimension %s of %s\" % (\n self.newform_label(), self.dimension(), self._ambient_repr())", "def _build_representation_layer(self,\n input_question_word,\n input_question_word_mask,\n input_question_subword,\n input_question_subword_mask,\n input_question_char,\n input_question_char_mask,\n input_context_word,\n input_context_word_mask,\n input_context_subword,\n input_context_subword_mask,\n input_context_char,\n input_context_char_mask): \n word_vocab_size = self.hyperparams.data_word_vocab_size\n word_embed_dim = self.hyperparams.model_representation_word_embed_dim\n word_dropout = self.hyperparams.model_representation_word_dropout if self.mode == \"train\" else 0.0\n word_embed_pretrained = self.hyperparams.model_representation_word_embed_pretrained\n word_feat_trainable = self.hyperparams.model_representation_word_feat_trainable\n word_feat_enable = self.hyperparams.model_representation_word_feat_enable\n subword_vocab_size = self.hyperparams.data_subword_vocab_size\n subword_embed_dim = self.hyperparams.model_representation_subword_embed_dim\n subword_unit_dim = self.hyperparams.model_representation_subword_unit_dim\n subword_feat_trainable = self.hyperparams.model_representation_subword_feat_trainable\n subword_window_size = self.hyperparams.model_representation_subword_window_size\n subword_hidden_activation = self.hyperparams.model_representation_subword_hidden_activation\n subword_dropout = self.hyperparams.model_representation_subword_dropout if self.mode == \"train\" else 0.0\n subword_pooling_type = self.hyperparams.model_representation_subword_pooling_type\n subword_feat_enable = self.hyperparams.model_representation_subword_feat_enable\n char_vocab_size = self.hyperparams.data_char_vocab_size\n char_embed_dim = self.hyperparams.model_representation_char_embed_dim\n char_unit_dim = self.hyperparams.model_representation_char_unit_dim\n char_feat_trainable = 
self.hyperparams.model_representation_char_feat_trainable\n char_window_size = self.hyperparams.model_representation_char_window_size\n char_hidden_activation = self.hyperparams.model_representation_char_hidden_activation\n char_dropout = self.hyperparams.model_representation_char_dropout if self.mode == \"train\" else 0.0\n char_pooling_type = self.hyperparams.model_representation_char_pooling_type\n char_feat_enable = self.hyperparams.model_representation_char_feat_enable\n fusion_type = self.hyperparams.model_representation_fusion_type\n fusion_num_layer = self.hyperparams.model_representation_fusion_num_layer\n fusion_unit_dim = self.hyperparams.model_representation_fusion_unit_dim\n fusion_hidden_activation = self.hyperparams.model_representation_fusion_hidden_activation\n fusion_dropout = self.hyperparams.model_representation_fusion_dropout if self.mode == \"train\" else 0.0\n fusion_trainable = self.hyperparams.model_representation_fusion_trainable\n \n with tf.variable_scope(\"representation\", reuse=tf.AUTO_REUSE):\n input_question_feat_list = []\n input_question_feat_mask_list = []\n input_context_feat_list = []\n input_context_feat_mask_list = []\n \n if word_feat_enable == True:\n self.logger.log_print(\"# build word-level representation layer\")\n word_feat_layer = WordFeat(vocab_size=word_vocab_size, embed_dim=word_embed_dim,\n dropout=word_dropout, pretrained=word_embed_pretrained, embedding=self.word_embedding,\n num_gpus=self.num_gpus, default_gpu_id=self.default_gpu_id, regularizer=self.regularizer,\n random_seed=self.random_seed, trainable=word_feat_trainable)\n \n (input_question_word_feat,\n input_question_word_feat_mask) = word_feat_layer(input_question_word, input_question_word_mask)\n (input_context_word_feat,\n input_context_word_feat_mask) = word_feat_layer(input_context_word, input_context_word_mask)\n \n input_question_feat_list.append(input_question_word_feat)\n input_question_feat_mask_list.append(input_question_word_feat_mask)\n input_context_feat_list.append(input_context_word_feat)\n input_context_feat_mask_list.append(input_context_word_feat_mask)\n \n word_unit_dim = word_embed_dim\n self.word_embedding_placeholder = word_feat_layer.get_embedding_placeholder()\n else:\n word_unit_dim = 0\n self.word_embedding_placeholder = None\n \n if subword_feat_enable == True:\n self.logger.log_print(\"# build subword-level representation layer\")\n subword_feat_layer = SubwordFeat(vocab_size=subword_vocab_size, embed_dim=subword_embed_dim,\n unit_dim=subword_unit_dim, window_size=subword_window_size, hidden_activation=subword_hidden_activation,\n pooling_type=subword_pooling_type, dropout=subword_dropout, num_gpus=self.num_gpus,\n default_gpu_id=self.default_gpu_id, regularizer=self.regularizer,\n random_seed=self.random_seed, trainable=subword_feat_trainable)\n \n (input_question_subword_feat,\n input_question_subword_feat_mask) = subword_feat_layer(input_question_subword, input_question_subword_mask)\n (input_context_subword_feat,\n input_context_subword_feat_mask) = subword_feat_layer(input_context_subword, input_context_subword_mask)\n \n input_question_feat_list.append(input_question_subword_feat)\n input_question_feat_mask_list.append(input_question_subword_feat_mask)\n input_context_feat_list.append(input_context_subword_feat)\n input_context_feat_mask_list.append(input_context_subword_feat_mask)\n else:\n subword_unit_dim = 0\n \n if char_feat_enable == True:\n self.logger.log_print(\"# build char-level representation layer\")\n char_feat_layer = 
CharFeat(vocab_size=char_vocab_size, embed_dim=char_embed_dim,\n unit_dim=char_unit_dim, window_size=char_window_size, hidden_activation=char_hidden_activation,\n pooling_type=char_pooling_type, dropout=char_dropout, num_gpus=self.num_gpus,\n default_gpu_id=self.default_gpu_id, regularizer=self.regularizer,\n random_seed=self.random_seed, trainable=char_feat_trainable)\n \n (input_question_char_feat,\n input_question_char_feat_mask) = char_feat_layer(input_question_char, input_question_char_mask)\n (input_context_char_feat,\n input_context_char_feat_mask) = char_feat_layer(input_context_char, input_context_char_mask)\n \n input_question_feat_list.append(input_question_char_feat)\n input_question_feat_mask_list.append(input_question_char_feat_mask)\n input_context_feat_list.append(input_context_char_feat)\n input_context_feat_mask_list.append(input_context_char_feat_mask)\n else:\n char_unit_dim = 0\n \n feat_unit_dim = word_unit_dim + subword_unit_dim + char_unit_dim\n \n feat_fusion_layer = self._create_fusion_layer(feat_unit_dim, fusion_unit_dim,\n fusion_type, fusion_num_layer, fusion_hidden_activation, fusion_dropout,\n self.num_gpus, self.default_gpu_id, self.regularizer, self.random_seed, fusion_trainable)\n input_question_feat, input_question_feat_mask = self._build_fusion_result(input_question_feat_list,\n input_question_feat_mask_list, feat_fusion_layer)\n input_context_feat, input_context_feat_mask = self._build_fusion_result(input_context_feat_list,\n input_context_feat_mask_list, feat_fusion_layer)\n \n return input_question_feat, input_question_feat_mask, input_context_feat, input_context_feat_mask", "def __repr__(self):\n name = 'sw'\n return \"{}({}, {})\".format(\n name,\n list(self),\n self.window_size)", "def draw_word_scheme(self) -> None:\n print(\"\".join(self.word2))", "def repr_ui(self):\n from lib.entities import entities_factory\n return (entities_factory.EntitiesFactory().\n convert_obj_repr_from_rest_to_ui(obj=self))", "def get_surface(self, new: bool = True) -> 'pygame.Surface':\n if new:\n return self.get_crop_rect(self.get_rect())\n return self._surface", "def rich(text):\n return full(text, False)", "def surfaceInfo(self, index):\n rawInfo = self.rawSurfaceInfo(index)\n if rawInfo is None:\n return None\n\n if \"GLAS\" in rawInfo:\n mat = Material.findByName(name=rawInfo[\"GLAS\"][0])\n else:\n mat = Air()\n\n curvature = float(rawInfo[\"CURV\"][0])\n if curvature == 0.0:\n radius = float(\"+inf\")\n else:\n radius = 1/curvature*self.factor\n \n if \"DIAM\" in rawInfo:\n diameter = 2*float(rawInfo[\"DIAM\"][0])*self.factor\n else:\n diameter = float(\"+inf\")\n\n spacing = float(rawInfo[\"DISZ\"][0])*self.factor\n\n return Surface(number=index, \n R=radius,\n mat=mat,\n spacing=spacing,\n diameter=diameter)", "def doc(self):\n try:\n return self.definition.doc\n except AttributeError:\n return self.raw_doc", "def preview():\n return render_template(\"controls/preview.html\")", "def createEmptyLayer(self):\n # , wt.greeting: False , wt.ects: False, wt.preReqs: False, wt.courseCodeMentioned: False\n layer = {wt.questionWord: \"\", wt.pronoun: \"\", wt.verb: \"\", wt.websiteName: \"\", wt.timeWord: \"\", wt.about: \"\",\n wt.weather: \"\", wt.when: \"\", wt.keywords: [], wt.courseID: \"\", wt.structureUnitCode: \"\",\n wt.sentence: [], wt.hangman: \"\", wt.what: \"\"}\n return layer", "def _get_surfaces(idf):\n surface_types = [\n 'BUILDINGSURFACE:DETAILED',\n 'FENESTRATIONSURFACE:DETAILED',\n ]\n surfaces = []\n for surface_type in surface_types:\n 
surfaces.extend(idf.idfobjects[surface_type])\n\n return surfaces", "def prescription(self):\n prescription = \"\\n{0:>10}\\t{1:>10}\\t{2:>10}\\t{3:>10}\\n\".format(\"R\",\"Material\",\"d\",\"diameter\")\n for surface in self.lensSurfaces():\n prescription += \"{0:>10.2f}\\t{1:>10}\\t{2:>10.2f}\\t{3:>10.2f}\\n\".format(surface.R, str(surface.mat), surface.spacing, surface.diameter)\n return prescription", "def fToc_Wtf(Vc1,Vc2,Vc3,Vk,Vrw,Ck,Dc1,Dc2,Dc3,Dk,Dw):\n\tGDen=fOrmGDen(Vc1,Vc2,Vc3,Vk,Vrw,Dc1,Dc2,Dc3,Dk,Dw)\n\tTocwf=Vk*Ck*Dk/GDen\n\treturn Tocwf", "def __repr__(self):\r\n printer = 'text model name: ' + str(self.name) + '\\n'\r\n printer += ' number of words: ' + str(len(self.words)) +'\\n'\r\n printer += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\r\n printer += ' number of stems: ' + str(len(self.stems)) + '\\n'\r\n printer += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\r\n printer += ' number of different punctuations: ' + str(len(self.punctuation)) \r\n return printer" ]
[ "0.60465777", "0.5572003", "0.5572003", "0.55544156", "0.5375487", "0.5374577", "0.52845716", "0.5260148", "0.5204533", "0.5131466", "0.5076054", "0.504823", "0.5015482", "0.50128734", "0.4972725", "0.4963215", "0.49538532", "0.4942636", "0.49395669", "0.49350926", "0.49289408", "0.49052683", "0.4899106", "0.48912352", "0.48813435", "0.48789668", "0.48789668", "0.48751238", "0.48693037", "0.48663515", "0.4854176", "0.4854035", "0.48408988", "0.48182824", "0.48082778", "0.47999516", "0.47919822", "0.47889656", "0.47826323", "0.47798318", "0.47775865", "0.4759474", "0.4751023", "0.47484174", "0.4746554", "0.4746554", "0.47427496", "0.473479", "0.47340882", "0.4723834", "0.47208476", "0.47139952", "0.47109106", "0.46990848", "0.46869028", "0.46800596", "0.4668431", "0.46664825", "0.46619958", "0.46619916", "0.4660987", "0.46558934", "0.46552646", "0.46504226", "0.46467304", "0.4645812", "0.464484", "0.46390703", "0.46386543", "0.46375376", "0.46325842", "0.46325842", "0.4631184", "0.4625126", "0.4624514", "0.46142262", "0.4595924", "0.4590762", "0.45872596", "0.4582453", "0.45824516", "0.45824516", "0.4581046", "0.4580161", "0.45794952", "0.45773944", "0.45742154", "0.45704934", "0.45681623", "0.45639637", "0.45634937", "0.45607093", "0.4558593", "0.4557846", "0.45547774", "0.45546624", "0.45510173", "0.4550728", "0.4550151", "0.45498672", "0.45458353" ]
0.0
-1
Return the number of segments in this WordForm.
def __len__(self): return len(self.segments)
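Taken together with the sr method from the previous row, this __len__ shows WordForm acting as a thin wrapper around its segment list. A quick usage sketch, reusing the hypothetical classes from the note above and attaching this row's method via a subclass (the subclass name is illustrative only):

class CountedWordForm(WordForm):
    def __len__(self):
        # Number of segments in this word form; len(wf) dispatches here.
        return len(self.segments)


wf = CountedWordForm([Segment("k"), Segment("a"), Segment("t")])
assert wf.sr() == "kat"   # surface representation (previous row's method)
assert len(wf) == 3       # segment count (this row's method)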
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def segment_n(self):\n return len(self.segment_lengths)", "def numSegments(self):\n\n return self.getHierView().numSegments()", "def getSegmentCount(self) -> int:\n ...", "def calculate_number_of_segments(self):\n return sum(len(eg.transcript_file.segments) for eg in self.exemplars)", "def get_number_of_segments(self):\n\n return len(self._break_points) - 1", "def total_segments(self):\n return self._total_segments", "def sections(self) -> int:\n return len(self.string.split(\".\"))", "def segment_lengths(self):\n if not hasattr(self, '_segment_lengths'):\n _ = self.length # Sets length and segment_lengths\n return self._segment_lengths", "def segment_counter(self):\n return self._data_writer.get_segment_counter()", "def num_parts(self):\n return self._num_parts", "def Test_NumSegments(Daten):\n N_Leitungen = len(Daten.PipeSegments)\n\n return N_Leitungen", "def getSegmentWidth(self):\n l = [segment.width for segment in self.segments]\n if l.count(l[0]) == len(l):\n return l[0]\n else:\n raise ValueError(\"The widths of the segments must be the same otherwise it makes no sense.\")", "def getNumod_byerSentences(self): # !!! Need to rewrite this to match graph\n return len(self.__document)", "def number_of_sections(self):\n sections = self.config.sections()\n return len(sections)", "def number_of_sections(self):\n #print (len(self.config.sections()))\n return len(self.config.sections())", "def length(self):\n if not hasattr(self, '_length'):\n lengths = np.linalg.norm(np.diff(self.points, axis=0), axis=1)\n self._segment_lengths = lengths\n self._length = lengths.sum()\n return self._length", "def count_segments(s):\n s = s.strip().split()\n return len(s)", "def __len__(self):\n return self.coarse_segm.size(0)", "def __len__(self):\n nlines = self.get_endline() - self.get_startline() + 1\n if nlines < 0:\n nlines = 0\n return nlines", "def count_segments(self, raw_only: bool = False) -> int:\n if self.segments:\n self_count = 0 if raw_only else 1\n return self_count + sum(\n seg.count_segments(raw_only=raw_only) for seg in self.segments\n )\n else:\n return 1", "def matched_length(self) -> int:\n return sum(seg.matched_length for seg in self.segments)", "def numspans(self):\n return self.mesh.size - 1", "def size(self):\n return len(self.array_form)", "def size(self):\n return len(self.sentence)", "def word_count(self):\n return len(self.text)", "def size(self):\n return len(self.id2term)", "def getNumCurveSegments(self):\n return _libsbml.Curve_getNumCurveSegments(self)", "def n(self):\n return len(self.marks)", "def len(self):\n return self.n", "def __len__(self):\n return sum(len(p) for p in self.parts)", "def size(self) -> int:\n return len(self.__fsmParses)", "def length(self):\n return pyvista.Box(self.bounds).length", "def getlen(self):\n if self.onlydiag():\n return self.lendiag()\n else:\n return len(self)", "def segments(self):\n return self._segments", "def num_dof(self) -> int:\n return len(self)", "def _fields_num(self):\n return len(self.paths)", "def vocab_size(self) -> int:\n return len(self._tokenizer)", "def vocab_size(self) -> int:\n return len(self._tokenizer)", "def segment_download_count(self):\n # type: () -> int\n return self._segment_download_count", "def length(self):\n\t\treturn self.n", "def get_num_docs(self):\n return len(self.vocab)", "def length(self):\n return self.count", "def wordCount(document):\n return float(len(document.split(None)))", "def word_count(self):\n return self._word_count", "def street_segment_count(Gu):\n if nx.is_directed(Gu): # pragma: 
no cover\n msg = \"`Gu` must be undirected\"\n raise ValueError(msg)\n return len(Gu.edges)", "def size(self):\n size = 1\n for current_slice in self.slices:\n size *= current_slice.stop - current_slice.start\n return size", "def __len__(self):\n return len(self._ngrams)", "def size(self):\n return self._counter", "def getLength(self):\n return self.count", "def __len__(self):\n return self.document_count", "def size(self):\n\t\treturn self._count", "def numFragments(self):\n\n self._fragment()\n return self._data['fragindex'].max() + 1", "def __len__(self):\n\n if self.is_finite_set:\n size = 0\n for set in self.sets:\n size += len(set)\n return size\n else:\n raise ValueError(\"'%s' is not a finite set.\" % self)", "def size(self):\n return len(self.sequence)", "def size(self):\n return self.N", "def nfragments(self):\n return len(self.__fragments)", "def getLength(self):\n return self.n", "def return_num_vertices(self):\n return self.__size", "def len(self):\n\t\t\n\t\treturn len(self.line)", "def count_fragments(self):\n return len(self.fragment_list)", "def size(self) -> int:\n return self.length", "def perimeter(self):\n return sum([s.length for s in self.segments])", "def size(self):\n return self.__length", "def length(self):\n return self.counter", "def __len__(self):\n return len(self.word_2_idx)", "def __len__(self):\n return len(self.words)", "def vocab_size(self) -> int:\n return len(self.vocabulary)", "def len(self):\n # print(self.processed_file_names)\n return self.len_", "def get_total_number_of_documents(self):\n return self.total_number_of_documents", "def size(self):\r\n return self.__length", "def sentence_count(self, doc):\n\n return len(sent_tokenize(doc))", "def paragraph_count(self, doc):\n\n paragraphs = doc.split(\"\\n\\n\")\n # remove the empty string\n return len([paragraph for paragraph in paragraphs if paragraph])", "def count(self):\n return len(self._components)", "def __len__(self) -> int:\n return len(self._waves)", "def size(self) -> int:\n size = self.da.length()\n return size", "def __len__(self) -> int:\n\n return self.layout.gaDims", "def getNumberOfViews(self) -> int:\n ...", "def count(self):\n return self.size()", "def size(self):\n return self._length", "def V(self):\n return len(self.voc)", "def V(self):\n return len(self.voc)", "def number_syllables(self):\n return len(self.array_form)", "def sentence_count(self):\n count = 0\n for line in self.lines:\n if '.' 
in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def char_count(self, doc):\n\n return len(doc)", "def fragments(self):\n return len(self.byteruns())", "def getSideCount(self):\n if self.__nsides is None:\n raise ValueError, \"No side count defined.\"\n return self.__nsides", "def get_number_of_words_in_document(self, document):\n for _set in self.sets:\n if document in _set:\n return self.sets[_set][document]['number_of_words']", "def __len__(self):\n return len(self.chunk_idxes)", "def size(self):\n return _(len(self._))", "def len(self):\n return self.index.value", "def total_form_count(self):\n if self.initial_extra:\n count = len(self.initial_extra) if self.initial_extra else 0\n count += self.extra\n return count\n else:\n return forms.BaseInlineFormSet.total_form_count(self)", "def get_number_of_document_word_occurs_in(self, word):\n return len(self.dictionary[word]['docs'])", "def n_cs(self):\n return np.size(self._cs, 0)", "def getNumTerms(self):\n\n return self._numTerms", "def vocab_size(self):\n return self._vocab_size", "def __len__(self):\n if self._words is None:\n return 0\n return len(self._words)", "def MSC_size(self):\n return len(self.get_MSC())", "def getNbStations(self) :\n return len(self._stations)", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)", "def doc_count(self):\n\t\treturn self.index.collection.count()" ]
[ "0.80350745", "0.7816462", "0.7777629", "0.76368123", "0.7336316", "0.7148496", "0.7035357", "0.6993581", "0.6829113", "0.6728645", "0.6592498", "0.6582837", "0.65064055", "0.6500765", "0.6491579", "0.6489725", "0.6485269", "0.64289606", "0.6355083", "0.6320407", "0.63141876", "0.62827045", "0.6274846", "0.6195171", "0.61751115", "0.6171847", "0.6159649", "0.6152446", "0.609783", "0.6097378", "0.60874945", "0.6081096", "0.60679436", "0.604849", "0.6038178", "0.6013506", "0.6007613", "0.6007613", "0.6005595", "0.59990096", "0.598277", "0.5978363", "0.59756005", "0.5975156", "0.5968454", "0.5966994", "0.5965942", "0.5958852", "0.59563947", "0.5953859", "0.5949692", "0.59458524", "0.59425235", "0.5929141", "0.5917914", "0.5908254", "0.5897087", "0.58896005", "0.58871", "0.5886858", "0.58739996", "0.5863456", "0.5860798", "0.58556825", "0.5842325", "0.58417416", "0.5839643", "0.5829781", "0.58267444", "0.5825746", "0.58212227", "0.5813834", "0.5805692", "0.58028424", "0.58021796", "0.5801291", "0.5799705", "0.5798086", "0.57977074", "0.5797525", "0.5797525", "0.57962984", "0.5790599", "0.5788066", "0.5783669", "0.57806164", "0.57799095", "0.5777971", "0.57742214", "0.57741064", "0.5768228", "0.5766485", "0.57658285", "0.5760644", "0.5759647", "0.575795", "0.57528454", "0.5749621", "0.57494247", "0.5744563" ]
0.7166042
5
Given a Frame object, will return the bytes of that Frame's file. If provided, will also scale the size of the image and convert it to the required format.
def convert_frames(frame, img_format: str, scale=None) -> bytes:
    path = frame.filename
    with open(path, "rb") as image_file:
        im = Image.open(image_file)
        converted_img = BytesIO()
        if scale:
            _LOGGER.debug("Scaling the image")
            (width, height) = (int(im.width * scale), int(im.height * scale))
            _LOGGER.debug("Original size is {}wx{}h, new size is {}wx{}h".format(im.width, im.height, width, height))
            im = im.resize([width, height])
        im.save(converted_img, img_format)
        return converted_img.getvalue()
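A sketch of how convert_frames might be exercised. The Frame type is not included in the record, so a stand-in with only a filename attribute is assumed here; the module-level names the body relies on (Image from Pillow, BytesIO, a _LOGGER) are declared explicitly, since the row does not show the imports:

import logging
from collections import namedtuple
from io import BytesIO      # in-memory buffer for the converted image
from PIL import Image       # Pillow, used to open and resize the frame image

_LOGGER = logging.getLogger(__name__)

# Hypothetical stand-in for Frame: anything exposing a .filename of an image on disk.
Frame = namedtuple("Frame", ["filename"])

frame = Frame(filename="/tmp/frame_0001.jpg")                      # assumed input path
jpeg_bytes = convert_frames(frame, img_format="JPEG", scale=0.5)   # half-size JPEG bytes
with open("/tmp/frame_0001_small.jpg", "wb") as out:
    out.write(jpeg_bytes)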
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_to_image(self, frame, base64_encode=False):\n #NOTE: tuple (85010, 1) ndarray --> data reduction\n img_buf_arr = cv2.imencode(\".jpeg\", frame)[1]\n if base64_encode:\n img_buf_arr = b\"data:image/jpeg;base64,\" + base64.b64encode(img_buf_arr)\n return img_buf_arr\n return bytes(img_buf_arr)", "def getFrame(self):\n s, image = self.capture.read()\n return image", "def get_image(self, frame):\n msec = frame * config.MS_PER_FRAME\n frame = msec // 250\n return self.frames[frame % self.num_frames]", "def get_frame(self, frame: int) -> BaseImage:\n return self.sequence[frame]", "def image_to_byte_array(image: Image, f='JPEG'):\n imgByteArr = io.BytesIO()\n image.save(imgByteArr, format=f)\n imgByteArr = imgByteArr.getvalue()\n return imgByteArr", "def decodeFrame(self, image):\n return image", "def to_blob(self):\n x = cv2.dnn.blobFromImage(self.frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n return x", "def convertFrame(self):\n try:\n height,width=self.currentFrame.shape[:2]\n img=QtGui.QImage(self.currentFrame,\n width,\n height,\n QtGui.QImage.Format_RGB888)\n img=QtGui.QPixmap.fromImage(img)\n self.previousFrame = self.currentFrame\n return img\n except:\n return None", "def decodeFrame(frameJson):\n frameBase64 = frameJson[\"imageBase64\"]\n return base64.b64decode(frameBase64)", "def readFrame(self):\n\t\tsuccess, self.frameImage = self.vidcap.read()\n\t\treturn success, self.frameImage", "def convertFrame(self):\r\n try:\r\n height, width = self.currentFrame.shape[:2]\r\n img = QtGui.QImage(self.currentFrame,\r\n width,\r\n height,\r\n QtGui.QImage.Format_RGB888)\r\n img = QtGui.QPixmap.fromImage(img)\r\n self.previousFrame = self.currentFrame\r\n return img\r\n except:\r\n return None", "def convertFrame(self):\n try:\n img = QImage(self.currentVideoFrame,\n self.currentVideoFrame.shape[1],\n self.currentVideoFrame.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None", "def convertFrame(self):\n try:\n img = QImage(self.currentVideoFrame,\n self.currentVideoFrame.shape[1],\n self.currentVideoFrame.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None", "def frames(self):\n while True:\n ret, frame = self.classification()\n if ret == True:\n yield cv2.imencode('.jpg', frame)[1].tobytes()\n else:\n break", "def get_frame(frame):\n\n return int.from_bytes(frame, byteorder='big')", "def test_get_image_and_to_byte_array_are_compatible(self):\n\n with open(self.subject, \"rb\") as f:\n content = f.read()\n\n image = image_helper.get_image(content)\n\n self.assertEqual(image.size, (800, 450))\n\n bytes_array = image_helper.to_byte_array(image)\n\n image = image_helper.get_image(bytes_array)\n\n self.assertEqual(image.size, (800, 450))", "def read(self):\n\n # Obtém frame da câmera.\n status , frame = super().read()\n\n if not status: return\n\n # Obtém a imagem.\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = Image.fromarray(frame)\n \n # Se a opção de efeito espelho estiver ativa, a imagem será invertida.\n if self.__mirror:\n frame = frame.transpose(Image.FLIP_LEFT_RIGHT)\n \n return ImageTk.PhotoImage(frame) , frame.size", "def make_blob(self, format=None):\n if format is not None:\n with self.convert(format) as converted:\n return converted.make_blob()\n library.MagickResetIterator(self.wand)\n length = ctypes.c_size_t()\n blob_p = library.MagickGetImageBlob(self.wand, ctypes.byref(length))\n if blob_p and length.value:\n blob = ctypes.string_at(blob_p, length.value)\n library.MagickRelinquishMemory(blob_p)\n 
return blob\n self.raise_exception()", "def frame(self):\n try:\n AppHelper.runConsoleEventLoop(installInterrupt=True)\n return str(self._delegate.frame.representations()[0].TIFFRepresentation().bytes())\n except:\n return None", "def image_to_byte(img):\n img2 = img.crop(box=None)\n byte_arr = io.BytesIO()\n img2.save(byte_arr, format='PNG')\n return byte_arr.getvalue()", "def get_movie_frame(movie_file, frame=0):\n movie = cv2.VideoCapture(movie_file)\n _, image = movie.read() \n height, width, _ = image.shape\n filename = os.path.splitext(movie_file)[0] + f'_{frame}.jpg'\n cv2.imwrite(filename, image)\n \n return filename, height, width", "def image_to_bytes(a, fmt='png'):\n a = np.uint8(a)\n f = io.BytesIO()\n ima = PIL.Image.fromarray(a).save(f, fmt)\n return f.getvalue()", "def read(self):\n\n # ret, image = self.video.read()\n (self.grabbed, self.frame) = self.cap.read()\n image = self.frame\n\n if image is not None:\n \"\"\"Update FPS, and incode received frame. \"\"\"\n self.fps.update()\n # TODO: add self.fps.fps() to image, if flagged raised.\n\n # We are using Motion JPEG, but OpenCV defaults to cap raw images,\n # so we must encode it into JPEG in order to correctly display the\n # video stream.\n\n # display a piece of text to the frame (so we can benchmark\n # fairly against the fast method)\n self.fps.stop()\n cv2.putText(image, \"FPS (simple): {:.2f}\".format(self.fps.fps()), (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n self.frame = image.copy()\n\n ret, jpeg = cv2.imencode('.jpg', image)\n return jpeg.tobytes()\n else:\n self.logger.debug(\"in 'get_frame', video.read not success\")", "def _prepare_frame(self, frame):\n\n initial_h, initial_w = frame.shape[:2]\n scale_h, scale_w = initial_h / float(self.input_height), initial_w / float(self.input_width)\n\n in_frame = cv2.resize(frame, (self.input_width, self.input_height))\n in_frame = in_frame.transpose((2, 0, 1))\n in_frame = in_frame.reshape(self.input_size)\n\n return in_frame, scale_h, scale_w", "def getFile(self, file_name: str) -> bytes:\n obj = self.driver.get_object(self.container_name, file_name)\n gen = self.driver.download_object_as_stream(obj)\n file_as_bytes = next(gen)\n return file_as_bytes", "def grabFrame(self):\r\n \r\n data, w, h, orientation = self.grabRawFrame()\r\n return Image.fromstring(\"RGB\", (w, h), data, \"raw\", \"BGR\", 0, orientation)", "def get_original_frame(self, frame):\n if self._pad_top is not None:\n frame = frame[self._pad_top:frame.shape[0] - self._pad_bottom,\n self._pad_left:frame.shape[1] - self._pad_right]\n if self._scale_factor is not None and self._scale_factor != 1:\n frame = cv2.resize(frame,\n (int(frame.shape[1] / self._scale_factor),\n int(frame.shape[0] / self._scale_factor)))\n return frame", "def get_frame(self):\n self._serial_port.close()\n self._serial_port.open()\n\n self._request_frame()\n\n serial_data = self._serial_port.readall()\n\n frame_start_idx = serial_data.find(BEGIN_FRAME) + len(BEGIN_FRAME)\n frame_end_idx = serial_data.find(END_FRAME)\n\n print serial_data[0:frame_start_idx]\n print serial_data[frame_end_idx:]\n\n raw_frame = serial_data[frame_start_idx:frame_end_idx]\n\n np_frame = np.fromstring(raw_frame, dtype=np.uint8)\n # np_frame = np_frame.reshape((30, 30))\n\n # image = cv2.fromarray(np_frame)\n\n # return image\n return np_frame", "def PIL_to_bytes(img, ext: str = 'png', quality: int = None):\n bytes_io = io.BytesIO()\n if quality is not None:\n img.save(bytes_io, format=ext, quality=quality)\n else:\n subsampling = -1 if 
ext.lower() in ['jpg', 'jpeg'] else 0\n img.save(bytes_io, format=ext, quality=100, subsampling=subsampling)\n bytes_io.seek(0)\n return bytes_io", "def get_byte(self, *args) -> \"PyObject *\":\n return _ida_fpro.qfile_t_get_byte(self, *args)", "def read_frame(self):\n return self.decode_frame(self.grab_frame())", "def picture_bytes(self):\n return bytearray(self.picture_base64[\"data\"][\"data\"])", "def set_frame_data(self, frame):\n ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))\n \n if not ret:\n raise Exception('Failed to set frame data')\n self.frame = jpeg", "def get_image(self, frame):\n rect = pygame.Rect(0, 0, self.width, self.height)\n msec = frame * config.MS_PER_FRAME\n frame = msec // 100\n rect.y = self.height - (frame % self.height)\n return self.double.subsurface(rect)", "def readbytes(self, *args) -> \"PyObject *\":\n return _ida_fpro.qfile_t_readbytes(self, *args)", "def outputSingleFrame(self, frame=None):\n if frame is None:\n frame = 1\n\n self.loadFringe(frame=frame)\n\n outputName = self.inputFilenames['ofd'][:-4] + '_single_f' + str(frame) + '.ofd'\n\n with open(outputName, 'wb') as f:\n self.rawBScan.astype('uint16').tofile(f)", "def get_bytes(self):\n return self.bytes[:-len(ImageStream.END_SEQUENCE)]", "def get_image(self) -> Image.Image:\n raw_buffer_data = self.get_raw_frame_buffer_object_data()\n image = Image.frombytes(\n \"RGBA\",\n self.get_pixel_shape(),\n raw_buffer_data,\n \"raw\",\n \"RGBA\",\n 0,\n -1,\n )\n return image", "def set_frame_data(self, frame):\n ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))\n if not ret:\n raise Exception('Failed to set frame data')\n self.frame = jpeg", "def set_frame_data(self, frame):\n ret, jpeg = cv2.imencode('.jpg', cv2.resize(frame, self.resolution))\n if not ret:\n raise Exception('Failed to set frame data')\n self.frame = jpeg", "def convertDetectFrame(self):\n \n self.processDetectFrame()\n try:\n img = QImage(self.currentDetectFrame,\n self.currentDetectFrame.shape[1],\n self.currentDetectFrame.shape[0],\n QImage.Format_RGB888\n )\n return img\n except:\n return None", "def readBytes(self, size=1):\n return self.bytes", "def test_to_byte_array(self):\n with Image.open(self.subject) as im:\n image = im.convert(\"RGB\")\n\n byte_array = image_helper.to_byte_array(image)\n\n self.assertGreater(len(byte_array), 0)", "def convert_to_raw(file):\n\n img = Image.open(file)\n img = img.convert('L') # convert to 8 bits per pixels\n (x, y) = img.size\n\n pixels = bytearray(list(img.getdata()))\n\n filename, file_extension = os.path.splitext(file)\n file2 = file.replace(file_extension, '.dat')\n file_name = str(x) + 'x' + str(y) + 'x8x1' + '_' + file2\n\n # print(file_name)\n\n with open(file_name, 'wb') as f:\n f.write(pixels)\n\n return file_name", "def _getJpegFrame(self, tileNum, entire=False): # noqa\n # This raises an InvalidOperationTiffError if the tile doesn't exist\n rawTileSize = self._getJpegFrameSize(tileNum)\n if rawTileSize <= 0:\n msg = 'No raw tile data'\n raise IOTiffError(msg)\n\n frameBuffer = ctypes.create_string_buffer(rawTileSize)\n\n bytesRead = libtiff_ctypes.libtiff.TIFFReadRawTile(\n self._tiffFile, tileNum,\n frameBuffer, rawTileSize).value\n if bytesRead == -1:\n msg = 'Failed to read raw tile'\n raise IOTiffError(msg)\n elif bytesRead < rawTileSize:\n msg = 'Buffer underflow when reading tile'\n raise IOTiffError(msg)\n elif bytesRead > rawTileSize:\n # It's unlikely that this will ever occur, but incomplete reads will\n # be checked for by looking 
for the JPEG end marker\n msg = 'Buffer overflow when reading tile'\n raise IOTiffError(msg)\n if entire:\n return frameBuffer.raw[:]\n\n if frameBuffer.raw[:2] != b'\\xff\\xd8':\n msg = 'Missing JPEG Start Of Image marker in frame'\n raise IOTiffError(msg)\n if frameBuffer.raw[-2:] != b'\\xff\\xd9':\n msg = 'Missing JPEG End Of Image marker in frame'\n raise IOTiffError(msg)\n if frameBuffer.raw[2:4] in (b'\\xff\\xc0', b'\\xff\\xc2'):\n frameStartPos = 2\n else:\n # VIPS may encode TIFFs with the quantization (but not Huffman)\n # tables also at the start of every frame, so locate them for\n # removal\n # VIPS seems to prefer Baseline DCT, so search for that first\n frameStartPos = frameBuffer.raw.find(b'\\xff\\xc0', 2, -2)\n if frameStartPos == -1:\n frameStartPos = frameBuffer.raw.find(b'\\xff\\xc2', 2, -2)\n if frameStartPos == -1:\n msg = 'Missing JPEG Start Of Frame marker'\n raise IOTiffError(msg)\n # If the photometric value is RGB and the JPEG component ids are just\n # 0, 1, 2, change the component ids to R, G, B to ensure color space\n # information is preserved.\n if self._tiffInfo.get('photometric') == libtiff_ctypes.PHOTOMETRIC_RGB:\n sof = frameBuffer.raw.find(b'\\xff\\xc0')\n if sof == -1:\n sof = frameBuffer.raw.find(b'\\xff\\xc2')\n sos = frameBuffer.raw.find(b'\\xff\\xda')\n if (sof >= frameStartPos and sos >= frameStartPos and\n frameBuffer[sof + 2:sof + 4] == b'\\x00\\x11' and\n frameBuffer[sof + 10:sof + 19:3] == b'\\x00\\x01\\x02' and\n frameBuffer[sos + 5:sos + 11:2] == b'\\x00\\x01\\x02'):\n for idx, val in enumerate(b'RGB'):\n frameBuffer[sof + 10 + idx * 3] = val\n frameBuffer[sos + 5 + idx * 2] = val\n # Strip the Start / End Of Image markers\n tileData = frameBuffer.raw[frameStartPos:-2]\n return tileData", "def _get_max_image_bytes(self):\n raise NotImplementedError(\"Abstract method not implemented\")", "def bytes(self, count):\n\n if count < 0:\n raise ValueError\n\n # fast path\n if self._bits == 0:\n data = self._fileobj.read(count)\n if len(data) != count:\n raise BitReaderError(\"not enough data\")\n return data\n\n return bytes(bytearray(self.bits(8) for _ in xrange(count)))", "def create_frame_blob(self):\n # self.image_blob = cv2.dnn.blobFromImage(\n # cv2.resize(self.frame, (300, 300)), 1.0, (300, 300),\n # (104.0, 177.0, 123.0), swapRB=False, crop=False)\n self.image_blob = cv2.dnn.blobFromImage(cv2.resize(self.frame, (300, 300)),\n 0.007843, (300, 300), 127.5)", "def grabRawFrame(self):\r\n \r\n self.surface = self.capture.get_image(self.surface)\r\n width, height = self.surface.get_size()\r\n return pygame.image.tostring(self.surface, 'RGB'), width, height, 1", "def image_to_fp(image, image_format):\n # type: (Any, str) -> IO[bytes]\n fp = io.BytesIO()\n image.save(fp, format=image_format) # save the content to fp\n fp.seek(0)\n return fp", "def createFileBytes(self, filename: unicode, offset: long, size: long, is_: java.io.InputStream, monitor: ghidra.util.task.TaskMonitor) -> ghidra.program.database.mem.FileBytes:\n ...", "def format(self, table):\n # requester can package returned byte stream into pytables in-memory file like this:\n # h5file = tables.open_file('array.h5', driver=\"H5FD_CORE\", driver_core_image=<received byte stream>, driver_core_backing_store=0)\n # array can then be accessed via\n # h5file.root.array.read()\n # OR, bytes can be saved into HDF5 file.\n name = 'array'\n arr = table.as_array()\n with tables.open_file('array.h5', \"w\", driver=\"H5FD_CORE\", driver_core_backing_store=0, filters=tables.Filters(complevel=5)) as 
h5file: # compression level 5\n h5file.create_array(h5file.root, name, arr)\n return h5file.get_file_image()", "def get_data_ge(logger, file):\n fp = open(file, 'rb')\n offset = 8192\n\n fp.seek(18)\n size, nframes = st.unpack('<ih',fp.read(6))\n if size != 2048:\n logger.error('GE image size unexpected: '+str(size))\n return None, 0, 0\n\n fsize = os.stat(str(fp).split(\"'\")[1]).st_size\n nframes_calc = (fsize - offset)/(2*size**2)\n\n if nframes != nframes_calc:\n logger.error('GE number frames unexpected: '+str(nframes))\n return None, 0, 0\n\n pos = offset\n fp.seek(pos)\n\n return fp, int(nframes_calc), size*size", "def save_frame(frame):\n try:\n img = Image.fromarray(frame.array, 'RGB')\n out_path = settings['app']['web_path']\n if not os.path.isabs(out_path):\n out_path = os.path.join(basepath, out_path)\n filename = os.path.join(out_path, 'static', 'latest.jpg')\n tmp_filename = '{}.part'.format(filename)\n img.save(tmp_filename, 'jpeg')\n os.rename(tmp_filename, filename)\n except Exception, error:\n print('Error saving frame: {}'.format(error))", "def processFrame(frame, shape=(84, 84)):\n frame = frame.astype(np.uint8) # cv2 requires np.uint8\n # Apply a rgb filter to convert RGB to Gray Scale\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n # crop image OpenCv2 function to format img[y:y + h, x:x + w]\n frame = frame[34:34+160, :160] # crop image\n frame = cv2.resize(frame, shape, interpolation=cv2.INTER_NEAREST)\n frame = frame.reshape((*shape, 1))\n #cv2.imshow('Cropped Image', frame)\n\n return frame", "def get_fame(self, w, h):\n frame = self.get_frame_read()\n frame = frame.frame\n frame = cv2.resize(frame, (w, h))\n\n return frame", "def ImgToBmp( filePath, size ):\n img = wx.Image( filePath )\n img.Rescale( size[0], size[1] )\n bmp = wx.BitmapFromImage( img )\n return bmp", "def get_mediafile_blob_data(self, old):\n if old[\"is_directory\"]:\n return None\n\n try:\n db_mediafile = Mediafile.objects.get(pk=old[\"id\"])\n except Mediafile.DoesNotExist:\n return None\n filename = db_mediafile.original_filename\n\n if use_mediafile_database:\n with connections[\"mediafiles\"].cursor() as cursor:\n cursor.execute(\n f\"SELECT data FROM {mediafile_database_tablename} WHERE id = %s\",\n [old[\"id\"]],\n )\n row = cursor.fetchone()\n if row is None:\n return None\n data = row[0]\n else:\n data = db_mediafile.mediafile.open().read()\n\n blob = base64.b64encode(data).decode(\"utf-8\")\n return filename, len(data), blob", "def get_file(self):\n img_hex = self._segments['preceding']\n\n if self.has_exif:\n img_hex += self._segments['APP1'].get_segment_hex()\n\n img_hex += self._segments['succeeding']\n\n return binascii.unhexlify(img_hex)", "def __convert_frames(self, frames):\n f = frames\n fragment = sppasChannelFrames(f)\n\n # Convert the sample width if it needs to\n if self._channel.get_sampwidth() != self._sampwidth:\n fragment.change_sampwidth(self._channel.get_sampwidth(), self._sampwidth)\n\n # Convert the self._framerate if it needs to\n if self._channel.get_framerate() != self._framerate:\n fragment.resample(self._sampwidth, self._channel.get_framerate(), self._framerate)\n\n return fragment.get_frames()", "def _convert_frame_data(jpeg_data):\n decoded_frames = tf.image.decode_jpeg(jpeg_data)\n return tf.image.convert_image_dtype(decoded_frames, dtype=tf.float32)", "def get_input_frame(self, frame):\n self._scale_factor = min(self._input_shape[1] / frame.shape[0],\n self._input_shape[0] / frame.shape[1])\n scaled = cv2.resize(frame, (int(frame.shape[1] * 
self._scale_factor),\n int(frame.shape[0] * self._scale_factor)))\n pad_width = (self._input_shape[0] - scaled.shape[1]) / 2\n pad_height = (self._input_shape[1] - scaled.shape[0]) / 2\n # floor&ceil values to account for possibly odd amount of padding\n self._pad_top = int(np.floor(pad_height))\n self._pad_left = int(np.floor(pad_width))\n self._pad_bottom = int(np.ceil(pad_height))\n self._pad_right = int(np.ceil(pad_width))\n nn_frame = cv2.copyMakeBorder(scaled, self._pad_top, self._pad_bottom,\n self._pad_left, self._pad_right,\n cv2.BORDER_CONSTANT)\n return nn_frame.transpose(2, 0, 1)", "def load_blob(abspath):\n loaded_numpy = np.load(abspath)\n loaded_frames = loaded_numpy['frames']\n loaded_shape = loaded_numpy['shape']\n no_frames = loaded_shape[0]\n width = loaded_shape[1]\n height = loaded_shape[2]\n frames_unpacked = np.unpackbits(loaded_frames)\n frames_unpacked = frames_unpacked.reshape((no_frames, width, height))\n return frames_unpacked", "def saveFrame(filepath, frame):\n if not filepath.lower().endswith('.png'):\n filepath += '.png'\n image = Image.fromarray(frame)\n image.save(filepath)", "def __processImage(self, f):\n try:\n imgobj = Image.open(f).convert('RGB')\n except:\n return None\n w, h = imgobj.size\n if w < h:\n # reduce width to required dimension and adjust height accordingly\n new_h = int(h * self.PROCESSING_DIM / w)\n resizedImg = imgobj.resize((self.PROCESSING_DIM, new_h))\n\n y_start = int(new_h / 2 - self.PROCESSING_DIM / 2)\n processedImage = resizedImg.crop((0, y_start, self.PROCESSING_DIM, y_start + self.PROCESSING_DIM))\n\n else:\n # reduce height to required dimension and adjust width accordingly\n new_w = int(w * self.PROCESSING_DIM / h)\n resizedImg = imgobj.resize((new_w, self.PROCESSING_DIM))\n\n x_start = int(new_w / 2 - self.PROCESSING_DIM / 2)\n processedImage = resizedImg.crop((x_start, 0, x_start + self.PROCESSING_DIM, self.PROCESSING_DIM))\n\n return processedImage", "def content_for_run(self,run=None,imagefile=None):\n if imagefile==None: imagefile=self.imagefile\n if run.bytes== -1:\n return chr(0) * run.bytes\n elif run.fill is not None : \n return chr(run.fill) * run.bytes\n else:\n imagefile.seek(run.img_offset)\n return imagefile.read(run.bytes)", "def read_bytes(self, size):\n return self.read('bytes:'+str(size))", "def get_image_size(frame) -> tuple:\n return tuple(frame.shape[1::-1])", "def get_frame(filename, frametime=None, frame_number=None, frame_string=None,\n pix_fmt='gray', bufsize=10**9, path_to_ffmpeg='ffmpeg', vsync='drop'):\n v_width, v_height = get_video_aspect(filename)\n \n if pix_fmt == 'gray':\n bytes_per_pixel = 1\n reshape_size = (v_height, v_width)\n elif pix_fmt == 'rgb24':\n bytes_per_pixel = 3\n reshape_size = (v_height, v_width, 3)\n else:\n raise ValueError(\"can't handle pix_fmt:\", pix_fmt)\n \n # Generate a frame string if we need it\n if frame_string is None:\n frame_string = ffmpeg_frame_string(filename, \n frame_time=frametime, frame_number=frame_number)\n \n # Create the command\n command = [path_to_ffmpeg, \n '-ss', frame_string,\n '-i', filename,\n '-vsync', vsync,\n '-vframes', '1', \n '-f', 'image2pipe',\n '-pix_fmt', pix_fmt,\n '-vcodec', 'rawvideo', '-']\n \n # To store result\n res_l = []\n frames_read = 0\n\n # Init the pipe\n # We set stderr to PIPE to keep it from writing to screen\n # Do this outside the try, because errors here won't init the pipe anyway\n pipe = subprocess.Popen(command, \n stdout=subprocess.PIPE, stderr=subprocess.PIPE, \n bufsize=bufsize)\n\n try:\n read_size = 
bytes_per_pixel * v_width * v_height\n raw_image = pipe.stdout.read(read_size) \n if len(raw_image) < read_size:\n raise OutOfFrames \n flattened_im = np.fromstring(raw_image, dtype='uint8')\n frame = flattened_im.reshape(reshape_size) \n \n except OutOfFrames:\n print(\"warning: cannot get frame\")\n frame = None\n \n finally:\n # Restore stdout\n pipe.terminate()\n\n # Keep the leftover data and the error signal (ffmpeg output)\n stdout, stderr = pipe.communicate() \n \n # Convert to string\n if stdout is not None:\n stdout = stdout.decode('utf-8')\n if stderr is not None:\n stderr = stderr.decode('utf-8')\n \n return frame, stdout, stderr", "def get_bytes(ea, size, original=False):\n if ea is None:\n ea = ida_kernwin.get_screen_ea()\n res = b\"\"\n if original:\n for i in range(size):\n res += int2byte(ida_bytes.get_original_byte(ea + i))\n else:\n for i in range(size):\n res += int2byte(ida_bytes.get_wide_byte(ea + i))\n return res", "def get_image(self):\n return self.process_raw_image(self.get_raw_image())", "def get_image_size(self):", "def encoder(cls, frames) -> bytearray:\n\t\tframe_it = iter(frames)\n\t\tprev = next(frame_it).copy()\n\t\tall_events = get_events_by_position(frames)\n\n\t\t# Encode resolution and number of frames\n\t\tyield struct.pack('>3I', prev.shape[0], prev.shape[1], len(frames))\n\n\t\t# Encode first frame\n\t\tyield prev.tobytes()\n\n\t\t# Yield events for each pixel in turn\n\t\tyield from cls._events_to_bytes(all_events)", "def get_file_mine(file, size_to_read=(5 * (1024 * 1024))):\n return magic.from_buffer(file.read(size_to_read), mime=True)", "def read_as_bytes(filename):\n try:\n with open(filename, \"rb\") as file:\n bytes = array.array(\"B\")\n bytes.frombytes(file.read())\n return bytes\n except FileNotFoundError:\n print(f\"File not found: {filename}\")\n exit()", "def decode_frame(self, results, blob):\n height = int(results['height'])\n width = int(results['width'])\n channels = int(results['channels'])\n encoding = None\n\n if 'encoding_type' and 'encoding_level' in results:\n encoding = {\"type\": results['encoding_type'],\n \"level\": results['encoding_level']}\n # Convert to Numpy array and reshape to frame\n if isinstance(blob, list):\n # If multiple frames, select first frame for\n # visualization\n blob = blob[0]\n frame = np.frombuffer(blob, dtype=np.uint8)\n if encoding is not None:\n frame = np.reshape(frame, (frame.shape))\n try:\n frame = cv2.imdecode(frame, 1)\n except cv2.error as ex:\n self.log.error(\"frame: {}, exception: {}\".format(frame, ex))\n else:\n self.log.debug(\"Encoding not enabled...\")\n frame = np.reshape(frame, (height, width, channels))\n\n return frame", "def __get_next_yuv_frame(self):\n raw_frame_buffer = self.__file_object.read(self.__frame_raw_data_size)\n\n # Ignore FRAME header\n self.__file_object.readline()\n return raw_frame_buffer", "def serialize_frames_to_binary(frames, dt=1/30):\n\n if frames is not None:\n if not isinstance(frames, numpy.ndarray):\n raise TypeError(\"non-numpy-ndarray passed to serialize_frames\")\n \n if (dt < 0.):\n raise TypeError(\"time period dt should be positive real number\")\n \n from .FacesoftFlatbuffersSchema import SerializedKeyframesTimeline\n\n # samples per frame\n N = frames.shape[1]\n\n builder = flatbuffers.Builder(1024)\n f = builder.CreateNumpyVector(frames.ravel().astype(numpy.float32))\n\n SerializedKeyframesTimeline.SerializedKeyframesTimelineStart(builder)\n SerializedKeyframesTimeline.SerializedKeyframesTimelineAddKeyframesData(builder, f)\n 
SerializedKeyframesTimeline.SerializedKeyframesTimelineAddVersion(builder, get_version())\n SerializedKeyframesTimeline.SerializedKeyframesTimelineAddTimeBetweenFramesSecs(builder, dt)\n SerializedKeyframesTimeline.SerializedKeyframesTimelineAddSamplesPerFrame(builder, N)\n\n\n serializedData = SerializedKeyframesTimeline.SerializedKeyframesTimelineEnd(builder)\n\n builder.Finish(serializedData)\n\n return builder.Output()", "def image2bytes(image: np.ndarray, extension=\".png\"):\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) if len(image.shape) == 3 else image\n return cv2.imencode(extension, image)[1].tobytes()", "def FrameSize(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.traffic.trafficitem.configelement.framesize.framesize import FrameSize\n return FrameSize(self)._select()", "def decode(self, path: str = None) -> str:\n if path:\n image = Image.open(path)\n else:\n image = self._encoded_image\n\n width, height = image.size\n pixels = image.load()\n\n binary_string = \"\"\n for x_pixel in range(width):\n for y_pixel in range(height):\n rgb = pixels[x_pixel, y_pixel]\n red, blue, green = self._rgb_to_binary(rgb)\n binary_string += red[-1] + blue[-1] + green[-1]\n\n return self._binary_string_to_str(binary_string, end=self._end_message)", "def get_frame(self,t):\n\n return pyfx.util.to_array(self._img_list[t],dtype=np.uint8,\n num_channels=4)", "def getframe(self, num):\n if num < 0 or num > self.nframes:\n raise Exception(\"Requested frame number is out of range\")\n # Do a deep copy of the header to make a new one\n newheader = {}\n for k in self.header.keys():\n newheader[k] = self.header[k]\n frame = pixiimage(header=newheader)\n frame.nframes = self.nframes\n frame.sequencefilename = self.sequencefilename\n infile = frame._open(self.sequencefilename, \"rb\")\n frame._readframe(infile, num)\n infile.close()\n return frame", "def format(self,size):\n if self.__units is None:\n # Human-readable\n return bcf_utils.format_file_size(size)\n elif self.__units == 'bytes':\n # Raw bytes\n return size\n else:\n return bcf_utils.format_file_size(size,units=self.__units)[0:-1]", "def Framesize(self):\n\t\treturn self._get_attribute('framesize')", "def decodeFrameForObjectDetection(frameJson):\n img = frameJson[\"imageBase64\"]\n nparr = np.fromstring(img.decode('base64'), np.uint8)\n newImage = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n return newImage", "def process_frame(self, frame):\n\t\treturn frame", "def to_PIL(frame):\n image = Image.fromarray(frame)\n image = image.convert('RGB')\n return image", "def _blob(self):\n return self._load_blob", "def read_gzip_bytes(self):\n with gzip.open(self, 'rb') as f:\n return f.read()", "def read(self):\r\n\t\t# get data from camera\r\n\t\tarray = self.ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, self.pitch, copy=False)\r\n\t\t# get frame as numpy array\r\n\t\tframe = np.reshape(array,(self.height.value, self.width.value, self.bytes_per_pixel))\r\n\t\t\r\n\t\t\"\"\"\r\n\t\tcamera_matrix = np.array([\r\n\t\t\t[4.5330796457901283e+02, 0., 6.1902229288626302e+02],\r\n\t\t\t[0., 4.5369175559310276e+02, 5.1298362120979994e+02],\r\n\t\t\t[0., 0., 1.]])\r\n\t\t\r\n\t\tdist_coeffs = np.array([\r\n\t\t\t-3.1812973406286371e-01, 9.6396352148682182e-02,\r\n\t\t\t2.9601124432187590e-03, 9.7700591472463412e-04,\r\n\t\t\t-1.1929681608809075e-02\r\n\t\t])\r\n\r\n\t\tframe = cv2.undistort(frame, camera_matrix, dist_coeffs, camera_matrix)\r\n\t\t\"\"\"\r\n\r\n\t\treturn frame", "def get_frame(fn, t, img_i=-1):\n 
in_ext = os.path.splitext(fn)[1]\n\n if in_ext == '.h5':\n if h5_is_itk(fn):\n return None # todo\n else:\n # assume file was converted from .lif\n sel = (slice(t, t + 1),)\n img = dd.io.load(fn, '/stack', sel=sel)\n img = img.squeeze()\n return img\n elif in_ext == '.lif':\n ir = lif_open(fn)\n if img_i == -1:\n img_i = lif_find_timeseries(fn)\n shape = get_shape(fn, img_i)\n frame = np.zeros(shape[1:], np.uint16)\n for z in range(shape[1]):\n frame[z] = ir.read(t=t, z=z, c=0, series=img_i, rescale=False)\n return frame\n\n else:\n raise UnsupportedFormatException('h5 and lif as time series format supported.')", "def _get_image_blob(im):\n im_orig = im.astype(np.float32, copy=True)\n # im_orig -= cfg.PIXEL_MEANS\n\n im_shape = im_orig.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n processed_ims = []\n im_scale_factors = []\n\n # print('cfg.TEST.SCALES: {}'.format(cfg.TEST.SCALES)),\n\n for target_size in cfg.TEST.SCALES:\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:\n im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)\n im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale_factors.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = im_list_to_blob(processed_ims)\n blob /= 255.0\n\n return blob, np.array(im_scale_factors)", "def byte_size(self) -> int:\n return pixel_formats[self._dtype][3] * self._components * self.width * self.height", "def toTexture(self) -> bytes:\n header = self._makeSfaHeader()\n\n # read pixels and convert to RGB565\n # XXX this can probably be done faster\n buf = np.array(dtype=np.uint16, ndmin=3)\n for y in range(self.height):\n for x in range(self.width):\n rgba = self.pixels[x,y]\n r = rgba >> 24 # XXX verify order\n g = (rgba >> 16) & 0xFF\n b = (rgba >> 8) & 0xFF\n buf[x,y] = (b >> 3) | ((g >> 2) << 5) | ((r >> 3) << 11)\n\n # byteswap\n buf = ((buf & 0x00FF) << 8) | ((buf & 0xFF00) >> 8)\n return header + buf.tobytes()", "def getFrameInfo(self, frameId, dataset = None):\n img_dir_left, img_dir_right = self._getImageDirs(dataset)\n img_file_left = os.path.join(img_dir_left, \"%06d.png\" % frameId)\n img_file_right = os.path.join(img_dir_right, \"%06d.png\" % frameId)\n calibration = self._getCamCalibration(frameId, dataset)\n\n return {\n 'dataset': dataset,\n 'frameId': frameId,\n 'img_left': loadImage(img_file_left) if os.path.isfile(img_file_left) else None,\n 'img_right': loadImage(img_file_right) if os.path.isfile(img_file_right) else None,\n 'calibration': calibration,\n 'lidar': self._getLidarPoints(calibration, frameId, dataset),\n 'labels': self._getFrameLabels(frameId, dataset),\n }", "def get_new_image(self):\n img = self.vid_mem_reader.get_latest_image()\n if not img: \n return None\n img = img[0]\n return convert_16to8(img)", "def read_binary(self):\n with self.open(\"rb\") as f:\n return f.read()", "def get_image(self):\n self.flush_buffer()\n _, frame = self.cam.read()\n shift_frame = self.perspective_shift(frame)\n #shift_frame = None\n return frame, shift_frame", "def _get_image_blob(im):\n im_orig = im.astype(np.float32, copy=True)\n\n im_shape = im_orig.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n processed_ims = []\n im_scale_factors = []\n\n for target_size in 600:\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the 
biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:\n im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)\n im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale_factors.append(im_scale)\n processed_ims.append(im)" ]
[ "0.5845967", "0.5829252", "0.568239", "0.5598484", "0.5597409", "0.5580428", "0.5558993", "0.55013925", "0.54556245", "0.54468995", "0.54443717", "0.54354507", "0.54354507", "0.54297215", "0.5396869", "0.5359741", "0.5324554", "0.5318987", "0.53071946", "0.53018034", "0.5250378", "0.52319074", "0.52235484", "0.52053475", "0.5169272", "0.5164558", "0.5150451", "0.51420194", "0.51172227", "0.5109753", "0.51000565", "0.50976145", "0.50946826", "0.50928795", "0.5092706", "0.5083144", "0.5057111", "0.50559276", "0.5055434", "0.5055434", "0.5055351", "0.5051477", "0.5046219", "0.5036478", "0.5035615", "0.5034703", "0.5028506", "0.49918884", "0.49784675", "0.49752885", "0.4969763", "0.49651757", "0.49286228", "0.49267352", "0.4915634", "0.49030533", "0.48982716", "0.48741445", "0.48714924", "0.48607385", "0.48538584", "0.4844704", "0.48413813", "0.48411426", "0.48385522", "0.48309848", "0.4827744", "0.48198584", "0.48159897", "0.48146003", "0.48000267", "0.47948447", "0.47896883", "0.47868428", "0.47855568", "0.4785221", "0.4778058", "0.4769269", "0.47639534", "0.47569928", "0.47487038", "0.47480392", "0.47464663", "0.47378263", "0.4734493", "0.4725876", "0.47236848", "0.4707167", "0.47023612", "0.46994278", "0.46963203", "0.46926227", "0.4673072", "0.46677992", "0.46661726", "0.4665741", "0.4663948", "0.46638328", "0.4660469", "0.46569332" ]
0.7540226
0
Given a dictionary, converts both the keys and values of it to string and returns it.
def stringify_dict(d: dict) -> dict: return {str(key): str(value) for key, value in d.items()}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keys2string(dictionary):\r\n if not isinstance(dictionary, dict):\r\n return dictionary\r\n return dict((str(k), keys2string(v)) \r\n for k, v in dictionary.items())", "def convert_keys_to_string(dictionary):\n if not isinstance(dictionary, dict):\n return dictionary\n return dict((str(k), convert_keys_to_string(v)) \n for k, v in dictionary.items())", "def turn_keys_into_str(d):\n return { str(k) : v for k,v in d.items() }", "def get_dict_str(d: dict) -> str:\n\treturn str({str(u): str(v) for u, v in d.items()})", "def _dict_to_string(self, dictionary):\n st = ''\n for i in range(len(dictionary)):\n st = st + dictionary[i]\n return st", "def dict2str(dic):\n return ','.join(\"%s=%s\" % (key, val)\n for key, val in sorted(dic.items()))", "def dict2str(d):\n els = ['%s=%s' % (k,v) for k,v in sorted(d.items())]\n return seq2str(els)", "def dict_2_string(d):\n buff = io.StringIO()\n print_dictionary(d, output=buff)\n return buff.getvalue()", "def dict_keys_to_strings(d):\n return dict((str(k), v) for (k, v) in d.iteritems())", "def str_dict(d):\n return \", \".join(\"%s=%s\" % (k, d[k]) for k in d)", "def dict_values_to_text(d):\n body = []\n def recur(d):\n for v in d.values():\n if type(v) == dict:\n recur(v)\n elif v != \"\" and type(v) != bool:\n body.append(str(v))\n recur(d)\n # print(f\"dict to text {d['_id']} success\")\n return \" \".join(body)", "def stringify_values(dictionary):\n\n dict_copy = copy.deepcopy(dictionary)\n\n for key, value in dict_copy.iteritems():\n if isinstance(value, dict):\n dict_copy[key] = stringify_values(value)\n else:\n dict_copy[key] = str(value)\n return dict_copy", "def dict_to_perl_string(input_dict):\n pairs = []\n for k, v in sorted(filter(lambda k_v: k_v[1] != None, input_dict.items())):\n k = str(k)\n t = type(v).__name__\n if t == 'str':\n pairs.append(\"\\\"%s\\\" => \\\"%s\\\"\" % (k, escape_perl_string(v)))\n elif t == 'int':\n pairs.append(\"\\\"%s\\\" => %d\" % (k, v))\n elif t == 'float':\n pairs.append(\"\\\"%s\\\" => %f\" % (k, v))\n elif t == 'list':\n pairs.append(\"\\\"%s\\\" => %s\" % (k, list_to_perl_string(v)))\n elif t == 'dict':\n pairs.append(\"\\\"%s\\\" => %s\" % (k, dict_to_perl_string(v)))\n elif t == 'bool':\n if str(v) == \"True\":\n pairs.append(\"\\\"%s\\\" => %d\" % (k, 1))\n else:\n raise Exception(\"Unsupported type \" + str(t))\n return \"{%s}\" % \", \".join(pairs)", "def _dict_var_to_dict_str(dictionary):\n if isinstance(dictionary, dict):\n return {s.attr('name') if isinstance(s, nnvm.symbol.Symbol) else s:\n dictionary[s] for s in dictionary}\n else:\n return dictionary", "def keysToString(indict):\r\n newD = {}\r\n for k, v in indict.iteritems():\r\n newD[k.name] = v\r\n return newD", "def dictionarytoraw(dict):\n\n data = \"\"\n\n for key, val in dict.items():\n if isinstance(val,dict):\n for kkey, vval in iter(val.items()):\n if kkey is None:\n data += str(key) + NULL + str(vval) + NULL\n else:\n data += ''.join([str(key), '\\x1c', str(kkey), NULL, str(vval), NULL])\n else:\n data += str(key) + NULL + str(val) + NULL\n\n return (len(data) > 0 and data) or NULL", "def dictToSortedStr(indict: Dict[str, Any]) -> str:\n\n # first sort\n sortedlist = sorted(indict.items(), key=lambda item: item[0])\n\n # now assemble output\n outlist = [f\"{t[0]}:{str(t[1])}\" for t in sortedlist]\n\n return \",\".join(outlist)", "def stringify_keys(d):\n di = copy.deepcopy(d)\n for key in di.keys():\n # check inner dict\n if isinstance(d[key], dict):\n value = stringify_keys(d[key])\n else:\n value = d[key]\n\n # convert 
nonstring to string if needed\n if not isinstance(key, str):\n try:\n d[str(key)] = value\n except Exception:\n try:\n d[repr(key)] = value\n except Exception:\n raise\n\n # delete old key\n del d[key]\n return d", "def dict2argstr(d: Dict[str, Any]) -> str:\n return \",\".join(\"{!s}={!r}\".format(key, val) for (key, val) in d.items())", "def dict_to_str(origin_value, encode=None):\n value = copy.deepcopy(origin_value)\n for k, v in value.items():\n if isinstance(v, dict):\n value[k] = dict_to_str(v, encode)\n continue\n\n if isinstance(v, list):\n value[k] = list_to_str(v, encode)\n continue\n\n if encode:\n value[k] = encode(v)\n else:\n value[k] = v\n\n return value", "def param_dict_to_str(data):\n if data is None or not data:\n return \"\"\n pairs = []\n for key, val in data.items():\n if isinstance(val, (list, tuple, set)) or is_numpy_1d_array(val):\n pairs.append(str(key) + '=' + ','.join(map(str, val)))\n elif isinstance(val, string_type) or isinstance(val, numeric_types) or is_numeric(val):\n pairs.append(str(key) + '=' + str(val))\n elif val is not None:\n raise TypeError('Unknown type of parameter:%s, got:%s'\n % (key, type(val).__name__))\n return ' '.join(pairs)", "def _reprOfStringToValueMap (stringMap : Map) -> String:\n\n entrySeparator = u\"§\"\n entryTemplate = \"%s: %s\"\n keyList = sorted(list(stringMap.keys()))\n result = \"\"\n \n for key in keyList:\n value = stringMap[key] \n result += (iif(result == \"\", \"\", entrySeparator)\n + entryTemplate % (key, value))\n \n result = \"{\" + result + \"}\";\n return result", "def _render_dict_to_string(self, adict):\n alist = [ \"%s:%s\" % (self._render_thing(k), \n self._render_thing(adict[k])\n ) for k in adict.keys()]\n return \",\".join(self._render_row(alist))", "def render_dict(dict):\n\t\treturn str.encode(str(dict))", "def to_str(arr_v_str):\n # print(arr_v_str)\n for k,v in arr_v_str.items():\n arr_v_str[k] = str(v)\n # print(arr_v_str[k])\n\n return arr_v_str", "def convert_values_to_strings(dict,list_sep=', '):\n d=dict.copy()\n for key in dict.keys():\n if not isinstance(dict[key],str):\n try:\n d[key]=list_sep.join(dict[key])\n except:\n del d[key]\n return d", "def _to_string(self) -> str:\n\n string_list = []\n for key, value in self.__dict__.items():\n if isinstance(value, dict):\n string_list.append(key)\n string_list.extend('\\n'.join([\"Key: {:24}\\tValue: {}\".format(_key, _value) for _key, _value in value.items()]))\n else:\n string_list.append(\"Key: {:24}\\tValue: {}\\n\".format(key, value))\n return ''.join(string_list)", "def simplified_document_to_string(doc: dict) -> str:\n return \" \".join([str(value) for value in doc.values()])", "def to_string(cls, hierarchical_dict: dict) -> str:\n keys = cls.get_all_keys(hierarchical_dict)\n keys = sorted(keys)\n res = \"\"\n for key in keys:\n res += f\"{key} = {FuseUtilsHierarchicalDict.get(hierarchical_dict, key)}\\n\"\n\n return res", "def _DictToString(self, value_dict, str_length=5):\n\n def FormatValue(v, value_format, str_length):\n if isinstance(v, (int, float)):\n return value_format % v\n else:\n return str(v).rjust(str_length)\n\n text = []\n blank = '--'.rjust(str_length)\n\n if self._show_label:\n text.append(' '.join(k.rjust(str_length) for k in self._node_labels))\n\n if not self._precision:\n value_format = '%% %dd' % str_length\n else:\n value_format = '%% %d.%df' % (str_length, self._precision)\n\n text.append(' '.join(\n [FormatValue(value_dict[k], value_format, str_length)\n if k in value_dict else blank for k in self._node_labels]))\n\n 
return '\\n'.join(text)", "def native_stringify_dict(dct_or_tuples, encoding='utf-8', keys_only=True):\n d = {}\n for k, v in iteritems(dict(dct_or_tuples)):\n k = _to_native_str(k, encoding)\n if not keys_only:\n if isinstance(v, dict):\n v = native_stringify_dict(v, encoding=encoding, keys_only=keys_only)\n elif isinstance(v, list):\n v = [_to_native_str(e, encoding) for e in v]\n else:\n v = _to_native_str(v, encoding)\n d[k] = v\n return d", "def _filterDictToStr(self, filterDict):\n values = []\n for key, vals in filterDict.items():\n if key not in ('contentRating', 'label', 'contentRating!', 'label!'):\n raise BadRequest(f'Unknown filter key: {key}')\n values.append(f\"{key}={'%2C'.join(vals)}\")\n return '|'.join(values)", "def lists_to_strings(dictionary, separator=' '):\n for key in dictionary:\n if isinstance(dictionary[key], list):\n dictionary[key] = separator.join(str(elem) for elem in dictionary[key])", "def dict_to_str(self, param_dict: Dict[str, Any], num_tabs: int) -> str:\n if not isinstance(param_dict, dict):\n return str(param_dict)\n else:\n append_newline = \"\\n\" if num_tabs > 0 else \"\"\n return append_newline + \"\\n\".join(\n [\n \"\\t\"\n + \" \" * num_tabs\n + \"{0}:\\t{1}\".format(\n x, self.dict_to_str(param_dict[x], num_tabs + 1)\n )\n for x in param_dict\n ]\n )", "def dictKeysToCSV(d):\n return \",\".join([str(val) for val in nestedDictKeys(d)])", "def str_list(d):\n\td_new = {}\n\tfor key, value in d.items():\n\t\td_new[key] = str(value)\n\n\treturn d_new", "def ToString():\n @pass_failures\n def to_string(data):\n value = data.value\n if isinstance(value, Mapping):\n value = {k: str(v) for k, v in value.items()}\n else:\n value = str(value)\n data.value = value\n return data\n return to_string", "def _format_dict(self, dict):\n\n result = \"\"\n for k, v in dict.items():\n result += \"\\n{0}: {1}\".format(k.capitalize(), v)\n\n return result", "def param2str(val):\n if isinstance(val, dict):\n try:\n return json.dumps(val)\n except TypeError:\n s = str(val)\n print(\"[WARNING] cannot convert value ('%s') to a string with json.dumps\" % s)\n\n return str(val)", "def convert_str(feature_vectors):\n for key in feature_vectors:\n feature_vectors[key] = map(lambda el: str(el), feature_vectors[key])", "def dict_to_kv(dict):\n return \",\".join(\n f\"{k}={','.join(v) if isinstance(v, list) else v}\"\n for k, v in dict.items()\n if v is not None\n )", "def stringify(input):\n if isinstance(input, dict):\n return dict([(stringify(key), stringify(value)) for key, value in input.iteritems()])\n elif isinstance(input, list):\n return [stringify(element) for element in input]\n elif isinstance(input, unicode):\n return input.encode('utf-8')\n else:\n return input", "def compact_idstr(dict_):\n from netharn import util\n import ubelt as ub\n short_keys = util.shortest_unique_prefixes(dict_.keys())\n short_dict = ub.odict(sorted(zip(short_keys, dict_.values())))\n idstr = ub.repr2(short_dict, nobr=1, itemsep='', si=1, nl=0,\n explicit=1)\n return idstr", "def sd_dict_to_syslog_str(cls, sd_dict):\n syslog_sds = ''\n for sd_key, sd_val in list(sd_dict.items()):\n syslog_str = '[{sd_key}'.format(sd_key=sd_key)\n\n for sub_key, sub_val in list(sd_val.items()):\n syslog_str = '{orig} {key}=\"{value}\"'.format(\n orig=syslog_str, key=sub_key, value=sub_val)\n syslog_str += ']'\n\n syslog_sds += syslog_str\n\n return syslog_sds", "def iterable_to_string(iterable, quoted=False):\n if isinstance(iterable, dict):\n iterable = [unicode(i) for i in iterable.values()]\n else:\n 
iterable = [unicode(i) for i in iterable]\n\n if quoted:\n iterable = ['\\'%s\\'' % unicode(i) for i in iterable]\n\n return ', '.join(iterable)", "def to_str(self):\n return '\\n'.join(str(entry) for entry in self.values())", "def key_to_string(cls, key):\n return '_'.join(map(str, key))", "def dict_to_json_str(d):\n\n json_entry = '\"{}\":{}'\n json_entry_str = '\"{}\":\"{}\"'\n entries = []\n\n for entry in d:\n key = entry\n value = d[entry]\n\n try:\n basestring\n except NameError:\n basestring = str\n\n if not value:\n value = False\n\n elif isinstance(value, basestring):\n value = value.replace(u'\"', u'\\\\\"')\n entries.append(json_entry_str.format(key, value))\n\n elif isinstance(value, bool):\n value = 'true' if value else 'false'\n entries.append(json_entry.format(key, value))\n\n elif isinstance(value, dict):\n entries.append(json_entry.format(key, dict_to_json_str(value)))\n\n else:\n entries.append(json_entry.format(key, value))\n\n return '{} {} {}'.format('{', ','.join(entries), '}')", "def coords_dict_to_coords_string(coords):\n longitude, latitude = None, None\n for k,v in coords.items():\n if \"at\" in k:\n latitude = v\n if \"ong\" in k:\n longitude = v\n if not longitude and latitude:\n print(\"Unable to identify longitude and latitude keys\")\n return \"\"\n coords_string = \"{:.2f}_{:.2f}\".format(longitude, latitude)\n return coords_string", "def to_str(v, encode=None):\n if isinstance(v, basestring_type):\n return v\n\n if isinstance(v, dict):\n return dict_to_str(v, encode)\n\n if isinstance(v, Iterable):\n return list_to_str(v, encode)\n\n if encode:\n return encode(v)\n else:\n return v", "def facts_to_str(user_data: Dict[str, str]) -> str:\n arg = list()\n\n for key, value in user_data.items():\n arg.append(f'{key} - {value}')\n\n return \"\\n\".join(arg).join(['\\n', '\\n'])", "def _make_trans_from_dict(translations):\n\n from_str = ''\n to_str = ''\n for key in translations:\n from_str += key\n to_str += translations[key]\n return str.maketrans(from_str, to_str)", "def stringify(self, root):\n for item in root:\n if isinstance(item, dict):\n kvs = list(item.items())\n for (key, value) in kvs:\n del item[key]\n item[str(key)] = str(value)\n else:\n for (pos, value) in enumerate(item):\n item[pos] = str(value)\n return root", "def dictValuesToCSV(d):\n return \",\".join([str(val) for val in nestedDictValues(d)])", "def pretty(d, indent=0):\n\tret_str = ''\n\tfor key, value in d.items():\n\n\t\tif isinstance(value, collections.Mapping):\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\n'\n\t\t\tret_str = ret_str + pretty(value, indent + 1)\n\t\telse:\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\t' * (indent + 1) + ' => ' + str(value) + '\\n'\n\n\treturn ret_str", "def serialize_dict(d):\n txt = '{'\n for k in d:\n txt += f'\"{k}\":'\n if isinstance(d[k], dict):\n txt += serialize_dict(d[k])\n if isinstance(d[k], str):\n txt += serialize_string(d[k])\n if isinstance(d[k], int):\n txt += serialize_number(d[k])\n txt += ','\n txt += '}'\n return txt", "def to_string(obj):\n if isinstance(obj, dict):\n str_obj = '{'\n for key, value in obj.items():\n str_obj += Parser.parse_text(key)+': '+Parser.parse_text(value)+'\\n'\n return str_obj + '\\b}'\n else:\n return Parser.parse_text(obj)", "def nice_dict_format(d):\n return ''.join([key+\": \"+str(d[key])+\"\\n\" for key in list(d.keys())])", "def pretty_dict(d):\n return '{%s}' % ', '.join('%r: %r' % (k, v)\n for k, v in sorted(d.items(), key=repr))", "def 
turn_collection_into_str(collection_of_books: tuple) -> str:\r\n book_str_list = []\r\n keys_list = list(collection_of_books[0].keys())\r\n keys_line = \"\\t\".join(keys_list)\r\n book_str_list.append(keys_line)\r\n\r\n for book_index in range(len(collection_of_books)):\r\n value_list = list(collection_of_books[book_index].values())\r\n if \"None\" in value_list:\r\n value_list[2] = \"\"\r\n each_book_as_str = \"\\t\".join(value_list)\r\n book_str_list.append(each_book_as_str)\r\n\r\n collection_of_books_as_str = \"\\n\".join(book_str_list)\r\n\r\n return collection_of_books_as_str", "def dictify(df):\n return {str(k): v for k, v in df.items()}", "def create_dictionary_string(\n dict_,\n open_dict=\"<<\",\n close_dict=\">>\",\n field_join=\"\\n\",\n key_value_join=\" \",\n has_empty_fields=False,\n):\n if has_empty_fields:\n dict_ = clear_empty_fields(dict_)\n\n return \"\".join(\n [\n open_dict,\n field_join.join([key_value_join.join(f) for f in dict_.items()]),\n close_dict,\n ]\n )", "def _records_to_string(records):\n parts = []\n for record in records:\n parts.append('\\n'.join(f'{k}: {v}' for k, v in record.items()))\n return '\\n\\n'.join(parts) + '\\n'", "def _render_dict(input_dict: Dict[str, Any]) -> Dict[str, Any]:\n output_dict = {}\n\n for key, value in input_dict.items():\n if isinstance(value, str):\n new_value = string.Template(value).substitute(_mapping)\n output_dict[key] = new_value\n elif isinstance(value, dict):\n output_dict[key] = _render_dict(value) # type: ignore\n else:\n output_dict[key] = value\n\n return output_dict", "def facts_to_str(user_data: Dict[str, str]) -> str:\r\n facts = [f'{key} - {value}' for key, value in user_data.items()]\r\n return \"\\n\".join(facts).join(['\\n', '\\n'])", "def json_str(item):\n\n if isinstance(item, dict):\n #return {json_str(key): json_str(value) for key, value in item.iteritems()}\n return dict((json_str(key), json_str(value)) for key, value in item.iteritems())\n elif isinstance(item, list):\n return [json_str(element) for element in item]\n elif isinstance(item, unicode):\n return item.encode('utf-8')\n else:\n return item", "def nice_string_output(d, extra_spacing=5, decimals=3):\n\n names = d.keys()\n max_names = len_of_longest_string(names)\n\n values = values_to_string(d.values(), decimals=decimals)\n max_values = len_of_longest_string(values)\n\n string = \"\"\n for name, value in zip(names, values):\n spacing = extra_spacing + max_values + max_names - len(name) - 1\n string += \"{name:s} {value:>{spacing}} \\n\".format(name=name, value=value, spacing=spacing)\n return string[:-2]", "def string(self):\n return str(self._dict)", "def loss_dict_to_str(running_loss_dict: Dict[str, float], logging_period: int) -> str:\n loss_str = \", \".join(\n [\n f\"{key}: {value / logging_period:.2f}\"\n for key, value in running_loss_dict.items()\n ]\n )\n return loss_str", "def tostring(self, key=None):\n if key is not None:\n return self._asline(key, self[key])\n else:\n out = ''\n for k in list(self.keys()):\n out += self._asline(k, self[k])\n return out", "def kv_str(kvl):\n return '[%s]' % ', '.join('%s: %s' % (k, v) for k, v in kvl)", "def skill_stringer(input_dict): #input a dictionary\r\n\treturn ', '.join('-'.join((k, str(v))) for k,v in sorted(input_dict.items())) #output formatted skill list string\r", "def to_text(adict):\n new_dict = {}\n for key, val in sorted(adict.items()):\n new_dict[key] = str(val)\n return '$delim${{{0}}}$delim$'.format(str(new_dict))", "def _format_values_in_map(self, dict_values: dict) -> 
dict:\n\n for key in dict_values.keys():\n if str(key).lower() == \"exception\":\n dict_values[key] = self._format_exception(str(dict_values[key]))\n break\n\n new_map = {}\n for key, value in dict_values.items():\n value = str(value)\n if ' ' in value:\n value = f'\"{value}\"'\n\n new_map[key] = f\"{key.replace(' ', '')}={value}\"\n return new_map", "def _encode(dictionary):\n # Strip ugly base64 padding.\n byteStr = bytearray(json.dumps(dictionary).encode())\n encodedStr = base64.urlsafe_b64encode(byteStr)\n return encodedStr.rstrip('='.encode())", "def string_dict(d, headline='DICTIONARY:', offset=25):\n template = '{:%is} {}' % offset\n rows = [template.format('\"{}\":'.format(n), d[n]) for n in sorted(d)]\n s = headline + '\\n' + '\\n'.join(rows)\n return s", "def encode_strings(o):\n\tif isinstance(o, list):\n\t\treturn [encode_strings(x) for x in o]\n\tif isinstance(o, dict):\n\t\treturn {k.encode('utf-8'): encode_strings(v) for k, v in o.items()}\n\tif isinstance(o, unicode):\n\t\treturn o.encode('utf-8')\n\treturn o", "def possession_stringer(input_dict):\r\n\treturn ', '.join(' x'.join((k, str(v))) for k,v in sorted(input_dict.items())) #output formatted skill list string\r", "def _to_string(self, data=None):\n if not data:\n raise ValueError(\"Please provide a correct data structure.\")\n\n if isinstance(data, dict):\n return str(json.dumps(data))\n elif isinstance(data, list):\n return ' '.join(data)\n else:\n return data", "def serialize_to_python(cls, value):\n if isinstance(value, OrderedDict):\n items = six.iteritems(value)\n else:\n items = sorted(six.iteritems(value),\n key=lambda pair: pair[0])\n\n return '{%s}' % ', '.join(\n '%s: %s' % (serialize_to_python(_key),\n serialize_to_python(_value))\n for _key, _value in items\n )", "def _tostr(t):\n\treturn t.__unicode__()", "def dumps(tuple_dict, key):\n tuple_dict[key] = json.dumps(tuple_dict[key]) if tuple_dict[key] != None else ''", "def keyvalue(dict, key):\n try:\n return dict[key]\n except KeyError:\n return ''", "def val2str(val):\n # Return the input if it's a string\n if isinstance(val,str ): valstr=val\n # Handle types where spaces are added\n elif isinstance(val,tuple): valstr=repr(val).replace(', ',',')\n elif isinstance(val,list ): valstr=repr(val).replace(', ',',')\n elif isinstance(val,dict ): valstr=repr(val).replace(', ',',').replace(': ',':')\n # Otherwise use repr()\n else: valstr=repr(val)\n # Return output\n return valstr", "def _encode_dict(source: dict) -> bytes:\n result_data = b\"d\"\n\n for key, value in source.items():\n result_data += encode(key) + encode(value)\n\n return result_data + b\"e\"", "def _labels_d2str(lbl_d: dict, _from: str = ''):\n lbl_str = _from\n for lbl_name, lbl_val in lbl_d.items():\n # check label name\n if not re.fullmatch(r'[a-zA-Z_]\\w*', lbl_name):\n raise ValueError(f'\"{lbl_name}\" is not a valid label name')\n # add comma before next block\n if lbl_str:\n lbl_str += ','\n # apply escapes to label_value\n for rep_args in [('\\\\', '\\\\\\\\'), ('\\n', '\\\\n'), ('\"', '\\\\\"')]:\n lbl_val = str(lbl_val).replace(*rep_args)\n # format label_str\n lbl_str += f'{lbl_name}=\"{lbl_val}\"'\n return lbl_str", "def to_str(value):\n if value is None:\n return \"\"\n if str(value) == value:\n return value\n try:\n return value.to_str()\n except AttributeError:\n try:\n return \"\\n\".join(to_str(v) for v in value)\n except TypeError:\n return str(value)", "def convert_list_to_string(key, data, errors, context): # noqa\n value = data.get(key, None)\n\n if not value:\n return\n\n 
if not isinstance(value, list):\n return\n\n data[key] = '{' + ','.join(map(str, value)) + '}'", "def _tokey(self, keys: Union[str, Iterable]):\n if hasattr(keys, \"encode\"): # str\n return keys.encode(\"utf-8\")\n elif hasattr(keys, \"decode\"): # bytes\n return keys\n return (self.Sep.join(keys).encode(\"utf-8\"))", "def simplify_dict(d: Dict[str, Any]) -> Dict[str, Any]:\n return {\n k: [ast_to_testing_string(n) for n in v] if k == \"children\" else v\n for k, v in d.items()\n }", "def conv_kv(val: ValidKVs) -> str:\n if isinstance(val, str): # Early out for speed\n return val\n elif val is True:\n return '1'\n elif val is False:\n return '0'\n elif isinstance(val, Matrix) or isinstance(val, FrozenMatrix):\n return str(val.to_angle())\n elif isinstance(val, float):\n return format_float(val)\n else:\n return str(val)", "def sort_and_encode(dictionary):\n return json.dumps(dictionary, separators=(',', ':'), sort_keys=True).encode('utf-8')", "def dict_to_hex(cls, d: Dict[str, Any]) -> str:\n md5 = hashlib.md5()\n keys = sorted(d.keys())\n for key in keys:\n value = d[key]\n if isinstance(value, dict):\n value = cls.dict_to_hex(value)\n else:\n value = hash('%s::%s' % (type(value), value))\n value = \"%s::%s\" % (key, value)\n md5.update(value.encode('utf-8'))\n return md5.hexdigest()", "def flatten_dict_string_keys(x):\n return {'/'.join(k): v for k, v in flatten_dict(unfreeze(x)).items()}", "def makeString(self, a):\n out = \"\"\n if type(a) is dict:\n for key, val in a.items():\n out = \"%s%s%s%s%s\" % (out, key, self.dataDelimiterKey, val, self.dataDelimiterEntry)\n return out\n elif type(a) is list:\n return \"%s%s\" % (self.dataDelimiterEntry.join(a), self.dataDelimiterEntry)", "def __str__(self):\n if len(self.keys()):\n return '{' + repr(self.keys()[0]) + ':' + repr(self[self.keys()[0]]) + ', ...'\n else:\n return super(FSDict, self).__str__()", "def messagetokeystring(message, keydict):\r\n return ''.join([' ' + str(keydict[char])\r\n if i - 1 >= 0\r\n and str(keydict[char])[0]\r\n == str(keydict[message[i - 1]])[0]\r\n else str(keydict[char])\r\n for i, char in enumerate(message)])", "def __str__(self):\n sio = StringIO()\n for k in self:\n sio.write(\"%s %s\\n\" % (repr(k), repr(self[k])))\n return sio.getvalue()", "def dict2argstr(args_dict):\n arg_str = \"\"\n for arg, value in args_dict.items():\n if value is not None:\n arg_str += \" --{} {}\".format(str(arg), str(value))\n return arg_str", "def __str__(self):\n if len(self.__keys) == 0:\n return '{}'\n output = '{'\n fmt = '{}: {}, '\n for key, val in zip(self.__keys, self.__vals):\n output += fmt.format(repr(key), repr(val))\n return output[:-2] + '}'" ]
[ "0.8282609", "0.8013096", "0.79114354", "0.7898864", "0.7838265", "0.77222395", "0.7700551", "0.75931054", "0.75236297", "0.74511623", "0.7303022", "0.727308", "0.70777315", "0.6990856", "0.69411385", "0.68564725", "0.6851728", "0.68371916", "0.6835815", "0.6810409", "0.6774563", "0.67688024", "0.6768504", "0.67625713", "0.67011476", "0.6698264", "0.66935414", "0.66269535", "0.65997267", "0.64988846", "0.64595747", "0.64539385", "0.6423746", "0.64208263", "0.6342147", "0.62572896", "0.6201415", "0.61751086", "0.6153759", "0.6135242", "0.61073506", "0.609678", "0.6075025", "0.60745704", "0.60463136", "0.6024087", "0.601717", "0.59939796", "0.59868455", "0.5971824", "0.5926406", "0.59127015", "0.58965296", "0.58851653", "0.5880382", "0.5856055", "0.5848185", "0.58388215", "0.58298624", "0.5819629", "0.5818148", "0.58029026", "0.5800154", "0.5784422", "0.5783496", "0.57705057", "0.57576245", "0.5752371", "0.5742374", "0.5734092", "0.57219315", "0.57125723", "0.5707486", "0.5700902", "0.5697225", "0.56655025", "0.5660212", "0.5656575", "0.56528723", "0.56259924", "0.56230783", "0.5618207", "0.5582559", "0.55782044", "0.5577457", "0.5562107", "0.55422455", "0.55271566", "0.5526282", "0.55258715", "0.54975516", "0.54943126", "0.5492712", "0.5485765", "0.54838943", "0.5480324", "0.54540586", "0.544079", "0.54255456", "0.5420819" ]
0.72467685
12
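For illustration only (not part of the dataset row above): a minimal usage sketch of the stringify_dict document from the preceding row, assuming the example input and printed output shown here, which are hypothetical and chosen just to show the behavior.

def stringify_dict(d: dict) -> dict:
    # Convert both keys and values to their string representation (as in the row's document field).
    return {str(key): str(value) for key, value in d.items()}

# Hypothetical input, not taken from the dataset.
example = {1: 3.5, (2, 3): None, "active": True}
print(stringify_dict(example))
# {'1': '3.5', '(2, 3)': 'None', 'active': 'True'}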
Given a dictionary, changes the key from snake case to lower camel case.
def lower_camel_casify_dict_keys(d: dict) -> dict: return {to_camel_case(key): value for key, value in d.items()}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transform_from_camelcase(key):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', key)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()", "def transform_to_camelcase(key):\n return Jsonifiable.lower_first(\n ''.join(c.capitalize() or '_' for c in key.split('_')))", "def convert_dict_keys_to_camel_case(d):\n data = {}\n for k, v in d.items():\n new_key = snake_to_camel_case(k)\n data[new_key] = d[k]\n return data", "def dict_keys_snake_to_camel_case(snake_dict: dict) -> dict:\n\n camel_dict = dict()\n\n for key, val in snake_dict.items():\n if isinstance(key, str):\n camel_dict[snake_to_camel_case(key)] = val\n else:\n camel_dict[key] = val\n\n return camel_dict", "def _lower(dictionary: dict):\n return {key.lower(): value.lower() for key, value in dictionary.items()}", "def transform_key(self, key):\n return key.lower()", "def lowercase_keys(input_dict):\n if not isinstance(input_dict,dict):\n return input_dict\n\n safe = dict()\n for key,value in input_dict.items():\n safe[str(key).lower()] = value\n return safe", "def lower_dict(input_dict):\r\n return {k.lower(): v for k, v in input_dict.iteritems()}", "def _convert_keys_to_lower(self, dictionary: dict) -> dict:\n lower_case_dictionary = OrderedDict()\n\n for key, value in dictionary.items():\n if not key.islower():\n if key.lower() in lower_case_dictionary.keys():\n raise ValueError(f\"Duplicate (case insensitive) key found: {key.lower()}\")\n if isinstance(value, dict):\n lower_case_dictionary[key.lower()] = self._convert_keys_to_lower(value)\n else:\n lower_case_dictionary[key.lower()] = value\n\n return lower_case_dictionary", "def _snake_to_camel(name, strict=False):\n if strict:\n name = name.lower()\n terms = name.split('_')\n return terms[0] + ''.join([term.capitalize() for term in terms[1:]])", "def lower_dict_keys(origin_dict):\n if not origin_dict or not isinstance(origin_dict, dict):\n return origin_dict\n\n return {key.lower(): value for key, value in origin_dict.items()}", "def convert_to_snake_case(camel_case_string):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', camel_case_string)\n s2 = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return s2.replace('__', '_')", "def __setitem__(self, key, value):\n super(CaseInsensitiveStringDict, self).__setitem__(key.lower(), value)", "def _lower(self, mapping):\n _mapping = {}\n for k, v in sorted(mapping.items()):\n k = k.lower()\n if k not in _mapping:\n _mapping[k] = v\n return _mapping", "def snake_to_camel_case(snake_str: str) -> str:\n\n words = snake_str.strip(\"_\").split(\"_\")\n return words[0] + \"\".join(word[:1].upper() + word[1:] for word in words[1:])", "def snake_to_camel_case(value):\n words = value.strip(\"_\").split(\"_\")\n return words[0].lower() + \"\".join([word.capitalize() for word in words[1:]])", "def camel_to_snake_case(name: str) -> str:\n return CAPITALS.sub(r'_\\1', name).lower().lstrip('_')", "def _camel_case_to_snake_case(text: str) -> str:\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", text).lower()", "def _case_convert_snake_to_camel(token: str) -> str:\n while True:\n try:\n # find next underscore\n underscore_loc = token.index('_')\n except ValueError:\n # converted all underscores\n break\n # is the underscore at the end of the string?\n if underscore_loc == len(token) - 1:\n break\n\n orig = token\n token = f'{orig[:underscore_loc]}{orig[underscore_loc+1].upper()}'\n # is there more after the capital?\n if len(orig) > underscore_loc+2:\n token += f'{orig[underscore_loc+2:]}'\n return token", "def 
camel_to_snake_case(value):\n return re_camel_case.sub(r\"_\\1\", value).strip(\"_\").lower()", "def _camel_to_snake(name):\n s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()", "def snake_to_camel_case(name: str, initial: bool = False) -> str:\n chunks = name.split('_')\n converted = [s.capitalize() for s in chunks]\n if initial:\n return ''.join(converted)\n else:\n return chunks[0].lower() + ''.join(converted[1:])", "def lowercase_dict_keys(self):\n lower1 = {}\n for key1, val1 in self.reffile_overrides.items():\n if isinstance(val1, dict):\n lower2 = {}\n for key2, val2 in val1.items():\n if isinstance(val2, dict):\n lower3 = {}\n for key3, val3 in val2.items():\n if isinstance(val3, dict):\n lower4 = {}\n for key4, val4 in val3.items():\n if isinstance(val4, dict):\n lower5 = {}\n for key5, val5 in val4.items():\n if isinstance(val5, dict):\n lower6 = {}\n for key6, val6 in val5.items():\n lower6[key6.lower()] = val6\n lower5[key5.lower()] = deepcopy(lower6)\n else:\n lower5[key5.lower()] = val5\n lower4[key4.lower()] = deepcopy(lower5)\n else:\n lower4[key4.lower()] = val4\n lower3[key3.lower()] = deepcopy(lower4)\n else:\n lower3[key3.lower()] = val3\n lower2[key2.lower()] = deepcopy(lower3)\n else:\n lower2[key2.lower()] = val2\n lower1[key1.lower()] = deepcopy(lower2)\n else:\n lower1[key1.lower()] = val1\n self.reffile_overrides = lower1", "def convert_camel_case(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()", "def snake_case_to_headless_camel_case(snake_string):\n return ''.join([snake_string.split('_')[0]] +\n list(sub_string.capitalize()\n for sub_string in snake_string.split('_')[1:]))", "def snake_to_camel(snake_str):\n title_str = snake_str.split('_')\n return ' '.join(title_str).title()", "def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))", "def snake_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))", "def convert_dict_key_case(obj, converter):\n if not isinstance(obj, dict):\n return obj\n\n obj = obj.copy()\n for key in list(six.iterkeys(obj)):\n converted_key = converter(key)\n obj[converted_key] = convert_dict_key_case(obj.pop(key), converter)\n\n return obj", "def snake_to_camel(name):\n if name == \"role_arn\":\n return \"roleARN\"\n temp = name.split(\"_\")\n return temp[0] + \"\".join(ele.title() for ele in temp[1:])", "def screaming_snake_case(value: str, **kwargs: Any) -> str:\n return snake_case(value, **kwargs).upper()", "def __getitem__(self, key):\n return super(CaseInsensitiveStringDict, self).__getitem__(key.lower())", "def underscorecase(camelcased):\n return re.sub('([A-Z]+)', r'_\\1', camelcased).lower()", "def camel_to_snake(name):\n name = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', name).lower()", "def get_snake_case_from_camel_case(name: str) -> str:\n\n new_chars = []\n for i, char in enumerate(name): \n if i == len(name)-1 or i == 0: \n new_chars.append(char)\n elif char.isupper() and name[i+1].islower():\n new_chars.append('_')\n new_chars.append(char)\n elif char.islower() and name[i+1].isupper(): \n new_chars.append(char)\n new_chars.append('_')\n else: \n new_chars.append(char)\n\n new_name = ''.join(new_chars)\n return new_name.lower().replace('__', '_')", "def _camel_killer(attr):\n try:\n attr = str(attr)\n except UnicodeEncodeError:\n attr = attr.encode('utf-8', 'ignore')\n s1 = 
_first_cap_re.sub('\\\\1_\\\\2', attr)\n s2 = _all_cap_re.sub('\\\\1_\\\\2', s1)\n return re.sub('_+', '_', s2.casefold() if hasattr(s2, 'casefold') else s2.lower())", "def snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(map(str.lower, split_words(value)))", "def to_camelcase(variables: Mapping[str, Any]) -> Mapping[str, Any]:\n def convert(string):\n # thanks to https://stackoverflow.com/a/47253475/1327062\n return re.sub('_([a-zA-Z0-9])', lambda m: m.group(1).upper(), string)\n\n converted_dict = {}\n for key, value in variables.items():\n if isinstance(value, dict):\n value = to_camelcase(value)\n key = convert(key)\n converted_dict[key] = value\n return converted_dict", "def snake_to_camel_case(string: str):\n return ''.join(string_component.title() for string_component in string.split('_'))", "def convert_to_snake_case(string: str) -> str:\n\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', string)\n draft = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return draft.replace('__', '_')", "def camel_to_snake(name: str) -> str:\n name = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", name)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", name).lower()", "def camel_to_snake(column_name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', column_name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()", "def snake_case_to_camel_case(s, separator='_'):\n return s.title().replace(separator, '')", "def namecase(name):\n return re.sub(r\"[A-Za-z]+('[A-Za-z]+])?\",\n lambda mo: _namecase.get(mo.group(0).lower(),\n mo.group(0).title()),\n name)", "def transform(legacy_data: dict) -> dict:\n return {v.lower(): key for key, value in legacy_data.items()\n for v in value}", "def to_lower_camelcase(name):\n return re.sub(r'(?:\\B_|\\b\\-)([a-zA-Z0-9])', lambda l: l.group(1).upper(),\n name)", "def CamelCase_to_snake_case(text):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', text)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()", "def _ci_key(self, key: str) -> str:\n # pylint: disable=no-self-use\n return key.lower()", "def _ci_key(self, key: str) -> str:\n # pylint: disable=no-self-use\n return key.lower()", "def to_snake_case(string):\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', string)).lower()", "def camel_to_snake(string):\n return re.sub(r\"(?<!^)(?=[A-Z])\", \"_\", string).lower().replace(\"__\", \"_\")", "def to_snake_case(str):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n s2 = re.sub('-', '_', s1)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s2).lower()", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "def change_case(word):\n return word.upper() if case == \"upper\" else word.lower()", "def parse_case_camel_to_snake(camel):\n\t# requirements = re\n\treturn re.sub('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))', r'_\\1', camel).lower()", "def snakecase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(name).lower()", "def snake_to_camel(text: str) -> str:\n\n data = [\n i.capitalize()\n for i in text.split(\"_\")\n ]\n return \"\".join(data)", "def lower_first(key):\n return key[:1].lower() + key[1:] if key else ''", "def parse_case_snake_to_camel(snake, upper_first=True):\n\tsnake = snake.split('_')\n\tfirst_part = snake[0]\n\tif upper_first:\n\t\tfirst_part = first_part.title()\n\treturn first_part + ''.join(word.title() for word in snake[1:])", "def camel_case(value: str, **kwargs: Any) -> str:\n result = \"\".join(map(str.title, split_words(value)))\n 
return result[0].lower() + result[1:]", "def snake_to_camel(name):\n return \"\".join([piece.capitalize() for piece in name.split(\"_\")])", "def camel_to_snake(\n name: str, _re_snake: Pattern[str] = re.compile(\"[a-z][A-Z]\")\n) -> str:\n\n def repl(match: Match[str]) -> str:\n lower: str\n upper: str\n lower, upper = match.group() # type: ignore\n return f\"{lower}_{upper.lower()}\"\n\n return _re_snake.sub(repl, name).lower()", "def snake_case(string_to_convert):\n return ''.join(['_' + i.lower() if i.isupper()\n else i for i in string_to_convert]).lstrip('_')", "def name_to_snake_case(name: str) -> str:\n\n # From COBOL entity\n if '-' in name or name.isupper():\n return name.strip().lower().replace('-', '_')\n\n # From camelCase\n return re.sub(r'(?<!^)(?=[A-Z])', '_', name.strip()).lower()", "def rev_dash_snake_case(string_to_convert):\n return ''.join(i.capitalize() for i in string_to_convert.split('-'))", "def test_rename_key_single(self):\n\n # Test of the strict case\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\", \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"PyFunceble\": \"Funceble\",\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.to_test).rename_key({\"Py\": \"PyFunceble\"})\n\n self.assertEqual(expected, actual)\n\n # Test of the non-strict case\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\", \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"PyFunceble\": \"Funceble\",\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.to_test).rename_key({\"fun\": \"nuf\"}, strict=False)", "def task_1_fix_names_start_letter(data: DT) -> DT:\n for dic in data:\n if dic.get('name'):\n dic['name'] = dic['name'].capitalize()\n return data", "def mixed_snake_case(value: str, **kwargs: Any) -> str:\n return \"_\".join(split_words(value))", "def mixed_pascal_case(value: str, **kwargs: Any) -> str:\n return capitalize(mixed_case(value))", "def to_upper_case(signals):\n for _, signal in signals.items():\n signal.name = signal.name.upper()\n return signals", "def _name_convert_camel_case(self, meta):\n if isinstance(meta, list):\n new_meta = []\n for m in meta:\n new_meta.append(self._name_convert_camel_case(m))\n\n elif isinstance(meta, dict):\n new_meta = {}\n for (key, value) in meta.items():\n try:\n n_key = _attribute_map[key]\n except Exception:\n n_key = key\n new_meta[n_key] = self._name_convert_camel_case(value)\n else:\n new_meta = meta\n\n return new_meta", "def _snake_case(display_name):\n str_re = re.compile('[{0}]'.format(re.escape(string.punctuation)))\n str = str_re.sub(' ', display_name)\n str = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', str)\n str = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', str).lower()\n return re.sub(' +', '_', str)", "def invert_capitalization(word):\n if word.islower():\n return word.upper()\n else:\n return word.lower()", "def spinalcase(string):\n\n return re.sub(r\"_\", \"-\", snakecase(string))", "def camelcase(value):\n rest = value.split(\"_\")\n return rest[0] + \"\".join(word.title() for word in rest[1:])", "def lowercase_name(name):\n return name.lower()", "def to_camel_case(s):\n if s[:1].isupper() and '_' not in s:\n return s\n else:\n return snake_case_to_camel_case(s)", "def _camelify(words):\n newText = ''\n for word in words:\n if newText == '':\n newText = word[:1].lower() + word[1:]\n else:\n newText = '%s%s' % (newText, word.capitalize())\n return newText", "def _case_convert_capital_to_snake(token: str) -> str:\n # split on uppercase characters\n # this split works by inserting a 
space before each uppercase character, then space-splitting\n components = re.sub(r'([A-Z])', r' \\1', token).split()\n return '_'.join(components).lower()", "def to_camel_case(string: str, first_lower: bool = False) -> str:\n if first_lower:\n first, _, rest = string.partition(\"_\")\n else:\n first, rest = (\"\", string)\n return first.lower() + \"\".join(part.capitalize() for part in rest.split(\"_\"))", "def camelcase_to_sentencecase(name: str) -> str:\n return stringcase.sentencecase(stringcase.snakecase(name)).lower()", "def lower_case_really():", "def camel_filter(val):\n titlecase = val.title()\n return re.sub(r\"[\\W^_]\", \"\", titlecase)", "def make_snake_case(string):\n return snake_re.sub(r'_\\1', string).lower()", "def test_rename_key_strict_single(self):\n\n expected = {\n \"Hello\": \"world\",\n \"World\": {\"world\": \"hello\"},\n \"funilrys\": [\"Fun\", \"Ilrys\"],\n \"PyFunceble\": \"Funceble\",\n \"pyfunceble\": [\"funilrys\"],\n }\n\n actual = Dict(self.test_subject).rename_key({\"Py\": \"PyFunceble\"})\n\n self.assertEqual(expected, actual)", "def camel_case_text(text):\n newText = format_camel_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def uppersnakecase(string):\n\n return uppercase(snakecase(string))", "def snake_to_camel(string):\n \n camel_case = []\n\n for word in string.split(\"_\"):\n camel_case.append(word.title())\n\n \"\".join(camel_case)", "def setToLowercase(self, value):\n return self._set(toLowercase=value)", "def snake_case_text(text):\n newText = format_snake_case(text)\n Text(\"%(text)s\").execute({\"text\": newText})", "def camel_to_snake(s):\n subbed = _underscorer1.sub(r'\\1_\\2', s)\n return _underscorer2.sub(r'\\1_\\2', subbed).lower()", "def snakecase(string):\n\n string = re.sub(r\"[\\-\\.\\s]\", '_', str(string))\n if not string:\n return string\n return lowercase(string[0]) + re.sub(r\"[A-Z]\", lambda matched: '_' + lowercase(matched.group(0)), string[1:])", "def underscored2camel_case(v):\n vlist = v.split('_')\n c = []\n for n, el in enumerate(vlist):\n if el:\n if n == 0:\n c.append(el)\n else:\n c.extend([el[0].upper(), el[1:]])\n return ''.join(c)", "def camelize(name):\n return ''.join([bit.capitalize() for bit in name.split('_')])", "def force_title_case(etl, field_names, **kwargs):\r\n import arcetl\r\n func = functools.partial(\r\n etl.transform, transformation=arcetl.attributes.update_by_function,\r\n function=value.force_title_case, **kwargs\r\n )\r\n tuple(func(field_name=name) for name in field_names)", "def uncamel(s):\n for pat in uncamel_patterns:\n s = pat.sub(r'\\1_\\2', s)\n return s.lower()", "def uncamel(s):\n for pat in uncamel_patterns:\n s = pat.sub(r'\\1_\\2', s)\n return s.lower()", "def _to_camel_case(text: str) -> str:\n return \"\".join(word.title() for word in text.split(\"_\"))", "def camel_to_snake(s):\n no_camel = \"\".join([\"_\" + c.lower() if c.isupper() else c for c in s]).lstrip(\"_\")\n return no_camel.replace(\"__\", \"_\")", "def underscore_to_camelcase(word, initial_capital=False):\n words = [x.capitalize() or \"_\" for x in word.split(\"_\")]\n if not initial_capital:\n words[0] = words[0].lower()\n\n return \"\".join(words)" ]
[ "0.7801124", "0.77649593", "0.77066845", "0.7620036", "0.7379338", "0.7279606", "0.71873266", "0.71728736", "0.7140577", "0.70116407", "0.69478273", "0.69024104", "0.68458456", "0.6818799", "0.6726056", "0.67258394", "0.6712159", "0.6710847", "0.67080796", "0.6662867", "0.6595556", "0.65670604", "0.6565834", "0.6563058", "0.65605944", "0.6554441", "0.6543572", "0.6543572", "0.65242714", "0.6453751", "0.6443439", "0.64398897", "0.6414722", "0.637828", "0.6372452", "0.6364891", "0.6359347", "0.6358621", "0.6330924", "0.6330436", "0.63273805", "0.6325891", "0.6322361", "0.63206846", "0.6318821", "0.63183457", "0.63155943", "0.63049275", "0.63049275", "0.62978834", "0.628258", "0.6280081", "0.62746096", "0.62737817", "0.627136", "0.6269077", "0.6267126", "0.62607217", "0.62547594", "0.6249642", "0.62413216", "0.6233101", "0.6231622", "0.6218573", "0.6215407", "0.6209617", "0.6196262", "0.6193712", "0.61927265", "0.6192026", "0.61808085", "0.61741763", "0.6152475", "0.61523473", "0.61447054", "0.61424935", "0.614084", "0.61403775", "0.6135856", "0.61268204", "0.6115318", "0.6107982", "0.61040586", "0.61002606", "0.6098853", "0.60886586", "0.60763645", "0.60717696", "0.6071039", "0.6069849", "0.6068383", "0.6068056", "0.60661423", "0.6060074", "0.6052596", "0.6040592", "0.6040592", "0.6039769", "0.6037399", "0.6036822" ]
0.7839561
0
Ensure IPCMessageSubscriber.connect gets wrapped by salt.utils.asynchronous.SyncWrapper.
async def test_ipc_connect_sync_wrapped(io_loop, tmp_path): if salt.utils.platform.is_windows(): socket_path = ports.get_unused_localhost_port() else: socket_path = str(tmp_path / "noexist.ipc") subscriber = salt.utils.asynchronous.SyncWrapper( salt.transport.ipc.IPCMessageSubscriber, args=(socket_path,), kwargs={"io_loop": io_loop}, loop_kwarg="io_loop", ) with pytest.raises(tornado.iostream.StreamClosedError): # Don't `await subscriber.connect()`, that's the purpose of the SyncWrapper subscriber.connect()
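For context on the call pattern in the test above: the point of salt.utils.asynchronous.SyncWrapper is that a coroutine method such as IPCMessageSubscriber.connect can be invoked as a plain blocking call, which is why the test deliberately does not await it. The snippet below is only a rough, generic sketch of that wrapping idea using asyncio; it is not salt's actual implementation (salt's wrapper is driven by a Tornado io_loop, as the io_loop/loop_kwarg arguments in the test suggest), and SimpleSyncWrapper is a hypothetical name used purely for illustration.

import asyncio


class SimpleSyncWrapper:
    """Toy illustration of the sync-wrapper idea: expose an async object's
    coroutine methods as blocking calls by running them to completion on a
    private event loop."""

    def __init__(self, factory, *args, **kwargs):
        self._loop = asyncio.new_event_loop()
        self._wrapped = factory(*args, **kwargs)

    def __getattr__(self, name):
        # Only reached for attributes not found on the wrapper itself.
        attr = getattr(self._wrapped, name)
        if not callable(attr):
            return attr

        def caller(*args, **kwargs):
            result = attr(*args, **kwargs)
            # If the wrapped method returned a coroutine, drive it synchronously.
            if asyncio.iscoroutine(result):
                return self._loop.run_until_complete(result)
            return result

        return caller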
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sync_connect(self):\n loop = asyncio.get_event_loop()\n task = loop.create_task(self.connect())\n loop.run_until_complete(task)", "async def _connect(self):\n pass", "async def connect(self):\n raise NotImplementedError", "async def on_connect(self) -> None:", "async def connect(self):\n pass", "async def on_connect(self):\n pass", "async def on_connect(self):\r\n self._try_shutdown_twitch()\r\n self.stream_thread = self.connect_thread()", "def _connect(self):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")", "def handle_connect(self):\n if self.use_ssl:\n self.ssl = ssl.wrap_socket(self.socket)\n self.set_socket(self.ssl)", "async def connect(self):\n try:\n self._cmd_stream = await self._connect()\n self.inc_counter(\"%s.connected\" % self.objname)\n self.logger.info(\"Connected: %s\", self._extra_info)\n except Exception as e:\n self.logger.error(\"Connect Failed %r\", e)\n self.inc_counter(\"%s.failed\" % self.objname)\n raise e", "async def _async_connect_to_chromecast(self):\n _LOGGER.debug(\n \"[%s %s] Connecting to cast device by service %s\",\n self._name,\n self._cast_info.friendly_name,\n self._cast_info.cast_info.services,\n )\n chromecast = await self.hass.async_add_executor_job(\n pychromecast.get_chromecast_from_cast_info,\n self._cast_info.cast_info,\n ChromeCastZeroconf.get_zeroconf(),\n )\n self._chromecast = chromecast\n\n if CAST_MULTIZONE_MANAGER_KEY not in self.hass.data:\n self.hass.data[CAST_MULTIZONE_MANAGER_KEY] = MultizoneManager()\n\n self.mz_mgr = self.hass.data[CAST_MULTIZONE_MANAGER_KEY]\n\n self._status_listener = CastStatusListener(\n self, chromecast, self.mz_mgr, self._mz_only\n )\n chromecast.start()", "def _connect(self):\r\n self.sock = socket.socket()\r\n host = \"pubsub.pubnub.com\"\r\n port = 80\r\n if self.use_ssl:\r\n self.sock = ssl.wrap_socket(self.sock)\r\n port = 443\r\n self.sock.connect((host, port))\r\n self.connected = True", "def connect(self, reconnect=True, *args, **kwargs):\n pass", "async def async_connect(self):\n # Test the router is accessible.\n try:\n data = await self.connection.async_get_connected_devices()\n self.success_init = data is not None\n except OSError as ex:\n _LOGGER.warning(\n \"Error [%s] connecting %s to %s.\",\n str(ex),\n DOMAIN,\n self.host,\n )\n raise ConnectionError(\"Cannot connect to D-Link router\")\n\n if not self.connection.is_connected:\n _LOGGER.error(\"Error connecting %s to %s\", DOMAIN, self.host)\n raise ConnectionError(\"Cannot connect to D-Link router\")", "async def __initiate_connection(self):\r\n\r\n chainlink_model = ChainlinkResolver.resolve(self.name)\r\n if chainlink_model is None:\r\n LoggerInterface.error(f'The chainlink {self.name} is not registered yet. 
Register it first!')\r\n return\r\n\r\n self.socket_client.set_callback(self.callback)\r\n self.socket_client.set_using_chainlink(chainlink_model)\r\n await self.socket_client.connect()", "def connect(self):\n\t\tself.printed_sub = False\n\t\tself.client.connect(BROKER)\n\t\tself.client.loop_forever()", "def _connect(self):\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable", "def test_connectEvent(self):\n reactor = self.buildReactor()\n\n self.listen(reactor, ServerFactory.forProtocol(Protocol))\n connected = []\n\n class CheckConnection(Protocol):\n def connectionMade(self):\n connected.append(self)\n reactor.stop()\n\n clientFactory = Stop(reactor)\n clientFactory.protocol = CheckConnection\n\n needsRunningReactor(reactor, lambda: self.connect(reactor, clientFactory))\n\n reactor.run()\n\n self.assertTrue(connected)", "async def _connect(self):\n if not self._reader:\n self._reader = asyncio.create_task(self._read())", "async def test_connection(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True", "def _connectf(self, connection):\r\n\r\n # in case the SSL connection is still undergoing the handshaking\r\n # procedures (marked as connecting) ignores the call as this must\r\n # be a duplicated call to this method (to be ignored)\r\n if connection.ssl_connecting: return\r\n\r\n # verifies if there was an error in the middle of the connection\r\n # operation and if that's the case calls the proper callback and\r\n # returns the control flow to the caller method\r\n error = connection.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)\r\n if error: self.on_error(connection.socket); return\r\n\r\n # checks if the current connection is SSL based and if that's the\r\n # case starts the handshaking process (async non blocking) otherwise\r\n # calls the on connect callback with the newly created connection\r\n if connection.ssl: connection.add_starter(self._ssl_client_handshake)\r\n else: self.on_connect(connection)\r\n\r\n # runs the starter process (initial kick-off) so that all the starters\r\n # registered for the connection may start to be executed, note that if\r\n # the SSL handshake starter has been registered its first execution is\r\n # going to be triggered by this call\r\n connection.run_starter()", "async def async_connect(self) -> None:\n # pylint: disable-next=import-outside-toplevel\n import paho.mqtt.client as mqtt\n\n result: int | None = None\n try:\n result = await self.hass.async_add_executor_job(\n self._mqttc.connect,\n self.conf[CONF_BROKER],\n self.conf.get(CONF_PORT, DEFAULT_PORT),\n self.conf.get(CONF_KEEPALIVE, DEFAULT_KEEPALIVE),\n )\n except OSError as err:\n _LOGGER.error(\"Failed to connect to MQTT server due to exception: %s\", err)\n\n if result is not None and result != 0:\n _LOGGER.error(\n \"Failed to connect to MQTT server: %s\", mqtt.error_string(result)\n )\n\n self._mqttc.loop_start()", "def subscribe(self):\n fd = libplasma.subscribe(self.conn)\n self.notification_sock = socket.fromfd(fd, socket.AF_UNIX, socket.SOCK_STREAM)\n # Make the socket non-blocking.\n self.notification_sock.setblocking(0)", "async def async_connect_socket(streamer_obj: class_definition_and_manipulation.StreamerObj) -> None:\r\n reader, writer = await asyncio.open_connection(encryption_key.cfg_host,\r\n int(encryption_key.cfg_port))\r\n\r\n writer.write(f'CAP REQ :twitch.tv/membership twitch.tv/tags twitch.tv/commands\\r\\n'.encode('utf-8'))\r\n 
print(f\"Connecting to socket for {streamer_obj.name}\")\r\n\r\n writer.write(\"PASS {}\\r\\n\".format(encryption_key.decrypted_pass).encode('utf-8')) # password\r\n writer.write(\"NICK #zerg3rrbot\\r\\n\".encode('utf-8')) # bot name\r\n writer.write(f\"JOIN #{streamer_obj.name}\\r\\n\".encode('utf-8'))\r\n\r\n await writer.drain()\r\n streamer_obj.stream_socket_writer = writer\r\n streamer_obj.stream_socket_reader = reader", "def connect(self):\n self.conn.add_listener(self.handle_connection_change)\n self.conn.start_async()", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "async def async_connect(self) -> None:\n params = {\"ns\": self._namespace, \"accessToken\": self._access_token}\n try:\n await self._sio.connect(\n f\"{API_URL_BASE}?{urlencode(params)}\",\n namespaces=[self._namespace],\n transports=[\"websocket\"],\n )\n except (ConnError, SocketIOError) as err:\n raise WebsocketError(err) from None", "def handle_connect(self):\n pass", "def test_interface(self):\n reactor = self.buildReactor()\n connector = self.connect(reactor, ClientFactory())\n self.assertTrue(verifyObject(IConnector, connector))", "def test_connect_success():\n\n t = Thread(target=setup_socket)\n t.start()\n\n data_sender = DataSender('127.0.0.1', 12345)\n server_response = data_sender.notify('test')\n\n assert server_response == 'ok'\n\n data_sender.close()\n t.join()", "def callback_connect(self):\n pass", "def callback_connect(self):\n pass", "def callback_connect(self):\n pass", "def _on_connect(self, client, userdata, flags, rc):\n self.subscribe(self.topic)", "def _connect_async(self):\n self._pgconn = libpq.PQconnectStart(ascii_to_bytes(self.dsn))\n if not self._pgconn:\n raise exceptions.OperationalError('PQconnectStart() failed')\n elif libpq.PQstatus(self._pgconn) == libpq.CONNECTION_BAD:\n raise self._create_exception()\n\n libpq.PQsetNoticeProcessor(\n self._pgconn, self._notice_callback, ffi.NULL)", "async def connect(self):\n connect = asyncio.gather(*[conn.connect_to_server() for name, conn in self._exchange_connections.items()])\n wait_for = asyncio.gather(*[self.on_connection(name) for name, conn in self._exchange_connections.items()])\n await asyncio.gather(connect, wait_for)", "def subscribe(receiver, catchup):", "def test_connect_subscriber():\n config = {\"listeners\": \"localhost:8080\"}\n registry = Registry()\n registry.new(name=\"test\", backend=\"dummy\", **config)\n\n dummy = registry[\"test\"]\n subscriber = dummy.subscribe([\"mytopic\"])\n message = subscriber.listen()\n\n assert message == \"Dummy Message\"\n subscriber._connect.assert_called_once()", "def _subscribe(self, signal, reconnect=False):\n if reconnect:\n if signal not in self._downstream_reconnect:\n self._downstream_reconnect.append(signal)\n else:\n if signal not in self._downstream:\n self._downstream.append(signal)", "async def connect(self):\n await asyncio.gather(self._exchange_connection.connect_to_server(), self.on_connection())", "def _connect_callback(self, future):\n if future.exception() is None:\n self._ws_connection = future.result()\n self._on_connection_success()\n self._read_messages()\n else:\n self._on_connection_error(future.exception())", "async def test_libp2pclientconnection_connect_disconnect(self):\n assert self.connection.is_connected is False\n try:\n await self.connection_node.connect()\n await self.connection.connect()\n assert self.connection.is_connected is True\n\n await self.connection.disconnect()\n assert self.connection.is_connected is False\n 
except Exception:\n raise\n finally:\n await self.connection_node.disconnect()", "def test_is_connected():\n with Replacer() as r:\n queue_manager = uuid4().hex\n channel = uuid4().hex\n host = uuid4().hex\n port = \"1431\"\n conn_info = \"%s(%s)\" % (host, port)\n\n for expected in(True, False):\n\n def _connectTCPClient(*ignored_args, **ignored_kwargs):\n pass\n\n def _getattr(self, name):\n if expected:\n class _DummyMethod(object):\n pass\n # The mere fact of not raising an exception will suffice\n # for QueueManager._is_connected to understand it as an\n # all's OK condition.\n return _DummyMethod\n else:\n raise Exception()\n\n r.replace('pymqi.QueueManager.connectTCPClient', _connectTCPClient)\n r.replace('pymqi.PCFExecute.__getattr__', _getattr)\n\n qmgr = pymqi.QueueManager(None)\n qmgr.connectTCPClient(queue_manager, pymqi.cd(), channel, conn_info)\n\n eq_(qmgr.is_connected, expected)", "def _connect(self):\n hostport = self.getHost()\n channelOpenData = forwarding.packOpen_direct_tcpip((self.host, self.port), (hostport.host, hostport.port))\n self.connector.connection.openChannel(self, channelOpenData)", "def _notifyConnect(self, function, **kwargs):\n self._sig_connect.subscribe(function, **kwargs)", "async def test_connection_scope(path):\n communicator = WebsocketCommunicator(ConnectionScopeValidator(), path)\n connected, _ = await communicator.connect()\n assert connected\n await communicator.disconnect()", "async def connect(self) -> None:\n exceptions = (\n OSError,\n ConnectionClosed,\n aiohttp.ClientError,\n asyncio.TimeoutError,\n errors.HTTPException,\n )\n\n async def throttle() -> None:\n now = time.monotonic()\n between = now - last_connect\n sleep = random.random() * 4 if between > 600 else 100 / between ** 0.5\n log.info(f\"Attempting to connect to another CM in {sleep}\")\n await asyncio.sleep(sleep)\n\n while not self.is_closed():\n last_connect = time.monotonic()\n\n try:\n self.ws = await asyncio.wait_for(SteamWebSocket.from_client(self, cm_list=self._cm_list), timeout=60)\n except exceptions:\n await throttle()\n continue\n\n try:\n while True:\n await self.ws.poll_event()\n except exceptions as exc:\n if isinstance(exc, ConnectionClosed):\n self._cm_list = exc.cm_list\n self.dispatch(\"disconnect\")\n finally:\n if not self.is_closed():\n await throttle()", "def slot_client_connected(self, _sender, _data):\r\n self.check_connect_ready()", "def on_connect(self):\n log.info(\"Stream connected\")", "async def connect(self, conn_factory):\n assert False", "async def connect(self) -> None:\n self.client = mqtt.Client()\n self.client.on_message = self.on_message\n self.client.connect(self.host, self.port)\n self.client.loop_start()\n self.client.subscribe(LSST_GENERAL_TOPIC)\n self.connected = True\n self.log.debug(\"Connected.\")", "def connect_to_peer(self):\n pass", "async def connect(self) -> bool:\n\n # Special exception message for _CONNECTING.\n if self._state == self._CONNECTING:\n raise IncorrectStateException((\"connect() may not be called\"\n \" multiple times.\"))\n\n if self._state != self._NOT_RUNNING:\n raise IncorrectStateException((\"disconnect() must complete before\"\n \" connect() may be called again.\"))\n\n logger.debug(\"Connecting...\")\n\n # Now we're sure we're in the _NOT_RUNNING state, we can set our state.\n # Important: No await-ing has occurred between checking the state and\n # setting it.\n self._state = self._CONNECTING\n\n success = await self._connect()\n\n if success:\n logger.debug(\"Starting event loop\")\n self._event_loop = 
asyncio.create_task(self._run())\n self._state = self._RUNNING\n self._events.fire(\"connected\")\n else:\n self._state = self._NOT_RUNNING\n\n logger.debug(\"Sending connected notification\")\n async with self._connected_condition:\n self._connected_condition.notify_all()\n\n logger.debug(\"Connected\" if success else \"Connection failed\")\n return success", "def connect(self) -> None:\n ...", "def _connect_later(self, wait_time):\n # Trivial function, but useful for unit testing\n self._io_loop.call_later(wait_time, self._connect, True)", "def onConnecting(self, transport_details):", "def connect(self):\n assert self.listening\n assert not self.connected\n ctx = zmq.Context.instance()\n port = NODE_INFOS[self.ID].port\n self._send_socket = ctx.socket(zmq.PUB)\n self._send_socket.bind(f\"tcp://*:{port}\")\n self.connected = True", "def test_connect(connection, events, writer, schedule, flush):\n schedule(connection.connect())\n flush()\n assert connection.connected\n assert not writer.closed\n assert events.triggered(\"CLIENT_CONNECT\")", "async def test_handle_message_connected_empty(\n subject: helpers.ModuleListener, mock_callback: AsyncMock\n) -> None:\n message = models.Message(status=\"connected\", connections=[])\n await subject.handle_message(message=message)\n mock_callback.assert_called_once_with([], [])", "def connect(self):\n #print(\"try to connect connect\")\n if self._loop is not None and not self._loop.ready():\n #print(\"RE\")\n raise RuntimeError(\"Already (auto-re)connecting\")\n self._loop = gevent.spawn(self._run)", "async def test_plaintext_connection(conn: APIConnection, resolve_host, socket_socket):\n loop = asyncio.get_event_loop()\n protocol = _get_mock_protocol(conn)\n messages = []\n protocol: Optional[APIPlaintextFrameHelper] = None\n transport = MagicMock()\n connected = asyncio.Event()\n\n def _create_mock_transport_protocol(create_func, **kwargs):\n nonlocal protocol\n protocol = create_func()\n protocol.connection_made(transport)\n connected.set()\n return transport, protocol\n\n def on_msg(msg):\n messages.append(msg)\n\n remove = conn.add_message_callback(on_msg, {HelloResponse, DeviceInfoResponse})\n transport = MagicMock()\n\n with patch.object(\n loop, \"create_connection\", side_effect=_create_mock_transport_protocol\n ):\n connect_task = asyncio.create_task(conn.connect(login=False))\n await connected.wait()\n\n protocol.data_received(\n b'\\x00@\\x02\\x08\\x01\\x10\\x07\\x1a(m5stackatomproxy (esphome v2023.1.0-dev)\"\\x10m'\n )\n protocol.data_received(b\"5stackatomproxy\")\n protocol.data_received(b\"\\x00\\x00$\")\n protocol.data_received(b\"\\x00\\x00\\x04\")\n protocol.data_received(\n b'\\x00e\\n\\x12\\x10m5stackatomproxy\\x1a\\x11E8:9F:6D:0A:68:E0\"\\x0c2023.1.0-d'\n )\n protocol.data_received(\n b\"ev*\\x15Jan 7 2023, 13:19:532\\x0cm5stack-atomX\\x03b\\tEspressif\"\n )\n await asyncio.sleep(0)\n await connect_task\n assert conn.is_connected\n assert len(messages) == 2\n assert isinstance(messages[0], HelloResponse)\n assert isinstance(messages[1], DeviceInfoResponse)\n assert messages[1].name == \"m5stackatomproxy\"\n remove()\n await conn.force_disconnect()\n await asyncio.sleep(0)", "def connect(self, callback=None):\n self.__load_callback(callback)\n if callback is not None:\n self.__connection.connect_async()\n else:\n self.__connection.connect()", "def connect(self):\n\t\tif cint(self.settings.use_imap):\n\t\t\treturn self.connect_imap()\n\t\telse:\n\t\t\treturn self.connect_pop()", "def connect():", "def connect(self):\n if 
self._zerorpc:\n return\n try:\n self._zerorpc = _ZeroRPCClient(connect_to=self._address, timeout=self._timeout)\n self._zerorpc._events.setsockopt(zmq.LINGER, 0) # when we teardown, we want to discard all messages\n except:\n self._zerorpc = None\n raise", "async def connect(self, **kwargs) -> bool:\n return await self._backend.connect(**kwargs)", "def connect(self):\n raise NotImplementedError", "def connect(self):\n raise NotImplementedError", "def test_sync_no_connection(self):\n response = support.run_command('sync')\n self.assert_has_error_code(response, 'NO_REMOTE_CONNECTION')", "def _connect(*args):\n return None, None", "def _connect(*args):\n return None, None", "def connect(self):\n if self._connect is None:\n raise MissingFunctionDefinition(\"connect method is not mapped\")\n if not self.connected:\n self._connect()", "async def test_nodeclient_pipe_connect():\n f = asyncio.Future()\n f.set_result(None)\n pipe = Mock()\n pipe.connect.return_value = f\n node_client = NodeClient(pipe, Mock())\n await node_client.connect()\n pipe.connect.assert_called_once()", "def _wrapConnect(self, callableObject: Callable) -> Callable:\n\n @staticmethod # type:ignore\n def call(*args: Any) -> None:\n callableObject(*args)\n self._oldConnect(*args)\n\n return call", "def connect_thread():\n return factory.connect_thread(SlaveService, remote_service = SlaveService)", "async def test_create_async_connector() -> None:\n connector = await create_async_connector()\n assert connector._loop == asyncio.get_running_loop()\n await connector.close_async()", "async def on_connected(self):\n self._connected = True", "def on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"Connected to broker\")\n client.connected_flag = True\n else:\n print(\"Connection failed\")\n client.connected_flag = False", "async def async_connect(self) -> None:\n try:\n self.endpoints = await self.async_get()\n except ClientResponseError as err:\n raise AuthorizationError from err\n except (TimeoutError, ClientError) as err:\n raise ConnectError(err) from err", "def on_connect(client, userdata, flags, rc):\n if rc == 0:\n client.subscribe(topic_subscribe)\n print(\"connected OK with returned code=\", rc)\n else:\n print(\"Bad connection with returned code=\", rc)", "def connect(self):\n broadcast(\n \"Connect\", \n self.connection, \n self.network\n )\n \n listen(self.address, self.connection, self.message_handler)", "def _connect(self):\n if not self.isChild:\n msg = \"SessionManager._connect: failed to spawn %s, timeout is : %s\" % (self.command, self.sshTimeout)\n try:\n self.spawnProc = pexpect.spawn(self.command,\n self.args, self.sshTimeout)\n if not self.spawnProc:\n raise SessionManagerException(msg)\n self._postConnect()\n self.isConnected = True\n except pexpect.TIMEOUT:\n raise SessionManagerException(\"Timeout while \" + msg)\n except pexpect.EOF:\n raise SessionManagerException(\"SessionManager._connect :End of File condition while \" + msg)\n except Exception, exc:\n raise SessionManagerException('SessionManager._connect: caught %s' % exc)\n else:\n cmdline = self.command + ' ' + string.join(self.args,' ')\n self.spawnProc.sendline(cmdline)\n self.isConnected = True", "def connection_callback(self, connected):\n self._connection_queue.put_nowait(connected)", "async def _communicate(self, action):\n # print(action, self.server_side)\n while True:\n\n try:\n return action()\n except ssl.SSLWantReadError:\n # May indicate data hasn't been transferred to the server yet.\n await self._send()\n await 
self._recv()\n except ssl.SSLWantWriteError:\n # await self._recv()\n await self._send()", "async def async_reconnect(self) -> None:\n await self.async_disconnect()\n await asyncio.sleep(1)\n await self.async_connect()", "def handle_connect(self):\n self.logger.debug('handle_connect()')\n\n # Default terminator\n self.set_terminator(CRLF)\n\n # See if we need to wrap the socket in SSL\n if self.use_ssl:\n if not _have_ssl:\n self.logger.error(\"SSL requested but not available\")\n raise ValueError(\"SSL not available\")\n\n self.logger.debug('Wrapping in SSL')\n self._socket = self.socket\n self.socket = ssl.wrap_socket(self._socket,\n do_handshake_on_connect=False)", "def _mqtt_on_connect(\n self,\n _mqttc: mqtt.Client,\n _userdata: None,\n _flags: dict[str, int],\n result_code: int,\n properties: mqtt.Properties | None = None,\n ) -> None:\n # pylint: disable-next=import-outside-toplevel\n import paho.mqtt.client as mqtt\n\n if result_code != mqtt.CONNACK_ACCEPTED:\n _LOGGER.error(\n \"Unable to connect to the MQTT broker: %s\",\n mqtt.connack_string(result_code),\n )\n return\n\n self.connected = True\n dispatcher_send(self.hass, MQTT_CONNECTED)\n _LOGGER.info(\n \"Connected to MQTT server %s:%s (%s)\",\n self.conf[CONF_BROKER],\n self.conf.get(CONF_PORT, DEFAULT_PORT),\n result_code,\n )\n\n self.hass.create_task(self._async_resubscribe())\n\n if birth := self.conf.get(CONF_BIRTH_MESSAGE, DEFAULT_BIRTH):\n\n async def publish_birth_message(birth_message: PublishMessage) -> None:\n await self._ha_started.wait() # Wait for Home Assistant to start\n await self._discovery_cooldown() # Wait for MQTT discovery to cool down\n # Update subscribe cooldown period to a shorter time\n self._subscribe_debouncer.set_timeout(SUBSCRIBE_COOLDOWN)\n await self.async_publish(\n topic=birth_message.topic,\n payload=birth_message.payload,\n qos=birth_message.qos,\n retain=birth_message.retain,\n )\n\n birth_message = PublishMessage(**birth)\n asyncio.run_coroutine_threadsafe(\n publish_birth_message(birth_message), self.hass.loop\n )\n else:\n # Update subscribe cooldown period to a shorter time\n self._subscribe_debouncer.set_timeout(SUBSCRIBE_COOLDOWN)", "async def connect(self):\n self._conn = await self._loop.run_in_executor(\n None, connector.Connector, self._creds\n )", "def run_and_propagate(self):\n # The timeline by which we must connect to the server and receiving all bytes.\n overall_deadline = self.__time() + RedirectorClient.CLIENT_CONNECT_TIMEOUT\n\n # Whether or not the client was able to connect.\n connected = False\n\n try:\n # Do a busy loop to waiting to connect to the server.\n # Note, for testing purposes, it is important we get the time before we invoke `connect`, since\n # the simulated calls to allow for connection advance the clock. By capturing the time before we\n # invoked `connect`, we can easily see if the connect state later changes (because the time is different\n # than our captured time).\n last_busy_loop_time = self.__time()\n while self._is_running():\n if self.__channel.connect():\n connected = True\n break\n\n self._sleep_for_busy_loop(\n overall_deadline, last_busy_loop_time, \"connection to be made.\"\n )\n last_busy_loop_time = self.__time()\n\n # If we aren't running any more, then return. 
This could happen if the creator of this instance\n # called the `stop` method before we connected.\n if not self._is_running():\n return\n\n if not connected:\n raise RedirectorError(\n \"Could not connect to other endpoint before timeout.\"\n )\n\n # Keep looping, accepting new bytes and writing them to the appropriate stream.\n while self._is_running():\n # Busy loop waiting for more bytes.\n if not self.__wait_for_available_bytes(overall_deadline):\n break\n\n # Read one integer which should contain both the number of bytes of content that are being sent\n # and which stream it should be written to. The stream id is in the lower bit, and the number of\n # bytes is shifted over by one.\n # 2->TODO struct.pack|unpack in python < 2.7.7 does not allow unicode format string.\n code = compat.struct_unpack_unicode(\"i\", self.__channel.read(4))[\n 0\n ] # Read str length\n\n # The server sends -1 when it wishes to close the stream.\n if code < 0:\n break\n\n bytes_to_read = code >> 1\n stream_id = code % 2\n\n content = self.__channel.read(bytes_to_read).decode(\"utf-8\", \"replace\")\n\n if stream_id == RedirectorServer.STDOUT_STREAM_ID:\n self.__stdout.write(content)\n else:\n self.__stderr.write(content)\n finally:\n if connected:\n self.__channel.close()", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "def connect(self):\n pass", "async def main():\n\n async def talk(bus, keys):\n \"\"\" generate some test messages \"\"\"\n\n for v in range(5):\n for k in keys:\n await asyncio.sleep(0.35)\n await bus.send(Message(\"local\", k, v))\n\n async def listen(bus, pattern):\n await asyncio.sleep(1.5)\n try:\n async for x in bus.listen(pattern):\n print(f\"listen({pattern}):\", x)\n except asyncio.CancelledError:\n pass\n\n async def monitor():\n \"\"\" echo bus status every 2 sec \"\"\"\n\n for n in range(6):\n await asyncio.sleep(2)\n print(\"monitor status:\", n, await ps.status())\n\n ps = BasicMessageBus()\n await ps.connect()\n\n tunnel_config = {\n \"ssh_address_or_host\": (\"robnee.com\", 22),\n \"remote_bind_address\": (\"127.0.0.1\", 6379),\n \"local_bind_address\": (\"127.0.0.1\",),\n \"ssh_username\": \"rnee\",\n \"ssh_pkey\": os.path.expanduser(r\"~/.ssh/id_rsa\"),\n }\n bridge = MessageBridge(\"cat.\", tunnel_config, ps)\n\n aws = (\n talk(ps, (\"cat.dog\", \"cat.pig\", \"cow.emu\")),\n listen(ps, \".\"),\n listen(ps, \"cat.\"),\n listen(ps, \"cat.pig\"),\n bridge.start(),\n monitor(),\n )\n await wait_graceafully(aws, timeout=15)\n\n await ps.close()\n \n print(\"main: done\")" ]
[ "0.65937483", "0.6521959", "0.6248328", "0.60831505", "0.6030129", "0.6018049", "0.58920634", "0.58730686", "0.58308804", "0.5825015", "0.58228856", "0.5811319", "0.58058876", "0.57596046", "0.57403564", "0.5738906", "0.5736388", "0.5714165", "0.5698439", "0.5685224", "0.56685305", "0.5665709", "0.5658102", "0.56237024", "0.56208843", "0.5600711", "0.5600711", "0.5600711", "0.55924636", "0.556868", "0.5563006", "0.5543376", "0.54859686", "0.54859686", "0.54859686", "0.54780823", "0.54728955", "0.54660684", "0.5457352", "0.5457186", "0.54414797", "0.544065", "0.5430403", "0.5417658", "0.5400974", "0.5363412", "0.53563476", "0.535577", "0.53535837", "0.53523976", "0.53403884", "0.532369", "0.53189087", "0.53181666", "0.5314743", "0.53132755", "0.5310409", "0.53097796", "0.5303546", "0.5302207", "0.5296315", "0.52928346", "0.5284331", "0.5280607", "0.5271342", "0.52693456", "0.5268836", "0.5268598", "0.52655196", "0.52655196", "0.5263693", "0.5263475", "0.5263475", "0.5257554", "0.525606", "0.5222643", "0.5215552", "0.521028", "0.5207565", "0.51952744", "0.51952493", "0.51844025", "0.5183354", "0.51819384", "0.518021", "0.51710767", "0.5169039", "0.5168926", "0.51668173", "0.515924", "0.5158357", "0.5157195", "0.5157195", "0.5157195", "0.5157195", "0.5157195", "0.5157195", "0.5157195", "0.5157195", "0.51561844" ]
0.7188033
0
Receives a list and a search term. Use a loop to go through the list and see if the string is there. If it is, return "string found"; if not, return "string not found".
def search_for_string(lst_str, stringy): if stringy in lst_str: return "Found string" else: return "string not found"
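The stored snippet above checks membership with the in operator and returns "Found string", which differs slightly from the wording in the prompt. For comparison, here is a minimal sketch of the literal loop-based version the prompt describes (same function name kept; the loop and the exact return strings follow the prompt's wording):

def search_for_string(lst_str, stringy):
    # Walk the list item by item, as the prompt describes.
    for item in lst_str:
        if item == stringy:
            return "string found"
    return "string not found"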
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def search_by_contains(self, tl):\n print(\"Search by string\")\n string = input(\"Please enter search string: \")\n return tl.findall_contains(string)", "def search(self, q):\n for x in self.strings:\n if q in x:\n return True\n \n return False\n\n\n pass", "def check_word_in_list_in_string(list, string):\n stuff = [string for word in list if(word in string)]\n return stuff", "def listsearch(query, item):\n fh = ''\n if not isinstance(item, six.string_types):\n fh = item[1]\n item = item[0]\n\n return bool(re.search(query, item) or\n re.search(query, fh))", "def not_found(tlist: list, search_string: str):\n fail_msg = \"No\"\n if \"actor\" in tlist:\n remain = len([t for t in tlist if t != 'actor'])\n if remain == 0:\n spot = \"\"\n if remain == 1:\n spot = \" or\"\n if remain == 2:\n spot = \",\"\n fail_msg = f\"{fail_msg} actor{spot}\"\n if \"indicator\" in tlist:\n remain = len([t for t in tlist if t != 'indicator'])\n fail_msg = f\"{fail_msg} indicator{' or' if remain > 0 else ''}\"\n if \"report\" in tlist:\n fail_msg = f\"{fail_msg} report\"\n fail_msg = f\"{fail_msg} matches found for {bold(search_string)}.\"\n\n raise SystemExit(fail_msg)", "def contains(str_or_list, val_to_find):\n \n return (val_to_find in str_or_list)", "def search(self, word):", "def linear_search(key, my_list):\n key = word.upper()\n my_list = dictionary_list\n if key in my_list:\n if not key:\n print(word)", "def find(self, search):\n if type(search) == str:\n search = [search]\n\n for s in search:\n if self.text.lower().find(s.lower()) != -1:\n return True\n\n return False", "def search(self, word: str) -> bool:\n # Checking if the word is present in the list.\n return word in self.mylist", "def search_by_string(self):\n print(\"*** String Search ***\\n\")\n print(\"Enter a search string.\\n\")\n print(\"- NAME and NOTE will be searched for all tasks -\")\n print(\"- Searching IS case-sensitive, but partial matches will be returned -\\n\")\n while True:\n try:\n search_string = input(\">>> \")\n results = self.regex_entry_search(search_string)\n except re.error:\n print(\"Couldn't parse search query. 
Please try again.\")\n else:\n clear_screen()\n print(f\"Found {len(results)} matches for string \\\"{search_string}\\\"...\\n\")\n self.print_selected_entries(results)\n break", "def linearsearch(input, value):\n count = 0\n for i in input:\n if (value == i):\n count += 1\n if count > 0:\n return \"Value, {0}, is in the list\".format(value)\n else:\n return \"Value, {0}, cannot be found\".format(value)", "def finddocname(string):\r\n for x in doclist:\r\n foundvar = f\"-->Doc name = {x.title()}\"\r\n if x in string:\r\n print(foundvar)\r\n break", "def contains(self, searchstr: str):\n for x in self.sa:\n if searchstr in x:\n return True\n pass", "def search(query_string):", "def exact_search(string, row):\n clear_screen()\n found = False\n for item in row:\n if string.lower() in item[\"Task\"].lower() \\\n or string.lower() in item[\"Notes\"].lower():\n print_entry(item)\n found = True\n if found is False:\n print(\"No Entries Found..\")", "def is_input_list(sentence_word,input_list):\r\n\t\r\n\tfor input_word in input_list:\r\n\t\tif input_word in sentence_word:\r\n\t\t\treturn input_word\r\n\t\t\r\n\treturn \"none\"", "def findentity(string):\r\n for x in entitylist:\r\n if x in string:\r\n print(f\"(Doc.{i})--Entity = {x.title()}\")\r\n break", "def pageContains(page, strList):\n for text in strList:\n if text in page['data']:\n logging.log(5, 'Found string %s' % text)\n return True\n\n return False", "def search(self, term):", "def __find_string_in_response(self, fullResponse, searchFor):\n check = True\n rawResponse = fullResponse;\n if \"result\" not in rawResponse.text:\n check = False\n else:\n responseJSON = rawResponse.json()\n length_responseJSON = len(responseJSON[\"result\"])\n for i in range(0,length_responseJSON,1):\n check = searchFor in responseJSON[\"result\"][i][\"first_name\"]\n if check == False:\n return check\n return check", "def find_match(people, STRs):\n for person in people:\n if compare_str(person, STRs):\n return person[\"name\"]\n return \"No match\"", "def search_keyword_in_list(keyword, input_list):\n\n match_list = []\n for element in input_list:\n if element.__name__ == keyword:\n if WarriorCliClass.mock or WarriorCliClass.sim:\n if element.__dict__.get(\"mockready\") is None:\n pNote_level(\"The selected keyword {} isn't supported in trial mode\".format(element.__name__), \"ERROR\")\n else:\n pNote_level(\"Keyword {} is being mocked\".format(element.__name__), \"INFO\")\n match_list.append(element)\n else:\n match_list.append(element)\n return match_list", "def list_has_substring(substring, l):\n found_substring = False\n for item in l:\n if substring in item:\n found_substring = True\n break\n\n return found_substring", "def substring_in_list(s, varlist):\n if varlist is None:\n return False\n is_sub = False\n for v in varlist:\n if v in s:\n is_sub = True\n break\n return is_sub", "def find_item(value: str, items: WebElements) -> WebElement:\n for item in items:\n if value in item.text.lower():\n return item", "def index_containing_substring(search_list, substring, multiples=True):\n num_found = 0\n list_index = -1\n\n for index, s in enumerate(search_list):\n if substring in s:\n if num_found == 0:\n list_index = index\n\n num_found += 1\n\n if list_index == -1:\n raise ValueError(search_list.index(substring))\n else:\n if not multiples and num_found > 1:\n raise MultipleStringError(\"Multiple {0} found in search_list.\".format(substring))\n else:\n return list_index", "def word_in_list(word_list):\n word_set = set(word_list)\n inp_word = \"\"\n 
while inp_word != \"/q\":\n if inp_word == \"/q\":\n break\n inp_word = input(\"What word do you want to check? ('/q' to stop) > \")\n if inp_word in word_set:\n print(f\"Word '{inp_word}' is in the list!\")\n else:\n print(f\"Cannot find word '{inp_word}' in the list.\")", "def find_by_exact_match(self):\n while True: \n self.task_name_search = input(\"What is the keyword/s you are looking\"\n \" for? Press Q to quit to the main screen: \").strip()\n if self.task_name_search.upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n x = self.dict_list\n return x\n self.find_by_exact_match_list = []\n count = 0\n for i in self.dict_list:\n for key, value in i.items():\n if re.search(self.task_name_search, value):\n self.find_by_exact_match_list.append(i)\n count+=1\n break\n if count == 0:\n print(\"There were no matches.\")\n else:\n self.display_style(self.find_by_exact_match_list)\n break\n self.del_or_edit()", "def find_str(self, find_exp, where):\n found = False\n for item in where:\n if find_exp in str(item):\n self.assertTrue(True)\n found = True\n break\n if not found:\n self.assertTrue(False)", "def match_start_string(list_to_search, substring):\n # Whitespace is stripped before and after the substring,\n # but not within (e.g. \" New York City \" -> \"New York City\").\n clean_substring = substring.lstrip().rstrip().lower()\n items_found = []\n ([items_found.append(item) for item in list_to_search\n if clean_substring == item[:len(clean_substring)].lower()])\n return items_found", "def find_possible(search_string):\n codes = []; names = []\n search_string = search_string.lower()\n for c,n in name_given_code.items():\n\n if (search_string in n):\n codes.append(c)\n names.append(n)\n\n return codes, names", "def search_single_word(word):\n # YOUR CODE HERE #\n pass # delete this when you write your code", "def search_multiple_words(words):\n # YOUR CODE HERE #\n pass # delete this when you write your code", "def find(ss, list_seq):\n\tfor item in list_seq:\n\t\tif item in ss:\n\t\t\treturn True\n\treturn False", "def find_item_by_name(list_, namegetter, name):\n matching_items = [i for i in list_ if namegetter(i) == name]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name) + '$', re.IGNORECASE)\n matching_items = [i for i in list_ if prog.match(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name))\n matching_items = [i for i in list_ if prog.match(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name), re.IGNORECASE)\n matching_items = [i for i in list_ if prog.match(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name))\n matching_items = [i for i in list_ if prog.search(namegetter(i))]\n if len(matching_items) == 0:\n prog = re.compile(re.escape(name), re.IGNORECASE)\n matching_items = [i for i in list_ if prog.search(namegetter(i))]\n return matching_items", "def grep(string,list):\n import re\n expr = re.compile(string)\n return filter(expr.search,list)", "def check_the_list_for_matching(checked_list: list, phrase_to_match: str) -> bool:\n for word in checked_list:\n if phrase_to_match.startswith(word):\n return True\n return False", "def search_list(search):\n fun_list = basic_list_exception.make_list()\n for x in range(len(fun_list)):\n try:\n location = fun_list.index(search)\n return location\n except ValueError:\n return -1", "def find(items, term, key=None):\n if key is None:\n key = lambda other: term == other\n \n for item in items:\n if key(item):\n return item", "def search_term():\n 
search = input(\"Enter term or string: \")\n entries = select_entries()\n entries = entries.where(\n (Entry.task_name.contains(search)) |\n (Entry.notes.contains(search)))\n view_entries(entries)\n return entries", "def check_words(title, wordlist, verbose=False):\n\tfor word in wordlist:\n\t\tif title.find(word) >= 0:\n\t\t\tif verbose:\n\t\t\t\tprint(\"\\t\\tFOUND '\"+word+\"' IN:\", title)\n\t\t\treturn True\n\treturn False", "def make_query(term):\n def search(text):\n s=term.lower()\n if s in text.lower():\n return True\n return False\n return search", "def search_keyword(self,keyword):\n for entry in self.available_fields_list:\n for x in entry:\n if keyword in x:\n print(entry)\n break\n return", "def lookup(self, term):\n results = []\n lookup_term = term.lower()\n for char, latex, description, user_description in self.entries:\n if (char == term or\n latex.startswith(lookup_term) or\n latex[1:].startswith(lookup_term) or\n lookup_term in description.lower() or\n (user_description and lookup_term in user_description)):\n results.append((char, latex, description, user_description))\n return results", "def search(text, languages, results):\n if not languages:\n return results\n else:\n language = languages.pop()\n match = re.search(language, text)\n if match:\n results.add(language)\n text = u''.join(text.split(language))\n return search(text, languages, results)", "def search_any(self, word_list):\n # Same as search_all except uses the built-in any()\n return [k for k,v in self.data_values.iteritems() \n if any(w.lower() in v.lower() for w in word_list)]", "def search(self):\n if self.substring in [None, \"\"]:\n print(\"Invalid Value For Substring\")\n elif self.string in [None, \"\"]:\n print(\"Invalid Value For String\")\n elif len(self.substring) > len(self.string):\n print(\"Length of Substring Less Than String\")\n else:\n posn = self.comparison()\n if posn == -1:\n print(\" Substring Not Found :: Search Failed\")\n else:\n print(\" Substring Found at Position --> \", posn+1)", "def search(wordslst, strlst):\r\n word_appearance_list = {}\r\n lst = []\r\n for word in wordslst:\r\n num_of_appearance = 0\r\n for string in strlst:\r\n if word in string:\r\n num_of_appearance += 1\r\n place = string.find(word)\r\n updated_str = string[place + 1:]\r\n while word in updated_str: # in case word more than once in\r\n # string we cut the first letter off the string (so we\r\n # don't count the same appearance twice) and check if the\r\n # word appears again.\r\n place = updated_str.find(word)\r\n updated_str = updated_str[place + 1:]\r\n num_of_appearance += 1\r\n if num_of_appearance > 0: # To eliminate adding words that don't\r\n # appear.\r\n word_appearance_list.update({word: num_of_appearance})\r\n sorted_keys = sorted(word_appearance_list.keys()) # organise the words\r\n # alphabetically\r\n for index in sorted_keys:\r\n lst.append((index, str(word_appearance_list[index])))\r\n return lst", "def search(self, find_val):\n return False", "def simple_text_search(s, t):\n return any([s == t[i:i + len(s)] for i in range(len(t) - len(s))])", "def search(words):\n newlist = [w for w in words if 'son' in w]\n return newlist", "def parameter_finder(target_list, search_list, msgflag=False, exact=False):\n target_list = [x.lower() for x in target_list]\n\n indexes = []\n\n if isinstance(search_list, str):\n cont = 0\n search_list = search_list.lower()\n for t in target_list:\n if exact == False and search_list in t:\n indexes.append(cont)\n elif exact == True and search_list == t:\n 
indexes.append(cont)\n cont += 1\n if isinstance(search_list, list):\n search_list = [x.lower() for x in search_list]\n\n for s in search_list:\n s = str(s)\n for cont, t in enumerate(target_list):\n if exact == False and s in t:\n print((s, t))\n indexes.append(cont)\n elif exact == True and s == t:\n print((s, t))\n indexes.append(cont)\n\n if msgflag == True:\n length = len(indexes)\n if length > 1: print(\"There were several ocurrences\")\n if length == 0: print(\"No ocurrences found\")\n\n return indexes", "def find(self, args):\n curpl = self.ui.leftwin.highlighted().data\n if not args:\n if not self.find_list:\n self.err_print('At least one argument required')\n return\n else:\n term = args[0]\n if len(args) == 1:\n key = curpl.sort_key\n elif len(args) > 1:\n key = args[1]\n if key not in song.tags:\n self.err_print('Invalid key: ' + key)\n return\n\n self.find_list = (ii for ii, item in enumerate(curpl.data) if item[key] == term)\n\n try:\n ind = next(self.find_list)\n except StopIteration:\n self.err_print('Not found.')\n return\n\n self.ui.jump_to_ind(ind, len(curpl.data), self.ui.rightwin)\n\n self.ui.switch_view_right()", "def _selStrFromList(item_str, lst_str):\n strs = [s for s in lst_str if item_str == s]\n if len(strs) != 1:\n strs = [s for s in lst_str if s in item_str]\n if len(strs) != 1:\n raise ValueError(\"Cannot find %s uniquely in %s\"\n % (item_str, str(lst_str)))\n return strs[0]", "def check_for_strings(text, strings):\n for string in strings:\n if text.find(string) >= 0:\n return True\n return False", "def match(self, word_list, expecting):\n\n if word_list:\n\n word = word_list.pop(0)\n\n if word[0] == expecting:\n return word\n else:\n return None\n else:\n return None", "def str_in_list(l1, l2):\n return [i for i in l1 if i.lower() in l2]", "def search(self, string):\n fid = open(os.path.join(self.output_path, \"%s.html\" % TEST_FILE_STEM), \"r\")\n found = False\n for line in fid.readlines():\n if re.search(string, line):\n found = True\n break\n fid.close()\n return found", "def linear_search(vlist, srchval): # somewhat different from book\n#Look at each item in list. 
If it equals the value you are looking for, stop.\n # linear_search_2.py\n index = 0\n for item in vlist:\n if item == srchval:\n return index # implicit break\n index += 1\n \n return -1", "def search(pattern, string):\n result = []\n if re.search(pattern, string):\n result.append(string)\n return result", "def search(self, w: str) -> bool:\n if not w:\n return self.end\n return w[0] in self.d and self.d[w[0]].search((len(w) > 1 and w[1:]) or '')", "def search(ctx, search_string, json):\n if json:\n _search_json(ctx.obj['client'], search_string)\n else:\n _search(ctx.obj['client'], search_string)", "def search(self, q):\n if len(q) > self.n: #checks to see if the length of q is larger than n\n raise Exception(\"q cannot be larger than n\") #raises an exception if it is\n return mybinsearch(self.sortedList, q, self.ststr) >= 0 # returns True if q is found in the list and False if it's not", "def contains(listWords, target):\n wordObj = None\n for i in listWords:\n if i.name == target:\n wordObj = i\n break\n return wordObj", "def search(self, word: str) -> bool:\n node = self\n for c in word:\n node = node.d.get(c)\n if not node:\n return False\n return node.end", "def search_string(self, string, ignore_case=False):\n for ypos in range(self.model_dimensions[\"rows\"]):\n line = self.string_get(ypos + 1, 1, self.model_dimensions[\"columns\"])\n if ignore_case:\n line = line.lower()\n if string in line:\n return True\n return False", "def substring_search(word, collection):\n return [item for item in sorted(collection) if item.startswith(word)]", "def search_string():\r\n global file, split, detail, search\r\n result = search.get()\r\n\r\n file = open('Question_pool.txt','r')\r\n for line in file.readlines():\r\n answer = line.split(',')\r\n if result in answer[0] or result in answer[1] or result in answer[2] or result in answer[3] or result in \\\r\n answer[4] or result in answer[5] or result in answer[6] or result in answer[7] or result in answer[8]:\r\n Label(search_question_frame, text=answer[0:9]).grid()", "def search_all(self, word_list):\n return [k for k,v in self.data_values.iteritems() \n if all(w.lower() in v.lower() for w in word_list)]", "def search(self, title):\n close_matches = self.get_close_matches_by_title(title)\n count = 0\n for item in self.item_list.values():\n if item.title in close_matches:\n print(item)\n count += 1\n if count == 0:\n print(\"No result found.\")", "def contains(self, searchstr: str):\n index = mybinsearch(self.sarray, searchstr, self.comp)\n if index < 0:\n return False\n return True", "def search(pattern):\n query = pattern.lower()\n videolist = getvideolist()\n results = []\n for video in videolist:\n for value in video.values():\n if query in str(value).lower():\n results.append(Colors.YELLOW + video[\"file\"] + Colors.END + \" - \" + video[\"source\"] + \" - \" +\n video[\"title\"])\n if results:\n for result in results:\n safeprint(result)\n else:\n safeprint(\"No video matching the given query was found.\")", "def exec_list_contains(order_type):\n input_list = get_list_input()\n result = list_contains(input_list, order_type)\n print(result)", "def search(self, word):\n if len(word) not in self.length_set:\n return False\n for i in self.mutate(word):\n if i in self.s:\n return True\n return False", "def lookup_search_term():\n while True:\n search_query = input('Show entries containing (in name or notes): ')\n if validate_lookup_search_term_format(search_query):\n break\n print('** Please enter search term **')\n return 
(Entry.select().where(Entry.employee_name.contains(search_query)) |\n Entry.select().where(Entry.task_notes.contains(search_query)))", "def exact_match(self):\n text_to_match = input(\"Enter the text to search for> \")\n return text_to_match", "def find_exact1(value: str, items: WebElements) -> WebElement:\n for item in items:\n if value == item.text.split('\\n')[0].lower():\n return item", "def search(self, word):\n level = self.trie\n for c in word:\n if c in level:\n level = level[c]\n else:\n return False\n return self.end in level", "def lookup(index, keyword):\n for item in index:\n if item[0] == keyword:\n return item[1]\n return []", "def search_keyword(self):\n\n task_name = input(\"\\nEnter a search term:\")\n\n if len(task_name) == 0:\n input(\"\\nSearch Term cannot be empty!\\n\")\n return self.search_keyword()\n else:\n return task_name", "def search(word, current_directory, search_result_list=search_list):\n if search_result_list:\n for counter in range(len(search_result_list)):\n search_result_list.pop()\n if current_directory:\n searcher_object = CompleteSearch(current_directory, word)\n searcher_object.start()\n searcher_object.join()\n return remove_equals(search_result_list)\n\n else:\n for cleaner in range(len(search_result_list)):\n search_result_list.pop()\n for driver in drivers():\n searcher_object = CompleteSearch(driver, word)\n searcher_object.start()\n return remove_equals(search_result_list)", "def search(self, word):\n def r_search(word,i,d):\n if len(word) <= i:\n return True\n \n if d == 0:\n return False\n \n return (word[i] in d) and r_search(word,i+1,d[word[i]])\n \n tri = self.root.d\n if len(word) == 0: \n return True\n \n if len(tri) == 0:\n return False\n \n return r_search(word + '$',0,tri)", "def SearcherInDescription(input):\n\n logs.logger.debug(\n \"Start to search in description.\")\n try:\n NormalizedMySearchedWords = unicodedata.normalize(\n 'NFKD', input.upper()).encode('ASCII', 'ignore')\n ListOfAllCost = GetAllCostsFromDB()\n ListOfSearchedCosts = []\n for item in ListOfAllCost:\n NormalizedMyText = unicodedata.normalize(\n 'NFKD', item.description.upper()).encode('ASCII', 'ignore')\n if NormalizedMyText.__contains__(NormalizedMySearchedWords):\n ListOfSearchedCosts.append(item)\n logs.logger.info(\n \"You search in description\")\n return ListOfSearchedCosts\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def search(self, word):\n now = self.tree\n for i in word:\n if i in now:\n now = now[i]\n else:\n return False\n return True if 'end' in now else False", "def findJones(aList):\r\n return aList[1] == 'Jones'", "def ask_search():\n\n print(\n\"\"\"\nPlease enter your desired keywords for the lexical dispersion analysis. 
For quick templates, enter the following keys:\n\ntemplate_insurance: insurance identifier terms\ntemplate_contract: contract identifier terms\ntemplate_privacy: privacy contract identifier terms\n\nTo stop entering keywords, simply enter an empty input.\n\"\"\"\n )\n\n #asking user for search terms\n ask = True\n search = []\n\n while ask == True:\n temp = input(\"Enter a keyword: \")\n if temp == \"\":\n break\n elif temp == \"template_insurance\":\n search = [\"treatment\", \"premium\", \"claim\", \"benefit\", \"exclusions\", \"charges\", \"payment\", \"occupation\"]\n break\n elif temp == \"template_contract\":\n search = [\"defined\",\"liability\",\"service\",\"confidential\",\"terminate\",\"law\", \"breach\"]\n break\n elif temp == \"template_privacy\":\n search = [\"purpose\",\"personal\",\"data\",\"collect\",\"transfer\",\"services\",\"contact\",\"provide\",\"authority\",\"marketing\",\"retention\",\"consent\",\"analysis\",\"analytics\"]\n break\n else:\n search.append(temp)\n\n return search", "def _search(client, search_string):\n if search_string is None:\n logger.info(uxstring.UxString.list_all, fg=\"green\")\n\n current_page = 0\n total_pages = get_search_results(client, search_string, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = click.prompt(uxstring.UxString.pagination,\n type=str)\n next_page = get_next_page(prompt_resp, current_page)\n if next_page == -1:\n model_id = prompt_resp\n display_search_info(client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n elif next_page != current_page:\n get_search_results(client, search_string, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return", "def search(self, value):\n pass", "def find_match(name, dictionary):\n if name == '':\n # raise \"Didn't find name\"\n return False\n search_name = (' ').join(name.split(' ')[:-1])\n if search_name in dictionary:\n return search_name\n else:\n return find_match(search_name, dictionary)", "def search_word(word : str = typer.Argument(..., help=\"Searches the trie if the word exists\")):\n response_url = url + \"/search/\" + word\n response = requests.get(response_url)\n typer.echo(response.json()[\"status\"])", "def search(self, word):\n if not word:\n return False\n if word[0] not in self.trie:\n return False\n cur = self.trie[word[0]]\n for char in word[1:]:\n if char not in cur.nexts:\n return False\n cur = cur.nexts[char]\n return (cur and cur.isTerm) == True", "def word_search(doc_list, keyword):\n indices = []\n # Iterate through the indices (i) and elements (doc) of documents\n for i, doc in enumerate(doc_list):\n # Split the string doc into a list of words (according to whitespace)\n tokens = doc.split()\n # Make a transformed list where we 'normalize' each word to facilitate matching.\n # Periods and commas are removed from the end of each word, and it's set to all lowercase.\n normalized = [token.rstrip('.,').lower() for token in tokens]\n # Is there a match? 
If so, update the list of matching indices.\n if keyword.lower() in normalized:\n indices.append(i)\n return indices", "def doFind(self, str):\n for value in self.doId2do.values():\n if repr(value).find(str) >= 0:\n return value", "def lookup(name, phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n match = False\n for entry_name in phonebook_data:\n if name.lower() in entry_name.lower():\n match = True\n print entry_name, phonebook_data[entry_name]\n\n if not match:\n print \"No matches found.\"", "def search(self, query_string):\n terms = query_string.lower().split()\n result = set(self.wordDict[terms[0]])\n if len(result) == 0:\n return list()\n else:\n for t in terms[2:]:\n records_containing_t = self.wordDict[t]\n result = result.intersection(records_containing_t)\n return [self.get_record_dict(id).getTuple() for id in result]", "def search(self, word):\n lenw = len(word)\n if lenw not in self.bag: return False\n return any([self.equal_to(word, item) for item in self.bag[lenw]])", "def search(x, e):\n match = vmatch if type(list(x)[0]) == str else where\n ret = unnest(logical2idx(match(list(x), e)))\n if ret == []:\n return False\n return x[e] if type(x) == dict else x[ret[0]]", "def word_finder(word, text):\r\n word = word.lower()\r\n text = str(text).lower()\r\n match = re.search(word, text)\r\n if match:\r\n return True\r\n return False", "def search(self, tokens: List[str]) -> bool:\n item = \"\".join(tokens)\n if item in self._masked_items:\n return False\n\n cur = self._root\n for token in tokens:\n if token not in cur.children:\n return False\n cur = cur.children[token]\n\n return cur.is_term" ]
[ "0.7277806", "0.7085442", "0.70570374", "0.7042019", "0.6853034", "0.68284607", "0.6799059", "0.6741653", "0.67367554", "0.6710084", "0.6704459", "0.6700498", "0.65898585", "0.65151054", "0.6475276", "0.6447157", "0.64241886", "0.6397633", "0.6358592", "0.6356772", "0.6352888", "0.63513005", "0.6316425", "0.62975174", "0.6293719", "0.62935233", "0.628278", "0.6274371", "0.6268759", "0.6230866", "0.6227182", "0.6211877", "0.6153306", "0.612429", "0.6124035", "0.6114204", "0.6110645", "0.6056243", "0.6054092", "0.6052948", "0.60523665", "0.60294425", "0.60252976", "0.60231876", "0.60211843", "0.5995878", "0.5993408", "0.59846437", "0.5978851", "0.5926362", "0.5922007", "0.5913361", "0.5899539", "0.5891006", "0.58847195", "0.5884427", "0.5872563", "0.58649915", "0.58468896", "0.58462244", "0.58417296", "0.58309126", "0.5807095", "0.5795001", "0.57928824", "0.57857746", "0.5768846", "0.57680255", "0.57666546", "0.57361823", "0.57321715", "0.57292086", "0.57160383", "0.571473", "0.57140875", "0.5710084", "0.570977", "0.5706305", "0.5704582", "0.5703608", "0.56913394", "0.56905663", "0.56884396", "0.5676595", "0.5670985", "0.5668459", "0.5667711", "0.5654752", "0.5651741", "0.5635433", "0.5634254", "0.56303114", "0.56246424", "0.5622506", "0.5620415", "0.56174356", "0.5616223", "0.56148183", "0.56146854", "0.5612596" ]
0.8100969
0
expression = andExpr { "or" andExpr }
def expression( ):#DOUBLE CHECK THIS tok = tokens.peek( ) if debug: print("Expression: ", tok) left = andExpr( ) #does the left side of the grammar tok = tokens.peek( ) while tok == "or": #checks to see if there is the token or and will preform what is inside the curly bracket since it is a series tokens.next() right = andExpr( ) left = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE THIS TO STRING CAUSE ITS "or" tok = tokens.peek( ) return left
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_expression_and_or(self):\n\n # Checks several examples with \"and\" and \"or\" operators\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_3\", \"multi_host\": False}], [\"networks\"]))\n self.assertTrue(value, \"complex expression (1)\")\n\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\", \"multi_host\": True}], [\"networks\"]))\n self.assertTrue(value, \"complex expression (2)\")\n\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\", \"multi_host\": False}], [\"networks\"]))\n self.assertFalse(value, \"complex expression (3)\")", "def AND(*expressions):\n return {'$and': list(expressions)}", "def OR(*expressions):\n return {'$or': list(expressions)}", "def and_(a, b):", "def and_or_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\t# TODO: The next set of lines will fail at a specific case\n\t\tif quad.operator == 10 :\n\t\t\tcls.set_address_value(quad.result, (left_op and right_op))\n\t\telif quad.operator == 11 :\n\t\t\tcls.set_address_value(quad.result, (left_op or right_op))", "def or_(a, b):", "def _conjunction_op(spec, *expressions):", "def _conjunction_op(spec, *expressions):", "def __or__(self, query):\r\n return Or([self, query]).normalize()", "def and_(*args, **kwargs):\n ...", "def QueryOR(db):\n return Query(db, orelse=True)", "def logical_or(lhs, rhs):\n return _make.logical_or(lhs, rhs)", "def andExpr( ): #DOUBLE CHECK THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"andExpr: \", tok)\n\tleft = relationalExpr( ) #does the left side of the grammar\n\ttok = tokens.peek( )\n\twhile tok == \"and\": #checks to see if there is the token \"and\" and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = relationalExpr( )\n\t\tleft = BinaryExpr(tok, left, right)#MIGHT HAVE TO CHANGE TO STRING \n\t\ttok = tokens.peek( )\n\treturn left", "def _and(it):\n return 1 if it[0]==1 and it[1]==1 else 0", "def Nand(*args):\n return Not(And(*args))", "def Or(*conditions):\n def orPred(db):\n from functools import reduce\n return reduce(lambda result, c: result.add(c(db)),\n conditions, Result())\n\n return orPred", "def RewriteOR(self, left, right):\n return None", "def or_list(conditionList):\n return functools.reduce(numpy.logical_or, conditionList)", "def and_list(conditionList):\n return functools.reduce(numpy.logical_and, conditionList)", "def convert_broadcast_logical_or(node, **kwargs):\n return create_basic_op_node('Or', node, kwargs)", "def test_searchOr(self):\n return self._messageSetSearchTest('OR 1 2', [1, 2])", "def test_sqpp_paren_expr1_and_expr2_or_expr3_WORDS_equiv(self):\n self.assertEqual(self.parser.parse_query('(expr1 and expr2) or expr3'),\n self.parser.parse_query('(expr1 + expr2) | expr3'))", "def AND(r, s):\n return lambda l, i: r(l, i) and s(l, i)", "def combine_expressions(expressions, relation='AND', licensing=Licensing()):\n if not expressions:\n return\n\n if not 
isinstance(expressions, (list, tuple)):\n raise TypeError(\n 'expressions should be a list or tuple and not: {}'.format(\n type(expressions)))\n\n # Remove duplicate element in the expressions list\n expressions = list(dict((x, True) for x in expressions).keys())\n\n if len(expressions) == 1:\n return expressions[0]\n\n expressions = [licensing.parse(le, simple=True) for le in expressions]\n if relation == 'OR':\n return str(licensing.OR(*expressions))\n else:\n return str(licensing.AND(*expressions))", "def test_or(\n self,\n left: Result[int, str],\n right: Result[int, str],\n exp: Result[int, str],\n ) -> None:\n assert left.or_(right) == exp", "def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)", "def or_(*args, **kwargs):\n ...", "def __or__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return Or(self, other)", "def test_sqpp_paren_expr1_and_expr2_or_expr3_WORDS(self):\n self.assertEqual(self.parser.parse_query('(expr1 and expr2) or expr3'),\n ['+', 'expr1 + expr2', '|', 'expr3'])\n #['+', '+ expr1 | expr3', '+', '+ expr2 | expr3'])", "def __or__(self, other):\n return self.fam.c_binop('or', self, other)", "def OR(f, g):\n def _or(x):\n return f(x) | g(x)\n return _or", "def test_orOperator(self):\n xp = XPathQuery(\"//bar[@attrib5='value4' or @attrib5='value5']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar5, self.bar6])", "def test_predicate9(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar[(xpb.attr('foo') == 'bar') | xpb.foobar]\n exp = '/foo/bar[@foo = \"bar\" or /foobar]'\n self.assertEqual(xp.tostring(), exp)", "def conjuncts(s):\n return dissociate(\"AND\", s)", "def match(ctx, expr):\n if \"$or\" in expr:\n for x in expr[\"$or\"]:\n if match(ctx, x):\n return True\n return False\n else:\n for x in expr:\n if x not in ctx:\n return False\n if isinstance(expr[x], dict):\n for m in expr[x]:\n mf = matchers.get(m)\n if mf:\n if not mf(ctx[x], expr[x][m]):\n return False\n else:\n return False\n elif ctx.get(x) != expr[x]:\n return False\n return True", "def test_multi_shortcut():\n age = User.age >= 3\n condition = bloop.condition.And(age)\n assert condition.conditions == [age]\n\n condition = bloop.condition.Or(age)\n assert condition.conditions == [age]", "def join_with_or(values) -> str:\n return join_with_and(values, 'or')", "def And(*conditions):\n def andPred(db):\n from functools import reduce\n return reduce(lambda result, c: c(result),\n conditions, db)\n\n return andPred", "def logical_or(x1, x2, f=None):\n return _cur_framework(x1, f=f).logical_or(x1, x2)", "def _do_conjunction(self, _and=(\"and\", \"e\", \"en\", \"et\", \"und\", \"y\")):\n w = self.words\n if len(w) > 2 and w[-2].type == \"CC\" and w[-2].chunk is None:\n cc = w[-2].string.lower() in _and and AND or OR\n ch1 = w[-3].chunk\n ch2 = w[-1].chunk\n if ch1 is not None and \\\n ch2 is not None:\n ch1.conjunctions.append(ch2, cc)\n ch2.conjunctions.append(ch1, cc)", "def _disjunction_op(spec, *expressions):", "def test_and(\n self,\n left: Result[int, str],\n right: Result[int, str],\n exp: Result[int, str],\n ) -> None:\n assert left.and_(right) == exp", "def logical_and(lhs, rhs):\n return _make.logical_and(lhs, rhs)", "def _logical_and(*args):\n args_ = [_static_value(x) for x in args]\n if any(x is not None and not bool(x) for x in args_):\n return constant_op.constant(False)\n if all(x is not 
None and bool(x) for x in args_):\n return constant_op.constant(True)\n if len(args) == 2:\n return math_ops.logical_and(*args)\n return math_ops.reduce_all(args)", "def OR(r, s):\n return lambda l, i: r(l, i) or s(l, i)", "def _prefix_and(*exprs, **kwargs):\n anded = ' AND '.join('(%s)' % expr for expr in exprs if expr)\n if len(anded) == 0:\n return ''\n return kwargs.get('prefix', 'WHERE ') + anded", "def Expression(self, paren=False):\n left = self.Conjunction(paren)\n while self.currtok[1].name == \"OR\":\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Conjunction()\n left = BinaryExpr(op, left, right, paren)\n return left", "def distribute_and_over_or(s):\n if s.op == '|':\n s = associate('|', s.args)\n if s.op != '|':\n return distribute_and_over_or(s)\n if len(s.args) == 0:\n return FALSE\n if len(s.args) == 1:\n return distribute_and_over_or(s.args[0])\n conj = find_if((lambda d: d.op == '&'), s.args)\n if not conj:\n return s\n others = [a for a in s.args if a is not conj]\n rest = associate('|', others)\n return associate('&', [distribute_and_over_or(c|rest)\n for c in conj.args])\n elif s.op == '&':\n return associate('&', map(distribute_and_over_or, s.args))\n else:\n return s", "def Conjunction(self, paren=False):\n left = self.Equality(paren)\n while self.currtok[1].name == \"AND\":\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Equality(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def OR(self, operand2, *operands):\n\t\treturn OR((self, operand2) + operands)", "def check_for_or(sql_str):\r\n try:\r\n if rex.search(\"WHERE\", sql_str, rex.IGNORECASE):\r\n if rex.search(' or ', sql_str.split('WHERE')[1], rex.IGNORECASE) is not None:\r\n raise sqlErr(\"OR Detected!\")\r\n except Exception as e:\r\n raise e", "def test_multi_chains_flatten():\n age = User.age >= 3\n name = User.name == \"foo\"\n email = User.email != \"bar\"\n\n and_condition = bloop.condition.Condition()\n or_condition = bloop.condition.Condition()\n for c in [age, name, email]:\n and_condition &= c\n or_condition |= c\n assert and_condition == bloop.condition.And(age, name, email)\n assert or_condition == bloop.condition.Or(age, name, email)", "def logical_and(x1, x2, f=None):\n return _cur_framework(x1, f=f).logical_and(x1, x2)", "def anyarg(symbol, fact, expr):\n return Or(*[fact.subs(symbol, arg) for arg in expr.args])", "def or_where(self, wheres: List[Union[Tuple, BinaryExpression]]) -> B[B, E]:", "def disjuncts(s):\n return dissociate(\"OR\", s)", "def __or__(self, other: Any) -> Operators:\n return self.operate(or_, other)", "def _or(cls, arg1, arg2):\n return arg1 or arg2", "def f_or(*args):\n f = Or(*args).factor()\n return f if f in B else f.factor()", "def AND(f, g):\n def _and(x):\n return f(x) & g(x)\n return _and", "def test_singleOr(self):\n\n x1 = t.Or([t.Exactly(\"x\")])\n x = t.Exactly(\"x\")\n self.assertEqual(writePython(x), writePython(x1))", "def _sql_where(cur, tables, andalso, orelse, prefix=None, aggregate=False):\n disjunctions = []\n andsql = _cond_where_sql(cur, andalso, tables, prefix=prefix,\n aggregate=aggregate)\n andsql = ' AND '.join(andsql)\n\n if len(andsql) > 0:\n andsql = '(%s)' % andsql\n disjunctions.append(andsql)\n disjunctions += _cond_where_sql(cur, orelse, tables, prefix=prefix,\n aggregate=aggregate)\n\n if len(disjunctions) == 0:\n return ''\n return '(%s)' % (' OR '.join(disjunctions))", "def _and(cls, arg1, arg2):\n return arg1 and arg2", "def and_bexp(env, node):\n left_value = 
node.left.interpret(env)\n right_value = node.right.interpret(env)\n return 1 if left_value and right_value else 0", "def visit_and(self, left_result: T, right_result: T) -> T:", "def to_not_and_or(formula: Formula) -> Formula:\r\n # Task 3.5\r\n\r\n map_operators = {'->': Formula.parse('(~p|q)'),\r\n '+': Formula.parse('((p&~q)|(~p&q))'),\r\n '<->': Formula.parse('~((p&~q)|(~p&q))'),\r\n '-&': Formula.parse('~(p&q)'),\r\n '-|': Formula.parse('~(p|q)'),\r\n 'F': Formula.parse('(p&~p)'),\r\n 'T': Formula.parse('~(p&~p)')}\r\n return formula.substitute_operators(map_operators)", "def sqlors(left, lst):\n if isinstance(lst, iters):\n lst = list(lst)\n ln = len(lst)\n if ln == 0:\n return SQLQuery(\"1=2\")\n if ln == 1:\n lst = lst[0]\n\n if isinstance(lst, iters):\n return SQLQuery(['('] + \n sum([[left, sqlparam(x), ' OR '] for x in lst], []) +\n ['1=2)']\n )\n else:\n return left + sqlparam(lst)", "def __or__(self, other):\n if other is None:\n return self.copy()\n elif isinstance(other, (Query, QueryCompound)):\n return self.or_(other)\n else:\n out = self.copy()\n out.addMath(Query.Math.Or, other)\n return out", "def or_filter(self):\n return self.__or", "def simplify_and_node(parse_str=None, location=None, tokens=None):\n if len(tokens) == 1:\n return tokens[0]\n else:\n return AndNode(tokens.asList())", "def Nor(*args):\n return Not(Or(*args))", "def test_predicate10(self):\n xpb = XPathBuilder()\n pred = xpb.attr('foo').equals('bar').log_or(xpb.foobar)\n xp = xpb.foo.bar.where(pred)\n exp = '/foo/bar[@foo = \"bar\" or /foobar]'\n self.assertEqual(xp.tostring(), exp)", "def andify(things: list):\n if len(things) < 1:\n return \"\"\n return (f'{\", \".join(things[:-1])}' +\n f'{\", and \" if len(things) > 2 else (\" and \" if len(things) > 1 else \"\")}' +\n f'{things[-1]}')", "def __and__(self, other):\n return self.fam.c_binop('and', self, other)", "def __and__(self, other: Any) -> Operators:\n return self.operate(and_, other)", "def __and__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return And(self, other)", "def and_filter(self):\n return self.__and", "def test_add_q_or(self):\n query = Query(FakeDocument)\n\n q_1 = Q(foo=42)\n q_2 = Q(foo=128)\n\n query.add_q(q_1)\n query.add_q(q_2, conn=Q.OR)\n\n self.assertEqual(\n u'((foo:\"42\") OR (foo:\"128\"))',\n unicode(query))", "def __and__(self, query):\r\n return And([self, query]).normalize()", "def test_sqpp_long_or_chain(self):\n self.assertEqual(self.parser.parse_query('p0 or p1 or p2 or p3 or p4'),\n ['+', 'p0', '|', 'p1', '|', 'p2', '|', 'p3', '|', 'p4'])", "def filter_or(filters):\n def filt(item):\n for f in filters:\n if f(item):\n return True\n return False\n return filt", "def test_sqpp_paren_expr1_not_expr2_and_paren_expr3_or_expr4_WORDS(self):\n self.assertEqual(self.parser.parse_query('(expr1) not expr2 and (expr3) or expr4'),\n ['+', 'expr1', '-', 'expr2', '+', 'expr3', '|', 'expr4'])\n #['+', '+ expr1 | expr4', '+', '- expr2 | expr4', '+', '+ expr3 | expr4'])", "def _or(self, _or):\n\n self.__or = _or", "def _or(self, _or):\n\n self.__or = _or", "def _or(self, _or):\n\n self.__or = _or", "def _or(self, _or):\n\n self.__or = _or", "def _or(self, _or):\n\n self.__or = _or", "def test_evaluate_or_expression(self):\n value = self.evaluate_common(\"false or false\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n 
try:\n value = self.evaluate_common(\"false or 0\")\n self.fail(\"Integer promotion to Boolean\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"false or true\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"true or false\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"true or true\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"true or null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false or null\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"null or null\")\n self.assertTrue(value.value is False, \"Expected False\")", "def bitwise_or(lhs, rhs):\n return _make.bitwise_or(lhs, rhs)", "def test_andOperator(self):\n xp = XPathQuery(\"//bar[@attrib4='value4' and @attrib5='value5']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar5])", "def test_evaluate_paren_expression(self):\n p = odata.Parser(\"(false and false or true)\")\n # note that 'or' is the weakest operator\n e = p.parse_common_expression()\n value = e.evaluate(None)\n self.assertTrue(value.value is True, \"Expected True\")\n p = odata.Parser(\"(false and (false or true))\")\n # should change the result\n e = p.parse_common_expression()\n value = e.evaluate(None)\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\n \"(((((((false) and (((false)) or true)))))))\")\n self.assertTrue(value.value is False, \"Expected False - multibrackets\")", "def f_and(*args):\n f = And(*args).factor()\n return f if f in B else f.factor()", "def visit_or(self, left_result: T, right_result: T) -> T:", "def or_bexp(env, node):\n left_value = node.left.interpret(env)\n right_value = node.right.interpret(env)\n return 1 if left_value or right_value else 0", "def __or__(self, other):\n return self.or_(other)", "def AND(self, operand2, *operands):\n\t\treturn AND((self, operand2) + operands)", "def and_join(sequence):\n return ', '.join(sequence[:-1]) + ',' * (len(sequence) > 2) + ' and ' * (len(sequence) > 1) + sequence[-1]", "def or_(self, other):\n if not isinstance(other, (Query, QueryCompound)) or other.isNull():\n return self.copy()\n elif self.isNull():\n return other.copy()\n else:\n # grow this if the operators are the same\n if self.__op == QueryCompound.Op.And:\n queries = list(self.__queries) + [other]\n return QueryCompound(*queries, op=QueryCompound.Op.Or)\n else:\n return QueryCompound(self, other, op=QueryCompound.Op.Or)", "def test_sqpp_paren_expr1_and_expr2_or_expr3_WORDS_equiv_SYMBOLS(self):\n self.assertEqual(self.parser.parse_query('(expr1 and expr2) or expr3'),\n self.parser.parse_query('(expr1 + expr2) or expr3'))", "def exactlyonearg(symbol, fact, expr):\n pred_args = [fact.subs(symbol, arg) for arg in expr.args]\n res = Or(*[And(pred_args[i], *[~lit for lit in pred_args[:i] +\n pred_args[i+1:]]) for i in range(len(pred_args))])\n return res" ]
[ "0.74250734", "0.72573024", "0.7222321", "0.7148551", "0.71077687", "0.7057985", "0.6887752", "0.6887752", "0.6884606", "0.68829834", "0.68762136", "0.68715703", "0.68482435", "0.68293345", "0.67756486", "0.6733616", "0.671462", "0.6710137", "0.67097753", "0.6688344", "0.668174", "0.6659065", "0.6652031", "0.66514456", "0.66452724", "0.6593566", "0.65931606", "0.656109", "0.6561", "0.65534836", "0.65308905", "0.6526723", "0.650935", "0.6490717", "0.64827365", "0.647692", "0.64704144", "0.64595765", "0.6457805", "0.6456552", "0.6437941", "0.64283735", "0.6423545", "0.6420361", "0.6416169", "0.641137", "0.63963765", "0.6389751", "0.63885874", "0.63879156", "0.63823164", "0.63700026", "0.6347524", "0.6345025", "0.6332761", "0.6326919", "0.6323706", "0.6313556", "0.6312635", "0.63082796", "0.6298829", "0.6278735", "0.62748474", "0.6269511", "0.62575436", "0.6246541", "0.6245464", "0.623519", "0.62301046", "0.62236863", "0.62123936", "0.6199213", "0.6180763", "0.61778164", "0.61724305", "0.6169225", "0.6161391", "0.61522704", "0.6135862", "0.61324835", "0.61311245", "0.61300796", "0.61231536", "0.61231536", "0.61231536", "0.61231536", "0.61231536", "0.6104681", "0.6102984", "0.6098248", "0.60687476", "0.6065863", "0.6056116", "0.60412526", "0.6032071", "0.60303855", "0.6024006", "0.6017914", "0.60054404", "0.5965127" ]
0.6960249
6
andExpr = relationalExpr { "and" relationalExpr }
def andExpr( ): #DOUBLE CHECK THIS tok = tokens.peek( ) if debug: print("andExpr: ", tok) left = relationalExpr( ) #does the left side of the grammar tok = tokens.peek( ) while tok == "and": #checks to see if there is the token "and" and will preform what is inside the curly bracket since it is a series tokens.next() right = relationalExpr( ) left = BinaryExpr(tok, left, right)#MIGHT HAVE TO CHANGE TO STRING tok = tokens.peek( ) return left
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def AND(*expressions):\n return {'$and': list(expressions)}", "def __and__(self, query):\r\n return And([self, query]).normalize()", "def and_(a, b):", "def And(*conditions):\n def andPred(db):\n from functools import reduce\n return reduce(lambda result, c: c(result),\n conditions, db)\n\n return andPred", "def __and__(self, other):\n return self.fam.c_binop('and', self, other)", "def convert_broadcast_logical_and(node, **kwargs):\n return create_basic_op_node('And', node, kwargs)", "def and_(*args, **kwargs):\n ...", "def _and(it):\n return 1 if it[0]==1 and it[1]==1 else 0", "def __and__(self, other):\n if other is None:\n return self.copy()\n elif isinstance(other, (Query, QueryCompound)):\n return self.and_(other)\n else:\n out = self.copy()\n out.addMath(Query.Math.And, other)\n return out", "def _prefix_and(*exprs, **kwargs):\n anded = ' AND '.join('(%s)' % expr for expr in exprs if expr)\n if len(anded) == 0:\n return ''\n return kwargs.get('prefix', 'WHERE ') + anded", "def __and__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return And(self, other)", "def test_and(\n self,\n left: Result[int, str],\n right: Result[int, str],\n exp: Result[int, str],\n ) -> None:\n assert left.and_(right) == exp", "def Nand(*args):\n return Not(And(*args))", "def AND(f, g):\n def _and(x):\n return f(x) & g(x)\n return _and", "def _and(cls, arg1, arg2):\n return arg1 and arg2", "def logical_and(lhs, rhs):\n return _make.logical_and(lhs, rhs)", "def visit_and(self, left_result: T, right_result: T) -> T:", "def f_and(*args):\n f = And(*args).factor()\n return f if f in B else f.factor()", "def __and__(self, other: Any) -> Operators:\n return self.operate(and_, other)", "def and_bexp(env, node):\n left_value = node.left.interpret(env)\n right_value = node.right.interpret(env)\n return 1 if left_value and right_value else 0", "def conjuncts(s):\n return dissociate(\"AND\", s)", "def test_andOperator(self):\n xp = XPathQuery(\"//bar[@attrib4='value4' and @attrib5='value5']\")\n self.assertEqual(xp.matches(self.e), True)\n self.assertEqual(xp.queryForNodes(self.e), [self.bar5])", "def _daat_and(self):\n raise NotImplementedError", "def AND(r, s):\n return lambda l, i: r(l, i) and s(l, i)", "def simplify_and_node(parse_str=None, location=None, tokens=None):\n if len(tokens) == 1:\n return tokens[0]\n else:\n return AndNode(tokens.asList())", "def and_list(conditionList):\n return functools.reduce(numpy.logical_and, conditionList)", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def _and(self, _and):\n\n self.__and = _and", "def and_filter(self):\n return self.__and", "def logical_and(x1, x2, f=None):\n return _cur_framework(x1, f=f).logical_and(x1, x2)", "def __and__(self, other):\n return self.and_(other)", "def predicate_and(\n cls, left: \"ClaimPredicate\", right: \"ClaimPredicate\"\n ) -> \"ClaimPredicate\":\n return cls(\n claim_predicate_type=ClaimPredicateType.CLAIM_PREDICATE_AND,\n and_predicates=ClaimPredicateGroup(left, right),\n or_predicates=None,\n not_predicate=None,\n abs_before=None,\n rel_before=None,\n )", "def AND(self, operand2, *operands):\n\t\treturn AND((self, operand2) + operands)", "def _logical_and(*args):\n args_ = [_static_value(x) for x in args]\n if any(x is not None and not bool(x) for x in args_):\n return 
constant_op.constant(False)\n if all(x is not None and bool(x) for x in args_):\n return constant_op.constant(True)\n if len(args) == 2:\n return math_ops.logical_and(*args)\n return math_ops.reduce_all(args)", "def andify(things: list):\n if len(things) < 1:\n return \"\"\n return (f'{\", \".join(things[:-1])}' +\n f'{\", and \" if len(things) > 2 else (\" and \" if len(things) > 1 else \"\")}' +\n f'{things[-1]}')", "def __and__(self, other):\n return self._operation_and(other)", "def and_(self, other):\n if not isinstance(other, (Query, QueryCompound)) or other.isNull():\n return self.copy()\n elif self.isNull():\n return other.copy()\n else:\n # grow this if the operators are the same\n if self.__op == QueryCompound.Op.And:\n queries = list(self.__queries) + [other]\n return QueryCompound(*queries, op=QueryCompound.Op.And)\n else:\n return QueryCompound(self, other, op=QueryCompound.Op.And)", "def build_match_clause_and(field, string):\r\n answer = {}\r\n tmp = {}\r\n tmp[field] = {}\r\n tmp[field]['query'] = string\r\n tmp[field]['operator'] = 'and'\r\n answer['match'] = tmp\r\n return answer", "def and_condition_expr(args: dict) -> dict:\n args = deepcopy(args)\n existing_names = args.get(\"ExpressionAttributeNames\", dict())\n for ex_n in _range_str(ex_attr_name):\n # find an unused expression attribute name\n if ex_n not in existing_names:\n break\n\n names = {ex_n: name}\n cond_expr = condition_fmt.format(name=ex_n)\n args[\"ExpressionAttributeNames\"] = {**existing_names, **names}\n return and_condition(args, cond_expr)", "def test_expression_and_or(self):\n\n # Checks several examples with \"and\" and \"or\" operators\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_3\", \"multi_host\": False}], [\"networks\"]))\n self.assertTrue(value, \"complex expression (1)\")\n\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\", \"multi_host\": True}], [\"networks\"]))\n self.assertTrue(value, \"complex expression (2)\")\n\n expression = BooleanExpression(\"NORMAL\", or_(and_(models.Network.label != \"network_3\", models.Network.multi_host == True), models.Network.label == \"network_3\"))\n value = expression.evaluate(KeyedTuple([{\"label\": \"network_2\", \"multi_host\": False}], [\"networks\"]))\n self.assertFalse(value, \"complex expression (3)\")", "def expression( ):#DOUBLE CHECK THIS\n\t\n\ttok = tokens.peek( )\n\tif debug: print(\"Expression: \", tok)\n\tleft = andExpr( ) #does the left side of the grammar \n\ttok = tokens.peek( )\n\twhile tok == \"or\": #checks to see if there is the token or and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = andExpr( )\n\t\tleft = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE THIS TO STRING CAUSE ITS \"or\"\n\t\ttok = tokens.peek( )\n\treturn left", "def Conjunction(self, paren=False):\n left = self.Equality(paren)\n while self.currtok[1].name == \"AND\":\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Equality(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def as_relational(self, symbol):\n return And(*[set.as_relational(symbol) for set in self.args])", "def andalso(self, *conds):\n 
self._andalso += conds\n return self", "def and_or_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\t# TODO: The next set of lines will fail at a specific case\n\t\tif quad.operator == 10 :\n\t\t\tcls.set_address_value(quad.result, (left_op and right_op))\n\t\telif quad.operator == 11 :\n\t\t\tcls.set_address_value(quad.result, (left_op or right_op))", "def and_join(sequence):\n return ', '.join(sequence[:-1]) + ',' * (len(sequence) > 2) + ' and ' * (len(sequence) > 1) + sequence[-1]", "def and_(self, other):\n if not isinstance(other, (Query, QueryCompound)) or other.isNull():\n return self.copy()\n elif not self:\n return other.copy()\n else:\n return orb.QueryCompound(self, other, op=orb.QueryCompound.Op.And)", "def test_multi_chains_flatten():\n age = User.age >= 3\n name = User.name == \"foo\"\n email = User.email != \"bar\"\n\n and_condition = bloop.condition.Condition()\n or_condition = bloop.condition.Condition()\n for c in [age, name, email]:\n and_condition &= c\n or_condition |= c\n assert and_condition == bloop.condition.And(age, name, email)\n assert or_condition == bloop.condition.Or(age, name, email)", "def test_predicate11(self):\n xpb = XPathBuilder()\n xp = xpb.a.b.c[(xpb.attr('d') == 'e') & xpb.foo[xpb.attr('z') == 'ab']]\n exp = '/a/b/c[@d = \"e\" and /foo[@z = \"ab\"]]'\n self.assertEqual(xp.tostring(), exp)", "def add_statement_and(self, a, b, out=None):\n if out is None:\n out = self.port_name_generator.generate() \n\n s = 'And(a=%s, b=%s, out=%s)' % (a, b, out)\n self.parts_statements.append(s)\n return out", "def __and__(self, obj):\n return self._boolean_operation(obj, operator.__and__)", "def _build_and(self) -> str:\n return dedent(\n \"\"\"\n @SP\n M=M-1\n A=M\n D=M\n @SP\n M=M-1\n A=M\n M=M&D\n @SP\n M=M+1\n \"\"\"\n )", "def _(obj: And, visitor: BooleanExpressionVisitor[T]) -> T:\n left_result: T = visit(obj.left, visitor=visitor)\n right_result: T = visit(obj.right, visitor=visitor)\n return visitor.visit_and(left_result=left_result, right_result=right_result)", "def _do_conjunction(self, _and=(\"and\", \"e\", \"en\", \"et\", \"und\", \"y\")):\n w = self.words\n if len(w) > 2 and w[-2].type == \"CC\" and w[-2].chunk is None:\n cc = w[-2].string.lower() in _and and AND or OR\n ch1 = w[-3].chunk\n ch2 = w[-1].chunk\n if ch1 is not None and \\\n ch2 is not None:\n ch1.conjunctions.append(ch2, cc)\n ch2.conjunctions.append(ch1, cc)", "def allargs(symbol, fact, expr):\n return And(*[fact.subs(symbol, arg) for arg in expr.args])", "def combine_expressions(expressions, relation='AND', licensing=Licensing()):\n if not expressions:\n return\n\n if not isinstance(expressions, (list, tuple)):\n raise TypeError(\n 'expressions should be a list or tuple and not: {}'.format(\n type(expressions)))\n\n # Remove duplicate element in the expressions list\n expressions = list(dict((x, True) for x in expressions).keys())\n\n if len(expressions) == 1:\n return expressions[0]\n\n expressions = [licensing.parse(le, simple=True) for le in expressions]\n if relation == 'OR':\n return str(licensing.OR(*expressions))\n else:\n return str(licensing.AND(*expressions))", "def test_predicate7(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar[(xpb.attr('name') == 'foo') & (xpb.attr('x') == 'x')]\n exp = '/foo/bar[@name = \"foo\" and @x = \"x\"]'\n self.assertEqual(xp.tostring(), exp)", "def test_multi_shortcut():\n age = User.age >= 3\n condition = bloop.condition.And(age)\n assert condition.conditions 
== [age]\n\n condition = bloop.condition.Or(age)\n assert condition.conditions == [age]", "def _sql_where(cur, tables, andalso, orelse, prefix=None, aggregate=False):\n disjunctions = []\n andsql = _cond_where_sql(cur, andalso, tables, prefix=prefix,\n aggregate=aggregate)\n andsql = ' AND '.join(andsql)\n\n if len(andsql) > 0:\n andsql = '(%s)' % andsql\n disjunctions.append(andsql)\n disjunctions += _cond_where_sql(cur, orelse, tables, prefix=prefix,\n aggregate=aggregate)\n\n if len(disjunctions) == 0:\n return ''\n return '(%s)' % (' OR '.join(disjunctions))", "def __and__(self, other):\n assert isinstance(other, Filter)\n new_query = \"({}) & ({})\".format(self.query, other.query)\n return Filter(query=new_query)", "def test_predicate12(self):\n xpb = XPathBuilder()\n pred = (xpb.attr('d').equals('e')\n .log_and(xpb.foo.where(xpb.attr('z').equals('abc'))))\n xp = xpb.a.b.c.where(pred)\n exp = '/a/b/c[@d = \"e\" and /foo[@z = \"abc\"]]'\n self.assertEqual(xp.tostring(), exp)", "def _op_and_(self, left: Any, right: Any) -> Any:\n if isinstance(left, list):\n # induce an intersect with Collection\n return Intersect(left, right)\n\n left, right = _recycle_left_right(left, right)\n left = Series(left).fillna(False)\n right = Series(right).fillna(False)\n return left & right", "def test_predicate8(self):\n xpb = XPathBuilder()\n pred = (xpb.attr('name').equals('foo')\n .log_and(xpb.attr('x').equals('x')))\n xp = xpb.foo.bar.where(pred)\n exp = '/foo/bar[@name = \"foo\" and @x = \"x\"]'\n self.assertEqual(xp.tostring(), exp)", "def And(iterable):\n try:\n gen = iter(iterable)\n first = next(gen)\n first.__class__ = ParserElement\n base = first + next(gen) # once (+) to have a new element\n for expr in gen:\n base += expr # in place addition to avoid copying\n return base\n except StopIteration: # only one element\n return first", "def test_sqpp_paren_expr1_and_expr2_or_expr3_WORDS_equiv(self):\n self.assertEqual(self.parser.parse_query('(expr1 and expr2) or expr3'),\n self.parser.parse_query('(expr1 + expr2) | expr3'))", "def as_relational(self, x):\n x = sympify(x)\n if self.right_open:\n right = x < self.end\n else:\n right = x <= self.end\n if self.left_open:\n left = self.start < x\n else:\n left = self.start <= x\n return And(left, right)", "def bitwise_and(self, other: Any) -> ColumnOperators:\n\n return self.operate(bitwise_and_op, other)", "def test_predicate10(self):\n xpb = XPathBuilder()\n pred = xpb.attr('foo').equals('bar').log_or(xpb.foobar)\n xp = xpb.foo.bar.where(pred)\n exp = '/foo/bar[@foo = \"bar\" or /foobar]'\n self.assertEqual(xp.tostring(), exp)", "def test_join_and(self):\n self.assertEqual(join_and(self.fruits_singular, plural=False),\n \"apple, a orange and a banana\")\n self.assertEqual(join_and(self.fruits_plural, plural=True),\n \"apples, oranges and bananas\")\n self.assertEqual(join_and([\"apple\"], plural=False),\n \"apple\")\n self.assertEqual(join_and([\"apples\"], plural=True),\n \"apples\")\n self.assertEqual(join_and([], plural=True),\n \"\")\n self.assertEqual(join_and([], plural=False),\n \"\")", "def AND(self, values: pdarray) -> Tuple[Union[pdarray, List[Union[pdarray, Strings]]], pdarray]:\n if values.dtype not in [akint64, akuint64, bigint]:\n raise TypeError(\"AND is only supported for pdarrays of dtype int64, uint64, or bigint\")\n\n return self.aggregate(values, \"and\") # type: ignore", "def __init__(self, cased: bool = True):\n super().__init__()\n self.and_token = \"And\" if cased else \"and\"", "def as_relational(self, symbol):\n A, B = 
self.args\n\n A_rel = A.as_relational(symbol)\n B_rel = Not(B.as_relational(symbol))\n\n return And(A_rel, B_rel)", "def all(*args):\n if not args:\n raise ValueError(\"Any must take at least 1 argument\")\n if len(args) == 1:\n return args[0]\n ret = _make.And(args[0], args[1])\n for i in range(2, len(args)):\n ret = _make.And(ret, args[i])\n return ret", "def bitwise_and(lhs, rhs):\n return _make.bitwise_and(lhs, rhs)", "def __and__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__and__', other)", "def __insert_and(tree1, tree2):\n if tree1 is None:\n return tree2\n elif tree2 is None:\n return tree1\n else:\n return AndNode(tree1, tree2)", "def _operation_and(self, other):\n self._check_items(other)\n return ReadingSet(self._set & self._get_other_set(other))", "def filter_and(filters):\n def filt(item):\n for f in filters:\n if not f(item):\n return False\n return True\n return filt", "def test_sqpp_paren_expr1_and_expr2_or_expr3_WORDS(self):\n self.assertEqual(self.parser.parse_query('(expr1 and expr2) or expr3'),\n ['+', 'expr1 + expr2', '|', 'expr3'])\n #['+', '+ expr1 | expr3', '+', '+ expr2 | expr3'])", "def is_logical_and_not(node):\n if not isinstance(node, LogicalAnd):\n return False\n lhs = node.get_input(0)\n rhs = node.get_input(0)\n def input_predicate(op):\n return isinstance(op, LogicalNot) or is_logical_and_not(op)\n return input_predicate(lhs) and input_predicate(rhs)", "def test_pathop11(self):\n xpb = XPathBuilder()\n xp = (xpb.foo.log_and(xpb.bar)\n .log_or(xpb.baz).parenthesize()\n .log_and(xpb.foobar))\n exp = '(/foo and /bar or /baz) and /foobar'\n self.assertEqual(xp.tostring(), exp)\n # different notation but same xpath expression (no explicit braces!)\n xp = ((xpb.foo.log_and(xpb.bar.log_or(xpb.baz)))\n .parenthesize().log_and(xpb.foobar))", "def test_predicate9(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar[(xpb.attr('foo') == 'bar') | xpb.foobar]\n exp = '/foo/bar[@foo = \"bar\" or /foobar]'\n self.assertEqual(xp.tostring(), exp)", "def AND(condition_1, condition_2):\n if(type(condition_1) == bool):\n if(type(condition_2) == bool):\n return(condition_1 and condition_2)\n else:\n print('Invalid type: second condition does not evaluate to True or False.')\n else:\n print('Invalid type: first condition does not evaluate to True or False.')", "def test_evaluate_and_expression(self):\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n try:\n value = self.evaluate_common(\"false and 0\")\n self.fail(\"Integer promotion to Boolean\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"false and true\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and false\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"true and true\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"true and null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and null\")\n self.assertTrue(value.value is False, \"Expected False\")\n value = self.evaluate_common(\"false and false\")\n self.assertTrue(value.value is False, \"Expected False\")", "def __and__(self, other):\r\n if self.field.characteristic == 2:\r\n return 
runtime.and_(self, other)\r\n\r\n return super().__and__(other)", "def t_and(self, other):\n if self is TRUE and other is TRUE:\n return TRUE\n if self is FALSE or other is FALSE:\n return FALSE\n return UNKNOWN", "def __or__(self, query):\r\n return Or([self, query]).normalize()", "def relationalExpr( ):#MAKE SURE I USED THE RIGHT LOGIC FOR THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"relationalExpr: \", tok)\n\tleft = addExpr( )\n\texpr = \"\"\n\ttok = tokens.peek( )\n\tif tok in relations:\n\t\trel = relation( ) # expecting a relation to start off \n\t\tright = expression( ) # if there is a relation we expect there to be an expression to the right of the relation\n\t\texpr = BinaryExpr( rel, left, right )\n\t\treturn expr #fix this for syntax tree maybe\n\n\treturn left", "def bitwise_and(self):\n register = self.return_middle_registers(self.opcode)\n self.registers[register[0]] = (\n self.registers[register[0]] & self.registers[register[1]])\n logger.info(\"Bitwise AND on V{} and V{} for {}\".format(\n register[0],\n register[1],\n self.registers[register[0]]))", "def __and__(self, other):\n return self.bind(\n lambda left: other.__parser__().bind(\n lambda right: self.unit((left, right))))", "def le(cls, __and=True, __key=None, **kwargs):\r\n return _queries(\"<=\", __key, __and, kwargs.items())", "def parse_conditions_to_expr(\n expr: Sequence[Any], entity: Entity, arrayjoin: Set[str]\n) -> Optional[Expression]:\n\n def and_builder(expressions: Sequence[Expression]) -> Optional[Expression]:\n if not expressions:\n return None\n return combine_and_conditions(expressions)\n\n def or_builder(expressions: Sequence[Expression]) -> Optional[Expression]:\n if not expressions:\n return None\n return combine_or_conditions(expressions)\n\n def preprocess_literal(op: str, literal: Any) -> Expression:\n \"\"\"\n Replaces lists with a function call to tuple.\n \"\"\"\n if isinstance(literal, (list, tuple)):\n if op not in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is a sequence. \"\n \"Operator must be IN/NOT IN\"\n ),\n report=False,\n )\n literals = tuple([Literal(None, lit) for lit in literal])\n return FunctionCall(None, \"tuple\", literals)\n else:\n if op in [\"IN\", \"NOT IN\"]:\n raise ParsingException(\n (\n f\"Invalid operator {op} for literal {literal}. Literal is not a sequence. 
\"\n \"Operator cannot be IN/NOT IN\"\n ),\n report=False,\n )\n return Literal(None, literal)\n\n def unpack_array_condition_builder(\n lhs: Expression, op: str, literal: Any\n ) -> Expression:\n function_name = \"arrayExists\" if op in POSITIVE_OPERATORS else \"arrayAll\"\n\n # This is an expression like:\n # arrayExists(x -> assumeNotNull(notLike(x, rhs)), lhs)\n return FunctionCall(\n None,\n function_name,\n (\n Lambda(\n None,\n (\"x\",),\n FunctionCall(\n None,\n \"assumeNotNull\",\n (\n FunctionCall(\n None,\n OPERATOR_TO_FUNCTION[op],\n (Argument(None, \"x\"), preprocess_literal(op, literal)),\n ),\n ),\n ),\n ),\n lhs,\n ),\n )\n\n def simple_condition_builder(lhs: Expression, op: str, literal: Any) -> Expression:\n if op in UNARY_OPERATORS:\n if literal is not None:\n raise ParsingException(\n f\"Right hand side operand {literal} provided to unary operator {op}\",\n report=False,\n )\n return unary_condition(OPERATOR_TO_FUNCTION[op], lhs)\n\n else:\n if literal is None:\n raise ParsingException(\n f\"Missing right hand side operand for binary operator {op}\",\n report=False,\n )\n return binary_condition(\n OPERATOR_TO_FUNCTION[op], lhs, preprocess_literal(op, literal)\n )\n\n return parse_conditions(\n parse_expression,\n and_builder,\n or_builder,\n unpack_array_condition_builder,\n simple_condition_builder,\n entity,\n expr,\n arrayjoin,\n 0,\n )", "def _conjunction_op(spec, *expressions):", "def _conjunction_op(spec, *expressions):", "def test_and_multichain(self) -> None:\n assert Ok(2).and_(Ok(3)).and_(Ok(4)).and_(Ok(5)) == Ok(5)", "def _getQuery(self, keys, any):\n operator = 'or' if any else 'and'\n def _and(lhs, rhs): return (lhs & rhs)\n def _or(lhs, rhs): return (lhs | rhs)\n join = {'and': _and, 'or': _or}[operator]\n iter = keys.iteritems()\n key, val = iter.next()\n query = (where(key) == val)\n for key, value in iter:\n query = join(query, (where(key) == value))\n return query", "def __sub__(self, query):\r\n\r\n return And([self, Not(query)]).normalize()" ]
[ "0.7694933", "0.7391504", "0.7278942", "0.72714126", "0.72140443", "0.71525294", "0.71054393", "0.7005105", "0.6971053", "0.6965941", "0.69083124", "0.68721735", "0.68457156", "0.68190354", "0.6809348", "0.6774739", "0.67718345", "0.6753265", "0.6741417", "0.67354465", "0.67034066", "0.66916305", "0.6630332", "0.66224724", "0.661375", "0.6570111", "0.65543115", "0.65543115", "0.65543115", "0.65543115", "0.65543115", "0.65505743", "0.65398335", "0.64921546", "0.6490488", "0.64814067", "0.64612025", "0.645878", "0.6410322", "0.6397607", "0.6396024", "0.63491786", "0.6341808", "0.63144803", "0.6291112", "0.62873644", "0.6275524", "0.627035", "0.6247887", "0.6238953", "0.62069863", "0.61952597", "0.6189364", "0.6114815", "0.6095957", "0.6064093", "0.6062146", "0.6028804", "0.6018024", "0.60177314", "0.6011256", "0.6010193", "0.600138", "0.5922409", "0.58951634", "0.5890173", "0.58859396", "0.58643275", "0.585232", "0.5844413", "0.58071625", "0.58060837", "0.5799809", "0.57566047", "0.5747435", "0.57412624", "0.57278293", "0.57237697", "0.5718921", "0.56996655", "0.5693163", "0.56554055", "0.56549263", "0.56517506", "0.5646946", "0.56467986", "0.56379086", "0.5627732", "0.5615635", "0.560672", "0.5601761", "0.5596504", "0.5586308", "0.5575994", "0.5569551", "0.5548315", "0.5548315", "0.55438036", "0.55335206", "0.55325186" ]
0.8083135
0
relationalExpr = addExpr [ relation addExpr ]
def relationalExpr( ):#MAKE SURE I USED THE RIGHT LOGIC FOR THIS tok = tokens.peek( ) if debug: print("relationalExpr: ", tok) left = addExpr( ) expr = "" tok = tokens.peek( ) if tok in relations: rel = relation( ) # expecting a relation to start off right = expression( ) # if there is a relation we expect there to be an expression to the right of the relation expr = BinaryExpr( rel, left, right ) return expr #fix this for syntax tree maybe return left
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_relation(wn, source, target, new_rel, change_list=None):\n insert_rel(source, new_rel, target, change_list)\n if new_rel in inverse_synset_rels:\n inv_rel_type = inverse_synset_rels[new_rel]\n insert_rel(target, inv_rel_type, source, change_list)", "def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left", "def as_relational(self, symbol):\n return And(*[set.as_relational(symbol) for set in self.args])", "def polyrelsimp(expr):\n return expr.replace(lambda rel: isinstance(rel, Rel),\n lambda rel: expand_polyeq(rel))", "def as_relational(self, symbol):\n A, B = self.args\n\n A_rel = A.as_relational(symbol)\n B_rel = B.as_relational(symbol)\n\n return Xor(A_rel, B_rel)", "def add_relation(cls, row_id, rel_obj):\n obj = cls.query.filter_by(id=row_id).first()\n # obj = db.session.query(cls).filter_by(id=row_id).first()\n #print(type(obj))\n if cls.__name__ == 'Actor':\n obj.filmography.append(rel_obj)\n elif cls.__name__ == 'Movie':\n obj.cast.append(rel_obj)\n return commit(obj)", "def createRelation(rid, rlabel, list, x, y):\n relation = Relation(rid, rlabel, x, y)\n list.append(relation)", "def add_relation(self, qid, relation, qid2):\n if self._kg_symbols is not None:\n self._kg_symbols.add_relation(qid, relation, qid2)", "def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)", "def _setRelation(self, node):\n if getattr(self, \"relation\", None):\n element = etree.SubElement(node, 'relation')\n element.text = getattr(self, \"relation\")", "def _add_related(related, dep, all_related, index, connector=None):\n doc = {}\n doc[\"relationForm\"] = dep\n doc[\"rawName\"] = related\n doc[\"tokenIndex\"] = int(index)\n doc[\"offsetStart\"] = A.lookup[int(index)][\"start\"]\n doc[\"offsetEnd\"] = A.lookup[int(index)][\"end\"]\n doc[\"connector\"] = \"\" if connector is None else connector\n if not doc in all_related:\n all_related.append(doc)\n return all_related", "def _follow_relation_set(self, rel_expr,\n inverted):\n if not self.context.is_group(rel_expr.type_name):\n raise RelationNameError(rel_expr.type_name,\n 'Expression type is not a relation group.')\n g = self.context.get_group(rel_expr.type_name)\n if inverted == +1:\n with tf.name_scope('follow_group_%s' % rel_expr.type_name):\n return (self.follow(g.subject_rel, -1) *\n rel_expr.follow(g.relation_rel, -1)).follow(g.object_rel)\n else:\n with tf.name_scope('follow_group_%s_inverse' % rel_expr.type_name):\n return (self.follow(g.object_rel, -1) *\n rel_expr.follow(g.relation_rel, -1)).follow(g.subject_rel)", "def Relation(self, paren=False):\n left = self.Addition(paren)\n if self.currtok[1].name in {\"GREATERTHAN\", \"LESSTHAN\", \"LET\", \"GET\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Addition(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n 
other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)", "def __push_relation(self, id1, id2, id1_name, id2_name, table):\n # case: No entry about relation is in DB yet\n if not self.__postgre_db.is_in_table(table, id1_name + \"=\" + str(\n id1)):\n self.__postgre_db.insert(table, {\n id1_name: id1, id2_name: [id2], \"aggregation\": 0})\n\n # case: Entry about single_pattern is in DB\n else:\n old_list = self.__postgre_db.get(table, id1_name + \"=\" + str(\n id1), id2_name)\n new_list = list(set(old_list + [id2]))\n self.__postgre_db.update(\n table, id2_name + \"=\" + add_quotes(replace_brackets(str(new_list))), id1_name + \"=\" + str(id1))", "def __add__(self, other):\n\n other = formula(other, namespace=self.namespace)\n terms = self.terms + other.terms\n pieces = [(term.name, term) for term in terms]\n pieces.sort()\n terms = [piece[1] for piece in pieces]\n return formula(terms, namespace=self.namespace)", "def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)", "def add_sense_relation(wn, source, target, new_rel, change_list=None):\n insert_sense_rel(wn, source, new_rel, target, change_list)\n if new_rel in inverse_sense_rels:\n inv_rel_type = inverse_sense_rels[new_rel]\n insert_sense_rel(wn, target, inv_rel_type, source, change_list)", "def as_relational(self, x):\n x = sympify(x)\n if self.right_open:\n right = x < self.end\n else:\n right = x <= self.end\n if self.left_open:\n left = self.start < x\n else:\n left = self.start <= x\n return And(left, right)", "def __le__(self, other):\n return _generate_relational_expression(_le, self, other)", "def add_unary_constraint(self, var, relation, integer):\n constraint = lambda left_value: relation(left_value, integer)\n if var in self.__constraints:\n self.__constraints[var].append(constraint)\n else:\n self.__constraints[var] = [constraint]", "def __radd__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(other, self)", "def add_expr_to_comp(self, comp, expr):\n if not isinstance(comp, cellml_component):\n comp = self.model.get_component_by_name(comp)\n if not hasattr(comp, u'math'):\n # Create the math element\n math = comp.xml_create_element(u'math', NSS[u'm'])\n comp.xml_append(math)\n # Append this expression\n comp.math.xml_append(expr)", "def __add__(self, right):\n # TODO: move over to any coercion model!\n if not isinstance(right, MatrixMorphism):\n R = self.base_ring()\n return self.parent()(self.matrix() + R(right))\n if not right.parent() == self.parent():\n right = self.parent()(right)\n M = self.matrix() + right.matrix()\n return self.domain().Hom(right.codomain())(M)", "def create_relation(self, left_node, rel, right_node):\n rel = Relationship(left_node, rel, right_node)\n self.graph.merge(rel)\n return", "def add_relation(term_pair, term_info, tokenized_text, bags):\n tokenized_text = tokenized_text.copy()\n \n found_relation = False\n term_pair_key = \" -> \".join(term_pair)\n \n # restrict to closest occurence of the two terms in the sentence\n indices = get_closest_match(term_info[term_pair[0]][\"indices\"], \n term_info[term_pair[1]][\"indices\"])\n \n term1_text = \" \".join(tokenized_text[indices[0][0]:indices[0][1]])\n term2_text = \" \".join(tokenized_text[indices[1][0]:indices[1][1]])\n \n # tag term pair in the sentence\n tokenized_text = \" 
\".join(insert_relation_tags(tokenized_text, indices))\n \n if term_pair_key in bags[\"no-relation\"]:\n term_ix = bags[\"no-relation\"].index(term_pair_key)\n bags[\"no-relation\"][term_ix][\"sentences\"].append(tokenized_text)\n else:\n bags[\"no-relation\"].append({term_pair_key: {\"sentences\": [tokenized_text], \"relation\": \"no-relation\"}})\n \n return bags", "def as_relational(self, symbol):\n A, B = self.args\n\n A_rel = A.as_relational(symbol)\n B_rel = Not(B.as_relational(symbol))\n\n return And(A_rel, B_rel)", "def add_relationship(self, relationship):\n self.relationships[relationship.parent].append(relationship)", "def __add__(self, right_rule):\n self.__subrules.append(right_rule)\n return self", "def add_binary_constraint(self, left_var, relation, right_var):\n self.__constraints[(left_var, right_var)] = relation", "def as_relational(self, symbol):\n return Or(*[Eq(symbol, elem) for elem in self])", "def __add__(self, other):\n\n other = Formula(other)\n terms = self.terms + other.terms\n pieces = sorted([(term.name, term) for term in terms])\n terms = [piece[1] for piece in pieces]\n f = Formula(terms)\n if _namespace_equal(self.namespace, other.namespace):\n f.namespace = self.namespace\n return f", "def declare_relation(self,\n rel_name,\n domain_type,\n range_type,\n trainable = False,\n dense = False):\n if rel_name in self._declaration:\n raise RelationNameError(rel_name, 'Multiple declarations for relation.')\n reserved = dir(NeuralQueryExpression)\n if rel_name in reserved:\n logging.warn(\n 'rel_name prohibits expr.%s() as it matches a reserved word in: %r',\n rel_name, reserved)\n self._declaration[rel_name] = RelationDeclaration(rel_name, domain_type,\n range_type, trainable,\n dense)\n for type_name in [domain_type, range_type]:\n if type_name not in self._symtab:\n self._symtab[type_name] = symbol.SymbolTable()\n self._rel_name_symtab.insert(rel_name)", "def relate(self, other):\n ...", "def __add__(self, second_rule):\n return AndRule(self, second_rule)", "def is_relational(*args):\n return _ida_hexrays.is_relational(*args)", "def expr_ext(self, rule_name, method):\n expr = Expression([Prefix([Suffix([Name([rule_name])])])])\n if method == \"prepend\":\n self.children.insert(0, expr)\n elif method == \"append\":\n self.children.append(expr)\n else: assert False, \"Method of extension not supported: '{0}'\".format(method)", "def add_relations(self, relations: List[Relation]):\n if not relations:\n return None\n labels_str = relations[0].rel_type\n prop_str = \",\\n\".join(\n [\"rel.%s = relation.%s\" % (k, k) for k in relations[0].data]\n )\n query = \"\"\"\n UNWIND $relations AS relation\n MATCH (e1 {id: relation.source_id}), (e2 {id: relation.target_id})\n MERGE (e1)-[rel:%s]->(e2)\n SET %s\n \"\"\" % (\n labels_str,\n prop_str,\n )\n rel_params = []\n for rel in relations:\n rd = dict(source_id=rel.source_id, target_id=rel.target_id, **rel.data)\n rel_params.append(rd)\n return self.create_tx(query, query_params={\"relations\": rel_params})", "def add_edge(self, rtype, node1, node2):\n self.nodes[node1].add_relation(rtype,node2)\n self.nodes[node2].add_predecessor(rtype,node1)\n self.dirty = True", "def add_links(G, df, col1, col2, relation):\n df_tmp = df[(~df[col1].isnull()) & (~df[col2].isnull()) & (~df[relation].isnull())]\n links = list(zip(df_tmp[col1], df_tmp[col2], df_tmp[relation]))\n G.add_edges_from([(src, trg, dict(type=rel)) for src, trg, rel in links])\n print(\"Edges (%s->%s,%s) were added\" % (col1, col2, relation))", "def 
_append_operator(self, operator):", "def relations_to(self, end_node):", "def substitute_LHS_of_expression_1_into_expression_2(\n expression1, expression2, relation\n):\n return relation(\n expression2.lhs.subs(expression1.lhs, expression1.rhs),\n expression2.rhs.subs(expression1.lhs, expression1.rhs),\n evaluate=False,\n )", "def addAlleleOfGene(self, allele_id, gene_id, rel_id=None):\n if rel_id is None:\n rel_id = self.globaltt[\"is_allele_of\"]\n self.graph.addTriple(allele_id, rel_id, gene_id)", "def _add(self, other):\n if isinstance(other, SeqFormula):\n form1, v1 = self.formula, self.variables[0]\n form2, v2 = other.formula, other.variables[0]\n formula = form1 + form2.subs(v2, v1)\n start, stop = self._intersect_interval(other)\n return SeqFormula(formula, (v1, start, stop))", "def test_add():\n # Test for addition with scalar Rnode object and float value\n x = Rnode(0.11)\n z = x**2 + x\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value **2 + x.value\n assert x.grad() == sum(weight * var.grad()\n for weight, var in x.children)\n except AssertionError as e:\n print(e)", "def modify_rhs(self, expr, operator, var):\n assert isinstance(var, cellml_variable)\n # Ensure var is available in expr's component\n local_var_name = var.name\n source_comp = var.component\n expr_comp = expr.component\n if source_comp != expr_comp:\n local_var = self.connect_variables(var, (expr_comp.name, var.fullname(cellml=True)))\n local_var_name = local_var.name\n # Change expr\n rhs = expr.eq.rhs\n expr.safe_remove_child(rhs)\n new_rhs = mathml_apply.create_new(var.model, operator, [rhs, local_var_name])\n expr.xml_append(new_rhs)\n return expr", "def __radd__(self,that):\n return self.__opExpand2(that,np.add)", "def isRelational(self, *args):\n return _libsbml.ASTBasePlugin_isRelational(self, *args)", "def add_rxn(self, reaction: db.Reaction):\n # Add two rxn nodes\n rxn_nodes = []\n reaction_id = reaction.id().string()\n\n for i in range(0, 2):\n # Add rxn node between lhs and rhs compound\n rxn_node = ';'.join([reaction_id, str(i)])\n rxn_node += ';'\n self.graph.add_node(rxn_node, color='rxn_node')\n rxn_nodes.append(rxn_node)\n # Convert to strings\n reactants = reaction.get_reactants(db.Side.BOTH)\n reactant_types = reaction.get_reactant_types(db.Side.BOTH)\n weights = self._get_weight(reaction)\n # Add lhs aggregates and connect\n for lhs_cmp, lhs_type in zip([i.string() for i in reactants[0]],\n [i.name for i in reactant_types[0]]):\n if lhs_cmp not in self.graph:\n self.graph.add_node(lhs_cmp, type=lhs_type)\n required_cmps_lhs = [s.string() for s in reactants[0]]\n required_cmps_lhs.remove(lhs_cmp)\n self.graph.add_edge(lhs_cmp, rxn_nodes[0], weight=weights[0], required_compounds=required_cmps_lhs,\n required_compound_costs=None)\n self.graph.add_edge(rxn_nodes[1], lhs_cmp, weight=0.0, required_compounds=None)\n # Add rhs aggregates and connect\n for rhs_cmp, rhs_type in zip([i.string() for i in reactants[1]],\n [i.name for i in reactant_types[1]]):\n if rhs_cmp not in self.graph:\n self.graph.add_node(rhs_cmp, type=rhs_type)\n required_cmps_rhs = [s.string() for s in reactants[1]]\n required_cmps_rhs.remove(rhs_cmp)\n self.graph.add_edge(rhs_cmp, rxn_nodes[1], weight=weights[1], required_compounds=required_cmps_rhs,\n required_compound_costs=None)\n self.graph.add_edge(rxn_nodes[0], rhs_cmp, weight=0.0, required_compounds=None)\n\n # # # Loop over reaction nodes to add required compounds info to downwards edges; might be unnecessary\n node_index = 1\n for node in rxn_nodes:\n for key in 
self.graph[node].keys():\n self.graph.edges[node, key]['required_compounds'] = \\\n self.graph.edges[key, rxn_nodes[node_index]]['required_compounds']\n node_index -= 1", "def substitute_RHS_of_expression_1_into_expression_2(\n expression1, expression2, relation\n):\n return relation(\n expression2.lhs.subs(expression1.rhs, expression1.lhs),\n expression2.rhs.subs(expression1.rhs, expression1.lhs),\n evaluate=False,\n )", "def test_add_relation_type(self):\n pass", "def test_add_uri_statement(self):\n self.graph.add((artis, RDF.type, zoo))\n self.assertEquals(len(self.graph), 1)\n\n self.graph.add((artis, RDF.type, org))\n self.assertEquals(len(self.graph), 2)\n\n self.graph.add((artis, RDF.type, zoo))\n self.assertEquals(len(self.graph), 2)", "def _additem(self, relationship):\n rIds = [rel._rId for rel in self._values]\n if relationship._rId in rIds:\n tmpl = \"cannot add relationship with duplicate rId '%s'\"\n raise ValueError(tmpl % relationship._rId)\n self._values.append(relationship)\n self.__resequence()\n # register as observer of partname changes\n relationship._target.add_observer(self)", "def add(lhs, rhs):\n return _make.add(lhs, rhs)", "def __radd__(self, oth):\n\t\toth_m = oth\n\t\tif not isinstance(oth_m, Matrix):\n\t\t\toth_m = Matrix(oth_m)\n\t\tres_m = oth_m._add(self)\n\t\tif isinstance(oth,Matrix):\n\t\t\treturn res_m\n\t\telse:\n\t\t\treturn type(oth)(res_m._unnest())", "def addConstraint(constraint, problem):\n problem += constraint", "def mutate_add_edge(child):\n # TODO: can maybe be improved by sparqling\n nodes = list(child.nodes)\n if len(nodes) < 2:\n return child\n node1, node2 = random.sample(nodes, 2)\n var_edge = gen_random_var()\n new_triple = (node1, var_edge, node2)\n return child + (new_triple,)", "def isRelational(self):\n return _libsbml.ASTNode_isRelational(self)", "def relationships(self):", "def parse_relation(self, term):\n pass", "def relate(self, other: Compound[Scalar]) -> Relation:\n return ((Relation.EQUAL\n if self is other\n else _relate_sets(self._points_set, other._points_set))\n if isinstance(other, Multipoint)\n else self._relate_geometry(other))", "def relation( self, obj, relType ):\n raise NotImplementedError(\"relation\")", "def __radd__(self, other):\n return self + other", "def __radd__(self, other):\n return self + other", "def relate(a, b, **kwargs):\n return lib.relate(a, b, **kwargs)", "def test_related_add_existing_child(related_record, testdata):\n docs = testdata[\"documents\"]\n series = testdata[\"series\"]\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc6 = Document.get_record_by_pid(docs[4][\"document_pid\"])\n ser10 = Series.get_record_by_pid(series[1][\"series_pid\"])\n\n # Should fail if trying to add a child that already has relations\n with pytest.raises(RelatedRecordError):\n ser10.related.add_language(doc1)\n with pytest.raises(RelatedRecordError):\n ser10.related.add_language(doc6)", "def adp(lhs,rhs):\n test=lambda s: s[0]=='`'\n assert test(lhs)==True,'error: lhs should be non-terminal'\n lhs=so.getSymbol(lhs[1:],terminal=False,autocreate=True)\n rhs=[so.getSymbol(s[1:],False,True) if test(s) else so.getSymbol(s,True,True) for s in rhs]\n return addProduction(lhs,rhs)", "def addition(self):\n\t\treturn lambda anything: self.__class__(\n\t\t\t(self[:], disj, checked_proposition(anything)[:])\n\t\t)", "def update_relation(wn, source, target, new_rel, change_list=None):\n delete_rel(source, target, change_list)\n insert_rel(source, new_rel, target, change_list)\n if new_rel in 
inverse_synset_rels:\n inv_rel_type = inverse_synset_rels[new_rel]\n delete_rel(target, source, change_list)\n insert_rel(target, inv_rel_type, source, change_list)", "def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)", "def add_relationship(self, rel: ResourceRelationshipDescriptor) -> None:\n self._relationships[assert_not_none(rel.name)] = rel.bind(self)", "def Addition(self, paren=False):\n left = self.Term(paren)\n while self.currtok[1].name in {\"PLUS\", \"MINUS\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Term(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def associate(op, args):\n args = dissociate(op, args)\n if len(args) == 0:\n return _op_identity[op]\n elif len(args) == 1:\n return args[0]\n else:\n return Expr(op, *args)", "def addAlias(self, alias, node):", "def rewrite_refs(sexpr, from_args, base_offsets):\n\n def rewrite_node(sexpr):\n # Push unboxing into the state variables of distributed aggregates\n if isinstance(sexpr, expression.AggregateExpression):\n if sexpr.is_decomposable():\n ds = sexpr.get_decomposable_state()\n lsms = rewrite_statemods(ds.get_local_statemods(), from_args, base_offsets) # noqa\n rsms = rewrite_statemods(ds.get_remote_statemods(), from_args, base_offsets) # noqa\n\n if lsms or rsms:\n sexpr.set_decomposable_state(\n expression.DecomposableAggregateState(\n ds.get_local_emitters(), lsms,\n ds.get_remote_emitters(), rsms,\n ds.get_finalizer()))\n return sexpr\n\n if not isinstance(sexpr, expression.DottedRef):\n return sexpr\n elif sexpr.table_alias not in from_args:\n raise NoSuchRelationException(sexpr.table_alias)\n else:\n op = from_args[sexpr.table_alias]\n scheme = op.scheme()\n\n debug_info = None\n if not sexpr.field:\n offset = 0\n elif isinstance(sexpr.field, int):\n if sexpr.field >= len(scheme):\n raise ColumnIndexOutOfBounds(str(sexpr))\n offset = sexpr.field\n else:\n assert isinstance(sexpr.field, basestring)\n offset = scheme.getPosition(sexpr.field)\n debug_info = sexpr.field\n\n offset += base_offsets[sexpr.table_alias]\n return expression.UnnamedAttributeRef(offset, debug_info)\n\n def recursive_eval(sexpr):\n \"\"\"Rewrite a node and all its descendents\"\"\"\n newexpr = rewrite_node(sexpr)\n newexpr.apply(recursive_eval)\n return newexpr\n\n return recursive_eval(sexpr)", "def spanrelations(self, rightbranching = False, leftbranching = False, interpunction = True):\n \t\t#Create normal span relations\n \t\tspanrels = {}", "def __add__(self,that):\n return self.__opExpand2(that,np.add)", "def relative(relative, ns):\n\n if not ns:\n phrase = statement(relative)\n else:\n relative.sn = ns\n phrase = imperative(relative)\n relative.sn = []\n return phrase", "def __iadd__(self, term):\n self.add(term)\n return self", "def __ge__(self, other):\n return _generate_relational_expression(_le, other, self)", "def test_radd():\n # Test for reverse addition with scalar Rnode object and float value\n x = Rnode(0.11)\n z = 0.5 + x\n z.grad_value = 1.0\n\n try:\n assert z.value == x.value + 0.5\n except AssertionError as e:\n print(e)", "def __add__(self, other):\r\n return self.add(other)", "def add_expression(binary_addr, s):\n\n assert not isinstance(s, labelmanager.Label) # TODO!?\n # TODO: Warn/assert if addr already in expressions? 
Allow overriding this via an optional bool argument?\n if binary_addr not in expressions:\n expressions[binary_addr] = s", "def combine_expressions(expressions, relation='AND', licensing=Licensing()):\n if not expressions:\n return\n\n if not isinstance(expressions, (list, tuple)):\n raise TypeError(\n 'expressions should be a list or tuple and not: {}'.format(\n type(expressions)))\n\n # Remove duplicate element in the expressions list\n expressions = list(dict((x, True) for x in expressions).keys())\n\n if len(expressions) == 1:\n return expressions[0]\n\n expressions = [licensing.parse(le, simple=True) for le in expressions]\n if relation == 'OR':\n return str(licensing.OR(*expressions))\n else:\n return str(licensing.AND(*expressions))", "def rdf_update_connections(rdf, prop, obj, subj, owl):\n conname = prop.split('#')[-1]\n print(\"createcon \"+str(obj)+ \" \" + str(subj))\n obj.relationships.create(conname, subj)\n for i in rdf.objects(subject=prop, predicate=RDFS.subPropertyOf):\n print(i)\n rdf_update_connections(rdf, i, obj, subj, owl)\n for i in rdf.objects(subject=prop, predicate=owl.inverseOf):\n conname = i.split('#')[-1]\n subj.relationships.create(conname, obj)", "def __add__(self, Q):\n return Q", "def __add__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Add, value)\n return out", "def __or__(self, other):\n return self.__add__(other)", "def __add__(self, other):\n return Rabbit(0,self, other) # o-id, self-parent, other-otherparent", "def __add__(self, other):\n\n if isinstance(other, Term) and other.name is 'intercept':\n return Formula(self, namespace=self.namespace)\n else:\n return Term.__add__(self, other)", "def create_expression(sv, tree): \r\n if not tree: return None # nothing to do\r\n o,A,B=tree\r\n\r\n if o==Obr: # bracketed expression: remove brackets \r\n return create_expression(sv, A) # RECURSIVE\r\n\r\n nam=tree_join(tree)\r\n verify_expression(tree, nam) # check name validity \r\n\r\n if nam in sv.Object and not sv.Object[nam].isnew: # don't replace existing name unless new user call\r\n nod=sv.Object[nam] # use old name \r\n return (nam, None, None) # replace expression with name \r\n \r\n nod=add_object(sv, nam) # create object (unless new user call)\r\n nod.isexpression=True\r\n nod.isnew=False # process only once\r\n \r\n # link expression (only for new nodes)\r\n if o==Comma: # special case: list: clause for each changing element\r\n li=[]\r\n for t in A:\r\n exprs=create_expression(sv, t) # RECURSIVE\r\n if exprs: li=li+[exprs]\r\n vlu=(Comma, li, None) # list of elements \r\n nod.clauses=[(Starttree,vlu)] # start clause for whole list ((Start, None, None), (Comma, li, None)) \r\n for t in li: # each term is a triplet\r\n if t and not is_fixed(t[0]):\r\n add_change_clause(sv, nod, t, vlu)\r\n\r\n return (nam, None, None) # name for the list\r\n\r\n # some sort of expression except a list\r\n exprsA=create_expression(sv, A)\r\n exprsB=create_expression(sv, B)\r\n vlu=(o, exprsA, exprsB) # reduce to a simple operation between two expressions \r\n\r\n # make start clauses, and change clause for non-fixed objects (do not repeat 'change')\r\n nod.clauses=[(Starttree, vlu)] # ((Start, None, None), vlu) \r\n if o in sv.Object and not is_fixed(o):\r\n add_change_clause(sv, nod, (o, None, None), vlu)\r\n if A and not is_fixed(A[0]):\r\n add_change_clause(sv, nod, exprsA, vlu)\r\n if B and B!=A and not is_fixed(B[0]):\r\n add_change_clause(sv, nod, exprsB, vlu)\r\n \r\n if o==Since: # special case: conditions for \"since\" \r\n 
pl=create_expression(sv, (Plus, exprsB, exprsA)) # RECURSIVE \r\n nod.clauses[-1]=((Change, pl, None), vlu) # when change(event+delay): (Since, exprsA, exprsB) \r\n add_change_clause(sv, nod, exprsB, vlu) # when change(event)...\r\n # n.b. changing delay during 'since' should have no effect\r\n \r\n return (nam, None, None) # replace expression with name \r", "def add_constraint(self, left_var, relation, right_var_or_value):\n if right_var_or_value.isdigit():\n self.add_unary_constraint(left_var, relation, right_var_or_value)\n else:\n self.add_binary_constraint(left_var, relation, right_var_or_value)", "def add_edge(self, val1, val2, weight, directional=False):\n\t\tnode1 = self.get_node(val1)\n\t\tnode2 = self.get_node(val2)\n\n\t\tnode1.add_edge(node2, weight)\n\t\tif not directional:\n\t\t\tnode2.add_edge(node1, weight)", "def link_expr(self, expr):\n if expr.kind == PTN.VAR_EXP:\n self.link_to_dec(expr)\n self.print_debug(expr.line_number, self.link_message(expr))\n elif expr.kind == PTN.ARR_EXP:\n self.link_to_dec(expr)\n self.link_expr(expr.index)\n self.print_debug(expr.line_number, self.link_message(expr))\n elif expr.kind == PTN.FUN_CALL_EXP:\n self.link_to_dec(expr, function=True)\n self.print_debug(expr.line_number, self.link_message(expr))\n if expr.params is not None:\n map(self.link_expr, expr.params)\n elif expr.kind in (PTN.ADDR_EXP,\n PTN.DEREF_EXP,\n PTN.NEG_EXP):\n self.link_expr(expr.exp)\n elif isinstance(expr, OpExpNode):\n self.link_expr(expr.l_exp)\n self.link_expr(expr.r_exp)", "def derive_relationship(\n self,\n variable_follower,\n variable_leaders,\n ):\n return super().derive_relationship(\n variable_follower=variable_follower,\n variable_leaders=variable_leaders,\n interpkind=\"linear\",\n )", "def _do_relation(self):\n if self.chunks:\n ch = self.chunks[-1]\n for relation, role in ch.relations:\n if role == \"SBJ\" or role == \"OBJ\":\n self.relations[role][relation] = ch\n if ch.type in (\"VP\",):\n self.relations[ch.type][ch.relation] = ch", "def addEdge(self, e):\n v = e.either()\n w = e.other(v)\n self._validateVertex(v)\n self._validateVertex(w)\n self._adj[v].add(e)\n self._adj[w].add(e)\n self._E += 1", "def ADD (self, n1, n2):", "def _add_relationship(self, reltype, target_part):\n rId = self._relationships._next_rId\n rel = _Relationship(rId, reltype, target_part)\n self._relationships._additem(rel)\n return rel" ]
[ "0.6230691", "0.61780995", "0.60156584", "0.590059", "0.58807737", "0.5856698", "0.5843209", "0.58185554", "0.5784956", "0.5745773", "0.57166684", "0.5676181", "0.5653579", "0.5636797", "0.5615812", "0.5576591", "0.5564806", "0.5537048", "0.55229557", "0.5472008", "0.54082906", "0.5404519", "0.53998846", "0.53828263", "0.5378791", "0.53647834", "0.5357061", "0.5326924", "0.5315686", "0.5312122", "0.5310432", "0.5285241", "0.5280621", "0.5278127", "0.52708066", "0.5253349", "0.5247338", "0.5232592", "0.52189106", "0.52184135", "0.52035666", "0.51941526", "0.5190281", "0.51621085", "0.51620096", "0.5151764", "0.5148705", "0.5126635", "0.51236635", "0.5106961", "0.51018983", "0.50983906", "0.50975573", "0.50962925", "0.509383", "0.50884354", "0.50868964", "0.50862265", "0.5082521", "0.50822294", "0.50684106", "0.5057747", "0.5056991", "0.50565016", "0.50565016", "0.5048294", "0.5045556", "0.50371915", "0.5031487", "0.503016", "0.50181633", "0.5012954", "0.50035155", "0.5001075", "0.49991074", "0.4983168", "0.49751985", "0.49745554", "0.49720815", "0.49703932", "0.49694777", "0.49606362", "0.4956571", "0.49487835", "0.49381545", "0.49358425", "0.49342898", "0.49309602", "0.49300733", "0.49299595", "0.49273023", "0.49260926", "0.4922951", "0.49202847", "0.49172133", "0.49032104", "0.49010175", "0.49004573", "0.48929626", "0.48849568" ]
0.7208437
0
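Every row in this dump carries the same fields: a query, one positive document, a metadata dict whose objective marks the (query, document, negatives) triplet, a list of negative passages, their scores, and what appear to be the positive document's own score and rank. As a purely hypothetical illustration of how such a row could be expanded for contrastive training (none of the names below come from the dataset itself), a minimal sketch in Python:

def to_training_triplets(row):
    # Pair the query and its positive document with each negative passage,
    # sorting negatives by the supplied score so the hardest ones come first.
    ranked = sorted(
        zip(row["negatives"], map(float, row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [(row["query"], row["document"], neg) for neg, _ in ranked]

A document_rank of 0, as in the rows shown here, presumably just means the positive document outscored every listed negative under whatever similarity model produced the scores.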
factor = number | '(' expression ')'
def factor( ):
    tok = tokens.peek( )
    if debug: print ("Factor: ", tok)
    if re.match( Lexer.number, tok ):
        expr = Number(tok)
        tokens.next( )
        tok = tokens.peek( )
        return expr
    if tok == "(":
        tokens.next( )  # or match( tok )
        expr = addExpr( )  # might need to change to expression( )
        tokens.peek( )
        tok = match( ")" )
        return expr
    if re.match( Lexer.identifier, tok ):  # added this to take into account identifiers
        expr = VarRef(tok)
        tokens.next( )
        return expr
    if re.match( Lexer.String, tok ):  # added this to take into account strings
        expr = String( tok )
        return expr
    error( "Invalid operand" )
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _num_factor(number, factor):\n assert factor != 0\n return number // factor", "def make_multiplier(factor):\n return lambda x: factor * x", "def make_anonymous_factorial():\n return 'YOUR_EXPRESSION_HERE'", "def make_anonymous_factorial():\n return 'YOUR_EXPRESSION_HERE'", "def make_anonymous_factorial():\n return 'YOUR_EXPRESSION_HERE'", "def make_anonymous_factorial():\n return 'YOUR_EXPRESSION_HERE'", "def __init__(self, factor: FunctionType or Number = 1):\n\n if isinstance(factor, Number):\n self.func = lambda x: int(x * factor)\n elif isinstance(factor, FunctionType):\n self.func = lambda x: int(factor(x))", "def factorize(x: int):\n pass", "def visit_factor(self, node, children):\n if self.debug:\n print(\"Factor {}\".format(children))\n if len(children) == 1:\n return children[0]\n sign = -1 if children[0] == '-' else 1\n return sign * children[-1]", "def _factor_non_decimal(value):\n result = 1\n factors = sympy.factorint(value)\n for factor, power in six.iteritems(factors):\n if factor not in [2, 5]:\n result *= factor ** power\n return result", "def factorize(x):\n pass", "def parseFactors(cmds):\n print(\"Factor\")\n if cmds[0] == 'D':\n parseExpr(cmds[2:len(cmds)-1])\n elif cmds[0] == '(':\n parseExpr(cmds[1:len(cmds)-1])\n else:\n parseNumber(cmds)", "def fact(n: \"some non-negative integer\") -> \"n! or 0 if n < 0\":\n if n < 0:\n return 0\n elif n <= 1:\n return 1\n else:\n return n * fact(n - 1)", "def Factor(self):\n if self.currtok[1].name in {\"MINUS\", \"NOT\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n prime = self.primary()\n return Factor(op, prime)\n return self.primary()", "def factorial_eval(n):\n lst = list(range(1, n+1))\n return eval(str(lst).replace(', ', '*')[1:-1:])", "def mul_factor(factors: List[Tuple[int, int]]) -> int:\n n = 1\n for f in factors:\n n *= f[0] ** f[1]\n return n", "def calceNumerator ( term , numeratorN1 , numeratorN2 ) :\n if term == limit :\n if term % 3 == 0 :\n return ( 2 * int ( term / 3 ) * numeratorN1 ) + numeratorN2\n return numeratorN1 + numeratorN2\n\n multiplier = 1\n if term % 3 == 0 :\n multiplier = 2 * int ( term / 3 )\n numerator = multiplier * numeratorN1 + numeratorN2\n\n return calceNumerator ( term + 1 , numerator , numeratorN1 )", "def factorize(number, factors, result=None):\n if result is None:\n result = []\n factor = _max_factor(number, factors)\n amount = _num_factor(number, factor)\n remain = _remainder(number, factor)\n result.append((amount, factor))\n if remain == 0:\n return result\n return factorize(remain, factors, result)", "def make_anonymous_factorial():\n return lambda n: 1 if n == 1 else mul(n, make_anonymous_factorial()(sub(n, 1)))", "def factor(expr, conj=False):\n if expr in B:\n return expr\n else:\n return expr.factor(conj)", "def get_factorial(number):\n if number == 1:\n return 1\n else:\n return number * get_factorial(number - 1)", "def exp(x):\n if isinstance(x, int):\n x = Expression(x)\n return _exp(x)", "def make_power(number,pow):\r\n def dispatch(x):\r\n if(x==0):\r\n return number\r\n elif(x==1):\r\n return pow\r\n return dispatch", "def multiple_of(factor):\n\n class multiple_of(int):\n \"\"\"Int type in [A; B] range.\"\"\"\n\n def __init__(self, k):\n assert int(k) % factor == 0, (k, factor)\n super(multiple_of, self).__init__()\n\n return multiple_of", "def fact(num: int) -> int:\n return 1 if (num < 1) else num * fact(num - 1)", "def make_anonymous_factorial():\n return lambda val : (lambda f, v : f(f, v)) (lambda f, v : 1 if v == 0 else mul(v, f(f, sub(v, 1))), 
val)", "def factor(n):\r\n\t# Rewritten to align with SAGE. Previous semantics available as factors(n).\r\n\tif (abs(n) == 1): return \"Unable to factor \"+str(n) # Can't deal with units\r\n\tfactspow = []\r\n\tcurrfact = None\r\n\tfor thefact in factors(n):\r\n\t\tif thefact != currfact:\r\n\t\t\tif currfact != None:\r\n\t\t\t\tfactspow += [(currfact,thecount)]\r\n\t\t\tcurrfact = thefact\r\n\t\t\tthecount = 1\r\n\t\telse:\r\n\t\t\tthecount += 1\r\n\tfactspow += [(thefact,thecount)]\r\n\treturn factspow", "def __mul__(self, factor):\n def mul(output, target, params):\n return self(output, target, params) * factor\n return type(self)(type(self).__reserved_init, mul, factor * (1. if self._fact is None else self._fact), self._name)", "async def calculate_score(expression, score_factor):\n # The score asymptotically approaches the max score\n # based on the length of the expression.\n return (1 - (1 / ((len(expression) + 1) ** 2))) * score_factor", "def task17_factorial(num):\n result = 1\n for i in range(1, num + 1):\n result *= i\n return result", "def factorial(number):\n if number == 0:\n return 1\n return number * factorial(number - 1)", "def factor(n):\n\t# Rewritten to align with SAGE. Previous semantics available as factors(n).\n\tif ((abs(n) == 1) or (n == 0)): raise ValueError('Unable to factor {0}'.format(n))\n\tfactspow = []\n\tcurrfact = None\n\tfor thefact in factors(n):\n\t\tif thefact != currfact:\n\t\t\tif currfact != None:\n\t\t\t\tfactspow += [(currfact,thecount)]\n\t\t\tcurrfact = thefact\n\t\t\tthecount = 1\n\t\telse:\n\t\t\tthecount += 1\n\tfactspow += [(thefact,thecount)]\n\treturn tuple(factspow)", "def factorial(number):\n result = 1\n while number:\n result *= number\n number -= 1\n return result", "def helper(num):\r\n \r\n return lambda x: num * product(x)", "def ceil_div_mul(value, factor):\n return (((value) + (factor)-1) // (factor))*(factor)", "def prime_factorization(num):\n return prime_factors_p(num, _sieve)", "def multiply(number, word):\n return int(number) * word", "def calculate_factorial(num: int, factorial_value: int):\n if num <= 1:\n return factorial_value\n\n return calculate_factorial(num - 1, num * factorial_value)", "def math_operation(expression):\n if not str(expression[0]).isdigit() or not str(expression[2]).isdigit():\n # eliminates the error call for float and negative numbers\n if not str(expression[0]).replace('.', '1').replace('-', '1').isdigit() or \\\n not str(expression[2]).replace('.', '1').replace('-', '1').isdigit():\n raise ValueError(f'{expression} - check this fragment, something wrong.')\n if expression[2] == 0 and expression[1] == '/':\n raise ValueError(f'{expression} - division by zero.')\n operator = expression[1]\n if operator == '**':\n return expression[0]**expression[2]\n elif operator == '*':\n return expression[0]*expression[2]\n elif operator == '/':\n return expression[0]/expression[2]\n elif operator == '+':\n return expression[0]+expression[2]\n elif operator == '-':\n return expression[0]-expression[2]", "def numerator(self, ???):", "def is_factor(f, n):\r\n return n%f == 0", "def factorial(n):\n\n # the code for factorial", "def compile(expression):", "def factorial(number):\n\n if number == 1:\n return number\n\n return number * factorial(number-1)", "def fact(n):\n\treturn int(factorial(n, exact=True))", "def mult(value, arg):\n return int(value)*int(arg)", "def fn(i, expr, total, last):\n if i == len(num): \n if total == target: ans.append(expr)\n else: \n for ii in range(i, len(num) if num[i] != \"0\" else 
i+1): \n val = int(num[i:ii+1])\n if i == 0: fn(ii+1, num[i:ii+1], val, val)\n else: \n fn(ii+1, expr + \"*\" + num[i:ii+1], total - last + last * val, last * val)\n fn(ii+1, expr + \"+\" + num[i:ii+1], total + val, val)\n fn(ii+1, expr + \"-\" + num[i:ii+1], total - val, -val)", "def fact(n):\n return float(misc.factorial(n, True))", "def registerFactor(currency, factor):", "def factorial(n: int):\n return 1 if n == 0 else factorial(n-1)*n", "def _max_factor(number, factors):\n return max(n for n in factors if n <= number)", "def myfunc(num):\n print(num**2 - 3*num + 7)", "def exp_term(x, i):\n return x**i/math.factorial(i)", "def factorial(number):\n num_factorial = 1\n for i in range(1, number+1):\n num_factorial*=i\n return num_factorial # returns a value back to the calling statement", "def floor_div_mul(value, factor):\n return (((value) // (factor))) *(factor)", "def factorial(num):\n if num == 0:\n return 1\n else:\n return num * factorial(num - 1)", "def f(n):\n\treturn gmpy.fac(n)", "def doubler(number):\n return number * 2", "def test_factorization_special_case(self):\n factorization = pauli_word_exp_factorization(1.2, PauliWord(\"ZZIIIIIZZ\"))\n print(factorization)\n self.assertEqual(\n len(factorization),\n 5\n )", "def factor(n):\n import math\n if not n >= 0:\n raise ValueError(\"n must be >= 0\")\n if math.floor(n) != n:\n raise ValueError(\"n must be exact integer\")\n if n + 1 == n:\n raise OverflowError(\"n too large\")\n result = 1\n factor = 2\n while factor <= n:\n result *= factor\n factor += 1\n return result", "def __imul__(self, factor):\n self._fact = factor * (1. if self._fact is None else self._fact)\n return self", "def compile_word(word):\r\n\r\n if word.isupper():\r\n terms = [('%s*%s' % (10**i, d)) for (i,d) in enumerate(word[::-1])]\r\n return '(' + '+'.join(terms) + ')'\r\n else:\r\n return word", "def fac(n:int) -> int :\n\n factorial = 1\n while n >= 1:\n factorial *= n\n n -= 1\n return factorial", "def what_number(number):\r\n # if <expr>:\r\n # elif <expr>:\r\n # else:\r\n if number > 0:\r\n return \"positive\"\r\n elif number == 0:\r\n return \"zero\"\r\n else:\r\n return \"negative\"", "def make_anonymous_factorial():\n\t# u need to use a helper function if your lambda statement does\n\t# not have a name\n\t# fact = lambda n: 1 if n == 1 else mul(n, fact(sub(n, 1)))\n\tdef recursive2(f, n):\n\t\treturn f(f, n)\n\n\tdef recursive(n):\n\n\t\treturn recursive2((lambda rec, n: 1 if n == 1 else mul(n, rec(sub(n, 1)))), n)\n\n\treturn recursive", "def factorial(num):\n if num == 1:\n return num\n else:\n return num * factorial(num - 1)", "def mult(value, arg):\n return int(value)-int(arg)", "def square_factor(a):\n f = a if isinstance(a, dict) else factorint(a)\n return Mul(*[p**(e//2) for p, e in f.items()])", "def calculate_expression(number1, number2, operator):\n\n if operator == '+':\n return number1 + number2\n elif operator == '-':\n return number1 - number2\n elif operator == '*':\n return number1 * number2", "def _eval_(self, x):\n if isinstance(x, Rational):\n return gamma(x+1)\n elif isinstance(x, (Integer, int)) or self._is_numerical(x):\n return py_factorial_py(x)\n\n return None", "def prod(factors):\n return reduce(operator.mul, factors, 1)", "def exeval(expression): \n if len(expression) <= 3: #Assuming no spaces (\" \") between each value given in the expression\n if expression[0] == \"+\":\n return float(expression[1]) + float(expression[2])\n elif expression[0] == \"-\":\n return float(expression[1]) - float(expression[2])\n else:\n 
if expression[0] == \"+\":\n return float(expression[1]) + exeval(expression[2:])\n elif expression[0] == \"-\":\n return float(expression[1]) - exeval(expression[2:])", "def factorial(num):\n if num==1:\n return 1\n else:\n return(num*factorial(num-1))", "def five_mult(x):\r\n return 5 * x", "def factorial(n):\n return reduce(mul, range(1, n), 1)", "def factor(num: int) -> \"EvenFactorization\":\n base = num\n two_power = 0\n\n # divide out the greatest power of k\n while base % 2 == 0:\n base //= 2\n two_power += 1\n\n return EvenFactorization(base, two_power)", "def product(factors):\n product = 1\n for i in factors:\n product *= i\n return product", "def factorial(num):\n if num <= 1:\n return 1\n\n if num > 1:\n return num * factorial(num-1)", "def tail_factorial(\n number: int,\n accumulator: int = 1,\n) -> int:\n if number == 0:\n return accumulator\n\n return tail_factorial(\n number=number - 1,\n accumulator=accumulator * number,\n )", "def mul_by_num(num):\n def h(x):\n return num * x\n\n\n return h", "def main(expression):\n\n exception = parse_expression(expression)\n return calc(poland_notation(exception))", "def liouville_lambda(number: int) -> int:\n if not isinstance(number, int):\n msg = f\"Input value of [number={number}] must be an integer\"\n raise TypeError(msg)\n if number < 1:\n raise ValueError(\"Input must be a positive integer\")\n return -1 if len(prime_factors(number)) % 2 else 1", "def factorone(n):\n\tif (is_prime(n)): return n\n\tfor fact in (2,3,5,7,11,13,17,19,23,29):\n\t\tif n%fact == 0: return fact\n\treturn factorPR(n) # Needs work - no guarantee that a prime factor will be returned", "def factorial(n):\n if n == 0:\n return 1\n else:\n return reduce((lambda x, y: x * y), range(1, n + 1))", "def result_factorization(N,factors):\n if len(factors) !=2:\n print(\"len(factors) != 2\")\n sys.exit()\n\n if N != factors[0] * factors[1]:\n print(\"factors[0]×factors[1] != N @print_factors in ModulesFactorization\")\n sys.exit()\n\n if factors[0] ==1:\n print(\"自明な約数しか見つかりませんでした。\")\n return False\n else:\n print(\"{0}={1}×{2}\".format(N,factors[0],factors[1]))\n return True", "def evaluate(expression):\n if isinstance(expression, int):\n return expression\n elif isinstance(expression, str): # operator\n try:\n return operators[expression]\n except KeyError:\n raise InvalidOperator(expression)\n else:\n exps = [evaluate(exp) for exp in expression]\n if len(exps) == 0:\n raise NullExpression()\n operator = exps.pop(0)\n if callable(operator):\n if len(exps) == 2:\n arg1, arg2 = exps\n return operator(arg1, arg2)\n elif len(exps) < 2:\n raise MissingArguments()\n else:\n raise TooManyArguments()\n else:\n raise InvalidOperator(operator)", "def factorone(n):\r\n\tif (is_prime(n)): return n\r\n\tfor fact in [2,3,5,7,11,13,17,19,23,29]:\r\n\t\tif n%fact == 0: return fact\r\n\treturn factorPR(n) # Needs work - no guarantee that a prime factor will be returned\r", "def calculate_factorial(\n self, value: \"int\", fail_if_bigger_than: \"int\" = None\n ) -> \"int\":\n pass", "def raises(number, exponent):\n if exponent == 0:\n return 1\n else:\n return number * raises(number, exponent - 1)", "def get_factors(number):\n\n factors = [1, number]\n\n for i in range(2, int(math.sqrt(number))):\n if number % i == 0:\n factors.extend([i, number / i])\n\n return(factors)", "def normalize(expression):\n if not list(x for x, _ in expression if x == 4):\n return expression\n result = []\n prev = None\n for elem in expression:\n count, factor = elem\n if count == 4:\n if 
power_of_ten(prev):\n prev = minuend(elem)\n else:\n result.pop()\n prev = minuend(prev)\n result.append((1, factor))\n else:\n prev = elem\n result.append(prev)\n return result", "def plurals(num):\r\n if num != 1:\r\n return ('s')\r\n return ('')", "def fact(x):\n try:\n print(x, \"factorial =\", math.factorial(x))\n menu()\n except ValueError:\n print(\"Must be a positive whole number\")\n fact(get_in(\"1\"))\n except Exception as e:\n expt(e.args)", "def factorize(it):\n assert it > 0, \"cannot factorize %s\" % it\n while it % 2 == 0: # special case 2 - allows exclusion of even numbers later\n yield 2\n it //= 2\n factor = 3\n while factor * factor <= it:\n while it % factor == 0:\n yield factor\n it //= factor\n factor += 2\n if it > 1: # remainder is a prime\n yield it", "def multiply(value, multiplier):\n return value*multiplier", "def factorial(n):\n if n < 0:\n return \"There is no factorial for such number\"\n elif n <= 1:\n return 1\n else:\n return n * factorial(n-1)", "def arithmeticFactor(numbers,str_result,target):\n success = False\n success2 = False\n intialnums = copy.deepcopy(numbers)\n for i in range(len(numbers)):\n x= numbers[i]\n if isFactor(x,target):\n factornum = target//x\n numbers.remove(x)\n numbers,str_temp,success = arithmeticBasic(numbers,str_result,factornum)\n if not success:\n numbers,str_temp2,success2 = arithmeticDivision1(numbers,str_result,factornum)\n #print(success)\n if not success2 and not success:\n numbers,str_temp2,success2 = arithmeticDivision2(numbers,str_result,factornum)\n if not success2 and not success:\n numbers,str_temp,success = arithmeticApproximate(numbers,str_result,factornum)\n \n if success:\n #print('a')\n str_result += str(x)+'*'+'('+str_temp+')'\n break\n elif success2:\n #print('b')\n str_result += str(x)+'*'+str_temp2\n success = True\n numbers.clear()\n numbers.append(target)\n break\n else:\n str_result = \"\"\n numbers.clear()\n numbers=copy.deepcopy(intialnums)\n #numbers.insert(0,x)\n return numbers,str_result, success", "def helper(s):\n opt = '+'\n stack = []\n cur = 0\n # add an extra opt in the end to trigger the last operation.\n for i, c in enumerate(s):\n if c.isdigit():\n cur = cur * 10 + int(c)\n # make sure the last operation is performed.\n if c != ' ' or i == len(s) - 1:\n if opt in ['+', '-']:\n sign = 1 if opt == '+' else -1\n stack.append(cur * sign)\n elif opt == '*':\n stack[-1] = stack[-1] * cur\n elif opt == '/':\n stack[-1] = int(stack[-1] / cur)\n opt = c\n cur = 0\n return sum(stack)", "def factorial(n):\n if n < 1:\n raise ValueError(\"Not a valid value, must be greater than zero\")\n elif n == 1:\n return 1\n else:\n return n * factorial(n-1)", "def evaluate(expr):\n def isdigit(ch):\n try:\n int(ch)\n return True\n except ValueError:\n return False\n\n def evaluate_helper(expr, index):\n ch = expr[index]\n if ch == '(':\n # complex\n index += 1 # move past (\n\n # get the left operand\n left, index = evaluate_helper(expr, index)\n opr = expr[index]\n index += 1 # move past the operator\n\n # get the right operand\n right, index = evaluate_helper(expr, index)\n index += 1 # to move past closing paranthesis\n if opr == '+':\n return left + right, index\n elif opr == '*':\n return left * right, index\n\n \n else:\n if isdigit(ch):\n value = 0\n while isdigit(ch):\n value = value * 10 + int(ch)\n index += 1\n if index < len(expr):\n ch = expr[index]\n else:\n break\n return value, index\n\n \n\n return evaluate_helper(expr, 0)[0]" ]
[ "0.65296596", "0.6441182", "0.641148", "0.641148", "0.641148", "0.641148", "0.63577914", "0.60211015", "0.5988639", "0.5983069", "0.59670454", "0.5808318", "0.5783513", "0.57786024", "0.57604265", "0.5731007", "0.5716077", "0.57154644", "0.5681956", "0.5650473", "0.56236184", "0.56027806", "0.5599781", "0.5594414", "0.5582844", "0.5572393", "0.5541159", "0.547502", "0.54528815", "0.5436597", "0.54302186", "0.54277515", "0.5414122", "0.5371669", "0.5359336", "0.5354051", "0.53333133", "0.5327252", "0.5323293", "0.53214365", "0.5313255", "0.5310726", "0.5306709", "0.5304913", "0.5304849", "0.52972937", "0.52948385", "0.52936774", "0.5277122", "0.52769464", "0.52766216", "0.5275729", "0.5271662", "0.52703995", "0.52639854", "0.5241211", "0.5238244", "0.52375764", "0.5236802", "0.52360344", "0.52312404", "0.5224302", "0.5220103", "0.52089345", "0.5208431", "0.52042353", "0.5198392", "0.51859087", "0.51789266", "0.5172666", "0.5167614", "0.5167269", "0.5162378", "0.5147237", "0.512974", "0.5115363", "0.51047105", "0.5101282", "0.5100443", "0.5099003", "0.5098473", "0.5096493", "0.5094288", "0.50875825", "0.5073319", "0.5067302", "0.50634027", "0.506208", "0.5060407", "0.5054159", "0.5053142", "0.50391245", "0.50276273", "0.50243443", "0.50221175", "0.5021953", "0.5021901", "0.50192404", "0.5015796", "0.5014743" ]
0.74144316
0
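The factor parser above and the term parser in the next row both refer to names that neither snippet defines: a module-level tokens stream with peek()/next(), regex patterns on a Lexer class, a debug flag, match() and error() helpers, and AST node classes such as Number, VarRef, String and BinaryExpr. The definitions below are only a minimal sketch of that missing scaffolding, with every pattern and signature inferred from the call sites rather than taken from the dataset's source project:

import re

class Lexer:
    # Token patterns guessed from the re.match calls in the two parser snippets.
    number = r"\d+$"
    identifier = r"[A-Za-z_]\w*$"
    String = r'"[^"]*"$'

class TokenStream:
    # Cursor over a pre-split token list exposing the peek()/next() API the parsers expect.
    def __init__(self, toks):
        self.toks, self.pos = toks, 0
    def peek(self):
        return self.toks[self.pos] if self.pos < len(self.toks) else ""
    def next(self):
        self.pos += 1

class Number:
    def __init__(self, value): self.value = int(value)

class VarRef:
    def __init__(self, name): self.name = name

class String:
    def __init__(self, text): self.text = text

class BinaryExpr:
    def __init__(self, op, left, right): self.op, self.left, self.right = op, left, right

debug = False
tokens = TokenStream("3 * 4".split())

def error(msg):
    raise SyntaxError(msg)

def match(expected):
    tok = tokens.peek()
    if tok != expected:
        error("expected " + expected)
    tokens.next()
    return tok

With factor() and term() pasted into the same module, term() reduces the "3 * 4" stream above to BinaryExpr('*', Number(3), Number(4)); the parenthesized branch of factor() additionally needs addExpr(), whose definition happens to appear verbatim in the negatives list of an earlier row of this dump.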
term = factor { ('*' | '/') factor }
def term( ):
    tok = tokens.peek( )
    if debug: print ("Term: ", tok)
    left = factor( )
    tok = tokens.peek( )
    while tok == "*" or tok == "/":
        tokens.next()
        right = factor( )
        left = BinaryExpr( tok, left, right )
        tok = tokens.peek( )
    return left
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_term(self, node, children):\n if self.debug:\n print(\"Term {}\".format(children))\n term = children[0]\n for i in range(2, len(children), 2):\n if children[i-1] == \"*\":\n term *= children[i]\n else:\n term /= children[i]\n if self.debug:\n print(\"Term = {}\".format(term))\n return term", "def factor( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"Factor: \", tok)\n\tif re.match( Lexer.number, tok ):\n\t\texpr = Number(tok)\n\t\ttokens.next( )\n\t\ttok = tokens.peek( )\n\t\treturn expr\n\tif tok == \"(\":\n\t\ttokens.next( ) # or match( tok )\n\t\texpr = addExpr( )#might need to change to expression( )\n\t\ttokens.peek( )\n\t\ttok = match( \")\" )\n\t\treturn expr\n\tif re.match( Lexer.identifier, tok ): # added this to take into accout identifiers\n\t\texpr = VarRef(tok)\n\t\ttokens.next( )\n\t\treturn expr\n\tif re.match( Lexer.String, tok ): # added this to take into account strings\n\t\texpr = String( tok )\n\t\treturn expr\n\n\terror( \"Invalid operand\" )\n\treturn", "def from_term(term):\n if term is None:\n return term\n elif isinstance(term, (six.string_types, int, float)):\n return term\n elif isinstance(term, dict):\n return {k: from_term(v) for k, v in term.items()}\n elif isinstance(term, list):\n return [from_term(t) for i, t in enumerate(term)]\n elif issubclass(term.__class__, (Matcher,)):\n return term.generate()\n else:\n raise ValueError('Unknown type: %s' % type(term))", "def parseTerms(cmds):\n if len(cmds) != 0:\n print(\"Term\")\n delimit = \"[*|/|%]+\"\n factors = re.split(delimit, cmds[0])\n parseFactors(cmds[0])\n parseTerms(cmds[1:])", "def Term(self, paren=False):\n left = self.Factor()\n while self.currtok[1].name in {\"TIMES\", \"DIVISION\", \"MOD\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Factor()\n left = BinaryExpr(op, left, right, paren)\n return left", "def parse_term(self) -> SyntaxNode:\n return self._parse_cat_binary(\"M\", self.parse_value)", "def quote_poly_term(term: str):\n match = re.match(r\"^(\\w+)\\^(\\d+)$\", term)\n if match:\n return (match[1], int(match[2]))\n else:\n return (term, 1)", "def get_node_by_term(nodes, term):\n return nodes[sum([ord(c) for c in term]) % len(nodes)]", "def process(self, term):\n #get the last character\n #is there a possibility multiple punctuation at start and end?\n length = len(term)\n firstChar = term[0:1]\n if str(firstChar).isalnum():\n term = term\n else:\n #print \"cutting first letter \" + firstChar + \" from \" +term\n term = term[1:length]\n #print \"term now \" +term\n #get length again incase punctuation at start and end\n length = len(term)\n lastChar = term[length-1:length]\n if str(lastChar).isalnum():\n term = term\n else:\n #print \"cutting last letter \" + lastChar + \"from \" + term\n term = term[0:length-1]\n #print \" is now \" + term\n\n #now check if there's nothing left, then don't add, if there is, add it\n if term:\n return term\n else:\n return None", "def helper(s):\n opt = '+'\n stack = []\n cur = 0\n # add an extra opt in the end to trigger the last operation.\n for i, c in enumerate(s):\n if c.isdigit():\n cur = cur * 10 + int(c)\n # make sure the last operation is performed.\n if c != ' ' or i == len(s) - 1:\n if opt in ['+', '-']:\n sign = 1 if opt == '+' else -1\n stack.append(cur * sign)\n elif opt == '*':\n stack[-1] = stack[-1] * cur\n elif opt == '/':\n stack[-1] = int(stack[-1] / cur)\n opt = c\n cur = 0\n return sum(stack)", "def tokenize(eq):\n\n def push(token):\n if token != \"\":\n if token[0].isdigit():\n 
tokens.append(int(token))\n else:\n tokens.append(token)\n\n tokens = []\n token = \"\"\n\n for t in eq:\n if t == \" \":\n push(token)\n token = \"\"\n if t == \"+\" or t == \"*\" or t == \"(\" or t == \")\":\n push(token)\n push(t)\n token = \"\"\n elif t.isdigit():\n token += t\n\n push(token)\n return tokens", "def evaluate_terms(terms):\n expr_terms = [x for x in terms]\n\n while expr_terms.count('^') != 0:\n expr_terms = eval_expon(expr_terms)\n\n while MUL_DIV_RE.search(str(expr_terms)) is not None:\n expr_terms = eval_a_op_b(expr_terms, 'md')\n\n while len(expr_terms) != 1:\n expr_terms = eval_a_op_b(expr_terms, 'pm')\n\n return expr_terms[0]", "def apply_on_each_term(query: str, function: Callable) -> str:\n\n is_inside_a_term = False\n search_term = ''\n final_query = ''\n for character in query:\n\n if character == '[':\n search_term += character\n is_inside_a_term = True\n continue\n\n if is_inside_a_term:\n search_term += character\n if character == ']':\n search_term = function(search_term)\n final_query += search_term\n search_term = ''\n is_inside_a_term = False\n else:\n final_query += character\n\n return final_query", "def match_term(self,state,re_term):\r\n return None", "def cry(s : str) -> CryptolTerm:\n return CryptolTerm(s)", "def end_term(query):\n if query.endswith(' '):\n return query[query[:-1].rfind(' ')+1:]\n else:\n return query[query.rfind(' ')+1:]", "def act_on_literal(self, *, value):\n assert not isinstance(value, PolarsTerm)", "def format_term(term: Union[BNode, Literal, URIRef, Variable]) -> str:\n if isinstance(term, URIRef):\n return str(term)\n elif isinstance(term, BNode):\n return '?v_' + str(term)\n elif isinstance(term, Literal):\n return format_literal(term)\n else:\n return term.n3()", "def Factor(self):\n if self.currtok[1].name in {\"MINUS\", \"NOT\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n prime = self.primary()\n return Factor(op, prime)\n return self.primary()", "def tf(self, term, text):\n return text.count(term) / len(text)", "def list_of(term):\n return (\n Optional(\n term +\n ZeroOrMore(Suppress(Literal(',')) + term) +\n Optional(Suppress(Literal(',')))\n )\n )", "def sans_parens(s):\n s = prep_simple_str(s)\n \n total = s[0]\n \n for c in s[1:]:\n if c == \")\":\n return total\n elif c == \"*\":\n op = lambda a,b: a * b\n elif c == \"+\":\n op = lambda a,b: a + b\n else:\n total = op(total, c)\n return total", "def compile_word(word):\r\n\r\n if word.isupper():\r\n terms = [('%s*%s' % (10**i, d)) for (i,d) in enumerate(word[::-1])]\r\n return '(' + '+'.join(terms) + ')'\r\n else:\r\n return word", "def sanitize_input(term: str) -> str:\n return term.strip().replace(\"*\", \"\").replace(\"'\", \"\\\\'\").replace(\"~\", \"\")", "def _split_term(term):\n if '*' in term:\n variable_and_parameter = term.split('*')\n variable_and_parameter = [label.strip()\n for label in variable_and_parameter]\n else:\n raise TermNotProduct(term)\n\n if len(variable_and_parameter) != 2:\n raise TermNotProduct(term)\n\n return variable_and_parameter", "def fold_term(self, term):\n if isinstance(term, Var):\n return self.var(term.name)\n elif isinstance(term, Const):\n return self.const(term.name)\n elif isinstance(term, Dist):\n return self.dist(term.name)\n elif isinstance(term, Func):\n return self.func(\n term.funcname,\n tuple( self.fold_term(x)\n for x in term.args ))\n elif isinstance(term, Eq):\n return self.encode_eq(term)\n elif isinstance(term, Disj):\n return self.encode_disj(term)\n elif isinstance(term, Conj):\n return 
self.encode_conj(term)\n raise RuntimeError('Invalid term {}'.format(term))", "def cry_f(s : str) -> CryptolTerm:\n return CryptolTerm(to_cryptol_str_customf(s, frames=1))", "def _clean_term(self, term):\n return filter(lambda char: char in allowed_chars, term)", "def next_term(x):\n if x%2 == 0:\n return x/2\n else:\n return 3*x + 1", "def fold_term(self, init_repr, term):\n if isinstance(term, Var):\n return self.var(init_repr, term.name)\n elif isinstance(term, Const):\n return self.const(init_repr, term.name)\n elif isinstance(term, Dist):\n return self.dist(init_repr, term.name)\n elif isinstance(term, Func):\n return self.func(\n init_repr,\n term.funcname,\n [ self.fold_term(init_repr, x)\n for x in term.args ])\n elif isinstance(term, Eq):\n return self.eq(\n init_repr,\n term.pos,\n self.fold_term(init_repr, term.t1),\n self.fold_term(init_repr, term.t2))\n elif isinstance(term, Disj):\n return self.disj(\n init_repr,\n term.role,\n [ self.fold_term(init_repr, eq)\n for eq in term.eqs ])\n elif isinstance(term, Conj):\n return self.conj(\n init_repr,\n [ self.fold_term(init_repr, disj)\n for disj in term.disjs ])", "def get_tf(term, document):\n\n term_list = [term.lower() for term in document.split()]\n num_of_words_in_doc = len(document.split())\n term_count_in_doc = term_list.count(term)\n\n return term_count_in_doc / num_of_words_in_doc", "def term_from_uri(uri):\n if uri is None:\n return None\n # This insures that if we get a Literal with an integer value (as we\n # do for modification positions), it will get converted to a string,\n # not an integer.\n if isinstance(uri, rdflib.Literal):\n uri = str(uri.toPython())\n # This is to handle URIs like\n # http://www.openbel.org/bel/namespace//MAPK%20Erk1/3%20Family\n # or\n # http://www.openbel.org/bel/namespace/MAPK%20Erk1/3%20Family\n # In the current implementation, the order of the patterns\n # matters.\n patterns = ['http://www.openbel.org/bel/namespace//(.*)',\n 'http://www.openbel.org/vocabulary//(.*)',\n 'http://www.openbel.org/bel//(.*)',\n 'http://www.openbel.org/bel/namespace/(.*)',\n 'http://www.openbel.org/vocabulary/(.*)',\n 'http://www.openbel.org/bel/(.*)']\n for pr in patterns:\n match = re.match(pr, uri)\n if match is not None:\n term = match.groups()[0]\n term = unquote(term)\n return term\n # If none of the patterns match then the URI is actually a simple term\n # for instance a site: \"341\" or a substitution: \"sub(V,600,E)\"\n return uri", "def part_one():\n return symbol_counts[\"~\"] + symbol_counts[\"|\"]", "def simplify(term):\n simplified_term = term\n\n if isinstance(term,StatementTerm):\n simplified_term = StatementTerm(subject_term=simplify(term.get_subject_term()),\n predicate_term=simplify(term.get_predicate_term()),\n copula=term.get_copula(),\n interval=term.interval)\n elif isinstance(term,CompoundTerm):\n if term.connector == NALSyntax.TermConnector.Negation and \\\n len(term.subterms) == 1 and \\\n isinstance(term.subterms[0],CompoundTerm) and \\\n term.subterms[0].connector == NALSyntax.TermConnector.Negation :\n # (--,(--,(S --> P)) <====> (S --> P)\n # Double negation theorem. 
2 Negations cancel out\n simplified_term = simplify(term.subterms[0].subterms[0]) # get the inner statement\n # elif NALSyntax.TermConnector.is_conjunction(term.connector):\n # #(&&,A,B..C)\n # new_subterms = []\n # new_intervals = []\n # for i in range(len(term.subterms)):\n # subterm = simplify(term.subterms[i])\n # if i < len(term.intervals): new_intervals.append(term.intervals[i])\n # if isinstance(subterm, CompoundTerm) and subterm.connector == term.connector:\n # # inner conjunction\n # new_subterms.extend(subterm.subterms)\n # new_intervals.extend(subterm.intervals)\n # else:\n # new_subterms.append(subterm)\n #\n # simplified_term = CompoundTerm(subterms=new_subterms,\n # term_connector=term.connector,\n # intervals=new_intervals)\n elif term.connector is NALSyntax.TermConnector.ExtensionalDifference:\n pass\n elif term.connector is NALSyntax.TermConnector.IntensionalDifference:\n pass\n elif term.connector is NALSyntax.TermConnector.ExtensionalImage:\n pass\n elif term.connector is NALSyntax.TermConnector.IntensionalImage:\n pass\n\n\n return simplified_term", "def termrep(terminals):\n if terminals is None:\n return ''\n else:\n s = '.'\n try:\n for t in terminals:\n s += str(t) + '.'\n return s[0:-1] # shaves final dot\n except TypeError:\n return '.' + str(terminals)", "def _marshal_term(term):\n if isinstance(term, int):\n term = str(term)\n return term", "def consume(self, word):\r\n visited = set()\r\n length = len(word)\r\n\r\n @lru_cache(maxsize=None)\r\n def parse(elem, idx):\r\n if elem.isNonterminal():\r\n if (elem, idx) in visited:\r\n raise LeftRecursive(\"Left recursive is prohibited: \" + elem.symbol)\r\n else:\r\n visited.add((elem, idx))\r\n return parse(elem.tree.arg.exp, idx)\r\n if elem is epsilon:\r\n if idx <= length:\r\n return 0\r\n else:\r\n return None\r\n if isinstance(elem, Terminal):\r\n if idx < length:\r\n return 1 if word[idx] == elem.symbol else None\r\n else:\r\n return None\r\n if elem.symbol == '>>':\r\n r1 = parse(elem.tree.left.exp, idx)\r\n if r1 is None:\r\n return None\r\n r2 = parse(elem.tree.right.exp, idx + r1)\r\n if r2 is None:\r\n return None\r\n return r1 + r2\r\n if elem.symbol == '|':\r\n r1 = parse(elem.tree.left.exp, idx)\r\n if r1 != None:\r\n return r1\r\n return parse(elem.tree.right.exp, idx)\r\n if elem.symbol == '~':\r\n r1 = parse(elem.tree.arg.exp, idx)\r\n return 0 if r1 is None else None\r\n if elem.symbol == '+':\r\n r1 = parse(elem.tree.arg.exp, idx)\r\n if r1 is None:\r\n return None\r\n r2 = parse(elem, idx + r1)\r\n return r1 if r2 is None else r1 + r2\r\n\r\n result = parse(self, 0)\r\n parse.cache_clear()\r\n return result", "def term_to_rdflib(term: str) -> Term:\n if term.startswith('?'):\n return Variable(term[1:])\n elif term.startswith(\"\\\"\"):\n return from_n3(term)\n else:\n return URIRef(term)", "def minuend(term):\n keys = sorted(ROMAN_NUMERALS.keys())\n _, factor = term\n next_factor = keys[keys.index(factor) + 1]\n return (1, next_factor)", "def puncture(self,x):", "def RewriteTerm(self, key, op, operand, key_type):\n if key not in self._keys or op != '=':\n return None\n return operand", "def augmented_term_fequency(term,tokens):\n\tterm = processes_and_tokenize(term)[0] #make sure term is in correct form\n\n\tmax_count = max([tokens.count(t) for t in tokens])\n\treturn tokens.count(term)/max_count", "def parse_term(s, ctx, mctx, ops):\n s = strip_parens(s.strip())\n if re.search(\"^\\w*$\",s): # If a single word, either a variable or a constant\n if s in ops:\n return Con(ops[s], [], ctx, mctx)\n 
elif s in ctx:\n return Var(s, ctx, mctx)\n elif s in mctx:\n return MVar(s, [], ctx, mctx)\n elif s.startswith(\"O\") and s[1:] in mctx:\n return MVar(s, [], ctx, mctx, is_hole=True)\n else:\n raise Exception(\"Unbound variable: \" + \"'\" + s + \"'\")\n\n elif re.search(\"^\\w*\\[.*\\]$\", s): # If a metavariable\n m = re.search(\"^(\\w)*\\[(.*)\\]$\", s)\n mvar = m.group(1)\n env = m.group(2)\n if not env:\n return MVar(mvar, [], ctx, mctx, is_hole=mvar.startswith(\"O\"))\n else:\n return MVar(mvar, [Term.parse_term(t, ctx, mctx, ops) for t in split_tuple(env)], ctx, mctx)\n\n elif re.search(\"^([\\w ]*)\\.(.*)$\", s): # If a variable under binders\n m = re.search(\"^([\\w ]*)\\.(.*)$\", s)\n bound = m.group(1).split()\n tm = m.group(2)\n return Term.parse_term(tm, bound + ctx, mctx, ops)\n\n elif re.search(\"^(\\w*) *\\((.*)\\)$\", s): # If an expression\n m = re.search(\"^(\\w*) *\\((.*)\\)$\", s)\n op = m.group(1)\n args = m.group(2)\n return Con(ops[op], [Term.parse_term(t, ctx, mctx, ops) for t in split_tuple(args)], ctx, mctx)\n else:\n raise Exception(\"Can't parse: \" + s)", "def _normal_ordered_term(term, coefficient):\n term = list(term)\n ordered_term = FermionOperator()\n for i in range(1, len(term)):\n for j in range(i, 0, -1):\n left_sub_term = term[j - 1]\n right_sub_term = term[j]\n # Swap operators if left operator is annihilation op and right operator is\n # a\\dagger operator\n if not left_sub_term[1] and right_sub_term[1]:\n term[j], term[j - 1] = left_sub_term, right_sub_term\n coefficient = coefficient * -1\n # If indice are same, employ the anti-commutation relationship\n # And generate the new term\n if left_sub_term[0] == right_sub_term[0]:\n new_term = term[:(j - 1)] + term[(j + 1):]\n ordered_term += _normal_ordered_term(\n new_term, -coefficient)\n # Deal with the case with same operator\n elif left_sub_term[1] == right_sub_term[1]:\n # If indice are same,evaluate it to zero.\n if left_sub_term[0] == right_sub_term[0]:\n return ordered_term\n # Swap them if same operator but lower index on left\n if left_sub_term[0] < right_sub_term[0]:\n term[j], term[j - 1] = left_sub_term, right_sub_term\n coefficient = coefficient * -1\n\n # Add the term and return.\n ordered_term += FermionOperator(tuple(term), coefficient)\n return ordered_term", "def escapedSeq(term):\n\tfor char in term:\n\t\tif char in escapeRules.keys():\n\t\t\tyield escapeRules[char]\n\t\telse:\n\t\t\tyield char", "def slashout(value):\n intvalue = value//divisor\n slashes = \"#\" * intvalue\n return slashes", "def compile_term(self):\n\n\t\tself.outfile.write('<term>\\n')\n\n\t\tcount = 0\n\n\t\twhile(self.tokenizer.get_token() not in [')',']',';',',', '/', '|', '<', '>', '=', '*', '+', '&']):\n\t\t\tif self.tokenizer.get_token().isdigit():\n\t\t\t\tself.outfile.write(self.tokenizer.int_value())\n\t\t\telif '\"' in self.tokenizer.get_token():\n\t\t\t\tself.outfile.write(self.tokenizer.str_value())\n\t\t\telif self.tokenizer.get_token() in ['true', 'false', 'null', 'this']:\n\t\t\t\tself.outfile.write(self.tokenizer.keyword())\n\t\t\telif self.tokenizer.get_token() == '-' and count == 0:\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\t\t\t\tself.compile_term()\n\t\t\telif self.tokenizer.get_token() == '-' and count > 0:\n\t\t\t\tbreak\n\t\t\telif self.tokenizer.get_token() == '~':\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\n\t\t\t\tif self.tokenizer.get_token() != '(':\n\t\t\t\t\tself.compile_term()\n\n\t\t\t\telse:\n\t\t\t\t\tself.outfile.write('<term>\\n' + 
self.tokenizer.symbol())\n\t\t\t\t\tself.compile_expression()\n\t\t\t\t\txml = self.tokenizer.symbol() + '</term>\\n'\n\t\t\t\t\tself.outfile.write(xml)\n\n\t\t\telif self.tokenizer.get_token() == '(':\n\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\t\t\t\tself.compile_expression()\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\n\t\t\telif self.tokenizer.get_token() == '[':\n\t\t\t\txml = self.tokenizer.symbol()\n\t\t\t\tself.outfile.write(xml)\n\n\t\t\t\tself.compile_expression()\n\n\t\t\t\tself.outfile.write(self.tokenizer.symbol())\n\n\t\t\telif self.tokenizer.get_token() == '.':\n\t\t\t\txml = self.tokenizer.symbol() + self.tokenizer.identifier() + self.tokenizer.symbol() + '<expressionList>\\n'\n\t\t\t\tself.outfile.write(xml)\n\n\t\t\t\tif self.tokenizer.get_token() != ')':\n\t\t\t\t\tself.compile_expression_list()\n\n\t\t\t\tself.outfile.write('</expressionList>\\n' + self.tokenizer.symbol())\n\t\t\n\t\t\telse:\n\t\t\t\tself.outfile.write(self.tokenizer.identifier())\n\n\t\t\tcount = count + 1\n\n\t\tself.outfile.write('</term>\\n')\n\n\t\tif self.tokenizer.get_token() in self.tokenizer._operands:\n\t\t\tif self.tokenizer.get_token() in ['<', '>', '\"', '&']:\n\t\t\t\txml = '<symbol> ' + CompilationEngine._operands.get(self.tokenizer.get_token()) + ' </symbol>\\n'\n\t\t\t\tself.tokenizer.advance()\n\t\t\telse:\n\t\t\t\txml = self.tokenizer.symbol()\n\n\t\t\tself.outfile.write(xml)\n\t\t\tself.compile_term()", "def split_terms(self, operation):\n return [self.format_number(t) for t in operation.split('/')]", "def sanitize(formula):\n TR_UNICODE = {u\"·\": u\"*\", u\"−\": u\"-\"}\n return \"\".join(TR_UNICODE.get(ch, ch) for ch in formula if ch > \" \")", "def _get_term(self):\n return self.__term", "def _get_term(self):\n return self.__term", "def _get_term(self):\n return self.__term", "def _get_term(self):\n return self.__term", "def _get_term(self):\n return self.__term", "def _get_term(self):\n return self.__term", "def _get_term(self):\n return self.__term", "def _get_term(self):\n return self.__term", "def _get_term(self):\n return self.__term", "def clean_term(term, site='', siteWordCount=None, dataType=''): # dtype\n if pd.isna(term): \n print(\"(clean_term) Input term is NaN: {}\".format(term))\n return ''\n if not isinstance(term, str): \n return str(term)\n\n insigWords = LoincTable.stop_words # [\"IN\", \"FROM\", \"ON\", \"OR\", \"OF\", \"BY\", \"AND\", \"&\", \"TO\", \"BY\", \"\", \" \"]\n \n modTerm = (term.replace(\"'\", \"\").replace(\",\", \" \").replace(\".\", \" \") \\\n .replace(\":\", \" \").replace('\\t', \" \").replace(\"^\", \" \").replace(\"+\", \" \")\\\n .replace(\"*\", \" \").replace(\"~\", \" \").replace(\"(\", \" \").replace(\")\", \" \")\\\n .replace(\"!\", \" \").replace(\"[\", \" \").replace(\"]\", \" \").replace(\"{\", \" \").replace(\"}\", \" \")\\\n .replace(\"_\", \" \").replace(\"|\", \" \").replace('\"', \" \").split(\" \"))\n\n #############################################################################\n i = 0\n while i < len(modTerm):\n modTerm[i] = re.sub(r\"\\d{1,2}[\\/-]\\d{1,4}([\\/-]\\d{2,4})*|\\d{6}\", \"\", modTerm[i])\n if modTerm[i] != None and len(modTerm[i]) > 0:\n i = i + 1\n else:\n modTerm.remove(modTerm[i])\n #############################################################################\n\n # remove repeated tokens \n modTerm = sorted(set(modTerm), key=modTerm.index)\n\n j = 0\n nameSplit = list()\n while j < len(modTerm):\n splits = modTerm[j].replace(\"/\", \" \").replace(\"\\\\\", \" \").replace(\"-\", \" 
\").split(\" \")\n k = 0\n while ((k < len(splits)) and (len(splits[k]) > 0) and (splits[k] not in insigWords)):\n newWord = splits[k].strip()\n nameSplit.append(newWord)\n\n if len(site) > 0 and isinstance(siteWordCount, dict): \n siteWordCount[site][newWord] += 1\n k = k + 1\n j = j + 1\n\n return \" \".join(nameSplit)", "def _getDenom(expr):\n l = len(expr)\n den = ''\n i=0\n while i<l:\n if expr[i:i+2] == '/(' or expr[i:i+3] == '/ (':\n if den != '': den += '*'\n den += expr[i+1]\n par = 1\n i += 2\n while par > 0:\n if expr[i] == '(': par += 1\n elif expr[i] == ')': par -= 1\n den += expr[i]\n i += 1\n else :i += 1\n return den", "def mysplit(equation):\r\n return split(\"([+-/*])\", equation.replace(\" \", \"\"))", "def proc(form, term, idx):\n assert term == tensor.local_terms[idx]\n assert form == expected_terms[idx]\n return 'N'", "def parseFactors(cmds):\n print(\"Factor\")\n if cmds[0] == 'D':\n parseExpr(cmds[2:len(cmds)-1])\n elif cmds[0] == '(':\n parseExpr(cmds[1:len(cmds)-1])\n else:\n parseNumber(cmds)", "def __init__(self, **terms):\n self.terms = []\n for op, v in terms.iteritems():\n # use '_xx' to differentiate terms with same operator\n op = op.split('_')[0]\n if op == 'search':\n val = RegExp(v)\n elif op == 'match':\n val = RegExp(v, match=True)\n else:\n val = v\n try:\n op = self.operators[op]\n except KeyError:\n raise UnknownOperatorError(\"Operator '%s'\" % op)\n self.terms.append((op, val))", "def mycut(value, arg):\r\n return value.replace(arg, '')", "def switch_root_to_ternary_prefix(s): # special case for prefix\n if s == 'ה':\n return '?:'\n else:\n return s", "def _reduced(self, elem, separator):\n if re.search(r'^\\d+$', elem):\n return URLTreeNode.REDUCED_NUM_LITERAL, '[NUMBER]'\n elif separator == '/':\n return URLTreeNode.REDUCED_PATH_LITERAL, '[...]'\n else:\n return URLTreeNode.REDUCED_PARAM_LITERAL, '[...]'", "def __init__(self, term=None, coefficient=1.0):\n self.terms = {}\n if term is not None:\n term, parity = _sort_majorana_term(term)\n self.terms[term] = coefficient * (-1)**parity", "def prepare_terms(terms, search_mode):\n if search_mode in (\"contains\", \"starts_with\"):\n terms = terms.replace(\"_\", \"\\_\").replace(\"%\", \"\\%\")\n\n if search_mode == \"contains\":\n terms = \"%\" + terms + \"%\"\n elif search_mode == \"starts_with\":\n terms = terms + \"%\"\n return terms", "def escapeSolrArg(term):\n\tterm = term.replace('\\\\', r'\\\\') # escape \\ first\n\treturn \"\".join([nextStr for nextStr in escapedSeq(term)])", "def any_term(cls, *terms: str) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"anyTerm\", [*terms])", "def test_term(self):\n\t\tterm_one = schrodinger.term(0)\n\t\tself.assertEqual(1, term_one(0).numpy())\n\t\tterm_two = schrodinger.term(1)\n\t\tself.assertEqual(0, term_two(0).numpy())", "def ccut(value,arg):\n return value.replace(arg, '')", "def censor(text: Optional[str]) -> str:\n char = \"*\"\n text = text if text else \"\"\n return text[0] + (len(text) - 1) * char if text else text", "def _make_product(terms):\n if terms:\n product = terms[0]\n for term in terms[1:]:\n product = Mul((product, term))\n return product \n else:\n return Const(1)", "def prefix_value(s):\n forbidden = forbidden_chars.intersection(s)\n if forbidden:\n raise ValueError('%(s)s contains forbidden characters'\n ' (%(forbidden)s)'\n % locals())\n stripped = s.strip('/')\n if stripped:\n return stripped.join('//')\n return '/'", "def __init__(self: 'StarTree', child: 'RegexTree') -> None:\n UnaryTree.__init__(self, '*', child)", "def 
_fadefunc(self, t):\r\n return t * t * t * (t * (t * 6 - 15) + 10)", "def __init__(self, phrase):\r\n self.phrase = [Character(letter) if letter != ' '\r\n else ' ' for letter in phrase]", "def assign_term(urs):\n\n if isinstance(urs, six.string_types):\n return MAPPING.get(urs, UNKNOWN)\n return assign_term(urs.get_rna_type())", "def eval2(text):\n\n numbers, operators = [], []\n for glyph in text.split():\n if glyph.isdigit():\n rhs = int(glyph)\n if operators and operators[-1] in '*/':\n lhs = numbers.pop()\n operating = operators.pop()\n if operating == '*':\n numbers.append(lhs * rhs)\n else:\n numbers.append(lhs // rhs)\n else:\n numbers.append(rhs)\n else:\n operators.append(glyph)\n\n while operators:\n lhs, rhs = numbers[0], numbers[1]\n operating = operators[0]\n if operating == '+':\n numbers = [lhs + rhs] + numbers[2:]\n else:\n numbers = [lhs - rhs] + numbers[2:]\n operators = operators[1:]\n\n return numbers.pop()", "def __call__(self, *args: TermLike) -> 'Term':\n return Term(self, args)", "def _sort_majorana_term(term):\n if len(term) < 2:\n return term, 0\n center = len(term) // 2\n left_term, left_parity = _sort_majorana_term(term[:center])\n right_term, right_parity = _sort_majorana_term(term[center:])\n merged_term, merge_parity = _merge_majorana_terms(left_term, right_term)\n return merged_term, (left_parity + right_parity + merge_parity) % 2", "def __call__(self, *args, **kw):\n return self.transform(Term.__call__(self, *args, **kw))", "def basic_ttr(n_terms, n_words):\n if n_words == 0:\n return 0\n return n_terms / n_words", "def _exec_md(self, stack, operand):\n if stack and stack[-1] in {'*', '/'}:\n operator, v = stack.pop(), stack.pop()\n if operator == \"*\":\n stack.append(v * operand)\n else:\n stack.append(v // operand)\n else:\n stack.append(operand)\n return 0", "def FSMLetterSymbol(letter):\n return FSMEmptyWordSymbol if letter is None else repr(letter)", "def compile_word(word):\n # Your code here.\n if word.isalpha() and word.islower():\n return word\n if not word.isalpha():\n return word\n result = []\n mul = 1\n word = word[::-1]\n for w in word:\n if w.isalpha and w.isupper():\n result.append(str(mul) + '*' + w + \"+\")\n else:\n result.append(w)\n mul = mul*10\n ans = ''.join(result)\n return ans[:-1]", "def literal_symbol(literal):\n if literal.op == '~':\n return literal.args[0]\n else:\n return literal", "def __init__(self, expr):\n super(Combine, self).__init__(_silent_pattern(expr))", "def search(self, term):", "def potential_multi_term(tagged) :\n res = True\n for tag in tagged :\n res = res and stemgrammar(tag)\n return res", "def __init__(self, char, freq):\n\t\t\tself.char = char\n\t\t\tself.freq = freq\n\t\t\tself.left = None\n\t\t\tself.right = None", "def visit_factor(self, node, children):\n if self.debug:\n print(\"Factor {}\".format(children))\n if len(children) == 1:\n return children[0]\n sign = -1 if children[0] == '-' else 1\n return sign * children[-1]", "def Calc():\n print('Please type a maths expression with 2 intergers or floats and an operator \"+\", \"-\", \"*\" or \"/\"')\n inp = (input())\n for char in inp:\n if char not in '1234567890.-+*/':\n print('Please restart the program and only type valid characters')\n return\n operators = [\"+\", \"-\", \"*\", \"/\"]\n buf = ''\n operand1 = 0.0\n operand2 = 0.0\n for char in inp:\n if char not in operators:\n buf += char\n else:\n operator = char\n operand1 = float(buf)\n buf = ''\n operand2 = float(buf)\n res = 0.0\n if operator == '+':\n res = su(operand1, operand2)\n elif 
operator == '-':\n res = sub(operand1, operand2)\n elif operator == '*':\n res = mu(operand1, operand2)\n elif operand2==0:\n return \"Can not divide by 0\"\n else:\n res = di(operand1, operand2)\n print(res)\n return res", "def compare(string1: str, string2: str, /) -> int:\n ...", "def plurals(num):\r\n if num != 1:\r\n return ('s')\r\n return ('')", "def numerator(self, ???):", "def test_greedy_zero_or_more_complex():\n grammar = r\"\"\"\n S: (\"a\" | \"b\" \"c\")* \"a\"*;\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = GLRParser(g)\n forest = p.parse(\"a a a b c a b c a a a\")\n assert len(forest) == 4\n\n grammar = r\"\"\"\n S: (\"a\" | \"b\" \"c\")*! \"a\"*;\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = GLRParser(g)\n forest = p.parse(\"a a a b c a b c a a a\")\n assert len(forest) == 1", "def reduce_base(word: str) -> str:\n porter = PorterStemmer()\n return porter.stem(word)" ]
[ "0.6671328", "0.5942621", "0.578661", "0.5665476", "0.5546639", "0.55136436", "0.55068946", "0.5483381", "0.5461509", "0.5405435", "0.53730965", "0.5351918", "0.5335714", "0.5334143", "0.5312731", "0.5308194", "0.52843577", "0.52693665", "0.52682096", "0.52596307", "0.52582663", "0.5255085", "0.5236891", "0.520382", "0.5183893", "0.51641446", "0.515877", "0.5132851", "0.5126308", "0.50719494", "0.50688595", "0.5067399", "0.5057749", "0.5051295", "0.50420547", "0.50368905", "0.50284946", "0.50257707", "0.50182045", "0.5013443", "0.50102377", "0.50004274", "0.499163", "0.4984819", "0.49593142", "0.49529842", "0.49337128", "0.49014756", "0.48985925", "0.48982897", "0.48982897", "0.48982897", "0.48982897", "0.48982897", "0.48982897", "0.48982897", "0.48982897", "0.48982897", "0.48645717", "0.48523077", "0.48490107", "0.4846156", "0.48424253", "0.48319873", "0.48300636", "0.48294237", "0.48195603", "0.4806253", "0.48033807", "0.48005483", "0.47974005", "0.4788301", "0.47874537", "0.4785344", "0.4772648", "0.47689363", "0.47602025", "0.47571963", "0.47548538", "0.47437787", "0.47404143", "0.47340086", "0.4732389", "0.47075948", "0.47054294", "0.4704666", "0.4700407", "0.46897092", "0.46873638", "0.466955", "0.46684963", "0.46673176", "0.4665738", "0.46608153", "0.46573228", "0.4656761", "0.46550643", "0.46507084", "0.46480736", "0.4640434" ]
0.7452738
0
addExpr = term { ('+' | '-') term }
def addExpr( ):
	tok = tokens.peek( )
	if debug: print ("addExpr: ", tok)
	left = term( )
	tok = tokens.peek( )
	while tok == "+" or tok == "-":
		tokens.next()
		right = term( )
		left = BinaryExpr( tok, left, right )
		tok = tokens.peek( )
	return left
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(text):\n orig = dispb[\"text\"]\n new = orig + text\n ops = [\"+\",\"-\",\"*\",\"/\"]\n # conditions\n # length 21\n if len(new) > 21:\n dispb[\"text\"] = orig\n return 0\n \n # one calc at a time\n if len(orig) > 0:\n if (orig[-1] in ops) & (text in ops):\n dispb[\"text\"] = orig\n return 0\n\n dispb[\"text\"] = new\n return 0", "def _reduce_expr(tree, tok):\n second = tree.pop()\n if len(tree) > 0 and not Parser._is_unary_op(tok):\n first = tree.pop()\n expr = BinaryExpression(first, tok, second)\n else:\n expr = UnaryExpression(second, tok)\n tree.append(expr)", "def _add_to_expression(self, item):\n addition = ' {%s} ' % item.text()\n expression = self.ui.expression\n pos = expression.cursorPosition()\n text = str(expression.displayText())\n expression.setText(text[:pos] + addition + text[pos:])", "def Addition(self, paren=False):\n left = self.Term(paren)\n while self.currtok[1].name in {\"PLUS\", \"MINUS\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Term(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def convert_elementwise_add(node, **kwargs):\n return create_basic_op_node('Add', node, kwargs)", "def __radd__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(other, self)", "def _append_operator(self, operator):", "def math_add():\n a = int(request.args.get(\"a\"))\n b = int(request.args.get(\"b\"))\n return str(add(a, b))", "def adp(lhs,rhs):\n test=lambda s: s[0]=='`'\n assert test(lhs)==True,'error: lhs should be non-terminal'\n lhs=so.getSymbol(lhs[1:],terminal=False,autocreate=True)\n rhs=[so.getSymbol(s[1:],False,True) if test(s) else so.getSymbol(s,True,True) for s in rhs]\n return addProduction(lhs,rhs)", "def quote_plus(s, safe='', encoding=None, errors=None):\n if ' ' in s:\n s = quote(s, safe + ' ', encoding, errors)\n return s.replace(' ', '+')\n return quote(s, safe, encoding, errors)", "def __add__(self, other: Any) -> ColumnOperators:\n return self.operate(add, other)", "def convert_addn(node, **kwargs):\n return create_basic_op_node('Sum', node, kwargs)", "def addition():\r\n error_handler()\r\n f1.delete(0, END)\r\n a1 = float(operand.get())\r\n a2 = float(operator.get())\r\n result = a1 + a2\r\n f1.insert(10, str(result))", "def __iadd__(self, term):\n self.add(term)\n return self", "def __add__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n error_msg = (\n f'Invalid expression during addition to {self}: [{other}]'\n )\n raise excep.biogemeError(error_msg)\n return Plus(self, other)", "def __add(thiselement, char):\n if thiselement == None:\n return char\n return thiselement + char", "def add(*args):\n\n # TODO: Fill sum with the correct value, based on the\n # args provided.\n sum = str(args[0] + args[1])\n return sum", "def __radd__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(add, other)", "def test_sqpp_plus_expr1_minus_paren_expr2(self):\n self.assertEqual(self.parser.parse_query(\"+ expr1 - (expr2)\"),\n ['+', 'expr1', '-', 'expr2'])", "def visit_expression(self, node, children):\n if self.debug:\n print(\"Expression {}\".format(children))\n expr = 0\n start = 0\n # Check for unary + or - operator\n if text(children[0]) in \"+-\":\n start = 1\n\n for i in range(start, len(children), 2):\n if i and children[i - 1] == \"-\":\n expr -= children[i]\n else:\n expr += children[i]\n\n if 
self.debug:\n print(\"Expression = {}\".format(expr))\n\n return expr", "def add_concat(infix_regex: str):\n\n result = \"\"\n\n # we use None to symbolize the start of the string\n cant_concat_from = ['(', '|', None]\n cant_concat_to = ['*', '+', ')', '|']\n last_char = None\n\n for char in infix_regex:\n if char not in cant_concat_to and last_char not in cant_concat_from:\n result += '.'\n result += char\n last_char = char\n\n return result", "def brackets(expr):\n expr_latex = sp.latex(expr)\n if '+' in expr_latex or '-' in expr_latex:\n return \"(\" + expr_latex + \")\"\n else:\n return expr_latex", "def add_expr_to_comp(self, comp, expr):\n if not isinstance(comp, cellml_component):\n comp = self.model.get_component_by_name(comp)\n if not hasattr(comp, u'math'):\n # Create the math element\n math = comp.xml_create_element(u'math', NSS[u'm'])\n comp.xml_append(math)\n # Append this expression\n comp.math.xml_append(expr)", "def add_subtract(statement):\r\n operators = list(filter(lambda x: x in ('+', '-'), statement))\r\n index = statement.index(operators[0])\r\n\r\n # Find operands\r\n op1, op2 = find_operands(statement, index)\r\n\r\n # Perform operation\r\n if operators[0] == '+':\r\n result = op1 + op2\r\n elif operators[0] == '-':\r\n result = op1 - op2\r\n\r\n # Replace operator and operands with result\r\n remove_and_replace(statement, index, result)\r\n\r\n return statement", "def visit_Unary(self, node):\n op = node.op.type\n if op == PLUS:\n return +self.visit(node.expr)\n elif op == MINUS:\n return -self.visit(node.expr)", "def __add__(self, other):\n if isinstance(other, NeuralQueryExpression):\n self._check_type_compatibility(self.type_name, other.type_name, 'add')\n provenance = NQExprProvenance(\n operation='add', inner=self.provenance, other=other.provenance)\n return self.context.as_nql(self.tf + other.tf, self.type_name, provenance)\n else:\n # hopefully a constant\n provenance = NQExprProvenance(\n operation='add',\n inner=self.provenance,\n args=(None, other),\n other=NQExprProvenance(operation='constant'))\n return self.context.as_nql(self.tf + other, self.type_name, provenance)", "def plus(self, a, b):\n return a + b", "def add(a, b):\n c = Calculator()\n result = c.add(a, b)\n click.echo('{} + {} = {}'.format(a, b, result))", "def parse_single_op_string(opstring) :\n ops = {'+' : \"plus\",\n '?' 
: \"opt\" , \n '*' : \"star\"}\n return '('.join(ops[c] for c in reversed(opstring)) + '('", "def test_evaluate_add_expression(self):\n value = self.evaluate_common(\"2M add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Decimal, \"Expected Decimal\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n value = self.evaluate_common(\"2D add 2M\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2F add 2D\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Double, \"Expected Double\")\n self.assertTrue(value.value == 4.0, \"Expected 4\")\n value = self.evaluate_common(\"2 add 2L\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int64, \"Expected Int64\")\n self.assertTrue(value.value == 4, \"Expected 4\")\n try:\n value = self.evaluate_common(\"2 add '2'\")\n self.fail(\"String promotion to int\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"2 add null\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value is None, \"Expected None\")", "def add(lhs, rhs):\n return _make.add(lhs, rhs)", "def addition(a, b):\n pass", "def expression( ):#DOUBLE CHECK THIS\n\t\n\ttok = tokens.peek( )\n\tif debug: print(\"Expression: \", tok)\n\tleft = andExpr( ) #does the left side of the grammar \n\ttok = tokens.peek( )\n\twhile tok == \"or\": #checks to see if there is the token or and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = andExpr( )\n\t\tleft = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE THIS TO STRING CAUSE ITS \"or\"\n\t\ttok = tokens.peek( )\n\treturn left", "def add(*args):\n #convert args to floats so we can do the maths\n values = list(args)\n for x in range(len(values)):\n values[x] = float(values[x])\n \n summation = str(ft.reduce(oper.add,values))\n return summation", "def add(*args):\n body = ['<h1>Addition Calculator</h1>']\n _sum = sum(map(int, args))\n body.append(f'Total equals: {_sum}')\n return '\\n'.join(body)", "def solve_equation_addition_precendence(eq, verbose=False):\n tokens = tokenize(eq)\n if verbose:\n print(f\"eq: {tokens}\")\n\n stack = []\n ops = {\n None: do_push,\n \"(\": do_push,\n \")\": do_parenthesis,\n \"+\": do_addition,\n \"*\": do_push,\n }\n\n for t in tokens:\n if isinstance(t, int):\n op = stack[-1] if len(stack) else None\n ops[op](stack, t)\n elif t == \"+\" or t == \"*\" or t == \"(\":\n stack.append(t)\n elif t == \")\":\n ops[\")\"](stack, t)\n # solve preparenthesis addition\n if len(stack) > 2:\n v = stack.pop()\n assert isinstance(v, int)\n ops[stack[-1]](stack, v)\n else:\n assert False, f\"fail token: {t}\"\n\n if verbose:\n print(f\"stack: {stack}\")\n\n # solve multiplications\n while len(stack) > 1:\n rhs = stack.pop()\n assert isinstance(rhs, int)\n op = stack.pop()\n if op == \"*\":\n lhs = stack.pop()\n assert isinstance(lhs, int)\n stack.append(lhs * rhs)\n else:\n assert False, f\"invalid operator (not *): {op}\"\n\n assert len(stack) == 1\n return stack[0]", "def calculate_expression(number1, number2, operator):\n\n if operator == '+':\n return number1 + number2\n elif operator == '-':\n return number1 - number2\n elif operator == '*':\n return number1 * number2", "def _build_add(self) -> str:\n return dedent(\n \"\"\"\n @SP\n M=M-1\n A=M\n D=M\n @SP\n M=M-1\n A=M\n M=M+D\n @SP\n M=M+1\n \"\"\"\n )", "def __add__(self, other):\n\n other = 
formula(other, namespace=self.namespace)\n terms = self.terms + other.terms\n pieces = [(term.name, term) for term in terms]\n pieces.sort()\n terms = [piece[1] for piece in pieces]\n return formula(terms, namespace=self.namespace)", "def make_positive(expression: Expr) -> Expr:\n if expression.op == '~':\n new_expression = Expr(expression.args[0].op, *expression.args[0].args)\n return new_expression\n return expression", "def signExp(expression, sign):\n arr = list(expression)\n if sign == \"-\":\n for i in range(len(expression)):\n # Invert the sign if the 'sign' is '-'\n if arr[i] == \"+\":\n arr[i] = \"-\"\n elif arr[i] == \"-\":\n arr[i] = \"+\"\n # If the first characters is not a sign, it is a '+' and we need to \n # add it to the subexpression\n if arr[0] != \"+\" and arr[0] != \"-\":\n arr.insert(0, sign)\n return \"\".join(x for x in arr)", "def add(*args):\n\n result = int(args[0]) + int(args[1])\n\n return str(result)", "def add(first, second):\n return first + second", "def test_unary_op_support():\n check_peval_expression(\"+(2)\", {}, \"2\", fully_evaluated=True, expected_value=2)\n check_peval_expression(\"-(-3)\", {}, \"3\", fully_evaluated=True, expected_value=3)\n check_peval_expression_bool(\"not 0\", {}, True)\n check_peval_expression(\"~(-4)\", {}, \"3\", fully_evaluated=True, expected_value=3)", "def add( a, b ):\n return a + b", "def func2(s):\n return(s+'-'+s)", "def addend_in_2(s):\n return head_of_list(plus, s)", "def split_and_sum(expression):\n\n split_vals = expression.split('+')\n float_vals = [float(v) for v in split_vals]\n total = sum([v for v in float_vals if v > 0.0])\n\n return total", "def _AccumulateANDTerm(self, operator, form_field, post_data, search_query):\n user_input = post_data.get(form_field)\n if user_input:\n values = VALUE_RE.findall(user_input)\n search_terms = ['%s%s' % (operator, v) for v in values]\n search_query.extend(search_terms)", "def exeval(expression): \n if len(expression) <= 3: #Assuming no spaces (\" \") between each value given in the expression\n if expression[0] == \"+\":\n return float(expression[1]) + float(expression[2])\n elif expression[0] == \"-\":\n return float(expression[1]) - float(expression[2])\n else:\n if expression[0] == \"+\":\n return float(expression[1]) + exeval(expression[2:])\n elif expression[0] == \"-\":\n return float(expression[1]) - exeval(expression[2:])", "def incr_operand(self):\n pass", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(x, y):\n return x + y", "def add(self, term):\n self._value = self.accum_param.addInPlace(self._value, term)", "def add(x, y):\n\n return x + y", "def combine_expression(self, connector, sub_expressions):\n lhs, rhs = sub_expressions\n if connector == '%%':\n return 'MOD(%s)' % ','.join(sub_expressions)\n elif connector == '&':\n return 'BAND(%s)' % ','.join(sub_expressions)\n elif connector == '|':\n return 'BOR(%s)' % ','.join(sub_expressions)\n elif connector == '^':\n return 'POWER(%s)' % ','.join(sub_expressions)\n elif connector == '<<':\n return '(%(lhs)s * POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}\n elif connector == '>>':\n return 'FLOOR(%(lhs)s / POWER(2, %(rhs)s))' % {'lhs': lhs, 'rhs': rhs}\n 
return super().combine_expression(connector, sub_expressions)", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def add(a, b):\n return a + b", "def simplify(expression):\n q = []\n for x in expression:\n if x != \")\":\n q.append(x)\n else:\n subexp = \"\"\n while q:\n #print(q)\n c = q.pop()\n if c == \"(\":\n if len(q) and (q[-1] == \"+\" or q[-1] == \"-\"):\n sign = q.pop()\n else:\n sign = \"+\"\n subexp = signExp(subexp, sign)\n q.append(subexp)\n break\n else:\n subexp = c + subexp\n exp = \"\"\n while q:\n c = q.pop()\n exp = c + exp\n \n if len(exp) and exp[0] != \"+\" and exp[0] != \"-\":\n # Again if the first character is not a 'sign' make it a \"+\"\n exp = \"+\" + exp\n \n return exp", "def plus(x, y):\n x[:] += y[:]\n return x", "def add_op(self, expr):\n from cascada.bitvector import operation\n assert isinstance(expr, operation.Operation)\n assert not self.contain_op(expr)\n name = \"{}{}\".format(self.id_prefix, self.counter)\n self.counter += 1\n identifier = core.Variable(name, expr.width)\n self.table[identifier] = expr\n\n return identifier", "def convert_add_scalar(node, **kwargs):\n return scalar_op_helper(node, 'Add', **kwargs)", "def __radd__(self, other):\n return Token(\n other + self.text, self.position - len(other), self.category)", "def __add__(self,that):\n return self.__opExpand2(that,np.add)", "def add(a, b):\n return a+b", "def eval_sum(parse_result):\r\n total = 0.0\r\n current_op = operator.add\r\n for token in parse_result:\r\n if token == '+':\r\n current_op = operator.add\r\n elif token == '-':\r\n current_op = operator.sub\r\n else:\r\n total = current_op(total, token)\r\n return total", "def evaluator(operator: str, value1: str, value2: str) -> str:\n\n evaluation_function: str = value1 + operator + value2\n #Because all three are strings, the + operator simply appends them together to be simplified. 
\n\n result: str = str(simplify(evaluation_function))\n return result", "def _add_op(value, sample_args, rationals_allowed):\n entropy, sample_args = sample_args.peel()\n if rationals_allowed and sample_args.count >= 3:\n x = number.integer_or_rational(entropy, True)\n else:\n x = number.integer(entropy, True)\n if random.choice([False, True]):\n op_args = [x, value - x]\n else:\n op_args = [value - x, x]\n return ops.Add, op_args, sample_args", "def test_parse_add(self):\n self.assertEqual(parse_input.parse([\"1\", \"+\", \"2\"]), 3)", "def addition(a, b):\n return a + b", "def parse_substract_and_sum(numbers, operators):\n while len(numbers) > 1:\n if operators[0] == \"+\": \n result = calc.sum(numbers[0], numbers[1])\n elif operators[0] == \"-\":\n result = calc.substract(numbers[0], numbers[1])\n operators, numbers = change_list_elements(operators, numbers, result, 0)\n return operators, numbers", "def x_add_one(nom):\n\tif 'X' in nom and '*' not in nom:\n\t\treturn nom.replace('X', '1*X')\n\treturn nom", "def add(x,y):\n return x + y", "def add(x,y):\n return x + y", "def __add__(self, value):\n out = self.copy()\n out.addMath(Query.Math.Add, value)\n return out", "def pauli_represent_minus_plus(e):\n # XXX: todo, make sure that new operators inherit labels\n return expression_tree_transform(\n e, [(lambda e: isinstance(e, SigmaX),\n lambda e: SigmaMinus() + SigmaPlus()),\n (lambda e: isinstance(e, SigmaY),\n lambda e: I * SigmaMinus() - I * SigmaPlus())]\n )", "def test_sqpp_oddly_capped_operators(self):\n self.assertEqual(self.parser.parse_query('foo oR bar'),\n ['+', 'foo', '|', 'bar'])", "def add(a,b):\n\treturn a+b", "def __radd__(self,that):\n return self.__opExpand2(that,np.add)", "def test_operator_adapt(self):\n\n # test string concatenation\n expr = test_table.c.data + \"somedata\"\n assert testing.db.execute(select([expr])).scalar() == \"somedatasomedata\"\n\n expr = test_table.c.id + 15\n assert testing.db.execute(select([expr])).scalar() == 16\n\n # test custom operator conversion\n expr = test_table.c.avalue + 40\n assert expr.type.__class__ is test_table.c.avalue.type.__class__\n\n # value here is calculated as (250 - 40) / 10 = 21\n # because \"40\" is an integer, not an \"avalue\"\n assert testing.db.execute(select([expr.label('foo')])).scalar() == 21\n\n expr = test_table.c.avalue + literal(40, type_=MyCustomType)\n \n # + operator converted to -\n # value is calculated as: (250 - (40 * 10)) / 10 == -15\n assert testing.db.execute(select([expr.label('foo')])).scalar() == -15\n\n # this one relies upon anonymous labeling to assemble result\n # processing rules on the column.\n assert testing.db.execute(select([expr])).scalar() == -15", "def __add__(self, other):\n result = self.__class__()\n result._terms.extend(self)\n\n if isinstance(other, self._term_class):\n if any(\n isinstance(other, term.__class__) and other.name == term.name\n for term in self._terms\n ):\n msg = (\n f\"There is already a term of type {other.__class__} with name \"\n f\"'{other.name}' in {self.__class__}. 
Please provide a different \"\n f\"name for {other}.\"\n )\n raise ValueError(msg)\n else:\n result._terms.append(other)\n elif isinstance(other, self.__class__):\n for term in other:\n result += term\n else:\n msg = f\"Unsupported operand type(s) for +: {type(self)} and {type(other)}.\"\n raise TypeError(msg)\n\n return result", "def calc(operand_1, operand_2):\n return operand_1 + operand_2", "def calc(operand_1, operand_2):\n return operand_1 + operand_2", "def test_sqpp_paren_expr1_minus_expr2_and_paren_expr3(self):\n self.assertEqual(self.parser.parse_query('(expr1) - expr2 + (expr3)'),\n ['+', 'expr1', '-', 'expr2', '+', 'expr3'])", "def __init__(self, expr1, expr2, coeff=1.0, name='add'):\n super(SumExpression, self).__init__(e1=expr1, e2=expr2, name=name)\n self._coeff = coeff\n self.domain = self.e1.domain" ]
[ "0.6888404", "0.63378376", "0.63330656", "0.6294168", "0.6236394", "0.6221723", "0.6219546", "0.6147867", "0.61287045", "0.6115096", "0.60734725", "0.60724515", "0.60708827", "0.6070692", "0.60704505", "0.60387814", "0.60252726", "0.5984255", "0.5975357", "0.5954446", "0.59498906", "0.5937147", "0.5928731", "0.5924677", "0.5919146", "0.58840847", "0.5876093", "0.5847572", "0.58436257", "0.5836227", "0.5833833", "0.58257985", "0.58165175", "0.579586", "0.57728964", "0.5772533", "0.5765712", "0.5759566", "0.57581204", "0.5757406", "0.57451606", "0.5734436", "0.5733477", "0.57321966", "0.57236016", "0.5721604", "0.5676141", "0.5659607", "0.56574863", "0.5656678", "0.5647677", "0.5633222", "0.5633222", "0.5633222", "0.5633222", "0.5633222", "0.5633222", "0.5633222", "0.5633222", "0.5633222", "0.5633222", "0.5633222", "0.5633222", "0.5633222", "0.56208664", "0.561584", "0.55951846", "0.55950284", "0.55950284", "0.55950284", "0.55950284", "0.55950284", "0.55950284", "0.5592715", "0.5580193", "0.5577806", "0.55762804", "0.55678207", "0.55658895", "0.5564101", "0.556063", "0.55571824", "0.5556489", "0.55553067", "0.55521417", "0.5547829", "0.55441135", "0.55420697", "0.55420697", "0.5541677", "0.553873", "0.55344945", "0.5529834", "0.5516051", "0.55154985", "0.55125475", "0.55099523", "0.55099523", "0.55022544", "0.5499663" ]
0.8184933
0
whileStatement = "while" expression block
def parseWhileStatement( ): # parse routine for while and uses the while class to print out the appropriate string
	tok = tokens.peek( )
	if debug: print( "whileStatement: ", tok )
	start = match( "while" )
	expr = expression( )
	blk = parseBlock( )
	tok = tokens.peek( )
	whileString = whileStatement( start, expr, blk )
	return whileString
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_while(self: Parser, node: doc.While) -> None:\n with self.var_table.with_frame():\n cond = self.eval_expr(node.test)\n with T.While(cond):\n self.visit_body(node.body)", "def _While(self, t):\n self.fill(\"while (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n self.RaiseError(t, \"While else not supported\")", "def _analyse_stmt_While(self, statement: ast.While, *, next: CFNode) -> CFNode:\n # Analyse the else branch.\n else_node = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the body.\n dummy_node = self._dummy_node()\n with self._updated_context(break_=next, continue_=dummy_node):\n body_node = self._analyse_statements(statement.body, next=dummy_node)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=body_node)\n else:\n branches.update(else_=else_node)\n else:\n branches.update(enter=body_node, else_=else_node, error=self._raise)\n\n loop_node = self._ast_node(statement, **branches)\n self._graph.collapse_node(dummy_node, loop_node)\n return loop_node", "def link_while_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.body)", "def WhileStatement(self):\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n express = self.Expression()\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n state = self.Statement()\n return whileStmt(express, state)\n raise SLUCSyntaxError(\"ERROR: Missing right paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))", "def gen_while(self, stmt: statements.While) -> None:\n condition_block = self.builder.new_block()\n body_block = self.builder.new_block()\n final_block = self.builder.new_block()\n self.break_block_stack.append(final_block)\n self.continue_block_stack.append(condition_block)\n self.builder.emit_jump(condition_block)\n self.builder.set_block(condition_block)\n self.gen_condition(stmt.condition, body_block, final_block)\n self.builder.set_block(body_block)\n self.gen_stmt(stmt.body)\n self.builder.emit_jump(condition_block)\n self.builder.set_block(final_block)\n self.break_block_stack.pop()\n self.continue_block_stack.pop()", "def syntax_while():\n i = 0\n while i < 5:\n print(i)\n i += 1\n\n ## Output\n # 0\n # 1\n # 2\n # 3\n # 4", "def compile_while(self) -> None:\n self._consume('while')\n self._consume('(')\n\n while_lbl = f\"WHILE_{self._while_count}\"\n while_false_lbl = f\"WHILE_FALSE{self._while_count}\"\n self._while_count += 1\n self.writer.write_label(while_lbl)\n\n self.compile_expression()\n self._consume(')')\n\n self._consume('{')\n self.writer.write_if(while_false_lbl)\n\n self.compile_statements()\n self.writer.write_goto(while_lbl)\n self.writer.write_label(while_false_lbl)\n\n self._consume('}')", "def add_while(self, input_name, body_function, cond_function, name=None):\n return self._build_op(\n 'while', [input_name],\n name=name,\n attr={\n 'body_function': body_function,\n 'cond_function': cond_function\n })", "def test_42_while(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\twhile(1+true) do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In 
Expression: BinaryOp(+,IntLiteral(1),BooleanLiteral(True))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,442))", "def test_41_while(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\twhile(1) do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: While(IntLiteral(1),[])\"\n\t\tself.assertTrue(TestChecker.test(input,expect,441))", "def compile_while(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n self.tokenizer.advance() # ignore 'while' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.code_writer.write_label(lab1)\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.code_writer.write_if(lab2)\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.compile_statements()\r\n self.code_writer.write_goto(lab1)\r\n self.code_writer.write_label(lab2)", "def compile_while(self):\r\n start_label = \"WHILE_\" + str(self.__while_count)\r\n end_label = \"WHILE_END_\" + str(self.__while_count)\r\n self.__while_count += 1\r\n self.__advance(n=2) # Advance after the '(' token\r\n self.__vmwriter.write_label(start_label)\r\n self.compile_expression()\r\n self.__advance(n=2) # Advance after the '{' token\r\n self.__vmwriter.write_arithmetic(\"not\")\r\n self.__vmwriter.write_if(end_label)\r\n self.compile_statements()\r\n self.__advance() # Advance after the '}' token\r\n self.__vmwriter.write_goto(start_label)\r\n self.__vmwriter.write_label(end_label)", "def compile_while(self):\n\n\t\txml = \"<whileStatement>\\n\" + self.tokenizer.keyword() + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)\n\n\t\tself.compile_expression()\n\n\t\txml = self.tokenizer.symbol() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != '}':\n\t\t\tself.compile_statements()\n\n\t\txml = '</statements>\\n' + self.tokenizer.symbol() + '</whileStatement>\\n'\n\t\tself.outfile.write(xml)", "def visit_WhileNode(self, node: WhileNode, symbol_table: SymbolTable) -> None:\n while True:\n if self.visit(node.cond, symbol_table).value == 0:\n break\n else:\n for expr in node.body:\n if expr is not None:\n if isinstance(expr, ReturnNode):\n return expr\n res = self.visit(expr, symbol_table)\n if isinstance(res, ReturnNode):\n return res", "def _in_while_loop(control_flow_node_map, op_name):\n return op_name in control_flow_node_map and \"LoopCond\" in control_flow_node_map[op_name]", "def _while_loop(self):\n bind_map = {}\n wl = set_span(tvm.relay.var(\"while_loop\"), self._loop_name)\n sb = tvm.relay.scope_builder.ScopeBuilder()\n\n lv_list = []\n expr_list = []\n extra_vars = []\n\n for i, lv in enumerate(self.loop_vars):\n if self._loop_name not in self._lvar2expr:\n self._lvar2expr[self._loop_name] = {}\n\n # Handle the case when loop var is not properly lifted.\n # This can happen when loop var node name is set accidentally\n # beginning with loop name.\n if lv not in self._lvar2expr[self._loop_name]:\n var_name = f\"{self._loop_name}_loop_var_{i}\"\n var_type = _infer_type(lv, self._mod).checked_type\n loop_var = set_span(tvm.relay.var(var_name, type_annotation=var_type), var_name)\n self._lvar2expr[self._loop_name][loop_var] = lv\n bind_map[lv] = loop_var\n self.loop_vars[i] = loop_var\n lv = loop_var\n\n 
lv_list.append(lv)\n expr_list.append(self._lvar2expr[self._loop_name][lv])\n\n if bind_map:\n self.cond = rewrite_subgraph(self.cond, bind_map)\n self.body = [rewrite_subgraph(b, bind_map) for b in self.body]\n\n cond = set_span(tvm.relay.op.min(self.cond), self.cond.span)\n\n for lv, exp in self._lvar2expr[self._loop_name].items():\n if lv not in self.loop_vars:\n var_checker = VarChecker(lv)\n for bd in self.body + [cond]:\n var_checker.visit(bd)\n if var_checker.used:\n lv_list.append(lv)\n expr_list.append(exp)\n extra_vars.append(lv)\n break\n\n with sb.if_scope(cond):\n sb.ret(wl(*list(self.body + extra_vars)))\n with sb.else_scope():\n sb.ret(tvm.relay.Tuple(lv_list))\n\n loop_fn = tvm.relay.Function(lv_list, sb.get())\n sb = tvm.relay.scope_builder.ScopeBuilder()\n sb.let(wl, loop_fn)\n loop_ret = wl(*expr_list)\n\n sb.ret(loop_ret)\n ret = sb.get()\n return ret", "def while_(self):\n if self.line.startswith('wh'):\n if self.line.endswith('while') is False:\n return True", "def while_loop_op(op):\n return (control_flow_util.IsLoopSwitch(op) or\n control_flow_util.IsLoopMerge(op) or\n control_flow_util.IsLoopEnter(op) or\n control_flow_util.IsLoopExit(op) or\n TensorTracer.loop_cond_op(op) or\n op.type in ('RefNextIteration', 'NextIteration'))", "def whileLoop(count):\n result=''\n while count>=1:#判断条件\n result+=\"sorry\"\n count-=1#while的步进\n return result", "def syntax_while_break():\n i = 0\n while i < 5:\n if i >= 3:\n break\n print(i)\n i += 1\n\n ## Output\n # 0\n # 1\n # 2", "def whilestmt(self, w):\n invs = self.assemble_invariants(w)\n b_mid = self.flatten([Tree('assume', [w[0]]), w[-1], Tree('assert', invs), Tree('assume', [Tree('const_false', [])])])\n b = self.flatten([Tree('assert', invs),\n self.assemble_havoc(w),\n Tree('assume', invs),\n Tree('wpor', [Tree('block', b_mid), Tree('assume', self._not(w[0]))])])\n return b", "def while_loop(self):\n if self._loop is None:\n self._loop = self._while_loop()\n return self._loop\n return self._loop", "def convert_while(self, condition):\n\n # Run super definition\n condition = super().convert_while(condition)\n\n # Make while template\n while_template = \"while {cond}:\"\n\n # Replace logical operators\n condition = self.replace_logical_ops(condition, direction=\"from\")\n\n # Return converted if statement\n return [while_template.format(cond=condition)], []", "def syntax_while_continue():\n i = 0\n while i < 5:\n if i % 2 == 1:\n i += 1\n continue\n print(i)\n i += 1\n\n ## Output\n # 0\n # 2\n # 4", "def DoWhile(name, condition_blob_or_net, nets_or_steps):\n condition_not_net, stop_blob = NotNet(condition_blob_or_net)\n if isinstance(condition_blob_or_net, core.Net):\n nets_or_steps = _AppendNets(\n nets_or_steps, condition_blob_or_net, condition_not_net)\n else:\n nets_or_steps = _AppendNets(nets_or_steps, condition_not_net)\n\n # If stop_blob is pre-set to True (this may happen when DoWhile() is\n # called twice), the loop will exit after executing the first net/step\n # in nets_or_steps. This is not what we want. 
So we use BootNet to\n # set stop_blob to False.\n bool_net = BoolNet((stop_blob, False))\n return Do(name + '/DoWhile', bool_net, core.scoped_execution_step(\n _get_next_step_name('DoWhile-inner', name),\n nets_or_steps,\n should_stop_blob=stop_blob,\n ))", "def While(name, condition_blob_or_net, nets_or_steps):\n condition_not_net, stop_blob = NotNet(condition_blob_or_net)\n if isinstance(condition_blob_or_net, core.Net):\n nets_or_steps = _PrependNets(\n nets_or_steps, condition_blob_or_net, condition_not_net)\n else:\n nets_or_steps = _PrependNets(nets_or_steps, condition_not_net)\n\n def while_step(control_name):\n return core.scoped_execution_step(\n _get_next_step_name(control_name, name),\n nets_or_steps,\n should_stop_blob=stop_blob,\n )\n\n if _IsNets(nets_or_steps):\n # In this case, while_step has sub-nets:\n # [condition_blob_or_net, condition_not_net, nets_or_steps]\n # If stop_blob is pre-set to True (this may happen when While() is\n # called twice), the loop will exit after executing\n # condition_blob_or_net. So we use BootNet to set stop_blob to\n # False.\n bool_net = BoolNet((stop_blob, False))\n return Do(name + '/While', bool_net, while_step('While-inner'))\n else:\n return while_step('While')", "def while_do(condition: Callable[[Any], bool], source: ObservableBase) -> ObservableBase:\n from ..operators.observable.whiledo import while_do\n return while_do(condition, source)", "def is_while(self, file, i):\n\n # Save line to local variable\n line = file[i].strip()\n\n # If line starts with while and ends with ':' return True, else False\n if line.startswith(\"while\") and line.endswith(\":\"):\n return True\n return False", "def while_loop(n):\n\tprint(\"\\n\\nlet's see what a while loop is\\n\")\n\twhile n > 0:\n\t\tprint(f\"n = {n}\")\n\t\tn -= 1", "def _analyse_loop(\n self,\n statement: Union[ast.AsyncFor, ast.For, ast.While],\n *,\n next: CFNode,\n ) -> CFNode:\n # Node acting as target for the next iteration. 
We'll identify this\n # with the loop entry node, once that exists.\n dummy_node = self._dummy_node()\n with self._updated_context(break_=next, continue_=dummy_node):\n body_node = self._analyse_statements(statement.body, next=dummy_node)\n\n loop_node = self._ast_node(\n statement,\n enter=body_node,\n else_=self._analyse_statements(statement.orelse, next=next),\n error=self._raise,\n )\n\n self._graph.collapse_node(dummy_node, loop_node)\n return loop_node", "def get_while_condition(self, file, i):\n\n # Run super definition\n line = super().get_while_condition(file, i)\n\n # Strip ending colon\n line = line.split(\":\", 1)[0]\n\n # Replace logical operators\n line = self.replace_logical_ops(line, direction=\"to\")\n\n # Create start and end for while call\n start = []\n end = []\n\n # Return while loop condition\n return line, start, end", "def test_78_continue(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then continue;\n\t\t\twith x:integer; do if (x=0) then return;\n\t\tend{with} continue; end{while} continue; end\"\"\"\n\t\texpect = \"Continue Not In Loop\"\n\t\tself.assertTrue(TestChecker.test(input,expect,478))", "def __init__(self, depth, condition, body_statement):\n super(WhileStatement, self).__init__(depth)\n self.condition = condition\n self.body_statement = body_statement", "def loop(self):\n line = self.read()\n while line != \"quit\":\n value = self.eval(line)\n print(value)\n line = self.read()", "def consume_while(it, pred):\n\tout = \"\"\n\twhile True:\n\t\tc = it.peek()\n\t\tif c is not EOFToken and pred(c):\n\t\t\tout += c\n\t\t\tit.next()\n\t\telse:\n\t\t\treturn out", "def while_P():\r\n i=0\r\n while i<7:\r\n j=0\r\n while j<4:\r\n if j==0 or i in(0,3) and j%3!=0 or j==3 and i in(1,2):\r\n print(\"*\",end=\" \")\r\n else:\r\n print(\" \",end=\" \")\r\n j+=1 \r\n print()\r\n i+=1", "def take_while(pred):\n def run(chunks, chunk, last):\n for i, c in enumerate(chunk):\n if not pred(c):\n return ParserResult.from_done(_chunks_merge((chunk[:i], chunks)), chunk[i:], last)\n if last:\n return ParserResult.from_error(\"Not enough input\")\n else:\n return ParserResult.from_partial(Parser(run, (chunk, chunks)))\n return Parser(run, tuple())", "def statement_eval(node, table):\n\n if node.kind == \"MOD_OP\":\n table = mod_op_eval(node, table)\n\n elif node.kind == \"SWAP_OP\":\n table = swap_op_eval(node, table)\n\n elif node.kind == \"FROM_LOOP\":\n block_node = node.block\n\n # TODO: check start condition\n\n while True:\n # Execute the block.\n table = block_eval(block_node, table)\n\n # Break if the end condition is satisfied.\n if expr_eval(node.end_condition, table):\n break\n\n elif node.kind == \"FOR_LOOP\":\n var_dec = node.var_declaration\n until_node = node.end_condition\n increment_node = node.increment_statement\n\n # Initialize the variable.\n table[var_dec.name] = expr_eval(var_dec.expr, table)\n\n while True:\n # Execute the block and increment statement.\n if not node.inc_at_end:\n table = mod_op_eval(increment_node, table)\n \n table = block_eval(node.block, table)\n\n if node.inc_at_end:\n table = mod_op_eval(increment_node, table)\n\n # Break if the end condition is satisfied.\n if table.refs[until_node.name] == expr_eval(until_node.expr, table):\n break\n\n table = var_condition_eval(until_node, table)\n\n elif node.kind == \"IF\":\n # Check the condition; if it fails, execute the\n # 'false' branch if it exists.\n\n if expr_eval(node.condition, 
table):\n table = block_eval(node.true, table)\n elif \"false\" in node.data:\n table = block_eval(node.false, table)\n\n elif node.kind == \"DO/UNDO\":\n # Do the action_block, then do the yielding block,\n # then undo the action block.\n table = block_eval(node.action_block, table)\n\n if \"yielding_block\" in node.data:\n table = block_eval(node.yielding_block, table)\n\n table = block_eval(inverter.unblock(node.action_block), table)\n\n elif node.kind == \"RESULT\":\n # Overwrites the variable 'result' with the given expression.\n table[\"result\"] = expr_eval(node.expr, table)\n\n elif node.kind == \"VAR_DEC\":\n table[node.name] = expr_eval(node.expr, table)\n\n elif node.kind == \"VAR_CONDITION\":\n table = var_condition_eval(node, table)\n\n elif node.kind == \"BLOCK\":\n table = block_eval(node, table)\n\n elif node.kind == \"FUNCTION_CALL\":\n # Call the function, then update table with the results.\n function = shared.program.functions[node.name]\n\n output = function.evaluate(\n node.backwards,\n node.ref_args,\n [expr_eval(arg, table) for arg in node.ref_args],\n [expr_eval(arg, table) for arg in node.const_args]\n )\n\n # After evaluating the function, the output table will\n # contain changed variables.\n table.update_refs(output)\n\n elif node.kind == \"UN\":\n inverted_node = inverter.unstatement(node.statement)\n table = statement_eval(inverted_node, table)\n\n elif node.kind == \"EXIT\":\n if expr_eval(node.condition, table):\n # We return by raising an exception.\n raise shared.ReturnException(expr_eval(node.value, table))\n\n elif node.kind == \"ENTER\":\n # Do nothing when we actually encounter these.\n pass\n\n return table", "def test_74_break(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then break;\n\t\t\twith x:integer; do if (x=0) then return;\n\t\tend{with} break; end{while} break; end\"\"\"\n\t\texpect = \"Break Not In Loop\"\n\t\tself.assertTrue(TestChecker.test(input,expect,474))", "def while_f():\r\n\r\n row = 0\r\n while row<6:\r\n col = 0\r\n while col<4:\r\n if col==1 and row>0 or row==3 and col<3 or col==2 and row==0 or col==3 and row==1:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n col += 1\r\n print()\r\n row += 1", "def is_abecedarian_using_while(word):\n pass", "def _wrap_computation_in_while_loop(device, op_fn):\n\n def computation(i):\n with ops.control_dependencies(op_fn()):\n return i + 1\n\n iterations_per_loop_var = _create_or_get_iterations_per_loop()\n # By setting parallel_iterations=1, the parallel execution in while_loop is\n # basically turned off.\n with ops.device(device):\n iterations = array_ops.identity(iterations_per_loop_var)\n return control_flow_ops.while_loop(\n lambda i: i < iterations,\n computation, [constant_op.constant(0)],\n parallel_iterations=1)", "def test_35_break(self):\n\t\tinput = \"\"\"function foo():integer; var a:real; begin\n\t\twhile a > 0 do\n\t\twith a:integer;b:boolean; do begin b:=true; break; end\n\t\tbreak; return 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Break Not In Loop\"\n\t\tself.assertTrue(TestChecker.test(input,expect,435))", "def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()", "def _is_in_outmost_while_loop(self, op):\n ctxt = self._get_op_control_flow_context(op)\n outer_while_context = 
control_flow_util.GetContainingWhileContext(ctxt)\n return outer_while_context == control_flow_util.GetContainingWhileContext(\n self._outmost_context)", "def infinite_loop():\n return True", "def while_b():\r\n\r\n row = 0\r\n while row<7:\r\n col = 0\r\n while col<4:\r\n if col==0 or row!=0 and row%3==0 and col<3 or col==3 and row in (4,5):\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n col += 1\r\n print()\r\n row += 1", "def forward_until(self, condition, peek=True):\n c = self.__init(self.__empty(), self.peek().position)\n while self.hasNext() and not condition(self.peek() if peek else self):\n c += self.forward(1)\n return c", "def eval_loop():\n while(True):\n decision = raw_input(\"enter some mathematical operations\")\n if(decision == \"done\"):\n break\n print eval(decision)", "def take_until(condition):\n return partial(takewhile, pipe | condition | operator.not_)", "def _visit_loop_body(self, node, if_block=None, is_for=None):\n loop_name = \"for\" if is_for else \"while\"\n if if_block:\n node.if_block = if_block\n else:\n node.if_block = self.flow.nextblock(label=\"%s_body\" % loop_name,\n pos=node.body[0])\n self.visitlist(node.body)\n self.flow.loops.pop()\n\n if self.flow.block:\n # Add back-edge\n self.flow.block.add_child(node.cond_block)\n\n # Else clause\n if node.orelse:\n node.else_block = self.flow.nextblock(\n parent=node.cond_block,\n label=\"else_clause_%s\" % loop_name,\n pos=node.orelse[0])\n self.visitlist(node.orelse)\n if self.flow.block:\n self.flow.block.add_child(node.exit_block)\n else:\n node.cond_block.add_child(node.exit_block)\n\n self.exit_block(node.exit_block, node)", "def query_until_parsed(prompt, parse_func, condition=None):\n assert type(prompt) is str and callable(parse_func)\n response = raw_input(prompt + ' >> ')\n parsed_object = parse_func(response)\n while parsed_object is None:\n print \"Could not parse '%s' Please check your syntax.\" % response\n response = raw_input(prompt + ' >> ')\n parsed_object = parse_func(response)\n if condition is not None:\n while not condition(parsed_object) or parsed_object is None:\n print \"'%s' is not a valid response.\" % response\n response = raw_input(prompt + ' >> ')\n parsed_object = parse_func(response)\n return parsed_object", "def testWhileLoopProblem(self):\n def while_loop_problem():\n x = tf.get_variable(\"x\", shape=[], initializer=tf.ones_initializer())\n\n # Strange way of squaring the variable.\n _, x_squared = tf.while_loop(\n cond=lambda t, _: t < 1,\n body=lambda t, x: (t + 1, x * x),\n loop_vars=(0, x),\n name=\"loop\")\n return x_squared\n\n optimizer = meta.MetaOptimizer(net=dict(\n net=\"CoordinateWiseDeepLSTM\",\n net_options={\"layers\": ()}))\n minimize_ops = optimizer.meta_minimize(while_loop_problem, 3)\n with self.test_session() as sess:\n sess.run(tf.global_variables_initializer())\n train(sess, minimize_ops, 1, 2)", "def WaitForAll(iterable, condition):\n while any(map(condition, iterable)):\n Yield(frameNice=False)\n\n BeFrameNice()", "def step_back_while(cur_index, condition):\n while cur_index >= 0 and condition(cur_index):\n cur_index -= 1\n return cur_index", "def whileLoop(self, l1, l2):\r\n value = 0\r\n r = 0\r\n curr_l1 = l1\r\n curr_l2 = l2\r\n head = None\r\n curr = None\r\n\r\n while(curr_l1 or curr_l2 or r == 1):\r\n \r\n if curr_l1 != None and curr_l2 != None:\r\n value += curr_l1.val + curr_l2.val\r\n\r\n elif curr_l1 == None and curr_l2 == None:\r\n pass\r\n \r\n elif curr_l1 == None:\r\n value += curr_l2.val\r\n \r\n elif curr_l2 == 
None:\r\n value += curr_l1.val\r\n\r\n if r == 1:\r\n value += 1\r\n r = 0\r\n \r\n if value > 9:\r\n value = value - 10\r\n r = 1\r\n\r\n if head == None:\r\n head = ListNode(value)\r\n curr = head\r\n else:\r\n curr.next = ListNode(value)\r\n curr = curr.next\r\n\r\n value = 0\r\n curr_l1 = curr_l1.next if curr_l1 != None else None\r\n curr_l2 = curr_l2.next if curr_l2 != None else None\r\n \r\n return head", "def drop_while(pred):\n def _drop_while_xducer(step):\n outer = {\"trigger\": False}\n def _drop_while_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if outer[\"trigger\"]:\n return step(r, x)\n elif not pred(x):\n outer[\"trigger\"] = True\n return step(r, x)\n return r\n return _drop_while_step\n return _drop_while_xducer", "def scanwhile(first, p):\n lines = [first]\n while True:\n line = lr.readline()\n if not line:\n break\n if p(line):\n lines.append(line)\n else:\n lr.push(line)\n break\n return lines", "def test_77_continue(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then continue;\n\t\t\twith x:integer; do if (x=0) then continue; else return;\n\t\tend{with} end{while} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,477))", "def statement( ): # parse rountin for statement that makes sure the token is one of the following, eventually there will be an error caught\n\n\ttok = tokens.peek( )\n\tif debug: print( \"statement: \", tok )\n\tif tok == \"if\":\n\t\tstat = parseIfStatement( )\n\t\treturn stat\n\telif tok == \"while\":\n\t\tstat = parseWhileStatement( )\n\t\treturn stat\n\telse: \n\t\tstat = parseAssign( )\n\t\treturn stat", "def run_until_stop(self):\n while self.commands[self.pointer] != END:\n # Get the cmd\n cmd = self.commands[self.pointer]\n opcode = cmd % 100\n modes = cmd // 100\n \n vals, locs, self.pointer = get_vals_and_locs(opcode, modes, self.pointer, self.commands)\n \n if opcode == ADD:\n self.commands[locs[2]] = vals[0] + vals[1]\n elif opcode == MUL:\n self.commands[locs[2]] = vals[0] * vals[1]\n elif opcode == INP:\n if self.inputs:\n self.commands[locs[0]] = self.inputs.pop(0)\n else:\n # Put the pointer back, so we run this opcode again\n self.pointer -= 2\n return False\n elif opcode == OUT:\n self.outputs.append(vals[0])\n elif opcode == JIT:\n if vals[0] != 0:\n self.pointer = vals[1]\n elif opcode == JIF:\n if vals[0] == 0:\n self.pointer = vals[1]\n elif opcode == LT:\n self.commands[locs[2]] = 1 if vals[0] < vals[1] else 0\n elif opcode == EQ:\n self.commands[locs[2]] = 1 if vals[0] == vals[1] else 0\n else:\n print(\"FAIL????\")\n\n return True", "def test_run_loop_success(self):\n found = False\n pyint = Interpreter(limit=15)\n try:\n pyint.run(code=BF_CODE_LOOP_TWICE)\n except SystemExit: \n found = True\n self.assertFalse(found)", "def taking_while(pred):\n @coroutine\n def gen(target):\n while True:\n x = (yield)\n if pred(x):\n target.send(x)\n else:\n raise StopConsumption()\n\n return gen", "def lab10_q2():\n return \"\"\"\n Make a 'total' to add up the total revenue as while loop plays.\n\titem = qvm.dispense, makes it so item is the juice. \n\tWhile item: because if there is a juice this while loop will continue. If there aren't two fruits, there wont be a juice so while loop stops.\n\ttotal += qvm.collect_money(item). (this should be += not = right?) 
to add the revenue from that 'item'\n\trevalue item to the next juice.\n\treturn total\n \"\"\"", "def query_until(prompt, condition, default=None):\n assert type(prompt) is str and callable(condition)\n response = raw_input(prompt + ' >> ')\n if default is not None and not response:\n return default\n while not condition(response):\n if default is not None and not response:\n return default\n print \"'%s' is not a valid response.\" % response\n response = raw_input(prompt + ' >> ')\n return response", "def iteration(self) -> global___Statement.Iteration:", "def iteration(self) -> global___Statement.Iteration:", "def while_five():\r\n row = 0\r\n while row<7:\r\n col = 0\r\n while col<7:\r\n if col==0 and row<6 and row!=4 or col>0 and col<3 and row%3==0 or col==3 and (row==0 or row>3) and row<6:\r\n print('*', end = ' ')\r\n else:\r\n print(' ', end = ' ')\r\n col += 1\r\n print()\r\n row += 1", "def statements(self):\n\n while self.token.value not in ('EOF', 'else', 'end'):\n\n with self.resync('\\n', consume=True):\n self.statement()\n\n if not self.match(Tokens.SYMBOL, \";\"):\n self.error(\"expected ';' after statement \", token=self.prev_token, after_token=True)\n\n # consume the 'end' token if there is one\n self.match(Tokens.KEYWORD, 'end')", "def test_73_break(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then break;\n\t\t\twith x:integer; do if (x=0) then break; else return;\n\t\tend{with} end{while} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,473))", "def while_X():\r\n i=0\r\n while i<6:\r\n j=0\r\n while j<6:\r\n if i-j==0 or i+j==5:\r\n print(\"*\",end=\" \")\r\n else:\r\n print(\" \",end=\" \")\r\n j+=1 \r\n print()\r\n i+=1", "def take_while(pred):\n def _take_while_xducer(step):\n def _take_while_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n return step(r, x) if pred(x) else Reduced(r)\n return _take_while_step\n return _take_while_xducer", "def loop(self):\n while self.dispatch(True) is not QUIT:\n pass", "def dequeue_loop():\n while True:\n result = dequeue_function()\n if not result:\n break\n print(result)", "def condition(self) -> global___Expression:", "def condition(self) -> global___Expression:", "def breakLoop(num):\n sum=0\n while num<=4:#判断条件\n if num==3:\n num += 1\n break\n sum = sum+num\n\n num+=1#while的步进\n return sum", "def test_76_continue(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then continue;\n\t\t\twith x:integer; do continue;\n\t\tend{with} end{with} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,476))", "def run_loop(self):\n while True:\n try:\n command = input('fuel-yaql> ').strip()\n except EOFError:\n return\n if not command:\n continue\n\n try:\n if command.startswith(':'): # Check for internal command\n r = self.execute_command(command)\n else:\n r = self.evaluate_expression(command)\n\n if isinstance(r, (list, dict)):\n print(json.dumps(r, indent=4))\n elif r is not None:\n print(r)\n\n except Exception as e:\n print(\"Unexpected error: {0}\".format(e))\n traceback.print_exc(sys.stdout)", "def stop_when_true(test_expr, result_expr, seq):\n result = None\n for e in seq:\n if test_expr(e):\n result = result_expr(e)\n break\n return result", "def 
test_close():\n while True:\n yield", "def __le__(self, *args):\n return _ida_hexrays.cwhile_t___le__(self, *args)", "def fact_while1(n: int) -> int:\n ret = 1\n if n == 0:\n return 1\n while True:\n if n == 1:\n return ret\n n, ret = n - 1, ret * n", "def _run_cmd_until_condition(self, cmd, cond, retry_count=None,\r\n retry_count_interval=5):\r\n count = 0\r\n while True:\r\n try:\r\n std_out, std_err = self._execute(cmd)\r\n except Exception: # pylint: disable=broad-except\r\n LOG.debug(\"Command %r failed while waiting for condition\",\r\n cmd)\r\n count += 1\r\n if retry_count and count >= retry_count:\r\n raise exceptions.ArgusTimeoutError(\r\n \"Command {!r} failed too many times.\"\r\n .format(cmd))\r\n time.sleep(retry_count_interval)\r\n else:\r\n if std_err:\r\n raise exceptions.ArgusCLIError(\r\n \"Executing command {!r} failed with {!r}\"\r\n .format(cmd, std_err))\r\n elif cond(std_out):\r\n break\r\n else:\r\n time.sleep(retry_count_interval)", "def wait_star():\n while True:\n r = sys.stdin.read(1)\n if r == '*':\n break", "def conditional(self) -> global___Statement.Conditional:", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def test_34_break(self):\n\t\tinput = \"\"\"function foo():integer; var a:real; begin\n\t\twhile a > 0 do\n\t\twith a:integer;b:boolean; do begin b:=true; break; end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,434))", "def __call__(self, input=None): # pragma: no cover\n while False:\n yield None", "def _analyse_stmt_For(self, statement: ast.For, *, next: CFNode) -> CFNode:\n return self._analyse_loop(statement, next=next)", "def has_next():", "def test_72_break(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then break;\n\t\t\twith x:integer; do break;\n\t\tend{with} end{with} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,472))", "def execute(self):\n while len(self.program)>(self.instruction_pointer):\n self.step()", "def Loop(self):\n self.coshell.SetModesCallback(self.SetModes)\n while True:\n try:\n text = self.Prompt()\n if text is None:\n break\n self.Run(text) # paradoxically ignored - coshell maintains $?\n except EOFError:\n # ctrl-d\n if not self.coshell.ignore_eof:\n break\n except KeyboardInterrupt:\n # ignore ctrl-c\n pass\n except interactive_coshell.CoshellExitError:\n break", "def detect_loop(self):\n tortoise = self.head\n hare = self.head\n while hare:\n tortoise = tortoise.next\n hare = hare.next.next\n if tortoise == hare:\n return True\n return False", "def count_cond(condition):\n \"*** YOUR CODE HERE ***\"\n def f(n):\n i, total = 1, 0\n while i <= n:\n if condition(n, i):\n total += 1\n i += 1\n return total\n return f", "def item5():\n for i in range(3):\n print('Loop %d' % i)\n else:\n print('Else block!')\n\n for i in range(3):\n print('Loop2 %d' % i)\n if i == 1:\n break\n else:\n print('Else2 block!')\n\n for x in []:\n print('Never runs')\n else:\n print('Else3 block!')\n\n while False:\n print('Never runs')\n else:\n print('Else4 block!')", "def compile_statements(self):\r\n tok_type = self.tokenizer.token_type()\r\n while tok_type == JackTokenizer.KEYWORD_T:\r\n key = 
self.tokenizer.key_word()\r\n if key == \"let\":\r\n self.compile_let()\r\n elif key == \"do\":\r\n self.compile_do()\r\n elif key == \"while\":\r\n self.compile_while()\r\n elif key == \"return\":\r\n self.compile_return()\r\n else:\r\n self.compile_if()\r\n tok_type = self.tokenizer.token_type()\r\n continue\r\n self.tokenizer.advance() # ignore ';' symbol\r\n tok_type = self.tokenizer.token_type()" ]
[ "0.806411", "0.7875308", "0.76918495", "0.7558932", "0.74995136", "0.7440557", "0.7426091", "0.73713285", "0.7269703", "0.72638226", "0.7247464", "0.7088606", "0.6975367", "0.69033563", "0.6820734", "0.68020827", "0.66737217", "0.6664865", "0.6610299", "0.6537933", "0.63982236", "0.6364233", "0.6342724", "0.6286428", "0.6284967", "0.6202966", "0.61382717", "0.6128669", "0.6100077", "0.60431015", "0.5913039", "0.58471215", "0.58215106", "0.5747959", "0.56440663", "0.55851066", "0.5584271", "0.5559107", "0.5515069", "0.54932505", "0.5470407", "0.5447996", "0.54371357", "0.54180765", "0.533922", "0.53366786", "0.532764", "0.5323428", "0.526989", "0.5233191", "0.52062386", "0.5160936", "0.5154792", "0.5150048", "0.5093892", "0.50607973", "0.505447", "0.5046429", "0.50372756", "0.50198674", "0.5018252", "0.49878994", "0.49794307", "0.4968048", "0.49661344", "0.4965807", "0.49397478", "0.49397478", "0.49307272", "0.49262175", "0.4925086", "0.49137142", "0.49043182", "0.48741627", "0.48416546", "0.48321715", "0.48321715", "0.4824011", "0.48196125", "0.47881898", "0.47794604", "0.47733456", "0.47535613", "0.47326055", "0.4727189", "0.47160274", "0.47119427", "0.47033125", "0.47033125", "0.46995708", "0.4692185", "0.46775505", "0.4672421", "0.4647228", "0.46420744", "0.46379355", "0.46328893", "0.46299058", "0.46214598", "0.46184793" ]
0.7981215
1
ifStatement = "if" expression block [ "else" block ]
def parseIfStatement( ): # parse routine for the if statement; uses the ifStatement class to print out the appropriate string
    tok = tokens.peek( )
    if debug: print( "ifStatement: ", tok )
    start = match( "if" )
    expr = expression( )
    blk = parseBlock( )
    elseblk = None
    tok = tokens.peek( )
    if tok == "else":
        match( "else" )
        elseblk = parseBlock( )
    return ifStatement(expr, blk, elseblk)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()", "def test_if_elseif_paren_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif (foo and bar) or foo and (bar or (foo and bar))}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif (foo and bar) or foo and (bar or (foo and bar)) %}\\nfoo{% endif %}\"", "def IF(logical_statement, expression_true, expression_false):\n if(type(logical_statement) == bool):\n if(logical_statement == True):\n return(expression_true)\n else:\n return(expression_false)\n else:\n print('Invalid type: logical statement does not evaluate to True or False.')", "def test_if_paren_statement():\n r = convert_code(\n \"{if (foo and bar) or foo and (bar or (foo and bar))}\\nbar\\n{else}\\nfoo{/if}\")\n assert r == \"{% if (foo and bar) or foo and (bar or (foo and bar)) %}\\nbar\\n{% else %}\\nfoo{% endif %}\"", "def conditional(self) -> global___Statement.Conditional:", "def test_if_elseif_else_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif blue}\\nfoo\\n{else}bar{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif blue %}\\nfoo\\n{% else %}bar{% endif %}\"", "def test_if_elseif_and_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif awesome.sauce[1] and blue and 'hello'}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif awesome.sauce[1] and blue and 'hello' %}\\nfoo{% endif %}\"", "def compile_if(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n self.tokenizer.advance() # ignore 'if' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.code_writer.write_if(lab1)\r\n self.compile_statements()\r\n self.code_writer.write_goto(lab2)\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab1)\r\n if (self.tokenizer.token_type() == JackTokenizer.KEYWORD_T and\r\n self.tokenizer.key_word() == \"else\"):\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n self.compile_statements()\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab2)", "def test_if_else_statement():\n r = convert_code(\"{if foo}\\nbar\\n{else}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% else %}\\nfoo{% endif %}\"", "def _If(self, t):\n self.fill(\"if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # collapse nested ifs into equivalent elifs.\n while (t.orelse and len(t.orelse) == 1 and\n isinstance(t.orelse[0], ast.If)):\n t = t.orelse[0]\n self.fill(\"else if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # final else\n if t.orelse:\n self.fill(\"else\")\n self.enter()\n self.dispatch(t.orelse)\n self.leave()", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def if_function(condition, true_result, 
false_result):\n if condition:\n return true_result\n else:\n return false_result", "def test_if_elseif_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif blue}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif blue %}\\nfoo{% endif %}\"", "def IfStatement(self):\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n express = self.Expression()\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n state = self.Statement()\n if self.currtok[1].name == \"else\":\n self.currtok = next(self.tg)\n state2 = self.Statement()\n return ifelseStmt(express, state, state2)\n else:\n return ifStmt(express, state)\n raise SLUCSyntaxError(\"ERROR: Missing right paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))", "def _IfExp(self, t):\n self.dispatch(t.test)\n self.write(\" ? \")\n self.dispatch(t.body)\n self.write(\" : \")\n self.dispatch(t.orelse)", "def compile_if(self):\r\n else_label = \"ELSE_\" + str(self.__if_count)\r\n end_label = \"END_IF_\" + str(self.__if_count)\r\n self.__if_count += 1\r\n self.__advance(n=2)\r\n self.compile_expression()\r\n self.__vmwriter.write_arithmetic(\"not\")\r\n self.__vmwriter.write_if(else_label)\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__vmwriter.write_goto(end_label)\r\n self.__vmwriter.write_label(else_label)\r\n self.__advance()\r\n if self.__tokenizer.keyword() == TYPES_DIC[\"ELSE\"]:\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__advance()\r\n self.__vmwriter.write_label(end_label)", "def visit_if(self: Parser, node: doc.If) -> None:\n with self.var_table.with_frame():\n with T.If(self.eval_expr(node.test)):\n with T.Then():\n with self.var_table.with_frame():\n self.visit_body(node.body)\n if node.orelse:\n with T.Else():\n with self.var_table.with_frame():\n self.visit_body(node.orelse)", "def ifelse(test, if_true, if_false):\n if test:\n return if_true\n else:\n return if_false", "def compile_if(self) -> None:\n self._consume('if')\n self._consume('(')\n self.compile_expression()\n self._consume(')')\n\n end_lbl = f'IF_END_{self._if_count}'\n false_lbl = f'IF_FALSE_{self._if_count}'\n self._if_count += 1\n\n self._consume('{')\n self.writer.write_if(false_lbl)\n\n self.compile_statements()\n self.writer.write_goto(end_lbl)\n self.writer.write_label(false_lbl)\n\n self._consume('}')\n\n if self._get_current_token() == 'else':\n self._consume('else')\n self._consume('{')\n self.compile_statements()\n self._consume('}')\n\n self.writer.write_label(end_lbl)", "def compile_if(self):\n\n\t\txml = '<ifStatement>\\n' + self.tokenizer.keyword() + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)\n\n\t\tself.compile_expression()\n\n\t\txml = self.tokenizer.symbol() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != '}':\n\t\t\tself.compile_statements()\n\n\t\tself.outfile.write('</statements>\\n' + self.tokenizer.symbol())\n\n\t\tif self.tokenizer.get_token() == 'else':\n\t\t\tself.compile_else()\n\n\t\tself.outfile.write('</ifStatement>\\n')", "def test_28_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Type Mismatch In Statement: 
Return(None)\"\n\t\tself.assertTrue(TestChecker.test(input,expect,428))", "def test_29_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return 1; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,429))", "def test_30_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=1; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,430))", "def test_if_variable_statement():\n r = convert_code(\n \"{if $foo}\\nbar\\n{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% endif %}\"", "def test_27_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=0; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,427))", "def _analyse_stmt_If(self, statement: ast.If, *, next: CFNode) -> CFNode:\n # Analyse both branches unconditionally: even if they're not reachable,\n # they still need to exist in the graph produced.\n if_branch = self._analyse_statements(statement.body, next=next)\n else_branch = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=if_branch)\n else:\n branches.update(else_=else_branch)\n else:\n branches.update(enter=if_branch, else_=else_branch, error=self._raise)\n\n return self._ast_node(statement, **branches)", "def test_if_string_statement():\n r = convert_code(\n \"{if 'hello'}\\nbar\\n{/if}\")\n assert r == \"{% if 'hello' %}\\nbar\\n{% endif %}\"", "def _ifelse(self):\n debug.show(\"ifelse:Stack = \" + str(self.opStack))\n if self.opStack.size() >= 3:\n falseCode = check.isCode(self.opStack.pop()) # Make sure it is code (a list)\n trueCode = check.isCode(self.opStack.pop()) # Make sure it is code (a list)\n if check.isBool(self.opStack.pop()):\n debug.show(\"ifelse:True\")\n self.evaluate(trueCode)\n else:\n debug.show(\"ifelse:False\")\n self.evaluate(falseCode)\n else:\n debug.err(\"not enough items on the stack\")\n return None", "def link_if_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.true_body)\n if stmt.false_body is not None:\n self.link_stmt(stmt.false_body)", "def test_31_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\tif a>0 then return 0;\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,431))", "def test_if_statement_multiple():\n r = convert_code(\n \"{if !foo or foo.bar or foo|bar:foo['hello']}\\nfoo\\n{/if}\")\n assert r == \"{% if not foo or foo.bar or foo|bar(foo['hello']) %}\\nfoo\\n{% endif %}\"", "def visitIfElse(self, ctx):\n # type: (RelayParser.IfElseContext) -> expr.If\n cond = self.visit(ctx.expr())\n\n self.enter_var_scope()\n true_branch = self.visit(ctx.body(0))\n self.exit_var_scope()\n\n self.enter_var_scope()\n false_branch = self.visit(ctx.body(1))\n self.exit_var_scope()\n\n return 
expr.If(cond, true_branch, false_branch)", "def eval_if_else(item, motif_node_dict):\n # evaluate the `if` branch first\n true_branch = item.iftrue\n if type(true_branch).__name__ == 'FuncCall':\n motif_node, left = eval_function_call(true_branch, motif_node_dict) \n elif type(true_branch).__name__ == 'Assignment':\n left = eval_assignment(true_branch, motif_node_dict)\n elif type(true_branch).__name__ == 'Decl':\n left = eval_declaration(true_branch, motif_node_dict)\n elif type(true_branch).__name__ == 'Return':\n left = eval_return(true_branch, motif_node_dict)\n elif type(true_branch).__name__ == 'Compound':\n left = eval_function_body(true_branch, motif_node_dict)\n else:\n left = None\n # evaluate the `else` branch if it exists\n false_branch = item.iffalse\n if type(false_branch).__name__ == 'FuncCall':\n motif_node, right = eval_function_call(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Assignment':\n right = eval_assignment(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Decl':\n right = eval_declaration(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Return':\n right = eval_return(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Compound':\n right = eval_function_body(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'If': # else if case\n right = eval_if_else(false_branch, motif_node_dict)\n else:\n right = None\n\n if left or right:\n # only under certain circumstances do we actually create alternation node\n if eval_if_condition(item.cond):\n return provenance.create_alternation_node(left, right)\n else:\n # if only one branch is not None, we need not create a group node\n if not left:\n return right\n if not right:\n return left\n return provenance.create_group_node(left, right)\n else:\n return None", "def cond2if(cond_exp):\n def expand_clauses(list_of_clauses): \n if isNull(list_of_clauses):\n return FALSE # 4-15\n first = first_clause(list_of_clauses)\n rest = rest_clauses(list_of_clauses)\n if isElseClause(first):\n if isNull(rest):\n return seq2exp(cond_actions(first)) \n else:\n raise ValueError(\"ELSE clause is not last -- cond2if\")\n else:\n return make_if(\n cond_predicate(first),\n seq2exp(cond_actions(first)), # make a single \"'begin\" expression\n expand_clauses(rest))\n return expand_clauses(cond_clauses(cond_exp)) # 4-15 changed exp to cond_exp", "def ifelse(condition, then_branch, else_branch, name=None, outshape=None):\n # First check if we can replace an Theano conditional by a Python one\n if is_theano_object(condition) and is_constant(condition):\n condition = bool(condition.data)\n\n # Now the actual function\n if (cf.use_theano\n and not isinstance(condition, builtins.bool)\n and (isinstance(condition, theano.graph.basic.Variable)\n or isinstance(then_branch, theano.graph.basic.Variable)\n or isinstance(else_branch, theano.graph.basic.Variable))):\n # Theano function\n if isinstance(then_branch, LazyEval):\n then_branch = then_branch.eval()\n if isinstance(else_branch, LazyEval):\n else_branch = else_branch.eval()\n if outshape is None:\n # We call `bool` on the condition, in case it's a Python boolean\n # (even shim.ge & friends can return bools)\n return theano.ifelse.ifelse(bool(condition), then_branch,\n else_branch, name)\n else:\n return theano.ifelse.ifelse(bool(condition), then_branch.reshape(outshape),\n else_branch.reshape(outshape), name)\n else:\n # Python function\n if condition:\n if isinstance(then_branch, LazyEval):\n then_branch 
= then_branch.eval()\n return then_branch\n else:\n if isinstance(else_branch, LazyEval):\n else_branch = else_branch.eval()\n return else_branch", "def parse_if_cmd(self, line):\n line = re.sub(\"^if *\", \"\", line)\n\n # remove the brackets\n statement, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n\n # Check all variables have been declared\n any_vars = [i for i in re.findall(IN_STR_VAR_REGEX, statement)]\n # Get the variables declared\n _vars = []\n for var in any_vars:\n _Var = getattr(self, var.strip('$'))\n if type(_Var) == inp_types.Variable: _vars.append(_Var.data)\n else: _vars.append(_Var)\n\n for var_name, var_val in zip(any_vars, _vars):\n statement = statement.replace(var_name, str(var_val))\n\n # Evaluate the if statement\n try:\n var_container = {}\n exec(f\"val = {statement}\", var_container)\n val = var_container['val']\n except Exception as e:\n self.print_error(\"Couldn't parse the if statement\\n\\nError:\"\n + str(e))\n\n end_line = self.get_end_brace()\n\n self.line_num += 1\n if val is False:\n self.line_num = end_line", "def get_if_condition(self, file, i):\n\n # Check if 'if function' is to run main function of program\n if re.match(\"if __name__ == [\\\"']__main__[\\\"']:\", file[i]) and \\\n re.match(r\"\\s*main\\(\\)\", file[i + 1]):\n\n # If yes, return None\n return \"omit\", 2, \n\n # Run super definition\n line = super().get_if_condition(file, i)\n\n # Strip ending colon\n line = line.split(\":\", 1)\n line, multi_statement = line[0], line[1]\n\n # Set if keyword for back translation\n ln_split = line.split(\" \")\n if ln_split[0] not in [\"elif\", \"else\"]:\n if_kw = \"if\"\n else:\n if_kw, line = ln_split[0], \" \".join(ln_split[1:]).strip()\n\n # Replace 'elif' with standard\n if if_kw == \"elif\":\n if_kw = \"else if\"\n\n # Replace logical operators\n line = self.replace_logical_ops(line, direction=\"to\")\n\n # Create start and end for while call\n start = []\n end = []\n\n # Check if multiple statements are declared in one line\n if multi_statement.strip():\n start += multi_statement.split(\";\")\n\n # Return if condition\n return line, if_kw, start, end", "def _if(self):\n debug.show(\"if:Stack = \" + str(self.opStack))\n if self.opStack.size() >= 2:\n ifcode = isCode(self.opStack.pop()) # Make sure it is code (a list)\n if check.isBool(self.opStack.pop()):\n debug.show(\"if:True\")\n evaluate(ifcode)\n else:\n debug.err(\"not enough items on the stack\")\n debug.show(\"if:False\")\n return None", "def check_if_statement(self, line):\n line = re.sub(\"^if *\", \"\", line)\n if '(' not in line or ')' not in line:\n self.print_error(\"Syntax error: If statements take the syntax if (condition) { ... 
}\",\n errorFunc=SyntaxError)\n\n\n # remove the brackets\n statement, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n\n\n # Check all variables have been declared\n any_vars = [i.strip('$') for i in re.findall(VAR_REGEX, statement)]\n for var_name in any_vars:\n if var_name not in self.variables:\n self.print_error(f\"Unknown variable: {var_name}\")", "def condition(self) -> global___Expression:", "def condition(self) -> global___Expression:", "def eliminate_ifones(body):\n def isifone(tree):\n if type(tree) is If:\n if type(tree.test) is Num: # TODO: Python 3.8+: ast.Constant, no ast.Num\n if tree.test.n == 1:\n return \"then\"\n elif tree.test.n == 0:\n return \"else\"\n elif type(tree.test) is NameConstant: # TODO: Python 3.8+: ast.Constant, no ast.NameConstant\n if tree.test.value is True:\n return \"then\"\n elif tree.test.value in (False, None):\n return \"else\"\n return False\n\n def optimize(tree): # stmt -> list of stmts\n t = isifone(tree)\n if t:\n branch = tree.body if t == \"then\" else tree.orelse\n return branch\n return [tree]\n\n return transform_statements(optimize, body)", "def statement( ): # parse rountin for statement that makes sure the token is one of the following, eventually there will be an error caught\n\n\ttok = tokens.peek( )\n\tif debug: print( \"statement: \", tok )\n\tif tok == \"if\":\n\t\tstat = parseIfStatement( )\n\t\treturn stat\n\telif tok == \"while\":\n\t\tstat = parseWhileStatement( )\n\t\treturn stat\n\telse: \n\t\tstat = parseAssign( )\n\t\treturn stat", "def newif(line):\n if not line.startswith(\"\\\\newif\"):\n return False\n pieces = line.split(\"\\\\\")\n if len(pieces) != 4 or pieces[0] != \"\" or pieces[1] != \"newif\":\n print(\"%Wrong number of pieces: \"+line)\n return False\n if not pieces[2].startswith(\"if\"):\n print(\"%Missing if: \"+line)\n return False\n name = pieces[2][2:]\n if not pieces[3].startswith(name):\n print(\"%Name missing: \"+line)\n return False\n value = pieces[3][len(name):]\n if not value in truth:\n print(\"Misunderstood truth value: \"+line)\n return False\n conditionals[\"\\\\if\"+name] = truth[value]\n return True", "def with_if_statement():\n if c():\n return t()\n else:\n return f()", "def _apply_if_statement(statement: ast.If) -> None:\n for child in ast.iter_child_nodes(statement):\n if isinstance(child, ast.If):\n if child in statement.orelse:\n setattr(statement, 'wps_if_chained', True) # noqa: B010\n setattr(child, 'wps_if_chain', statement) # noqa: B010", "def test_only_if(self):\n script = self.write_script(\"\"\"\n variable = 5\n if variable == 5:\n check = 30\n end\n \"\"\")\n check = script.get_variable_or_attribute(\"check\")\n self.assertEqual(check, 30)", "def syntax_if_elif_else():\n s = 0.1\n if s < 0:\n print(\"s is smaller than 0\")\n elif s > 1:\n print(\"s is greater than 1\")\n else:\n print(\"s is between 0 and 1\")\n\n ## Output\n # s is between 0 and 1", "def __EvaluateIf(self, countIf, line):\n countIf = countIf - 1\n i = self.__ifs[countIf]\n i.SetLinePointer(self.__linePointer)\n #s = self.ScanIfCond(self.__oc.GermanUmlautReplace(line))\n s = self.ScanIfCond(line)\n if s:\n i.Set(s[0])\n try:\n i.Eval()\n line = ''\n except:\n raise Core.Error.IfHasNoEndif(0, 'IF-EXPRESSION %i HAS HAD AN ERROR:' \\\n ' EITHER NO CORRESPONDING (endif) OR SYNTAX ERROR'\n % countIf)\n l1, l2 = i.GetNextLine(), line\n return l1, l2", "def fn_if(self, value):\n\n condition_name, true_value, false_value = value\n if self.parser.conditions.evaluate(condition_name):\n return true_value\n 
else:\n return false_value", "def gen_condition(self, condition, yes_block, no_block):\n if isinstance(condition, expressions.BinaryOperator):\n if condition.op == \"||\":\n middle_block = self.builder.new_block()\n self.gen_condition(condition.a, yes_block, middle_block)\n self.builder.set_block(middle_block)\n self.gen_condition(condition.b, yes_block, no_block)\n elif condition.op == \"&&\":\n middle_block = self.builder.new_block()\n self.gen_condition(condition.a, middle_block, no_block)\n self.builder.set_block(middle_block)\n self.gen_condition(condition.b, yes_block, no_block)\n elif condition.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\"]:\n lhs = self.gen_expr(condition.a, rvalue=True)\n rhs = self.gen_expr(condition.b, rvalue=True)\n op_map = {\n \">\": \">\",\n \"<\": \"<\",\n \"==\": \"==\",\n \"!=\": \"!=\",\n \"<=\": \"<=\",\n \">=\": \">=\",\n }\n op = op_map[condition.op]\n self.emit(ir.CJump(lhs, op, rhs, yes_block, no_block))\n else:\n self.check_non_zero(condition, yes_block, no_block)\n elif isinstance(condition, expressions.UnaryOperator):\n if condition.op == \"!\":\n # Simply swap yes and no here!\n self.gen_condition(condition.a, no_block, yes_block)\n else:\n self.check_non_zero(condition, yes_block, no_block)\n else:\n self.check_non_zero(condition, yes_block, no_block)", "def toggle(condition, if_true, if_false):\n return (if_true if condition else if_false)", "def _if_node(self):\n # `cond` returns a tensor that contains boolean values. We add a `min`\n # operator to checks if there is any false value. If so, this condition\n # doesn't not hold.\n cond = tvm.relay.op.min(self.cond)\n return tvm.relay.If(cond, self.true_branch, self.false_branch)", "def visit_if(self, node):\n branches = 1\n # don't double count If nodes coming from some 'elif'\n if node.orelse and len(node.orelse) > 1:\n branches += 1\n self.inc_branch(branches)\n self.stmts += branches", "def eval_if_condition(condition):\n if type(condition).__name__ == 'BinaryOp':\n if type(condition.left).__name__ == 'ID':\n # case: if (mask & XXX) {...} in \"provenance_inode_permission\"; mask can only be determined at runtime\n if condition.left.name == 'mask':\n return True\n # case: if (shmflg & SHM_RDONLY) {...} in \"provenance_shm_shmat\"; shmflg can be only be determined at runtime\n if condition.left.name == 'shmflg':\n return True\n elif type(condition.left).__name__ == 'BinaryOp':\n if type(condition.left.left).__name__ == 'ID':\n # case: if ((perms & (DIR__WRITE)) != 0) in \"provenance_file_permission\"; perms can only be determined at runtime\n if condition.left.left.name == 'perms':\n return True\n # case: if ((prot & (PROT_WRITE)) != 0) in \"provenance_mmap_file\"; prot can only be determined at runtime\n elif condition.left.left.name == 'prot':\n return True\n elif type(condition.left.left).__name__ == 'BinaryOp':\n if type(condition.left.left.left).__name__ == 'ID':\n # case: if ((flags & MAP_TYPE) == MAP_SHARED...) in \"provenance_mmap_file\"; flags can only be determined at runtime\n if condition.left.left.left.name == 'flags':\n return True\n elif type(condition.left.right).__name__ == 'ID':\n # case: if (sock->sk->sk_family == PF_UNIX &&...) 
in \"provenance_socket_recvmsg\", \"provenance_socket_recvmsg_always\", \"provenance_socket_sendmsg\", \"provenance_socket_sendmsg_always\"; sock->sk->sk_family can only be determined at runtime\n if condition.left.right.name == 'PF_UNIX':\n return True\n elif type(condition).__name__ == 'FuncCall':\n # case: if (is_inode_dir(inode)) in \"provenance_file_permission\"; inode type can only be determined at runtime\n if condition.name.name == 'is_inode_dir':\n return True\n # case: else if (is_inode_socket(inode)) in \"provenance_file_permission\"\n elif condition.name.name == 'is_inode_socket':\n return True\n # case: if ( vm_mayshare(flags) ) in \"provenance_mmap_munmap\"; flags can only be determined at runtime\n elif condition.name.name == 'vm_mayshare':\n return True\n elif type(condition).__name__ == 'ID':\n # case: if (iprovb) in \"provenance_socket_sendmsg\", \"provenance_socket_sendmsg_always\"\n if condition.name == 'iprovb':\n return True\n # case: if (pprov) in \"provenance_socket_recvmsg\", \"provenance_socket_recvmsg_always\"\n elif condition.name == 'pprov':\n return True\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n else:\n return False", "def dummy_elif(dummy_code_block):\n return make_dummy_elif()", "def convert_if(self, condition, if_kw):\n\n # Run super definition\n condition = super().convert_if(condition)\n\n # Create if template\n if_template = \"{if_kw} {cond}:\" if condition else \"{if_kw}:\"\n\n # Convert if keyword from standard to python\n if if_kw == \"else if\":\n if_kw = \"elif\"\n\n # Replace logical operators\n condition = self.replace_logical_ops(condition, direction=\"from\")\n\n # Return converted if statement\n return [if_template.format(if_kw=if_kw, cond=condition)], []", "def evaluateBoolean(compiled_expression):", "def test_32_if(self):\n\t\tinput = \"\"\"function foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a=0 then return 1; else return a; end\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,432))", "def if_then(condition: Callable[[], bool], then_source: ObservableBase,\n else_source: ObservableBase = None) -> ObservableBase:\n from ..operators.observable.ifthen import if_then\n return if_then(condition, then_source, else_source)", "def executeIf(tree,file):\n if(evalBoolean(tree.children[0])):\n explore(tree.children[1],file)", "def compile_else(self):\n\n\t\txml = self.tokenizer.keyword() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != '}':\n\t\t\tself.compile_statements()\n\n\t\txml = '</statements>\\n' + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)", "def _process_if(self, node):\n creg = node.children[0].name\n cval = node.children[1].value\n self.backend.set_condition(creg, cval)\n self._process_node(node.children[2])\n self.backend.drop_condition()", "def on_true(self) -> global___Expression:", "def with_if_function():\n return if_function(c(), t(), f())", "def elifs_to_if_then_else(stm):\n if stm.elifs:\n # replace elifs with nested if statements\n ifFalse = HdlStmBlock()\n topIf = HdlStmIf(stm.cond, stm.if_true, ifFalse)\n\n for c, stms in stm.elifs:\n _ifFalse = HdlStmBlock()\n\n lastIf = HdlStmIf(c, stms, _ifFalse)\n\n ifFalse.append(lastIf)\n ifFalse = 
_ifFalse\n\n if stm.if_false is None:\n lastIf.if_false = HdlStmBlock()\n else:\n lastIf.if_false = stm.if_false\n\n return topIf\n return stm", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def test_validates(if_statement_validator):\n test = {\n 'condition': 'is',\n 'target': 'bob',\n 'then': 'arne',\n }\n assert if_statement_validator(test).unwrap() == test", "def if_(self, condition_token, keyword_token, *tokens, preprocessor=None):\n condition = self._get_token_value(condition_token)\n if not isinstance(condition, bool):\n raise DoxhooksTypeError(condition, condition_token, \"bool\")\n if condition:\n self.interpret(keyword_token, *tokens, preprocessor=preprocessor)", "def do_ifjs(parser, token):\n thenode = do_if(parser, token)\n thenode.__class__ = IfjsNode\n return thenode", "def _truth_value(self, condition):\n if condition:\n return 'true stuff'\n else:\n return 'false stuff'", "def _truth_value(self, condition):\n if condition:\n return 'true stuff'\n else:\n return 'false stuff'", "def start_ifeq(self, left, right):\n self.write_line(\"ifeq (\" + left + \",\" + right + \")\")\n self.indent_right()", "def _visit_loop_body(self, node, if_block=None, is_for=None):\n loop_name = \"for\" if is_for else \"while\"\n if if_block:\n node.if_block = if_block\n else:\n node.if_block = self.flow.nextblock(label=\"%s_body\" % loop_name,\n pos=node.body[0])\n self.visitlist(node.body)\n self.flow.loops.pop()\n\n if self.flow.block:\n # Add back-edge\n self.flow.block.add_child(node.cond_block)\n\n # Else clause\n if node.orelse:\n node.else_block = self.flow.nextblock(\n parent=node.cond_block,\n label=\"else_clause_%s\" % loop_name,\n pos=node.orelse[0])\n self.visitlist(node.orelse)\n if self.flow.block:\n self.flow.block.add_child(node.exit_block)\n else:\n node.cond_block.add_child(node.exit_block)\n\n self.exit_block(node.exit_block, node)", "def switch(condition, then_expression, else_expression):\n x_shape = copy.copy(then_expression.get_shape())\n x = tf.cond(tf.cast(condition, 'bool'),\n lambda: then_expression,\n lambda: else_expression)\n x.set_shape(x_shape)\n return x", "def if_(self):\n initial_pos = self.pos\n try:\n self.match_value(Identifier, \"if\")\n expr = self.tokens[self.pos:]\n self.pos = len(self.tokens)\n\n return IfNode(expr)\n except ParseError:\n self.pos = initial_pos\n raise ParseError(\"Invalid if directive.\")", "def _compile1_next_statement_with_mods(statements: Sequence[Statement]) -> (IFElement, int):\n main_statement = statements[0]\n\n if main_statement.matches_pattern(TEXT_PATTERN):\n ifelement = IFText(main_statement.get_atoms()[2].get_value())\n elif main_statement.matches_pattern(TITLE_PATTERN):\n ifelement = IFHeader(main_statement.get_atoms()[2].get_value(), 1)\n elif main_statement.matches_pattern(SUBTITLE_PATTERN):\n ifelement = IFHeader(main_statement.get_atoms()[2].get_value(), 2)\n elif main_statement.matches_pattern(IMAGE_PATTERN):\n ifelement = IFImage(main_statement.get_atoms()[3].get_value())\n elif main_statement.matches_pattern(TABLE_PATTERN):\n ifelement = 
IFImage(\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAB3RJTUUH4wQBBhwE9K/n3wAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAAD+SURBVGje7drRCsMgDIXhRH3/J27sLgplUFq1bazC7+1EzqfCEje1bDLzCDL5AAAAAAAAAAAAAAAAAAAA7kE1cAIAPEZ6ZZU1r64pY4yOgBii9zYvyyIiGvT9K9QhvYiklFxOYE/v9LxXszth6vT3Ad3SF9cPU6e/AxgqfTNgtPRtgAHTNwDGTF8LGDZ9FWDk9GXA4OkLpcT/d6FrzfNkd77vBx6ebbmY20rZYqlYnHYs7lW1R0Nz0UzcmEZL2X4CZrbtbl7zxbTrT48PJGbGCQAAAAAAAADTAM5qrRced73/Nritf1aqcIUAAAAAoFdTv/9WXt89dngE4Ap9PX4iDmnCXGGMIwAAAABJRU5ErkJggg==\")\n elif main_statement.matches_pattern(DESK_PATTERN):\n ifelement = IFImage(\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAB3RJTUUH4wQBBh85h+r4DQAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAAImSURBVGje7VrRjgMhCBT0/7+4ah9MLmYVRcBt9+o+XC7XFWcEgeEKMUX35Afdw59D4BBQPsHWnEfPf9kkfwTzI2HCWqJ67sAhYAAU8HjgEPjeOpBT7v7OXzXJud5vJHDJ6N77GFmlIATu7q/XyzkHCO1HoCyHVvWIw6FLIJigv5yCR59zTjlRqwyrddiB3jkXYxzEErVW4NuwAz0zvjnoY4pjJrgDvbLn4/hH7oGBdZP2eAn9sgdWre9Gv+aBrnVO2KzmnKXTCUr0483aF6b3Z9W3+Gj0LA8MImccPzegnxOgrAs26y7RZ4XArORbex5NTvu8HlBm5DDVnTlPunYA4LxGLaTE7kAS1G3iLymycmCcTwXesCHQ7eDrlriLrLzw52IAGCuyiz6kZEO7i1DQdPXoEsRvFPV3QtQSKNpq4O4yXKCyRCdzA059+ENzoZ8nwCxDxwP/l4DFaPGjKRUffQFUHkDAukTUeb0VD5qeOec8KBpBfPC10ZqJYGzBiVKKA4rR1+W522a5lTmu+KYFWdAXc+UndROs0E8aE5UajJE/U3lSHbgN/XIIjUWZlU7fSKCdSbVYb/un0+mFDoHtd6CdQZSUz5wjTAtTsa+0huM0b6VcBZXEJoS2tsqXDso+hEoL1e5R9w7dPqL9I9Vu8DlQkUZ+1aDdEgA4M0OAydcXuu2gsQeUSoWzvD54BBRzOHXgENiShVJORfJ2MykVr+Kbo7nEb2qMSvm3VR8cAAAAAElFTkSuQmCC\")\n elif main_statement.matches_pattern(HTML_REGEX_PATTERN):\n ifelement = IFElement('<a href=\"https://stackoverflow.com/a/1732454\">H̸̡̪̯ͨ͊̽̅̾̎Ȩ̬̩̾͛ͪ̈́̀́͘ ̶̧̨̱̹̭̯ͧ̾ͬC̷̙̲̝͖ͭ̏ͥͮ͟Oͮ͏̮̪̝͍M̲̖͊̒ͪͩͬ̚̚͜Ȇ̴̟̟͙̞ͩ͌͝S̨̥̫͎̭ͯ̿̔̀ͅ</a>')\n else:\n raise ValueError(f\"Unknown statement: {main_statement}\")\n\n mods = []\n for statement in statements[1:]:\n if statement.get_atoms()[0].get_atom_type() == AtomType.KEYWORD \\\n and statement.get_atoms()[0].get_value() == parser.WITH:\n mods.append(statement)\n else:\n break\n\n for mod in mods:\n build_modifier(mod).get_fun()(ifelement) # Uses side effects :(\n\n return ifelement, len(mods) + 1", "def should_execute(self, context: dict) -> bool:\n\n print(f'Checking snippet: {self.name}')\n\n if 'when' not in self.metadata:\n # always execute when no when conditional is present\n print(f'No conditional present, proceeding with skillet: {self.name}')\n return True\n\n when = self.metadata['when']\n when_str = '{{%- if {0} -%}} True {{%- else -%}} False {{%- endif -%}}'.format(when)\n when_template = self._env.from_string(when_str)\n results = when_template.render(context)\n print(f' Conditional Evaluation results: {results} ')\n if str(results).strip() == 'True':\n return True\n else:\n return False", "def test_if_and_filter_statement():\n r = convert_code(\n \"{if foo and awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if foo and awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"", "def make_dummy_elif():\n elif_ = Elif(method=return_true, code_block=make_dummy_code_block())\n elif2_ = Elif(method=return_true, code_block=make_dummy_code_block())\n else_ = Else(code_block=make_dummy_code_block())\n instruction = IfElifElse(\n method=return_true,\n code_block=make_dummy_code_block(),\n elifs=[elif_, elif2_],\n else_=else_,\n )\n return instruction", "def ifcontext(ifcond: QAPISchemaIfCond, *args: QAPIGenCCode) -> Iterator[None]:\n for arg in args:\n 
arg.start_if(ifcond)\n yield\n for arg in args:\n arg.end_if()", "def iff(bool,trueValue,falseValue):\n if bool:\n return trueValue\n else:\n return falseValue", "def cond(\n scheduler: Scheduler,\n parent_job: Job,\n sexpr: SchedulerExpression,\n cond_expr: Any,\n then_expr: Any,\n *rest: Any,\n) -> Promise:\n exprs = (cond_expr, then_expr) + rest\n\n def then(args):\n i, cond_value = args\n\n if cond_value:\n # Return 'then' clause.\n return scheduler.evaluate(exprs[i + 1], parent_job=parent_job)\n\n elif len(exprs) - i == 3:\n # No more expresses, so return 'otherwise' clause.\n return scheduler.evaluate(exprs[i + 2], parent_job=parent_job)\n\n else:\n # Recurse to next conditional clause.\n return scheduler.evaluate((i + 2, exprs[i + 2]), parent_job=parent_job).then(then)\n\n # Evaluate conditional clause.\n return scheduler.evaluate((0, cond_expr), parent_job=parent_job).then(then)", "def _filter_if(node):\n return (\n isinstance(node.expression, UnaryOperation)\n and node.expression.type == UnaryOperationType.BANG\n )", "def set_if_chain(tree: ast.AST) -> ast.AST:\n for statement in ast.walk(tree):\n if isinstance(statement, ast.If):\n _apply_if_statement(statement)\n return tree", "def write_if(self, label):\n self._write_line('if-goto ' + label)", "def test_if_true():\n\n true_values = [True, 1, 2.0, object(), \"foo\", int]\n assert all(true_values)\n\n def f_if():\n if x:\n print(\"x is True\")\n\n for x in true_values:\n check_component(\n prune_cfg,\n f_if,\n additional_bindings=dict(x=x),\n expected_source=\"\"\"\n def f_if():\n print('x is True')\n \"\"\",\n )\n\n def f_if_else():\n if x:\n print(\"x is True\")\n else:\n print(\"x is False\")\n\n check_component(\n prune_cfg,\n f_if_else,\n additional_bindings=dict(x=2),\n expected_source=\"\"\"\n def f_if_else():\n print(\"x is True\")\n \"\"\",\n )", "def addEndIf(self):\n tpl = { 'action': ACTION_CONDITION_ENDIF, 'data': { 'obj': [ {} ] } }\n return tpl", "def test_if_filter_statement():\n r = convert_code(\n \"{if awesome.string|banana:\\\"foo\\\\\\\" $a\\\"}\\nbar\\n{/if}\")\n assert r == \"{% if awesome.string|banana(\\\"foo\\\\\\\" ${a}\\\") %}\\nbar\\n{% endif %}\"", "def ok_if_true(condition):\n\n if condition:\n html = '<i class=\"fa fa-check\"></i>'\n return mark_safe(html)\n return ''", "def ifelse(\n self,\n true_expr: ir.Value,\n false_expr: ir.Value,\n ) -> ir.Value:\n # Result will be the result of promotion of true/false exprs. These\n # might be conflicting types; same type resolution as case expressions\n # must be used.\n return ops.Where(self, true_expr, false_expr).to_expr()", "def strifset(xeval, iftrue, iffalse=\"\", typematch=False):\n if typematch:\n if not (xeval is False or xeval is None): return iftrue\n else: return iffalse\n else:\n if xeval: return iftrue\n else: return iffalse", "def special_if(self, form):\n testforms = [form[1:]]\n elseform = None\n\n startIndex = None\n\n parent = form.up()\n\n for i in range(len(parent)):\n x = parent[i]\n if x is form:\n startIndex = i\n\n if startIndex is None:\n raise RuntimeError(\"Bad\")\n\n # find following forms that begin with `elif' and `else'. We\n # break on anything else. 
Accumulate number of forms to delete.\n index = startIndex + 1\n\n while index < len(parent):\n f = parent[index]\n if isinstance(f, Form) and len(f) and isinstance(f[0], Identifier):\n if f[0].name == 'elif':\n testforms.append(f[1:])\n f.insert(0, Ignore)\n elif f[0].name == 'else':\n elseform = f[1:]\n f.insert(0, Ignore)\n # there should be nothing after else\n break \n else:\n # Anything other than elif or else, break\n break \n else:\n # it doesn't look anything at all like an else or an elif form\n break \n index += 1\n\n tests = [(self.reallyCompile(t[0]), self.compileSuite(t[1:])) for t in testforms]\n else_ = elseform and self.compileSuite(elseform)\n\n r = ast.If(tests, else_)\n return r", "def condition(x):\n return 'string' + x", "def else_(self):\n initial_pos = self.pos\n try:\n self.match_value(Identifier, \"else\")\n return ElseNode()\n except ParseError:\n self.pos = initial_pos\n raise ParseError(\"Invalid else directive.\")", "def If(name, condition_blob_or_net,\n true_nets_or_steps, false_nets_or_steps=None):\n if not false_nets_or_steps:\n return _RunOnceIf(name + '/If',\n condition_blob_or_net, true_nets_or_steps)\n\n if isinstance(condition_blob_or_net, core.Net):\n condition_blob = GetConditionBlobFromNet(condition_blob_or_net)\n else:\n condition_blob = condition_blob_or_net\n\n return Do(\n name + '/If',\n _RunOnceIf(name + '/If-true',\n condition_blob_or_net, true_nets_or_steps),\n _RunOnceIfNot(name + '/If-false', condition_blob, false_nets_or_steps)\n )", "def end_ifeq(self):\n self.indent_left()\n self.write_line(\"endif\")", "def is_if(self, file, i):\n\n # Save line to local variable\n line = file[i].strip()\n\n # If line starts with if and ends with ':' return True, else False\n if (line[:2] == \"if\" or line[:4] in [\"elif\", \"else\"]) and \":\" in line:\n return True\n return False" ]
[ "0.75067496", "0.71437025", "0.69992465", "0.69453186", "0.6932306", "0.6923417", "0.6896828", "0.68624914", "0.6846336", "0.68457514", "0.683607", "0.683607", "0.683607", "0.683607", "0.6810346", "0.68025327", "0.6774503", "0.6755583", "0.6644169", "0.66357064", "0.6629795", "0.66026986", "0.6570067", "0.6538245", "0.6513971", "0.6508838", "0.6467018", "0.6465875", "0.64132684", "0.63112146", "0.63031447", "0.6259792", "0.6215487", "0.61376286", "0.61169237", "0.6071987", "0.60661465", "0.6041416", "0.5956847", "0.5951688", "0.5948794", "0.5914303", "0.5914303", "0.59070814", "0.59056544", "0.5866672", "0.5857087", "0.5850751", "0.58407205", "0.58280075", "0.58206123", "0.5786505", "0.57772326", "0.5773504", "0.5745778", "0.57284933", "0.5704957", "0.568097", "0.56730574", "0.5629029", "0.5627185", "0.5621964", "0.56145513", "0.5610523", "0.5608252", "0.558951", "0.5558597", "0.55585545", "0.5522103", "0.5520734", "0.5497594", "0.5495143", "0.5473535", "0.5473535", "0.54666996", "0.54408145", "0.5437106", "0.5403607", "0.53711665", "0.5333262", "0.5317475", "0.53154624", "0.52878445", "0.52650917", "0.5258273", "0.5255264", "0.52490115", "0.5241914", "0.52333236", "0.52316916", "0.522619", "0.5217937", "0.5199617", "0.5168974", "0.5164122", "0.51582825", "0.5141147", "0.5134352", "0.51341605", "0.512611" ]
0.7603634
0
assign = ident "=" expression eoln
def parseAssign( ): # parse routine for assign; uses the assign class to print out the appropriate string
    tok = tokens.peek( )
    if debug: print( "assign: ", tok )
    if re.match( Lexer.identifier, tok ):
        ident = VarRef( tok )
    else:
        error( "Invalid identifier" )
    tok = tokens.next( )
    equals = match( "=" )
    tok = tokens.peek( )
    expr = expression( )
    match( ";" )
    equals = VarRef( equals )
    statement = assign( equals, ident, expr )
    return statement
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def test_50_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,450))", "def test_45_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:real; begin x:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,445))", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def test_47_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,447))", "def test_46_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:array[1 .. 
3]of real; begin x[1]:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,446))", "def irgen_assign(stmt, builder, table):\n lvalue = irgen_lvalue(stmt.exprs[0], builder, table)\n expr = irgen_expr(stmt.exprs[1], builder, table)\n builder.store(expr, lvalue)", "def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node", "def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)", "def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")", "def _Assign(self, t):\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")", "def _analyse_stmt_AnnAssign(\n self, statement: ast.AnnAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")", "def visit_ann_assign(self: Parser, node: doc.AnnAssign) -> None:\n lhs = node.target\n rhs = self.eval_expr(node.value)\n ann_var = self.visit_tvm_annotation(node.annotation)\n if not isinstance(ann_var, Var):\n self.report_error(node.annotation, \"Annotation should be Var\")\n self.eval_assign(target=lhs, source=ann_var, bind_value=bind_assign_value)\n frame = T.LetStmt(rhs, var=ann_var)\n frame.add_callback(partial(frame.__exit__, None, None, None))\n frame.__enter__()", "def visit_Assign(self, node):\n assign_stmts = []\n value = 
node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node", "def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)", "def visit_Assign(self, node):\n var_name = node.left.value\n self.VARIABLES[var_name] = self.visit(node.right)", "def _analyse_stmt_AugAssign(\n self, statement: ast.AugAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def visit_VarAssignNode(self, node: VarAssignNode, symbol_table: SymbolTable) -> None:\n if isinstance(node.name, AccessNode) and isinstance(node.name.item_to_access, NumberNode):\n var = self.visit(node.name.accessor, symbol_table)\n var.vals[int(node.name.item_to_access.tok.value)] = self.visit(node.value, symbol_table)\n if isinstance(var, List):\n var.value = [item[idx].value for idx, item in enumerate(var.vals.values())]\n else:\n return f'Strings are immutable'\n else:\n assignment = self.visit(node.value, symbol_table)\n\n symbol_table[node.name] = assignment", "def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False", "def test_49_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 2] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,449))", "def syntax_var_assign():\n a = 'Hello'\n print(f'{a} is stored at {hex(id(a))}')\n a = \"World\"\n print(f'{a} is stored at {hex(id(a))}')\n\n ## Output\n # Hello is stored at 0x10d251340\n # World is stored at 0x10d251378\n\n ## Notes\n # id()\n # Return the “identity” of an object. 
This is an integer (or long integer) which is guaranteed\n # to be unique and constant for this object during its lifetime.", "def test_48_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 2] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,448))", "def create_Assign(left_hand_side, right_hand_side):\n right_hand_side.ctx = ast.Load()\n left_hand_side.ctx = ast.Store()\n return ast.Assign(targets=[left_hand_side], value=right_hand_side)", "def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif is_compound and (is_multiple or is_sequence_node(node.value)):\n return self.visit_compound_assign(node)\n return node", "def assert_assignment(text, operator, left, right):\n try:\n node = parse_single_statement(text)\n eq_(node.op, operator)\n eq_(node.target.name, left)\n eq_( node.right.value, right)\n except AssertionError as e:\n node.show()\n raise e", "def Assignment(self):\n id = self.primary()\n if self.currtok[1].name == \"DECLERATION\":\n self.currtok = next(self.tg)\n if self.functions.get(self.currtok[0]) is not None:\n\n express = self.FunctionCall()\n return assignmentStmt(id, express)\n else:\n express = self.Expression()\n\n if self.currtok[1].name == \"SEMI\":\n self.currtok = next(self.tg)\n return assignmentStmt(id, express)\n raise SLUCSyntaxError(\"ERROR: Missing Semicolon on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing assignment on line {0}\".format(str(self.currtok[2] - 1)))", "def assign_to_env(self, line):\n tag = line[0]\n value = line[2::]\n self.env[tag] = self.eval_sub_statement(value)", "def assign_variable(executor, variable, value):\n variable = variable.replace(\" \", \"\")\n # TODO Should move parsing of this to ParsedStatementLet.\n # TODO Need to handle N-dimensional array element assignment.\n i = variable.find(\"(\")\n if i != -1:\n # Array reference\n j = variable.find(\")\", i+1)\n if j == -1:\n raise BasicSyntaxError(F\"Missing ) in in array assignment to {variable}\")\n if i+1 == j:\n raise BasicSyntaxError(F\"Missing array subscript in assignment to {variable}\")\n\n subscripts = variable[i+1:j].split(\",\")\n variable = variable[:i]\n is_valid_identifier(variable)\n subscripts = [int(eval_expression(executor._symbols, subscript)) - 1 for subscript in subscripts]\n executor.put_symbol_element(variable, value, subscripts)\n else:\n is_valid_identifier(variable)\n executor.put_symbol(variable, value, symbol_type=SymbolType.VARIABLE, arg=None)", "def visit_Assign(self, node: Assign) -> None:\n\n node_type = type(node.right).__name__\n if isinstance(node.right, String):\n self._create_instruct(node_type)\n self.visit(node.left)\n instruct = self.visit(node.right)\n c_str = self.builder.alloca(instruct.type)\n self.builder.store(instruct, c_str)\n self.builder.ret_void()\n else:\n self._create_instruct(node_type)\n self.visit(node.left)\n instruct = self.visit(node.right)\n self.builder.ret(instruct)\n\n self.GLOBAL_MEMORY[node.left.value] = instruct", "def test_001_lambda_assign(self):\r\n text = \"\"\"\r\n const f = (d,k,v) => d[k] = v\r\n 
\"\"\"\r\n tokens = Lexer().lex(text)\r\n ast = Parser().parse(tokens)\r\n expected = TOKEN('T_MODULE', '',\r\n TOKEN('T_VAR', 'const',\r\n TOKEN('T_ASSIGN', '=',\r\n TOKEN('T_TEXT', 'f'),\r\n TOKEN('T_LAMBDA', '=>',\r\n TOKEN('T_TEXT', 'Anonymous'),\r\n TOKEN('T_ARGLIST', '()',\r\n TOKEN('T_TEXT', 'd'),\r\n TOKEN('T_TEXT', 'k'),\r\n TOKEN('T_TEXT', 'v')\r\n ),\r\n TOKEN('T_ASSIGN', '=',\r\n TOKEN('T_SUBSCR', '',\r\n TOKEN('T_TEXT', 'd'),\r\n TOKEN('T_TEXT', 'k'))\r\n ),\r\n TOKEN('T_TEXT', 'v')\r\n )\r\n )\r\n )\r\n )\r\n\r\n self.assertFalse(parsecmp(expected, ast, False))", "def visit_compound_assign(self, node):\n # Determine number of values (arity) of compound assignment.\n nvalues = { len(target.elts) for target in node.targets \n if is_sequence_node(target) }\n if len(nvalues) > 1:\n # A multiple, compound assignment with different arities, e.g.,\n # `x,y = a,b,c = ...` is not a syntax error in Python, though it\n # probably should be because it's guaranteed to cause a runtime\n # error. Raise the error here, since we cannot proceed.\n raise SyntaxError(\"Multiple assignment with different arities\")\n nvalues = nvalues.pop()\n\n # Assign temporary variables.\n temps = [ gensym() for i in range(nvalues) ]\n stmts = []\n if is_sequence_node(node.value) and len(node.value.elts) == nvalues:\n # Special case: RHS is sequence literal of correct length.\n for i in range(nvalues):\n temp_target = to_name(temps[i], ast.Store())\n stmts.append(ast.Assign([temp_target], node.value.elts[i]))\n else:\n # General case.\n temp_target = to_tuple(\n (to_name(temp, ast.Store()) for temp in temps), ast.Store())\n stmts.append(ast.Assign([temp_target], node.value))\n\n # Rewrite assignments as sequence of assignments.\n for target in reversed(node.targets):\n if is_sequence_node(target):\n stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i]))\n for i in range(nvalues))\n else:\n temp_tuple = to_tuple(to_name(temp) for temp in temps)\n stmts.append(ast.Assign([target], temp_tuple))\n \n return stmts", "def visit_Assign(self, node: ast.Assign) -> None:\n # skip multiple assignments\n if len(node.targets) != 1:\n return\n\n # skip complex assignments\n if not isinstance(node.targets[0], ast.Name):\n return\n\n name = node.targets[0].id\n\n # skip private attributes\n if name.startswith(\"_\"):\n return\n\n self.attribute_nodes.append(node)", "def visit_AugAssign(self, node):\n # FIXME: Gensym the LHS to avoid two evaluations.\n self.generic_visit(node)\n rhs = to_call(self.op_to_function(node.op),\n [set_ctx(node.target), node.value])\n return ast.Assign([node.target], rhs)", "def visit_aug_assign(self: Parser, node: doc.AugAssign) -> None:\n lhs_pos = (\n node.target.lineno,\n node.target.col_offset,\n node.target.end_lineno,\n node.target.end_col_offset,\n )\n rhs_pos = (\n node.value.lineno,\n node.value.col_offset,\n node.value.end_lineno,\n node.value.end_col_offset,\n )\n node.target.ctx = doc.Load(*lhs_pos)\n with self.var_table.with_frame():\n lhs_name = \"__tvm_tmp_value_aug_assign_lhs\"\n rhs_name = \"__tvm_tmp_value_aug_assign_rhs\"\n lhs_expr = self.eval_expr(node.target)\n rhs_expr = self.eval_expr(node.value)\n self.var_table.add(lhs_name, lhs_expr)\n self.var_table.add(rhs_name, rhs_expr)\n op = doc.BinOp(\n doc.Name(lhs_name, doc.Load(*lhs_pos), *lhs_pos),\n node.op,\n doc.Name(rhs_name, doc.Load(*rhs_pos), *rhs_pos),\n *lhs_pos,\n )\n rhs = self.eval_expr(op)\n lhs = node.target\n lhs.ctx = doc.Store(*lhs_pos)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, 
doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = [self.eval_expr(lhs.slice)]\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def visit_Assign(self, node):\n var_name = node.left.token.value\n self.GLOBAL_SCOPE[var_name] = self.visit(node.right)", "def _(self, node: Assignment):\n\n # This check allows us to ignore the initialization nodes\n # in the CAST 'i.e. x0 = -1'\n if node.source_refs == None:\n if type(node.left) == Var:\n if type(node.right) == Number and node.right.number == -1:\n return \"\"\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n to_ret = f\"( assign {left} {right} )\"\n return to_ret", "def assign(self, *args):\n return _libsbml.string_assign(self, *args)", "def visit_AugAssign(self, node):\n target = node.target\n\n rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)", "def f_setvar(self, name, expr):\r\n self.locals_ptr[name] = self.eval(expr, self.locals_ptr)\r\n return \"\"", "def val_at(self, *args, **kwargs):\n self.add_pc(1)\n self.pb[self.pc - 1] = \"ASSIGN\", _m(self.ss_i(0), \"@\"), _m(self.ss_i(0))", "def visit_AugAssign(self, node):\n self.generic_visit(node)\n stmts = []\n target = node.target\n if not isinstance(target, ast.Subscript):\n return node\n\n # AST node for target value, gensym-ed if necessary.\n if self.can_reevaluate(target.value):\n target_node = target.value\n else:\n target_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(target_node, ast.Store())], target.value))\n \n # AST node for index, gensym-ed if necessary.\n index_expr = self.index_to_expr(target.slice)\n if self.can_reevaluate(index_expr):\n index_node = index_expr\n else:\n index_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(index_node, ast.Store())], index_expr))\n \n # Main AST node for the indexed augemented assignment.\n stmts.append(ast.Expr(\n to_call(to_attribute(self.operator, 'setitem'), [\n target_node,\n index_node,\n to_call(self.op_to_function(node.op), [\n to_call(to_attribute(self.operator, 'getitem'), [\n target_node,\n index_node,\n ]),\n node.value\n ])\n ])\n ))\n\n return stmts", "def assign(obj, path, val, missing=None):\n return glom(obj, Assign(path, val, missing=missing))", "def assign(self, *args):\n return _ida_hexrays.cinsn_t_assign(self, *args)", "def _compat_assign_gast_4(targets, value, type_comment):\n return gast.Assign(targets=targets, value=value)", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def copy_stmt(self, env, dst_marking_var, src_marking_var):\n field = self.field\n return pyast.E(\"{} = {}\".format(field.access_from(dst_marking_var), field.access_from(src_marking_var)))", "def set_assignment(self, var, value):\n self.variable_to_value[var] = value", "def eval_statement(self, line):\n if line[0] in self.env:\n self.env[line[0]](line[1::])\n elif line[1] == \"=\":\n self.assign_to_env(line)\n else:\n print(\"ERROR: Undefined function {}\".format(line[0]))\n quit()", "def is_assign_to_name(statement):\n return isinstance(statement, ast.Assign) and \\\n len(statement.targets) == 1 and \\\n 
isinstance(statement.targets[0], ast.Name)", "def special_setitem(self, form):\n obj = self.reallyCompile(form[1])\n key = self.reallyCompile(form[2])\n value = self.reallyCompile(form[3])\n return ast.Assign([ast.Subscript(obj,\n 'OP_ASSIGN',\n [key])],\n value)", "def test_compiler_assignment(patch, compiler, lines, tree):\n patch.many(Objects, ['names', 'entity'])\n tree.assignment_fragment.service = None\n tree.assignment_fragment.mutation = None\n compiler.assignment(tree, '1')\n Objects.names.assert_called_with(tree.path)\n fragment = tree.assignment_fragment\n entity = get_entity(fragment.expression)\n Objects.entity.assert_called_with(entity)\n kwargs = {'name': Objects.names(), 'args': [Objects.entity()],\n 'parent': '1'}\n lines.append.assert_called_with('set', tree.line(), **kwargs)", "def verify_assign(self, d_stmt, table):\n lvalue = DanaExpr.factory(d_stmt.find_first_child(\"p_lvalue\"), table)\n expr = DanaExpr.factory(d_stmt.find_first_child(\"p_expr\"), table)\n self.exprs = [lvalue, expr]\n\n expr.type.check_type(d_stmt.linespan, lvalue.type)\n expr.type.in_types(d_stmt.linespan, [DanaType(\"int\"), DanaType(\"byte\")])", "def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)", "def _assign_op(dest, op, arg, val, path, scope):\n if op == '[':\n dest[arg] = val\n elif op == '.':\n setattr(dest, arg, val)\n elif op == 'P':\n _assign = scope[TargetRegistry].get_handler('assign', dest)\n try:\n _assign(dest, arg, val)\n except Exception as e:\n raise PathAssignError(e, path, arg)\n else: # pragma: no cover\n raise ValueError('unsupported T operation for assignment')", "def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:\n for target in node.targets if isinstance(node, ast.Assign) else [node.target]:\n dottedname = node2dottedname(target) \n yield dottedname", "def assign_variable(self, name, value):\n return self.set_variable(name, value)", "def eval_assignment(assignment, motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. 
For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None", "def _compat_assign_gast_5(targets, value, type_comment):\n return gast.Assign(targets=targets, value=value, type_comment=type_comment)", "def multiple_value_call_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"multiple_value_call_assignment_handler\")\n target_stmts, value_var = stypy_functions.create_temp_Assign(value, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n assign_stmts.append(target_stmts)\n\n #value_var_to_load = copy.deepcopy(value_var)\n value_var_to_load = ast.Name()\n value_var_to_load.col_offset = value_var.col_offset\n value_var_to_load.lineno = value_var.lineno\n value_var_to_load.id = value_var.id\n value_var_to_load.ctx = ast.Load()\n\n for i in xrange(len(target.elts)):\n # Assign values to each element.\n # getitem_att = core_language.create_attribute(value_var_to_load, '__getitem__', context=ast.Load(),\n # line=node.lineno,\n # column=node.col_offset)\n # item_call = functions.create_call(getitem_att, [core_language.create_num(i, node.lineno, node.col_offset)])\n # temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n # \"{0}_assignment\".format(id_str))\n stypy_interface = core_language.create_Name('stypy_interface')\n get_tuple_call = core_language.create_attribute(stypy_interface, 'stypy_get_value_from_tuple', context=ast.Load(),\n line=node.lineno,\n column=node.col_offset)\n\n item_call = functions.create_call(get_tuple_call, [value_var_to_load,\n core_language.create_num(len(target.elts), node.lineno, node.col_offset),\n core_language.create_num(i, node.lineno, node.col_offset)])\n temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n temp_stmts = core_language.create_Assign(target.elts[i], temp_value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n return True", "def expression(self, expr):\n self.set(expression=expr)", "def assign(self, var, value):\n\t\tself._root = self._insert(self._root, var, value)", "def get_assignment_literal_value(self):\n if not self.is_single_assign:\n raise ValueError(\n \"Statement is not an assignment to a single name: %s\" % self)\n n = 
self.ast_node\n target_name = n.targets[0].id\n literal_value = ast.literal_eval(n.value)\n return (target_name, literal_value)", "def bind_assign_value(self: Parser, node: doc.expr, var_name: str, value: Any) -> Any:\n if isinstance(value, T.meta_var):\n return value.value\n elif isinstance(value, (list, tuple)):\n for i, v in enumerate(value):\n bind_assign_value(self, node, f\"{var_name}_{i}\", v)\n return value\n elif isinstance(value, Frame):\n value.add_callback(partial(value.__exit__, None, None, None))\n res = value.__enter__()\n IRBuilder.name(var_name, res)\n return res\n elif isinstance(value, (Buffer, IterVar)) or (\n isinstance(value, Var) and not self.var_table.exist(value)\n ):\n IRBuilder.name(var_name, value)\n return value\n else:\n value = tvm.runtime.convert(value)\n frame = T.LetStmt(value)\n var = frame.var\n IRBuilder.name(var_name, var)\n frame.add_callback(partial(frame.__exit__, None, None, None))\n frame.__enter__()\n return var", "def test_statement_initialized_by_assignment():\n shap = Statement(shape_id=\"@photo\", prop_id=\"dcterms:creator\", value_type=\"URI\")\n shap2 = Statement()\n shap2.shape_id = \"@photo\"\n shap2.prop_id = \"dcterms:creator\"\n shap2.value_type = \"URI\"\n assert shap == shap2", "def Block(self, prefix, assign=False, semicolon=False):\n self.Write('%s = {' if assign else '%s {', prefix)\n self._indent += 1\n yield\n self._indent -= 1\n self.Write('};' if assign or semicolon else '}')", "def test_obj_action_for_assignments():\n grammar = r\"\"\"\n S: a=\"foo\" b?=\"bar\" c=C+;\n C: val=\"baz\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = Parser(g)\n\n result = p.parse(\"foo bar baz baz baz\")\n\n assert isinstance(result, g.classes['S'])\n assert isinstance(result.c[0], g.classes['C'])\n\n assert result.a == \"foo\"\n assert result.b is True\n assert len(result.c) == 3\n assert all((c.val == \"baz\" for c in result.c))", "def set_value(self, var_name, new_value, tf_session):\n\n if(var_name in self.assign_operator):\n\n tf_session.run(\n self.assign_operator[var_name], {\n self.l_param_input[var_name]: new_value})\n else:\n print(\"Thou shall only assign learning parameters!\")", "def addAssignmentRule(self, var, math):\n\n r = self.model.createAssignmentRule()\n self.check(r, \"create assignment rule r\")\n self.check(r.setVariable(var), \"set assignment rule variable\")\n math_ast = libsbml.parseL3Formula(math)\n self.check(r.setMath(math_ast), \"set assignment rule equation\")\n return r", "def __setitem__(self, idx, value):\n if not isinstance(value, nodes.Node):\n raise NotImplementedError(\"setitem with non-blaze rhs\")\n result = self.getitem(idx, context='set')\n result = Assign('assign', [result, value])\n result.eval()", "def set_var(parser,token):\n parts =token.split_contents()\n if len(parts) < 4:\n raise template.TemplateSyntaxError(\"'set' tag must be of the form:{% set <var_name>=\n <var_value>%}\")\n return SetVarNode(parts[1],parts[3])", "def assign(self, *args):\n return _ida_hexrays.cexpr_t_assign(self, *args)", "def let(self, var, val):\n\n self.d['__vstemp'] = val\n if var.endswith('+'):\n rvar = var.rstrip('+')\n # .. 
obj = eval(rvar,self.d)\n exec(\"%s.append(__vstemp)\" % rvar, self.d)\n else:\n exec(var + \" = __vstemp\", self.d)\n del self.d['__vstemp']", "def mk_assign(var_map, s, assigns):\n assign_args = []\n for k, v in assigns.items():\n k2 = convert_term(var_map, s, k)\n assert k2.fun == s, \"mk_assign: key is not an identifer.\"\n assign_args.append(k2.arg)\n assign_args.append(convert_term(var_map, s, v))\n\n return function.mk_fun_upd(s, *assign_args)", "def state_assignment(cls, state_assignment_string):\n lhs, rhs = state_assignment_string.split('=')\n return StateAssignment(lhs=lhs, rhs=rhs)", "def assign_operator(cls, quad):\n\t\tvalue = cls.get_address_value(quad.left_operand)\n\t\tif quad.right_operand :\n\t\t\tcls.set_arr_value(quad.result, quad.right_operand, value)\n\t\telse:\n\t\t\tcls.set_address_value(quad.result, value)", "def test_variable_assign(self):\n self.trace('x = 1')\n\n events = self.variable_events\n self.assertEqual(len(events), 1)\n event = events[0]\n self.assertIsInstance(event, TraceAssign)\n self.assertEqual(event.name, 'x')\n self.assertEqual(event.value, 1)", "def set(self, identifier, value_token, *, preprocessor=None):\n try:\n value = ast.literal_eval(value_token)\n except (SyntaxError, ValueError):\n value = value_token\n setattr(self, identifier, value)", "def visit_any_assign(self, node: types.AnyAssign) -> None:\n self._check_slots(node)\n self.generic_visit(node)", "def rhs(self):\n if not self.is_assign():\n raise AssertionError('Not an assignment')\n return self.initializer", "def assign(self, other):\n\n assert isinstance(other, VarList)\n assert len(self) == len(other)\n ops = []\n for (my_var, other_var) in zip(self.vars_, other.vars_):\n ops.append(my_var.assign(other_var))\n return tf.group(*ops, name=\"assign_\"+self.name)", "def set_expr(self, expr: Expr):\r\n self.expr = Expr.cnf(expr)", "def test_swap_assignment():\n x,y = 5,10\n yield (x,y)\n x,y = y,x # no ref-counting here\n yield (x,y)", "def find_lhs(line):\n ind = line.find('=')\n ind2 = line.find('(')\n if ind == -1:\n return None\n elif ind2 > -1:\n #there is both an equal sign and a (\n if ind < ind2:\n #the equal sign is first and there is an lhs\n #out = myfunc(b=5)#<-- the lhs here is \"out\"\n return line[0:ind]\n else:\n #the ( is first as in\n #myfunc(1, b=2)#<-- note that there is no assignment here\n return None\n else:\n #there is an equal sign, but no (\n return line[0:ind]", "def set(self, ident, value, default=False):\n\n root = self\n ident = util.unscalar(ident)\n value = util.unscalar(value)\n # ELEMENT: {\n if isinstance(ident, str) and ident.find(\".\") >= 0:\n ident = [y for x in ident.split(\".\")\n for y in (re.sub(r\"\\(.*$\", \"\", x), 0)]\n if isinstance(ident, (list, tuple)):\n chopped = list(util.chop(ident, 2))\n for i in range(len(chopped) - 1):\n x, y = chopped[i]\n result = self.__dotop(root, x, y, True)\n if result is None:\n # last ELEMENT\n return \"\"\n else:\n root = result\n result = self.__assign(root, chopped[-1][0], chopped[-1][1],\n value, default)\n else:\n result = self.__assign(root, ident, 0, value, default)\n\n if result is None:\n return \"\"\n else:\n return result", "def process_assignment_ast(stmt_ast: ast.Assign, stmt_ast_parent_block):\n logger.log.info(\"Generating SymbolicState instance from assignment ast\")\n # first, add a reference from stmt_ast to its parent block\n stmt_ast.parent_block = stmt_ast_parent_block\n logger.log.info(\"Instantiating symbolic state for AST instance stmt_ast = %s\" % stmt_ast)\n # determine the 
program variables assigned on the left-hand-side\n targets: list = stmt_ast.targets\n # extract names - for now just care about normal program variables, not attributes or functions\n logger.log.info(\"Extracting list of assignment target names\")\n target_names: list = []\n for target in targets:\n target_names += extract_symbol_names_from_target(target)\n logger.log.info(\"List of all program variables changed is %s\" % target_names)\n # extract function names\n assigned_value = stmt_ast.value\n function_names: list = extract_function_names(assigned_value)\n logger.log.info(\"List of all program functions called is %s\" % function_names)\n # merge the two lists of symbols\n logger.log.info(\"Merging lists of assignment target names and function names\")\n all_symbols: list = target_names + function_names\n logger.log.info(\"List of all symbols to mark as changed in the symbolic state is %s\" % all_symbols)\n # set up a SymbolicState instance\n logger.log.info(\"Instantiating new StatementSymbolicState instance with all_symbols = %s\" % all_symbols)\n symbolic_state: SymbolicState = StatementSymbolicState(all_symbols, stmt_ast)\n return symbolic_state", "def assign(self, dst, req, src):\n if req == 'null':\n return\n if req in ('write', 'inplace'):\n dst[:] = src\n elif req == 'add':\n dst[:] += src", "def assign(self, name, values):\n self._assignments[name] = values", "def lhs(self):\n if not self.is_assign():\n raise AssertionError('Not an assignment')\n return self.var.name", "def _parse_initial_assignments(self, model, comp, node):\n node = dom_child(node, 'initialAssignment')\n while node:\n var = str(node.getAttribute('symbol')).strip()\n var = self._convert_name(var)\n if var in comp:\n self.log('Parsing initial assignment for \"' + var + '\".')\n var = comp[var]\n expr = parse_mathml_rhs(dom_child(node, 'math'), comp, self)\n if var.is_state():\n # Initial value\n var.set_state_value(expr, default=True)\n else:\n # Change of value\n var.set_rhs(expr)\n else:\n raise SBMLError('Initial assignment found for unknown'\n ' parameter <' + var + '>.')\n node = dom_next(node, 'initialAssignment')", "def ccode(expr, assign_to=None, **settings):\n\n return CCodePrinter(settings).doprint(expr, assign_to)", "def assign(self, *args):\n return _ida_hexrays.cnumber_t_assign(self, *args)", "def assign(self, *args):\n return _ida_hexrays.cloop_t_assign(self, *args)" ]
[ "0.7246561", "0.6989228", "0.6973773", "0.6966724", "0.6946717", "0.6939313", "0.6896663", "0.6890361", "0.6872121", "0.67357904", "0.6725393", "0.6725393", "0.6697676", "0.66803247", "0.6641363", "0.6609404", "0.6593395", "0.65624285", "0.648081", "0.63984084", "0.638135", "0.6370385", "0.63329434", "0.6301359", "0.6300919", "0.62792206", "0.6184669", "0.6113256", "0.6111942", "0.6094138", "0.60292476", "0.60227233", "0.6016635", "0.60143745", "0.6010736", "0.60078496", "0.598719", "0.5971656", "0.5913546", "0.59050363", "0.58523875", "0.58242583", "0.57918686", "0.57538486", "0.56828254", "0.5643862", "0.5611143", "0.5606789", "0.55954784", "0.556235", "0.556235", "0.556235", "0.556235", "0.55534726", "0.54643697", "0.5430684", "0.54302996", "0.5422592", "0.53969264", "0.53852004", "0.53438985", "0.53427166", "0.523756", "0.5229776", "0.521692", "0.5199376", "0.5132912", "0.5115573", "0.5109514", "0.5095962", "0.5090958", "0.50900066", "0.5089743", "0.50852233", "0.50845695", "0.5083692", "0.5054273", "0.50271887", "0.50063026", "0.4946234", "0.4944306", "0.4935681", "0.4901942", "0.49008012", "0.48777455", "0.48573682", "0.48569155", "0.48437238", "0.48346645", "0.48259935", "0.48068684", "0.47955593", "0.47856075", "0.4764168", "0.47557786", "0.47005153", "0.46991044", "0.46965164", "0.46759537", "0.46635556" ]
0.74569654
0
statement = ifStatement | whileStatement | assign
def statement( ):  # parse routine for statement that makes sure the token is one of the following; eventually an error will be caught
	tok = tokens.peek( )
	if debug: print( "statement: ", tok )
	if tok == "if":
		stat = parseIfStatement( )
		return stat
	elif tok == "while":
		stat = parseWhileStatement( )
		return stat
	else:
		stat = parseAssign( )
		return stat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stmt_if(executor, stmt):\n e = Expression()\n result = e.eval(stmt._tokens, symbols=executor._symbols)\n if not result:\n executor.goto_next_line()", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def link_while_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.body)", "def statement_eval(node, table):\n\n if node.kind == \"MOD_OP\":\n table = mod_op_eval(node, table)\n\n elif node.kind == \"SWAP_OP\":\n table = swap_op_eval(node, table)\n\n elif node.kind == \"FROM_LOOP\":\n block_node = node.block\n\n # TODO: check start condition\n\n while True:\n # Execute the block.\n table = block_eval(block_node, table)\n\n # Break if the end condition is satisfied.\n if expr_eval(node.end_condition, table):\n break\n\n elif node.kind == \"FOR_LOOP\":\n var_dec = node.var_declaration\n until_node = node.end_condition\n increment_node = node.increment_statement\n\n # Initialize the variable.\n table[var_dec.name] = expr_eval(var_dec.expr, table)\n\n while True:\n # Execute the block and increment statement.\n if not node.inc_at_end:\n table = mod_op_eval(increment_node, table)\n \n table = block_eval(node.block, table)\n\n if node.inc_at_end:\n table = mod_op_eval(increment_node, table)\n\n # Break if the end condition is satisfied.\n if table.refs[until_node.name] == expr_eval(until_node.expr, table):\n break\n\n table = var_condition_eval(until_node, table)\n\n elif node.kind == \"IF\":\n # Check the condition; if it fails, execute the\n # 'false' branch if it exists.\n\n if expr_eval(node.condition, table):\n table = block_eval(node.true, table)\n elif \"false\" in node.data:\n table = block_eval(node.false, table)\n\n elif node.kind == \"DO/UNDO\":\n # Do the action_block, then do the yielding block,\n # then undo the action block.\n table = block_eval(node.action_block, table)\n\n if \"yielding_block\" in node.data:\n table = block_eval(node.yielding_block, table)\n\n table = block_eval(inverter.unblock(node.action_block), table)\n\n elif node.kind == \"RESULT\":\n # Overwrites the variable 'result' with the given expression.\n table[\"result\"] = expr_eval(node.expr, table)\n\n elif node.kind == \"VAR_DEC\":\n table[node.name] = expr_eval(node.expr, table)\n\n elif node.kind == \"VAR_CONDITION\":\n table = var_condition_eval(node, table)\n\n elif node.kind == \"BLOCK\":\n table = block_eval(node, table)\n\n elif node.kind == \"FUNCTION_CALL\":\n # Call the function, then update table with the results.\n function = shared.program.functions[node.name]\n\n output = function.evaluate(\n node.backwards,\n node.ref_args,\n [expr_eval(arg, table) for arg in node.ref_args],\n [expr_eval(arg, table) for arg in node.const_args]\n )\n\n # After evaluating the function, the output table will\n # contain changed variables.\n table.update_refs(output)\n\n elif node.kind == \"UN\":\n inverted_node = inverter.unstatement(node.statement)\n table = statement_eval(inverted_node, table)\n\n elif node.kind == \"EXIT\":\n if expr_eval(node.condition, table):\n # We return by raising an exception.\n raise shared.ReturnException(expr_eval(node.value, table))\n\n elif node.kind == \"ENTER\":\n # Do nothing when we actually encounter these.\n pass\n\n return table", "def conditional(self) -> global___Statement.Conditional:", "def _While(self, t):\n self.fill(\"while (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n 
self.RaiseError(t, \"While else not supported\")", "def switch(cond, ift, iff):", "def visit_while(self: Parser, node: doc.While) -> None:\n with self.var_table.with_frame():\n cond = self.eval_expr(node.test)\n with T.While(cond):\n self.visit_body(node.body)", "def test_45_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:real; begin x:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,445))", "def _analyse_stmt_While(self, statement: ast.While, *, next: CFNode) -> CFNode:\n # Analyse the else branch.\n else_node = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the body.\n dummy_node = self._dummy_node()\n with self._updated_context(break_=next, continue_=dummy_node):\n body_node = self._analyse_statements(statement.body, next=dummy_node)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=body_node)\n else:\n branches.update(else_=else_node)\n else:\n branches.update(enter=body_node, else_=else_node, error=self._raise)\n\n loop_node = self._ast_node(statement, **branches)\n self._graph.collapse_node(dummy_node, loop_node)\n return loop_node", "def test_46_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:array[1 .. 3]of real; begin x[1]:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,446))", "def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False", "def toggle(condition, if_true, if_false):\n return (if_true if condition else if_false)", "def test_42_while(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\twhile(1+true) do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Expression: BinaryOp(+,IntLiteral(1),BooleanLiteral(True))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,442))", "def __init__(self, depth, condition, body_statement):\n super(WhileStatement, self).__init__(depth)\n self.condition = condition\n self.body_statement = body_statement", "def asserter(stmt):\n if not stmt:\n raise AssertionError('CheckReadBuffer case failed')", "def test_30_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=1; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,430))", "def test_50_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 
2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,450))", "def link_if_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.true_body)\n if stmt.false_body is not None:\n self.link_stmt(stmt.false_body)", "def Assignment(self):\n id = self.primary()\n if self.currtok[1].name == \"DECLERATION\":\n self.currtok = next(self.tg)\n if self.functions.get(self.currtok[0]) is not None:\n\n express = self.FunctionCall()\n return assignmentStmt(id, express)\n else:\n express = self.Expression()\n\n if self.currtok[1].name == \"SEMI\":\n self.currtok = next(self.tg)\n return assignmentStmt(id, express)\n raise SLUCSyntaxError(\"ERROR: Missing Semicolon on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing assignment on line {0}\".format(str(self.currtok[2] - 1)))", "def irgen_assign(stmt, builder, table):\n lvalue = irgen_lvalue(stmt.exprs[0], builder, table)\n expr = irgen_expr(stmt.exprs[1], builder, table)\n builder.store(expr, lvalue)", "def test_28_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(None)\"\n\t\tself.assertTrue(TestChecker.test(input,expect,428))", "def parseIfStatement( ): # parse rountine for the if and uses the if class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"ifStatement: \", tok )\n\tstart = match( \"if\" )\n\texpr = expression( )\n\tblk = parseBlock( )\n\telseblk = None\n\ttok = tokens.peek( )\n\tif tok == \"else\":\n\t\tmatch( \"else\" )\n\t\telseblk = parseBlock( )\n\treturn ifStatement(expr, blk, elseblk)", "def test_29_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return 1; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,429))", "def multi_statement() -> None:\n pass; print(\"hello\")", "def test_47_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 
3]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,447))", "def visit_if(self: Parser, node: doc.If) -> None:\n with self.var_table.with_frame():\n with T.If(self.eval_expr(node.test)):\n with T.Then():\n with self.var_table.with_frame():\n self.visit_body(node.body)\n if node.orelse:\n with T.Else():\n with self.var_table.with_frame():\n self.visit_body(node.orelse)", "def test_27_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=0; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,427))", "def test_41_while(self):\n\t\tinput = \"\"\"function foo():integer; var a:integer; begin\n\t\twhile(1) do begin end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: While(IntLiteral(1),[])\"\n\t\tself.assertTrue(TestChecker.test(input,expect,441))", "def stmts(obj, next, token):\n while token is not EOF:\n token = assignlist(obj, next, token)", "def parseAssign( ): # parse rountine for the assign and uses the assign class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"assign: \", tok )\n\tif re.match( Lexer.identifier, tok ):\n\t\tident = VarRef( tok )\n\telse: \n\t\terror( \"Invalid identifier\" )\n\ttok = tokens.next( )\n\tequals = match( \"=\" )\n\ttok = tokens.peek( )\n\texpr = expression( )\n\tmatch( \";\" )\n\tequals = VarRef( equals )\n\tstatement = assign( equals, ident, expr )\n\treturn statement", "def process_stmt(self, stmt):\n if isinstance(stmt, (ast.While, ast.If)):\n self.process_branch(stmt)\n elif isinstance(stmt, ast.Expr):\n self.process_expr(stmt)\n elif isinstance(stmt, ast.Assign):\n self.process_assign(stmt)\n elif isinstance(stmt, ast.Break):\n self.breaks.append(self.curr_block)\n elif isinstance(stmt, ast.Continue):\n self.continues.append(self.curr_block)\n else:\n # self.replacer.visit(stmt)\n # Append a normal statement to the current block\n self.curr_block.add(stmt)", "def _apply_if_statement(statement: ast.If) -> None:\n for child in ast.iter_child_nodes(statement):\n if isinstance(child, ast.If):\n if child in statement.orelse:\n setattr(statement, 'wps_if_chained', True) # noqa: B010\n setattr(child, 'wps_if_chain', statement) # noqa: B010", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)", "def 
_analyse_stmt_If(self, statement: ast.If, *, next: CFNode) -> CFNode:\n # Analyse both branches unconditionally: even if they're not reachable,\n # they still need to exist in the graph produced.\n if_branch = self._analyse_statements(statement.body, next=next)\n else_branch = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=if_branch)\n else:\n branches.update(else_=else_branch)\n else:\n branches.update(enter=if_branch, else_=else_branch, error=self._raise)\n\n return self._ast_node(statement, **branches)", "def while_do(condition: Callable[[Any], bool], source: ObservableBase) -> ObservableBase:\n from ..operators.observable.whiledo import while_do\n return while_do(condition, source)", "def with_if_statement():\n if c():\n return t()\n else:\n return f()", "def _analyse_stmt_AnnAssign(\n self, statement: ast.AnnAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def gen_while(self, stmt: statements.While) -> None:\n condition_block = self.builder.new_block()\n body_block = self.builder.new_block()\n final_block = self.builder.new_block()\n self.break_block_stack.append(final_block)\n self.continue_block_stack.append(condition_block)\n self.builder.emit_jump(condition_block)\n self.builder.set_block(condition_block)\n self.gen_condition(stmt.condition, body_block, final_block)\n self.builder.set_block(body_block)\n self.gen_stmt(stmt.body)\n self.builder.emit_jump(condition_block)\n self.builder.set_block(final_block)\n self.break_block_stack.pop()\n self.continue_block_stack.pop()", "def visit_Assign(self, node):\n assign_stmts = []\n value = node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? 
execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node", "def eval_assignment(assignment, motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None", "def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")", "def eval_statement(self, line):\n if line[0] in self.env:\n self.env[line[0]](line[1::])\n elif line[1] == \"=\":\n self.assign_to_env(line)\n else:\n print(\"ERROR: Undefined function {}\".format(line[0]))\n quit()", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def IF(logical_statement, expression_true, expression_false):\n if(type(logical_statement) == bool):\n if(logical_statement == True):\n return(expression_true)\n else:\n return(expression_false)\n else:\n print('Invalid type: logical statement does not evaluate to True or False.')", "def _process_if(self, node):\n creg = node.children[0].name\n cval = node.children[1].value\n self.backend.set_condition(creg, cval)\n self._process_node(node.children[2])\n self.backend.drop_condition()", "def compile_while(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n 
self.tokenizer.advance() # ignore 'while' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.code_writer.write_label(lab1)\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.code_writer.write_if(lab2)\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.compile_statements()\r\n self.code_writer.write_goto(lab1)\r\n self.code_writer.write_label(lab2)", "def WhileStatement(self):\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n express = self.Expression()\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n state = self.Statement()\n return whileStmt(express, state)\n raise SLUCSyntaxError(\"ERROR: Missing right paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))", "def test_49_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 2] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,449))", "def holds(self,assignment):\n return self.condition(*tuple(assignment[v] for v in self.scope))", "def _compile1_next_statement_with_mods(statements: Sequence[Statement]) -> (IFElement, int):\n main_statement = statements[0]\n\n if main_statement.matches_pattern(TEXT_PATTERN):\n ifelement = IFText(main_statement.get_atoms()[2].get_value())\n elif main_statement.matches_pattern(TITLE_PATTERN):\n ifelement = IFHeader(main_statement.get_atoms()[2].get_value(), 1)\n elif main_statement.matches_pattern(SUBTITLE_PATTERN):\n ifelement = IFHeader(main_statement.get_atoms()[2].get_value(), 2)\n elif main_statement.matches_pattern(IMAGE_PATTERN):\n ifelement = IFImage(main_statement.get_atoms()[3].get_value())\n elif main_statement.matches_pattern(TABLE_PATTERN):\n ifelement = IFImage(\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAB3RJTUUH4wQBBhwE9K/n3wAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAAD+SURBVGje7drRCsMgDIXhRH3/J27sLgplUFq1bazC7+1EzqfCEje1bDLzCDL5AAAAAAAAAAAAAAAAAAAA7kE1cAIAPEZ6ZZU1r64pY4yOgBii9zYvyyIiGvT9K9QhvYiklFxOYE/v9LxXszth6vT3Ad3SF9cPU6e/AxgqfTNgtPRtgAHTNwDGTF8LGDZ9FWDk9GXA4OkLpcT/d6FrzfNkd77vBx6ebbmY20rZYqlYnHYs7lW1R0Nz0UzcmEZL2X4CZrbtbl7zxbTrT48PJGbGCQAAAAAAAADTAM5qrRced73/Nritf1aqcIUAAAAAoFdTv/9WXt89dngE4Ap9PX4iDmnCXGGMIwAAAABJRU5ErkJggg==\")\n elif main_statement.matches_pattern(DESK_PATTERN):\n ifelement = 
IFImage(\"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAEAAAABACAIAAAAlC+aJAAAACXBIWXMAAC4jAAAuIwF4pT92AAAAB3RJTUUH4wQBBh85h+r4DQAAABl0RVh0Q29tbWVudABDcmVhdGVkIHdpdGggR0lNUFeBDhcAAAImSURBVGje7VrRjgMhCBT0/7+4ah9MLmYVRcBt9+o+XC7XFWcEgeEKMUX35Afdw59D4BBQPsHWnEfPf9kkfwTzI2HCWqJ67sAhYAAU8HjgEPjeOpBT7v7OXzXJud5vJHDJ6N77GFmlIATu7q/XyzkHCO1HoCyHVvWIw6FLIJigv5yCR59zTjlRqwyrddiB3jkXYxzEErVW4NuwAz0zvjnoY4pjJrgDvbLn4/hH7oGBdZP2eAn9sgdWre9Gv+aBrnVO2KzmnKXTCUr0483aF6b3Z9W3+Gj0LA8MImccPzegnxOgrAs26y7RZ4XArORbex5NTvu8HlBm5DDVnTlPunYA4LxGLaTE7kAS1G3iLymycmCcTwXesCHQ7eDrlriLrLzw52IAGCuyiz6kZEO7i1DQdPXoEsRvFPV3QtQSKNpq4O4yXKCyRCdzA059+ENzoZ8nwCxDxwP/l4DFaPGjKRUffQFUHkDAukTUeb0VD5qeOec8KBpBfPC10ZqJYGzBiVKKA4rR1+W522a5lTmu+KYFWdAXc+UndROs0E8aE5UajJE/U3lSHbgN/XIIjUWZlU7fSKCdSbVYb/un0+mFDoHtd6CdQZSUz5wjTAtTsa+0huM0b6VcBZXEJoS2tsqXDso+hEoL1e5R9w7dPqL9I9Vu8DlQkUZ+1aDdEgA4M0OAydcXuu2gsQeUSoWzvD54BBRzOHXgENiShVJORfJ2MykVr+Kbo7nEb2qMSvm3VR8cAAAAAElFTkSuQmCC\")\n elif main_statement.matches_pattern(HTML_REGEX_PATTERN):\n ifelement = IFElement('<a href=\"https://stackoverflow.com/a/1732454\">H̸̡̪̯ͨ͊̽̅̾̎Ȩ̬̩̾͛ͪ̈́̀́͘ ̶̧̨̱̹̭̯ͧ̾ͬC̷̙̲̝͖ͭ̏ͥͮ͟Oͮ͏̮̪̝͍M̲̖͊̒ͪͩͬ̚̚͜Ȇ̴̟̟͙̞ͩ͌͝S̨̥̫͎̭ͯ̿̔̀ͅ</a>')\n else:\n raise ValueError(f\"Unknown statement: {main_statement}\")\n\n mods = []\n for statement in statements[1:]:\n if statement.get_atoms()[0].get_atom_type() == AtomType.KEYWORD \\\n and statement.get_atoms()[0].get_value() == parser.WITH:\n mods.append(statement)\n else:\n break\n\n for mod in mods:\n build_modifier(mod).get_fun()(ifelement) # Uses side effects :(\n\n return ifelement, len(mods) + 1", "def whilestmt(self, w):\n invs = self.assemble_invariants(w)\n b_mid = self.flatten([Tree('assume', [w[0]]), w[-1], Tree('assert', invs), Tree('assume', [Tree('const_false', [])])])\n b = self.flatten([Tree('assert', invs),\n self.assemble_havoc(w),\n Tree('assume', invs),\n Tree('wpor', [Tree('block', b_mid), Tree('assume', self._not(w[0]))])])\n return b", "def test_statement_initialized_by_assignment():\n shap = Statement(shape_id=\"@photo\", prop_id=\"dcterms:creator\", value_type=\"URI\")\n shap2 = Statement()\n shap2.shape_id = \"@photo\"\n shap2.prop_id = \"dcterms:creator\"\n shap2.value_type = \"URI\"\n assert shap == shap2", "def parseWhileStatement( ): # parse rountine for while and uses the while class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"whileStatement: \", tok )\n\tstart = match( \"while\" )\n\texpr = expression( )\n\tblk = parseBlock( )\n\ttok = tokens.peek( )\n\twhileString = whileStatement( start, expr, blk )\n\treturn whileString", "def switch(condition, then_expression, else_expression):\n x_shape = copy.copy(then_expression.get_shape())\n x = tf.cond(tf.cast(condition, 'bool'),\n lambda: then_expression,\n lambda: else_expression)\n x.set_shape(x_shape)\n return x", "def _IfExp(self, t):\n self.dispatch(t.test)\n self.write(\" ? 
\")\n self.dispatch(t.body)\n self.write(\" : \")\n self.dispatch(t.orelse)", "def _analyse_stmt_AugAssign(\n self, statement: ast.AugAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def test_48_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 2] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,448))", "def switch(cond, ift, iff):\n if (cf.use_theano and (isinstance(cond, theano.graph.basic.Variable)\n or isinstance(ift, theano.graph.basic.Variable)\n or isinstance(iff, theano.graph.basic.Variable))):\n return T.switch(cond, ift, iff)\n else:\n return np.where(cond, ift, iff)", "def if_then(condition: Callable[[], bool], then_source: ObservableBase,\n else_source: ObservableBase = None) -> ObservableBase:\n from ..operators.observable.ifthen import if_then\n return if_then(condition, then_source, else_source)", "def _set_logical_op(self, condition, incr):\n c1 = [\"@SP\", \"A=M\", \"D=D-M\"]\n c2 = [\"@TRUE{i}\" .format(i=incr)]\n c3 = [\"D;{c}\".format(c=condition)]\n c4 = [\"(FALSE{i})\".format(i=incr)]\n c5 = self._set_stack(0)\n c6 = [\"@ACOND{i}\".format(i=incr)]\n c7 = [\"0;JMP\"]\n c8 = [\"(TRUE{i})\".format(i=incr)]\n c9 = self._set_stack(-1)\n c10 = [\"(ACOND{i})\".format(i=incr)]\n return c1 + c2 + c3 + c4 + c5 + c6 + c7 + c8 + c9 +c10", "def assign_when(lhs, rhs, conditions):\n for nd_index in np.ndindex(lhs.shape):\n if conditions[nd_index]:\n lhs[nd_index] = rhs[nd_index]", "def IfStatement(self):\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n express = self.Expression()\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n state = self.Statement()\n if self.currtok[1].name == \"else\":\n self.currtok = next(self.tg)\n state2 = self.Statement()\n return ifelseStmt(express, state, state2)\n else:\n return ifStmt(express, state)\n raise SLUCSyntaxError(\"ERROR: Missing right paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))", "def while_loop_op(op):\n return (control_flow_util.IsLoopSwitch(op) or\n 
control_flow_util.IsLoopMerge(op) or\n control_flow_util.IsLoopEnter(op) or\n control_flow_util.IsLoopExit(op) or\n TensorTracer.loop_cond_op(op) or\n op.type in ('RefNextIteration', 'NextIteration'))", "def ifelse(test, if_true, if_false):\n if test:\n return if_true\n else:\n return if_false", "def test_if_elseif_and_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif awesome.sauce[1] and blue and 'hello'}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif awesome.sauce[1] and blue and 'hello' %}\\nfoo{% endif %}\"", "def test_31_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\tif a>0 then return 0;\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,431))", "def eliminate_ifones(body):\n def isifone(tree):\n if type(tree) is If:\n if type(tree.test) is Num: # TODO: Python 3.8+: ast.Constant, no ast.Num\n if tree.test.n == 1:\n return \"then\"\n elif tree.test.n == 0:\n return \"else\"\n elif type(tree.test) is NameConstant: # TODO: Python 3.8+: ast.Constant, no ast.NameConstant\n if tree.test.value is True:\n return \"then\"\n elif tree.test.value in (False, None):\n return \"else\"\n return False\n\n def optimize(tree): # stmt -> list of stmts\n t = isifone(tree)\n if t:\n branch = tree.body if t == \"then\" else tree.orelse\n return branch\n return [tree]\n\n return transform_statements(optimize, body)", "def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts", "def convert_while(self, condition):\n\n # Run super definition\n condition = super().convert_while(condition)\n\n # Make while template\n while_template = \"while {cond}:\"\n\n # Replace logical operators\n condition = self.replace_logical_ops(condition, direction=\"from\")\n\n # Return converted if statement\n return [while_template.format(cond=condition)], []", "def with_if_function():\n return if_function(c(), t(), f())", "def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})", "def _ifelse(self):\n debug.show(\"ifelse:Stack = \" + str(self.opStack))\n if self.opStack.size() >= 3:\n falseCode = check.isCode(self.opStack.pop()) # Make sure it is code (a list)\n trueCode = check.isCode(self.opStack.pop()) # Make sure it is code (a list)\n if check.isBool(self.opStack.pop()):\n debug.show(\"ifelse:True\")\n self.evaluate(trueCode)\n else:\n debug.show(\"ifelse:False\")\n self.evaluate(falseCode)\n else:\n debug.err(\"not enough items on the stack\")\n return None", "def eval_if_else(item, motif_node_dict):\n # evaluate the `if` branch first\n true_branch = item.iftrue\n if type(true_branch).__name__ == 'FuncCall':\n motif_node, left = eval_function_call(true_branch, motif_node_dict) \n elif type(true_branch).__name__ == 'Assignment':\n left = eval_assignment(true_branch, motif_node_dict)\n elif type(true_branch).__name__ == 'Decl':\n left = eval_declaration(true_branch, motif_node_dict)\n elif 
type(true_branch).__name__ == 'Return':\n left = eval_return(true_branch, motif_node_dict)\n elif type(true_branch).__name__ == 'Compound':\n left = eval_function_body(true_branch, motif_node_dict)\n else:\n left = None\n # evaluate the `else` branch if it exists\n false_branch = item.iffalse\n if type(false_branch).__name__ == 'FuncCall':\n motif_node, right = eval_function_call(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Assignment':\n right = eval_assignment(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Decl':\n right = eval_declaration(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Return':\n right = eval_return(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'Compound':\n right = eval_function_body(false_branch, motif_node_dict)\n elif type(false_branch).__name__ == 'If': # else if case\n right = eval_if_else(false_branch, motif_node_dict)\n else:\n right = None\n\n if left or right:\n # only under certain circumstances do we actually create alternation node\n if eval_if_condition(item.cond):\n return provenance.create_alternation_node(left, right)\n else:\n # if only one branch is not None, we need not create a group node\n if not left:\n return right\n if not right:\n return left\n return provenance.create_group_node(left, right)\n else:\n return None", "def Switch(*args):\n arg_list = list(args)\n arg_list.reverse()\n #\n while arg_list:\n cond, expr = arg_list.pop(), arg_list.pop()\n if cond:\n return expr\n return None", "def compile_while(self):\r\n start_label = \"WHILE_\" + str(self.__while_count)\r\n end_label = \"WHILE_END_\" + str(self.__while_count)\r\n self.__while_count += 1\r\n self.__advance(n=2) # Advance after the '(' token\r\n self.__vmwriter.write_label(start_label)\r\n self.compile_expression()\r\n self.__advance(n=2) # Advance after the '{' token\r\n self.__vmwriter.write_arithmetic(\"not\")\r\n self.__vmwriter.write_if(end_label)\r\n self.compile_statements()\r\n self.__advance() # Advance after the '}' token\r\n self.__vmwriter.write_goto(start_label)\r\n self.__vmwriter.write_label(end_label)", "def __EvaluateIf(self, countIf, line):\n countIf = countIf - 1\n i = self.__ifs[countIf]\n i.SetLinePointer(self.__linePointer)\n #s = self.ScanIfCond(self.__oc.GermanUmlautReplace(line))\n s = self.ScanIfCond(line)\n if s:\n i.Set(s[0])\n try:\n i.Eval()\n line = ''\n except:\n raise Core.Error.IfHasNoEndif(0, 'IF-EXPRESSION %i HAS HAD AN ERROR:' \\\n ' EITHER NO CORRESPONDING (endif) OR SYNTAX ERROR'\n % countIf)\n l1, l2 = i.GetNextLine(), line\n return l1, l2", "def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)", "def swap(self, node):\n new_node = self.choose_statement()\n\n if isinstance(new_node, ast.stmt):\n # The source `if P: X` is added as `if P: pass`\n if hasattr(new_node, 'body'):\n new_node.body = [ast.Pass()]\n if hasattr(new_node, 'orelse'):\n new_node.orelse = []\n if hasattr(new_node, 'finalbody'):\n new_node.finalbody = []\n\n # ast.copy_location(new_node, node)\n return new_node", "def test_if_variable_statement():\n r = convert_code(\n \"{if $foo}\\nbar\\n{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% endif %}\"", "def ifelse(condition, 
then_branch, else_branch, name=None, outshape=None):\n # First check if we can replace an Theano conditional by a Python one\n if is_theano_object(condition) and is_constant(condition):\n condition = bool(condition.data)\n\n # Now the actual function\n if (cf.use_theano\n and not isinstance(condition, builtins.bool)\n and (isinstance(condition, theano.graph.basic.Variable)\n or isinstance(then_branch, theano.graph.basic.Variable)\n or isinstance(else_branch, theano.graph.basic.Variable))):\n # Theano function\n if isinstance(then_branch, LazyEval):\n then_branch = then_branch.eval()\n if isinstance(else_branch, LazyEval):\n else_branch = else_branch.eval()\n if outshape is None:\n # We call `bool` on the condition, in case it's a Python boolean\n # (even shim.ge & friends can return bools)\n return theano.ifelse.ifelse(bool(condition), then_branch,\n else_branch, name)\n else:\n return theano.ifelse.ifelse(bool(condition), then_branch.reshape(outshape),\n else_branch.reshape(outshape), name)\n else:\n # Python function\n if condition:\n if isinstance(then_branch, LazyEval):\n then_branch = then_branch.eval()\n return then_branch\n else:\n if isinstance(else_branch, LazyEval):\n else_branch = else_branch.eval()\n return else_branch", "def _Assign(self, t):\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")", "def _in_while_loop(control_flow_node_map, op_name):\n return op_name in control_flow_node_map and \"LoopCond\" in control_flow_node_map[op_name]", "def compile_while(self) -> None:\n self._consume('while')\n self._consume('(')\n\n while_lbl = f\"WHILE_{self._while_count}\"\n while_false_lbl = f\"WHILE_FALSE{self._while_count}\"\n self._while_count += 1\n self.writer.write_label(while_lbl)\n\n self.compile_expression()\n self._consume(')')\n\n self._consume('{')\n self.writer.write_if(while_false_lbl)\n\n self.compile_statements()\n self.writer.write_goto(while_lbl)\n self.writer.write_label(while_false_lbl)\n\n self._consume('}')", "def syntax_while():\n i = 0\n while i < 5:\n print(i)\n i += 1\n\n ## Output\n # 0\n # 1\n # 2\n # 3\n # 4", "def ifop(stream: t.List[str]) -> AST:\n n, last = expr(stream)\n if not stream[n:] or stream[n] not in (\"=>\", \"<=>\"):\n raise SyntaxError(\"Expected => or <=>.\")\n if not stream[n + 1 :]:\n raise SyntaxError(\"Expected expression.\")\n m, rast = expr(stream[n + 1 :])\n if stream[n + 1 + m :]:\n raise SyntaxError(\"Unexpected character '{}'.\".format(stream[n + 1 + m]))\n return AST(stream[n], [last, rast])", "def visit_WhileNode(self, node: WhileNode, symbol_table: SymbolTable) -> None:\n while True:\n if self.visit(node.cond, symbol_table).value == 0:\n break\n else:\n for expr in node.body:\n if expr is not None:\n if isinstance(expr, ReturnNode):\n return expr\n res = self.visit(expr, symbol_table)\n if isinstance(res, ReturnNode):\n return res", "def _visit_loop_body(self, node, if_block=None, is_for=None):\n loop_name = \"for\" if is_for else \"while\"\n if if_block:\n node.if_block = if_block\n else:\n node.if_block = self.flow.nextblock(label=\"%s_body\" % loop_name,\n pos=node.body[0])\n self.visitlist(node.body)\n 
self.flow.loops.pop()\n\n if self.flow.block:\n # Add back-edge\n self.flow.block.add_child(node.cond_block)\n\n # Else clause\n if node.orelse:\n node.else_block = self.flow.nextblock(\n parent=node.cond_block,\n label=\"else_clause_%s\" % loop_name,\n pos=node.orelse[0])\n self.visitlist(node.orelse)\n if self.flow.block:\n self.flow.block.add_child(node.exit_block)\n else:\n node.cond_block.add_child(node.exit_block)\n\n self.exit_block(node.exit_block, node)", "def test_32_if(self):\n\t\tinput = \"\"\"function foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a=0 then return 1; else return a; end\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,432))", "def onStatement(self, match):\n\t\treturn self.process(match[0])", "def compile_statements(self):\n\t\n\t\tif self.tokenizer.get_token() == 'do':\n\t\t\tself.compile_do()\n\t\telif self.tokenizer.get_token() == 'let':\n\t\t\tself.compile_let()\n\t\telif self.tokenizer.get_token() == 'while':\n\t\t\tself.compile_while()\n\t\telif self.tokenizer.get_token() == 'return':\n\t\t\tself.compile_return()\n\t\telif self.tokenizer.get_token() == 'if':\n\t\t\tself.compile_if()", "def makeMove(self, movable_statement):\n ### Student code goes here\n\n if type(movable_statement) != Statement or not self.kb.kb_ask(Fact(movable_statement, [])):\n return None\n\n statement1_list = [\"located\"]\n statement2_list = [\"located\"]\n\n statement1_list.append(movable_statement.terms[0])\n statement1_list.append(movable_statement.terms[1])\n statement1_list.append(movable_statement.terms[2])\n\n statement2_list.append(movable_statement.terms[0])\n statement2_list.append(movable_statement.terms[3])\n statement2_list.append(movable_statement.terms[4])\n\n curr_fact1 = Fact(Statement(statement1_list), [])\n new_fact1 = Fact(Statement(statement2_list), [])\n\n curr_empty = parse_input(\"fact: (located empty \" + statement2_list[2].term.element + \" \" + statement2_list[3].term.element + \")\")\n statement3_list = statement1_list\n statement3_list[1] = \"empty\"\n new_empty = Fact(Statement(statement3_list), [])\n\n self.kb.kb_retract(curr_fact1)\n self.kb.kb_assert(new_fact1)\n self.kb.kb_retract(curr_empty)\n self.kb.kb_assert(new_empty)", "def take_until(condition):\n return partial(takewhile, pipe | condition | operator.not_)" ]
[ "0.6292355", "0.6017129", "0.58480483", "0.5790392", "0.57717943", "0.57585835", "0.5709609", "0.5649995", "0.56101805", "0.5588036", "0.5515717", "0.550607", "0.5487956", "0.5476171", "0.5472926", "0.5407076", "0.54064995", "0.54023397", "0.5397795", "0.5369516", "0.53665644", "0.5356292", "0.53379697", "0.5337857", "0.53210163", "0.5304556", "0.52869856", "0.5272078", "0.52514863", "0.5237886", "0.52334327", "0.5232052", "0.5191541", "0.51882935", "0.51882935", "0.51882935", "0.51882935", "0.51841515", "0.51841515", "0.5163122", "0.5149908", "0.5133773", "0.51270396", "0.512116", "0.511541", "0.5095344", "0.5091063", "0.5067198", "0.5063338", "0.5063338", "0.5038522", "0.5023301", "0.50105864", "0.50091237", "0.5004484", "0.5003966", "0.49933654", "0.49826887", "0.49786434", "0.49698663", "0.49392533", "0.4936682", "0.4923947", "0.49238136", "0.4906774", "0.48900455", "0.48889524", "0.48866093", "0.48523813", "0.48363066", "0.48333916", "0.48190072", "0.47890732", "0.4779241", "0.47773382", "0.47707787", "0.47541893", "0.47491735", "0.47400346", "0.47348434", "0.47334677", "0.4732153", "0.4731485", "0.47254813", "0.47251874", "0.47212684", "0.4720384", "0.47177678", "0.47041306", "0.47021046", "0.47013155", "0.4695704", "0.4688981", "0.46873304", "0.46854421", "0.46618676", "0.46542892", "0.46445578", "0.4643544", "0.4629396" ]
0.635177
0
stmtList = { statement }
def stmtList( ):
	tok = tokens.peek( )
	if debug: print( "stmtList: ", tok )
	stat = statement( )
	return stat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stmts_to_stmt(statements):\n if len(statements) == 1:\n return statements[0]\n array = FakeArray(statements, arr_type=pr.Array.NOARRAY)\n return FakeStatement([array])", "def __init__(self):\n self.Statement = []", "def parseStmtList( tokens ):\n\n\ttok = tokens.peek( )\n\tast = list( ) # list that keeps track of the all the returns from the parse rountines \n\twhile tok is not None:\n # need to store each statement in a list\n\t\tstatement = stmtList( )\n\t\tast.append( statement )\n\t\ttok = tokens.peek( )\t\t\n\treturn ast", "def _initialize_statements(self):\n return [SqlStatement(x) for x in self._raw_statements]", "def get_statement_list(self, insupdel=0):\n #NOTE: statement = [record, {...}]\n result = []\n try:\n if insupdel == StatementType.INSERT:\n statements = self.statements_insert\n elif insupdel == StatementType.UPDATE:\n statements = self.statements_update\n elif insupdel == StatementType.DELETE:\n statements = self.statements_delete\n if statements is not None:\n for statement in statements:\n result.append(statement[1])\n except Exception as ex:\n print \"Error retrieving statement list: \", ex\n return result", "def statements_to_txns(statement_list):\n new_list = [[stmt] for stmt in statement_list]\n return new_list", "def construct_statement(*args):\n\n INPUT_STATEMENT = \"\"\n for statement in args:\n INPUT_STATEMENT += statement\n \n\n return INPUT_STATEMENT", "def statements(civic_eid2997_statement, civic_aid6_statement):\n return [civic_eid2997_statement, civic_aid6_statement]", "def insert(statement: str) -> []:\n raise NotImplementedError", "def execute_list(self, stmt: List[loxStmtAST.Stmt]) -> None:\n for st in stmt:\n st.accept(self)", "def stmt2list(self, stmt):\n temp = ['%s\\n' % line for line in stmt.split('\\n')]\n return temp", "async def _insert_stmt(self):\n raise NotImplementedError", "def _sqllist(values):\n items = []\n items.append('(')\n for i, v in enumerate(values):\n if i != 0:\n items.append(', ')\n items.append(sqlparam(v))\n items.append(')')\n return SQLQuery(items)", "def prepare(self, connection, stmt):\n return Statement(connection, stmt)", "def prepare(self, connection, stmt):\n return Statement(connection, stmt)", "def statements(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement]:", "def execute_query_list(cur, conn, query_list):\n try:\n for query in query_list:\n cur.execute(query)\n conn.commit()\n except psycopg2.Error as e:\n print(\"Error executing query list\")\n print(e)", "def create_table_statements() -> [str]:\n pass", "async def _update_stmt(self):\n raise NotImplementedError", "def insert_(statement: str, db_conf) -> []:\n try:\n result = db_conf.execute(statement)\n if result:\n return result\n except psycopg2.IntegrityError:\n pass\n return []", "def query(mdx_stmt):", "def compile_statement(statements: List[Any]) -> List[Any]:\n properties = ['action', 'resource', 'notresource', 'notaction']\n for statement in statements:\n for statement_property in properties:\n if statement_property in statement:\n statement[statement_property] = [compile_regex(item) for item in statement[statement_property]]\n return statements", "def parse_statements(script):\n # pylint: disable=too-many-branches\n stmt = ''\n quote = None\n for char in script:\n if quote != '--':\n stmt += char\n if quote is None:\n if char == ';':\n yield stmt.strip()\n stmt = ''\n elif char == \"'\":\n quote = \"'\"\n elif char == '\"':\n quote = '\"'\n elif char == '$':\n quote = '$'\n elif char == '-':\n 
quote = '-'\n elif quote in ('\"', \"'\"):\n if quote == char:\n quote = None\n elif quote == '-':\n if char == '-':\n quote = '--'\n stmt = stmt[:-2]\n else:\n quote = None\n elif quote == '--':\n if char == '\\n':\n quote = None\n elif quote.startswith('$'):\n if quote != '$' and quote.endswith('$'):\n if stmt.endswith(quote):\n quote = None\n else:\n quote += char\n stmt = stmt.strip()\n if stmt:\n yield stmt", "def multi_statement() -> None:\n pass; print(\"hello\")", "def _analyse_statements(\n self, statements: List[ast.stmt], *, next: CFNode\n ) -> CFNode:\n for statement in reversed(statements):\n analyse = getattr(self, \"_analyse_stmt_\" + type(statement).__name__)\n next = analyse(statement, next=next)\n return next", "def doctest_DKBCCCsvStatementParser():", "def test_update_from_empty(self):\r\n ctx = {}\r\n col = columns.List(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement([1, 2, 3], [], ctx)\r\n\r\n #only one variable /statement should be generated\r\n assert len(ctx) == 1\r\n assert len(statements) == 1\r\n\r\n assert ctx.values()[0].value == [1, 2, 3]\r\n assert statements[0] == '\"TEST\" = :{}'.format(ctx.keys()[0])", "def insert_statement() -> str:\n pass", "def mk_sql_list(ls):\n res = \"(\" + ' '.join([str(elem) for elem in intersperse(\",\", ls)]) + \")\"\n return res", "def statements(self):\n return self._statements", "def register_sql_proceedures(self):", "def add_statement(rq_dict, statement, result_data_contents=\"graph\"):\n rq_dict[\"statements\"].append({\"statement\": statement})\n rq_dict[\"statements\"][-1][\"resultDataContents\"] = [result_data_contents]", "def sql(self):\n return ';\\n'.join([x.sql() for x in self._statements]) + ';'", "def gen_compound_statement(self, statement) -> None:\n for inner_statement in statement.statements:\n self.gen_stmt(inner_statement)", "def stmts(obj, next, token):\n while token is not EOF:\n token = assignlist(obj, next, token)", "def execute(self,sql):\n # self.results = self.execute_silent(sql)\n # return self.results\n # sql = self.format_sql(sql, **kwargs)\n sql_list = sql.split(';')\n for stmt in sql_list:\n if stmt:\n stmt = stmt.strip()\n if len(stmt) < 10:\n break\n result = self.execute_silent(stmt)\n #if result is not None,It's select stmt.\n if result:\n return result", "def Statements(self):\n states = list()\n while self.currtok[1].name in {\"SEMI\", \"LCURLY\", \"IDENT\", \"if\", \"print\", \"while\", \"return\"}:\n state = self.Statement()\n states.append(state)\n return StatementsStmt(states)", "def compile_statements(self):\n\t\n\t\tif self.tokenizer.get_token() == 'do':\n\t\t\tself.compile_do()\n\t\telif self.tokenizer.get_token() == 'let':\n\t\t\tself.compile_let()\n\t\telif self.tokenizer.get_token() == 'while':\n\t\t\tself.compile_while()\n\t\telif self.tokenizer.get_token() == 'return':\n\t\t\tself.compile_return()\n\t\telif self.tokenizer.get_token() == 'if':\n\t\t\tself.compile_if()", "def execute_block(self, stmt: List[loxStmtAST.Stmt], environment: loxenvironment.Environment) -> None:\n previous_env: loxenvironment.Environment = self.environment\n try:\n self.environment = environment\n for statement in stmt:\n self.execute(statement)\n finally:\n self.environment = previous_env", "def create_sql_write(api,timer):\n\n inserts = []\n\n for info in api:\n inserts.append('INSERT INTO DbDynamicInfo (number, status, available_bike_stands, '\n 'available_bikes, last_update) '\n 'VALUES (%d,\"%s\",%d,%d,\"%s\")'\n % (info[\"number\"], info[\"status\"], 
info[\"available_bike_stands\"],\n info[\"available_bikes\"], timer))\n\n return inserts", "def executemany(self, stmt, params):\n if params:\n if isinstance(params, (tuple, list, set)):\n newpar = []\n for par in params:\n newpar.append(convert_PROCEDURES(par))\n params = newpar\n elif isinstance(params, dict):\n for k, val in params.items():\n params[k] = convert_PROCEDURES(val)\n\n if stmt:\n exstmt = self.replacevals(stmt)\n if exstmt is None:\n return None\n #print exstmt\n #print params\n return super(MockCursor, self).executemany(convert_PROCEDURES(exstmt), params)\n return super(MockCursor, self).executemany(self._stmt, params)", "def _get_statement_object(db_stmt):\n return Statement._from_json(json.loads(db_stmt.json.decode('utf-8')))", "def prepare(self, stmt):\n exstmt = self.replacevals(stmt)\n self._stmt = convert_PROCEDURES(exstmt)", "def from_statement(cls, statement):\r\n return cls('\\n'.join(textwrap.dedent(statement).splitlines()[1:]))", "def __iter__(self):\n return iter(self._statements)", "def batch_execute(self, sql_list):\n with self.connection.cursor() as dbc:\n responses = []\n for sql in sql_list:\n dbc.execute(sql)\n responses.append(dbc.fetchall())\n return responses", "def __init__(self):\n self.insert_sql = \"\"\"INSERT INTO meetings (title, start_date, end_date) VALUES (%s, %s, %s) RETURNING id\"\"\"\n self.select_sql = \"\"\"SELECT * FROM meetings WHERE start_date >= %s AND end_date <= %s\"\"\"\n self.select_all_sql = \"\"\"SELECT * FROM meetings\"\"\"", "def final_statements(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement]:", "def build_sql_cmds(sql):\n\tsql_cmds = []\n\n\t# Sql for path table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"path\", column=\"strPath\"))\n\t# SQL for movie table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"movie\", column=\"c22\"))\n\t# SQL for episode table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"episode\", column=\"c18\"))\n\t# SQL for art table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"art\", column=\"url\"))\n\t# SQL for tvshow table\n\tsql_cmds.append(sql.format(db=VIDEO_DATABASE, table=\"tvshow\", column=\"c16\"))\n\n\treturn sql_cmds", "def sub_binds(sql_select):\n\n keywords = ['INNER','FROM','HAVING','WHERE',\"GROUP BY\",\", \"]\n\n (sql_command,binds) = tuple(sql_select)\n\n for b in binds: sql_command=sql_command.replace('?',repr(b),1)\n\n replace_dict = {x:('\\n\\t'+x) for x in keywords}\n\n print '\\n'+replacer(sql_command,replace_dict)+'\\n'", "def test_update_from_none(self):\r\n ctx = {}\r\n col = columns.List(columns.Integer, db_field=\"TEST\")\r\n statements = col.get_update_statement([1, 2, 3], None, ctx)\r\n\r\n #only one variable /statement should be generated\r\n assert len(ctx) == 1\r\n assert len(statements) == 1\r\n\r\n assert ctx.values()[0].value == [1, 2, 3]\r\n assert statements[0] == '\"TEST\" = :{}'.format(ctx.keys()[0])", "def verbatim(self, stmt, suppress=False):\n if not suppress:\n self.statements.append(stmt)\n\n return stmt", "def _execute(self, stmt) -> sa.engine.ResultProxy:\n return self._engine.execute(stmt)", "def gen_empty_statement(self, statement) -> None:\n pass", "def per_item_statements(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement]:", "def parseStmt(line):\n print(\"Statement\")\n index=0\n if line[0] == 's':\n print(\"Set\")\n index += 4\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseSet(cmds)\n elif line[0] == 
'h':\n exit()\n elif line[0] == 'j':\n index += 5\n if line[index] == ' ':\n print(\"Jumpt\")\n index += 1\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseJumpt(cmds)\n else:\n print(\"Jump\")\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseJump(cmds)\n else:\n print(\"Invalid Operation\")", "def _split_sql(self):\n return [stmt.strip() for stmt in RE_SQL_STATEMENT.split(self.sql) if stmt.strip()]", "def _load_statements(self):\n home = Path(\".\")\n context = {\"table_name\": self.TABLE}\n self.sql = {}\n for path in home.glob(\"./sql/*\"):\n with open(path) as f:\n template = Template(f.read().strip())\n self.sql[path.stem] = template.render(context)", "def __init__(self, sql=None, statements=None, filename=None):\n assert atmost_one(sql, statements, filename), 'Multiple initializer'\n\n if sql is None:\n sql = ''\n\n if filename:\n with open(filename, 'r') as f:\n sql = f.read()\n\n self._raw_sql = sql\n self._raw_statements = self._sanitize_sql()\n self._statements = self._initialize_statements()\n\n # Add the statements that the script was initialized from\n if statements:\n self.append(statements)", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def computeStatementOperation(self, trace_collection):", "def run_multiple_sql_statements(statements, fetch=True, cur=None, conn=None, commit=True):\n\n try:\n if conn is None:\n logger.error(\"Connection cannot be None.\")\n raise ValueError(\"Connection cannot be None.\")\n\n if cur is None:\n cur = conn.cursor()\n\n if statements is None:\n logger.error(\"Sql statement list is empty\")\n raise ValueError(\"Sql statement list is empty\")\n\n for _, statement in enumerate(statements):\n logger.debug(\"Executing SQL = \" + statement)\n res = cur.execute(statement)\n if fetch:\n data = cur.fetchall()\n else:\n data = None\n if commit:\n conn.commit()\n except Exception as exception:\n logger.error(exception)\n raise exception\n\n return (res, data)", "def insert_many_execute(self) -> None:\n self.connection.isolation_level = None\n self.cursor.execute('BEGIN TRANSACTION')\n for i in self.__sql_buffer.split(';'):\n self.cursor.execute(i)\n self.__sql_buffer = \"\"\n self.cursor.execute('COMMIT')", "async def query(self, stmt, *args):\n\n with (await self.application.db.cursor()) as cur:\n await cur.execute(stmt, args)\n return [self.row_to_obj(row, cur)\n for row in await cur.fetchall()]", "def statement_elements_in_statement(stmt):\n def search_stmt_el(stmt_el, stmt_els):\n stmt_els.append(stmt_el)\n while stmt_el is not None:\n if 
isinstance(stmt_el, pr.Array):\n for stmt in stmt_el.values + stmt_el.keys:\n stmt_els.extend(statement_elements_in_statement(stmt))\n stmt_el = stmt_el.next\n\n stmt_els = []\n for as_name in stmt.as_names:\n # TODO This creates a custom pr.Call, we shouldn't do that.\n stmt_els.append(pr.Call(as_name._sub_module, as_name,\n as_name.start_pos, as_name.end_pos))\n\n ass_items = chain.from_iterable(items for items, op in stmt.assignment_details)\n for item in stmt.expression_list() + list(ass_items):\n if isinstance(item, pr.StatementElement):\n search_stmt_el(item, stmt_els)\n elif isinstance(item, pr.ListComprehension):\n for stmt in (item.stmt, item.middle, item.input):\n stmt_els.extend(statement_elements_in_statement(stmt))\n elif isinstance(item, pr.Lambda):\n for stmt in item.params + item.returns:\n stmt_els.extend(statement_elements_in_statement(stmt))\n\n return stmt_els", "def pre_call(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement]:", "def _get_filtered_db_statements(db, get_full_stmts=False, clauses=None,\n not_duplicates=None, num_procs=1):\n if not_duplicates is None:\n not_duplicates = set()\n\n # Only get the json if it's going to be used. Note that if the use of the\n # get_full_stmts parameter is inconsistent in _choose_unique, this will\n # cause some problems.\n if get_full_stmts:\n tbl_list = [db.RawStatements.mk_hash, db.RawStatements.id,\n db.RawStatements.json]\n else:\n tbl_list = [db.RawStatements.mk_hash, db.RawStatements.id]\n\n db_s_q = db.filter_query(tbl_list, db.RawStatements.db_info_id.isnot(None))\n\n # Add any other criterion specified at higher levels.\n if clauses:\n db_s_q = db_s_q.filter(*clauses)\n\n # Produce a generator of statement groups.\n db_stmt_data = db_s_q.order_by(db.RawStatements.mk_hash).yield_per(10000)\n choose_unique_stmt = partial(_choose_unique, not_duplicates, get_full_stmts)\n stmt_groups = (list(grp) for _, grp\n in groupby(db_stmt_data, key=lambda x: x[0]))\n\n # Actually do the comparison.\n if num_procs is 1:\n stmts = set()\n duplicate_ids = set()\n for stmt_list in stmt_groups:\n stmt, some_duplicates = choose_unique_stmt(stmt_list)\n stmts.add(stmt)\n duplicate_ids |= some_duplicates\n else:\n pool = Pool(num_procs)\n print(\"Filtering db statements in %d processess.\" % num_procs)\n res = pool.map(choose_unique_stmt, stmt_groups)\n pool.close()\n pool.join()\n stmt_list, duplicate_sets = zip(*res)\n stmts = set(stmt_list)\n duplicate_ids = {uuid for dup_set in duplicate_sets for uuid in dup_set}\n\n return stmts, duplicate_ids", "def compile_statements(self) -> None:\n while self._get_current_token() != '}':\n if self._get_current_token() in self.STATEMENT_TOKENS:\n getattr(self, 'compile_' + self._get_current_token())()\n else:\n raise CompilationEngineError(f\"{self._get_current_token()} is an expected token at this point\")", "def __execsql(self, sql, seq):\n return self.sqldb.executemany(sql, [x._asdict() for x in seq])", "def statement(cls, code=''):\n return MySQLStatement.byName(code) if code else MySQLStatement", "def get_sqls(self):\n return {\n \"prepare_check\": \"select id from dbo.sysobjects where id = object_id('{0}')\".format(self.table_name),\n \"prepare_create\": \"\"\"\n create table {0} (\n id INT primary key identity,\n channel NVARCHAR(20),\n channel_detail NVARCHAR(100),\n channel_user_id NVARCHAR(100),\n request_timestamp DATETIME2,\n request_id NVARCHAR(100),\n request_type NVARCHAR(100),\n request_text NVARCHAR(MAX),\n request_payloads 
NVARCHAR(MAX),\n request_intent NVARCHAR(100),\n request_is_adhoc BIT,\n response_type NVARCHAR(100),\n response_text NVARCHAR(MAX),\n response_payloads NVARCHAR(MAX),\n response_milliseconds INT,\n context_is_new BIT,\n context_topic_name TEXT,\n context_topic_status TEXT,\n context_topic_is_new BIT,\n context_topic_keep_on BIT,\n context_topic_priority INT,\n context_error NVARCHAR(MAX),\n request_json NVARCHAR(MAX),\n response_json NVARCHAR(MAX),\n context_json NVARCHAR(MAX))\n \"\"\".format(self.table_name),\n\n \"write\": \"\"\"\n insert into {0} (\n channel,\n channel_detail,\n channel_user_id,\n request_timestamp,\n request_id,\n request_type,\n request_text,\n request_payloads,\n request_intent,\n request_is_adhoc,\n response_type,\n response_text,\n response_payloads,\n response_milliseconds,\n context_is_new,\n context_topic_name,\n context_topic_status,\n context_topic_is_new,\n context_topic_keep_on,\n context_topic_priority,\n context_error,\n request_json, response_json, context_json)\n values (\n ?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\n \"\"\".format(self.table_name),\n }", "def get_response_statements(self, input_statement):\r\n #print \"using the override storage adapter\"\r\n response_query = self.statements.find({\"$text\":{\"$search\": input_statement}})\r\n statement_query = []\r\n for item in response_query:\r\n for question in item['in_response_to']:\r\n statement_query.append(question)\r\n \r\n statement_objects = []\r\n for statement in list(statement_query):\r\n statement_objects.append(self.mongo_to_object(statement)) \r\n \r\n return statement_objects", "def per_stream_response_statements(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement]:", "def sql_execute(sql,value):\n cur = c.cursor()\n cur.execute(sql,value)\n results = cur.fetchall()\n return results", "def executeAll(lines):", "def execute_query(self) -> dict or list:\n pass", "def compile_parameter_list(self) -> None:\n if self._get_current_token() != ')':\n var_type = self._get_current_token()\n self._consume_type()\n\n self.table.define(self._get_current_token(), var_type, Kind.ARG)\n self._consume(TokenTypes.IDENTIFIER)\n while self._get_current_token() != ')':\n self._consume(',')\n var_type = self._get_current_token()\n self._consume_type()\n\n self.table.define(self._get_current_token(), var_type, Kind.ARG)\n self._consume(TokenTypes.IDENTIFIER)", "def create_stmt(self, stmtcls, ast, params=None, nopush=False):\n if params is None:\n stmtobj = stmtcls(parent=self.current_parent, ast=ast)\n else:\n stmtobj = stmtcls(parent=self.current_parent, ast=ast, **params)\n stmtobj.label = self.current_label\n self.current_label = None\n\n if self.current_block is None or self.current_parent is None:\n self.error(\"Statement not allowed in this context.\", ast)\n else:\n self.current_block.append(stmtobj)\n if not nopush:\n self.push_state(stmtobj)\n return stmtobj", "def add_statement(self, statement):\n if self.check_statement(statement):\n self._statement = statement\n self.statement_status = 'OK'\n else:\n self._statement = None\n self.statement_status = 'X'", "def executescript(c, of, debug = False):\n\tquery_list = []\n\tquery_list_candidates = of.readlines()\n\tfor line in query_list_candidates:\n\t\t# process out comment lines\n\t\tif line.startswith(\"--\"):\n\t\t\tpass\n\t\telse:\n\t\t\tif line.strip() != \"\":\n\t\t\t\tquery_list.append(line.strip())\n\tquery_list = \" \".join(query_list).split(';')\n\tfor query in query_list:\n\t\tif 
query.strip():\n\t\t\tif debug:\n\t\t\t\tprint \"executescript [status] : executing query:\\n\\t%s\\n\" % (query.strip())\n\t\t\tc.execute(query.strip())", "def test_dbapi_raw(n):\n\n conn = engine.pool._creator()\n cursor = conn.cursor()\n compiled = (\n Customer.__table__.insert()\n .values(name=bindparam(\"name\"), description=bindparam(\"description\"))\n .compile(dialect=engine.dialect)\n )\n\n if compiled.positional:\n args = (\n (\"customer name %d\" % i, \"customer description %d\" % i)\n for i in range(n)\n )\n else:\n args = (\n dict(\n name=\"customer name %d\" % i,\n description=\"customer description %d\" % i,\n )\n for i in range(n)\n )\n\n cursor.executemany(str(compiled), list(args))\n conn.commit()\n conn.close()", "def process_statement (lx,wlist,fb):\n # Grammar for the statement language is:\n # S -> P is AR Ns | P is A | P Is | P Ts P\n # AR -> a | an\n # We parse this in an ad hoc way.\n msg = add_proper_name (wlist[0],lx)\n if (msg == ''):\n if (wlist[1] == 'is'):\n if (wlist[2] in ['a','an']):\n lx.add (wlist[3],'N')\n fb.addUnary ('N_'+wlist[3],wlist[0])\n else:\n lx.add (wlist[2],'A')\n fb.addUnary ('A_'+wlist[2],wlist[0])\n else:\n stem = verb_stem(wlist[1])\n if (len(wlist) == 2):\n lx.add (stem,'I')\n fb.addUnary ('I_'+stem,wlist[0])\n else:\n msg = add_proper_name (wlist[2],lx)\n if (msg == ''):\n lx.add (stem,'T')\n fb.addBinary ('T_'+stem,wlist[0],wlist[2])\n return msg", "def process_statement (lx,wlist,fb):\n # Grammar for the statement language is:\n # S -> P is AR Ns | P is A | P Is | P Ts P\n # AR -> a | an\n # We parse this in an ad hoc way.\n msg = add_proper_name (wlist[0],lx)\n if (msg == ''):\n if (wlist[1] == 'is'):\n if (wlist[2] in ['a','an']):\n lx.add (wlist[3],'N')\n fb.addUnary ('N_'+wlist[3],wlist[0])\n else:\n lx.add (wlist[2],'A')\n fb.addUnary ('A_'+wlist[2],wlist[0])\n else:\n stem = verb_stem(wlist[1])\n if (len(wlist) == 2):\n lx.add (stem,'I')\n fb.addUnary ('I_'+stem,wlist[0])\n else:\n msg = add_proper_name (wlist[2],lx)\n if (msg == ''):\n lx.add (stem,'T')\n fb.addBinary ('T_'+stem,wlist[0],wlist[2])\n return msg" ]
[ "0.6858657", "0.6821307", "0.6613819", "0.6559915", "0.64512044", "0.63355", "0.6250831", "0.61308306", "0.5997265", "0.58391565", "0.57354546", "0.5710025", "0.5697581", "0.567703", "0.567703", "0.5657745", "0.55945855", "0.5572425", "0.5521451", "0.5517517", "0.55145484", "0.5508067", "0.55078167", "0.5487551", "0.5474311", "0.54192215", "0.54119086", "0.53842944", "0.5371576", "0.5356417", "0.5348011", "0.53390443", "0.5334243", "0.53262955", "0.5320591", "0.53180444", "0.5294993", "0.5290399", "0.5279914", "0.52771574", "0.52693033", "0.5257708", "0.52481", "0.52238846", "0.52213335", "0.5193058", "0.51905364", "0.51812667", "0.51803124", "0.5180107", "0.51764077", "0.516644", "0.51647764", "0.51633203", "0.51546514", "0.5148829", "0.5145927", "0.5137253", "0.51193553", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5116215", "0.5109982", "0.50884426", "0.50846267", "0.50708723", "0.50704026", "0.5061483", "0.50423396", "0.50359", "0.50229806", "0.50215745", "0.50211734", "0.50123894", "0.5005452", "0.50037384", "0.49989754", "0.49970654", "0.49938244", "0.49765676", "0.49763164", "0.4973762", "0.49717188", "0.49717188" ]
0.8045383
0
gee = { Statement }
def parseStmtList( tokens ):
	tok = tokens.peek( )
	ast = list( )	# list that keeps track of the all the returns from the parse rountines
	while tok is not None:
		# need to store each statement in a list
		statement = stmtList( )
		ast.append( statement )
		tok = tokens.peek( )
	return ast
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.Statement = []", "def declaration(self) -> global___Statement.Declaration:", "def PolicyStatement(self) -> PolicyStatement:", "def __init__(self, grammar, trace=...):\n ...", "def Statement(self):\n t = self.token\n if t.stmt_begin:\n self._advance()\n return t.std()\n ex = self.expression(0)\n self._advance([\"NEWLINE\", \"END\", \"DEDENT\"])\n return ex", "def doctest_DKBCCCsvStatementParser():", "def g():", "def map(self) -> global___Statement.Declaration:", "def __init__(self, *args, **kwargs):\r\n Grammar.__init__(self)\r\n dict.__init__(self, *args, **kwargs)", "def lab7_q4():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab7_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab7_q1():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def body(self):", "def lab7_q2():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab9_q4():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def problem_statement():\n pass", "def lab8_q4():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab9_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab8_q3():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def __init__(self, exprs):\n self.exprs = exprs", "def exo2():", "def start_at(self) -> global___Statement.Declaration:", "def __init__(self):\n self.name = ''\n self.variables = []\n self.assumptions = []\n self.guarantees = []", "def lab9_q2():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab8_q2():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def visit_Declaration(self, node):\n self.block = node.name\n obj = {\n 'enamldef': True,\n 'type': node.name,\n 'base': node.base,\n 'doc': node.doc,\n 'lineno': node.lineno,\n 'identifier': node.identifier,\n 'filename': self.filename,\n 'block': self.block,\n 'children': [],\n 'bindings': [],\n }\n self.stack.append(obj)\n for item in node.body:\n self.visit(item)", "def Statements(self):\n states = list()\n while self.currtok[1].name in {\"SEMI\", \"LCURLY\", \"IDENT\", \"if\", \"print\", \"while\", \"return\"}:\n state = self.Statement()\n states.append(state)\n return StatementsStmt(states)", "def Block(self):\n self._advance([\"INDENT\"])\n stmts = self.Statements()\n self._advance([\"DEDENT\"])\n return stmts", "def visit_Instantiation(self, node):\n obj = {\n 'enamldef': False,\n 'type': node.name,\n 'lineno': node.lineno,\n 'identifier': node.identifier,\n 'filename': self.filename,\n 'block': self.block,\n 'children': [],\n 'bindings': [],\n }\n self.stack.append(obj)\n for item in node.body:\n self.visit(item)\n self.stack.pop()\n self.stack[-1]['children'].append(obj)", "def __post_init__(self) -> None:\n self.current_line = Line(mode=self.mode)\n\n v = self.visit_stmt\n Ø: Set[str] = set()\n self.visit_assert_stmt = partial(v, keywords={\"assert\"}, parens={\"assert\", \",\"})\n self.visit_if_stmt = partial(\n v, keywords={\"if\", \"else\", \"elif\"}, parens={\"if\", \"elif\"}\n )\n self.visit_while_stmt = partial(v, keywords={\"while\", \"else\"}, parens={\"while\"})\n self.visit_for_stmt = partial(v, keywords={\"for\", \"else\"}, parens={\"for\", \"in\"})\n self.visit_try_stmt = partial(\n v, keywords={\"try\", \"except\", \"else\", \"finally\"}, parens=Ø\n )\n self.visit_except_clause = partial(v, keywords={\"except\"}, parens={\"except\"})\n self.visit_with_stmt = partial(v, keywords={\"with\"}, parens={\"with\"})\n self.visit_classdef = partial(v, keywords={\"class\"}, parens=Ø)\n self.visit_expr_stmt = partial(v, keywords=Ø, 
parens=ASSIGNMENTS)\n self.visit_return_stmt = partial(v, keywords={\"return\"}, parens={\"return\"})\n self.visit_import_from = partial(v, keywords=Ø, parens={\"import\"})\n self.visit_del_stmt = partial(v, keywords=Ø, parens={\"del\"})\n self.visit_async_funcdef = self.visit_async_stmt\n self.visit_decorated = self.visit_decorators\n\n # PEP 634\n self.visit_match_stmt = self.visit_match_case\n self.visit_case_block = self.visit_match_case", "def lab8_q5():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def lab9_q5():\n return \"\"\"\n YOUR EXPLANATION HERE\n \"\"\"", "def evaluate(self, edict):\n pass", "def iteration(self) -> global___Statement.Iteration:", "def iteration(self) -> global___Statement.Iteration:", "def __init__(self, the_ast):\n self._ast = the_ast", "def __init__(self):\n\n self.loops = []\n self.ast_util = ast_util.ASTUtil()", "def statements(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement]:", "def expression(self) -> Expression:\n ...", "def __init__(self,expressions,name,namespace={}):\n self.expressions = expressions\n self.name = name\n self.namespace = namespace", "def __init__(self, node, declare):\n preproc.__init__(self, node, declare, \"hashElif\", \"#elif\")\n self.string = \" \"+self.test", "def statements(civic_eid2997_statement, civic_aid6_statement):\n return [civic_eid2997_statement, civic_aid6_statement]", "def __init__(self):\n self.elevations = []", "def bracket_as_entity(self):\n pass", "def code():", "def __init__(self) -> None:\n self.seen_clause_in_line = False\n self._is_comprehension = False\n self._seen_for = False\n self._seen_for_in_line = False\n self._seen_if_in_line = False\n self._seen_nl = False\n self._potential_violation = False\n self._reported = False", "def evals(self):\n\t\tpass", "def __init__(self):\n self._g = {}", "def beginScope():", "def test_statement_initialized_by_assignment():\n shap = Statement(shape_id=\"@photo\", prop_id=\"dcterms:creator\", value_type=\"URI\")\n shap2 = Statement()\n shap2.shape_id = \"@photo\"\n shap2.prop_id = \"dcterms:creator\"\n shap2.value_type = \"URI\"\n assert shap == shap2", "def syntax_text():", "def is_used_as_statement(item):\n # this is what we need above our head to be in this category\n layering = [\n ANY_CONSTRUCT,\n syntax.PROGRAM\n ]\n\n return filter_by_layering(item, layering)", "def test_statement_initialized_with_just_one_field():\n shap = Statement(prop_id=\"dcterms:creator\")\n assert not shap.start\n assert shap.shape_id is None\n assert shap.shape_label is None\n assert shap.prop_id == \"dcterms:creator\"\n assert shap.prop_label is None\n assert shap.mand is None\n assert shap.repeat is None\n assert shap.value_type is None\n assert shap.value_datatype is None\n assert shap.constraint_value is None\n assert shap.constraint_type is None\n assert shap.shape_ref is None\n assert shap.annot is None", "def explain(self):", "def multi_statement() -> None:\n pass; print(\"hello\")", "def __init__(self, name: unicode, entry: ghidra.program.model.address.Address, body: ghidra.program.model.address.AddressSetView, source: ghidra.program.model.symbol.SourceType, findEntryPoint: bool, recreateFunction: bool):\n ...", "def d(self):\n pass", "def d(self):\n pass", "def __iter__(self):\n return iter(self._statements)", "def statement( ): # parse rountin for statement that makes sure the token is one of the following, eventually there will be an error caught\n\n\ttok = tokens.peek( )\n\tif debug: print( \"statement: \", tok )\n\tif tok 
== \"if\":\n\t\tstat = parseIfStatement( )\n\t\treturn stat\n\telif tok == \"while\":\n\t\tstat = parseWhileStatement( )\n\t\treturn stat\n\telse: \n\t\tstat = parseAssign( )\n\t\treturn stat", "def let():\n def from_many(*kv_pairs):\n new_bindings = {}\n for entry in kv_pairs:\n with match(entry) as case:\n with case('Quoted(Sexpr(Name(name), expr))') as [m]:\n new_bindings[m.name] = m.expr\n\n def _from_many(quoted_body):\n return EvaluateInContext(\n push_subscope_with(new_bindings),\n pop_subscope,\n quoted_body.subexpression\n )\n\n return e.Function({parse_fn(\"(λ &[any] . any)\"): _from_many})\n yield (\"(λ ...&[(name any)] . (λ &[any] . any))\", from_many)\n\n def from_one(key, value, quoted_body):\n return EvaluateInContext(\n push_subscope_with({key.subexpression.name: value}),\n pop_subscope,\n quoted_body.subexpression\n )\n yield (\"(λ &[name] any &[any] . any)\", from_one)", "def sth():", "def parseStmt(line):\n print(\"Statement\")\n index=0\n if line[0] == 's':\n print(\"Set\")\n index += 4\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseSet(cmds)\n elif line[0] == 'h':\n exit()\n elif line[0] == 'j':\n index += 5\n if line[index] == ' ':\n print(\"Jumpt\")\n index += 1\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseJumpt(cmds)\n else:\n print(\"Jump\")\n rest = line[index:]\n cmds = str.split(rest, ',')\n parseJump(cmds)\n else:\n print(\"Invalid Operation\")", "def agreements():\n pass", "def stmtList( ):\n\n\ttok = tokens.peek( )\n\tif debug: print( \"stmtList: \", tok )\n\tstat = statement( )\n\treturn stat", "def result(self) -> global___Expression:", "def opinion():\n pass", "def __init__(self, symbol):\n Expression.__init__(self, None)\n self.symbol = symbol\n self.index = None\n self.basetype = symbol.basetype", "def __init__(self):\n # The logging object. \n # Example: self.log.info(f\"Current value of var: {my_var}\")\n self.log = logging.getLogger()\n\n # Create object of EDH\n self.edh = EDH.EDH()\n\n # Parameters to be used in sale\n self.item_amount = \"$0.01\"\n self.fuel_amount = \"$1.00\"\n self.fuel_grade = \"Diesel 1\"", "def standard(self) -> global___Snippet.Standard:", "def enterScope(self, name):", "def __init__(self):\n self.g_sect = []", "def experiment(self) -> Any:", "def debug():\n def _debug(x):\n return e.String(x.as_source())\n yield (\"(λ any . str)\", _debug)", "def __init__(self):\n {}", "def main():\n e = Edge(12, 34, 5.67)\n print(e)", "def _analyse_stmt_Expr(self, statement: ast.Expr, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def __init__(self, source, filenum, linenum, param_list, ops_raw_list, ops_list):\n self.gate_definition = {\n 'source': source,\n 'filenum': filenum,\n 'linenum': linenum,\n 'param_list': param_list,\n 'ops_raw_list': ops_raw_list,\n 'ops_list': ops_list}", "def __init__(self):\n self.table = {}", "def elegir_ventana(self):\r\n pass", "def __init__(self, name: unicode, entries: ghidra.program.model.address.AddressSetView, body: ghidra.program.model.address.AddressSetView, source: ghidra.program.model.symbol.SourceType, findEntryPoint: bool, recreateFunction: bool):\n ...", "def __init__(self):\n # The logging object. 
\n # Example: self.log.info(f\"Current value of var: {my_var}\")\n self.log = logging.getLogger()\n\n # Create object of EDH\n self.edh = EDH.EDH()\n\n # Parameters to be used in sale\n self.item_amount = \"$0.01\"\n self.fuel_amount = \"$1.00\"\n self.carwash_amount = \"$2.50\"\n self.fuel_grade = \"Diesel 1\"", "def evaluate(self, g):\n pass", "def __init__(self, node, declare):\n preproc.__init__(self, node, declare, \"hashElse\", \"#else\")", "def mechanism(self):", "def exercise_b2_93():\r\n pass", "def easteregg(phrase):\n\n if phrase == 'E = m c^2':\n return \"\"\"\nGeneral relativity, or the general theory of relativity, \nis the geometric theory of gravitation published by Albert Einsteinin 1916[1] \nand the current description of gravitation in modern physics. \nGeneral relativity generalizes special relativity and\nNewton's law of universal gravitation, providing a unified description \nof gravity as a geometric property of space and time, or spacetime. \nIn particular, the curvature of spacetime is directly related \nto the energy and momentum of whatever matterand radiation are present. \nThe relation is specified by the Einstein field equations, \na system of partial differential equations.\n\"\"\"", "def eval(self):\n pass", "def eval(self):\n pass", "def eval(self):\n pass", "def onStatement(self, match):\n\t\treturn self.process(match[0])", "def __init__(self, table, j):\n self._table = table\n self._j = j", "def __init__(self, semantics):\n self.semantics = semantics", "def Statement(self):\n if self.currtok[1].name == \"SEMI\":\n self.currtok = next(self.tg)\n return semicolon()\n if self.currtok[1].name == \"LCURLY\":\n return self.Block()\n if self.currtok[1].name == \"IDENT\":\n if self.functions.get(self.currtok[0]) is None:\n return self.Assignment()\n else:\n return self.FunctionCall()\n if self.currtok[1].name == \"if\":\n return self.IfStatement()\n if self.currtok[1].name == \"print\":\n return self.PrintStmt()\n if self.currtok[1].name == \"while\":\n return self.WhileStatement()\n if self.currtok[1].name == \"return\":\n return self.ReturnStmt()\n\n raise SLUCSyntaxError(\"ERROR: Unexpected token {0} on line {1}\".\n format(self.currtok[1], str(self.currtok[2] - 1)))", "def test_models_edx_ui_book_with_valid_statement(statement):\n assert statement.event_type == \"book\"\n assert statement.name == \"book\"", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def __init__(self):\n self.d = {}", "def substantiate():", "def _create_event_statement(event_name):\n field_name_to_value = {'name': event_name, 'event_date': get_random_date(), 'uuid': get_uuid()}\n return create_vertex_statement('Event', field_name_to_value)" ]
[ "0.6804917", "0.5830523", "0.57813966", "0.55667824", "0.554171", "0.54849756", "0.5472526", "0.5465269", "0.5427294", "0.54140335", "0.53767276", "0.53541756", "0.5350958", "0.53406405", "0.5316228", "0.53140706", "0.53092444", "0.5294509", "0.529304", "0.52851737", "0.5274673", "0.52488357", "0.5248108", "0.5246906", "0.5230919", "0.5230171", "0.5230011", "0.52275497", "0.5219155", "0.5219133", "0.52176344", "0.52004755", "0.5193492", "0.51730365", "0.51730365", "0.51420164", "0.5107689", "0.5098743", "0.5056819", "0.50451607", "0.5027325", "0.5021576", "0.5015907", "0.5014064", "0.5013998", "0.50070506", "0.49660733", "0.49517322", "0.49350253", "0.4929923", "0.4922791", "0.49114665", "0.48779356", "0.4866951", "0.48505574", "0.4845869", "0.48373577", "0.48373577", "0.4836697", "0.48340017", "0.48282337", "0.48220557", "0.4817562", "0.48130825", "0.48091865", "0.47969148", "0.47896066", "0.4779656", "0.47756186", "0.4766833", "0.47572166", "0.47526714", "0.47487184", "0.4743042", "0.4741733", "0.47417063", "0.47393754", "0.47351074", "0.4734839", "0.47248954", "0.47245646", "0.47217083", "0.4718953", "0.47177342", "0.47137055", "0.4701805", "0.46984714", "0.46916726", "0.46916726", "0.46916726", "0.46873173", "0.46859536", "0.4682169", "0.46767357", "0.46764392", "0.46760583", "0.46760583", "0.46760583", "0.46760583", "0.46717796", "0.4666334" ]
0.0
-1
main program for testing
def main():
	global debug
	ct = 0
	for opt in sys.argv[1:]:
		if opt[0] != "-":
			break
		ct = ct + 1
		if opt == "-d":
			debug = True
	if len(sys.argv) < 2+ct:
		print ("Usage: %s filename" % sys.argv[0])
		return
	parse("".join(mklines(sys.argv[1+ct])))
	return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n pass", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():\n return", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main():\n run_test_all()", "def main():\n\tpass", "def test_script(self) -> None:\n main()", "def __main() :\n launchTests()", "def main(self):", "def main(self):\r\n pass", "def main(args):", "def main(args):", "def main(self) -> None:\n pass", "def main() -> None:\n return", "def main():\n run_program()", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n Main()", "def main():\n\n pass", "def main(args=None):", "def main(args=None):", "def run():\n main()", "def main():\n tng.api.runner()", "def main():\n print(\"It works!!! ;-)\")\n ###TODO### do something with the various methods/functions of this file", "def main():\n ...", "def main():\n\tcli = Cli()\n\tcli.run()", "def main():\n return 0", "def main():\n return 0", "def main():\n pass", "def main():\n unittest.main(exit=False, verbosity=2)\n return 0", "def main():\n\n pass\n\n return None", "def main():\n\n pass\n\n return None", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def main():\n vunit = vunit_pkg.VUnit.from_argv()\n vunit = map_sources(vunit)\n run_tests(vunit)", "def main():\n print(\"is Running!\")", "def main(self):\n pass", "def main():\n\n BASIC.run(PROGRAM)", "def main():\n print(\"Everythin is ok\")", "def run_main():\n main(sys.argv)", "def main():\n\n print(\"=\" * 80)\n print(\"DATA STRUCTURE TESTS\")\n test_module(structs.tests)\n test_module(structs.regularization)\n\n print(\"=\" * 80)\n print(\"END-TO-END TESTS\")\n test_module(globals())", "def runtest(self):", "def main():\n testlib = VorpatestLibrary()\n testlib.prepare_test()\n testlib.run_vorpaline(*sys.argv[1:])\n testlib.run_vorpastat()\n testlib.cleanup_test()", "def main(args=None):\n pass", "def main():\n sys.exit(RBExt().run(sys.argv[1:]))", "def run_main(): # pragma: no cover\n RunTestsCLI.run()", "def main(cls):\n raise NotImplementedError", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def main():\n pass\n\n if __name__ == \"__main)__\":\n main()" ]
[ "0.84278727", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.8427348", "0.84023356", "0.8301654", "0.8301654", "0.8301654", "0.8301654", "0.82025355", "0.8164585", "0.8161485", "0.80703014", "0.8070099", "0.8019265", "0.8007026", "0.8007026", "0.7948684", "0.7922294", "0.78608686", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.7842694", "0.78287137", "0.7826166", "0.7803283", "0.7803283", "0.7777299", "0.7763899", "0.77596", "0.77198493", "0.76136816", "0.7596748", "0.7596748", "0.7589593", "0.7580201", "0.7576217", "0.7576217", "0.75139755", "0.748605", "0.74708736", "0.7455102", "0.74518156", "0.74281156", "0.7417354", "0.7350136", "0.7341851", "0.7338101", "0.73130155", "0.73113656", "0.73028094", "0.72656924", "0.72637266", "0.72637266", "0.72637266", "0.72637266", "0.72637266", "0.72637266", "0.72637266", "0.72637266", "0.72634375" ]
0.0
-1
Returns ssh username for connecting to cluster workers.
def get_ssh_user():
    return getpass.getuser()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ssh_user(self):\n if self.configuration.get(\"pg_ssh_user\"):\n return \"%s@\" % self.configuration.get(\"pg_ssh_user\")\n else:\n return \"%s@\" % DEFAULT_SSH_USER", "def get_username(self) -> str:\n try:\n return self[\"user\"]\n except KeyError:\n raise MarathonNotConfigured(\n \"Could not find marathon user in system marathon config\"\n )", "def head_node_user(self):\n return self._get_param(\"ClusterUser\")", "def username(self) -> str:\n return self.get_env_var(self.username_var)", "def username(self) -> str:\n return self.get_env_var(self.username_var)", "def username(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"username\")", "def __get_username(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVER_USERNAME')", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def username(self) -> str:\n return pulumi.get(self, \"username\")", "def get_remote_user(self, username):\n return 'ec2-user'", "def username(self) :\n\t\ttry :\n\t\t\treturn self._username\n\t\texcept Exception as e:\n\t\t\traise e", "def username(self):\n return self._query_config()['username']", "def username(self):\n return self._authenticator.username()", "def __get_username(self) -> str:\n\t\treturn os.getenv('MQTT_DRIVEN_USERNAME')", "def username(self) -> str:\n return self._username", "def git_username(self):\n return self._git_username", "def username(self) -> str:\n raise NotImplementedError", "def get_username(self) -> str:\n return self._username", "def get_sshhost(self):\n return self._sshhost.gethost()", "def remote_hostname(self):\n return pn_connection_remote_hostname(self._impl)", "def driver_username(self):\n return self._driver_username", "def _what_is_username(self):\n prompt = \"-?- Send to: \"\n sn = self._input(prompt)\n return sn", "def username(self):\n return self._username()", "def username(self):\n return self._username", "def username(self):\n return self._username", "def username(self):\n return self._username", "def get_username(self):\n raise NotImplementedError('get_username')", "def username(self) -> Optional[str]:\n return self._state.get(\"username\", None)", "def get_weak_username(self, host):\n try:\n return self.weak_hosts.get(host)[1]\n except IndexError:\n return \" \"", "def get_username():\r\n return get_creds(CREDS_FILE)[1]", "def get_username(self):\r\n raise NotImplementedError", "def get_host_name(self):\n return self.get_command_output(\"hostname\").strip(\"\\n\")", "def get_uname(self):\n return Server.t_usernames.get(threading.get_ident())", "def get_username_for_active_connection(self):\n user_info = self.get_user_info()\n return getattr(user_info, 'user_name', None)", "def username():\n user = os.environ[\"CREODIAS_USERNAME\"]\n if user:\n return user\n raise ValueError(\"Set environment variable CREODIAS_USERNAME\")", "def userhost(self):\n if self.user:\n return u\"%s@%s\" % (self.user, self.host)\n else:\n return self.host", "def get_username(self):\n if self.controller.oem_config:\n return 'oem'\n return self.username", "def user_name():\n\n # Theoretically, we should be using Kerberos principal name for this.\n # However, Python Kerberos API bindings (both kerberos and krb5 modules)\n # are broken to the extent that one does not return the username, and other\n # has sad API, so we have to use other venues.\n\n if \"ATHENA_USER\" in os.environ:\n return os.environ[\"ATHENA_USER\"]\n return getpass.getuser()", "def sys_user_name():\n\timport subprocess\n\treturn 
subprocess.Popen('whoami', shell=True, stdout=subprocess.PIPE).stdout.readline().replace('\\n', '')", "def get_ssh_user(self, profile):\n ssh_user = 'root'\n if self._value.has_option(profile, 'ssh_user'):\n ssh_user = self._value.get(profile, 'ssh_user')\n self.logger.info(\"%s is selected as a ssh user\" % ssh_user)\n return ssh_user", "def get_username(self):\n return self.username", "def username():\n login = 0\n return pwd.getpwuid(os.getuid())[login]", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def get_user_name(self):\n whoami = subprocess.Popen('whoami', stdin=None, stdout=subprocess.PIPE,\n shell=True, close_fds=True)\n whoami = whoami.communicate()[0]\n if '\\n' in whoami:\n newline_index = whoami.find('\\n')\n whoami = whoami[:newline_index]\n return whoami", "def login_username(self):\n return os.getenv(\"CONAN_LOGIN_USERNAME\", \"dvirtz\")", "def get_username():\n\n return pwd.getpwuid(os.getuid())[0]", "def ocsaudit_get_username():\n return pwd.getpwuid(os.getuid()).pw_name", "def username(self, repository):\r\n return self._username(repository)", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def username(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"username\")", "def _username(self):\n if 'username' not in self._config:\n self._config['username'] = self._UI.get_input(\"Please enter your trac username: \")\n self._config._write_config()\n return self._config['username']", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"username\")", "def username(self):\n return self.user.username", "def cluster_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cluster_name\")", "def GetUsername(self):\n pass", "def get_username(ui):\n username = ui.config('bb', 'username', None)\n if username:\n return username\n 
import getpass\n username = getpass.getuser()\n ui.status('using system user %r as username' % username)\n return username", "def get_username(self, master_id):\r\n return self._handler.get_username(master_id)", "def get_username():\n return pwd.getpwuid(os.getuid())[0]", "def user_name(self) -> str:\n result = subprocess.run(\n [\"git\", \"config\", \"user.name\"],\n check=True,\n cwd=self.dir,\n encoding=\"utf-8\",\n stdout=subprocess.PIPE,\n )\n return result.stdout.strip()", "def repo_user(self):\n if 'repoze.who.identity' in self.environ:\n return self.environ['repoze.who.identity'].get('user')", "def user():\n\treturn request.authorization.username if zk.get_http_login() else zk.get_username()", "def host_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"host_name\")", "def get_username(self):\r\n return self.username", "def username(self, inst):\r\n return inst.user.username", "def username(self, instance):\r\n return instance.user.username", "def username(self) -> str:", "def username(self) -> str:", "def computer_name(self) -> str:\n return pulumi.get(self, \"computer_name\")", "def username(self):\n log.warning(\"username property deprecated. Use boundjid.user\")\n return self.boundjid.user", "def host_name(self):\n return self._host_name", "def via_host_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"via_host_name\")", "def user_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_name\")", "def getUsername(self):\n\t\treturn self.Username.lower()", "def get_nickname(self):\n return self._nick", "def get_host_name():\n return socket.gethostname()", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")" ]
[ "0.7331437", "0.6813522", "0.67854804", "0.67478967", "0.67478967", "0.6733212", "0.66904676", "0.66705793", "0.66705793", "0.66705793", "0.6658738", "0.66570204", "0.6605207", "0.65863043", "0.65245265", "0.6501686", "0.6480417", "0.6475567", "0.6475105", "0.64678997", "0.6448277", "0.64340156", "0.64275813", "0.6421704", "0.64067924", "0.64067924", "0.64067924", "0.63855106", "0.63849294", "0.6384408", "0.6383455", "0.6380696", "0.63781464", "0.6376987", "0.63754773", "0.6330437", "0.632541", "0.6310789", "0.6282853", "0.6281617", "0.62629503", "0.62629175", "0.6262252", "0.6259699", "0.6259699", "0.62539124", "0.62538767", "0.6250858", "0.62479573", "0.62274915", "0.6221959", "0.6221959", "0.6221959", "0.6221959", "0.6221959", "0.6221959", "0.6221959", "0.6221959", "0.6221959", "0.6221959", "0.6221959", "0.6221959", "0.62135756", "0.6213532", "0.6213532", "0.6213532", "0.6213532", "0.6213532", "0.6213532", "0.6213532", "0.6213532", "0.6213532", "0.6207502", "0.6201796", "0.62004006", "0.61872166", "0.61582077", "0.61580884", "0.6148021", "0.614757", "0.6144103", "0.61426485", "0.6138534", "0.61382055", "0.61248213", "0.6109583", "0.6109583", "0.60917103", "0.6079948", "0.60788757", "0.6076507", "0.6065732", "0.6049755", "0.6040373", "0.60372335", "0.6032071", "0.6032071", "0.6032071", "0.6032071", "0.6032071" ]
0.72012144
1
Returns the ssh key for connecting to cluster workers. If the env var TUNE_CLUSTER_SSH_KEY is provided, then this key will be used for syncing across different nodes.
def get_ssh_key():
    path = os.environ.get("TUNE_CLUSTER_SSH_KEY",
                          os.path.expanduser("~/ray_bootstrap_key.pem"))
    if os.path.exists(path):
        return path
    return None
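A minimal, self-contained usage sketch (not part of the dataset record): it restates the two helpers from this section so it runs on its own, and the ~/.ssh/id_rsa path is only an illustrative value for the override.

import os
import getpass


def get_ssh_user():
    # Mirrors the record above: the user running the driver process.
    return getpass.getuser()


def get_ssh_key():
    # Mirrors the record above: the env var wins, otherwise the default bootstrap key.
    path = os.environ.get("TUNE_CLUSTER_SSH_KEY",
                          os.path.expanduser("~/ray_bootstrap_key.pem"))
    if os.path.exists(path):
        return path
    return None


if __name__ == "__main__":
    # Pointing TUNE_CLUSTER_SSH_KEY at an existing key file overrides the default;
    # if neither path exists, get_ssh_key() returns None.
    os.environ["TUNE_CLUSTER_SSH_KEY"] = os.path.expanduser("~/.ssh/id_rsa")
    print(get_ssh_user(), get_ssh_key())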
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def host_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"host_key\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> str:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"cluster_id\")", "def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")", "def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")", "def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")", "def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")", "def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")", "def host_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_key\")", "def key_encryption_key_identity(self) -> Optional[pulumi.Input['ClusterPropertiesKeyEncryptionKeyIdentityArgs']]:\n return pulumi.get(self, \"key_encryption_key_identity\")", "def partition_key(self) -> str:\n return pulumi.get(self, \"partition_key\")", "def client_key(self) -> str:\n return pulumi.get(self, \"client_key\")", "def client_key(self) -> str:\n return pulumi.get(self, \"client_key\")", "def kms_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"kms_key\")", "def hostkey(self):\n return self.__get_option('hostkey_file')", "def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_id\")", "def kms_key_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"kms_key_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_id\")", "def cluster_identifier(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_identifier\")", "def get_cluster_command(cls):\n if 'cluster_command' in cls.global_settings:\n return cls.global_settings['cluster_command']\n else:\n return None", "def ssh_public_key(self) -> pulumi.Input['SshPublicKeyArgs']:\n return pulumi.get(self, \"ssh_public_key\")", "def kms_key_id(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_id\")", "def kms_key_id(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_id\")", "def kms_key_id(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_id\")", "def kms_key_id(self) -> Optional[str]:\n return pulumi.get(self, \"kms_key_id\")", "def download_kube_key(self):\n if Cloud().target_cloud_gcp():\n return\n logger.info(\"Downloading cluster ssh key from s3 ...\")\n data = self._bucket.get_object(self._s3_cluster_ssh_key)\n assert data is not None, \"No kube ssh key at {}/{}\".format(self._bucket_name, self._s3_cluster_ssh_key)\n dir = 
os.path.dirname(self._key_file)\n if not os.path.exists(dir):\n os.makedirs(dir)\n with open(self._key_file, \"w\") as f:\n f.write(data)\n os.chmod(self._key_file, 0o0600)\n logger.info(\"Downloaded kube ssh key from %s/%s to %s\", self._bucket_name, self._s3_cluster_ssh_key, self._key_file)\n return self._key_file", "def client_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_key\")", "def cluster_id(self):\n return self._cluster_id", "def client_public_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"client_public_key\")", "def crypto_key_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"crypto_key_id\")", "def encryption_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encryption_key\")", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> str:\n return pulumi.get(self, \"cluster_name\")", "def kms_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kms_key\")", "def kms_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"kms_key\")", "def cluster_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"cluster_name\")", "def kms_key_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kms_key_id\")", "def kms_key_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kms_key_id\")", "def kms_key_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kms_key_id\")", "def kms_key_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kms_key_id\")", "def kms_key_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"kms_key_id\")", "def get_ssh_key(self, profile):\n ssh_key = '/home/ssm-user/bastion'\n if self._value.has_option(profile, 'ssh_key'):\n ssh_key = self._value.get(profile, 'ssh_key')\n self.logger.info(\"%s is selected as a ssh user\" % ssh_key)\n return ssh_key", "def getPubKey(User):\n with settings(key_filename='/Users/eric/.ssh/id_rsa.pub', host_string=watt):\n with cd('/home/%s/.ssh' % (User)):\n auth_keyfile = sudo(\n '( [ -f authorized_keys ] && echo \"authorized_keys\" ) || ( [ -f authorized_keys2 ] && echo \"authorized_keys2\" )')\n key = sudo('head -1 %s' % auth_keyfile)\n\n return key", "def _var_key(var):\n\n # pylint: disable=protected-access\n # Get the distributed variable if it exists.\n if getattr(var, \"_distributed_container\", None) is not None:\n var = var._distributed_container()\n if var._in_graph_mode:\n return var._shared_name\n return var._unique_id", "def cluster_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"cluster_name\")", "def get_key_file(self):\n return self.configuration.get(\"pg_host_key\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def cluster_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"cluster_name\")", "def get_key(self):\n return self._determine_key()", "def get_key_name(self, instance):\n return REDIS_TEST_KEY_NAME", "def get_master_blinding_key(self):\n return 
self._jadeRpc('get_master_blinding_key')", "def client_public_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_public_key\")", "def secret_key_ref(self) -> Optional[pulumi.Input['KongClusterPluginConfigfromSecretkeyrefArgs']]:\n return pulumi.get(self, \"secret_key_ref\")", "def pubkey(self) -> str:\n\n return self._pubkey", "def GetPublicKey(self):\n public_ssh_key_file = self.ssh_key_file + '.pub'\n if (not os.path.exists(self.ssh_key_file) or\n not os.path.exists(public_ssh_key_file)):\n log.warn('You do not have an SSH key for Google Compute Engine.')\n log.warn('[%s] will be executed to generate a key.',\n self.ssh_keygen_executable)\n\n ssh_directory = os.path.dirname(public_ssh_key_file)\n if not os.path.exists(ssh_directory):\n if console_io.PromptContinue(\n 'This tool needs to create the directory [{0}] before being able '\n 'to generate SSH keys.'.format(ssh_directory)):\n files.MakeDir(ssh_directory, 0700)\n else:\n raise exceptions.ToolException('SSH key generation aborted by user.')\n\n keygen_args = [\n self.ssh_keygen_executable,\n '-t', 'rsa',\n '-f', self.ssh_key_file,\n ]\n _RunExecutable(keygen_args)\n\n with open(public_ssh_key_file) as f:\n return f.readline().strip()", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")" ]
[ "0.63742065", "0.61436695", "0.61436695", "0.61436695", "0.61436695", "0.61436695", "0.6095634", "0.6095634", "0.6095634", "0.6095634", "0.60669327", "0.60669327", "0.60669327", "0.60669327", "0.60669327", "0.60669327", "0.60596585", "0.597747", "0.58629495", "0.58629495", "0.5853532", "0.5850277", "0.5831395", "0.5831395", "0.5812002", "0.5805047", "0.5805047", "0.5805047", "0.5762766", "0.57547444", "0.5751789", "0.5717375", "0.5717375", "0.5717375", "0.5717375", "0.57020307", "0.56619143", "0.5618195", "0.5609324", "0.55922043", "0.55856043", "0.5557012", "0.5557012", "0.5546054", "0.55173266", "0.5511645", "0.549119", "0.549119", "0.549119", "0.549119", "0.549119", "0.546806", "0.5460996", "0.54499996", "0.5447339", "0.5447339", "0.5441648", "0.54263103", "0.54263103", "0.54263103", "0.54263103", "0.54263103", "0.5423085", "0.5418289", "0.54113555", "0.5404862", "0.54043925", "0.53944916", "0.53836656", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473", "0.5341473" ]
0.70618993
0
Writes the UUIDs and extras of the given nodes to a JSON file. This is useful for import/export because currently extras are lost, so this can be used to save and restore the extras on the nodes.
def export_extras(nodes, filename='node_extras.txt'):

    #outstring = ''#' node uuid | extras \n'
    outdict = {}
    for node in nodes:
        if isinstance(node, int): #pk
            node = load_node(node)
        elif isinstance(node, basestring): #uuid
            node = load_node(node)

        if not isinstance(node, Node):
            print('skiped node {}, is not an AiiDA node, did not know what to do.'.format(node))
            continue
        uuid = node.uuid
        extras_dict = node.get_extras()
        outdict[uuid] = extras_dict
        #line = '{} | {}\n'.format(uuid, extras_dict)
        #outstring = outstring + line

    #outfile = open(filename, 'w')
    #outfile.write(outstring)
    #outfile.close()
    json.dump(outdict, open(filename,'w'))
    return
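export_extras above depends on AiiDA's load_node/Node and a configured profile. As a hedged illustration of the same idea, the sketch below uses hypothetical helper names (dump_extras/load_extras) and a plain {uuid: extras} dict in place of real nodes, to show only the JSON round-trip the record relies on.

import json


def dump_extras(extras_by_uuid, filename='node_extras.txt'):
    # Persist a {uuid: extras_dict} mapping so the extras survive an export/import cycle.
    with open(filename, 'w') as handle:
        json.dump(extras_by_uuid, handle)


def load_extras(filename='node_extras.txt'):
    # Read the mapping back; a caller would re-attach each dict via node.set_extras().
    with open(filename) as handle:
        return json.load(handle)


if __name__ == "__main__":
    dump_extras({'3a7b9c1e-0000-0000-0000-000000000000': {'label': 'relaxed', 'converged': True}})
    print(load_extras())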
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_nodes(node, filename):\n\n with open(filename, 'w', newline='') as f:\n writer = csv.DictWriter(f,\n fieldnames=node[0].keys(),\n quoting=csv.QUOTE_ALL)\n writer.writeheader()\n writer.writerows(node)", "def write(node, filepath):\n data = read(node)\n\n if not data:\n return\n\n with open(filepath, 'w') as f:\n json.dump(data, f, indent=4, sort_keys=True)\n\n return data", "def import_extras(filename):\n\n all_extras = {}\n\n # read file\n #inputfile = open(filename, 'r')\n #lines = inputfile.readlines()\n #for line in lines[1:]:\n # splitted = line.split(' | ')\n # uuid = splitted[0].rstrip(' ')\n # extras = splitted[1].rstrip(' ')\n # #extras = dict(extras)\n # print(extras)\n # all_extras[uuid] = extras\n #inputfile.close()\n try:\n all_extras = json.load(open(filename))\n except:\n print('The file has to be loadabel by json. i.e json format (which it is not).')\n\n for uuid, extras in all_extras.iteritems():\n\n try:\n node = load_node(uuid)\n except:\n # Does not exists\n print('node with uuid {} does not exist in DB'.format(uuid))\n node = None\n continue\n if isinstance(node, Node):\n node.set_extras(extras)\n else:\n print('node is not instance of an AiiDA node')\n #print(extras)\n return", "def save(file_path, nodes=[]):\n\n t = time.time()\n data = get_data(nodes)\n\n if not data:\n mc.warning('Nothing to save for selected nodes!')\n return\n\n if not file_path.endswith(file_extention):\n file_path = os.path.splitext(file_path)[0]+file_extention\n\n utils.write_json(file_path, data)\n return file_path\n print time.time() - t", "def save(file_path, nodes):\n\n t = time.time()\n data = get_data(nodes)\n\n if not data:\n mc.warning('Nothing to save for selected nodes!')\n return\n\n if not file_path.endswith(file_extention):\n file_path = os.path.splitext(file_path)[0]+file_extention\n\n utils.write_json(file_path, data)\n print time.time() - t", "def write_node_features(node_features, node_file):\n dgl.data.utils.save_tensors(node_file, node_features)", "def save_config(node, force=False):\n filepath = os.path.join(\"nodes/\", env.host_string + \".json\")\n tmp_filename = 'tmp_{0}.json'.format(env.host_string)\n files_to_create = [tmp_filename]\n if not os.path.exists(filepath) or force:\n # Only save to nodes/ if there is not already a file\n print \"Saving node configuration to {0}...\".format(filepath)\n files_to_create.append(filepath)\n for node_file in files_to_create:\n with open(node_file, 'w') as f:\n f.write(json.dumps(node, indent=4))\n return tmp_filename", "def write_graph_to_file(self, path):\n graph = nx.Graph()\n for node in self.graph.nodes(data=True):\n new_node = deepcopy(node)\n new_node[1]['blocks'] = list(new_node[1]['blocks'])\n graph.add_node(*new_node)\n graph.add_edges_from(self.graph.edges())\n json.dump(json_graph.node_link_data(graph), open(path, 'w'))", "def write_edges(\n edges: Mapping[str, Any],\n filename: str,\n jsonlines: bool = False,\n gzipflag: bool = False,\n yaml: bool = False,\n):\n pass", "def write_subgraph_nodeids(filename, nodelist):\n with open(filename, 'w') as f:\n f.write('nodeid\\n')\n for i in nodelist:\n f.write(str(i) + '\\n')", "def save_info_to_file(filepath, tokens):\n with open(filepath, 'w') as f:\n json.dump(tokens, f)", "def write_out():\n os.replace(\"recipes.json\", \".recipes.json.backup\")\n with open(\"recipes.json\", \"w\") as recipes_file:\n json.dump(recipebook.to_json_list(),recipes_file)", "def write(self, *, tp_nodes, tp_edges: Mapping[str, Edge], tp_namespaces, tn_nodes, tn_edges, 
tn_namespaces):\n with open(os.path.join(self.graph_dir_path, \"tp_nodes.pkl\"), \"wb\") as file:\n pickle.dump(tp_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tp_edges.pkl\"), \"wb\") as file:\n pickle.dump(tp_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tp_namespaces.pkl\"), \"wb\") as file:\n pickle.dump(tp_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_nodes.pkl\"), \"wb\") as file:\n pickle.dump(tn_nodes, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_edges.pkl\"), \"wb\") as file:\n pickle.dump(tn_edges, file, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(self.graph_dir_path, \"tn_namespaces.pkl\"), \"wb\") as file:\n pickle.dump(tn_namespaces, file, protocol=pickle.HIGHEST_PROTOCOL)", "def writeLinkoJson(linkograph, file):\n # Convert the linkograph into a form json can use.\n jsonLinko = [linkograph.labels]\n\n \"\"\"\n for entry in linkograph:\n tolist = list(map(list, entry))\n jsonLinko.append(tolist)\n \"\"\"\n for index in range(len(linkograph)):\n tolist = list(map(list, linkograph[index]))\n tolist.append(linkograph.uuids[index])\n jsonLinko.append(tolist)\n\n with open(file, 'w') as jsonFile:\n json.dump(jsonLinko, jsonFile, indent=4)", "def generate_file(file_name, node_keys):\n if file_name is None:\n raise ValueError(\"'file_name' is not present. This was created by @Edd1e234\")\n if node_keys is None or len(node_keys) is 0:\n raise ValueError(\"'node_keys' has no values. This was created by @Edd1e234\")\n\n file = open(file_name, \"w+\")\n for i in node_keys:\n file.write(i + \"\\n\")", "def writeNodes(fil, nodes, nofs=1):\n fil.write(' NODAL COORDINATES 2.2.30\\n')\n for i,n in enumerate(nodes):\n fil.write(\"%10d%20.11e%20.11e%20.11e\\n\" % ((i+nofs,)+tuple(n)))\n fil.write('ENDOFSECTION\\n')", "def write_edge_features(edge_features, edge_file):\n dgl.data.utils.save_tensors(edge_file, edge_features)", "def nx_to_json(graph, filename):\n graph_data = json_graph.node_link_data(graph)\n\n with open(filename, \"w\") as f:\n json.dump(graph_data, f, indent=4)", "def save_associated_genes(identifiers=[DEFAULT_IDENTIFIER]):\n for identifier in identifiers:\n file_path = os.path.join(EXTERNAL_DATA_PATH, \"{}.json\".format(identifier))\n if os.path.isfile(file_path):\n continue\n associated_genes = get_associated_genes(identifier)\n content = {\"identifier\": get_string_db_identifier(identifier), \"data\": associated_genes}\n with open(file_path, 'w') as f:\n f.write(json.dumps(content, sort_keys=True, indent=4, separators=(',', ': ')))\n print(\"Saved associated genes for {}\".format(identifier))", "def create_json_stash(self, path, node_params):\n json_file_path = os.path.join(path, self.name + '.json')\n\n if not os.path.isdir(path):\n os.makedirs(path)\n\n if os.path.isfile(json_file_path):\n return json_file_path\n\n with open(json_file_path, 'w+') as json_file:\n\n json_file.write(json.dumps(node_params, indent=4))\n\n self.json_stash = json_file_path\n\n return json_file_path", "def dump_to_json(fileinfos, out):\n jsonarray = json.dumps(fileinfos)\n json_filename = \"all_elements_used.json\"\n text_file = open(os.path.join(out,out_dir_name,json_filename), \"w\")\n text_file.write(jsonarray)\n text_file.close()\n stdout.write(\"... 
\"+json_filename+\" created\\n\")", "def _dumpJson(self, data, file):\n name, ext = os.path.splitext(file)\n tempFile = \"{0}.tmp\".format(name)\n with open(tempFile, \"w\") as f:\n json.dump(data, f, indent=4)\n shutil.copyfile(tempFile, file)\n os.remove(tempFile)", "def write_json_file(self, fname, content):\n pass", "def write_json_file(self, fname, content):\n pass", "def writeProductsToFile():\n # Call getProducts() and save file as JSON. Could also put to DB or use MyJson, but writing to file for simplicity right now\n data = getRequest(getProducts())\n\n # Try to create file\n try:\n # Store data in JSON file \n with open('products.json', 'w') as outfile:\n json.dump(data, outfile)\n print(\"JSON file for products created!\")\n except:\n print(\"Could not dump JSON to file\")\n raise", "def write_node_shp(self,shpname,extra_fields=[]):\n assert len(extra_fields)==0 # not yet supported!\n\n # zero-based index of node (why does write_edge_shp create 1-based ids?)\n base_dtype = [('node_id',np.int32)]\n\n node_geoms=[geometry.Point( self.nodes['x'][i] )\n for i in self.valid_node_iter() ]\n\n node_data=self.nodes[~self.nodes['deleted']].copy()\n\n # don't need to write all of the original fields out:\n node_data=utils.recarray_del_fields(node_data,['x','deleted'])\n\n wkb2shp.wkb2shp(shpname,input_wkbs=node_geoms,fields=node_data,\n overwrite=True)", "def write_json(tables, file: str, only_var=False) -> None:\n with Path(file).open(\"w\", encoding=\"utf-8\") as __f:\n dump(tables.var if only_var else tables, __f, indent=\" \")", "def export_json(contents, filename):\n with open(filename, 'w') as f:\n json.dump(contents, f)", "def write_json_file(self, file, content):\n with open(file, \"w\", encoding=\"utf-8\") as f:\n json.dump(content, f, indent=2)", "def save_node(self, node: Node):", "def save_json(node):\n return _api_internal._save_json(node)", "def write(fname, data):\n # Encode to string.\n encoder = NumpyJSONEncoder(check_circular=True, indent=' ')\n serial = encoder.encode(data)\n\n # Write to file.\n with open(fname, 'w') as fo:\n fo.write(serial)", "def write_json_file(file_name: str, content: list):\n with open(file_name, 'w+') as file_object:\n json.dump(content, file_object)", "def writeImports2File(self, file, indent = \" \"):\r\n # import each entity and its associated graphical file\r\n for obj in self.listNodes.keys():\r\n file.write(indent+\"from \"+obj+\" import \"+obj+\"\\n\")\r\n if not obj[0:4] == \"ASG_\":\r\n file.write(indent+\"from graph_\"+obj+\" import graph_\"+obj+\"\\n\")", "def save_network(nodes, edges, name, pid):\n d = {'nodes': nodes, 'edges': edges, 'name': name, 'pid': pid}\n\n out_filename = \"{0}-{1}.pickle\".format(\n datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\"),\n pid\n )\n with open(out_filename, 'wb') as out_file:\n pickle.dump(d, out_file)", "def write_triplets(triplets, tool_names, out_fname):\n output = open(out_fname, 'w')\n output.write(json.dumps({'tool_names': tool_names, 'triplets': triplets}))\n output.close()", "def save_data(self):\n try:\n with open('blockchain-{}.txt'.format(self.node_id), mode='w') as f:\n saveable_chain = [block.__dict__ for block in [Block(block_el.index, block_el.previous_hash, \n [tx.__dict__ for tx in block_el.transactions], \n [tx.__dict__ for tx in block_el.chipsactions],\n [tx.__dict__ for tx in block_el.messsactions],\n block_el.proof, block_el.timestamp) for block_el in self.__chain]]\n f.write(json.dumps(saveable_chain))\n f.write('\\n')\n saveable_tx = [tx.__dict__ for tx in 
self.__open_transactions]\n f.write(json.dumps(saveable_tx))\n f.write('\\n')\n saveable_chip = [tx.__dict__ for tx in self.__open_chipsactions]\n f.write(json.dumps(saveable_chip))\n f.write('\\n')\n saveable_chip = [tx.__dict__ for tx in self.__open_messsactions]\n f.write(json.dumps(saveable_chip))\n f.write('\\n')\n f.write(json.dumps(list(self.__peer_nodes)))\n except IOError:\n print('Saving failed!')", "def write_extras(self, f):\n\n # we implement our own version to allow us to store the base\n # state\n\n gb = f.create_group(\"base state\")\n\n for name, state in self.base.items():\n gb.create_dataset(name, data=state.d)", "def json_write(data, path, **kwargs):\n with open(_fsdecode(path), 'wt') as file:\n _json_dump(data, file, **kwargs)", "def save_articles(self, articles):\n for article in articles:\n with open(\n f'{self.data_folder}/articles/{article.get(\"id\")}.json',\n 'w'\n ) as f:\n json.dump(article, f)", "def write_json(self, filename):\n with open(filename, 'a+') as f:\n f.write(json.dumps(self.weights))\n f.write(\"\\n\")", "def visited_nodes_to_file(self):\r\n # Create and write file only if we have something to write\r\n if len(self.visited_node) > 0:\r\n with open('{}'.format(self.path), mode='w') as f:\r\n # Writing line by line to the file\r\n for node, val in self.visited_node:\r\n f.write('{} {}\\n'.format(self.convert_matrix_rastor(node), val))", "def write_tree(tree: dict, path: str) -> None:\n with open(path, mode=\"w\", encoding=\"utf-8\") as file_object:\n file_object.write(json.dumps(tree, indent=4))", "def _write_json(self):\n with open(self._file_path, 'w') as f:\n json.dump(self._content, f, indent=4, separators=None,\n encoding='utf-8', sort_keys=False)", "def _dumpJson(data, file):\n name, ext = os.path.splitext(file)\n tempFile = \"{0}.tmp\".format(name)\n with open(tempFile, \"w\") as f:\n json.dump(data, f, indent=4)\n shutil.copyfile(tempFile, file)\n os.remove(tempFile)", "def save_uids_to_file(uids, **load):\n io.write_file(uids, io.UIDS_FILE, load)", "def save_data(self):\n try:\n with open('blockchain-{}.txt'.format(self.node_id), mode='w') as f:\n # save the block object like a dictionary\n saveable_chain = [block.__dict__ for block in \n [Block(block_el.index, block_el.previous_hash, [tx.__dict__ for tx in block_el.transactions] , block_el.proof, block_el.timestamp) \n for block_el in self.__blockchain]]\n f.write(json.dumps(saveable_chain))\n f.write('\\n')\n saveable_transactions = [tx.__dict__ for tx in self.__open_transactions]\n f.write(json.dumps(saveable_transactions))\n # save the connected nodes\n f.write('\\n')\n f.write(json.dumps(list(self.__peer_nodes))) \n except IOError:\n print('Saving failed')", "def save_digraph_json(graph: nx.DiGraph, f: Union[IO, os.PathLike, str]):\n # Obtain the JSON object to serialize\n json_obj = json.dumps(node_link_data(graph))\n\n # Save the object\n if isinstance(f, (os.PathLike, str)):\n with open(f, 'w', encoding='utf-8') as file:\n file.write(json_obj)\n else:\n f.write(json_obj)", "def save_to_json(self, file_name: str) -> bool:\n nodes = []\n edges = []\n for k, v in self.dw_graph.get_all_v().items():\n k1 = self.dw_graph.get_nodes(k)\n a_node = {\"pos\": k1.position, \"id\": k}\n nodes.append(a_node)\n for edge in self.dw_graph.all_out_edges_of_node(k).values():\n an_edge = {\"src\": edge.src, \"w\": edge.weight, \"dest\": edge.dest}\n edges.append(an_edge)\n new_graph = {\"Edges\": edges, \"Nodes\": nodes}\n try:\n with open(file_name, \"w\") as f:\n json.dump(new_graph, indent=4, 
fp=f)\n return True\n\n except Exception as e:\n print(e)\n return False", "def save_json(file_name: str, content: list):\n with open(file_name, \"w+\") as file_object:\n json.dump(content, file_object)", "def save(self):\n with self.open(self.filename, 'wt') as fd:\n for node in self.elements:\n fd.write(node.text)", "def write(self, service, values):\n with open(os.path.join(self.directory, service), \"w\") as f:\n values = [d.copy() for d in values]\n for d in values:\n # There can be other values in the JSON:\n d[\"extra\"] = 123\n f.write(dumps(values))", "def write_json(filepath, data):\n\n with open(filepath, 'w', encoding='utf-8') as file_obj:\n json.dump(data, file_obj, ensure_ascii=False, indent=2)", "def write_to_nexus(self, out_path, write_annotations=True, nexus_annotations=None, exclude_annotations=[], use_symbols=False):\n if write_annotations and not nexus_annotations:\n if not self.nexus_annotations:\n raise RuntimeError(\"This tree file has no associated annotation file. Either associate or supply one as a parameter.\")\n nexus_annotations = self.nexus_annotations\n if nexus_annotations:\n for node in self.getNodes():\n if node in self.nexus_annotations.node_annotations:\n node.annotate_node(self.nexus_annotations.node_annotations, self.nexus_annotations.annotation_symbols, exclude_annotations, use_symbols)\n tree_annotation = str(self) + \";\"\n self.swap_annotations(\"Original\")\n for node in self.getNodes():\n if node in self.nexus_annotations.leaf_annotations:\n node.annotate_node(self.nexus_annotations.leaf_annotations, exclude_annotations)\n leaves = []\n for node in self.getNodes():\n if node.isLeaf():\n leaves.append(node.label)\n leaf_annotation = \"\"\n for leaf in leaves:\n leaf_annotation += \"\\n\\t%s\" % (leaf)\n with open(out_path, \"w+\") as file:\n file.write(\n \"#NEXUS\\nbegin taxa;\\n\\tdimensions ntax=%d;\\n\\ttaxlabels%s\\n;\\nend;\\n\\nbegin trees;\\n\\ttree tree_1 = \"\n \"[&R] %s\\nend;\" % (len(leaves), leaf_annotation, tree_annotation))", "def write_to_file(path, data):\n with open(path, 'w') as outfile:\n json.dump(data, outfile)", "def to_json_file(self, path):\n with open(path, 'w') as f:\n f.write(self.to_json())", "def _write_content(i, content):\n fpath = io_mgr.get_parties_json(i)\n with open(fpath, 'w') as fstream:\n fstream.write(json.dumps(content, indent=4))", "def make_json_file(data, filepath):\n with open(filepath, 'w') as f:\n json.dump(data, f, indent=4)\n f.close", "def _write(self, filename, data):\n fullpath = os.path.join(self._tempdir, filename)\n with open(fullpath, 'w') as ofile:\n json.dump(data, ofile)\n return fullpath", "def save_friend_nodes(self):\n print \"Exporting to file tsv ...\"\n count_edge = 0\n count_node = 0\n with open('../data/yelp.tsv','w') as f:\n for user in self.df['user_id']:\n for friends in self.df['friends']:\n count_node += 1\n for friend in friends:\n f.write(\"%s\\t%s\\n\" % (user, friend))\n count_edge += 1\n print \"Graph Summary:\", count_node, \"nodes,\", count_edge, \"edges.\"", "def export(fileprefix, hedges):\n with open(fileprefix + '.txt', 'w') as f:\n for h in hedges:\n s = \"\"\n for node in h[0]: #each node in the tail\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n for node in h[1]: #each node in the head\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n s += '1' + '\\n' #assigns weight for the hedge, currently always set to 1\n f.write(s)", "def write_to_json(content, path):\n\n try:\n with open(path + 'followers_friend_&_follower.json', mode='r') as f:\n old_content = 
json.load(f)\n\n total_content = content + old_content\n\n with open(path + 'followers_friend_&_follower.json', mode='w') as f:\n json.dump(total_content, f, indent=4)\n except IOError:\n print 'No file, creating one!'\n with open(path + 'followers_friend_&_follower.json', 'w') as f:\n json.dump(content, f, indent=4)", "def save_claim_data(dest_filedir, node_id, private_key, node_cert, endpointinfo, hex_str, node_info_csv):\n # Create files of each claim data info\n print(\"\\nSaving claiming data info at location: \", dest_filedir)\n log.debug(\"Saving claiming data info at location: \" +\n dest_filedir)\n try:\n log.debug(\"Writing node info at location: \" + dest_filedir +\n 'node.info')\n # Create files for each claim data - node info, node key,\n # node cert, endpoint info\n with open(dest_filedir+'node.info', 'w+') as info_file:\n info_file.write(node_id)\n\n log.debug(\"Writing node info at location: \" +\n dest_filedir + 'node.key')\n with open(dest_filedir+'node.key', 'wb+') as info_file:\n info_file.write(private_key)\n\n log.debug(\"Writing node info at location: \" +\n dest_filedir + 'node.crt')\n with open(dest_filedir+'node.crt', 'w+') as info_file:\n info_file.write(node_cert)\n\n log.debug(\"Writing node info at location: \" +\n dest_filedir + 'endpoint.info')\n with open(dest_filedir+'endpoint.info', 'w+') as info_file:\n info_file.write(endpointinfo)\n\n log.debug(\"Writing node info at location: \" +\n dest_filedir + 'node_info.csv')\n with open(dest_filedir+'node_info.csv', 'w+') as info_file:\n for input_line in node_info_csv:\n info_file.write(input_line)\n info_file.write(\"\\n\")\n\n save_random_hex_str(dest_filedir, hex_str)\n except Exception as file_error:\n raise file_error", "def save_data(file_to_save, object_to_serialize):\r\n with open(file_to_save, \"w\", encoding=\"utf-8\") as f:\r\n f.write(json.dumps(object_to_serialize, indent=2, ensure_ascii=False))", "def save(self, filename: str, extention='csv', zipmode='w', **kwargs):\n if extention not in self._extention_types:\n raise Exception('Unsupported extention: ' + extention)\n\n if not filename.endswith('.tar'):\n filename += '.tar'\n\n delimiter = self._extention_types[extention]\n\n nodes_content = self.export_nodes().to_csv(sep=delimiter, index=False)\n edges_content = self.export_edges().to_csv(sep=delimiter, index=False)\n\n nodes_file_name = 'nodes.' + extention\n edges_file_name = 'edges.' 
+ extention\n\n def add_to_tar(tar, filename, filecontent):\n content = filecontent.encode()\n with TemporaryFile() as tmp:\n tmp.write(content)\n tmp.seek(0)\n info = tarfile.TarInfo(name=filename)\n info.size = len(content)\n tar.addfile(tarinfo=info, fileobj=tmp)\n\n make_path(filename)\n with tarfile.open(name=filename, mode=zipmode) as tar:\n add_to_tar(tar, nodes_file_name, nodes_content)\n add_to_tar(tar, edges_file_name, edges_content)\n\n return filename", "def save_json(file_name, file_content):\n with open(generate_file_path(\"output\", file_name), 'w', encoding='utf-8') as f:\n json.dump(file_content, f, ensure_ascii=False, indent=4)", "def write_json(data, filepath):\n with open(filepath, \"w\") as f:\n content = json.dumps(data, indent=3)\n f.write(content + '\\n')", "def dump_graph(data, vertex_getter, edge_getter, filename=None):\n\n if not filename:\n filename = '%s.txt' % datetime.now().strftime('%m-%d-%Y_%H-%M-%S')\n\n data = {vertex_getter(node): edge_getter(node) for node in data}\n\n with open(filename, 'w') as json_file:\n json_file.write(json.dumps(data))\n\n return filename", "def save_json(self, file: Union[str, TextIO]) -> None:\n if hasattr(file, 'write'):\n file_ctx = nullcontext(file)\n else:\n file_ctx = open(file, 'w')\n\n with file_ctx as fp:\n for d in self:\n json.dump(d.dict(), fp)\n fp.write('\\n')", "def serialize(self):\n keys = [\n 'uid',\n 'commit_sha',\n 'timestamp',\n 'filename',\n 'comment',\n 'train_data',\n 'val_data',\n 'test_data',\n 'model_files',\n 'custom_data',\n ]\n data = {key: self.__dict__[key] for key in keys}\n with open(os.path.join(self.root_path, self._data_file), 'w') as file:\n json.dump(data, file)", "def write_json(obj_to_write: Any, filename: str):\n \n with open(filename, 'w') as json_file:\n json.dump(obj_to_write, json_file, indent=4)", "def write_to_file(info: List[str]) -> None:\n return", "def save_to_geojson(self, topology_map, filename):", "def nodes(self, nodes):\n global g_npoints\n for osmid, tags, (lng, lat) in nodes:\n if 'name' in tags:\n\n # Build a synthetic value by copying the tags and\n # adding osmid, latitude and longitude.\n valobj = tags.copy()\n valobj['osmid'] = osmid\n valobj['latitude'] = lat\n valobj['longitude'] = lng\n valstr = json.dumps(valobj)\n\n # Construct a GeoJSON bin value to be indexed.\n locobj = { 'type': \"Point\", 'coordinates': [ lng, lat ] }\n locgeo = aerospike.GeoJSON(locobj)\n\n # Make a hash of the id to use for random selection.\n hshval = self.id_to_hash(osmid)\n\n key = (self.args.nspace, self.args.set, osmid)\n \n self.client.put(key, { VALBIN: valstr,\n LOCBIN: locgeo,\n MAPBIN: valobj,\n HSHBIN: hshval },\n policy={ 'timeout': 10000,\n 'retry': 10 })\n\n self.npoints += 1\n if self.npoints % 1000 == 0:\n sys.stderr.write('.')", "def appendJson(filepath,entry):\n with open(filepath,mode='r', encoding='utf-8') as f:\n jconfig = json.load(f)\n\n with open(filepath,mode='w', encoding='utf-8') as feedsjson:\n jconfig.append(entry)\n print(json.dumps(jconfig,indent=2))\n json.dump(jconfig,feedsjson)", "def save_joint_configurations_to_file(commander: moveit_commander.MoveGroupCommander):\n\n POSE_FILE_DIR.mkdir(parents=True, exist_ok=True)\n\n pose_file, temp_file = pose_file_paths(commander)\n\n temp_file.write_text(json.dumps(commander.get_remembered_joint_values(), indent=4, sort_keys=True))\n temp_file.rename(pose_file)", "def make_json(neuron, file_path, axon_tracing=None, dendrite_tracing=None):\n\t# parse axon\n\tif axon_tracing is not None:\n\t\tnodes = 
axon_tracing[0]['nodes']\n\t\taxon = [\n\t\t\tdict(\n\t\t\t\tsampleNumber = n['sampleNumber'],\n\t\t\t\tx = n['x'],\n\t\t\t\ty = n['y'],\n\t\t\t\tz = n['z'],\n\t\t\t\tradius = n['radius'],\n\t\t\t\tparentNumber = n['parentNumber'],\n\t\t\t)\n\t\t\tfor n in nodes]\n\telse:\n\t\taxon = []\n\n\t# parse dendrites\n\tif dendrite_tracing is not None:\n\t\tnodes = dendrite_tracing[0]['nodes']\n\t\tdendrite = [\n\t\t\tdict(\n\t\t\t\tsampleNumber = n['sampleNumber'],\n\t\t\t\tx = n['x'],\n\t\t\t\ty = n['y'],\n\t\t\t\tz = n['z'],\n\t\t\t\tradius = n['radius'],\n\t\t\t\tparentNumber = n['parentNumber'],\n\t\t\t)\n\t\t\tfor n in nodes]\n\telse:\n\t\tdendrite = []\n\n\tcontent = dict(\n\t\tneurons = [\n\t\t\tdict(\n\t\t\t\tidString = neuron['idString'],\n\t\t\t\tsoma = dict(\n\t\t\t\t\tx = neuron['soma'].x,\n\t\t\t\t\ty = neuron['soma'].y,\n\t\t\t\t\tz = neuron['soma'].z,\n\t\t\t\t),\n\t\t\t\taxon = axon, \n\t\t\t\tdendrite = dendrite,\n\n\t\t\t)\n\t\t]\n\t)\n\n\t# save to file\n\twith open(file_path, 'w') as f:\n\t\tjson.dump(content, f)", "def write_embeddings_to_file(self):\n modes = [self.generator, self.discriminator]\n for i in range(2):\n embedding_matrix = modes[i].embedding_matrix\n embedding_matrix = embedding_matrix.detach().to('cpu').numpy()\n index = np.array(range(self.n_node)).reshape(-1, 1)\n embedding_matrix = np.hstack([index, embedding_matrix])\n embedding_list = embedding_matrix.tolist()\n embedding_str = [str(int(emb[0])) + \"\\t\" + \"\\t\".join([str(x) for x in emb[1:]]) + \"\\n\" \n for emb in embedding_list]\n with open(config.emb_filenames[i], \"w+\") as f:\n lines = [str(self.n_node) + \"\\t\" + str(config.n_emb) + \"\\n\"] + embedding_str\n f.writelines(lines)", "def write_rels(dict_3, files1):\n path = f'{output_path}/ppt/_rels/presentation.xml.rels'\n root, tree = gen_tree(path)\n for i in files1:\n val = dict_3[i]\n tag, Id, Type, target = val\n ele = etree.Element(tag)\n etree.SubElement(root, tag, Id=Id, Type=Type, Target=target)\n tree.write(path, pretty_print=True, xml_declaration=True, encoding='UTF-8', standalone=True)\n return", "def _write_json(\n output_path, records\n):\n output_path.write_text(json.dumps(records))", "def write_complex_json(filepath, obj):\n\n with open(filepath, 'w', encoding='utf-8') as file_obj:\n json.dump(obj, file_obj, cls=ExtendedEncoder, ensure_ascii=False, indent=2)", "def dumpCoordinatesToFile(self, coordinate_ids): \n coordinate_ids_dump_file = open(self.COORDINATES_DUMP_FNAME , 'w')\n pickle.dump(coordinate_ids, coordinate_ids_dump_file)\n coordinate_ids_dump_file.close()", "def write(self, _filepath=None):\n _json_txt = json.dumps(self.json_dict, indent=2)\n self._write_json_text(_json_txt, _filepath)", "def store_metadata(fh, metadata):\n json.dump(metadata, fh, indent=2)", "def _create_init_values_file(self, node_folder, init_values):\n init_json = os.path.join(node_folder, self.INIT_VALUES_FILE)\n with open(init_json, \"w\") as outfile:\n json.dump(init_values, outfile)", "def test_newFile(self):\n self.write(\"service1.json\", [{\"host\": \"host1\", \"port\": 123},\n {\"host\": \"host2\", \"port\": 124}])\n self.write(\"service2.json\", [])\n self.pump()\n self.assertNodesEqual(\n knownNodes(self.disco, \"service1\", \"staging\"),\n [self.node(\"service1\", \"host1\", 123),\n self.node(\"service1\", \"host2\", 124)])", "def write(self, model, **kwargs):\n self.section_line_list = []\n self.node_string_list = []\n self.node_connector_string_list = []\n self.node_connector_string_mapping = (\n {}\n ) # A mapping of the node and 
index to the section\n self.bus_string_list = (\n []\n ) # Only used for nodes - not nodes derived from PV, Loads or Capacitors\n self.nodeID_list = []\n self.sectionID_list = []\n self.section_feeder_mapping = {}\n self.section_line_feeder_mapping = {}\n self.section_headnode_mapping = {}\n\n # Verbose print the progress\n if \"verbose\" in kwargs and isinstance(kwargs[\"verbose\"], bool):\n self.verbose = kwargs[\"verbose\"]\n else:\n self.verbose = False\n\n # Writing the load file\n if self.verbose:\n logger.info(\"Writing the load file...\")\n self.write_load_file(model, **kwargs)\n\n # Writing the network file\n if self.verbose:\n logger.info(\"Writing the network file...\")\n self.write_network_file(model, **kwargs)\n\n # Writing the equipment file\n if self.verbose:\n logger.info(\"Writing the equipment file...\")\n self.write_equipment_file(model, **kwargs)", "def save_tree_to_file(tree: dict, fname: str = 'tree') -> None:\r\n fid = open(fname + \".pkl\", \"wb\")\r\n pickle.dump(tree, fid)\r\n fid.close()", "def save_to_file(cls, list_objs):\n namefile = cls.__name__ + \".json\"\n rep_list = []\n if list_objs is not None and list_objs != []:\n for item in list_objs:\n repre = cls.to_dictionary(item)\n # rep_list.append(cls.to_json_string(repre))\n rep_list.append(repre)\n\n with open(namefile, \"w\", encoding=\"UTF-8\") as f:\n # json.dump(rep_list, f)\n f.write(cls.to_json_string(rep_list))", "def save_node(self, node: Union[dict, Node]) -> Node:", "def save_to_json(self, file_name: str) -> bool:\n flag = True\n with open(file_name, \"w\") as jsonFile:\n try:\n d = {\"Edges\": [], \"Nodes\": []}\n for src in self._graph.out_edges.keys():\n for dst, w in self._graph.all_out_edges_of_node(src).items():\n d[\"Edges\"].append({\"src\": src, \"w\": w.weight, \"dest\": dst})\n for key, value in self._graph.nodes.items():\n if value.location is None:\n d[\"Nodes\"].append({\"id\": key})\n else:\n d[\"Nodes\"].append({\"pos\": str(value.location), \"id\": key})\n s = d.__str__()\n s = s.replace(\" \", \"\")\n s = s.replace(\"'\", \"\\\"\")\n jsonFile.write(s)\n # print(\"Save Json was succeeded \")\n except Exception as e:\n print(\"Save Json was failed \")\n print(e)\n flag = False\n finally:\n return flag", "def write_dgl_objects(\n graph_obj,\n node_features,\n edge_features,\n output_dir,\n part_id,\n orig_nids,\n orig_eids,\n formats,\n sort_etypes,\n):\n part_dir = output_dir + \"/part\" + str(part_id)\n os.makedirs(part_dir, exist_ok=True)\n write_graph_dgl(\n os.path.join(part_dir, \"graph.dgl\"), graph_obj, formats, sort_etypes\n )\n\n if node_features != None:\n write_node_features(\n node_features, os.path.join(part_dir, \"node_feat.dgl\")\n )\n\n if edge_features != None:\n write_edge_features(\n edge_features, os.path.join(part_dir, \"edge_feat.dgl\")\n )\n\n if orig_nids is not None:\n orig_nids_file = os.path.join(part_dir, \"orig_nids.dgl\")\n dgl.data.utils.save_tensors(orig_nids_file, orig_nids)\n if orig_eids is not None:\n orig_eids_file = os.path.join(part_dir, \"orig_eids.dgl\")\n dgl.data.utils.save_tensors(orig_eids_file, orig_eids)", "def to_file(self, fp):\n dict_ = self.serialize()\n with open_file(fp, mode='w') as writer:\n json.dump(dict_, writer, indent=2)", "def json_dump(data, fp, pretty=False, compact=False, minimal=False):\n DataEncoder.minimal = minimal\n\n with _iotools.open_file(fp, \"w\") as f:\n kwargs = {}\n\n if pretty:\n kwargs[\"sort_keys\"] = True\n kwargs[\"indent\"] = 4\n if compact:\n kwargs[\"indent\"] = None\n kwargs[\"separators\"] = 
(\",\", \":\")\n\n return json.dump(data, f, cls=DataEncoder, **kwargs)", "def generateOutputfile(filePath, fileContent):\n with open(os.path.join(filePath), 'w') as fp:\n json.dump(fileContent, fp, indent = 2)", "def save_data_to_file(file_name, list_of_product_objects):\r\n try:\r\n objF = open(file_name, \"w\")\r\n for row in list_of_product_objects:\r\n objF.write(str(row[0]) + \",\" + str(row[1]) + \"\\n\")\r\n objF.close()\r\n except IOError:\r\n print(\"Unable to locate file\")", "def save_to_file(cls, list_objs):\n filename = cls.__name__ + \".json\"\n with open(filename, \"w\") as f:\n if list_objs is None:\n f.write(\"[]\")\n else:\n d = [x.to_dictionary() for x in list_objs]\n f.write(Base.to_json_string(d))", "def write_new_nodes(self, nodes):\n\n if len(self._cache) > self._cache_max_size:\n # The size of the cache has exceeded the threshold. Discard the\n # old cache values (but still store the new nodes into the\n # cache):\n logger.debug('Clearing node cache')\n self._cache.clear()\n\n data = {}\n max_node_id = 0\n for node in nodes:\n max_node_id = max(max_node_id, node.id)\n data[node.id] = self._dump(node._entries)\n self._cache[node.id] = node._entries\n\n self.db[len(self._max_node_ids)] = data\n\n if max_node_id == 0:\n # Rewrite last value:\n self._max_node_ids.append(self._max_node_ids[-1])\n else:\n self._max_node_ids.append(max_node_id)", "def save_per_gene(filename, tn_per_gene, reads_per_gene, aliases_designation):\n\n with open(filename, \"w\") as f:\n\n f.write(\"Gene name\\tNumber of transposons per gene\\tNumber of reads per gene\\n\")\n\n for gene in tn_per_gene:\n tnpergene = tn_per_gene[gene]\n readpergene = reads_per_gene[gene]\n if gene in aliases_designation:\n gene_alias = aliases_designation.get(gene)[0]\n else:\n gene_alias = gene\n f.write(gene_alias + \"\\t\" + str(tnpergene) + \"\\t\" + str(readpergene) + \"\\n\")", "def save_file(data, filename):\n with open(filename, \"w\") as outfile:\n json.dump(data, outfile)" ]
[ "0.6882683", "0.6683414", "0.6664386", "0.6651517", "0.664519", "0.64958185", "0.6158971", "0.5804926", "0.56545246", "0.5637596", "0.56242365", "0.5573552", "0.5563774", "0.55553705", "0.5554492", "0.5551853", "0.5548628", "0.5515836", "0.55090535", "0.5494786", "0.54754364", "0.54623634", "0.5442637", "0.5442637", "0.54394776", "0.54345083", "0.54229504", "0.542062", "0.5415291", "0.54023105", "0.5395926", "0.5386612", "0.5379975", "0.5374967", "0.5366976", "0.5365397", "0.53484285", "0.5348406", "0.53483105", "0.53437644", "0.5336797", "0.5332955", "0.53193873", "0.53075707", "0.52943563", "0.5283634", "0.52826023", "0.5274229", "0.5270707", "0.52584064", "0.52544415", "0.5248273", "0.5246877", "0.52439547", "0.5243317", "0.523506", "0.5223842", "0.52140737", "0.52109075", "0.52051884", "0.5204665", "0.51981807", "0.5186645", "0.51861584", "0.51803124", "0.51754344", "0.5170663", "0.5163179", "0.51573426", "0.5155679", "0.5153411", "0.5145838", "0.5139095", "0.51233643", "0.5107695", "0.5099318", "0.5098838", "0.50921917", "0.5083076", "0.5082607", "0.50573933", "0.5056956", "0.5049905", "0.5049309", "0.5045999", "0.50430185", "0.50421166", "0.50373983", "0.50315994", "0.50258845", "0.5025851", "0.5023447", "0.50175047", "0.5017364", "0.5016221", "0.50145805", "0.50139385", "0.50036067", "0.4998736", "0.49979123" ]
0.846316
0
reads in node uuids and extras from a file and applies them to nodes in the DB. This is useful for import/export because currently extras are lost. Therefore this can be used to save and restore the extras on the nodes.
def import_extras(filename): all_extras = {} # read file #inputfile = open(filename, 'r') #lines = inputfile.readlines() #for line in lines[1:]: # splitted = line.split(' | ') # uuid = splitted[0].rstrip(' ') # extras = splitted[1].rstrip(' ') # #extras = dict(extras) # print(extras) # all_extras[uuid] = extras #inputfile.close() try: all_extras = json.load(open(filename)) except: print('The file has to be loadable by json, i.e. json format (which it is not).') for uuid, extras in all_extras.iteritems(): try: node = load_node(uuid) except: # Does not exist print('node with uuid {} does not exist in DB'.format(uuid)) node = None continue if isinstance(node, Node): node.set_extras(extras) else: print('node is not an instance of an AiiDA node') #print(extras) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_extras(nodes, filename='node_extras.txt'):\n\n #outstring = ''#' node uuid | extras \\n'\n outdict = {}\n for node in nodes:\n if isinstance(node, int): #pk\n node = load_node(node)\n elif isinstance(node, basestring): #uuid\n node = load_node(node)\n\n if not isinstance(node, Node):\n print('skiped node {}, is not an AiiDA node, did not know what to do.'.format(node))\n continue\n uuid = node.uuid\n extras_dict = node.get_extras()\n outdict[uuid] = extras_dict\n #line = '{} | {}\\n'.format(uuid, extras_dict)\n #outstring = outstring + line\n\n #outfile = open(filename, 'w')\n #outfile.write(outstring)\n #outfile.close()\n json.dump(outdict, open(filename,'w'))\n return", "def read_node_features_file(nodes_features_file):\n\n node_features = dgl.data.utils.load_tensors(nodes_features_file, False)\n return node_features", "def import_data(self, filename=None, rawdata=None, append=False):\n \n if filename:\n with open(filename,\"r\") as f:\n data = f.read()\n elif rawdata:\n data = rawdata\n else:\n raise Exception(\"No data given\")\n\n if not append:\n self.nodelist = []\n\n d = deserialize(data, self.consolidator)\n self.nodelist += list(d.nodes.values())\n if append:\n self.domain_obj = None #mark as outdated\n else:\n self.domain_obj = d", "def load_recipes_from_file(cls, args):\n with open(args.recipes_file, 'r') as f:\n reader = csv.DictReader(f)\n for row in reader:\n cls._recipes.append(row)\n cls._add_indices_to_recipes()\n cls._initialize_recipes_status()\n logging.info(\"Recipes loaded.\")", "def _read_node_file(self):\n self.node_df = gt.remove_colons(pd.read_csv(self.node_file, dtype=str))", "def load_entity_data(self, data_file):\n load_xml_seed(data_file)", "def load_nodes(filename):\n\n with open(filename) as f:\n reader = csv.DictReader(f)\n return [item for item in reader]", "def load_nodes(path):\n global parents\n with open(path, 'r') as r:\n for line in r:\n (taxid, parent, other) = re.split(r'\\s*\\|\\s*', line.strip('|\\n\\t '), 2)\n parents[taxid] = parent", "def read_relations(db, openfile):\n pass", "def import_data(self, filename=None, rawdata=None, append=False):\n if filename:\n tree = ET.parse(filename)\n elif rawdata:\n tree = ET.ElementTree(ET.fromstring(rawdata))\n else:\n raise Exception(\"No data given\")\n\n root = tree.getroot()\n features = root.find(\"Features\")\n\n if not append:\n self.nodelist = []\n\n feature_id_table = {}\n\n # map all feature ids to name\n for feature in features.iter('Feature'):\n feature_id_table[feature.attrib[\"id\"]] = feature.attrib[\"data\"]\n\n # build relation structure\n for feature in features.iter('Feature'):\n fobj = Node(feature.attrib[\"data\"])\n tmp = feature.find('description')\n if tmp != None:\n fobj.text = tmp.text\n else:\n tmp = feature.find('speak')\n if tmp != None:\n fobj.text = tmp.text\n neighbors = feature.find('neighbors')\n for neighbor in neighbors.iter('neighbor'):\n fobj.add_relation(\n neighbor.attrib['relationship'],\n feature_id_table[neighbor.attrib['dest']])\n self.nodelist.append(fobj)", "def load(self, filename):\n\t\tf = open(filename).read().split(\"\\n\")\n\n\t\tfor item in f:\n\t\t\tcommand = item.split(\":\")\n\n\t\t\t# Add node\n\t\t\tif len(command) == 2:\n\t\t\t\t_id = command[0].strip()\n\t\t\t\t_label = command[1].strip() or None\n\n\t\t\t\t# Duplicate id\n\t\t\t\tif _id in self.nodes:\n\t\t\t\t\traise ValueError\n\n\t\t\t\t# Add node\n\t\t\t\tself.nodes[_id] = Node(_id, _label)\n\n\t\t\t# Add link\n\t\t\telif len(command) == 3:\n\t\t\t\t_from = 
command[0].strip()\n\t\t\t\t_label = command[1].strip() or None\n\t\t\t\t_to = command[2].strip()\n\n\t\t\t\t# Non-existent Nodes\n\t\t\t\tif _from not in self.nodes or _to not in self.nodes:\n\t\t\t\t\traise ValueError\n\n\t\t\t\tself.nodes[_from].add_neighbour(self.nodes[_to], _label)", "def load_users():\n filepath = \"./seed_data/u.user\"\n users = open(filepath)\n\n\n for user in users:\n user = user.rstrip().split('|')\n db_user = User(user_id=user[0], age=user[1], zipcode=user[4])\n db.session.add(db_user)\n\n db.session.commit()", "def add_graph_attributes(G, filename):\n Ef = dict() # feature -> edges\n Nf = dict() # node -> features\n with open(filename) as f:\n for line in f: # for each node, list of features it belongs to\n d = line.split()\n u = int(d[0])\n features = d[1:]\n for f in features:\n Ef.setdefault(f, []).extend(G.in_edges(u)) # add feature-dependent edges\n #G.node[u]['Fu'] = features\n G.nodes[u]['Fu'] = features\n Nf[u] = features\n print('Read graph attributes')\n return Ef, Nf", "def get_nodes(self):\n with open('node_list.txt', 'r') as file:\n self.nodes = [line.rstrip('\\n') for line in file]", "def create_from_files():\n logging.info('\"Create from files\" task started using config file %s', args.config)\n file_dir_path = config['input_dir']\n files = os.listdir(file_dir_path)\n\n for file_name in files:\n filename_without_extension = os.path.splitext(file_name)[0]\n if len(filename_without_extension) > 255:\n message = 'Truncating the filename \"' + filename_without_extension + '\" since it exceeds Drupal\\'s maximum node title length of 255 characters.'\n logging.error(message)\n filename_without_extension = filename_without_extension[:255]\n\n islandora_model = set_model_from_extension(file_name, config)\n\n node_json = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': filename_without_extension}\n ],\n 'status': [\n {'value': config['published']}\n ],\n 'field_model': [\n {'target_id': islandora_model,\n 'target_type': 'taxonomy_term'}\n ]\n }\n\n node_headers = {\n 'Content-Type': 'application/json'\n }\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node_json, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n print('+ Node for \"' + filename_without_extension + '\" created at ' + node_uri + '.')\n logging.info('Node for \"%s\" created at %s.', filename_without_extension, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, '', node_response.text)\n\n file_path = os.path.join(config['input_dir'], file_name)\n media_type = set_media_type(file_path, config)\n media_response_status_code = create_media(config, file_name, node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+ ' + media_type.title() + \" media for \" + filename_without_extension + \" created.\")\n logging.info(\"Media for %s created.\", file_path)\n else:\n logging.error('Node for \"%s\" not created, HTTP response code was %s.', os.path.join(config['input_dir'], file_name), node_response.status_code)", "def add_from_uuid_list(self):\n\n uuids = self._read_file()\n if not uuids:\n return\n\n for uuid in uuids:\n uuid = uuid.split('\\n')[0]\n\n # Checks if lenght of the uuid is correct\n if not check_uuid_authenticity(uuid):\n self.report.add('Invalid uuid lenght.')\n continue\n \n 
self.add_record.push_record_by_uuid(self.global_counters, uuid)\n return", "def load_ratings():\n filepath = \"./seed_data/u.data\"\n ratings = open(filepath)\n\n for rating in ratings:\n rating = rating.rstrip().split()\n\n db_rating = Rating(movie_id=rating[1], user_id=rating[0],\n score=rating[2])\n db.session.add(db_rating)\n\n db.session.commit()", "def add_users_from_file(args):\n with open(args.users_from_file) as file:\n for line in file:\n name, email_address = line.split(',')\n add_user(name, email_address.strip().lower())", "def main(load: bool, load_only: bool, force: bool, with_sudo: bool):\n paths = []\n na = NodeAssembler()\n for processor_cls in processor_resolver:\n if not processor_cls.importable:\n continue\n click.secho(f\"Checking {processor_cls.name}\", fg=\"green\", bold=True)\n if not load_only:\n if (\n force\n or not processor_cls.nodes_path.is_file()\n or not processor_cls.nodes_indra_path.is_file()\n or not processor_cls.edges_path.is_file()\n ):\n processor = processor_cls()\n click.secho(\"Processing...\", fg=\"green\")\n # FIXME: this is redundant, we get nodes twice\n nodes = list(processor.get_nodes())\n processor.dump()\n else:\n click.secho(\"Loading cached nodes...\", fg=\"green\")\n with open(processor_cls.nodes_indra_path, \"rb\") as fh:\n nodes = pickle.load(fh)\n na.add_nodes(nodes)\n\n paths.append((processor_cls.nodes_path, processor_cls.edges_path))\n\n nodes_path = pystow.module(\"indra\", \"cogex\", \"assembled\").join(name=\"nodes.tsv.gz\")\n if not load_only:\n if force or not nodes_path.is_file():\n # Now create and dump the assembled nodes\n assembled_nodes = na.assemble_nodes()\n assembled_nodes = sorted(assembled_nodes, key=lambda x: (x.db_ns, x.db_id))\n Processor._dump_nodes_to_path(assembled_nodes, nodes_path)\n\n if load or load_only:\n sudo_prefix = \"\" if not with_sudo else \"sudo\"\n command = dedent(\n f\"\"\"\\\n {sudo_prefix} neo4j-admin import \\\\\n --database=indra \\\\\n --delimiter='TAB' \\\\\n --skip-duplicate-nodes=true \\\\\n --skip-bad-relationships=true \\\\\n --nodes {nodes_path}\n \"\"\"\n ).rstrip()\n for _, edge_path in paths:\n command += f\"\\\\\\n --relationships {edge_path}\"\n\n click.secho(\"Running shell command:\")\n click.secho(command, fg=\"blue\")\n os.system(command) # noqa:S605", "def populate(infile):\n main(infile)", "def seed_user_data(filename):\n\n #open file and go through it line by line\n log_file = open(filename)\n\n for line in log_file:\n data = line.strip().split(\"|\") #data is a list\n\n #get data from split line\n id = int(data[0])\n age = int(data[1])\n zip = data[4]\n\n #create a new record and add it to the queue\n new_user = User(user_id=id, age=age, zipcode=zip)\n db.session.add(new_user)\n\n #commit changes\n db.session.commit()", "def getNodeData(self, file):\n\n with open('./data/{}.json'.format(file), 'r') as json_file:\n try:\n objs = []\n data = json_file.read()\n dic = json.loads(data)['data']\n for i in dic:\n objs.append(Entity(i['id'], i['name']))\n return objs\n except Exception as e:\n print(e)", "def loadTaxi(file):\n arr = []\n with open(file, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n row['fare_amount'] = float(row['fare_amount'])\n row['pickup_longitude'] = float(row['pickup_longitude'])\n row['pickup_latitude'] = float(row['pickup_latitude'])\n row['dropoff_longitude'] = float(row['dropoff_longitude'])\n row['dropoff_latitude'] = float(row['dropoff_latitude'])\n row['pickup_datetime'] = datetime.strptime(\n 
row['pickup_datetime'], '%Y-%m-%d %H:%M:%S %Z')\n arr.append(row)\n\n inserted_ids = db.taxi.insert_many(arr).inserted_ids\n print(\"{} taxi docs inserted\".format(len(inserted_ids)))", "def writeImports2File(self, file, indent = \" \"):\r\n # import each entity and its associated graphical file\r\n for obj in self.listNodes.keys():\r\n file.write(indent+\"from \"+obj+\" import \"+obj+\"\\n\")\r\n if not obj[0:4] == \"ASG_\":\r\n file.write(indent+\"from graph_\"+obj+\" import graph_\"+obj+\"\\n\")", "def load_from_file(self, file):\n\n if (args.replacetopip): #create list of IP addresses and the number of times they occur\n with open(args.dirty) as dirty_file:\n for line in dirty_file:\n ip = self._extract_by_key(line, self._attr_key)\n if (self.ip_dict.has_key(ip)):\n self.ip_dict[ip] += 1\n else:\n self.ip_dict[ip] = 1\n #sort list\n self.top_ip = sorted(self.ip_dict.items(), key=operator.itemgetter(1), reverse=True)\n count = 0\n with open(file) as ip_file:\n for line in ip_file:\n if (args.replacetopip): #replace top IP addresses from the sorted list with new ones from the file\n ip_old = self.top_ip[count][0]\n ip_new = line.strip()\n count += 1\n else:\n ip_old,ip_new = line.split(\",\")\n self._insts[ip_old] = ip_new.strip()", "def seed_rating_data(filename):\n\n #open file and go through it line by line\n log_file = open(filename)\n\n for line in log_file:\n data = line.strip().split(\"\\t\")\n\n #get data from split line\n user_id = data[0]\n movie_id = data[1]\n score = data[2]\n\n #create a new record and add it to the queue\n new_rating = Rating(movie_id=movie_id, user_id=user_id, \n score=score)\n db.session.add(new_rating)\n\n #commit changes\n db.session.commit()", "def fileparse(filename, node):\n\n fd = open(filename)\n line = fd.readline().strip('\\r\\n')\n\n while line != '':\n node.Add(line, node)\n line = fd.readline().strip('\\r\\n')", "def fromfile(self,file):\n self.d.update(params_file(file))", "def import_db(import_file):\n import_data(import_file)", "def read_extras(self, f):\n\n gb = f[\"base state\"]\n for name in gb:\n self.base[name] = Basestate(self.cc_data.grid.ny, ng=self.cc_data.grid.ng)\n self.base[name].d[:] = gb[name]", "def create():\n logging.info('\"Create\" task started using config file %s', args.config)\n input_csv = os.path.join(config['input_dir'], config['input_csv'])\n if os.path.exists(input_csv):\n # Store a dictionary of id_field values: node IDs so we can add child nodes.\n node_ids = dict()\n\n field_definitions = get_field_definitions(config)\n with open(input_csv) as csvfile:\n csv_data = csv.DictReader(csvfile, delimiter=config['delimiter'])\n csv_column_headers = csv_data.fieldnames\n\n node_endpoint = config['host'] + '/node?_format=json'\n\n for row in csv_data:\n row = clean_csv_values(row)\n id_field = row[config['id_field']]\n\n # Add required fields.\n node = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': row['title']}\n ],\n 'status': [\n {'value': config['published']}\n ]\n }\n\n # If a node with an ID that matches the current item's\n # 'parent_id' value has just been created, make the item\n # a child of the node.\n if 'parent_id' in row.keys() and row['parent_id'] in node_ids:\n row['field_member_of'] = node_ids[row['parent_id']]\n\n # Add custom (non-required) CSV fields.\n required_fields = ['file', config['id_field'], 'title']\n custom_fields = list(\n set(csv_column_headers) - set(required_fields))\n for custom_field in custom_fields:\n if not 
isinstance(row[custom_field], str):\n continue\n # Skip updating field if value is empty.\n if len(row[custom_field]) == 0:\n continue\n\n # This field can exist in the CSV to create parent/child\n # relationships and is not a Drupal field.\n if custom_field == 'parent_id':\n continue\n\n # 'langcode' is a core Drupal field, but is not considered a \"base field\".\n if custom_field == 'langcode':\n continue\n\n # Execute field preprocessor scripts, if any are configured. Note that these scripts\n # are applied to the entire value from the CSV field and not split field values,\n # e.g., if a field is multivalued, the preprocesor must split it and then reassemble\n # it back into a string before returning it. Note that preprocessor scripts work only\n # on string data and not on binary data like images, etc. and only on custom fields\n # (so not title).\n if 'preprocessors' in config and len(config['preprocessors']) > 0:\n for field, command in config['preprocessors'].items():\n if field in csv_column_headers:\n output, return_code = preprocess_field_data(config['subdelimiter'], row[field], command)\n if return_code == 0:\n preprocessor_input = copy.deepcopy(row[field])\n row[field] = output.decode().strip()\n logging.info('Preprocess command %s executed, taking \"%s\" as input and returning \"%s\".', command, preprocessor_input, output.decode().strip())\n else:\n message = 'Preprocess command ' + command + ' failed with return code ' + str(return_code)\n logging.error(message)\n sys.exit(message)\n\n # Assemble Drupal field structures for entity reference fields from CSV data. For\n # taxonomy terms, target_type is 'taxonomy_term'; for nodes, it's 'node_type'.\n if field_definitions[custom_field]['field_type'] == 'entity_reference':\n if field_definitions[custom_field]['target_type'] == 'taxonomy_term':\n target_type = 'taxonomy_term'\n field_vocabs = get_field_vocabularies(config, field_definitions, custom_field)\n if config['subdelimiter'] in row[custom_field]:\n prepared_tids = []\n delimited_values = row[custom_field].split(config['subdelimiter'])\n for delimited_value in delimited_values:\n tid = prepare_term_id(config, field_vocabs, delimited_value)\n tid = str(tid)\n prepared_tids.append(tid)\n row[custom_field] = config['subdelimiter'].join(prepared_tids)\n else:\n row[custom_field] = prepare_term_id(config, field_vocabs, row[custom_field])\n row[custom_field] = str(row[custom_field])\n\n if field_definitions[custom_field]['target_type'] == 'node':\n target_type = 'node_type'\n\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = field_values\n else:\n node[custom_field] = [\n {'target_id': row[custom_field],\n 'target_type': target_type}]\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n field_values.append({'target_id': subvalue, 'target_type': target_type})\n node[custom_field] = field_values[:field_definitions[custom_field]['cardinality']]\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n else:\n node[custom_field] = [\n {'target_id': 
row[custom_field],\n 'target_type': target_type}]\n # Cardinality is 1.\n else:\n subvalues = row[custom_field].split(config['subdelimiter'])\n node[custom_field] = [\n {'target_id': subvalues[0],\n 'target_type': target_type}]\n if len(subvalues) > 1:\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # Typed relation fields.\n elif field_definitions[custom_field]['field_type'] == 'typed_relation':\n target_type = field_definitions[custom_field]['target_type']\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = field_value\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_typed_relation_string(config, row[custom_field], target_type)\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = field_value\n # Cardinality is 1.\n else:\n field_values = split_typed_relation_string(config, row[custom_field], target_type)\n node[custom_field] = field_value[0]\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # Geolocation fields.\n elif field_definitions[custom_field]['field_type'] == 'geolocation':\n target_type = field_definitions[custom_field]['target_type']\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = split_geolocation_string(config, row[custom_field])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n field_values.append(subvalue)\n node[custom_field] = field_values\n else:\n field_value = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value\n # Cardinality is 1.\n else:\n field_values = split_geolocation_string(config, row[custom_field])\n node[custom_field] = field_value[0]\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n # For non-entity reference and non-typed relation fields (text, integer, boolean etc.).\n else:\n # Cardinality is unlimited.\n if field_definitions[custom_field]['cardinality'] == -1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = 
row[custom_field].split(config['subdelimiter'])\n for subvalue in subvalues:\n subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], row[custom_field])\n node[custom_field] = [{'value': row[custom_field]}]\n # Cardinality has a limit.\n elif field_definitions[custom_field]['cardinality'] > 1:\n if config['subdelimiter'] in row[custom_field]:\n field_values = []\n subvalues = row[custom_field].split(config['subdelimiter'])\n subvalues = subvalues[:field_definitions[custom_field]['cardinality']]\n if len(subvalues) > field_definitions[custom_field]['cardinality']:\n log_field_cardinality_violation(custom_field, id_field, field_definitions[custom_field]['cardinality'])\n for subvalue in subvalues:\n subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], subvalue)\n field_values.append({'value': subvalue})\n node[custom_field] = field_values\n else:\n row[custom_field] = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], row[custom_field])\n node[custom_field] = [{'value': row[custom_field]}]\n # Cardinality is 1.\n else:\n subvalues = row[custom_field].split(config['subdelimiter'])\n first_subvalue = subvalues[0]\n first_subvalue = truncate_csv_value(custom_field, id_field, field_definitions[custom_field], first_subvalue)\n node[custom_field] = [{'value': first_subvalue}]\n if len(subvalues) > 1:\n log_field_cardinality_violation(custom_field, id_field, '1')\n\n node_headers = {'Content-Type': 'application/json'}\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n print('Node for \"' + row['title'] + '\" (record ' + id_field + ') created at ' + node_uri + '.')\n logging.info(\"Node for %s (record %s) created at %s.\", row['title'], id_field, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, id_field, node_response.text)\n else:\n logging.error(\"Node for CSV record %s not created, HTTP response code was %s.\", id_field, node_response.status_code)\n continue\n\n # Map ID from CSV of newly created node to its node ID so we can use it for linking child nodes, etc.\n if node_response.status_code == 201:\n node_nid = node_uri.rsplit('/', 1)[-1]\n node_ids[id_field] = node_nid\n\n # If there is no media file (and we're not creating paged content), move on to the next CSV row.\n if 'file' in row and len(row['file']) == 0 and config['paged_content_from_directories'] is False:\n print('+No media for ' + node_uri + ' created since its \"file\" field in the CSV is empty.')\n logging.warning(\"No media for %s created since its 'file' field in the CSV is empty.\", node_uri)\n continue\n\n # If there is a media file, add it.\n if 'file' in row:\n file_path = os.path.join(config['input_dir'], row['file'])\n media_type = set_media_type(file_path, config)\n\n if node_response.status_code == 201:\n # If what is identified in the 'file' field is a file, create the media from it.\n if 'file' in row and len(row['file']) != 0 and os.path.isfile(file_path):\n media_response_status_code = create_media(config, row['file'], node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+' 
+ media_type.title() + \" media for \" + row['file'] + \" created.\")\n logging.info(\"%s media for %s created.\", media_type.title(), row['file'])\n\n if 'file' in row and len(row['file']) == 0 and config['paged_content_from_directories'] is False:\n print('+ No file specified in CSV for ' + row['title'])\n logging.info(\"No file specified for %s, so no media created.\", id_field)\n\n if config['paged_content_from_directories'] is True:\n # Console output and logging are done in the create_children_from_directory function.\n create_children_from_directory(config, row, node_nid, row['title'])", "def _read(self, in_file):\n in_file.read(18) # pad bytes\n self.numnod = int(in_file.read(12))\n in_file.read(37) # pad bytes\n self.format = int(in_file.read(1))\n in_file.read(1) # eol\n self.nodes = []\n\n for _ in range(self.numnod):\n node = FRDNode()\n self.nodes.append(node)\n if self.format < 2:\n in_file.read(1)\n node.key = int(in_file.read(2))\n node.number = int(in_file.read(5*(self.format+1)))\n node.pos = [float(in_file.read(12)) for j in range(3)]\n in_file.read(1) # eol\n else:\n node.number = struct.unpack('i', in_file.read(4))[0]\n if self.format == 2:\n node.pos = struct.unpack('fff', in_file.read(12))\n else:\n node.pos = struct.unpack('ddd', in_file.read(24))\n\n if self.format < 2:\n in_file.readline() # last record for ascii only", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def _importNode(self, node):\n if self.environ.shouldPurge():\n self._purgeProperties()\n self._initProperties(node)\n self._logger.info('settings imported.')", "def buildAdjacencyMetdatata(self,filename):\n initial=True\n node=None\n values=[]\n for line in open(filename,'r'):\n params=line.strip().split(\":\")\n if len(params)==4:\n randomvariable=params[0]\n parents=params[2]\n tablelen=params[3]\n if initial:\n initial=False\n node=BayesNode(params[0],params[2],params[3])\n node.buildCPT()\n else:\n self.adjacency[node.id]=node\n if params[0]!=\"\":\n node=BayesNode(params[0],params[2],params[3])\n node.buildCPT()\n else:\n node.setValue(params[0],params[1])", "def _load_file_services(\n neo4j_session: neo4j.Session, file_services: List[Dict], update_tag: int,\n) -> None:\n ingest_file_services = \"\"\"\n UNWIND $file_services_list as fservice\n MERGE (fs:AzureStorageFileService{id: fservice.id})\n ON CREATE SET fs.firstseen = timestamp(), fs.type = fservice.type\n SET fs.name = fservice.name,\n fs.lastupdated = $azure_update_tag\n WITH fs, fservice\n MATCH (s:AzureStorageAccount{id: fservice.storage_account_id})\n MERGE (s)-[r:USES]->(fs)\n ON CREATE SET r.firstseen = timestamp()\n SET r.lastupdated = $azure_update_tag\n \"\"\"\n\n neo4j_session.run(\n ingest_file_services,\n file_services_list=file_services,\n azure_update_tag=update_tag,\n )", "def load_org_notes_file():\n nodelist = orgnode.makelist(NOTES_ORG_FILE)\n return nodelist", "def save_nodes(node, filename):\n\n with open(filename, 'w', newline='') as f:\n writer = csv.DictWriter(f,\n fieldnames=node[0].keys(),\n quoting=csv.QUOTE_ALL)\n writer.writeheader()\n writer.writerows(node)", "def write_extras(self, f):\n\n # we implement our own version to allow us to store the base\n # state\n\n gb = f.create_group(\"base state\")\n\n for name, state in self.base.items():\n gb.create_dataset(name, data=state.d)", "def loadProducts():\n dump = os.path.dirname(os.path.abspath(__file__)) + \"/dump.json\"\n data = open(dump, 'r')\n for deserialized_object in serializers.deserialize(\"json\", data):\n 
deserialized_object.save()", "def nodes(self, nodes):\n global g_npoints\n for osmid, tags, (lng, lat) in nodes:\n if 'name' in tags:\n\n # Build a synthetic value by copying the tags and\n # adding osmid, latitude and longitude.\n valobj = tags.copy()\n valobj['osmid'] = osmid\n valobj['latitude'] = lat\n valobj['longitude'] = lng\n valstr = json.dumps(valobj)\n\n # Construct a GeoJSON bin value to be indexed.\n locobj = { 'type': \"Point\", 'coordinates': [ lng, lat ] }\n locgeo = aerospike.GeoJSON(locobj)\n\n # Make a hash of the id to use for random selection.\n hshval = self.id_to_hash(osmid)\n\n key = (self.args.nspace, self.args.set, osmid)\n \n self.client.put(key, { VALBIN: valstr,\n LOCBIN: locgeo,\n MAPBIN: valobj,\n HSHBIN: hshval },\n policy={ 'timeout': 10000,\n 'retry': 10 })\n\n self.npoints += 1\n if self.npoints % 1000 == 0:\n sys.stderr.write('.')", "def import_db_from_file(capabilities_xml_filename: str):\n\n db_filename: str = get_database_filename()\n\n # Check if the db already exists\n if os.path.isfile(db_filename):\n user_response = input(\n 'Warning: database file already exists! Remove and reload (n/Y)?: '\n ).strip()\n if user_response not in ('y', 'Y', ''):\n return\n os.remove(db_filename)\n logger.debug('%s removed.', db_filename)\n\n conn = sqlite3.connect(db_filename)\n cursor = conn.cursor()\n\n # Create the Database tables\n _create_tables(cursor)\n\n # Parse file\n tree = ET.parse(capabilities_xml_filename)\n root = tree.getroot()\n for icapability in root.findall('capability'):\n capability_id = icapability.attrib['id']\n # If this is a derived capability, inherit from parent capability\n parent = None\n if '.' in capability_id:\n parent = _extract_parent_capability(capability_id, cursor)\n\n _store_capability(capability_id, conn, cursor, icapability, parent)\n _store_preconditions(capability_id, conn, cursor, icapability)\n _store_actions(capability_id, conn, cursor, icapability)\n _store_postconditions(capability_id, conn, cursor, icapability)\n\n conn.close()", "def do_import(files, container_id):\n content = files['fname']['content']\n lines = string.splitfields(content, '\\n')\n for line in lines:\n line = line.strip()\n if line != '':\n username, role = string.splitfields(line, ';')\n try:\n user_id = get_user_by_username(username).id\n role_id = get_role_by_name(role).id\n items = DmsUserUrlRole.objects.filter(user=user_id).filter(container=container_id)\n if len(items) == 0:\n DmsUserUrlRole.save_user_url_role(DmsUserUrlRole(), user_id, container_id, role_id)\n else:\n item = items[0]\n item.role_id = role_id\n item.save()\n except:\n pass", "def populate(filepath: str, key: str, strict_redis: StrictRedis) -> None:\n print(f\"Populating redis from {filepath} under key: '{key}'\")\n \n # Read the file contents\n with open(filepath, 'r') as f:\n lines = f.readlines()\n\n # Strip newline character from each line and ensure all are lowercase\n lines = [line.strip('\\n').lower() for line in lines]\n\n # Add the words to the redis set under the specified key\n strict_redis.sadd(key, *lines)\n\n print(f\"Finished. 
Added {len(lines)} items.\")", "def read_restaurant(file):\r\n name_to_rating = {}\r\n price_to_names = {'$':[], '4$':[],'$$$':[],'$$':[]}\r\n cuisine_to_names = {}", "def readdata(self, filepaths):\n pass", "def loadNetworkFromFile(self, file):\r\n for line in open(file, 'r'):\r\n fromVertex, toVertex, capacity = map(int, line.split())\r\n self.addEdge(fromVertex, toVertex, capacity)", "def load_atom(db_dir):\n current = open(db_dir, \"r\")\n mol2_file = []\n for row in current:\n line = row.split()\n mol2_file.append(line)\n atom_start = mol2_file.index(['@<TRIPOS>ATOM']) + 1\n atom_end = mol2_file.index(['@<TRIPOS>BOND'])\n atom_info=mol2_file[atom_start:atom_end]\n mol=[]\n for line in atom_info:\n atom_type = line[1][0]\n x_y_z = np.asarray(line[2:5], float)\n idx = int(line[0])\n node1 = Node(atom_type, x_y_z, idx)\n mol.append(node1)\n return mol", "def extract_nodes(file_name, file_name_out):\n with open(file_name, 'r') as file_in:\n nodes = {} # dict of player and unique id\n uid = 1\n for line in file_in:\n fields = parse_line(line)\n player = format_name(fields[0])\n if player not in nodes:\n nodes[player] = uid\n uid += 1\n\n with open(file_name_out, 'w') as file_out:\n print('id,label', file=file_out)\n for player in nodes:\n print(nodes[player], player, sep=',', file=file_out)\n\n return nodes", "def load_user():\n\n for i, row in enumerate(open(\"seed_data/role.user\")):\n row = row.rstrip()\n name, description = row.split(\"|\")\n role = RoleModel(name=name, description=description)\n db.session.add(role)\n\n for i, row in enumerate(open(\"seed_data/user.user\")):\n row = row.rstrip()\n name, phone, email, password, confirmed_at, role_id = row.split(\"|\")\n user = UserModel(name=name,\n phone=phone,\n email=email,\n password=password,\n confirmed_at=confirmed_at,\n role_id=role_id)\n db.session.add(user)\n\n # for i, row in enumerate(open(\"seed_data/order.user\")):\n # row = row.rstrip()\n # active, user_id, product_location_id = row.split(\"|\")\n # order = OrderrModel(\n # active=active, \n # user_id=user_id, \n # product_location_id=product_location_id)\n # db.session.add(order)\n\n db.session.commit()", "def importer(self, nodesName = False, edgesName = False, mode = \"authorities\"):\n\n\t\tprint mode\n\t\tif nodesName:\n\t\t\tself.outputNodes = nodesName\n\t\tif edgesName:\n\t\t\tself.outputEdges = edgesName\n\t\t\n\t\tif mode == \"authorities\":\n\t\t\tids = {}\n\t\t\twith open(self.outputNodes, \"rt\") as nodes:\n\t\t\t\ti = 0\n\t\t\t\tfor line in nodes:\n\t\t\t\t\tif i != 0:\n\t\t\t\t\t\tdata = line.split(\";\")\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t#Index : id, label, item (0 = neo4j, 1 = authority), centrality\n\t\t\t\t\t\t\tif int(data[2]) == 1:\n\t\t\t\t\t\t\t\tself.index[\"items\"][data[1]] = []\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.index[\"authorities\"].append(data[1])\n\t\t\t\t\t\t\t\tids[data[0]] = data[1]\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tprint data\n\t\t\t\t\t\t\t#Index : id, label, item (0 = neo4j, 1 = authority), centrality\n\t\t\t\t\t\t\tif int(data[3]) == 1:\n\t\t\t\t\t\t\t\tself.index[\"items\"][data[1]] = []\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.index[\"authorities\"].append(data[1])\n\t\t\t\t\t\t\t\tids[data[0]] = data[1]\n\t\t\t\t\ti += 1\n\n\t\t\tself.index[\"authorities\"] = set(self.index[\"authorities\"])\n\n\t\t\twith open(self.outputEdges, \"rt\") as edges:\n\t\t\t\ti = 0\n\t\t\t\tfor line in edges:\n\t\t\t\t\tif i != 0:\n\t\t\t\t\t\t#source;target\n\t\t\t\t\t\tdata = line.split(\";\")\n\t\t\t\t\t\tif data[0] not in 
self.index[\"items\"]:\n\t\t\t\t\t\t\tself.index[\"items\"][data[0]] = []\n\t\t\t\t\t\tself.index[\"items\"][data[0]].append(ids[data[1].replace(\"\\n\", \"\")])\n\t\t\t\t\ti += 1\n\t\t\n\t\telif mode == \"cluster\":\n\t\t\tprint \"cluster mode\"\n\t\t\t#Nodes\n\t\t\twith open(self.outputNodes, \"rt\") as nodes:\n\t\t\t\ti = 0\n\t\t\t\tfor line in nodes:\n\t\t\t\t\tif i != 0:\n\t\t\t\t\t\tdata = line.split(\";\")\n\t\t\t\t\t\t#Index : id, label, centrality\n\t\t\t\t\t\tself.index[\"items\"][data[1]] = []\n\t\t\t\t\ti += 1\n\t\t\t#Edges\n\t\t\twith open(self.outputEdges, \"rt\") as edges:\n\t\t\t\ti = 0\n\t\t\t\tfor line in edges:\n\t\t\t\t\tif i != 0:\n\t\t\t\t\t\t#source;target\n\t\t\t\t\t\tdata = line.replace(\"\\n\", \"\").split(\";\")\n\t\t\t\t\t\tif data[0] not in self.index[\"items\"]:\n\t\t\t\t\t\t\tself.index[\"items\"][data[0]] = []\n\t\t\t\t\t\tself.index[\"items\"][data[0]].append((data[1], float(1)/float(data[2])))\n\t\t\t\t\ti += 1\n\n\t\treturn True", "def inflateFile(mat, fileToInflate, x=0, y=0):\n\n # Variables for the parsed JSON array, the nodes added and all the\n # nodes in the material, respectively.\n\n nodes_JSON = readNodes(fileToInflate)\n nodes_dict = {}\n nodes = mat.node_tree.nodes\n\n # We iterate a first time to create the nodes with their settings\n\n for node in nodes_JSON[\"NodeSetup\"]:\n technical_name = node[\"type\"]\n location = node[\"location\"]\n nodes_dict[node[\"label\"]] = nodes.new(technical_name)\n nodes_dict[node[\"label\"]].location = (node[\"location\"][0]+x,\n node[\"location\"][1]+y)\n nodes_dict[node[\"label\"]].name = node[\"label\"]\n nodes_dict[node[\"label\"]].label = node[\"label\"]\n\n # The nodes' parameters can be generic, runnable Python.\n # This requires us to actually execute part of the files.\n\n for attribute in node.keys():\n if attribute not in (\"in\", \"label\", \"type\", \"location\"):\n exec(\"nodes_dict[node[\\\"label\\\"]].\"+ attribute + \" = \" +\n str(node[attribute]))\n\n # We create the links between the nodes\n # The syntax in the json is the following\n # \"in\": [\n# [\"<what node is plugged in>\", <which of the nodes outputs is used, can be\n# either string, as in \"\\\"Color\\\"\" or number, eg. 0 for the first output.>,\n# <what input is this plugged to. Can be omitted for sequentially filling all\n# inputs. 
If this has a value, it works like the previous value.>],\n# <next inputs etc.>\n# ]\n\n links = mat.node_tree.links\n\n for node in nodes_JSON[\"NodeSetup\"]:\n if \"in\" in node.keys(): # Does the node have links?\n i = 0\n while i < len(node[\"in\"]): # We iterate over the links\n\n # Is a specific input specified?\n\n if len(node[\"in\"][i]) == 3:\n\n # Contruct and execute the line adding a link\n\n exec (\"links.new(nodes_dict[\\\"\" + node[\"in\"][i][0] +\n \"\\\"].outputs[\" + str(node[\"in\"][i][1]) +\n \"], nodes_dict[\\\"\" + node[\"label\"] + \"\\\"].inputs[\"\n + str(node[\"in\"][i][2]) + \"])\")\n else:\n\n # We don't have a specific input to hook up to\n\n exec (\"links.new(nodes_dict[\\\"\" + node[\"in\"][i][0] +\n \"\\\"].outputs[\" + str(node[\"in\"][i][1]) +\n \"], nodes_dict[\\\"\" + node[\"label\"] + \"\\\"].inputs[\"\n + str(i) + \"])\")\n i += 1\n\n # We return the nodes for purposes of further access to them\n\n return nodes_dict", "def populate_t_database():\n with open('minerals.json') as file:\n file = json.loads(file.read())\n\n for mineral in file[:22]:\n mineral_entry = Mineral.objects.get_or_create(**mineral)", "def read_nodes_dmp(fname):\n df = pd.read_csv(fname, sep=\"|\", header=None, index_col=False,\n names=['tax_id', \n 'parent_tax_id',\n 'rank', \n 'embl_code',\n 'division_id', \n 'inherited_div_flag', # 1 or 0\n 'genetic_code_id', \n 'inherited_GC_flag', # 1 or 0\n 'mitochondrial_genetic_code_id', \n 'inherited_MGC_flag', # 1 or 0\n 'GenBank_hidden_flag',\n 'hidden_subtree_root_flag', # 1 or 0 \n 'comments'])\n return df.assign(rank = lambda x: x['rank'].str.strip(),\n embl_code = lambda x: x['embl_code'].str.strip(),\n comments = lambda x: x['comments'].str.strip())", "def import_phones(self):\n phones_file = GAConfig[\"gene_file\"]\n with open(phones_file, 'r') as in_file:\n for line in in_file:\n line = line.replace(\"\\n\",\"\")\n self.phones.append(line)", "def _read_netgen(self, file):\n\n with open(file, 'r') as f:\n\n aid = 0 # current arc ID\n\n # Read the file line-by-line\n for line in f:\n\n # Decide what to do based on the line prefix\n\n # Comment line\n if line[0] == 'c':\n # Skip\n continue\n\n # Problem info\n elif line[0] == 'p':\n # p sense #nodes #arcs #int int_type #defenses #attacks\n # We always assume that the sense is minimization\n\n ls = line.split()\n if ls[5] == 'n':\n self.parent_type = 0\n self.def_limit = int(ls[6])\n self.att_limit = int(ls[7])\n\n # Initialize all nodes as transshipment (in case the NETGEN\n # file lists only nonzero supply values)\n self.nodes = [_Node(i, 0.0) for i in range(int(ls[2]))]\n\n # Node\n elif line[0] == 'n':\n # n ID supply\n\n # All nodes have already been defined, so update existing\n # supply values\n\n ls = line.split()\n self.nodes[int(ls[1])-1].supply = float(ls[2])\n\n # Arc\n elif line[0] == 'a':\n # a tail head LB UB cost\n\n ls = line.split()\n tail = self.nodes[int(ls[1])-1]\n head = self.nodes[int(ls[2])-1]\n if (int(ls[2]) == 0) and (self.parent_type == 0):\n head = None\n\n self.arcs.append(_Arc(aid, tail, head, float(ls[4]),\n float(ls[5])))\n aid += 1\n\n # Interdependency\n elif line[0] == 'i':\n # i parent child\n\n ### We assume for now that arcs are parents.\n\n ls = line.split()\n self.int.append((self.arcs[int(ls[1])-1],\n self.arcs[int(ls[2])-1]))\n\n # Defensible arc\n elif line[0] == 'd':\n # d arc\n\n ls = line.split()\n self.def_arcs.append(self.arcs[int(ls[1])-1])\n\n # All defensible arcs are assumed to be destructible\n 
self.att_arcs.append(self.arcs[int(ls[1])-1])\n\n # Destructible arc\n elif line[0] == 'r':\n # r arc\n\n ls = line.split()\n self.att_arcs.append(self.arcs[int(ls[1])-1])\n\n # If no defensible or destructible arcs were listed, we assume that\n # all arcs are available\n\n if len(self.def_arcs) == 0:\n self.def_arcs[:] = self.arcs[:]\n\n if len(self.att_arcs) == 0:\n self.att_arcs[:] = self.def_arcs[:]", "def load_all_data_from_file(self) -> None:\n self.load_gene_data_from_file()\n self.load_ontology_from_file(ontology_type=DataType.GO, ontology_url=self.go_ontology_url,\n ontology_cache_path=self.go_ontology_cache_path,\n config=self.config)\n self.load_associations_from_file(associations_type=DataType.GO, associations_url=self.go_associations_url,\n associations_cache_path=self.go_associations_cache_path, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.DO, ontology_url=self.do_ontology_url,\n ontology_cache_path=self.do_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.DO, associations_url=self.do_associations_url,\n associations_cache_path=self.do_associations_cache_path,\n association_additional_cache_path=self.do_associations_new_cache_path,\n association_additional_url=self.do_associations_new_url, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.EXPR, ontology_url=self.expression_ontology_url,\n ontology_cache_path=self.expression_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.EXPR,\n associations_url=self.expression_associations_url,\n associations_cache_path=self.expression_associations_cache_path,\n config=self.config)\n self.load_orthology_from_file()\n self.load_expression_cluster_data()\n self.load_protein_domain_information()", "def import_mirge(file, verbose = False):\n from drresults import models\n import re\n from django.db.utils import DatabaseError\n lineno = 1\n for line in file:\n if verbose:\n print(\"Line Number: %d\" % lineno)\n lineno += 1\n info = line.split('\\t')\n probe_set_id = info[0] \n exiqon_id = info[1]\n totxv_t_p = info[2]\n totxv_t_stat = info[3]\n totxv_w_p = info[4]\n totxv_w_stat = info[5]\n\ttotxvi_t_p = info[6]\n totxvi_t_stat = info[7]\n totxvi_w_p = info[8]\n totxvi_w_stat = info[9]\n meta_p = info[11]\n meta_stat = info[12]\n\n gemir = models.gemir(probe_set_id=probe_setid, exiqon_id=exiqon_id, totxv_t_p=totxv_t_p, totxv_t_stat=totxv_t_stat, totxv_w_p=totxv_w_p, totxv_w_stat=totxv_w_stat, totxvi_t_p=totxvi_t_p, totxvi_t_stat=totxvi_t_stat, totxvi_w_p=totxvi_w_p, totxvi_w_stat=totxvi_w_stat, meta_p=meta_p, meta_stat=meta_stat)\n\n try:\n gemir.save()\n except DatabaseError as de:\n from sys import stderr\n stderr.write(\"Error loading gemir file line: {0}\\n\".format(line))\n raise de\n ##end of import_mirge", "def json_to_neo4j(filename):\n authenticate(ENV[\"DB_URL\"], ENV[\"DB_USERNAME\"],ENV[\"DB_PASSWORD\"]) # Accessing the NEO4J server\n neo4j_graph = Graph()\n string_to_instance_mapping = dict()\n\n with open(filename, \"r\") as f:\n json_data = json.load(f)\n for node in json_data[\"nodes\"]:\n node_instance = Node(node[\"type\"], id=node[\"id\"])\n string_to_instance_mapping[node[\"id\"]] = node_instance\n for link in json_data[\"links\"]:\n source_node_instance = string_to_instance_mapping[link[\"source\"]]\n target_node_instance = string_to_instance_mapping[link[\"target\"]]\n edge = Relationship(source_node_instance, \"MAPS TO\", target_node_instance)\n 
neo4j_graph.create(edge)", "def loadData(self,filepath):\r\n self.removeCheckbuttons()\r\n self.tree = ET.parse(filepath)# Parse xml Tree\r\n self.data = self.tree.getroot().find(\"data\")# Find Data\r\n self.sensors = [i.text for i in self.tree.getroot().find('columns')]# Get Sensor Names\r\n for s in self.sensors:# Add Each Sensor as Option\r\n self.addOption(s)", "def load (self, filename) :\n\t\tserialFile = open (filename, \"rb\")\n\t\tself.production_rules = pickle.load (serialFile)\n\t\tself.unitrelation = pickle.load (serialFile)\n\t\tself.labels = pickle.load (serialFile)\n\t\tself.keeper = pickle.load (serialFile)\n\t\tself.strnodes = pickle.load(serialFile)\n\t\tself.tokens = pickle.load (serialFile)\n\t\tserialFile.close()", "def load_products():\n\n for i, row in enumerate(open(\"seed_data/category.product\")):\n row = row.rstrip()\n name = row.split(\"|\")\n product_category = ProductCategoryModel(name=name)\n db.session.add(product_category)\n\n for i, row in enumerate(open(\"seed_data/product.product\")):\n row = row.rstrip()\n name, short_description, long_description, product_category_id, img_path_xs, img_path_sm, img_path_md, img_path_lg = row.split(\"|\")\n product = ProductModel(name=name,\n short_description=short_description,\n long_description=long_description,\n product_category_id=product_category_id,\n img_path_xs=img_path_xs,\n img_path_sm=img_path_sm,\n img_path_md=img_path_md,\n img_path_lg=img_path_lg)\n db.session.add(product)\n\n for i, row in enumerate(open(\"seed_data/location.product\")):\n row = row.rstrip()\n name, description, address1, address2, city, state, zip_code, country, latitude, longitude, direction_url = row.split(\"|\")\n location = LocationModel(name=name,\n description=description,\n address1=address1,\n address2=address2,\n city=city,\n state=state,\n zip_code=zip_code,\n country=country,\n latitude=latitude,\n longitude=longitude,\n direction_url=direction_url)\n db.session.add(location)\n\n for i, row in enumerate(open(\"seed_data/location_product.product\")):\n row = row.rstrip()\n location_id, product_id, price, num_available = row.split(\"|\")\n location_product = LocationProductModel(location_id=location_id,\n product_id=product_id,\n price=price,\n num_available=num_available)\n db.session.add(location_product)\n\n db.session.commit()", "def load_data(db_handler):\n\n from random import seed\n from random import random\n \n seed(1)\n\n new_notes = []\n\n for i in range(1,10):\n\n new_notes.append({\n\n\n 'title': str(i) + str(random()),\n 'content': 'Lorem ipsum' + str(i),\n 'active': True,\n 'created_by':\"Cristhian\" + str(i),\n 'created_at': date.today(),\n 'edited_at':date.today(),\n \n })\n\n new_notes.append(\n {\n \"active\": False,\n \"content\": \"Jesenia\",\n \"edited_at\": \"2019-10-24\",\n \"title\": \"Jesenia La chica de al lado\",\n \"created_by\": \"Cristhian1\",\n \"created_at\": \"2019-10-24\"\n })\n\n new_notes.append(\n {\n \"active\": False,\n \"title\": \"La vida de los numeros\",\n \"content\": \"Lorem ipsum y los numeros de la muerte\",\n \"edited_at\": \"2019-10-25\",\n \"created_by\": \"Jesenia\",\n \"created_at\": \"2019-10-24\"\n })\n\n Note.insert_many(new_notes).execute()\n\n User(name=\"Cristhian\", email=\"[email protected]\",\n password=b'$2b$12$U/QjtHt/j0xRT4r8Hx3fOe93EssM6M0iiUaQJOrTd64RXbxvhw6Ii').save()", "def ingest_node_results(self, filename, extension=-1):\n\n # Which node is this?\n wg, node_name = utils.parse_node_filename(filename)\n #node_id = self.retrieve_node_id(wg, node_name)\n 
uves_node_id = self.retrieve_node_id(wg, \"UVES-{}\".format(node_name))\n giraffe_node_id = self.retrieve_node_id(wg, \"GIRAFFE-{}\".format(node_name))\n\n # Start ingesting results.\n data = Table.read(filename, hdu=extension)\n\n #default_row = {\"node_id\": node_id}\n default_row = {\"node_id\": -1}\n columns = (\n \"node_id\", \"cname\", \"filename\", \"setup\", \"snr\",\n \"vel\", \"e_vel\", \"vrot\", \"e_vrot\",\n \"teff\", \"e_teff\", \"nn_teff\", \"enn_teff\", \"nne_teff\", \"sys_err_teff\",\n \"logg\", \"e_logg\", \"nn_logg\", \"enn_logg\", \"nne_logg\", \"sys_err_logg\", \"lim_logg\",\n \"feh\", \"e_feh\", \"nn_feh\", \"enn_feh\", \"nne_feh\", \"sys_err_feh\",\n \"xi\", \"e_xi\", \"nn_xi\", \"enn_xi\", \"nne_xi\",\n \"mh\", \"e_mh\", \"nn_mh\", \"enn_mh\", \"nne_mh\",\n \"alpha_fe\", \"e_alpha_fe\", \"nn_alpha_fe\", \"enn_alpha_fe\", \"nne_alpha_fe\",\n \"vrad\", \"e_vrad\", \"vsini\", \"e_vsini\",\n \"peculi\", \"remark\", \"tech\")\n\n # Update formats, as necessary.\n tmp_key_format = \"{}_NEW_DTYPE\"\n for key, new_dtype in _FITS_FORMAT_ADAPTERS.items():\n\n # FUCK THESE IDIOTIC PEOPLE WHAT THE FUCK IS WRONG WITH THEM\n if node_name == \"Carmela-Elena\":\n\n if key in (\"teff\", \"e_teff\", \"logg\"):\n data[tmp_key_format.format(key.upper())] = _adapt_str_to_float(data[key.upper()])\n del data[key.upper()]\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n elif key in (\"feh\", \"e_feh\"):\n del data[key.upper()]\n data[tmp_key_format.format(key.upper())] = np.nan * np.ones(len(data))\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n elif key in (\"tech\", \"peculi\", \"remark\"):\n del data[key.upper()]\n data[tmp_key_format.format(key.upper())] = [\"\"] * len(data)\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n\n elif node_name == \"Porto\" and key in (\"teff\", \"e_teff\", \"feh\", \"e_feh\"):\n data[tmp_key_format.format(key.upper())] = _adapt_str_to_float(data[key.upper()])\n del data[key.upper()]\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n data[tmp_key_format.format(key.upper())] = np.array(data[key.upper()], dtype=new_dtype)\n del data[key.upper()]\n data.rename_column(tmp_key_format.format(key.upper()), key.upper())\n\n N = len(data)\n for i, row in enumerate(data):\n logger.info(\"Ingesting row {}/{} from node WG{}: {}\".format(i, N,\n wg, node_name))\n row_data = {}\n row_data.update(default_row)\n row_data.update(dict(zip(columns[1:], [row[c.upper()] for c in columns[1:]])))\n\n if row_data[\"setup\"].strip() == \"UVES\":\n row_data[\"node_id\"] = uves_node_id\n elif row_data[\"setup\"].strip() == \"GIRAFFE\":\n row_data[\"node_id\"] = giraffe_node_id\n else:\n raise WTFError\n\n if node_name.lower() == \"carmela-elena\":\n for key in (\"tech\", \"peculi\", \"remark\"):\n row_data[key] = str(row_data[key])\n\n use_columns = [] + list(columns)\n for k in row_data.keys():\n if isinstance(row_data[k], (bool, np.bool_)):\n del row_data[k]\n use_columns.remove(k)\n\n self.execute(\n \"INSERT INTO results ({}) VALUES ({})\".format(\n \", \".join(use_columns),\n \", \".join([\"%({})s\".format(column) for column in use_columns])),\n row_data)\n\n else:\n self.execute(\n \"INSERT INTO results ({}) VALUES ({})\".format(\n \", \".join(columns),\n \", \".join([\"%({})s\".format(column) for column in columns])),\n row_data)\n\n self.connection.commit()\n return N", "def fill_attributes(ml_file, other_file):\n with xr.load_dataset(other_file) as other:\n with 
xr.open_dataset(ml_file) as ml:\n for variable in other.variables:\n if variable in ml.variables:\n other[variable].attrs = ml[variable].attrs\n other.to_netcdf(other_file)", "def _read_seed_file(self):\n try:\n if not os.path.exists(self.__seedfile):\n self.__logger.error(\"seedfile:\" + self.__seedfile + \" not exist!\")\n return\n with open(self.__seedfile, \"r\") as file:\n for line in file.readlines():\n url = {\"url\":line.strip(), \"depth\":0}\n self.pushUrl(url)\n except IOError as e:\n self.__logger.error(e)\n else:\n self.__logger.info(\"use seedfile:\" + self.__seedfile)", "def addEdge_file(self, path):\n with open(path, 'r') as File:\n for line in File.readlines():\n ints = list(map(int, line.strip().split())) \n u = ints[0]\n v = ints[1:]\n for i in v:\n self.addEdge(u, i)", "def load(self, file):\n\n address = 0\n\n with open(file) as f: # what about case to handle if index out of range\n # aka no argument provided to command line\n for line in f:\n val = line.split(\"#\")[0].strip()\n if val == '':\n continue\n cmd = int(val, 2)\n self.ram[address] = cmd\n address += 1", "def readGraphFromYAMLFile(self, filename):\n self.G = nx.read_yaml(filename)\n # TODO: buiild up the indexes !!!", "def madmp_import(file, dry_run, hard_sync):\n if not file or not os.path.isfile(file):\n click.secho(\"'%s' is not a file\" % file, file=sys.stderr, fg=\"red\")\n return\n\n with open(file, \"r\") as dmp_file:\n dmp_dict = json.load(dmp_file).get(\"dmp\", {})\n dmp = convert_dmp(dmp_dict, hard_sync=hard_sync)\n\n click.echo(\"DMP %s has %s datasets\" % (dmp.dmp_id, len(dmp.datasets)))\n\n for dataset in dmp.datasets:\n recid = \"[no record]\"\n\n if dataset.record:\n pid = dataset.record.pid\n recid = \"[recid: %s]\" % pid.pid_value\n\n click.echo(\" DS: %s %s\" % (dataset.dataset_id, recid))\n\n if dry_run:\n db.session.rollback()\n else:\n db.session.add(dmp)\n db.session.commit()", "def load(self, filename):\n # try to access file\n\n datafilename = os.path.join(os.path.dirname(filename), self.uid + \"-data.npz\")\n\n with self.netlock:\n initfrom = {}\n datafile = None\n if os.path.isfile(filename):\n try:\n self.logger.info(\"Loading nodenet %s metadata from file %s\", self.name, filename)\n with open(filename) as file:\n initfrom.update(json.load(file))\n except ValueError:\n warnings.warn(\"Could not read nodenet metadata from file %s\", filename)\n return False\n except IOError:\n warnings.warn(\"Could not open nodenet metadata file %s\", filename)\n return False\n\n if os.path.isfile(datafilename):\n try:\n self.logger.info(\"Loading nodenet %s bulk data from file %s\", self.name, datafilename)\n datafile = np.load(datafilename)\n except ValueError:\n warnings.warn(\"Could not read nodenet data from file %\", datafile)\n return False\n except IOError:\n warnings.warn(\"Could not open nodenet file %s\", datafile)\n return False\n\n # initialize with metadata\n self.initialize_nodenet(initfrom)\n\n if datafile:\n\n if 'sizeinformation' in datafile:\n self.NoN = datafile['sizeinformation'][0]\n self.NoE = datafile['sizeinformation'][1]\n else:\n self.logger.warn(\"no sizeinformation in file, falling back to defaults\")\n\n # the load bulk data into numpy arrays\n if 'allocated_nodes' in datafile:\n self.allocated_nodes = datafile['allocated_nodes']\n else:\n self.logger.warn(\"no allocated_nodes in file, falling back to defaults\")\n\n if 'allocated_node_offsets' in datafile:\n self.allocated_node_offsets = datafile['allocated_node_offsets']\n else:\n self.logger.warn(\"no 
allocated_node_offsets in file, falling back to defaults\")\n\n if 'allocated_elements_to_nodes' in datafile:\n self.allocated_elements_to_nodes = datafile['allocated_elements_to_nodes']\n else:\n self.logger.warn(\"no allocated_elements_to_nodes in file, falling back to defaults\")\n\n if 'allocated_nodespaces' in datafile:\n self.allocated_nodespaces = datafile['allocated_nodespaces']\n else:\n self.logger.warn(\"no allocated_nodespaces in file, falling back to defaults\")\n\n if 'allocated_node_parents' in datafile:\n self.allocated_node_parents = datafile['allocated_node_parents']\n else:\n self.logger.warn(\"no allocated_node_parents in file, falling back to defaults\")\n\n if 'allocated_elements_to_activators' in datafile:\n self.allocated_elements_to_activators = datafile['allocated_elements_to_activators']\n else:\n self.logger.warn(\"no allocated_elements_to_activators in file, falling back to defaults\")\n\n if 'allocated_nodespaces_por_activators' in datafile:\n self.allocated_nodespaces_por_activators = datafile['allocated_nodespaces_por_activators']\n else:\n self.logger.warn(\"no allocated_nodespaces_por_activators in file, falling back to defaults\")\n\n if 'allocated_nodespaces_ret_activators' in datafile:\n self.allocated_nodespaces_ret_activators = datafile['allocated_nodespaces_ret_activators']\n else:\n self.logger.warn(\"no allocated_nodespaces_ret_activators in file, falling back to defaults\")\n\n if 'allocated_nodespaces_sub_activators' in datafile:\n self.allocated_nodespaces_sub_activators = datafile['allocated_nodespaces_sub_activators']\n else:\n self.logger.warn(\"no allocated_nodespaces_sub_activators in file, falling back to defaults\")\n\n if 'allocated_nodespaces_sur_activators' in datafile:\n self.allocated_nodespaces_sur_activators = datafile['allocated_nodespaces_sur_activators']\n else:\n self.logger.warn(\"no allocated_nodespaces_sur_activators in file, falling back to defaults\")\n\n if 'allocated_nodespaces_cat_activators' in datafile:\n self.allocated_nodespaces_cat_activators = datafile['allocated_nodespaces_cat_activators']\n else:\n self.logger.warn(\"no allocated_nodespaces_cat_activators in file, falling back to defaults\")\n\n if 'allocated_nodespaces_exp_activators' in datafile:\n self.allocated_nodespaces_exp_activators = datafile['allocated_nodespaces_exp_activators']\n else:\n self.logger.warn(\"no allocated_nodespaces_exp_activators in file, falling back to defaults\")\n\n\n if 'w_data' in datafile and 'w_indices' in datafile and 'w_indptr' in datafile:\n w = sp.csr_matrix((datafile['w_data'], datafile['w_indices'], datafile['w_indptr']), shape = (self.NoE, self.NoE))\n # if we're configured to be dense, convert from csr\n if not self.sparse:\n w = w.todense()\n self.w = theano.shared(value=w.astype(T.config.floatX), name=\"w\", borrow=False)\n self.a = theano.shared(value=datafile['a'].astype(T.config.floatX), name=\"a\", borrow=False)\n else:\n self.logger.warn(\"no w_data, w_indices or w_indptr in file, falling back to defaults\")\n\n if 'g_theta' in datafile:\n self.g_theta = theano.shared(value=datafile['g_theta'].astype(T.config.floatX), name=\"theta\", borrow=False)\n else:\n self.logger.warn(\"no g_theta in file, falling back to defaults\")\n\n if 'g_factor' in datafile:\n self.g_factor = theano.shared(value=datafile['g_factor'].astype(T.config.floatX), name=\"g_factor\", borrow=False)\n else:\n self.logger.warn(\"no g_factor in file, falling back to defaults\")\n\n if 'g_threshold' in datafile:\n self.g_threshold = 
theano.shared(value=datafile['g_threshold'].astype(T.config.floatX), name=\"g_threshold\", borrow=False)\n else:\n self.logger.warn(\"no g_threshold in file, falling back to defaults\")\n\n if 'g_amplification' in datafile:\n self.g_amplification = theano.shared(value=datafile['g_amplification'].astype(T.config.floatX), name=\"g_amplification\", borrow=False)\n else:\n self.logger.warn(\"no g_amplification in file, falling back to defaults\")\n\n if 'g_min' in datafile:\n self.g_min = theano.shared(value=datafile['g_min'].astype(T.config.floatX), name=\"g_min\", borrow=False)\n else:\n self.logger.warn(\"no g_min in file, falling back to defaults\")\n\n if 'g_max' in datafile:\n self.g_max = theano.shared(value=datafile['g_max'].astype(T.config.floatX), name=\"g_max\", borrow=False)\n else:\n self.logger.warn(\"no g_max in file, falling back to defaults\")\n\n if 'g_function_selector' in datafile:\n self.g_function_selector = theano.shared(value=datafile['g_function_selector'], name=\"gatefunction\", borrow=False)\n else:\n self.logger.warn(\"no g_function_selector in file, falling back to defaults\")\n\n if 'n_function_selector' in datafile:\n self.n_function_selector = theano.shared(value=datafile['n_function_selector'], name=\"nodefunction_per_gate\", borrow=False)\n else:\n self.logger.warn(\"no n_function_selector in file, falling back to defaults\")\n\n\n if 'n_node_porlinked' in datafile:\n self.n_node_porlinked = theano.shared(value=datafile['n_node_porlinked'], name=\"porlinked\", borrow=False)\n else:\n self.logger.warn(\"no n_node_porlinked in file, falling back to defaults\")\n\n if 'n_node_retlinked' in datafile:\n self.n_node_retlinked = theano.shared(value=datafile['n_node_retlinked'], name=\"retlinked\", borrow=False)\n else:\n self.logger.warn(\"no n_node_retlinked in file, falling back to defaults\")\n\n # reconstruct other states\n if 'g_function_selector' in datafile:\n g_function_selector = datafile['g_function_selector']\n self.has_new_usages = True\n self.has_pipes = PIPE in self.allocated_nodes\n self.has_directional_activators = ACTIVATOR in self.allocated_nodes\n self.has_gatefunction_absolute = GATE_FUNCTION_ABSOLUTE in g_function_selector\n self.has_gatefunction_sigmoid = GATE_FUNCTION_SIGMOID in g_function_selector\n self.has_gatefunction_tanh = GATE_FUNCTION_TANH in g_function_selector\n self.has_gatefunction_rect = GATE_FUNCTION_RECT in g_function_selector\n self.has_gatefunction_one_over_x = GATE_FUNCTION_DIST in g_function_selector\n else:\n self.logger.warn(\"no g_function_selector in file, falling back to defaults\")\n\n for id in range(len(self.allocated_nodes)):\n if self.allocated_nodes[id] > MAX_STD_NODETYPE:\n uid = tnode.to_id(id)\n self.native_module_instances[uid] = self.get_node(uid)\n\n for sensor, id_list in self.sensormap.items():\n for id in id_list:\n self.inverted_sensor_map[tnode.to_id(id)] = sensor\n for actuator, id_list in self.actuatormap.items():\n for id in id_list:\n self.inverted_actuator_map[tnode.to_id(id)] = actuator\n\n # re-initialize step operators for theano recompile to new shared variables\n self.initialize_stepoperators()\n\n return True", "def loadNodes(self, fname):\r\n with open(fname, \"r\") as fp:\r\n\r\n # Read in the header\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"SPECGRID\":\r\n self.SPECGRID = np.array(fp.readline().split()[0:3], dtype=int)\r\n if item[0] == \"COORDSYS\":\r\n self.COORDSYS = fp.readline().split()\r\n if item[0] == \"COORD\":\r\n break\r\n\r\n # Read in 
the coordinates\r\n self.coords = []\r\n for line in fp:\r\n if line.split()[-1] != \"/\":\r\n item = line.split()\r\n for c in item:\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(int(cc[0])):\r\n self.coords.append(cc[-1])\r\n else:\r\n self.coords.append(c)\r\n else:\r\n if len(line.split()) > 1:\r\n item = line.split()\r\n for i in range(len(item) - 1):\r\n cc = item[i]\r\n if '*' in cc:\r\n ccc = cc.split('*')\r\n for j in range(int(ccc[0])):\r\n self.coords.append(ccc[-1])\r\n else:\r\n self.coords.append(c)\r\n break\r\n else:\r\n break\r\n\r\n # Read in ZCORN\r\n self.zcorn = []\r\n i = 0\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"ZCORN\":\r\n for line in fp:\r\n if line.split():\r\n if line.split()[-1] != \"/\":\r\n self.zcorn += line.split()\r\n else:\r\n self.zcorn += line.split()[0:-1]\r\n break\r\n if len(self.zcorn) > 0:\r\n break\r\n\r\n # Read in (in)active cells\r\n self.active = []\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"ACTNUM\":\r\n for line in fp:\r\n if line.split():\r\n if line.split()[-1] != \"/\":\r\n c = line.split()\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(float(cc[0])):\r\n self.active += cc[-1]\r\n else:\r\n self.active += c\r\n else:\r\n self.active += line.split()[0:-1]\r\n break\r\n\r\n self.coords = np.array(self.coords, dtype=float)\r\n print(self.coords)\r\n\r\n # In Petrel...\r\n self.ne = self.SPECGRID[0] # x i\r\n self.nn = self.SPECGRID[1] # y j\r\n self.nz = self.SPECGRID[2] # z k\r\n\r\n # build grid\r\n self.buildGrid(plot=False)\r\n self.buildActiveCells(plot=False)\r\n self.buildZGrid(plot=False)\r\n # self.calculateVolumes(plot=False)\r\n #\r\n # Convert to VTK\r\n self.GridType = \"vtkStructuredGrid\"\r\n self.Grid = vtk.vtkStructuredGrid()\r\n self.Grid.SetDimensions(self.ne+1, self.nn+1, self.nz+1)\r\n vtk_points = vtk.vtkPoints()\r\n ve = 1.\r\n\r\n for iz in range(self.nz):\r\n if iz == 0:\r\n for iy in range(self.nn+1):\r\n for ix in range(self.ne+1):\r\n vtk_points.InsertNextPoint( self.X0[ix,iy], \\\r\n self.Y0[ix,iy], \\\r\n ve * self.ZZT[iz][ix,iy] )\r\n for iy in range(self.nn+1):\r\n for ix in range(self.ne+1):\r\n vtk_points.InsertNextPoint( self.X0[ix,iy], \\\r\n self.Y0[ix,iy], \\\r\n ve * self.ZZB[iz][ix,iy] )\r\n self.Grid.SetPoints(vtk_points)\r\n\r\n # Add in active cells\r\n ac = vtk.vtkIntArray()\r\n ac.SetName( \"ActiveCells\" )\r\n for iac in self.ActiveCells.flatten( order='F' ):\r\n ac.InsertNextTuple1( iac )\r\n self.Grid.GetCellData().AddArray(ac)", "def load_random_data(self, parts, nodes, max_nodes):\n\n self.parts = parts\n self.nodes = nodes\n self.max_nodes = max_nodes\n\n if self.verbose:\n print 'Generating random data using nodes:' + str(nodes) + \\\n ' parts:' + str(parts) + ' max nodes:' + str(max_nodes)\n\n node_list = []\n node_list.extend(range(1, nodes))\n\n # for each part we want to add a random number of nodes from the node list\n for i in range(1, parts):\n self.data_dict[i] = random.sample(node_list, random.randint(2, max_nodes))", "def load_expenditures():\n\n Expenditure.query.delete()\n\n with open(expenditure_file) as f:\n for _ in range(1):\n next(f)\n \n for row in f:\n row = row.rstrip()\n expenditure_data = row.split(\",\")\n print(expenditure_data)\n\n id = expenditure_data[0]\n category_id = expenditure_data[1]\n price = expenditure_data[2]\n date_of_expenditure = expenditure_data[3]\n expenditure_userid = expenditure_data[4]\n where_bought = expenditure_data[5]\n description 
= expenditure_data[6]\n\n expenditure = Expenditure(\n id = id,\n category_id = category_id,\n price = price,\n date_of_expenditure = get_datetime(date_of_expenditure),\n expenditure_userid = expenditure_userid,\n where_bought = where_bought,\n description = description\n )\n\n db.session.add(expenditure)\n\n db.session.commit()", "def load_invites(invite_filename):\n\n # Write code here to loop over invite data and populate DB.\n\n print(\"Invites\")\n\n for i, row in enumerate(open(invite_filename)):\n row = row.rstrip()\n\n values_list = row.split()\n\n user_id, event_id, rsvp = values_list\n\n user_id = int(user_id)\n event_id = int(event_id)\n\n # Instantiate invite\n invite = Invitation(user_id=user_id,\n event_id=event_id,\n rsvp=rsvp)\n\n # Add invite to session\n db.session.add(invite)\n\n # Commit all invite instances to DB\n db.session.commit()", "def setup(inFileName):\n coords = []\n nodes = []\n in_ = open(inFileName, \"r\")\n for line in in_:\n nodeID, xCoord, yCoord = line.split()\n coords.append((int(nodeID), (float(xCoord), float(yCoord))))\n nodes.append(int(nodeID))\n in_.close()\n\n distances = NodeContainer()\n for i in range(len(coords) - 1):\n for j in range(i + 1, len(coords)):\n distances.add(coords[i], coords[j])\n return (nodes, distances)", "def load_ratings():\n\n print \"Ratings\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Rating.query.delete()\n\n # Read u.data file and insert data\n for row in open(\"seed_data/u.data\"):\n row = row.rstrip()\n user_id, movie_id, score, timestamp = row.split(\"\\t\")\n\n user_id = int(user_id)\n movie_id = int(movie_id)\n score = int(score)\n\n #from rating class take the movie_id and make it equal to the movie_id \n #from the for loop above. We are calling it to make an instance of the rating\n #class\n rating = Rating(movie_id=movie_id, user_id=user_id, score=score)\n \n #We need to add to the session or it won't ever be stored\n db.session.add(rating)\n\n #Once we're done, we should commit our work\n db.session.commit()", "def load_from_file(self, file_path):\n for line in open(file_path, 'r'):\n term = line.rstrip('\\n')\n self.add(term)", "def update_file(filename, items):\n # TODO: Implement something in the templates to denote whether the value\n # being replaced is an XML attribute or a value. 
Perhaps move to dyanmic\n # XML tree building rather than string replacement.\n should_escape = filename.endswith('addon.xml')\n\n with open(filename, 'r') as inp:\n text = inp.read()\n\n for key, val in items.items():\n if should_escape:\n val = saxutils.quoteattr(val)\n text = text.replace('{%s}' % key, val)\n output = text\n\n with open(filename, 'w') as out:\n out.write(output)", "def load_all(self, file):\n self.model = load_model(file + \"_model.h5\")", "def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list", "def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list", "async def insert_attack_json_data(self, buildfile):\n cur_items = [x['uid'] for x in await self.dao.get('attack_uids')]\n logging.debug('[#] {} Existing items in the DB'.format(len(cur_items)))\n with open(buildfile, 'r') as infile:\n attack_dict = json.load(infile)\n loaded_items = {}\n # Extract all TIDs\n for item in attack_dict['objects']:\n if 'external_references' in item:\n # Filter down\n if any(x for x in item['external_references'] if x['source_name'] == 'mitre-attack'):\n items = [x['external_id'] for x in item['external_references'] if\n x['source_name'] == 'mitre-attack']\n if len(items) == 1:\n tid = items[0]\n # Add in\n if tid.startswith('T') and not tid.startswith('TA'):\n if item['type'] == \"attack-pattern\":\n loaded_items[item['id']] = {'id': tid, 'name': item['name'],\n 'examples': [],\n 'similar_words': [],\n 'description': item['description'],\n 'example_uses': []}\n else:\n logging.critical('[!] Error: multiple MITRE sources: {} {}'.format(item['id'], items))\n # Extract uses for all TIDs\n for item in attack_dict['objects']:\n if item['type'] == 'relationship':\n if item[\"relationship_type\"] == 'uses':\n if 'description' in item:\n normalized_example = item['description'].replace('<code>', '').replace('</code>',\n '').replace('\\n', '').encode('ascii', 'ignore').decode('ascii')\n # Remove att&ck reference (name)[link to site]\n normalized_example = re.sub('\\[.*?\\]\\(.*?\\)', '', normalized_example)\n if item['target_ref'].startswith('attack-pattern'):\n if item['target_ref'] in loaded_items:\n loaded_items[item['target_ref']]['example_uses'].append(normalized_example)\n else:\n logging.critical('[!] 
Found target_ref not in loaded data: {}'.format(item['target_ref']))\n logging.debug(\"[#] {} Techniques found in input file\".format(len(loaded_items)))\n # Deduplicate input data from existing items in the DB\n to_add = {x: y for x, y in loaded_items.items() if x not in cur_items}\n logging.debug('[#] {} Techniques found that are not in the existing database'.format(len(to_add)))\n for k, v in to_add.items():\n await self.dao.insert('attack_uids', dict(uid=k, description=defang_text(v['description']), tid=v['id'],\n name=v['name']))\n if 'example_uses' in v:\n [await self.dao.insert('true_positives', dict(uid=k, true_positive=defang_text(x))) for x in\n v['example_uses']]", "def read_all():\n with open(User.get_users_path(), encoding='utf8') as file:\n for line in file:\n username, pw_hash, email, name, level = line.strip().split(';')\n level = int(level)\n user = User.from_file(username)\n# userfile = User.get_path()\n# user = User(username=username, pw_hash=pw_hash, email=email, name=name, level=level,\n# new=False)\n User.users[username] = user", "def read_extras():\n extras = dict()\n extra_requirements_dir = 'packaging/requirements'\n for extra_requirements_filename in os.listdir(extra_requirements_dir):\n filename_match = re.search(r'^requirements-(\\w*).txt$', extra_requirements_filename)\n if not filename_match:\n continue\n extra_req_file_path = os.path.join(extra_requirements_dir, extra_requirements_filename)\n try:\n extras[filename_match.group(1)] = read_file(extra_req_file_path).splitlines()\n except RuntimeError:\n pass\n return extras", "def parse_products(self, infile):\r\n raise NotImplementedError()", "def write_node_features(node_features, node_file):\n dgl.data.utils.save_tensors(node_file, node_features)", "def update_ip(ip_file, available_ip):\n with open(ip_file, \"r\") as file:\n address = ordered_load(file)\n \"\"\"address is a dict type object\"\"\"\n # print address\n address = address['node']\n\n with open(ip_file, \"w\") as file:\n print address\n # address['initial'][0] = available_ip[2]\n \"\"\"Fill the new configuration with available_ip\"\"\"\n index = 0\n for i in address:\n for ip in address[i]:\n j = address[i].index(ip)\n address[i][j] = available_ip[index]\n index = index + 1\n\n print address\n # print address['initial']\n # address = dict(address)\n node_ip = dict()\n node_ip['node'] = address\n yaml.dump(node_ip, file)", "def bulk_insert(cls, path=\"data.json\"):\n from json import load\n from codecs import open\n \n lists = load(open(path, \"r\", \"utf8\"))\n for lst in lists:\n ing = cls(content = lst)\n ing.put()", "def copy_and_modify_files(basedir, copy_nodes, runpath, remote_port, seed):\n \n config_file_name = None\n for copy_node in copy_nodes:\n\n file_src_name = None\n file_dst_name = None\n file_contents = None\n\n # Read from disk?\n if copy_node.hasAttribute(\"file\"):\n file_src_name = copy_node.getAttribute(\"file\")\n file_src_path = os.path.join(basedir, file_src_name)\n\n # Sanity check\n if file_src_name.find(\"/\") != -1:\n raise RuntimeError('name of file to copy \"%s\" contains a \"/\"' % file_src_name)\n if not os.path.exists(file_src_path):\n raise RuntimeError('file \"%s\" does not exist' % file_src_path)\n\n # Read contents\n file_handle = open(file_src_path, 'rb')\n file_contents = file_handle.read()\n file_handle.close()\n\n # By now we need a destination name and contents\n if copy_node.hasAttribute(\"name\"):\n file_dst_name = copy_node.getAttribute(\"name\")\n elif file_src_name:\n file_dst_name = file_src_name\n 
else:\n raise RuntimeError('<copy> node with no destination name: %s' % copy_node.toxml())\n if file_contents == None:\n raise RuntimeError('<copy> node with no contents: %s' % copy_node.toxml())\n\n # Is this our config file?\n if copy_node.getAttribute(\"type\") == \"config\":\n config_file_name = file_dst_name\n\n config_parser = xml.dom.minidom.parseString(file_contents)\n config_xml = config_parser.documentElement\n\n set_sumoconfig_option(config_parser, config_xml, \"traci_server\", \"remote-port\", remote_port)\n set_sumoconfig_option(config_parser, config_xml, \"random_number\", \"seed\", seed)\n set_sumoconfig_option(config_parser, config_xml, \"random_number\", \"random\", \"false\")\n\n file_contents = config_xml.toxml()\n\n # Write file into rundir\n file_dst_path = os.path.join(runpath, file_dst_name)\n file_handle = open(file_dst_path, \"wb\")\n file_handle.write(file_contents)\n file_handle.close()\n\n # make sure that we copied a config file\n if not config_file_name:\n raise RuntimeError('launch config contained no <copy> node with type=\"config\"')\n\n return config_file_name", "def load_data_file(self):\n with open(self.files['data'], 'r') as infile:\n data = json.load(infile)\n self.boundary_nodes = data['boundary_nodes']\n self.nodes = {int(k): v for k, v in data['nodes'].items()}\n self.levels = data['levels']\n infile.close()", "def import_maze(self, filepath, canvas, datastore):\n inputter = XMlInputer()\n self\n\n if filepath is '':\n Debug.printi(\"No input file to read\", Debug.Level.INFO)\n return\n\n inputter.read_file(filepath)\n Debug.printi(\"Maze input file \" + filepath + \" has been read\", Debug.Level.INFO)\n self._all_entries.clear()\n\n for node in inputter._root.iter(\"node\"):\n original_id = node.attrib[\"id\"]\n node.attrib[\"id\"] = \"n_\" + node.attrib[\"id\"]\n for edge in inputter._root.iter(\"edge\"):\n if edge.attrib[\"source\"] == original_id:\n edge.attrib[\"source\"] = node.attrib[\"id\"]\n if edge.attrib[\"target\"] == original_id:\n edge.attrib[\"target\"] = node.attrib[\"id\"]\n\n i = 0\n for node in inputter._root.iter(\"node\"):\n # Use an index to assign the id to the nodes\n i += 1\n self._all_entries[str(i)] = node\n attributes = node.attrib\n old_id = attributes[\"id\"]\n attributes[\"id\"] = str(i)\n pictures = {}\n for pic in node.getchildren():\n pictures[pic.attrib[\"name\"]] = pic.attrib\n canvas.create_new_node(\n (\n int(attributes[\"x\"]),\n int(attributes[\"y\"])\n ),\n prog=True,\n data=(attributes, pictures)\n )\n Debug.printi(\"New Node Created from file ID:\" + node.attrib[\"id\"], Debug.Level.INFO)\n\n # Since we are changing the id's we need to update all of the edges that use them\n for edge in inputter._root.iter(\"edge\"):\n if edge.attrib[\"source\"] == old_id:\n edge.attrib[\"source\"] = str(i)\n if edge.attrib[\"target\"] == old_id:\n edge.attrib[\"target\"] = str(i)\n\n for edge in inputter._root.iter(\"edge\"):\n self._all_entries[(edge.attrib[\"source\"], edge.attrib[\"target\"])] = edge\n source_coords = int(self._all_entries[edge.attrib[\"source\"]].attrib[\"x\"]), \\\n int(self._all_entries[edge.attrib[\"source\"]].attrib[\"y\"])\n\n target_coords = int(self._all_entries[edge.attrib[\"target\"]].attrib[\"x\"]), \\\n int(self._all_entries[edge.attrib[\"target\"]].attrib[\"y\"])\n\n wall1_node = edge.find(\"Wall1\")\n wall2_node = edge.find(\"Wall2\")\n\n wall1 = {\n \"height\" : wall1_node.attrib[\"height\"] if len(wall1_node.attrib) > 0 else None,\n \"textures\" : {}\n }\n\n wall2 = {\n \"height\": 
wall2_node.attrib[\"height\"] if len(wall2_node.attrib) > 0 else None,\n \"textures\": {}\n }\n\n for texture in wall1_node.iter(\"Texture\"):\n wall1[\"textures\"][\"path\"] = {\n \"path\" : texture.attrib[\"path\"],\n \"tile_x\": texture.attrib[\"tileX\"],\n \"tile_y\": texture.attrib[\"tileY\"]\n }\n\n for texture in wall2_node.iter(\"Texture\"):\n wall2[\"textures\"][\"path\"] = {\n \"path\": texture.attrib[\"path\"],\n \"tile_x\": texture.attrib[\"tileX\"],\n \"tile_y\": texture.attrib[\"tileY\"]\n }\n\n canvas._clear_cache(source_coords)\n canvas._begin_edge(source_coords)\n canvas._execute_edge(target_coords)\n canvas._end_edge(target_coords,\n prog=True,\n data={\n \"source\": edge.attrib[\"source\"],\n \"target\": edge.attrib[\"target\"],\n \"height\": None,\n \"wall1\": wall1,\n \"wall2\": wall2\n })\n Debug.printi(\"New EDGE Created from file Source:\" + edge.attrib[\"source\"]\n + \" Target: \" + edge.attrib[\"target\"], Debug.Level.INFO)\n\n for object in inputter._root.iter(\"object\"):\n self._all_entries[object.attrib[\"name\"]] = object\n canvas._mark_object((int(object.attrib[\"x\"]), int(object.attrib[\"y\"])),\n prog=True,\n data={\n \"x_coordinate\": object.attrib[\"x\"],\n \"y_coordinate\": object.attrib[\"y\"],\n \"name\": object.attrib[\"name\"],\n \"mesh\": object.attrib[\"mesh\"],\n \"scale\": object.attrib[\"scale\"]\n })\n Debug.printi(\"New Object Created from file Name:\" + object.attrib[\"name\"], Debug.Level.INFO)\n\n self._floor_tex = ET.SubElement(self._root, \"floorTexture\") if self._floor_tex is None else self._floor_tex\n self._wall_height = ET.SubElement(self._root, \"wallHeight\") if self._wall_height is None else self._wall_height\n self._edge_width = ET.SubElement(self._root, \"edgeWidth\") if self._edge_width is None else self._edge_width\n self._sky_texture = ET.SubElement(self._root, \"skySphereTexture\") if self._sky_texture is None else self._sky_texture\n self._start_node = ET.SubElement(self._root, \"startNode\") if self._start_node is None else self._start_node\n\n for floor_tex in inputter._root.iter(\"floorTexture\"):\n self._floor_tex.attrib[\"val\"] = floor_tex.attrib[\"val\"]\n for wall_height in inputter._root.iter(\"wallHeight\"):\n self._wall_height.attrib[\"val\"] = wall_height.attrib[\"val\"]\n for edge_width in inputter._root.iter(\"edgeWidth\"):\n self._edge_width.attrib[\"val\"] = edge_width.attrib[\"val\"]\n for sky_tex in inputter._root.iter(\"skySphereTexture\"):\n self._sky_texture.attrib[\"val\"] = sky_tex.attrib[\"val\"]\n for start_node in inputter._root.iter(\"startNode\"):\n self._start_node.attrib[\"id\"] = start_node.attrib[\"id\"]\n\n datastore.inform(\"Environment Edit\", data={\n \"floor_texture\": self._floor_tex.attrib[\"val\"],\n \"wall_height\": self._wall_height.attrib[\"val\"],\n \"edge_width\": self._edge_width.attrib[\"val\"],\n \"sky_texture\": self._sky_texture.attrib[\"val\"],\n \"start_node\": self._start_node.attrib[\"id\"]\n })\n\n datastore.inform(\"VR Edit\", data={\\\n \"frame_angle\" : inputter._root.attrib[\"frameAngle\"],\n \"distortion\" : inputter._root.attrib[\"distortion\"],\n \"windowed\" : inputter._root.attrib[\"windowed\"],\n \"eye_height\" : inputter._root.attrib[\"eye\"],\n \"minimum_dist_to_wall\" : inputter._root.attrib[\"minDistToWall\"]\n })\n self._root.attrib[\"takeOffAfter\"] = \"20\"\n self._root.attrib[\"displays\"] = \"3,4,1,2,5,6\"", "def get_data_from_file(file_name):\n try:\n with open(file_name, 'rb') as f:\n raw_dict = xmltodict.parse(f.read())\n ways = [w for w in 
clean_list(raw_dict['osm']['way'], element_type='way') if filter_out(w)]\n nodes = clean_list(raw_dict['osm']['node'], element_type='node')\n relations = clean_list(raw_dict['osm']['relation'], element_type='relation')\n selection = [{'version': parse_xml_parameter('version', raw_dict),\n 'osm3s': parse_xml_parameter('osm3s', raw_dict),\n 'generator': parse_xml_parameter('generator', raw_dict),\n 'bounds': parse_xml_parameter('bounds', raw_dict),\n 'elements': ways + nodes + relations\n }\n ]\n return selection\n except: # (IOError, xmltodict.ExpatError):\n return None", "def load_users(user_filename):\n\n print(\"Users\")\n\n for i, row in enumerate(open(user_filename)):\n row = row.rstrip()\n user_id, name, email, password, phone, dob = row.split(\"|\")\n\n if email:\n is_registered = True\n else:\n is_registered = False\n\n # Instantiate user\n user = User(name=name, \n email=email, \n password=password, \n phone=phone, \n dob=dob,\n is_registered=is_registered)\n\n\n # Add user to session\n db.session.add(user)\n\n\n # Commit all users to DB\n db.session.commit()", "def extractXMLEntity(self, fileElementTree, fileName, xmlFilesContent, txtFileRead):\n\t\tfor entity in fileElementTree.iter('entity'):\n\t\t\tid = entity.find('id').text\n\t\t\tspan = entity.find('span').text\n\t\t\tannotationType = entity.find('type').text\n\t\t\tproperties \t = entity.find('properties')\n\n\t\t\tnumSpans, spanTuple = self.spanToTuple(span)\n\n\t\t\tif annotationType == \"Age\":\n\t\t\t\tageType = properties.find('AgeType').text\n\t\t\t\txmlFilesContent[fileName].update({id: {'spans': spanTuple, 'numSpans': numSpans, 'type': annotationType, \"ageType\": ageType}})\n\n\t\t\telif annotationType == \"FamilyMember\":\n\t\t\t\tcount \t\t = properties.find('Count').text\n\t\t\t\tfamilyRelation = properties.find('Relation').text\n\t\t\t\tfamilySide = properties.find('SideOfFamily').text\n\t\t\t\tmentions \t = self.fetchMentionFromSpan(spanTuple, txtFileRead)\n\t\t\t\txmlFilesContent[fileName].update({id: { 'spans': spanTuple, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'numSpans': numSpans, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t'type': annotationType, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"familyRelation\": familyRelation, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"count\": count, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"familySide\": familySide,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"mention\": mentions}})\n\n\t\t\telif annotationType == \"Observation\":\n\t\t\t\tnegation = properties.find('Negation').text\n\t\t\t\tcertainty = properties.find('Certainty').text\n\t\t\t\tmentions = self.fetchMentionFromSpan(spanTuple, txtFileRead)\n\t\t\t\txmlFilesContent[fileName].update({id: {'spans': spanTuple, 'numSpans': numSpans, 'type': annotationType, 'mentions': mentions, \"negation\": negation, \"certainty\": certainty}})\n\n\t\t\telif annotationType == \"LivingStatus\":\n\t\t\t\talive = properties.find('Alive').text\n\t\t\t\thealthy = properties.find('Healthy').text\n\t\t\t\txmlFilesContent[fileName].update({id: {'spans': spanTuple, 'numSpans': numSpans, 'type': annotationType, \"alive\": alive, \"healthy\": healthy}})\n\t\treturn xmlFilesContent", "def add_data():\n neo = NeoData(\"neo4j://neo:7687\")\n neo.add_example_data()", "def load_users():\n\n print \"Users\"\n\n User.query.delete()\n\n for row in open(\"seed_data/u.user\"):\n row = row.rstrip()\n ID, password, name, first_entry_at = row.split(\"|\")\n first_entry_at = datetime.strptime(first_entry_at, \"%m-%d-%y\")\n\n user = User(ID=ID, password=password, name=name, first_entry_at=first_entry_at)\n\n 
db.session.add(user)\n\n db.session.commit()", "def load_data():\n\n server_node = load_nodes(SERVER_NODE_INFILE)\n road_node = load_nodes(ROAD_NODE_INFILE)\n road_segment_point = load_nodes(ROAD_SEGMENT_POINT_INFILE)\n\n return server_node, road_node, road_segment_point", "def read_tetgen(filename):\n nodes = tetgen.readNodes(filename+'.1.node')\n print \"Read %d nodes\" % nodes.shape[0]\n elems = tetgen.readElems(filename+'.1.ele')\n print \"Read %d tetraeders\" % elems.shape[0]\n return nodes,elems", "def loadData(self):\n machineToNode = {}\n self.listOfMachines = []\n nextID = 0\n self.processingSteps = []\n with open(self.filename) as f:\n lines = f.read().splitlines()\n for line in lines:\n formatted = line.split(\"\\t\")\n order = int(formatted[0])\n machine = int(formatted[1])\n timestamp = float(formatted[2])\n if machine not in machineToNode: # normalizing machines according to the nodes (1,2,3... instead of 1,34,2...)\n machineToNode[machine] = nextID\n nextID +=1\n self.listOfMachines.append(machineToNode[machine]) # normalized list of all machines\n\n pstep = ProcessingStep(machineToNode[machine], timestamp, order)\n self.processingSteps.append(pstep)" ]
[ "0.673297", "0.5395848", "0.5311601", "0.5258237", "0.5242158", "0.52234995", "0.50911427", "0.5082365", "0.50697577", "0.5054883", "0.49930757", "0.4956298", "0.4920614", "0.49156678", "0.48891574", "0.4886341", "0.48851725", "0.48767012", "0.48547512", "0.4849261", "0.4848845", "0.48449424", "0.48419544", "0.48360765", "0.48305762", "0.48027632", "0.4802578", "0.47970206", "0.4793884", "0.4791032", "0.47904742", "0.47791004", "0.4774314", "0.47579402", "0.47424045", "0.4731775", "0.47271144", "0.47185078", "0.47184798", "0.47098923", "0.46912718", "0.46763855", "0.4669112", "0.466205", "0.465654", "0.46532476", "0.46520495", "0.46487808", "0.46456552", "0.46304318", "0.4626896", "0.46259707", "0.462486", "0.4622311", "0.46132177", "0.46103418", "0.46084896", "0.4607072", "0.46062207", "0.46049568", "0.45975432", "0.45926097", "0.45921367", "0.45898226", "0.45897135", "0.4579503", "0.45785666", "0.45624813", "0.45577502", "0.45561206", "0.45541492", "0.45540184", "0.45519888", "0.455126", "0.4544438", "0.45417368", "0.4531379", "0.45262408", "0.4521442", "0.45184264", "0.45180333", "0.45180333", "0.4517643", "0.45130873", "0.4510397", "0.4509641", "0.44992223", "0.44956696", "0.4492389", "0.4486317", "0.44834235", "0.44819915", "0.4476666", "0.44681734", "0.4468131", "0.44664752", "0.4455173", "0.44471416", "0.44443446", "0.44427282" ]
0.8099154
0
Delete a set of nodes. (From AiiDA cookbook)
def delete_nodes(pks_to_delete): from django.db import transaction from django.db.models import Q from aiida.backends.djsite.db import models from aiida.orm import load_node # Delete also all children of the given calculations # Here I get a set of all pks to actually delete, including # all children nodes. all_pks_to_delete = set(pks_to_delete) for pk in pks_to_delete: all_pks_to_delete.update(models.DbNode.objects.filter( parents__in=pks_to_delete).values_list('pk', flat=True)) print "I am going to delete {} nodes, including ALL THE CHILDREN".format( len(all_pks_to_delete)) print "of the nodes you specified. Do you want to continue? [y/N]" answer = raw_input() if answer.strip().lower() == 'y': # Recover the list of folders to delete before actually deleting # the nodes. I will delete the folders only later, so that if # there is a problem during the deletion of the nodes in # the DB, I don't delete the folders folders = [load_node(pk).folder for pk in all_pks_to_delete] with transaction.atomic(): # Delete all links pointing to or from a given node models.DbLink.objects.filter( Q(input__in=all_pks_to_delete) | Q(output__in=all_pks_to_delete)).delete() # now delete nodes models.DbNode.objects.filter(pk__in=all_pks_to_delete).delete() # If we are here, we managed to delete the entries from the DB. # I can now delete the folders for f in folders: f.erase()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_nodes(self, _ids):\n return self.make_request(\"POST\", \"nodes/delete\", { \"nodes\" : _ids })", "def deleteNode(*args, **kwds):\n nodes = args\n if len(args) < 1:\n nodes = cmds.ls(sl=1)\n \n for node in nodes:\n node_lst = [node]\n if isinstance(node, (list, tuple)):\n node_lst = node\n\n for n in node_lst:\n if cmds.objExists(str(n)):\n cmds.delete(str(n), **kwds)\n else:\n cmds.warning(\"# Don’t exist - \" + node)", "def test_graph_deletes_nodes(graph_with_edges):\n graph_with_edges.del_nodes('B')\n listy = ['A', 'C', 'D', 'E', 'F']\n for node in listy:\n assert node in graph_with_edges.nodes()\n assert 'B' not in graph_with_edges.nodes()", "def remove_nodes_from(self, nodes):\n for node in nodes:\n self.remove_node(node)", "def delete(self, ids):\n\n for uid in ids:\n # Remove existing node, if it exists\n if self.hasnode(uid):\n # Delete from topics\n topic = self.attribute(uid, \"topic\")\n if topic and self.topics:\n # Delete id from topic\n self.topics[topic].remove(uid)\n\n # Also delete topic, if it's empty\n if not self.topics[topic]:\n self.topics.pop(topic)\n\n # Delete node\n self.removenode(uid)", "def delete_nodes(self, deletenodes):\n if not isinstance(deletenodes, list):\n deletenodes = [deletenodes]\n for deletenode in deletenodes:\n if isinstance(deletenode, node.NodeConnection):\n self.disconnect_nodes(deletenode.start, deletenode.end)\n if isinstance(deletenode, node.RigNode) or isinstance(deletenode, node.MouseNode):\n for hook in ['inhook', 'outhook']:\n hooks = deletenode.data.get_attr(hook, column='name')\n for i in range(hooks.rowCount()):\n hooknode = deletenode.data.get_attr('%s%s' % (hook, i)).data()\n self.disconnect_nodes(deletenode, hooknode, True)\n # end for i in range(hooks.rowCount())\n # end for hook in ['inhook', 'outhook']\n self.removeItem(deletenode)\n # end for deletenode in deletenodes", "def deleted_canvas_nodes(self, canvas_nodes: list[CanvasNode]) -> None:\n for canvas_node in canvas_nodes:\n node = canvas_node.core_node\n del self.canvas_nodes[node.id]\n del self.session.nodes[node.id]", "def remove_nodes(self, count=1):\n for i in range(count):\n dead_guy = self.all_nodes.pop()\n self.log.info(\"Removing node %s\" % dead_guy.name)\n dead_guy.decommission()\n self.log.info(\"Client %s is removed\" % dead_guy.name)\n self.save_cluster()\n self.inject_hosts_files()", "def destroyNodes(self):\r\n for nt in self.listNodes.keys(): \t# for all kind of nodes...\r\n for node in self.listNodes[nt]: \t# for all nodes of type <nt>\r\n if node.graphObject_: node.graphObject_.destroy()", "def fusion_api_delete_ha_nodes(self, uri=None, api=None, headers=None):\n return self.ha_nodes.delete(uri, api, headers)", "def delete(self, nodes):\n # Check indices.\n N = len(self)\n if not isinstance(nodes, (set, list, tuple)):\n nodes = [nodes]\n if not all(0 < node <= N for node in nodes):\n raise IndexError()\n\n # Reparent orphaned nodes.\n # Lift the arc until the parent is non-deleted node.\n # If all parents are deleted, we will hit the root eventually.\n deleted = set(nodes)\n alive_heads = [None] * N\n for node in range(1, N + 1):\n head = self.heads(node)\n while head in deleted:\n head = self.heads(head)\n alive_heads[node - 1] = head\n\n # Remap.\n new_nodes = {0: 0}\n new_node = 1\n\n for node in range(1, N + 1):\n if node in deleted:\n continue\n new_nodes[node] = new_node\n new_node += 1\n\n # Gather non-deleted stuff.\n forms = []\n lemmas = []\n cpostags = []\n postags = []\n feats = []\n heads = []\n deprels = []\n\n for node in 
range(1, N + 1):\n if node in deleted:\n continue\n forms.append(self.forms(node))\n lemmas.append(self.lemmas(node))\n cpostags.append(self.cpostags(node))\n postags.append(self.postags(node))\n feats.append(self.feats(node))\n heads.append(new_nodes[alive_heads[node - 1]])\n deprels.append(self.deprels(node))\n \n # Construct new tree.\n self.__init__(forms, lemmas, cpostags, postags, feats, heads, deprels)", "def delete_cluster(self):", "def _delete_edges(self, to_be_deleted_set, adj_dict):\n for pair in to_be_deleted_set:\n first_node = pair[0]\n second_node = pair[1]\n adj_dict.pop((first_node, second_node), None)", "def delete_node(tx, node_value, node_type):\n cql = \"MATCH(n:\" + node_type + \"{name:$node_value}) DETACH DELETE(n);\"\n try:\n tx.run(cql, node_value=node_value)\n except Exception as e:\n print(str(e))", "def destroy_nodes(\n self,\n name,\n ):\n pass", "def deleteAll(tx):\n query = (\n\n \"MATCH(p1:Person)-[a:APP_CONTACT]->(p2:Person)\"\n \"WHERE a.date < date() - duration({Days: 10}) OR (a.date = date() - duration({Days: 10}) AND a.hour < time())\"\n \"DELETE a\"\n\n )\n\n tx.run(query)", "def delete_transform_from_nodes(nodes):\n\n for node in nodes:\n try:\n shape = [x for x in cmds.listHistory(node, future=True)\n if x not in cmds.listHistory(node, future=True, pdo=True)]\n transform = cmds.listRelatives(shape, parent=True)\n cmds.delete(transform)\n except ValueError:\n return", "def delete_nodes(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"delete_nodes\")", "def delete_all(self):\n query = \"\"\"MATCH(n) DETACH DELETE n\"\"\"\n return self.create_tx(query)", "def delete_node(self, node_tup):\n signature = hashlib.sha256((uname+node_sig).encode('utf-8')).hexdigest() #hash value\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"DELETE FROM nodes WHERE ip==(:ip) AND port==(:port)\", { \"ip\":node_tup[1], \"port\":node_tup[2]})\n app_process.commit()\n app_process.close()", "def _delete_all(self):\n logging.info(\"Remove all nodes and relations from database.\")\n self.graph.delete_all()\n return", "def delete_orphan_nodes(self):\n used=np.zeros( self.Nnodes(),'b1')\n valid_cells=~self.cells['deleted']\n valid_nodes=self.cells['nodes'][valid_cells,:].ravel()\n valid_nodes=valid_nodes[ valid_nodes>=0 ]\n used[ valid_nodes ]=True\n\n valid_edges=~self.edges['deleted']\n valid_nodes=self.edges['nodes'][valid_edges,:].ravel()\n used[ valid_nodes ]=True\n \n self.log.info(\"%d nodes found to be orphans\"%np.sum(~used))\n\n for n in np.nonzero(~used)[0]:\n self.delete_node(n)", "def delete(self):\n _url = (\n f\"{self.connector.base_url}/projects/{self.project_id}/nodes/{self.node_id}\"\n )\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.node_id = None\n self.name = None", "def deleteAllNeedlesFromScene(self):\n #productive #onButton\n profprint()\n while slicer.util.getNodes('python-catch-round_'+str(self.round)+'*') != {}:\n nodes = slicer.util.getNodes('python-catch-round_'+str(self.round)+'*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)", "def delete():", "def remove_nodes(self, properties, **kwargs):\r\n\t\traise NotImplementedError", "def del_node(node, delnum):\n pass", "def del_node (self, id):\n raise NotImplementedError", "def delete_edges(self, _ids):\n return self.make_request(\"POST\", \"edges/delete\", { \"edges\" : _ids })", "def delete_node(self, _id):\n return 
self.make_request(\"DELETE\", \"nodes/\"+_id, {})", "def test_delete_cluster_network(self):\n pass", "def delete(self, ids):\n\n if self.cluster:\n return self.cluster.delete(ids)\n\n return super().delete(ids)", "def remove_nodes_from_tag(session, tag_name, nodes=[], username='system_user'):\n session = validate_session(session)\n nodes_completed = []\n nodes_failed = []\n for node in nodes:\n tags_per_node = \\\n session.query(TagsPerNode, TagInfo).\\\n join(TagInfo).filter(TagInfo.tag == tag_name).\\\n filter(TagsPerNode.node_id == node).all()\n if len(tags_per_node) > 0:\n try:\n tags_deleted = map(lambda nodes: session.delete(nodes[0]),\n tags_per_node)\n session.commit()\n nodes_completed.append(node)\n except Exception as e:\n session.rollback()\n nodes_failed.append(node)\n else:\n return(False, \"Tag %s does not exist\" % \\\n (tag_name), tag_name)\n if len(nodes_failed) > 0 and len(node_completed) >0:\n return(True, \"Nodes %s were deleted from tag %s and nodes % were not deleted\" % \\\n (nodes_completed, tag_name, nodes_failed), nodes)\n elif len(nodes_failed) > 0 and len(node_completed) == 0:\n return(False, \"Nodes %s were not deleted from tag %s\" % \\\n (nodes_failed, tag_name), nodes)\n elif len(nodes_completed) > 0 and len(nodes_failed) == 0:\n return(True, \"Nodes %s were deleted from tag %s\" % \\\n (nodes_completed, tag_name), nodes)", "def remove_nodes(self, nodes):\n for node in nodes:\n for arc in node.entries:\n arc.src.exits.remove(arc)\n self.arcs.remove(arc)\n for arc in node.exits:\n arc.dest.entries.remove(arc)\n self.arcs.remove(arc)\n self.nodes.remove(node)\n dangling_nodes = []\n for node in self.nodes:\n if node == self.start or node == self.end:\n pass\n else:\n if not node.exits or not node.entries:\n dangling_nodes.append(node)\n if dangling_nodes:\n self.remove_nodes(dangling_nodes)", "def delete_all_edges(options):\n edge = Edge(vsm_obj)\n edges = (edge.query())\n edge_id = None\n for item in edges.edgePage.list_schema:\n edge.id = item.objectId\n edge.delete()", "def remove_nodes_connections(self, nodes):\n nodes = ensure_list(nodes)\n for nd in nodes:\n for nd_in in self.successors[nd.name]:\n self.predecessors[nd_in.name].remove(nd)\n self.edges.remove((nd, nd_in))\n self.successors.pop(nd.name)\n self.predecessors.pop(nd.name)\n self._node_wip.remove(nd)", "def removeNode(self, node):", "def test_delete_collection_cluster_network(self):\n pass", "def test_del_some_edges(graph_with_edges):\n graph_with_edges.del_edges('A', 'B')\n assert graph_with_edges['A'] == {'C': 9}", "def delete_node(uuid):\n with session_for_write() as session:\n # Delete attribute data\n session.execute(\n delete(model.Attribute).where(\n model.Attribute.node_uuid == uuid))\n # Delete introspection data\n session.execute(\n delete(model.Option).where(\n model.Option.uuid == uuid))\n session.execute(\n delete(model.IntrospectionData).where(\n model.IntrospectionData.uuid == uuid))\n # Delete the actual node\n session.execute(\n delete(model.Node).where(\n model.Node.uuid == uuid\n ).execution_options(synchronize_session=False)\n )", "def remove_nodes(self, nodes):\r\n new_leaves = set()\r\n for node in nodes:\r\n if node not in self.nodes:\r\n raise Exception(\"Attempting to remove invalid node: %s\" % node.data.id)\r\n for parent_node in node.parents:\r\n if parent_node in nodes:\r\n continue\r\n parent_node.children.remove(node)\r\n if not parent_node.children:\r\n new_leaves.add(parent_node)\r\n\r\n # Do these outside in case 'nodes' is in fact self.leaves, so that we 
don't change the set we're iterating over.\r\n self.leaves -= nodes\r\n self.leaves.update(new_leaves)\r\n return new_leaves", "def visit_Delete(self, node):\n self.generic_visit(node)\n if len(node.targets) > 1:\n return [ ast.Delete([node.target]) for target in node.targets ]\n return node", "def clean():\n os.system('killall -9 lnd')\n os.system('killall -9 btcd')\n \n shutil.rmtree(btcd_dir)\n os.remove(btcd_log)\n\n index = 0\n while True:\n node = Node.from_index(index)\n try:\n shutil.rmtree(node.path())\n os.remove(node.log())\n except:\n click.echo(f'removed {index} nodes.')\n break\n index += 1", "def delete_set(self, item): # TODO test\n tree = item.parent\n item_label = item.parent_node\n tree.remove_node(item)\n tree.remove_node(item_label)\n self.exercise.sets.remove(item.set)\n print(\"delete set\")", "def purge_nodes(self):\n\n return self._from_json(self.manage.run(override=\"purge-nodes\"))[\n \"success\"\n ]", "def delete_node_cascade(self,n):\n # list will get mutated - copy preemptively\n for j in list(self.node_to_edges(n)):\n self.delete_edge_cascade(j)\n self.delete_node(n)", "def remove_temporary_nodes(board_id):\n q1 = \"MATCH (n)-[r]-() WHERE n.board = {0} DELETE n, r\".format(board_id)\n q2 = \"MATCH n WHERE n.board = {0} DELETE n\".format(board_id)\n neo4j.CypherQuery(GRAPH_DB, q1).execute_one()\n neo4j.CypherQuery(GRAPH_DB, q2).execute_one()", "def delete_exporters(self):\n for node_cfg in self.node_cfg_list:\n self.delete_node(node_cfg)", "def delete(self, *items):\n self._visual_drag.delete(*items)\n ttk.Treeview.delete(self, *items)", "def delete_nodeset(nodeset_id): # noqa: E501\n return NodesetController.delete_nodeset(nodeset_id)", "def delete_network_bulk(self, tenant_id, network_id_list, sync=False):", "def delete_trash():\n\n #query db for marked trash\n q = QueryBuilder()\n nodes_to_delete_pks = []\n\n q.append(Node,\n filters = {'extras.trash': {'==' : True}\n }\n )\n res = q.all()\n for node in res:\n nodes_to_delete_pks.append(node[0].dbnode.pk)\n print('pk {}, extras {}'.format(node[0].dbnode.pk, node[0].get_extras()))\n\n #Delete the trash nodes\n\n print('deleting nodes {}'.format(nodes_to_delete_pks))\n delete_nodes(nodes_to_delete_pks)\n\n return", "def deleteSelected(self):\n self.p.dat.flowsheet.deleteEdges(self.selectedEdges)\n self.selectedEdges = []\n self.p.dat.flowsheet.deleteNodes(self.selectedNodes)\n self.selectedNodes = []\n self.p.noneSelectedEmit()\n self.p.createScene()", "def delete(self):\r\n if self.__abstract__:\r\n raise ThunderdomeException('cant delete abstract elements')\r\n if self.eid is None:\r\n return self\r\n query = \"\"\"\r\n e = g.e(eid)\r\n if (e != null) {\r\n g.removeEdge(e)\r\n g.stopTransaction(SUCCESS)\r\n }\r\n \"\"\" \r\n results = execute_query(query, {'eid':self.eid})", "def delete_node(self,n):\n if self._node_to_edges is not None:\n if len(self._node_to_edges[n])>0:\n print( \"Node %d has edges: %s\"%(n,self._node_to_edges[n]) )\n raise GridException(\"Node still has edges referring to it\")\n del self._node_to_edges[n]\n if self._node_to_cells is not None:\n if len(self._node_to_cells[n])>0:\n raise GridException(\"Node still has cells referring to it\")\n del self._node_to_cells[n]\n if self._node_index is not None:\n self._node_index.delete(n, self.nodes['x'][n,self.xxyy] )\n\n self.push_op(self.undelete_node,n,self.nodes[n].copy())\n\n self.nodes['deleted'][n] = True\n \n # special case, used for undo, reverts to previous state\n # more completely.\n if len(self.nodes)==n+1:\n 
self.nodes=self.nodes[:-1]", "def delete(*args, all: bool=True, attribute: Union[AnyStr, List[AnyStr]]=\"\", channels:\n bool=True, constraints: bool=True, constructionHistory: bool=True, controlPoints:\n bool=False, expressions: bool=True, hierarchy: AnyStr=\"\", inputConnectionsAndNodes:\n bool=True, motionPaths: bool=True, shape: bool=True, staticChannels: bool=True,\n timeAnimationCurves: bool=True, unitlessAnimationCurves: bool=True, **kwargs)->None:\n pass", "def remove_nodes(self, nodes, check_ready=True):\n nodes = ensure_list(nodes)\n for nd in nodes:\n if nd not in self.nodes:\n raise Exception(f\"{nd} is not present in the graph\")\n if self.predecessors[nd.name] and check_ready:\n raise Exception(\"this node shoudn't be run, has to wait\")\n self.nodes.remove(nd)\n # adding the node to self._node_wip as for\n self._node_wip.append(nd)\n # if graph is sorted, the sorted list has to be updated\n if hasattr(self, \"sorted_nodes\"):\n if nodes == self.sorted_nodes[: len(nodes)]:\n # if the first node is removed, no need to sort again\n self._sorted_nodes = self.sorted_nodes[len(nodes) :]\n else:\n for nd in nodes:\n self._sorted_nodes.remove(nd)\n # starting from the previous sorted list, so is faster\n self.sorting(presorted=self.sorted_nodes)", "def destroy(self):\n del self.nodes\n self.nodes = {}", "def delete_node(self, key_chain):\n node = self._data\n for key in key_chain[:-1]:\n node = node[key]\n\n del node[key_chain[-1]]", "def remove_node(self, node_id):\n try: \n del self._nodes[node_id] \n del self._inc[node_id] \n except KeyError:\n return \n for arcs_set in self._inc.values():\n arcs_to_remove = Set()\n for arc in arcs_set:\n if arc._head is node_id: arcs_to_remove.add(arc)\n arcs_set.difference_update(arcs_to_remove)", "def node_delete(self, nodeId):\n\n self._client.delete(\n \"{}/nodes/{}\".format(\n LKECluster.api_endpoint, parse.quote(str(nodeId))\n ),\n model=self,\n )", "def clear_nodes (self, node_type):\n return self.network.remove_nodes_from(\n [id for id, node in self.network.nodes_iter(data=True) if\n node.type == node_type])", "def syncrepl_delete(self, uuids):\n pass", "def delete_node(ugraph, node):\r\n neighbors = ugraph[node]\r\n ugraph.pop(node)\r\n for neighbor in neighbors:\r\n ugraph[neighbor].remove(node)", "def remove_nodes_from_cluster(self, nodes, redeploy=True,\n check_services=False):\n self.fuel_web.update_nodes(\n self.cluster_id,\n nodes,\n pending_addition=False, pending_deletion=True,\n )\n if redeploy:\n self.fuel_web.deploy_cluster_wait(self.cluster_id,\n check_services=check_services)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def delete_node(ugraph, node):\n neighbors = ugraph[node]\n ugraph.pop(node)\n for neighbor in neighbors:\n ugraph[neighbor].remove(node)", "def delete_list(self): \n temp_node = self.head\n while temp_node is not None:\n prev_node = temp_node\n temp_node = temp_node.next\n # prev_node.val += \": deleted\" # for sanity check\n # reset data\n prev_node.val = None\n prev_node.next = None", "def 
deleteAllAutoNeedlesFromScene(self):\r\n # productive #onButton\r\n profprint()\r\n while slicer.util.getNodes('auto-seg_' + str(self.round) + '*') != {}:\r\n nodes = slicer.util.getNodes('auto-seg_' + str(self.round) + '*')\r\n for node in nodes.values():\r\n slicer.mrmlScene.RemoveNode(node)", "def test_delete3(graph):\n graph.delete(3)\n print(graph)\n assert str(graph) == \"Graph<<1, 2>, <0, 2>, <0, 1>>\"", "def snap_delete_all(mnode):\n cmd = \"gluster snapshot delete all --mode=script\"\n return g.run(mnode, cmd)", "def scale_nodes_in(self, nodes):\n for node in nodes:\n self.nodes.remove(node)\n return self.terminate_instances(node.instance_id for node in nodes)", "def remove_node(self, node_id):\n try: \n del self._nodes[node_id] \n del self._inc[node_id]\n except KeyError:\n return \n for arcs_list in self._inc.values():\n record = arcs_list.get_first_record()\n while record is not None:\n arc = record.element\n if arc._head is node_id: arcs_list.delete_record(record)\n record = record._next", "def del_node(self, n):\n if n in self.node_dict:\n del self.node_dict[n]\n for node in self.node_dict:\n try:\n self.del_edge(node, n)\n except:\n pass\n else:\n raise KeyError(\"Cannot remove node that does not exist.\")", "def delete_set(set_name):\n\n flg = logging.getLogger(\"lettuce.xgenSetup.delete_set\")\n\n flg.info(\"Set to delete: {}\".format(set_name))\n\n if mc.objExists(set_name):\n mc.select(set_name)\n old_objects = mc.ls(selection=True)\n flg.debug(\"Old Objects:\")\n for o in old_objects:\n flg.debug(o)\n ref_objects = mc.ls(selection=True, referencedNodes=True)\n\n ref_del_queue = []\n if len(ref_objects) > 0:\n flg.debug(\"Old Reference Nodes:\")\n for o in ref_objects:\n flg.debug(o)\n for o in ref_objects:\n flg.debug(\"Queuing {} for reference removal\".format(o))\n top = mc.referenceQuery(o, referenceNode=True)\n ref_del_queue.append(top)\n if len(ref_del_queue):\n for o in ref_del_queue:\n flg.debug(\"Removing reference: {}\".format(o))\n ref_file = mc.referenceQuery(o, filename=True)\n mc.file(ref_file, removeReference=True)\n for o in old_objects:\n try:\n flg.debug(\"Deleting {}\".format(o))\n mc.delete(o)\n except ValueError as e:\n flg.debug(\"Unable to delete {0}. Error: {1}\".format(o, e))\n flg.debug(\"Deleting set: {}\".format(set_name))\n mc.delete(set_name)", "def delete_transactions(conn, ids):\n cur = conn.cursor()\n for item in ids:\n cur.execute(\"DELETE from transactions WHERE id=? 
\", (item,))\n conn.commit()", "def destroy_all(self):\n self.log.info(\"Destroying the %s cluster\" % self.cluster_name)\n for n in self.all_nodes:\n n.destroy()\n remove(self.save_file)", "def delete_many(self, keys):\n raise NotImplementedError()", "def perform_set(self, nodes=[]):\n\n # Input validation\n try:\n # Works for XGNodeDict input\n set_nodes = nodes.get_updates()\n except (AttributeError, TypeError):\n # Assume list instead\n set_nodes = nodes\n if not isinstance(set_nodes, list):\n raise ValueError('Expecting nodes to be of type list')\n else:\n for x in set_nodes:\n if not isinstance(x, XGNode):\n raise ValueError('Invalid node: {0}'.format(x.__class__))\n\n req = cinder.volume.drivers.violin.vxg.core.request.XGSet(set_nodes)\n resp = self.send_request(req)\n try:\n # Works for XGNodeDict input, clear the tracked modifications\n nodes.clear_updates()\n except (AttributeError, TypeError):\n pass\n return resp.as_action_result()", "def delete_by_remote_path(self, list_of_remote_paths): # todo: check error handling\n conn = self.create_connection()\n conn.isolation_level = None\n c = conn.cursor()\n c.execute(\"begin\")\n try:\n for rp in list_of_remote_paths:\n # srp = os.path.join(remote_starting_path, rp)\n # cmd = 'DELETE FROM %s WHERE %s=\"%s\"' % (self.TABLE_NAME, self.REM, srp)\n cmd = 'DELETE FROM %s WHERE %s=\"%s\"' % (self.TABLE_NAME, self.HDFS, rp)\n c.execute(cmd)\n # c.execute(\"fnord\") # to check if transaction rollbacks\n conn.commit()\n except sqlite3.Error:\n print(\"Transaction failed!\")\n conn.rollback()\n conn.close()", "def delete_inE(self, *labels):\r\n self._simple_deletion('inE', labels)", "def clean(self):\n for nodeId in list(self.nodes.keys()):\n if not self.nodes[nodeId].safe:\n del self.nodes[nodeId]", "def delete_node_categories_client():\n\n firebase = pyrebase.initialize_app(config)\n db = firebase.database()\n db.child(\"categories/clients\").remove()", "def delete_edges_from(self, edges: Iterable):\n for i, j in edges:\n self.delete_edge(i, j)", "def deleteNodeNetworkConfig(self,node):\n data = self.connect('delete',\"nodes/%s/network\" % (node),None)\n return data", "def test_delete_network(self):\n pass", "def clean_proxy_nodes(rig_top_node):\n proxy_nodes = cmds.ls('proxy_*', type='network')\n\n if proxy_nodes:\n cmds.delete(proxy_nodes)\n LOG.debug(\"Deleted proxy nodes: {nodes}\".format(nodes=''.join(proxy_nodes)))\n\n if cmds.objExists('proxy_nodes'):\n print 'Deleting set'\n cmds.delete('proxy_nodes')", "def test_delete_hyperflex_node_profile(self):\n pass", "def DeleteAllItems(self):\r\n\r\n self.DeleteRoot()", "def deleteAll(self):\n self.deleteAttributeRange() #Default args = everything", "def test_remove_all_values1(delete_tree):\n delete_tree.remove(\"ted\")\n delete_tree.remove(\"tea\")\n delete_tree.remove(\"teabag\")\n delete_tree.remove(\"teabags\")\n delete_tree.remove(\"teabagger\")\n delete_tree.remove(\"teabaggers\")\n delete_tree.remove(\"teabagged\")", "def _delete(self, current_node):\n pass", "def test_delete1(graph):\n graph.delete(1)\n print(graph)\n assert str(graph) == \"Graph<<2>, <0, 3>, <2>>\"", "def delNode(nodeName):\n\t\tslicer.util.getNode(nodeName)\n\t\tslicer.mrmlScene.RemoveNode(slicer.util.getNode(nodeName))\n\t\treturn", "def test_remove_all_values3(delete_tree):\n delete_tree.remove(\"tea\")\n delete_tree.remove(\"teabagged\")\n delete_tree.remove(\"teabaggers\")\n delete_tree.remove(\"teabagger\")\n delete_tree.remove(\"teabags\")\n delete_tree.remove(\"teabag\")\n delete_tree.remove(\"ted\")", "def 
clear(request):\n Node.objects.all().delete()\n return JsonResponse({'response': 'clear'})", "def deleteRig(self):\n\n allNodes = cmds.ls(\"*\")\n for node in allNodes:\n if cmds.objExists(node + \".sourceModule\"):\n cmds.lockNode(node, lock=False)\n source = cmds.getAttr(node + \".sourceModule\")\n if source == self.name:\n try:\n cmds.delete(node)\n except:\n pass" ]
[ "0.7969933", "0.767122", "0.7110108", "0.7096792", "0.70466393", "0.6917409", "0.685601", "0.6738954", "0.6675691", "0.66630596", "0.6651388", "0.6613611", "0.66037107", "0.6587649", "0.6580377", "0.65754753", "0.6555476", "0.6534313", "0.65310407", "0.652126", "0.64312625", "0.63865054", "0.63709134", "0.63639486", "0.63610625", "0.6348617", "0.62910926", "0.62751096", "0.6266323", "0.62396413", "0.6227739", "0.6218436", "0.61470133", "0.6142137", "0.6125056", "0.6125039", "0.61110026", "0.60993564", "0.6094124", "0.6075837", "0.6069201", "0.6068975", "0.6065473", "0.6063726", "0.60608286", "0.60565776", "0.6052285", "0.6033727", "0.6033699", "0.6033462", "0.60300756", "0.60265774", "0.60223716", "0.6017014", "0.60114723", "0.6004027", "0.6003342", "0.6000738", "0.59789157", "0.5970015", "0.5916617", "0.59036124", "0.5897174", "0.5888661", "0.5886807", "0.5885143", "0.5885143", "0.5885143", "0.5885143", "0.5885143", "0.5882569", "0.5882146", "0.5881842", "0.58786815", "0.58773386", "0.58646524", "0.5864012", "0.5858055", "0.5857274", "0.58571965", "0.5853742", "0.58507264", "0.5850662", "0.58489174", "0.5843965", "0.5840879", "0.58406913", "0.5831151", "0.5824666", "0.5819689", "0.5810302", "0.58092725", "0.58005226", "0.5796547", "0.57951015", "0.5794704", "0.5794359", "0.5773841", "0.5768505", "0.5764987" ]
0.69111544
6
This method deletes all AiiDA nodes in the DB which have an extra trash=True, and all their children. It could be extended into a garbage collector. Be careful when using it.
def delete_trash():
    #query db for marked trash
    q = QueryBuilder()
    nodes_to_delete_pks = []
    q.append(Node, filters = {'extras.trash': {'==' : True} } )
    res = q.all()
    for node in res:
        nodes_to_delete_pks.append(node[0].dbnode.pk)
        print('pk {}, extras {}'.format(node[0].dbnode.pk, node[0].get_extras()))
    #Delete the trash nodes
    print('deleting nodes {}'.format(nodes_to_delete_pks))
    delete_nodes(nodes_to_delete_pks)
    return
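A minimal usage sketch for the helper above, assuming an already loaded AiiDA profile: the module name aiida_utils, the PK 1234 and the exact import path of load_node are placeholders (they differ between AiiDA versions) and are not taken from the snippet itself.

from aiida.orm import load_node
from aiida_utils import delete_trash  # hypothetical module holding the helper above

# Flag an existing node (pk 1234 is a placeholder) so the QueryBuilder
# filter on 'extras.trash' picks it up.
node = load_node(1234)
node.set_extra('trash', True)

# Deletes every node carrying the extra trash=True, together with all of its children.
delete_trash()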
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _delete_all(self):\n logging.info(\"Remove all nodes and relations from database.\")\n self.graph.delete_all()\n return", "def DeleteAllItems(self):\r\n\r\n self.DeleteRoot()", "def clear_db():\n humans = Human4j.nodes.all()\n for h in humans:\n h.delete()\n binomes = Binome4j.nodes.all()\n for b in binomes:\n b.delete()\n projects = Project4j.nodes.all()\n for p in projects:\n p.delete()\n sherpas = Sherpa4j.nodes.all()\n for sh in sherpas:\n sh.delete()\n students = Pioupiou4j.nodes.all()\n for piou in students:\n piou.delete()\n partenaires = Partenaire4j.nodes.all()\n for part in partenaires:\n part.delete()\n ps = Planete_Solidaire.nodes.all()\n for misc in ps:\n misc.delete()", "def resetTree(self):\n for fila in self.verDatos.get_children():\n self.verDatos.delete(fila)", "def clean():\n os.system('killall -9 lnd')\n os.system('killall -9 btcd')\n \n shutil.rmtree(btcd_dir)\n os.remove(btcd_log)\n\n index = 0\n while True:\n node = Node.from_index(index)\n try:\n shutil.rmtree(node.path())\n os.remove(node.log())\n except:\n click.echo(f'removed {index} nodes.')\n break\n index += 1", "def cleanup(self):\n for child in self.children():\n child.deleteLater()", "def delete_nodes(pks_to_delete):\n from django.db import transaction\n from django.db.models import Q\n from aiida.backends.djsite.db import models\n from aiida.orm import load_node\n\n # Delete also all children of the given calculations\n # Here I get a set of all pks to actually delete, including\n # all children nodes.\n all_pks_to_delete = set(pks_to_delete)\n for pk in pks_to_delete:\n all_pks_to_delete.update(models.DbNode.objects.filter(\n parents__in=pks_to_delete).values_list('pk', flat=True))\n\n print \"I am going to delete {} nodes, including ALL THE CHILDREN\".format(\n len(all_pks_to_delete))\n print \"of the nodes you specified. Do you want to continue? [y/N]\"\n answer = raw_input()\n\n if answer.strip().lower() == 'y':\n # Recover the list of folders to delete before actually deleting\n # the nodes. 
I will delete the folders only later, so that if\n # there is a problem during the deletion of the nodes in\n # the DB, I don't delete the folders\n folders = [load_node(pk).folder for pk in all_pks_to_delete]\n\n with transaction.atomic():\n # Delete all links pointing to or from a given node\n models.DbLink.objects.filter(\n Q(input__in=all_pks_to_delete) |\n Q(output__in=all_pks_to_delete)).delete()\n # now delete nodes\n models.DbNode.objects.filter(pk__in=all_pks_to_delete).delete()\n\n # If we are here, we managed to delete the entries from the DB.\n # I can now delete the folders\n for f in folders:\n f.erase()", "def delete_all(self):\n query = \"\"\"MATCH(n) DETACH DELETE n\"\"\"\n return self.create_tx(query)", "def delete_orphan_nodes(self):\n used=np.zeros( self.Nnodes(),'b1')\n valid_cells=~self.cells['deleted']\n valid_nodes=self.cells['nodes'][valid_cells,:].ravel()\n valid_nodes=valid_nodes[ valid_nodes>=0 ]\n used[ valid_nodes ]=True\n\n valid_edges=~self.edges['deleted']\n valid_nodes=self.edges['nodes'][valid_edges,:].ravel()\n used[ valid_nodes ]=True\n \n self.log.info(\"%d nodes found to be orphans\"%np.sum(~used))\n\n for n in np.nonzero(~used)[0]:\n self.delete_node(n)", "def destroyNodes(self):\r\n for nt in self.listNodes.keys(): \t# for all kind of nodes...\r\n for node in self.listNodes[nt]: \t# for all nodes of type <nt>\r\n if node.graphObject_: node.graphObject_.destroy()", "def clean_database(databasePathname):\n print '# loading database ' + databasePathname\n try:\n db = gdbm.open(databasePathname, 'w')\n except:\n print \"# \" + databasePathname + \" could not be loaded\"\n sys.exit(-1)\n\n # even though gdbm supports memory efficient iteration over\n # all keys, I want to order my traversal across similar\n # paths to leverage caching of directory files:\n allKeys=db.keys()\n print '# finished loaded keys from ' + databasePathname\n allKeys.sort()\n print '# finished sorting keys from ' + databasePathname\n print '# deleting dead nodes'\n count=0\n for currKey in allKeys:\n try:\n os.stat(currKey)\n sys.stdout.write('.')\n except OSError:\n del db[currKey]\n sys.stdout.write('*')\n count=count+1\n sys.stdout.flush()\n print \"\\n# reorganizing \" + databasePathname\n db.reorganize()\n db.sync()\n db.close()\n print '# done cleaning ' + databasePathname + ', removed ' + str(count) + ' dead nodes!'", "def clean(self):\n for nodeId in list(self.nodes.keys()):\n if not self.nodes[nodeId].safe:\n del self.nodes[nodeId]", "def destroy_all(self):\n self.log.info(\"Destroying the %s cluster\" % self.cluster_name)\n for n in self.all_nodes:\n n.destroy()\n remove(self.save_file)", "def delete(self, nodes):\n # Check indices.\n N = len(self)\n if not isinstance(nodes, (set, list, tuple)):\n nodes = [nodes]\n if not all(0 < node <= N for node in nodes):\n raise IndexError()\n\n # Reparent orphaned nodes.\n # Lift the arc until the parent is non-deleted node.\n # If all parents are deleted, we will hit the root eventually.\n deleted = set(nodes)\n alive_heads = [None] * N\n for node in range(1, N + 1):\n head = self.heads(node)\n while head in deleted:\n head = self.heads(head)\n alive_heads[node - 1] = head\n\n # Remap.\n new_nodes = {0: 0}\n new_node = 1\n\n for node in range(1, N + 1):\n if node in deleted:\n continue\n new_nodes[node] = new_node\n new_node += 1\n\n # Gather non-deleted stuff.\n forms = []\n lemmas = []\n cpostags = []\n postags = []\n feats = []\n heads = []\n deprels = []\n\n for node in range(1, N + 1):\n if node in deleted:\n continue\n 
forms.append(self.forms(node))\n lemmas.append(self.lemmas(node))\n cpostags.append(self.cpostags(node))\n postags.append(self.postags(node))\n feats.append(self.feats(node))\n heads.append(new_nodes[alive_heads[node - 1]])\n deprels.append(self.deprels(node))\n \n # Construct new tree.\n self.__init__(forms, lemmas, cpostags, postags, feats, heads, deprels)", "def delete_all_entities(self):\n self._delete_all_acls()\n self._delete_all_containers()\n self._delete_all_orders()\n self._delete_all_secrets()", "def delete_relatives(self):\n category_ratings = list(self.category_ratings.all())\n self.category_ratings.clear()\n for category_rating in category_ratings:\n if category_rating.isOrphaned():\n category_rating.delete()\n\n word_counts = list(self.word_counts.all())\n self.word_counts.clear()\n for word_count in word_counts:\n if word_count.isOrphaned():\n word_count.delete()", "def delete_all_dags(db: Redis[bytes]) -> None:\n for dag in db.smembers(DAG_INDEX):\n db.delete(join(DAG_OPERATIONS, dag.decode())) # type:ignore\n db.delete(join(DAG_STATUS, dag.decode())) # type:ignore\n # Remove old index\n db.delete(DAG_INDEX)", "def deleteAll(tx):\n query = (\n\n \"MATCH(p1:Person)-[a:APP_CONTACT]->(p2:Person)\"\n \"WHERE a.date < date() - duration({Days: 10}) OR (a.date = date() - duration({Days: 10}) AND a.hour < time())\"\n \"DELETE a\"\n\n )\n\n tx.run(query)", "def deleteAll():\n _table.deleteAll()\n _initialiseGlobals()\n\n return", "def deleteAllSteps(self):\n\n self.dbase.deleteAllSteps(self.scene)\n return", "def remove_all():\n \"\"\" Removes all from the database \"\"\"\n redis_store.flushall()", "def remove_stale_files(self) -> None:\n\n for db in self.dbnodes:\n db.remove_stale_dbnode_files()", "def delete_all():\n if os.path.exists(DATA_DIR):\n shutil.rmtree(DATA_DIR)", "def _delete_all_containers(self):\n for container_ref in self.created_entities['container']:\n self.barbicanclient.containers.delete(container_ref)", "def delete(self): # DirObj.delete\n self.deleted=True\n for name, d in self.subdirs.iteritems():\n d.delete()\n for name, f in self.files.iteritems():\n f.delete()", "def delete_all(self):\n raise NotImplementedError()", "def DeleteAllItems(self):\r\n\r\n if self._anchor:\r\n self.Delete(self._anchor)", "def deleteAll(self):\n self.db.execute(\"DELETE FROM MATCH;\", ())", "def delete_all(self):\n # delete everything\n shutil.rmtree(self.location)", "def deleteAllNeedlesFromScene(self):\n #productive #onButton\n profprint()\n while slicer.util.getNodes('python-catch-round_'+str(self.round)+'*') != {}:\n nodes = slicer.util.getNodes('python-catch-round_'+str(self.round)+'*')\n for node in nodes.values():\n slicer.mrmlScene.RemoveNode(node)", "def delete_child_rows(rows, doctype):\n\tfor p in list(set([r[1] for r in rows])):\n\t\twebnotes.conn.sql(\"\"\"delete from `tab%s` where parent=%s\"\"\" % (doctype, '%s'), p)", "def delete_child_rows(rows, doctype):\n\tfor p in list(set([r[1] for r in rows])):\n\t\twebnotes.conn.sql(\"\"\"delete from `tab%s` where parent=%s\"\"\" % (doctype, '%s'), p)", "def delete_child_rows(rows, doctype):\n\tfor p in list(set([r[1] for r in rows])):\n\t\twebnotes.conn.sql(\"\"\"delete from `tab%s` where parent=%s\"\"\" % (doctype, '%s'), p)", "def delete_tree():\n from trie import Trie\n t = Trie()\n t.insert(\"ted\")\n t.insert(\"tea\")\n t.insert(\"teabag\")\n t.insert(\"teabags\")\n t.insert(\"teabagger\")\n t.insert(\"teabaggers\")\n t.insert(\"teabagged\")\n return t", "def PurgeAll(self):\n\t\tself.acad.ActiveDocument.PurgeAll()", 
"def clear_tree(self):\n self.treeview.delete(*self.treeview.get_children())", "def destroy(self):\n del self.nodes\n self.nodes = {}", "def prune(self, rel=None):\n deleted = self._registry._get_not_reachable(self.root, rel=rel)\n for d in deleted:\n self._delete_cuds_triples(d)", "def _purge(self):\n for _ in self.all():\n self.delete(_)", "def clean():\n new_tree = None", "def delete_all_edges(options):\n edge = Edge(vsm_obj)\n edges = (edge.query())\n edge_id = None\n for item in edges.edgePage.list_schema:\n edge.id = item.objectId\n edge.delete()", "def deleteAllSteps(self, landsatScene):\n\n with self.getConnection() as conn:\n try:\n cur = conn.cursor()\n cur.execute(\"\"\"\\\n delete from process_run where PATH=? and ROW=? and Acqdate=? \n and fk_wfid=?\"\"\", (landsatScene.path, landsatScene.row, landsatScene.acqdate, self.wfid))\n\n except sqlite3.Error as error:\n cur.close()\n raise workflowException('Database {0}: {1}'.format(self.wfname, repr(error)))\n return", "def clean_database(self):\n for name in list(self.database):\n self._remove_database_entry(name)", "def delete_all(self, prog:progress=None): \n\t\tself.__output_status('Delete all files')\n\t\tif (self.__check_terminated()):\n\t\t\treturn;\t\n\t\tdelete_dir(self.root)\n\t\ttime.sleep(0.3)", "def delete_all(self):\n self.session.query(TodoItem).delete()\n self.session.query(TodoList).delete()", "def cleanup_funsies(db: Redis[bytes]) -> None:\n queues = rq.Queue.all(connection=db)\n for queue in queues:\n queue.delete(delete_jobs=True)\n\n # Reset operation status\n ops = join(OPERATIONS, hash_t(\"*\"), \"owner\")\n keys = db.keys(ops)\n if len(keys):\n logger.info(f\"clearing {len(keys)} unfinished ops\")\n for k in keys:\n db.delete(k)", "def DeleteChildren(self, tree):\r\n\r\n for child in self._children:\r\n if tree:\r\n tree.SendDeleteEvent(child)\r\n\r\n child.DeleteChildren(tree)\r\n \r\n if child == tree._select_me:\r\n tree._select_me = None\r\n\r\n # We have to destroy the associated window\r\n wnd = child.GetWindow()\r\n if wnd:\r\n wnd.Destroy()\r\n child._wnd = None\r\n\r\n if child in tree._itemWithWindow:\r\n tree._itemWithWindow.remove(child)\r\n \r\n del child\r\n \r\n self._children = []", "def DeleteChildren(self, tree):\r\n\r\n for child in self._children:\r\n if tree:\r\n tree.SendDeleteEvent(child)\r\n\r\n child.DeleteChildren(tree)\r\n \r\n if child == tree._selectItem:\r\n tree._selectItem = None\r\n\r\n # We have to destroy the associated window\r\n for wnd in child._wnd:\r\n if wnd:\r\n wnd.Hide()\r\n wnd.Destroy()\r\n \r\n child._wnd = []\r\n\r\n if child in tree._itemWithWindow:\r\n tree._itemWithWindow.remove(child)\r\n \r\n del child\r\n \r\n self._children = []", "def clean(self):\n\t\tfor v in self:\n\t\t\tv.reset_distance()\n\t\t\tv.reset_predecessor()\n\t\t\tv.reset_visited()", "def db_cleanup(self):\n with self.context():\n meido.db.session.remove()\n meido.db.drop_all()", "def remove_all(self):\n # Post a delete all notice to the manager\n self._remove_all()", "def remove_all(self):\n # Post a delete all notice to the manager\n self._remove_all()", "def cleanup(self):\n for table in filter(lambda x: self.cmd.exists(x, silent=(log.level < DEBUG)), self.tables):\n log.info(\"MLoad\", \"Dropping table '{}'...\".format(table))\n self.cmd.drop_table(table, silent=True)", "def delete_all(self):\n models.CourseLearningOutcome.objects.all().delete()\n #models.CoreLearningOutcome.objects.all().delete()\n #models.CreditType.objects.all().delete()\n models.Course.objects.all().delete()\n 
models.DegreeProgram.objects.all().delete()\n models.DPCourseSpecific.objects.all().delete()\n models.DPCourseGeneric.objects.all().delete()\n models.DPCourseSubstituteSpecific.objects.all().delete()\n models.DPCourseSubstituteGeneric.objects.all().delete()", "def delete_all_onprogress_pages():\r\n db = connect()\r\n cursor = db.cursor()\r\n try:\r\n cursor.execute(\"DELETE FROM on_progress_pages\")\r\n db.commit()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()", "def deleteAll(self):\n self.deleteAttributeRange() #Default args = everything", "def delete(self):\n for i in set(self.instances.values()):\n i.delete()\n shutil.rmtree(self.dirpath, True)", "def full_reset(self):\n for docid in self.iter_docids():\n self.delete(docid)\n self.client.delete(self.dbprefix + 'schema')\n self.client.delete(self.dbprefix + 'docs')\n self.client.delete(self.dbprefix + 'nextid')", "def __del__(self):\n del self.board_\n del self.children_edges_\n self.board_ = None\n del self.parent_edge_\n # print(\"destruct node\")", "def remove_all_objs(self):\n objs = self.scene.get_objects()\n objs_attached = self.scene.get_attached_objects()\n # remove add objects\n for key in objs.keys():\n self.remove_obj(key)\n # remove attached objects\n for key in objs_attached.keys():\n self.unlink_obj(objs_attached[key].link_name, key)", "def delete_all(cls):\n with sqlite3.connect(cls.dbpath) as connection:\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n SQL = \"DELETE FROM accounts;\"\n cursor.execute(SQL)", "def deleted_canvas_nodes(self, canvas_nodes: list[CanvasNode]) -> None:\n for canvas_node in canvas_nodes:\n node = canvas_node.core_node\n del self.canvas_nodes[node.id]\n del self.session.nodes[node.id]", "def cleanup(self):\n\n if self.debug:\n print 'Running cleanup()'\n print 'Starting removing dead links'\n\n for root, dirs, files in os.walk(self.tags_folder):\n if files:\n for f in files:\n try:\n full_path = os.path.join(root, f)\n if not os.path.exists(os.readlink(full_path)):\n os.unlink(full_path)\n if self.debug:\n print 'Removing dead link %s' % full_path\n except OSError:\n pass\n\n if self.debug:\n print 'Starting removing empty directories'\n self._del_empty_dirs(self.tags_folder)", "def preprocessNode(self):\n while self.node.firstChild():\n self.node.firstChild().doDelete(self.node)", "def _delete_all_acls(self):\n for acl_ref in self.created_entities['acl']:\n entity_ref = acl_ref.replace(\"/acl\", \"\")\n blank_acl_entity = self.barbicanclient.acls.create(\n entity_ref=entity_ref)\n blank_acl_entity.remove()", "def reset(self) -> None:\r\n self.tree.delete(*self.tree.get_children())", "def delete_all_runtimes(self):\n self.compute_handler.delete_all_runtimes()", "def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()", "def clear_all():\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete()", "def destroy(self):\n for node in self.find_references():\n node.destroy()\n self._bld.RemoveObject(self.get_sobj())", "def clear_all() -> None:\n 
datastore.db.client.drop_database(DATABASE_NAME)\n ClassifierCache.clear_all()", "def remove(self):\n for ref_node in self.node.find_references():\n ref_node.destroy()\n File.remove(self)", "def cleanup(self):\n deletes = []\n for item in self._collect.find({'status': 'started'}, {'_id': True}):\n deletes.append(pymongo.DeleteOne(item))\n # Remove them\n if len(deletes):\n print(\"Delete\", self._collect.bulk_write(deletes).deleted_count)", "def DeleteChildren(self, item):\r\n\r\n self._dirty = True # do this first so stuff below doesn't cause flicker\r\n\r\n self.ChildrenClosing(item)\r\n item.DeleteChildren(self)", "def clean_all_db():\n for model in [\n Component, Arch, AutoCase, AutoCaseFailure, Bug, Linkage, WorkItem,\n Document, Project, Framework]:\n model.objects.all().delete()", "def empty_db(self):\n try:\n self.cur.execute(\"DELETE FROM Crashes;\")\n self.con.commit()\n print 'Deleted all records'\n\n except sqlite.Error, e:\n print 'Unable to delete all records.'\n print 'Exception follows:'\n print e", "def clean(self):\n for i in self.winfo_children():\n i.destroy()", "def clean_db():\n yield\n logging.info(\"Delete table\")\n db.delete_table(\"TestRules\")", "def delete_all_onprogress_domains():\r\n db = connect()\r\n cursor = db.cursor()\r\n try:\r\n cursor.execute(\"DELETE FROM on_progress_domains\")\r\n db.commit()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()", "def _remove_recursively(self, root):\n # Stop if we reached a content item\n if not isinstance(root, ContentDescriptorSet):\n return\n\n children = root.get_children_reference()\n\n # Storing a list of items to delete because we don't want to remove\n # children while we iterate over them.\n children_to_delete = []\n\n for child in children:\n\n # Walk deeper until we find a ContentItem object.\n if not isinstance(child, ContentItem):\n self._remove_recursively(child)\n continue\n\n # Check if we can keep the item or not..\n if self.keep_item(child):\n continue\n\n # Delete the child from the tree after walking through the list.\n children_to_delete.append(child)\n\n # Now let's delete all the items we were supposed to.\n for child_to_delete in children_to_delete:\n root.remove_child(child_to_delete)", "def delete_all_domain_pages():\r\n db = connect()\r\n cursor = db.cursor()\r\n try:\r\n cursor.execute(\"DELETE FROM domain_pages\")\r\n db.commit()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()", "def clear_data():\n logger.info(\"Delete Structure instances\")\n Structure.objects.all().delete()\n logger.info(\"Delete StructureType instances\")\n StructureType.objects.all().delete()\n logger.info(\"Delete Industry instances\")\n Industry.objects.all().delete()\n logger.info(\"Delete Price instances\")\n PriceList.objects.all().delete()\n logger.info(\"Delete Stock instances\")\n Stock.objects.all().delete()\n logger.info(\"Delete News instances\")\n News.objects.all().delete()\n logger.info(\"Delete NewsImages instances\")\n NewsImage.objects.all().delete()\n logger.info(\"Delete News Sections instances\")\n NewsCategorySection.objects.all().delete()\n logger.info(\"Delete Analysis instances\")\n AnalysisOpinion.objects.all().delete()\n logger.info(\"Delete Analysis Images instances\")\n AnalysisImage.objects.all().delete()\n logger.info(\"Delete Analysis 
Sections instances\")\n AnalysisCategorySection.objects.all().delete()", "def clean_db():\n db = get_db()\n tables = db.tables\n for table in tables:\n db[table].drop()", "def clear(self):\n self._ll_tree.clear()", "def delete_db():\n db.drop_all()", "def clear_registered_nodes(self):\n self.__nodes.clear()\n self.__names.clear()\n self.__aliases.clear()", "def clean(cls, pdb_object):\n if not cls.computed(pdb_object):\n return\n for successor in cls.successors:\n successor.clean(pdb_object)\n pdb_object.uncomplete(cls.name)\n for file in cls.files(pdb_object):\n file.delete()", "def cleanUp(self):\r\n remove_files(self._db_files_to_remove, error_on_missing=False)", "def cleanAll(self):\n for i in range(len(self.asteroid_type) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_type[i])\n self.del_asteroid(i)\n\n for i in range(len(self.asteroid_id_e) - 1, -1, -1):\n x, y = self.get_coords(self.asteroid_id_e[i])\n self.del_asteroid_e(i)", "def clear(self):\n \n self.node_set.clear()\n self.prefix.clear()\n self.suffix.clear()\n self.num_node = 0\n self.edges = 0", "def flushdb(self):\n allKeys = self.redis.keys(self.appendKeys(\"*\"))\n # for some reason deleteing with a list of keys isn't working\n p = self.redis.pipeline()\n for key in allKeys:\n p.delete(key)\n p.execute()", "def delete_all(cls):\n cls.dbm().modelclass_deleteall(cls)", "def prune( self ):\n if self.children is None:\n return\n \n # recursively prune from bottom up\n for space in self.children:\n space.prune()\n\n # if all child nodes are empty remove them all\n for space in self.children:\n if not space.is_empty():\n return\n\n self.children = None", "def deleteNode(*args, **kwds):\n nodes = args\n if len(args) < 1:\n nodes = cmds.ls(sl=1)\n \n for node in nodes:\n node_lst = [node]\n if isinstance(node, (list, tuple)):\n node_lst = node\n\n for n in node_lst:\n if cmds.objExists(str(n)):\n cmds.delete(str(n), **kwds)\n else:\n cmds.warning(\"# Don’t exist - \" + node)", "def db_remove():\n\n db.session.close()\n db.drop_all()\n\n path = current_app.config['SNER_VAR']\n for file_object in os.listdir(path):\n file_object_path = os.path.join(path, file_object)\n if os.path.isdir(file_object_path):\n shutil.rmtree(file_object_path)\n else:\n os.unlink(file_object_path)", "def DeleteContainers(self):\n for container in itertools.chain(*list(self.containers.values())):\n container.Delete()", "def delete_all(self):\n with self.__lock:\n self.__data = dict()\n self.flush()", "def delete(self):\n self.parent.delete_node(self)", "def remove_all():\n storage = FileStorage()\n objects = storage.all()\n objects = list(objects.values())\n\n for element in objects:\n storage.delete(element)\n objects = storage.all()", "def delete(self, *args, **kwargs):\n self.delete_relatives()\n old_content = self.content\n super().delete(*args, **kwargs)\n if old_content.isOrphaned():\n old_content.delete()" ]
[ "0.75391054", "0.7246509", "0.6745943", "0.6743173", "0.66865116", "0.66524655", "0.6503817", "0.64772564", "0.6394675", "0.63327867", "0.6293654", "0.62701166", "0.6248863", "0.6222633", "0.6217673", "0.6183407", "0.61382097", "0.6128062", "0.61192054", "0.6104992", "0.60861146", "0.6066739", "0.60219026", "0.60211426", "0.60163176", "0.6010808", "0.6009833", "0.60089463", "0.59942514", "0.59707975", "0.59692186", "0.59692186", "0.59692186", "0.5965221", "0.5963352", "0.5963177", "0.59437925", "0.5923371", "0.59199125", "0.5919371", "0.59128726", "0.5898711", "0.5888167", "0.58719593", "0.58642286", "0.5841385", "0.58354837", "0.5832835", "0.5824786", "0.581415", "0.5813619", "0.5813619", "0.580388", "0.5771114", "0.5764473", "0.5759792", "0.5748104", "0.57462764", "0.57393515", "0.57336795", "0.5732811", "0.5732538", "0.5730909", "0.5728653", "0.57283247", "0.5720056", "0.5713465", "0.5713002", "0.5712868", "0.5694271", "0.5686877", "0.5674782", "0.5667102", "0.5660573", "0.5656199", "0.5651868", "0.56434906", "0.56411624", "0.5637836", "0.5633775", "0.56315994", "0.5627994", "0.56278807", "0.5619751", "0.5617307", "0.5611597", "0.56018084", "0.56008136", "0.5587076", "0.5583175", "0.55820924", "0.5580809", "0.5579864", "0.5572968", "0.55648714", "0.5564833", "0.555918", "0.5557219", "0.55514485", "0.5550127" ]
0.7323777
1
Creates a group for a given node list. So far this is only an AiiDA verdi command.
def create_group(name, nodes, description=None):
    group, created = Group.get_or_create(name=name)
    if created:
        print('Group created with PK={} and name {}'.format(group.pk, group.name))
    else:
        print('Group with name {} and pk {} already exists. Do you want to add nodes?[y/n]'.format(group.name, group.pk))
        answer = raw_input()
        if answer.strip().lower() == 'y':
            pass
        else:
            return
    nodes2 = []
    nodes2_pks = []
    for node in nodes:
        try:
            node = int(node)
        except ValueError:
            pass
        nodes2_pks.append(node)
        try:
            nodes2.append(load_node(node))
        except:  # NotExistentError:
            pass
    group.add_nodes(nodes2)
    print('added nodes: {} to group {} {}'.format(nodes2_pks, group.name, group.pk))
    if description:
        group.description = description
    return group
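A minimal call sketch for create_group(), assuming an already loaded AiiDA profile; the group name, node PKs and description are made-up placeholders, and importing the helper from aiida_utils is an assumption, not something stated in the snippet.

from aiida_utils import create_group  # hypothetical module holding the helper above

# PKs may be given as ints or strings; entries that cannot be loaded are skipped.
group = create_group('scf_results',
                     [2310, 2313, '2320'],
                     description='SCF workchains of the test study')
print(group.pk, group.name)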
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_new_group(self, a, b):\n self.groups[self.group_id] = set([a,b])\n self.node_id[a] = self.node_id[b] = self.group_id\n self.group_id += 1", "def create(self, context=None):\n values = self.obj_get_changes()\n db_nodegroup = self.dbapi.create_nodegroup(values)\n self._from_db_object(self, db_nodegroup)", "def createGroup(self, *group):\n if not self.rank:\n logging.info('Creating atom group {}'.format(group))\n\n if not len(group):\n for idSS in self.pargs['idSS']:\n self.lmp.command('group group{} type {}'.format(idSS, idSS))\n else:\n self.lmp.command('group ' + ('{} ' * len(group)).format(*group))", "def createGroup(self):\n return _libsbml.ListOfGroups_createGroup(self)", "def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])", "def createGroup(self, name):\n new_group = ET.SubElement(self._root,'group')\n group_name = ET.SubElement(new_group, 'name')\n group_name.text = name\n # update the document's groups\n self._groups = self._root.findall('group') \n print 'Creating group, \\'%s\\'' % name\n return CAGroup(new_group)", "def create_group_command(client: MsGraphClient, args: dict) -> tuple[str, dict, dict]:\n required_properties = {\n 'displayName': str(args.get('display_name')),\n 'mailNickname': str(args.get('mail_nickname')),\n 'mailEnabled': args.get('mail_enabled') == 'true',\n 'securityEnabled': args.get('security_enabled')\n }\n\n # create the group\n group = client.create_group(required_properties)\n\n # display the new group and it's properties\n group_readable, group_outputs = parse_outputs(group)\n human_readable = tableToMarkdown(name=f\"{required_properties['displayName']} was created successfully:\",\n t=group_readable,\n headers=['ID', 'Display Name', 'Description', 'Created Date Time', 'Mail',\n 'Security Enabled', 'Mail Enabled'],\n removeNull=True)\n entry_context = {f'{INTEGRATION_CONTEXT_NAME}(val.ID === obj.ID)': group_outputs}\n return human_readable, entry_context, group", "def createMainGroup(self):\n\t\tmc.group( n = self.grp.name, em = True )", "def test_create_group(self):\n pass", "def test_create_group(self):\n pass", "def crea_grupo(self):\r\n \r\n self.comprueba_casos_seleccionados()", "def createGroup(root, group, fileList):\n topGroupElem = ElementTree.SubElement(root, ELEM_GROUP, {ATTR_NAME: group})\n headerGroupElem = None\n sourceGroupElem = None\n pathElem = None\n for fl in fileList:\n if fl.endswith(\".h\"):\n if headerGroupElem == None:\n headerGroupElem = ElementTree.SubElement(topGroupElem, ELEM_GROUP, {ATTR_NAME: GRP_HEADER})\n pathElem = ElementTree.SubElement(headerGroupElem, ELEM_PATH)\n else:\n if sourceGroupElem == None:\n sourceGroupElem = ElementTree.SubElement(topGroupElem, ELEM_GROUP, {ATTR_NAME: GRP_SRC})\n pathElem = ElementTree.SubElement(sourceGroupElem, ELEM_PATH)\n pathElem.text = fl", "def create_groups():\n groups = [\"iDRAC-Administrators\", \"iDRAC-Operators\", \"iDRAC-Readonly\"]\n group_priviledges = [\"0x000001ff\", \"0x000000f9\", \"0x00000001\"]\n for host in online_hosts:\n for index in [1,2,3]:\n print index,\" \", groups[index-1]\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupName \"+groups[index-1])\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupName failed \")\n\n result2 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g 
cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupDomain corp.inmobi.com\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupDomain failed \")\n\n result3 = sudo(\"racadm5 -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgStandardSchema -i \"+str(index) +\" -o cfgSSADRoleGroupPrivilege \"+ group_priviledges[index-1])\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for RoleGroupPriviledge failed \")", "def createNewGroup():\n if request.method == 'POST':\n groupname = request.form['groupname1']\n internal = request.form['internal1']\n external = request.form['external1']\n userNo = request.form['usersNo1']\n if 'node1' in request.form:\n node = int(request.form['node1'])\n else:\n node = -1\n\n if int(userNo) == 0:\n if hl.createGroup(groupname, internal, external, node):\n return True\n elif int(userNo) > 0:\n if hl.createGroup(groupname, internal, external, node, genUsers=True, numUsers=int(userNo)):\n return True\n\n return False", "def createGroup(self):\n return _libsbml.GroupsModelPlugin_createGroup(self)", "def create_groups(**kwargs):\n for gname in SEC_GROUP_NAMES.itervalues():\n Group.objects.get_or_create(name=gname)", "def createGroup(self, group, members):\n connection = self.sock\n\n connection.send(\"create_group\".encode())\n\n status_code = connection.recv(2)\n\n if status_code != SUCCESS:\n print(\"Error\")\n return -1\n message = []\n message.append(\"gname:\")\n message.append(group)\n message.append(\";\")\n message.append(\"members:\")\n for i in members:\n message.append(i)\n message.append(\",\")\n if members:\n message.pop()\n message = ''.join(message)\n message = message.encode()\n connection.send(message)\n result = connection.recv(2)\n if result != SUCCESS:\n return -1\n\n packed_gid = connection.recv(4)\n gid = struct.unpack(\"<L\", packed_gid)\n repoids.append(gid)\n return 1", "def __make_group_by_res(self, group_name, name_list):\r\n if group_name not in self.groups:\r\n res_group = self.group['Residue'].getChildGrps()\r\n groups = [ res for res in res_groups if res.name in name_list ]\r\n new_group = Group(parent=[], id=-1, type=group_name, childs=groups)\r\n self.groups[group_name] = new_group", "def _create_child_group(self, name) -> \"GroupBase\":\n pass", "def create_volume_group() :\n volume_group_present = False\n for node in nodes:\n volume_group_status = \\\n send_cli_via_ssh(node, 'volume_group_status').strip()\n if volume_group_status:\n print( 'Volume group on {} already exist: {}'.format(\n node,\n ' '.join([line.split(';')[0] for line in\n volume_group_status.split()])\n ))\n volume_group_present = True\n else:\n print_with_timestamp( 'Creating vg00 on: {}'.format(node))\n send_cli_via_ssh(node, 'unit_manager create S001 vg00')\n volume_group_status = \\\n send_cli_via_ssh(node, 'volume_group_status').strip()\n if volume_group_status:\n print_with_timestamp( 'Created vg00 on: {}'.format(node))\n volume_group_present = True\n else :\n print_with_timestamp( 'Cannot create vg00 on: [}'.format(node))\n volume_group_present = False\n return volume_group_present", "def __make_group_by_atom(self, group_name, name_list):\r\n pass", "def create_mailing_list_group(sender, instance, **kwargs):\n\tname = instance.name\n\treturn requests.post(\"https://api.mailgun.net/v3/lists\",\n auth=('api', settings.MAILGUN_API_KEY),\n data={'address': '{}@arenbergorkest.be'.format(name),\n 'name': name})", "def _make_node_list(child_node, list_count):\n parent = 
GroupNode(child_node.node.parentnode) \n parent.add_child(child_node)\n if list_count == 0:\n return parent\n else:\n list_count -= 1\n return _make_node_list(parent, list_count)", "def group_assignmentgroups(assignment_group_list):\n return group_nodes(assignment_group_list, 2)", "async def create_group(ctx, name: str, role: str, group_type: str=None, comp: str=None, rating: int=None, time: str=None):\n\n owner = ctx.message.author.name\n \n if comp:\n comp = [int(i) for i in comp.split()] # convert string input to array\n\n new_group = Group(owner, name, role, group_type, rating, time, comp)\n bg_bot.manager.add_group(owner, new_group)\n \n await ctx.send(f'Created new {group_type} group for leader {owner}!')", "def __call__(self, *args: FParams.args, **kwargs: FParams.kwargs) -> DAGNode:\n return self._create_task_group(TaskGroup, *args, **kwargs)", "def create_nodes(self, nodes: List[Node]):\n nodes_str = \",\\n\".join([str(n) for n in nodes])\n query = \"\"\"CREATE %s\"\"\" % nodes_str\n return self.create_tx(query)", "async def async_create_group(\n hass: HomeAssistant,\n name: str,\n entity_ids: Collection[str] | None = None,\n user_defined: bool = True,\n icon: str | None = None,\n object_id: str | None = None,\n mode: bool | None = None,\n order: int | None = None,\n ) -> Group:\n group = Group.async_create_group_entity(\n hass, name, entity_ids, user_defined, icon, object_id, mode, order\n )\n\n # If called before the platform async_setup is called (test cases)\n await _async_get_component(hass).async_add_entities([group])\n return group", "def create_group():\n groupname = request.get_json().get(\"name\")\n description = request.get_json().get(\"description\")\n grp = admin.create_group(current_app.scoped_session(), groupname, description)\n if grp:\n response = admin.get_group_info(current_app.scoped_session(), groupname)\n else:\n response = {\"result\": \"group creation failed\"}\n response = jsonify(response)\n return response", "def test_060_add_group_to_group(self):\n\n testflow.step(\"Adding group %s to group %s\", TEST_GROUP1, TEST_GROUP2)\n assert MANAGE_CLI.run(\n 'groupadd',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to add group to group '%s'\" % TEST_GROUP1", "def test_create_group(self):\n groupid = 'villains'\n\n # create the group\n resp = self.app.post('/groups', data=json.dumps({'name':groupid}))\n assert resp.status_code == 200\n\n # Fetch the group to check that it persists\n resp = self.app.get('/groups/{}'.format(groupid))\n assert resp.status_code == 200", "def generate_groups(ctx):\n asyncio.run(generate_groups_impl(ctx.obj[\"config\"]))", "def _make_group(self, _rk, _group_hint):\n\n if isinstance(_group_hint, dict):\n # _group_hint is a single key/value pair\n g = _group_hint[list(_group_hint)[0]]\n\n r_type = g.get(\"type\", \"none\")\n if r_type != \"OS::Nova::ServerGroup\":\n return \"support only ServerGroup resource\"\n\n properties = g.get(\"properties\", {})\n if len(properties) == 0:\n return \"no properties\"\n\n group_name = properties.get(\"name\", None)\n if group_name is None:\n return \"no group name\"\n group_name = group_name.strip()\n\n policies = properties.get(\"policies\", [])\n if len(policies) == 0:\n return \"no policy of the group\"\n\n if len(policies) > 1:\n return \"multiple policies\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if group_name in self.groups.keys():\n group = self.groups[group_name]\n else:\n group = Group(group_name)\n\n policy = policies[0].strip()\n if policy == 
\"anti-affinity\":\n group_type = \"diversity\"\n else:\n group_type = policy\n\n group.group_type = group_type\n group.factory = \"server-group\"\n group.level = \"host\"\n\n self.groups[group_name] = group\n else:\n # group hint is uuid string.\n rg = self.resource.get_group_by_uuid(_group_hint)\n if rg is None:\n return \"unknown group found while making group\"\n\n # TODO: exclude soft-affinity and soft-anti-affinity?\n\n if rg.name in self.groups.keys():\n group = self.groups[rg.name]\n else:\n group = Group(rg.name)\n\n group.group_type = rg.group_type\n group.factory = rg.factory\n group.level = \"host\"\n\n self.groups[rg.name] = group\n\n if group is not None:\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"", "def create_group(self, **kwargs):\n post_body = json.dumps({'group': kwargs})\n resp, body = self.post('groups', post_body)\n self.expected_success(201, resp.status)\n body = json.loads(body)\n return rest_client.ResponseBody(resp, body)", "def CreateGroupPostData(input, collection, grouping, item, groupname):\n root = etree.Element(collection)\n name = etree.SubElement(root, 'name')\n name.text = groupname\n is_smart = etree.SubElement(root, 'is_smart')\n is_smart.text = 'false'\n itemlist = etree.SubElement(root, grouping)\n \n for i in input:\n add_element = etree.SubElement(itemlist, item)\n add_element_id = etree.SubElement(add_element, 'id')\n add_element_id.text = i\n \n return etree.tostring(root)", "def get_groups():\n\n groups = [\"shelter\", \"sharing\", \"unsheltered\", \"motel\"]\n\n for item in groups:\n group = Group(group_name=item)\n\n db.session.add(group)\n\n db.session.commit()", "def addGroup(self, *args):\n return _libsbml.ListOfGroups_addGroup(self, *args)", "def make_grp(self):\n try:\n self.base['grp']\n except:\n self.base['grp'] = np.zeros(len(self.base),dtype='i')\n\n for halo in self._halos.values():\n halo[name][:] = halo._halo_id\n\n if config['verbose']: print \"writing %s\"%(self._base().filename+'.grp')\n self._base().write_array('grp',overwrite=True,binary=False)", "def create_initial_groups():\n \n from base import get_group_database, get_user_database\n import api\n \n # we want any groups we create in here to be active immediately\n save_min_sponsors = Group._min_sponsors\n Group._min_sponsors = 1\n \n user_db = get_user_database()\n group_db = get_group_database()\n \n user_admin = user_db['admin']\n \n def create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit=''):\n if not group_db.has_key(user_id):\n g = group_db.create_group(user_id=user_id,\n name=name,\n description=desc,\n owner=owner,\n no_pay=True)\n group_db.force_accept(g)\n if parent_id:\n group_db.join_group(g, group_db[parent_id], force=1)\n \n g = group_db[user_id]\n if join_pol:\n api.group_set_join_policy(user_admin, g, join_pol)\n if join_pol == 'open':\n # if membership is open, allow non-members to read\n api.group_set_other_perms(user_admin, g, 'ro')\n if memb_vis:\n api.group_set_membership_visible(user_admin, g, memb_vis)\n if desc:\n api.group_set_settings(user_admin, g, description=desc)\n if memb_edit:\n api.group_set_member_edit(user_admin, g, memb_edit)\n \n # set date of formation\n create = datetime(2004, 05, 10, 12, 0, 0)\n g.date = create\n \n \n groups = [\n ('top', 'Top', 'This group contains the top-level groups.', user_admin, None, '', 'open', ''),\n ('regional', 'Regional', 'Contains groups with a regional focus.', user_admin, 'top', '', 'open', ''),\n ('orgs', 'Organizations', 'Contains categories 
of organizations.', user_admin, 'top', '', 'open', ''),\n ('community', 'Community', 'Contains groups that are focused or based on ned.com.', user_admin, 'top', '', 'open', ''),\n ('issues', 'Issues', 'Contains groups focused on particular issues.', user_admin, 'top', '', 'open', ''),\n ('general', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'top', 'open', 'open', ''),\n ('general-other', 'General', 'Contains groups that don\\'t belong in other categories.', user_admin, 'general', 'open', 'open', ''),\n ('help', 'Help', 'Contains site help.', user_admin, 'community', '', 'open', ''),\n ('community-general', 'Community - General',\n '', user_admin, 'community', 'open', 'open', 'member'),\n ('suggestions', 'Suggestions', 'For community suggestions.', user_admin, 'community-general', '', 'open', ''),\n ('public', 'Public sector',\n 'Groups operating in the public sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('private', 'Private sector',\n 'Groups operating in the private sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('social', 'Social sector',\n 'Groups operating in the social sector should join this group.', user_admin, 'orgs', 'open', 'open', 'member'),\n ('orgs-general', 'Organizations - General',\n \"For organizations that don't fit in other categories.\", user_admin, 'orgs', 'open', 'open', 'member'),\n ('issues-business', 'Business',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-cyf', 'Children - Youth - Families',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-education', 'Education',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-env', 'Environment - Conservation',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-health', 'Health Care',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-pol', 'Policy - Politics',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-religion', 'Religion',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-soc', 'Social Justice - Human Services',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-tech', 'Technology',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('issues-general', 'Issues - General',\n '', user_admin, 'issues', 'open', 'open', 'member'),\n ('ned', '<ned> Network',\n '', user_admin, '', '', '', ''),\n ('ned-internal', 'Ned - Internal',\n '', user_admin, '', '', '', ''),\n ('sitedev', 'Site Development',\n '', user_admin, 'ned-internal', '', '', ''),\n ]\n \n for user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit in groups:\n create_group(user_id, name, desc, owner, parent_id, join_pol, memb_vis, memb_edit)\n \n # Help group\n g_help = group_db['help']\n api.group_set_anon_read(user_admin, g_help, True)\n \n # ON groups\n g_on = group_db['ned']\n group_db.join_group(g_on, group_db['private'], force=1)\n group_db.join_group(g_on, group_db['public'], force=1)\n group_db.join_group(g_on, group_db['social'], force=1)\n api.group_set_owners_by_user_id(user_admin, g_on, ['admin', 'jimc'])\n api.group_set_join_policy(user_admin, g_on, 'owner')\n api.group_set_invite_policy(user_admin, g_on, 'owner')\n api.group_set_membership_visible(user_admin, g_on, 'open')\n api.group_set_member_edit(user_admin, g_on, True)\n api.group_set_anon_read(user_admin, g_on, True)\n \n g_on_int = group_db['ned-internal']\n api.group_set_owners_by_user_id(user_admin, g_on_int, ['admin', 'jimc'])\n 
api.group_set_join_policy(user_admin, g_on_int, 'owner')\n api.group_set_invite_policy(user_admin, g_on_int, 'owner')\n api.group_set_membership_visible(user_admin, g_on_int, 'member')\n api.group_set_member_edit(user_admin, g_on_int, True)\n api.group_set_anon_read(user_admin, g_on_int, False)\n \n g_sitedev = group_db['sitedev']\n api.group_set_owners_by_user_id(user_admin, g_sitedev, ['admin', 'jimc'])\n \n Group._min_sponsors = save_min_sponsors", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n parent = self\n sdef, name, path = self.file.get_custom_node_info(qid, gslash, name, path, parent) \n grp = Group(self.file, sdef, name, path, attrs, parent)\n return grp", "def test_add_group(self):\n pass", "def group_assignments(assignment_list):\n return group_nodes(assignment_list, 1)", "def make_custom_group(self, qid, name='', path='', attrs={}):\n gslash = \"/\"\n sdef, name, path = self.get_custom_node_info(qid, gslash, name, path) \n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent)\n return grp", "def make_grp(self, name='grp', v=False):\n self.base[name] = self.get_group_array(v=v) #np.zeros(len(self.base), dtype=int)#self.get_group_array()", "def make_grp(self, name='grp'):\n self.base[name] = self.get_group_array()", "def createGroup(listOfPerson):\n atk=Department()\n atk.members=listOfPerson\n return atk", "def make_group(self, qid, name='', path='', attrs={}, link='', abort=True):\n gqid = qid + \"/\"\n sdef = self.get_sdef(gqid, self.default_ns, \"referenced in make_group\")\n id = sdef['id']\n ns = sdef['ns']\n path = self.deduce_path(id, ns, path)\n if not abort:\n id_noslash = id.rstrip('/') # could be different from gqid if namespace present\n grp = self.get_existing_group(path, id_noslash, name)\n if grp:\n # found already existing group\n return grp \n link_info = self.extract_link_info(name, link, Group)\n # create the group\n parent = None # no parent since this node created from File object (top level)\n grp = Group(self, sdef, name, path, attrs, parent, link_info)\n return grp", "def create_group(self, group_name, user_ids=[], role_ids=[]):\n payload = {}\n payload['name'] = group_name\n payload['user_ids'] = user_ids\n payload['role_ids'] = role_ids\n return Client._post(self, payload)", "def add_group(self):\n items = self.group_list.selectedItems()\n for item in items:\n self.parent.add_group_data(item.text())", "def test_new_group(self, inventoryloader):\n inventoryloader.add_group(u'newgroup')\n assert 'newgroup' in inventoryloader.groups", "def make_group(self, id, name='', attrs={}, link='', abort=True ): \n gid = id + \"/\"\n sgd = self.get_sgd(gid, name)\n path = self.full_path\n link_info = self.file.extract_link_info(name, link, Group)\n if not abort:\n # id = sgd['id'].rstrip('/') # not sure if need this\n grp = self.file.get_existing_group(path, id, name)\n if grp:\n return grp\n grp = Group(self.file, sgd, name, path, attrs, self, link_info)\n # self.mstats[gid]['created'].append(grp)\n return grp", "def test_create_device_group(self):\n pass", "def create_tree(outFile, tree, path='/'):\n for key, foo in tree.list():\n if outFile.has_node(path, key):\n logging.debug('Path already found:', path, key)\n continue\n logging.debug('Creating group:', path, key)\n outFile.create_group(path, key, key)\n dest = path + key + '/'\n if outFile.has_node(dest):\n continue\n create_tree(outFile, tree.child(key), dest)", "def 
test_ipam_vlan_groups_create(self):\n pass", "def groups_create(self, mar, request):\n if not permissions.CanCreateGroup(mar.perms):\n raise permissions.PermissionException(\n 'The user is not allowed to create groups.')\n\n user_dict = self._services.user.LookupExistingUserIDs(\n mar.cnxn, [request.groupName])\n if request.groupName.lower() in user_dict:\n raise exceptions.GroupExistsException(\n 'group %s already exists' % request.groupName)\n\n if request.ext_group_type:\n ext_group_type = str(request.ext_group_type).lower()\n else:\n ext_group_type = None\n group_id = self._services.usergroup.CreateGroup(\n mar.cnxn, self._services, request.groupName,\n str(request.who_can_view_members).lower(),\n ext_group_type)\n\n return api_pb2_v1.GroupsCreateResponse(\n groupID=group_id)", "def post_groups(\n data: PostGroupIn, tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.create_group\")\n grp = Group(\n description=data.description,\n members=[tkn.owner],\n group_name=data.group_name,\n owner=tkn.owner,\n ).save()\n logging.debug(\n \"Created group %s (%s) owned by %s\",\n data.group_name,\n str(grp.pk),\n tkn.owner.character_name,\n )\n return GetGroupOut.from_record(grp)", "def creategroup(body):\n group = body.get(\"groupname\", None)\n pps = body.get(\"pilotpoints\", None)\n print('lol',group, pps)\n print(type(pps))\n\n # Does the person exist already?\n if group not in group_dict and group is not None:\n group_dict[group] = {\n \"groupname\": group,\n \"pilotpoints\": pps,\n }\n return group_dict[group], 201\n\n # Otherwise, they exist, that's an error\n else:\n abort(\n 406,\n \"Person with last name {group} already exists\".format(group=group),\n )", "def append_node(ifaces_well, wellid, node_number, k, i, j):\n group_region.append([k, i, j, k, i, j])\n if default_ifaces is None:\n ifaces.append(ifaces_well)\n face_ct.append(len(ifaces_well))\n else:\n ifaces.append(default_ifaces)\n face_ct.append(len(default_ifaces))\n group_name.append('{}{}'.format(wellid, node_number))\n group_placement.append([Grid, GridCellRegionOption,\n PlacementOption,\n ReleaseStartTime,\n ReleaseOption,\n CHeadOption])", "def handle(self, *args, **options):\n new_group, created = Group.objects.get_or_create(name=options.get('group_name')) \n self.stdout.write(f\"Group {options.get('group_name')} created\")", "def create_group(user):\n if connexion.request.is_json:\n users_group = [User.from_dict(d) for d in connexion.request.get_json()]\n response = (\"success\", 201)\n if len(users_group) > 4:\n response = (\"Max number of player is 4\", 400)\n else:\n groupId = GroupStorageController.add_new_group(users_group)\n return response", "def create_nodes(self):", "def groups(self, create, extracted, **kwargs):\n if not create:\n # Simple build, do nothing.\n return\n\n if extracted:\n # A list of groups were passed in, use them\n for group in extracted:\n # pylint: disable=no-member\n self.groups.add(group)", "def create_TestGroup(test_case, # type: AnyMagpieTestCaseType\n override_group_name=null, # type: Optional[Str]\n override_discoverable=null, # type: Optional[bool]\n override_data=null, # type: Optional[JSON]\n override_headers=null, # type: Optional[HeadersType]\n override_cookies=null, # type: Optional[CookiesType]\n ): # type: (...) 
-> JSON\n app_or_url = get_app_or_url(test_case)\n data = override_data\n if override_data is null:\n data = {\"group_name\": override_group_name if override_group_name is not null else test_case.test_group_name}\n # only add 'discoverable' if explicitly provided here to preserve original behaviour of 'no value provided'\n if override_discoverable is not null:\n data[\"discoverable\"] = override_discoverable\n grp_name = (data or {}).get(\"group_name\")\n if grp_name:\n test_case.extra_group_names.add(grp_name) # indicate potential removal at a later point\n resp = test_request(app_or_url, \"POST\", \"/groups\", json=data,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n return check_response_basic_info(resp, 201, expected_method=\"POST\")", "def createVolumeGroup(self, pvs, name):\n vg = {}\n vg['command'] = 'create:volgroup'\n vg['extentSize'] = EXTENT_SIZE\n vg['pvs'] = pvs\n vg['name'] = name\n\n return vg", "def create_group():\n incoming = request.get_json()\n chatroom = Chatroom(\n name = incoming['name'],\n tag = incoming['tag'],\n )\n db.session.add(chatroom)\n db.session.commit()\n participant = Participant(\n user_id = session['user_id'],\n room_id = chatroom.room_id,\n )\n db.session.add(participant)\n db.session.commit()\n return jsonify(results = chatroom.room_id)", "def execute(self, context):\n global array_nodes\n sub_tree = bpy.data.node_groups.new('Armory group', 'ArmGroupTree') # creating subtree\n sub_tree.use_fake_user = True\n group_node = array_nodes[self.node_index]\n group_node.group_tree = sub_tree # link subtree to group node\n sub_tree.nodes.new('LNGroupInputsNode').location = (-250, 0) # create node for putting data into subtree\n sub_tree.nodes.new('LNGroupOutputsNode').location = (250, 0) # create node for getting data from subtree\n context.space_data.path.append(sub_tree, node=group_node)\n sub_tree.group_node_name = group_node.name\n return {'FINISHED'}", "def group_nodes(node_list, tree_height):\n dict = OrderedDict()\n for node in node_list:\n nodelist = _make_node_list(GroupNode(node), tree_height)\n if nodelist.get_name() not in dict:\n dict[nodelist.get_name()] = nodelist\n else:\n dict[nodelist.get_name()].merge(nodelist)\n return list(dict.values()) # we usually need to know the length, so values() instead of itervalues()", "def allocate_group(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_AllocateNewGroupID(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_AllocateNewGroupID(key1, result_val)\n return result_val.i", "def test_groups():\n graph = Graph()\n for one, two in [(1, 2), (2, 3), (1, 4), (4, 3), (3, 1)]:\n graph.add_edge(one, two)\n groups = graph.group()\n eq_(len(groups), 3)", "async def groupadd(bot: fido, channel: str, sender: str, args: List[str]):\n\n if len(args) == 0:\n return \"Usage: \" + IRC.commandPrefix + \"groupadd <groupname> <nickname> <phonenumber>\"\n\n lines = []\n print(f\"Args: {args}\")\n number = ''\n nickname = ''\n group = ''\n for arg in args:\n if arg == \"\":\n continue # Ignore blank args.\n print(f\"Arg: [{arg.strip()}]\")\n if arg.startswith('+'):\n number = arg\n elif arg in bot.users:\n nickname = arg\n else:\n group = arg\n if not group or not nickname or not number:\n await bot.message(channel, \"Incorrect command usage. 
Ensure user is in channel, and that number has +<country code>.\")\n return\n add_group(mygroup=group, nickname=nickname, number=number)\n await bot.message(channel, f\"Added {nickname} to SMS group {group} with number {number}\")", "def add_group(self, name: str, leds_list: List[str]) -> Tuple[Optional['LedGroup'], str]:\n new_group: LedGroup = LedGroup(name, leds_list)\n verified_ledgroup = LedGroup.verify_led_group(new_group)\n if not verified_ledgroup:\n return None, 'wrong_group_name'\n is_unique = AuxEffects.check_unique(self, verified_ledgroup, 'LedGroup', None)\n if not is_unique:\n return None, 'group_exists'\n self.LedGroups.append(verified_ledgroup)\n return verified_ledgroup, \"\"", "def test_get_groups_3(\n self, management_client, internal_client, inventory_attributes\n ):\n\n did = \"some-device-id\"\n internal_client.create_device(did, inventory_attributes)\n for i in range(10):\n group = management_client.group(group=\"group\" + str(i))\n management_client.addDeviceToGroup(group, did)\n\n assert len(management_client.getAllGroups()) == 1", "def create_group(self, label):\n group = OptionGroup(label)\n self.append(group)\n return group", "def newSimgroup(simlist): \n nSims = len(simlist)\n \n simgrp = simgroup(nSims)\n \n for i, s in enumerate(simlist):\n \n simgrp[i] = newSim(s)\n \n return simgrp", "def _make_valet_groups(self, _rk, _az, _rule_list):\n\n for rn in _rule_list:\n rule = self.resource.group_rules[rn]\n\n # Valet group naming convention.\n # It contains datacenter id and availability_zone\n # followed by service id and vnf id\n # depending on scope.\n # And concatenate rule name.\n # Exception: quorum-diversity\n\n group_id = self.datacenter_id + \":\"\n\n if rule.rule_type != \"quorum-diversity\":\n group_id += _az + \":\"\n\n if rule.app_scope == \"lcp\":\n group_id += rn\n elif rule.app_scope == \"service\":\n group_id += self.service_instance_id + \":\" + rn\n elif rule.app_scope == \"vnf\":\n group_id += self.service_instance_id + \":\" + self.vnf_instance_id + \":\" + rn\n else:\n return \"unknown app_scope value\"\n\n if group_id in self.groups.keys():\n group = self.groups[group_id]\n else:\n group = Group(group_id)\n group.group_type = rule.rule_type\n group.factory = \"valet\"\n group.level = rule.level\n\n self.groups[group_id] = group\n\n group.server_list.append(self.app_name + \":\" + _rk)\n\n return \"ok\"", "def add_group(self, parent_id: int = None, id: int = None, name: str = None):\n if parent_id is not None:\n assert (parent_id in self.group_ids), ('Parent id does not exist')\n\n if id is None:\n id = int(np.nanmax(np.asarray(self.group_ids, dtype=np.float)) + 1)\n else:\n assert (id not in self.groups_ids()), ('Id already exists')\n\n if name is None:\n name = 'Group {}'.format(id)\n\n new_group = wknml.Group(id, name, [])\n if parent_id is None:\n self.groups.append(new_group)\n else:\n self.groups = Skeleton._group_append(self.groups, parent_id, new_group)\n\n return id, name", "def create_group(self, properties: dict[str, Any | None]) -> dict:\n group = self.ms_client.http_request(method='POST', url_suffix='groups', json_data=properties)\n return group", "def create():\n name = request.json['name']\n level = request.json['level']\n manager = request.json['manager']\n if models.user.Group.get(name):\n raise Conflict('Group already exists.', creation=False)\n else:\n authorize(manager, level=level)\n group = models.user.Group(name=name, level=level, manager=manager)\n models.db.session.add(group)\n models.db.session.commit()\n return 
response(200, creation=True)", "def create_groups(groups):\n for group_name in groups:\n try:\n Group.objects.get_or_create(name=group_name)\n except Exception as e:\n raise CouldNotCreateGroup(group_name, e)", "def create_group(self, name) -> \"GroupBase\":\n ancestor, group_names, last_name = self._descend(name)\n parent = ancestor._require_descendant_groups(*group_names)\n if last_name in parent:\n raise FileExistsError(f\"Group or dataset found at '{name}'\")\n return parent._create_child_group(last_name)", "def _get_node_group(self, node_name):\n\n pass", "def grp(self, grpNode):\n\t\tself._grp = grpNode", "def create_group(self, path):\n if self.options['storage_method'] == 'hdf5':\n # execute h5py command\n self.file_pointer.create_group(path)\n elif self.options['storage_method'] == 'none':\n # save command for later processing\n self.h5commands.append((\"create_group\", path,))\n else:\n raise Exception('Invalid option value for storage_method (%s)' % storage_method)", "def post(self):\n args = parser.parse_args()\n user_group = UserGroup()\n user_group.name = args['name']\n user_group.createdby = auth.username()\n db_session.add(user_group)\n db_session.commit()\n return user_group, 201", "def addGroup(self, *args):\n return _libsbml.GroupsModelPlugin_addGroup(self, *args)", "def async_create_group_entity(\n hass: HomeAssistant,\n name: str,\n entity_ids: Collection[str] | None = None,\n user_defined: bool = True,\n icon: str | None = None,\n object_id: str | None = None,\n mode: bool | None = None,\n order: int | None = None,\n ) -> Group:\n if order is None:\n hass.data.setdefault(GROUP_ORDER, 0)\n order = hass.data[GROUP_ORDER]\n # Keep track of the group order without iterating\n # every state in the state machine every time\n # we setup a new group\n hass.data[GROUP_ORDER] += 1\n\n group = Group(\n hass,\n name,\n order=order,\n icon=icon,\n user_defined=user_defined,\n entity_ids=entity_ids,\n mode=mode,\n )\n\n group.entity_id = async_generate_entity_id(\n ENTITY_ID_FORMAT, object_id or name, hass=hass\n )\n\n return group", "def test_createGroup(self):\n\t\tself.client.force_authenticate(user=User.objects.get(id=1))\n\t\turl = \"/groups/\"\n\t\tdata = {\n\t\t\t'name' : 'testGroup3',\n\t\t\t'description' : 'This is another test group that just created.',\n\t\t\t'isPublic' : True\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\t\tself.assertEqual(response.data[\"id\"], 3)\n\t\tself.assertEqual(response.data[\"name\"], 'testGroup3')", "def create_default_groups():\n from flaskbb.fixtures.groups import fixture\n result = []\n for key, value in fixture.items():\n group = Group(name=key)\n\n for k, v in value.items():\n setattr(group, k, v)\n\n group.save()\n result.append(group)\n return result", "def create_groups(group_qty):\n if group_qty > 8 or not group_qty or group_qty == 0:\n raise Exception('Group quantity must be between 1 and 8.')\n group_list = []\n for x in range(0, group_qty):\n group_list.append(x+1)\n return group_list", "def create_device_group(self, devicegroup, devices=None):\n self._logger.debug(\"Create device-group: %s\" % (devicegroup,))\n if devices is not None:\n self.set_device_group(devicegroup, devices, exclusive=True)\n else:\n self.xapi.set(pandevice.XPATH_DEVICE_GROUPS + \"/entry[@name='%s']\" % (devicegroup,))", "def pgroup(pynodes, world = False, re = \"\", suffix = \"\"):\n # Initiate return variable\n output = []\n # Filter supplied pynodes, if equal to 0 
then return false\n if len(pynodes) == 0:\n return output\n # Group created on each object transformation\n if not world:\n for o in pynodes:\n # Name var\n the_name = o.name()\n # Replace object name if any\n if re != \"\":\n the_name = the_name.replace(re, suffix)\n else:\n the_name = the_name + suffix\n # Create group for each specified PyNode\n grp = pm.group(empty = True, name = the_name)\n # Align the pgroup to each PyNode transformation\n transformation.align(grp, o, mode = 'transform')\n # Get object parent\n parent = o.getParent()\n # If the object have parent,\n # Parent the group to object parent\n if parent:\n grp.setParent(parent)\n # Parent the object to pgroup\n o.setParent(grp)\n # Collect group to output\n output.append(grp)\n\n else:\n # Name var\n the_name = pynodes[0].name()\n # Replace object name if any\n if re != \"\":\n the_name = the_name.replace(re, suffix)\n else:\n the_name = the_name + suffix\n # Create single group\n grp = pm.group(empty = True, name = the_name)\n # Collect group to output\n output.append(grp)\n # Parent all specified PyNodes to pgroup\n pm.parent(pynodes, grp)\n\n return output", "def create_target_groups(ctx):\n data = self.create_target_groups()\n ctx.info('Created target groups for the load balancer {}:'.format(self.get_balancer_name()))\n ctx.pp.pprint(data)", "def add_node(self, node: base.Node, label: str = None) -> HandleType:\n\n if self._current_group:\n if label and label != self._current_group:\n raise ValueError('The given label does not match the current group: '\n f'{label} vs {self._current_group}.')\n label = self._current_group\n else:\n if not label:\n raise ValueError('Label should not be empty.')\n if label not in self._groups:\n self._groups[label] = [node]\n else:\n self._groups[label].append(node)\n return node.create_handle()", "def _node_groups(self, node, log=None):\n hostgroups = set(self.settings.MON_ZABBIX_HOSTGROUPS_NODE)\n hostgroups.update(node.monitoring_hostgroups)\n\n return self._get_or_create_hostgroups(self._node_kwargs(node), self.settings.MON_ZABBIX_HOSTGROUP_NODE, None,\n hostgroups=hostgroups, log=log)", "def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups", "def get_group_nodes(self, nodePair):\n core = self.core\n parent_node = core.get_base(nodePair.get_bundleNode())\n relative_path = list(core.is_member_of(parent_node).keys())[0]\n nodePair.set_bundleGroupNode(core.load_by_path(self.root_node, relative_path))\n\n parent_node = core.get_base(nodePair.get_countryNode())\n relative_path = list(core.is_member_of(parent_node).keys())[0]\n nodePair.set_countryGroupNode(core.load_by_path(self.root_node, relative_path))", "def add_nodes(self, node_name_list):\n nodes = requests.post(self.__url + 'nodes', data=json.dumps(\n node_name_list), headers=HEADERS).json()\n node_dict = {}\n for node in nodes:\n node_dict[node['name']] = node['SUID']\n return node_dict", "def create(*nIds):\n nodes = []\n for nId in nIds:\n nodes.append(Node(nId))\n return nodes", "def create_namespaced_group(self, body, **kwargs):\n\n all_params = ['body', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method 
create_namespaced_group\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `create_namespaced_group`\")\n\n resource_path = '/oapi/v1/groups'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Group',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def add_move_group_combining_others(self, new_group_name, existing_group_names=None):\n new_group = xml.dom.minidom.Document().createElement('group')\n new_group.setAttribute(\"name\", new_group_name)\n for existing_group_name in existing_group_names:\n new_group.appendChild(xml.dom.minidom.Document().createElement(f'group name=\"{existing_group_name}\"'))\n new_group.writexml(self.new_robot_srdf, indent=\" \", addindent=\" \", newl=\"\\n\")" ]
[ "0.7485883", "0.72396135", "0.71599764", "0.66984046", "0.66795355", "0.65447414", "0.6374939", "0.6371427", "0.63429767", "0.63429767", "0.6336112", "0.6318668", "0.631537", "0.63136333", "0.6290033", "0.6238642", "0.6233603", "0.62333775", "0.616521", "0.61182487", "0.6102469", "0.6092538", "0.6085988", "0.6051518", "0.60488075", "0.5986774", "0.59811246", "0.5972799", "0.5962804", "0.59606504", "0.5956766", "0.59296113", "0.5914792", "0.5898665", "0.58986485", "0.58964425", "0.5880037", "0.58732945", "0.5870162", "0.58540857", "0.58505", "0.58445364", "0.5835782", "0.5830884", "0.5827687", "0.58250254", "0.5820926", "0.5819618", "0.5811116", "0.5810487", "0.5808543", "0.5802188", "0.5790553", "0.57902277", "0.5778338", "0.57760596", "0.5767674", "0.5764979", "0.5754544", "0.57462215", "0.5744429", "0.5732055", "0.5716156", "0.57148945", "0.57053757", "0.5704867", "0.5694523", "0.56891894", "0.5688638", "0.5682427", "0.5680584", "0.5678444", "0.5671964", "0.56656796", "0.5663046", "0.5652962", "0.5645079", "0.56443536", "0.5632936", "0.56325424", "0.5627595", "0.562419", "0.5621108", "0.5615352", "0.5612948", "0.55998707", "0.55949455", "0.5594711", "0.5584334", "0.557836", "0.55724186", "0.5565315", "0.55584246", "0.55573434", "0.55571085", "0.555117", "0.5548716", "0.5547089", "0.5544881", "0.5537315" ]
0.7037025
3
returns a list of node uuids for a given group, given as a name, pk, uuid, or group object
def get_nodes_from_group(group, return_format='uuid'): from aiida.orm import Group from aiida.common.exceptions import NotExistent nodes = [] g_nodes = [] try: group_pk = int(group) except ValueError: group_pk = None group_name = group if group_pk is not None: try: str_group = Group(dbgroup=group_pk) except NotExistent: str_group = None message = ('You have to provide a valid pk for a Group ' 'or a Group name. Reference key: "group".' 'given pk= {} is not a valid group' '(or is your group name integer?)'.format(group_pk)) print(message) elif group_name is not None: try: str_group = Group.get_from_string(group_name) except NotExistent: str_group = None message = ('You have to provide a valid pk for a Group or a Group name.' 'given group name= {} is not a valid group' '(or is your group name integer?)'.format(group_name)) print(message) elif isinstance(group, Group): str_group = group else: str_group = None print('I could not handle given input, either Group, pk, or group name please.') return nodes g_nodes = str_group.nodes for node in g_nodes: if return_format == 'uuid': nodes.append(node.uuid) elif return_format == 'pk': nodes.append(node.pk) return nodes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list", "def GetGroupMembers(self, group):\n return []", "def _get_group_uuid(self, nova, name):\n hints = {}\n try:\n groups = nova.server_groups.list()\n for group in groups:\n gname = getattr(group, 'name', '')\n if name == gname:\n hints['group'] = getattr(group, 'id', '')\n except Exception as e:\n LOG.exception(e)\n finally:\n LOG.info(\"%s:%s() %s: %s\", self.__class__.__name__,\n sys._getframe().f_code.co_name, name, hints)\n return hints", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def list_group(group):\n\n members = group_members(group)\n ret = {}\n if members:\n for member in members:\n info = get(member)\n if info:\n ret[uid2dn(member)] = info\n return ret", "def group_nodes(self, group, namespace=None):\n source = self._source(namespace)\n return self._list(source, 'map', group)", "def getGroupInfo(groupId):\n url = f\"https://groups.roblox.com/v1/groups/{groupId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "def _get_nodes_by_instance(self, instance_uuid):\n try:\n node = pecan.request.dbapi.get_node_by_instance(instance_uuid)\n return [node]\n except exception.InstanceNotFound:\n return []", "def _get_node_group(self, node_name):\n\n pass", "def getNodeLVMGroups(self,node):\n data = self.connect('get','nodes/%s/scan/lvm' % (node),None)\n return data", "def get_uuids_in_node(self, node, project_id):\n program, project = project_id.split(\"-\", 1)\n\n try:\n res = self.paginate_query(node, project_id)\n uuids = [x[\"id\"] for x in res[\"data\"][node]]\n except:\n raise Gen3Error(\n \"Failed to get UUIDs in node '\"\n + node\n + \"' of project '\"\n + project_id\n + \"'.\"\n )\n\n return uuids", "def get_uuids(things):\n return [thing.uuid for thing in things]", "def find_with_uuid(self, groupid, objects, section):\n if groupid in self._total[section]:\n # we get the objects by name to avoid memory corruption issues,\n # but we're not checking if the names change!\n return objects[self._total[section][groupid]]\n else:\n for obj in objects:\n obj_uuid = self.get_uuid(obj)\n if obj_uuid:\n #self._total[section][obj_uuid] = obj.name\n if obj_uuid == groupid:\n return obj", "def _find_groups_for_user(email):\n return [g['name'] for g in groups.find({\"users\":email})]", "def test_get_device_group_by_id(self):\n pass", "def group_members(group):\n\n group = ldapi.lookup(ld, 'cn', group, cfg['ldap_groups_base'])\n\n if group and 'uniqueMember' in group:\n r = re.compile('^uid=([^,]*)')\n return map(lambda x: r.match(x).group(1), group['uniqueMember'])\n return []", "def get_groups(self):\n user_node = self.get()\n grouplist = []\n if user_node:\n for rel in graph.match(start_node=user_node, rel_type='in'):\n grouplist.append(Usergroup(id=rel.end_node()['id']))\n return grouplist", "def get_group(tkn: Token = Depends(from_authotization_header_nondyn),):\n assert_has_clearance(tkn.owner, \"sni.read_group\")\n return [\n GetGroupShortOut(group_id=str(grp.pk), group_name=grp.group_name)\n for grp in Group.objects().order_by(\"group_name\")\n ]", "def find_group(self,id):\n result = []\n cursor = self._cnx.cursor()\n command = \"SELECT group_id FROM teilnahmen 
WHERE id={}\".format(id)\n cursor.execute(command)\n tuples = cursor.fetchall()\n\n for (group) in tuples:\n teilnahme = Teilnahme()\n teilnahme.set_le()\n result.append(teilnahme)\n\n self._cnx.commit()\n cursor.close()\n return result", "def _ListGroupDevices(self, group):\n for run_target in six.itervalues(group.run_targets):\n for d in six.itervalues(run_target.devices):\n yield d", "def getTGTasksName(all_taskgroup_objects, tg_name):\n tg_id = all_taskgroup_objects.id\n tg_task_obj = TaskgroupTask.objects.filter(\n taskgroup_id=tg_id\n )\n tg_task = TaskgroupTaskSerializers(\n tg_task_obj,\n many=True\n )\n tasks_list = []\n tg_list = []\n try:\n for i in range(len(tg_task.data)):\n tasks = dict(tg_task.data[i].items())\n print(\"#############\", tasks)\n task_obj = Tafv2Task.objects.get(id=tasks['task_id'])\n tasks_list.append({\n \"task_name\": task_obj.script,\n \"task_id\": task_obj.id\n })\n tg_list.append({\n \"tg_name\": tg_name,\n \"tg_id\": tg_id\n })\n\n context = {'tgList': tg_list, 'tasksList': tasks_list}\n print(\"$$$$$$$$$$$$$$\", context)\n return context\n except Exception as e:\n print(e)", "def get_group_names(self):\r\n return self.groups.keys()", "def get_group_users(groupname):\n return jsonify(admin.get_group_users(current_app.scoped_session(), groupname))", "def _groupNamesToList(settings):\n return [getattr(GroupName, val) for val in settings.dhGroups]", "def find_with_uuid(self, groupid, objects, section):\n if groupid in self._total[section]:\n # we get the objects by name to avoid memory corruption issues,\n # but we're not checking if the names change!\n return self.wrap_object(objects[self._total[section][groupid]],\n section)\n else:\n for obj in objects:\n obj_uuid = self.get_uuid(obj)\n if obj_uuid:\n self._total[section][obj_uuid] = obj.name\n if obj_uuid == groupid:\n return self.wrap_object(obj, section)", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def _get_nodes(self):\n viewpoint = \"shiva_{}\".format(cherrypy.session[\"id\"])\n messages_db = self.mongo[viewpoint][\"messages\"]\n people_db = self.mongo[viewpoint][\"people\"]\n #\n senders = messages_db.distinct(\"sender\")\n owner_id = cherrypy.session[\"id\"]\n nodes = list()\n for sender in senders:\n person = people_db.find_one({\"id\": sender})\n if person is None:\n name = \"id{}\".format(sender)\n else:\n name = person[\"display_name\"]\n records = list(messages_db.aggregate([{\n \"$match\": {\n \"$or\": [\n {\"sender\": owner_id, \"receiver\": sender},\n {\"sender\": sender, \"receiver\": owner_id}\n ]\n }\n }, {\"$group\": {\"_id\": None, \"count\": {\"$sum\": 1}}}]))\n if not records:\n records = 0\n else:\n records = records[0][\"count\"]\n info = \"Total records: {}\".format(records)\n history_link = \"/vk/read?id={}\".format(sender)\n statistics_link = \"#\"\n if records > 0:\n nodes.append({\n \"id\": sender,\n \"name\": name,\n \"info\": info,\n \"records\": records,\n \"history_link\": history_link,\n \"statistics_link\": statistics_link\n })\n #\n return nodes", "def construct_groups_string(nodes):\n groups = get_groups(nodes)\n if len(groups) <= 1:\n return \"\"\n else:\n result = []\n for color in groups:\n # +1 because .tsp nodes are indexed with 1\n group = [node.nid + 1 for node in nodes if node.color == color]\n result.append(group)\n return str(result)", "def get_groups(nodes):\n return list(set([node.color for node in nodes]))", "def keys(self):\n list_all_dict = 
self.list_all()\n return list_all_dict[\"nodes\"] + list_all_dict[\"groups\"]", "def group(self, group_cn):\n group = self.search(base=GROUPS, cn=group_cn)\n\n if len(group) == 0:\n return []\n else:\n group_members = group[0]['attributes']['member']\n\n members = []\n for member in group_members:\n members.append(self.search(dn=member))\n\n if self.objects:\n return self.member_objects(members)\n\n return members", "def list_links(self, node, dd):\n link_list = dd[node]['links']\n link_names = []\n for link in link_list:\n if \"subgroup\" in link:\n sublinks = list(link[\"subgroup\"])\n for sublink in sublinks:\n link_names.append(sublink[\"name\"])\n else:\n link_names.append(link[\"name\"])\n return link_names", "def node_group_spec(self):\n return {\n 'name': 'nodes',\n 'source': 'tree', \n 'transform': [\n {\n 'type': 'filter',\n 'expr': 'datum.type == \"node\"'\n }\n ]\n }", "def node_groups(self, node, namespace=None):\n source = self._source(namespace)\n return self._list(source, 'reverse', node)", "def get_assets(id_group):\n data = sql.list_assets(id_group)\n names = [(d['id'], d['name']) for d in data]\n return names", "def customer_group_get_all():\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n query = \"\"\"\n SELECT \n `group_id`, \n `group_name`, \n `description`, \n `timestamp`, \n `created_by`, \n `creation_time`, \n `is_deleted`, \n `updated_by`, \n `role_id`, \n `is_default`, \n `is_customer` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n user_group_details = None\n cursor = db.cursor()\n if cursor.execute(query) != 0:\n user_group_details = cursor.fetchall()\n cursor.close()\n db.close()\n return user_group_details", "def test_groups_group_id_get(self):\n pass", "def get_group_list(self) -> Sequence[str]:\n return [group.Name.lower() for group in self.LedGroups]", "def get_group_members(self, group):\n members = []\n result = self.search('ou=groups,dc=mozilla',\n filterstr='cn=%s' % (group))\n if result == False:\n raise self.SearchError\n elif result == []:\n return []\n for group in result[1]:\n members = list(set(members) | set(group[1]['memberUid']))\n return members", "def find_uuids_linked_to_item(cls, rid):\n ignored(rid)\n return []", "def get_group_nodes(self, nodePair):\n core = self.core\n parent_node = core.get_base(nodePair.get_bundleNode())\n relative_path = list(core.is_member_of(parent_node).keys())[0]\n nodePair.set_bundleGroupNode(core.load_by_path(self.root_node, relative_path))\n\n parent_node = core.get_base(nodePair.get_countryNode())\n relative_path = list(core.is_member_of(parent_node).keys())[0]\n nodePair.set_countryGroupNode(core.load_by_path(self.root_node, relative_path))", "def test_get_device_group_by_id1(self):\n pass", "def group_get_members(self,groupname):\n\n if not self.check_prereqs():\n raise StopIteration\n\n db = self.env.get_db_cnx()\n cursor = db.cursor()\n\n query=self.create_query(self.sql_group_get_members_query+\" ORDER BY $username_field$\",{'groupname':groupname,'username_field':self.sql_username_field,'groupname_field':self.sql_groupname_field})\n self.log.debug(\"sqlflexibleauthstore: group_get_members: %s\" % (query,))\n\n cursor.execute(query)\n desc=[i[0] for i in cursor.description]\n for row in cursor:\n dictrow=dict(zip(desc,row))\n yield dictrow[self.sql_username_field]", "def get_groups(id_project):\n data = sql.list_groups(id_project)\n names = [(d['id'], d['name']) for d in data]\n return names", "def get_nested_groups(self, conn, group: str) -> typing.List[str]:\n nested_groups = list()\n 
conn.search(\n search_base=self.group_search_base,\n search_filter=self.group_search_filter.format(group=group),\n search_scope=ldap3.SUBTREE)\n if conn.response:\n for nested_group in conn.response:\n if 'dn' in nested_group:\n nested_groups.extend([nested_group['dn']])\n groups = self.get_nested_groups(conn, nested_group['dn'])\n nested_groups.extend(groups)\n nested_groups = list(set(nested_groups))\n return nested_groups", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def list_group(self, groupname):\n return self.get_admin(\"groups/{}\".format(groupname))", "def grep_groups(self, response):\n soup = BeautifulSoup(response.body)\n for group_link in soup.table.find_all('a', href=True):\n group_item = response.meta['group_item']\n group_item['full_name'] = group_link.text\n group_item['id'] = group_link['href'][20:]\n yield group_item", "def get_users_in_group(self, group_id):\n members = self.vk.groups.getMembers(group_id=group_id, count=1)\n peoples = members['count']\n ids = []\n while len(ids) < peoples:\n members = self.vk.groups.getMembers(group_id=group_id, offset=len(ids))\n ids += members['items']\n\n return ids", "def group_id_to_name(id_list, group_name):\n id_name_dict = {}\n id_string = [str(x) for x in id_list]\n id_string = \",\".join(id_string)\n url = \"https://www.boardgamegeek.com/xmlapi/boardgame\"\n if group_name != \"game\":\n url = url + group_name\n url += \"/{}\".format(id_string)\n print(url)\n resp = requests.get(url)\n tree = ET.fromstring(resp.content)\n for idx, val in enumerate(list(tree)):\n name = val.find(\"name\").text\n id = id_list[idx]\n id_name_dict[id] = name\n return id_name_dict", "def group_handling(existing_uuids: Set[str]) -> None:", "def product_group_list(obj):\n client = get_client(obj)\n\n res = client.product_group_list()\n\n print(json.dumps(res, indent=4))", "def test_get_device_groups(self):\n pass", "def get_group_info(groupname):\n return jsonify(admin.get_group_info(current_app.scoped_session(), groupname))", "def get_nodes_for_process(self, uuid, clean=True):\n if clean:\n uuid = Process.strip_uuid(uuid)\n return self._get_tree_queryset().filter(process__uuid_full__startswith=uuid)", "def list_groups(args):\n\n for group in get_groups(args):\n print(group)", "def info_materials_groups_get():\n session = info_map.Session()\n\n mat = aliased(info_map.Material)\n grp = aliased(info_map.Group)\n\n q = session.query(mat.group_id,grp.name).join(grp).distinct()\n groups = [Group(group=row.group_id,name=row.name) for row in q.all()]\n return groups, 200", "def groups(self):\n return []", "def get(self):\n usergroup_node = graph.find_one(\"Usergroup\",\n property_key='id',\n property_value=self.id)\n return usergroup_node", "def product_group_get(obj, name):\n client = get_client(obj)\n\n pgs = client.product_group_list(name)\n if not pgs:\n fatal_error('Product group {} does not exist'.format(name))\n\n print(json.dumps(pgs[0], indent=4))", "def customer_group_get_related(group_id):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n 
`company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `groups`.`company_name` = (\n SELECT `asshole`.`company_name` \n FROM \n (\n SELECT * \n FROM `groups` \n WHERE `group_id` = \"%s\"\n ) AS `asshole`\n )\n \"\"\" %(group_id)\n \n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def get_all_children_id_list_from_redis_by_pk(gmac_id):\n try:\n gmac = GoogleMapsAddressComponent.objects.get(pk=gmac_id)\n conn = get_redis_connection()\n key = GoogleMapsAddressComponent.get_redis_all_children_key(gmac_id)\n length = conn.llen(key)\n return conn.lrange(key, 0, length)\n except GoogleMapsAddressComponent.DoesNotExist:\n return None", "def locate_group_users(self, group):\n return self.ldap_connection.search_s(\"ou=Groups,dc=redhat,dc=com\",\n ldap.SCOPE_SUBTREE, 'cn={0}'.format(group))", "def get_nodes(self, type, query_args={}):\n endpoint = '/v3/educator/%ss' % (Node.TYPE_MAP[type])\n result = self.request(endpoint, query_args)\n\n nodes = []\n for data in result.response:\n node = Node.instance(type, data)\n nodes.append(node)\n\n return nodes", "def test_get_group(self):\n pass", "def getGroupMembers(group_id):\r\n return Group.getGroupMembers(group_id)", "def group_ids(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"group_ids\")", "def get_group_name(\n group_id: BSONObjectId,\n tkn: Token = Depends(from_authotization_header_nondyn),\n):\n assert_has_clearance(tkn.owner, \"sni.read_group\")\n return GetGroupOut.from_record(Group.objects(pk=group_id).get())", "def get_pingroups(self):\n return self.groups[:]", "def get_members(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n query = (\"SELECT username from \" + ENV_DB + \".Groups WHERE gid='{}'\").format(self.g_id)\r\n cursor.execute(query)\r\n data = cursor.fetchall()\r\n database.close()\r\n return list(i[0] for i in data)", "def get_children_from_redis(gmac_id, as_objects=True):\n conn = get_redis_connection()\n klass = GoogleMapsAddressComponent\n results = []\n queue = []\n children = klass.get_children_id_list_from_redis_by_pk(gmac_id)\n results.extend(children)\n queue.extend(children)\n while len(queue) > 0:\n node = queue.pop()\n children = klass.get_children_id_list_from_redis_by_pk(node)\n results.extend(children)\n queue.extend(children)\n if as_objects:\n results = klass.objects.filter(pk__in=results)\n return results", "def get_all_group_ids(token) -> list:\n ids=list()\n _dict = perform_request(app_config.ENDPOINT, token)\n while True:\n for obj in _dict[\"value\"]:\n ids.append(obj[\"id\"])\n if \"@odata.nextLink\" not in _dict:\n return ids\n _dict = perform_request(_dict[\"@odata.nextLink\"], token)", "def all_childnodes_to_nifti1img(h5group):\n child_nodes = []\n def append_parent_if_dataset(name, obj):\n if isinstance(obj, h5py.Dataset):\n if name.split('/')[-1] == 'data':\n child_nodes.append(obj.parent)\n\n vols = []\n h5group.visititems(append_parent_if_dataset)\n for c in child_nodes:\n vols.append(hdfgroup_to_nifti1image(c))\n\n return vols", "def get_ids(self) -> List[str]:", "def test_get_groups(self):\n pass", "def test_get_groups(self):\n pass", "def get_node(self, uuid, clean=True):\n if clean:\n uuid = ProcessNode.strip_uuid(uuid)\n return self._get_tree_queryset().get(uuid_full__startswith=uuid)", "def getNodeNames(self, includeDisabled=False):", "def test_api_v1_groups_names_get(self):\n 
pass", "def check_uuid(self, obj, groupid):\n if self.get_uuid(obj) == groupid:\n return True", "def GetNodes(self, bulk=False, reason=None):\n query = []\n _AppendIf(query, bulk, (\"bulk\", 1))\n _AppendReason(query, reason)\n\n nodes = self._SendRequest(HTTP_GET, \"/%s/nodes\" % GANETI_RAPI_VERSION,\n query, None)\n if bulk:\n return nodes\n else:\n return [n[\"id\"] for n in nodes]", "def get_group_list(self):\n return [(item[0], item[1][0]) for item in self.contacts_by_group_list]", "def get_group_list(ip_address, headers):\n group_list = None\n group_url = 'https://%s/api/GroupService/Groups' % ip_address\n response = requests.get(group_url, headers=headers, verify=False)\n if response.status_code == 200:\n group_response = response.json()\n if group_response['@odata.count'] > 0:\n group_list = [x['Id'] for x in group_response['value']]\n else:\n print(\"No groups found at \", ip_address)\n else:\n print(\"No groups found at \", ip_address)\n return group_list", "def groupfinder(user_id, request):\n ret = DBSession.query(User).filter_by(user_id=user_id).all()\n if len(ret) == 0:\n return None\n user = ret[0]\n groups = [x.group_name for x in user.groups]\n return groups", "def get_output_nodes(self):\n \n\n self.buildings = self.dataset.groups['buildings']\n self.building_nodes = self.buildings.groups['nodes']\n\n eta_output_added = getattr(self.building_nodes,'eta_output_added')\n uv_output_added = getattr(self.building_nodes,'uv_output_added')\n\n eta = []\n uv = []\n nodeIds = []\n time = []\n \n if(eta_output_added or uv_output_added ):\n time = self.building_nodes.variables['time'][:].tolist()\n nodeIds = self.building_nodes.variables['id'][:].tolist()\n if eta_output_added: eta = self.building_nodes.variables['eta'][:].tolist()\n if uv_output_added: uv = self.building_nodes.variables['uv'][:].tolist()\n\n \n return nodeIds,eta, uv, time", "def test_get_resource_group_by_moid(self):\n pass", "def groups(self):\n return self.get_data(\"groups\")", "def get_organization_group_details_url(organization_group_uuid):\n\n return '{organization_group_api_path}/{organization_group_uuid}/tree'.format \\\n (organization_group_api_path=ORGANIZATION_GROUP_API_COMMON_PATH,\n organization_group_uuid=organization_group_uuid)", "def get_groups(email):\n query=\"SELECT * FROM groups WHERE createdby='{}'\".format(email)\n cur.execute(query)\n return cur.fetchall()", "def test_get_port_sub_group_list(self):\n pass", "def list():\n index = 0\n while True:\n node = Node.from_index(index)\n if os.path.exists(node.path()):\n click.echo(f'{index}: node_{index}')\n click.echo(run_lncli(node, 'getinfo | jq .identity_pubkey'))\n else:\n break\n index += 1", "def get_node_list(self):\n logger.debug('Retrieving node list')\n self.node_ids = []\n\n # Iterate over interfaces, try to grab gateway ipv4 addr\n # Try to /ping gateway over TCP using default port.. 
if we get a pong, we may get a node ID\n gateways = netifaces.gateways()\n gateways = gateways.get(netifaces.AF_INET, [])\n\n for gateway in gateways:\n node_id = gateway[0]\n node = self.select_node(node_id)\n info = node.get_info()\n\n if info and info.get('node'):\n logger.debug('Found node with ID \"%s\"', node_id)\n self.node_ids.append(node_id)\n\n return self.node_ids", "def test_get_device_groups1(self):\n pass", "def by_nodes(item):\n return Line['nodes', item]", "def group_tag(self):\n return ''.join(['[{}]'.format(x.name) for x in self.groups])", "def groups_by_uid(request, uid):\r\n user = User()\r\n users = user.query({\"uid\":str(uid)})\r\n if(len(users) < 1):\r\n return HttpResponse(\"No user found under uid \"+ str(uid))\r\n group = Group()\r\n group = group.query({\"gid\":str(users[0]['gid'])})\r\n if(len(group) < 1):\r\n return HttpResponse(\"No group found under uid \"+ str(uid))\r\n return HttpResponse(json.dumps(group))", "def get_by_name(cls, context, cluster, name):\n db_nodegroup = cls.dbapi.get_nodegroup_by_name(context, cluster, name)\n nodegroup = NodeGroup._from_db_object(cls(context), db_nodegroup)\n return nodegroup", "def extract_names(collection):\n return (\n '[{}]'.format(', '.join(map(repr, groups[n].entity_names)))\n if n in groups else repr(n) for n in collection\n )", "def list_group_members(self, entity):\n\n members = []\n\n for nodePath, node in self.cache.get_tree(self.userProjects).items():\n if nodePath.startswith(entity.path):\n # Check if node is a direct child\n distance = len(pathlib.Path(nodePath).relative_to(pathlib.Path(entity.path)).parts)\n\n if distance == 1:\n if type(node) is gitlab.v4.objects.Group or type(node) is gitlab.v4.objects.Project:\n members.append(node.path)\n elif type(node) is gitlab.v4.objects.User:\n members.append(node.username)\n\n return members", "def get_users(db, group):\n my_users = {\n user_id\n for user_id, in db(\"\"\"\n select distinct\n users.id\n from users, members\n where\n users.id = members.user_id\n and group_id = %s\n \"\"\",\n group.group_id)\n }\n return my_users" ]
[ "0.6142465", "0.5999166", "0.59815145", "0.58869386", "0.5741488", "0.5735394", "0.5697292", "0.5640951", "0.563847", "0.56142646", "0.55772024", "0.5554826", "0.5521682", "0.5469941", "0.5396493", "0.5393709", "0.53740793", "0.5341698", "0.5340419", "0.5333693", "0.53306866", "0.53110385", "0.5308188", "0.53049964", "0.5273193", "0.5263094", "0.5262305", "0.5256786", "0.52505016", "0.52147794", "0.5209649", "0.5186415", "0.5179269", "0.5172982", "0.5170736", "0.5168547", "0.51625335", "0.5149786", "0.5137854", "0.51282305", "0.51119983", "0.5102721", "0.5102273", "0.50959283", "0.5090843", "0.50596756", "0.5058793", "0.5052648", "0.5051217", "0.5047032", "0.5024038", "0.50216055", "0.5015558", "0.5009927", "0.4995764", "0.4986205", "0.49843484", "0.49771452", "0.49667418", "0.49660653", "0.49560857", "0.49452358", "0.49410757", "0.493784", "0.4933313", "0.4930362", "0.49207258", "0.4917301", "0.4908184", "0.49043694", "0.4901802", "0.4900425", "0.4898625", "0.48870236", "0.48811227", "0.48811227", "0.4876212", "0.48760417", "0.48668334", "0.48657748", "0.48587263", "0.48565102", "0.4852899", "0.48517993", "0.48456594", "0.48445433", "0.4844292", "0.48423952", "0.48404455", "0.48388153", "0.48364002", "0.4834897", "0.4827112", "0.48236766", "0.4822737", "0.48225388", "0.4822297", "0.48221913", "0.48186228", "0.48184037" ]
0.73784983
0
Cost and time of test_fn on a given dataset section. Pass only one of `valid_feeder` or `test_feeder`. Don't pass `train_feed`.
def monitor(data_feeder): _total_time = 0. _costs = [] _data_feeder = data_feeder(BATCH_SIZE, SEQ_LEN, OVERLAP, Q_LEVELS, Q_ZERO, Q_TYPE) for _seqs, _reset, _mask in _data_feeder: _start_time = time.time() _cost = test_fn(_seqs, _mask) _total_time += time.time() - _start_time _costs.append(_cost) return numpy.mean(_costs), _total_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test(self, reader, feeding=None):\n import py_paddle.swig_paddle as api\n from data_feeder import DataFeeder\n feeder = DataFeeder(self.__data_types__, feeding)\n evaluator = self.__gradient_machine__.makeEvaluator()\n out_args = api.Arguments.createArguments(0)\n evaluator.start()\n total_cost = 0\n num_samples = 0.0\n for data_batch in reader():\n num_samples += len(data_batch)\n in_args = feeder(data_batch)\n self.__prepare_parameter__(in_args)\n self.__gradient_machine__.forward(in_args, out_args, api.PASS_TEST)\n total_cost += out_args.sum()\n self.__gradient_machine__.eval(evaluator)\n\n evaluator.finish()\n return v2_event.TestResult(\n evaluator=evaluator, cost=total_cost / num_samples)", "def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)", "def _evaluate_during_fit(self, test_loader, epoch):", "def train_and_test(self, train_fn, test_fn):\n logging.info(\"Training..\")\n self.train(train_fn)\n logging.info(\"Testing..\")\n return self.test(test_fn)\n logging.info(\"Done!\")", "def test(test_loader, net, criterion, args, epoch, logger, writer):\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n # switch to network mode\n net.eval()\n\n end = time.time()\n for batch_idx, (images, labels) in enumerate(test_loader):\n if args.cuda and torch.cuda.is_available():\n images = images.cuda(args.number)\n labels = labels.cuda(args.number)\n with torch.no_grad():\n output = net(images)\n loss = criterion(output, labels)\n\n # measure record loss, accuracy and elapsed time\n losses.update(loss.item(), images.size(0))\n prec1 = accuracy(output.data, labels, topk=(1,))[0]\n top1.update(prec1.item(), images.size(0))\n batch_time.update(time.time() - end)\n end = time.time()\n\n if batch_idx % args.print_freq == 0:\n logger.info('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n batch_idx, len(test_loader),\n batch_time=batch_time,\n loss=losses,\n top1=top1))\n logger.info(' ************************* [{Epoch}] Average Test Prec@1 {top1.avg:.3f} ********************** '.\n format(Epoch=epoch, top1=top1))\n\n # tensorboard\n writer.add_scalar('test_loss', losses.avg, epoch)\n writer.add_scalar('test_acc', top1.avg, epoch)\n\n return top1.avg", "def monitor(data_feeder):\n _total_time = time()\n _h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*DIM), dtype='float32')\n _big_h0 = numpy.zeros((BATCH_SIZE, N_RNN, H0_MULT*BIG_DIM), dtype='float32')\n _costs = []\n _data_feeder = load_data(data_feeder)\n for _seqs, _reset, _mask in _data_feeder:\n _cost, _big_h0, _h0 = test_fn(_seqs, _big_h0, _h0, _reset, _mask)\n _costs.append(_cost)\n\n return numpy.mean(_costs), time() - _total_time", "def test(model, dataloader, idx_to_char, device, config, with_analysis=False, plot_all=False, validation=True, with_iterations=False):\n\n model.eval()\n i = -1\n stat = \"validation\" if validation else \"test\"\n\n for i,x in enumerate(dataloader):\n line_imgs = x['line_imgs'].to(device)\n gt = x['gt'] # actual string 
ground truth\n\n if \"strokes\" in x and x[\"strokes\"] is not None:\n online = x[\"strokes\"].to(device)\n else:\n online = Variable(x['online'].to(device), requires_grad=False).view(1, -1, 1) if config[\n \"online_augmentation\"] and config[\"online_flag\"] else None\n\n\n loss, initial_err, pred_str = config[\"trainer\"].test(line_imgs, online, gt, validation=validation, with_iterations=with_iterations)\n\n if plot_all:\n imgs = x[\"line_imgs\"][:, 0, :, :, :] if config[\"n_warp_iterations\"] else x['line_imgs']\n plot_recognition_images(imgs, f\"{config['current_epoch']}_{i}_testing\", pred_str, config[\"image_test_dir\"], plot_count=4)\n\n # Only do one test\n if config[\"TESTING\"]:\n break\n\n if i >= 0: # if there was any test data, calculate the CER\n utils.reset_all_stats(config, keyword=stat)\n cer = config[\"stats\"][config[f\"designated_{stat}_cer\"]].y[-1] # most recent test CER\n\n if not plot_all:\n imgs = x[\"line_imgs\"][:, 0, :, :, :] if with_iterations else x['line_imgs']\n plot_recognition_images(imgs, f\"{config['current_epoch']}_testing\", pred_str, config[\"image_test_dir\"], plot_count=4)\n\n LOGGER.debug(config[\"stats\"])\n return cer\n else:\n log_print(f\"No {stat} data!\")\n return np.inf", "def _main(\n get_data: callable,\n EPOCHS: int = 10,\n PERIOD: int = 5,\n BATCH_SIZE: int = 256,\n LR: float = 1e-5,\n NEURONS: list = [128, 128],\n forecast: bool = False,\n tuning: bool = True,\n) -> None:\n @tf.function\n def train_step(x, y):\n with tf.GradientTape() as tape:\n pred = model(x)\n loss = loss_object(y, pred)\n grad = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grad, model.trainable_variables))\n train_loss.update_state(loss)\n train_accuracy.update_state(y, pred)\n\n\n @tf.function\n def test_step(x, y):\n # Test and validation step have the same operation.\n pred = model(x)\n loss = loss_object(y, pred)\n dev_loss.update_state(loss)\n dev_accuracy.update_state(y, pred)\n\n print(\"Reading data...\")\n X_train, X_dev, y_train, y_dev, X_test = get_data()\n print(\"X_train@{}, X_dev@{}\".format(X_train.shape, X_dev.shape))\n train_ds = tf.data.Dataset.from_tensor_slices(\n (X_train, y_train)).shuffle(int(1e6)).batch(BATCH_SIZE)\n\n dev_ds = tf.data.Dataset.from_tensor_slices(\n (X_dev, y_dev)).batch(BATCH_SIZE)\n\n num_fea = X_train.shape[1]\n model = NN(num_neurons=NEURONS)\n\n loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=False)\n optimizer = tf.keras.optimizers.Adam(learning_rate=LR)\n\n train_loss = tf.keras.metrics.Mean(name=\"train_loss\")\n train_accuracy = tf.keras.metrics.BinaryAccuracy(\n name=\"train_accuracy\")\n\n dev_loss = tf.keras.metrics.Mean(name=\"dev_loss\")\n dev_accuracy = tf.keras.metrics.BinaryAccuracy(\n name=\"dev_accuracy\")\n\n trace = {\"train\": [], \"val\": []}\n for epoch in range(EPOCHS):\n train_loss.reset_states()\n train_accuracy.reset_states()\n dev_loss.reset_states()\n dev_accuracy.reset_states()\n # Loop over batches.\n for x, y in train_ds:\n # x @ (batch_size, num_features)\n # y @ (batch_size, 1) --> probit\n train_step(x, y)\n\n for t_x, t_y in dev_ds:\n test_step(t_x, t_y)\n\n if (epoch+1) % PERIOD == 0:\n report = \"Epoch {:d}, Loss: {:0.6f}, Accuracy: {:0.6f}, Validation Loss: {:0.6f}, Validation Accuracy: {:0.6f}\"\n print(report.format(\n epoch+1,\n train_loss.result(),\n train_accuracy.result()*100,\n dev_loss.result(),\n dev_accuracy.result()*100))\n\n # Record loss\n trace[\"train\"].append(train_loss.result())\n 
trace[\"val\"].append(dev_loss.result())\n\n # AUC\n pred_train = model(X_train).numpy()\n pred_dev = model(X_dev).numpy()\n\n auc_train = metrics.roc_auc_score(y_true=y_train, y_score=pred_train)\n auc_dev = metrics.roc_auc_score(y_true=y_dev, y_score=pred_dev)\n\n print(\"AUC on Training Set: {: 0.6f}\".format(auc_train))\n print(\"AUC on Developing Set: {: 0.6f}\".format(auc_dev))\n\n if forecast:\n pred = model(X_test)\n return pred.numpy()\n if tuning:\n return {\n \"EPOCHS\": EPOCHS,\n \"BATCH_SIZE\": BATCH_SIZE,\n \"LR\": LR,\n \"NEURONS\": NEURONS,\n \"AUC_TRAIN\": auc_train,\n \"AUC_DEV\": auc_dev,\n \"LOSS_TRAIN\": train_loss.result().numpy(),\n \"LOSS_DEV\": dev_loss.result().numpy(),\n \"ACCURACY_TRAIN\": train_accuracy.result().numpy(),\n \"ACCURACY_DEV\": dev_accuracy.result().numpy(),\n }\n\n plt.plot(np.log(trace[\"train\"]))\n plt.plot(np.log(trace[\"val\"]))\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Log Cross Entropy Loss\")\n plt.legend([\"Training\", \"Validation\"])\n plt.title(\"LR={}, AUC_train={:0.3f}, AUC_dev={:0.3f}\".format(LR, auc_train, auc_dev))\n plt.show()", "def test(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n \r\n dataset.set_split('test')\r\n batch_generator = generate_nmt_batches(dataset, \r\n batch_size=len(dataset), \r\n device=args.device)\r\n\r\n acc_sum = 0.0\r\n model.eval()\r\n \r\n for batch_index, batch_dict in enumerate(batch_generator):\r\n # step 1. compute the output\r\n if isinstance(model,NMTModelWithMLTM):\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_mltm_vector'],\r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n else:\r\n y_pred = model(batch_dict['x_source'], \r\n batch_dict['x_source_length'], \r\n batch_dict['x_target'])\r\n\r\n acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)\r\n acc_sum += acc_t\r\n \r\n return acc_sum / (batch_index+1)", "def test_model(model, epoch, writer: SummaryWriter = None) -> EpochData:\n # Test interleaved to speed up execution, i.e. don't keep the clients waiting.\n accuracy, loss, class_precision, class_recall = model.test()\n data = EpochData(epoch_id=epoch,\n duration_train=0,\n duration_test=0,\n loss_train=0,\n accuracy=accuracy,\n loss=loss,\n class_precision=class_precision,\n class_recall=class_recall,\n client_id='federator')\n if writer:\n writer.add_scalar('accuracy per epoch', accuracy, epoch)\n return data", "def test(self, dataset, remaining_time_budget=None):\n if self.done_training:\n return None\n\n # Transform data to numpy.ndarray if not done yet\n if not self.X_test:\n # Turn `features` in the tensor tuples (matrix_bundle_0,...,matrix_bundle_(N-1), labels)\n # to a dict. This example model only uses the first matrix bundle\n # (i.e. matrix_bundle_0) (see the documentation of this train() function above for the description of each example)\n dataset = dataset.map(lambda *x: ({'x': x[0]}, x[-1]))\n iterator = dataset.make_one_shot_iterator()\n next_element = iterator.get_next()\n counter = 0\n X_test = []\n with tf.Session() as sess:\n while True:\n try:\n features, labels = sess.run(next_element)\n X_test.append(features.flatten())\n counter += 1\n if counter % 1000 == 0:\n print(counter)\n except tf.errors.OutOfRangeError:\n print(\"The End.\", counter)\n break\n self.X_test = np.array(X_test)\n\n # The following snippet of code intends to do:\n # 0. Use the function self.choose_to_stop_early() to decide if stop the whole\n # train/predict process for next call\n # 1. 
If there is time budget limit, and some testing has already been done,\n # but not enough remaining time for testing, then return None to stop\n # 2. Otherwise: make predictions normally, and update some\n # variables for time managing\n if self.choose_to_stop_early():\n print_log(\"Oops! Choose to stop early for next call!\")\n self.done_training = True\n test_begin = time.time()\n if remaining_time_budget and self.estimated_time_test and\\\n self.estimated_time_test > remaining_time_budget:\n print_log(\"Not enough time for test. \" +\\\n \"Estimated time for test: {:.2e}, \".format(self.estimated_time_test) +\\\n \"But remaining time budget is: {:.2f}. \".format(remaining_time_budget) +\\\n \"Stop train/predict process by returning None.\")\n return None\n\n msg_est = \"\"\n if self.estimated_time_test:\n msg_est = \"estimated time: {:.2e} sec.\".format(self.estimated_time_test)\n print_log(\"Begin testing...\", msg_est)\n test_results = self.classifier.predict(input_fn=test_input_fn)\n predictions = [x['probabilities'] for x in test_results]\n predictions = np.array(predictions)\n test_end = time.time()\n test_duration = test_end - test_begin\n self.total_test_time += test_duration\n self.cumulated_num_tests += 1\n self.estimated_time_test = self.total_test_time / self.cumulated_num_tests\n print_log(\"[+] Successfully made one prediction. {:.2f} sec used. \".format(test_duration) +\\\n \"Total time used for testing: {:.2f} sec. \".format(self.total_test_time) +\\\n \"Current estimated time for test: {:.2e} sec.\".format(self.estimated_time_test))\n return predictions", "def test(self, test_fn, eval_metrics):\n # Load gold and predict\n X, Y = self.load_dataset(test_fn)\n y = self.model.predict(X)\n\n # Get most probable predictions and flatten\n Y = RNN_model.consolidate_labels(self.transform_output_probs(Y).flatten())\n y = RNN_model.consolidate_labels(self.transform_output_probs(y).flatten())\n\n # Run evaluation metrics and report\n # TODO: is it possible to compare without the padding?\n ret = []\n for (metric_name, metric_func) in eval_metrics:\n ret.append((metric_name, metric_func(Y, y)))\n logging.debug(\"calculating {}\".format(ret[-1]))\n\n for (metric_name, metric_val) in ret:\n logging.info(\"{}: {:.4f}\".format(metric_name,\n metric_val))\n return Y, y, ret", "def test(model, test_loader, loss_function, device):\n\n model.eval()\n test_loss, correct = 0, 0\n\n with torch.no_grad():\n for data, target in test_loader:\n target = target.float().unsqueeze(dim=-1).to(device)\n output = model(data.to(device))\n pred = sigmoid2predictions(output)\n test_loss += loss_function(output, target).sum().item()\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('...validation: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))", "def test_epoch(\n test_loader, model, loss_fun, test_meter, cur_epoch, mode=\"test\", tb=None\n):\n\n # Enable eval mode\n model.eval()\n test_meter.iter_tic()\n pbar = tqdm(enumerate(test_loader), total=len(test_loader), leave=False)\n for cur_iter, batch in pbar:\n # Transfer the data to the current GPU device\n x, y = _prepare_batch(batch)\n # Compute the predictions\n logits, probas = model(x)\n _, predicted_labels = torch.max(probas, 1)\n # Compute test loss\n loss = loss_fun(logits, y)\n # Compute the errors\n label_err = mu.label_errors(predicted_labels, y)\n # Copy the stats from GPU to CPU (sync point)\n loss, label_err = loss.item(), label_err.item()\n update_metrics_values = {\"loss\": loss}\n\n test_meter.iter_toc()\n # Update and log stats\n test_meter.update_stats(label_err, update_metrics_values, x.size(0))\n test_meter.log_iter_stats(cur_epoch, cur_iter)\n test_meter.iter_tic()\n\n # Log epoch stats\n test_meter.log_epoch_stats(cur_epoch)\n test_meter.print_epoch_stats(cur_epoch)\n epoch_loss = test_meter.metrics_meters[\"loss\"][1] / test_meter.num_samples\n\n if tb is not None:\n # log scalars\n epoch_stats = test_meter.get_epoch_stats(cur_epoch)\n tb.write_scalar(epoch_stats, cur_epoch, [\"loss\", \"label_err\"], tag=mode)\n\n test_meter.reset()\n return epoch_loss", "def test(self, dataset):\n test_accuracy = 0\n test_loss = 0\n num_examples_tested = 0\n # Put model into evaluation mode\n self.model.eval()\n for num, batch in enumerate(dataset.loader):\n xs, ys = batch\n batch_size = len(xs)\n num_examples_tested += batch_size\n iloss, iaccuracy = self.model(xs, ys)\n test_loss += iloss.cpu().data.numpy().item() * batch_size\n test_accuracy += iaccuracy.cpu().data.numpy().item() * batch_size\n test_accuracy = test_accuracy / num_examples_tested\n test_loss = test_loss / num_examples_tested\n # Return accuracy and loss for this model on the test set\n return test_accuracy, test_loss", "def finetune_eval_fn(\n # The pytype below must be Any, because the code below uses internal\n # methods only present on tff.learning.from_keras_model-derived TFF\n # models.\n model: Any,\n train_data: tf.data.Dataset,\n test_data: tf.data.Dataset,\n ) -> _MetricsType:\n\n @tf.function\n def train_one_batch(num_examples_sum, batch):\n \"\"\"Run gradient descent on a batch.\"\"\"\n with tf.GradientTape() as tape:\n output = model.forward_pass(batch)\n if finetune_last_layer:\n # Only works for models built via `tff.learning.from_keras_model`.\n last_layer_variables = model._keras_model.layers[-1].trainable_variables # pylint:disable=protected-access\n grads = tape.gradient(output.loss, last_layer_variables)\n optimizer.apply_gradients(zip(grads, last_layer_variables))\n else:\n grads = tape.gradient(output.loss, model.trainable_variables)\n optimizer.apply_gradients(zip(grads, model.trainable_variables))\n return num_examples_sum + output.num_examples\n\n # Starts training.\n metrics_dict = collections.OrderedDict()\n train_data = train_data.batch(batch_size)\n num_examples_sum = 0\n for idx in range(1, num_finetuning_epochs + 1):\n num_examples_sum = train_data.reduce(0, train_one_batch)\n # Evaluate the finetuned model every epoch.\n metrics_dict[f'epoch_{idx}'] = evaluate_fn(model, test_data, batch_size)\n metrics_dict[_NUM_FINETUNE_EXAMPLES] = num_examples_sum\n return metrics_dict", "def test(model, args, test_loader):\n with torch.no_grad():\n model.eval()\n test_loss = 0\n correct = 0\n # Data and target are a single pair of images and 
labels.\n for data, target in tqdm(test_loader, desc='Batching Test Data'):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n pred, tloss = make_prediction(data, target)\n test_loss += tloss\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n test_loss /= len(test_loader.dataset)\n uf.box_print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))", "def imit_test(self, epoch, best): \n a_loss, t_loss = 0., 0.\n data_valid_iter = batch_iter(self.data_valid[0], self.data_valid[1], self.data_valid[2], self.data_valid[3])\n for i, data in enumerate(data_valid_iter):\n loss_a, loss_t = self.user_loop(data)\n a_loss += loss_a.item()\n t_loss += loss_t.item()\n \n a_loss /= i\n t_loss /= i\n logging.debug('<<user simulator>> validation, epoch {}, loss_a:{}, loss_t:{}'.format(epoch, a_loss, t_loss))\n loss = a_loss + t_loss\n if loss < best:\n logging.info('<<user simulator>> best model saved')\n best = loss\n self.save(self.save_dir, 'best')\n \n a_loss, t_loss = 0., 0.\n data_test_iter = batch_iter(self.data_test[0], self.data_test[1], self.data_test[2], self.data_test[3])\n for i, data in enumerate(data_test_iter):\n loss_a, loss_t = self.user_loop(data)\n a_loss += loss_a.item()\n t_loss += loss_t.item()\n \n a_loss /= i\n t_loss /= i\n logging.debug('<<user simulator>> test, epoch {}, loss_a:{}, loss_t:{}'.format(epoch, a_loss, t_loss))\n return best", "def test_net(self, test_loader):\n test_loss = 0\n correct = 0\n cr = self._criterion\n # Make sure we don't modify the weights\n # while testing\n with torch.no_grad():\n for data, target in test_loader:\n data = data.cuda()\n target = target.cuda()\n # Feed the data\n output = self(data).cuda()\n # Calculate the loss\n test_loss += cr(output, target)\n # Get the predicted output and test whether or not\n # it aligns with the correct answer\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).sum()\n test_loss /= len(test_loader.dataset)\n # Output accuracy\n print('\\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n return 100. 
* float(correct) / len(test_loader.dataset)", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def test(self, curr_epoch):\n if not self.config.full_test_flag and (curr_epoch % self.config.test_step == 0 or\n curr_epoch == 0 or\n curr_epoch == self.config.epochs - 1):\n self.evaluator.test(curr_epoch)\n else:\n if curr_epoch == self.config.epochs - 1:\n self.evaluator.test(curr_epoch)", "def prep_data_fn(self, st_train_dt, end_train_dt, st_val_dt, end_val_dt, st_test_dt, end_test_dt):\n df = self.get_prep_data()\n train = df[(df['ft_data_dt'] >= st_train_dt) & (df['ft_data_dt'] <= end_train_dt)]\n val = df[(df['ft_data_dt'] >= st_val_dt) & (df['ft_data_dt'] <= end_val_dt)].sample(frac=0.4, random_state=2021)\n test = df[(df['ft_data_dt'] >= st_test_dt) & (df['ft_data_dt'] <= end_test_dt)]\n print(f'----train----')\n print(train[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n print(f'----validation----')\n print(val[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n print(f'----test----')\n print(test[['ft_data_dt', 'target', 'idd']].groupby(['ft_data_dt', 'target']).agg(['count']))\n self.set_train(train)\n self.set_validation(val)\n self.set_test(test)\n train_X = train[[c for c in train.columns if c not in ['idd', 'ft_data_dt', 'target']]]\n train_y = train['target']\n val_X = val[[c for c in train.columns if c not in ['idd', 'ft_data_dt', 'target']]]\n val_y = val['target']\n test_X = test[[c for c in train.columns if c not in ['idd', 'ft_data_dt', 'target']]]\n test_y = test['target']\n self.set_train_X(train_X)\n self.set_train_y(train_y)\n self.set_val_X(val_X)\n self.set_val_y(val_y)\n self.set_test_X(test_X)\n self.set_test_y(test_y)", "def test(self, dataset) -> None:\n raise NotImplementedError()", "def test(test_loader, model, criterion, epoch):\n global args, writer\n batch_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n #if not args.multi_gpu:\n # if model.beta_ema > 0:\n # old_params = model.get_params()\n # model.load_ema_params()\n #else:\n # if model.module.beta_ema > 0:\n # old_params = model.module.get_params()\n # model.module.load_ema_params()\n\n end = time.time()\n acc_part = []\n with torch.no_grad():\n for i, (input_, target) in enumerate(test_loader):\n if torch.cuda.is_available():\n target = target.cuda(async=True)\n input_ = input_.cuda()\n # compute output\n output = model(input_)\n preds = output.max(dim=1)[1]\n\n # measure accuracy and record loss\n # prec1 = accuracy(output.item(), target, topk=(1,))[0]\n prec1 = (preds == target).sum().item() / preds.size(0)\n top1.update(100 - prec1*100, input_.size(0))\n acc_part.append(prec1)\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0 and args.verbose:\n print('Test: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Err@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n i, len(test_loader), batch_time=batch_time, loss=losses,\n top1=top1))\n\n if args.verbose:\n print(' * Err@1 {top1.avg:.3f}'.format(top1=top1))\n #if not args.multi_gpu:\n # if model.beta_ema > 0:\n # model.load_params(old_params)\n #else:\n # if model.module.beta_ema > 0:\n # model.module.load_params(old_params)\n\n # log to 
TensorBoard\n if writer is not None:\n writer.add_scalar('test/loss', losses.avg, epoch)\n writer.add_scalar('test/err', top1.avg, epoch)\n layers = model.layers if not args.multi_gpu else model.module.layers\n for k, layer in enumerate(layers):\n if hasattr(layer, 'qz_loga'):\n mode_z = layer.sample_z(1, sample=0).view(-1)\n writer.add_histogram('mode_z/layer{}'.format(k), mode_z.cpu().data.numpy(), epoch)\n return np.mean(acc_part)", "def test(self,test_fn, eval_metrics):\n # Load gold and predict\n X, Y = self.load_dataset(test_fn)\n y = self.model.predict(X)\n\n # Get most probable predictions and flatten\n Y = RNNOIE_model.consolidate_labels(self.transform_output_probs(Y).flatten())\n y = RNNOIE_model.consolidate_labels(self.transform_output_probs(y).flatten())\n\n # Run evaluation metrics and report\n # TODO: is it possible to compare without the padding?\n ret = []\n for (metric_name, metric_func) in eval_metrics:\n ret.append((metric_name, metric_func(Y, y)))\n # logging.debug(\"calculating {}\".format(ret[-1]))\n\n for (metric_name, metric_val) in ret:\n logging.info(\"{}: {:.4f}\".format(metric_name,\n metric_val))\n return Y, y, ret", "def test_distributed(self):\n self.model.eval()\n test_loss, test_correct_preds = 0, defaultdict(int)\n if self.test_loader is None: # running G2E\n self.test_loader, self.test_size, self.test_sampler = self._get_smi_dl(phase=\"test\", shuffle=False)\n self.test_sampler.set_epoch(0)\n if self.rank == 0:\n test_loader = tqdm(self.test_loader, desc='testing...')\n else:\n test_loader = self.test_loader\n \n running_topk_accs = defaultdict(lambda: np.nan)\n with torch.no_grad():\n epoch_test_size = 0\n for i, batch in enumerate(test_loader):\n batch_data = batch[0]\n if not isinstance(batch_data, tuple):\n batch_data = batch_data.cuda(non_blocking=True)\n if self.model_name == 'TransformerEBM':\n batch_data = (batch_data, 'test')\n batch_mask = batch[1].cuda(non_blocking=True)\n batch_energies = self._one_batch(\n batch_data, batch_mask, backprop=False,\n )\n test_batch_size = batch_energies.shape[0]\n test_batch_size = torch.tensor([test_batch_size]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(test_batch_size, dist.ReduceOp.SUM)\n test_batch_size = test_batch_size.item()\n epoch_test_size += test_batch_size\n\n # for validation/test data, true rxn may not be present!\n batch_idx = batch[2]\n batch_true_ranks_array = self.proposals_data['test'][batch_idx, 2].astype('int')\n batch_true_ranks_valid = batch_true_ranks_array[batch_true_ranks_array < self.args.minibatch_eval]\n batch_true_ranks = torch.as_tensor(batch_true_ranks_array).unsqueeze(dim=-1)\n # slightly tricky as we have to ignore rxns with no 'positive' rxn for loss calculation\n # (bcos nothing in the numerator, loss is undefined)\n loss_numerator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n batch_true_ranks_valid\n ]\n loss_denominator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n :\n ]\n batch_loss = (loss_numerator + torch.logsumexp(-loss_denominator, dim=1)).sum().item()\n\n for k in self.k_to_test:\n # index with lowest energy is what the model deems to be the most feasible rxn\n batch_preds = torch.topk(batch_energies, k=k, dim=1, largest=False)[1] \n batch_correct_preds = torch.where(batch_preds == batch_true_ranks)[0].shape[0]\n batch_correct_preds = torch.tensor([batch_correct_preds]).cuda(self.gpu, non_blocking=True)\n 
dist.all_reduce(batch_correct_preds, dist.ReduceOp.SUM)\n batch_correct_preds = batch_correct_preds.item()\n test_correct_preds[k] += batch_correct_preds\n running_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n\n if k == 1 and self.rank == 0: # overhead is only 5 ms, will check ~5 times each epoch (regardless of batch_size)\n try:\n for j in range(i * self.args.batch_size_eval, (i+1) * self.args.batch_size_eval):\n if j % (self.test_size // 5) == random.randint(0, 3) or j % (self.test_size // 8) == random.randint(0, 5): # peek at a random sample of current batch to monitor training progress\n rxn_idx = random.sample(list(range(self.args.batch_size_eval)), k=1)[0]\n rxn_true_rank = batch_true_ranks_array[rxn_idx]\n rxn_pred_rank = batch_preds[rxn_idx, 0].item()\n rxn_pred_energy = batch_energies[rxn_idx, rxn_pred_rank].item()\n rxn_true_energy = batch_energies[rxn_idx, rxn_true_rank].item() if rxn_true_rank != 9999 else 'NaN'\n rxn_orig_energy = batch_energies[rxn_idx, 0].item()\n rxn_orig_energy2 = batch_energies[rxn_idx, 1].item()\n rxn_orig_energy3 = batch_energies[rxn_idx, 2].item()\n\n rxn_true_prod = self.proposals_data['test'][batch_idx[rxn_idx], 0]\n rxn_true_prec = self.proposals_data['test'][batch_idx[rxn_idx], 1]\n rxn_cand_precs = self.proposals_data['test'][batch_idx[rxn_idx], 3:]\n rxn_pred_prec = rxn_cand_precs[batch_preds[rxn_idx]]\n rxn_orig_prec = rxn_cand_precs[0]\n rxn_orig_prec2 = rxn_cand_precs[1]\n rxn_orig_prec3 = rxn_cand_precs[2]\n logging.info(f'\\ntrue product: \\t\\t\\t\\t{rxn_true_prod}')\n logging.info(f'pred precursor (rank {rxn_pred_rank}, energy = {rxn_pred_energy:+.4f}):\\t\\t\\t{rxn_pred_prec}')\n if rxn_true_energy == 'NaN':\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy}):\\t\\t\\t\\t{rxn_true_prec}')\n else:\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy:+.4f}):\\t\\t\\t{rxn_true_prec}')\n logging.info(f'orig precursor (rank 0, energy = {rxn_orig_energy:+.4f}):\\t\\t\\t{rxn_orig_prec}')\n logging.info(f'orig precursor (rank 1, energy = {rxn_orig_energy2:+.4f}):\\t\\t\\t{rxn_orig_prec2}')\n logging.info(f'orig precursor (rank 2, energy = {rxn_orig_energy3:+.4f}):\\t\\t\\t{rxn_orig_prec3}\\n')\n break\n except Exception as e:\n tb_str = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)\n logging.info(\"\".join(tb_str))\n logging.info('\\nIndex out of range (last minibatch)')\n \n batch_loss = torch.tensor([batch_loss]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(batch_loss, dist.ReduceOp.SUM)\n batch_loss = batch_loss.item()\n test_loss += batch_loss\n if self.rank == 0:\n test_loader.set_description(f\"testing...loss={test_loss / test_batch_size:.4f}, top-1 acc={running_topk_accs[1]:.4f}, top-5 acc={running_topk_accs[5]:.4f}, top-10 acc={running_topk_accs[10]:.4f}\")\n test_loader.refresh()\n \n for k in self.k_to_test:\n self.test_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n \n dist.barrier()\n message = f\"{self.args.expt_name}\\n\"\n if self.rank == 0:\n logging.info(f'\\nTest loss: {test_loss / epoch_test_size:.4f}')\n for k in self.k_to_test:\n this_topk_message = f'Test top-{k} accuracy: {100 * self.test_topk_accs[k]:.3f}%'\n logging.info(this_topk_message)\n message += this_topk_message + '\\n'\n try:\n send_message(message)\n except Exception as e:\n pass", "def test_test(task_dataset, features):\n features = torch.cat(features)\n feat = features[0]\n expected = features.eq(feat).sum().item() / N_SAMPLES\n\n class 
FakeModule(nn.Module):\n \"\"\"Always returns the same prediction.\"\"\"\n\n def forward(self, reps):\n \"\"\"Just returns the tag.\"\"\"\n assert reps.shape[-1] == N_DIMS_PER_REP\n logits = torch.zeros(len(reps), N_UNIQUE_FEATS)\n logits[:, feat] = 1\n return logits\n\n actual = learning.test(FakeModule(),\n task_dataset,\n device=torch.device('cpu'))\n assert actual == expected", "def test_model(model, loss_fn, test_generator):\n gold = []\n predicted = []\n\n # Keep track of the loss\n loss = torch.zeros(1) # requires_grad = False by default; float32 by default\n if USE_CUDA:\n loss = loss.cuda()\n\n model.eval()\n cnt = 0\n # Iterate over batches in the test dataset\n with torch.no_grad():\n for X_b, y_b in test_generator:\n # Predict\n X_b = X_b.permute(1, 0, 2)\n y_pred = model(X_b)\n # Save gold and predicted labels for F1 score - take the argmax to convert to class labels\n gold.extend(y_b.cpu().detach().numpy())\n predicted.extend(y_pred.argmax(1).cpu().detach().numpy())\n\n loss += loss_fn(y_pred.double(), y_b.long()).detach().item()\n cnt += 1\n\n test_accuracy = accuracy_score(gold, predicted)\n loss /= cnt\n # Print total loss and macro F1 score\n print(\"Test loss: \")\n print(loss)\n print(\"Test accu: \")\n print(test_accuracy)\n print(\"F-score: \")\n print(f1_score(gold, predicted, average='macro'))\n\n print(\"True value: \")\n print(gold)\n\n print(\"Predicted: \")\n print(predicted)", "def test(self):\n with torch.no_grad():\n self.model.eval()\n p10_forecast, p10_forecast, p90_forecast, target = None, None, None, None\n\n t = time()\n for step, sample in enumerate(self.test_loader):\n\n # Hide future predictions from input vector, set to 0 (or 1) values where timestep > encoder_steps\n steps = self.cnf.all_params['num_encoder_steps']\n pred_len = sample['outputs'].shape[1]\n x = sample['inputs'].float().to(self.cnf.device)\n x[:, steps:, 0] = 1\n\n # Feed input to the model\n if self.cnf.all_params[\"model\"] == \"transformer\" or self.cnf.all_params[\"model\"] == \"grn_transformer\":\n\n # Auto-regressive prediction\n for i in range(pred_len):\n output = self.model.forward(x)\n x[:, steps + i, 0] = output[:, i, 1]\n output = self.model.forward(x)\n\n elif self.cnf.all_params[\"model\"] == \"tf_transformer\":\n output, _, _ = self.model.forward(x)\n else:\n raise NameError\n\n output = output.squeeze()\n y, y_pred = sample['outputs'].squeeze().float().to(self.cnf.device), output\n\n # Compute loss\n loss, _ = self.loss(y_pred, y)\n smape = symmetric_mean_absolute_percentage_error(output[:, :, 1].detach().cpu().numpy(),\n sample['outputs'][:, :, 0].detach().cpu().numpy())\n\n # De-Normalize to compute metrics\n target = unnormalize_tensor(self.data_formatter, y, sample['identifier'][0][0])\n p10_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 0], sample['identifier'][0][0])\n p50_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 1], sample['identifier'][0][0])\n p90_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 2], sample['identifier'][0][0])\n\n # Compute metrics\n self.test_losses['p10'].append(self.loss.numpy_normalised_quantile_loss(p10_forecast, target, 0.1))\n self.test_losses['p50'].append(self.loss.numpy_normalised_quantile_loss(p50_forecast, target, 0.5))\n self.test_losses['p90'].append(self.loss.numpy_normalised_quantile_loss(p90_forecast, target, 0.9))\n\n self.test_loss.append(loss.item())\n self.test_smape.append(smape)\n\n # Plot serie prediction\n p1, p2, p3, target = np.expand_dims(p10_forecast, axis=-1), 
np.expand_dims(p50_forecast, axis=-1), \\\n np.expand_dims(p90_forecast, axis=-1), np.expand_dims(target, axis=-1)\n p = np.concatenate((p1, p2, p3), axis=-1)\n plot_temporal_serie(p, target)\n\n # Log stuff\n for k in self.test_losses.keys():\n mean_test_loss = np.mean(self.test_losses[k])\n print(f'\\t● AVG {k} Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n\n # log log log\n mean_test_loss = np.mean(self.test_loss)\n mean_smape = np.mean(self.test_smape)\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n print(f'\\t● AVG SMAPE on TEST-set: {mean_smape:.6f} │ T: {time() - t:.2f} s')", "def run_epoch(self, train, dev, epoch):\n # iterate over dataset\n for i, (words, labels) in enumerate(minibatches(train, self.config.batch_size)):\n fd, _ = self.get_feed_dict(words, labels, self.config.lr,\n self.config.dropout)\n\n _, train_loss= self.sess.run(\n [self.train_op, self.loss], feed_dict=fd)\n\n# =============================================================================\n# # tensorboard\n# if i % 10 == 0:\n# self.file_writer.add_summary(summary, epoch*nbatches + i)\n# =============================================================================\n\n metrics = self.run_evaluate(dev)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n print(msg)\n\n return metrics[\"f1\"]", "def test_model(model, loss_fn, test_generator):\r\n gold = []\r\n predicted = []\r\n\r\n # Keep track of the loss\r\n loss = torch.zeros(1) # requires_grad = False by default; float32 by default\r\n if USE_CUDA:\r\n loss = loss.cuda()\r\n\r\n model.eval()\r\n\r\n # Iterate over batches in the test dataset\r\n with torch.no_grad():\r\n for X_b, y_b in test_generator:\r\n # Predict\r\n y_pred = model(X_b)\r\n\r\n # Save gold and predicted labels for F1 score - take the argmax to convert to class labels\r\n gold.extend(y_b.cpu().detach().numpy())\r\n predicted.extend(y_pred.argmax(1).cpu().detach().numpy())\r\n\r\n loss += loss_fn(y_pred.double(), y_b.long()).data\r\n\r\n # Print total loss and macro F1 score\r\n print(\"Test loss: \")\r\n print(loss)\r\n print(\"F-score: \")\r\n print(f1_score(gold, predicted, average='macro'))", "def test(self):\n self.model.eval()\n test_loss, test_correct_preds = 0, defaultdict(int)\n if self.test_loader is None: # running G2E\n self.test_loader, self.test_size, _ = self._get_smi_dl(phase=\"test\", shuffle=False)\n test_loader = tqdm(self.test_loader, desc='testing...')\n\n running_topk_accs = defaultdict(lambda: np.nan)\n with torch.no_grad():\n epoch_test_size = 0\n for i, batch in enumerate(test_loader):\n batch_data = batch[0]\n if not isinstance(batch_data, tuple):\n batch_data = batch_data.to(self.device)\n if self.model_name == 'TransformerEBM':\n batch_data = (batch_data, 'test')\n batch_mask = batch[1].to(self.device)\n batch_energies = self._one_batch(\n batch_data, batch_mask, backprop=False,\n )\n test_batch_size = batch_energies.shape[0]\n epoch_test_size += test_batch_size\n\n # for validation/test data, true rxn may not be present!\n batch_idx = batch[2]\n batch_true_ranks_array = self.proposals_data['test'][batch_idx, 2].astype('int')\n batch_true_ranks_valid = batch_true_ranks_array[batch_true_ranks_array < self.args.minibatch_eval]\n batch_true_ranks = torch.as_tensor(batch_true_ranks_array).unsqueeze(dim=-1)\n # slightly tricky as we have to ignore rxns with no 'positive' rxn for loss calculation\n # (bcos nothing in the numerator, loss is undefined)\n loss_numerator = 
batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n batch_true_ranks_valid\n ]\n loss_denominator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n :\n ]\n batch_loss = (loss_numerator + torch.logsumexp(-loss_denominator, dim=1)).sum().item()\n for k in self.k_to_test:\n # index with lowest energy is what the model deems to be the most feasible rxn\n batch_preds = torch.topk(batch_energies, k=k, dim=1, largest=False)[1] \n batch_correct_preds = torch.where(batch_preds == batch_true_ranks)[0].shape[0]\n test_correct_preds[k] += batch_correct_preds\n running_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n\n if k == 1:\n # overhead is only 5 ms, will check ~5 times each epoch (regardless of batch_size)\n try:\n for j in range(i * self.args.batch_size_eval, (i+1) * self.args.batch_size_eval):\n if j % (self.test_size // 5) == random.randint(0, 3) or j % (self.test_size // 8) == random.randint(0, 5): # peek at a random sample of current batch to monitor training progress\n rxn_idx = random.sample(list(range(self.args.batch_size_eval)), k=1)[0]\n rxn_true_rank = batch_true_ranks_array[rxn_idx]\n rxn_pred_rank = batch_preds[rxn_idx, 0].item()\n rxn_pred_energy = batch_energies[rxn_idx, rxn_pred_rank].item()\n rxn_true_energy = batch_energies[rxn_idx, rxn_true_rank].item() if rxn_true_rank != 9999 else 'NaN'\n rxn_orig_energy = batch_energies[rxn_idx, 0].item()\n rxn_orig_energy2 = batch_energies[rxn_idx, 1].item()\n rxn_orig_energy3 = batch_energies[rxn_idx, 2].item()\n\n rxn_true_prod = self.proposals_data['test'][batch_idx[rxn_idx], 0]\n rxn_true_prec = self.proposals_data['test'][batch_idx[rxn_idx], 1]\n rxn_cand_precs = self.proposals_data['test'][batch_idx[rxn_idx], 3:]\n rxn_pred_prec = rxn_cand_precs[batch_preds[rxn_idx]]\n rxn_orig_prec = rxn_cand_precs[0]\n rxn_orig_prec2 = rxn_cand_precs[1]\n rxn_orig_prec3 = rxn_cand_precs[2]\n logging.info(f'\\ntrue product: \\t\\t\\t\\t{rxn_true_prod}')\n logging.info(f'pred precursor (rank {rxn_pred_rank}, energy = {rxn_pred_energy:+.4f}):\\t\\t\\t{rxn_pred_prec}')\n if rxn_true_energy == 'NaN':\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy}):\\t\\t\\t\\t{rxn_true_prec}')\n else:\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy:+.4f}):\\t\\t\\t{rxn_true_prec}')\n logging.info(f'orig precursor (rank 0, energy = {rxn_orig_energy:+.4f}):\\t\\t\\t{rxn_orig_prec}')\n logging.info(f'orig precursor (rank 1, energy = {rxn_orig_energy2:+.4f}):\\t\\t\\t{rxn_orig_prec2}')\n logging.info(f'orig precursor (rank 2, energy = {rxn_orig_energy3:+.4f}):\\t\\t\\t{rxn_orig_prec3}\\n')\n break\n except Exception as e:\n tb_str = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)\n logging.info(\"\".join(tb_str))\n logging.info('\\nIndex out of range (last minibatch)')\n\n test_loss += batch_loss\n test_loader.set_description(f\"testing...loss={test_loss / epoch_test_size:.4f}, top-1 acc={running_topk_accs[1]:.4f}, top-5 acc={running_topk_accs[5]:.4f}, top-10 acc={running_topk_accs[10]:.4f}\")\n test_loader.refresh()\n \n for k in self.k_to_test:\n self.test_topk_accs[k] = test_correct_preds[k] / epoch_test_size # self.test_size\n\n logging.info(f'\\nTest loss: {test_loss / epoch_test_size:.4f}')\n message = f\"{self.args.expt_name}\\n\"\n for k in self.k_to_test:\n this_topk_message = f'Test top-{k} accuracy: {100 * self.test_topk_accs[k]:.3f}%'\n 
logging.info(this_topk_message)\n message += this_topk_message + '\\n'\n try:\n send_message(message)\n except Exception as e:\n pass", "def test(self, net: nn.Module, clean_data: CSVTextDataset, triggered_data: CSVTextDataset,\n clean_test_triggered_labels_data: CSVTextDataset,\n torch_dataloader_kwargs: dict = None) -> dict:\n test_data_statistics = {}\n test_data_statistics['clean_metrics'] = None\n test_data_statistics['triggered_metrics'] = None\n test_data_statistics['clean_test_metrics'] = None\n net.eval()\n\n triggered_counts = None\n clean_test_counts = None\n test_counts = None\n pin_memory = False\n if self.device.type != 'cpu':\n pin_memory = True\n\n # drop_last=True is from: https://stackoverflow.com/questions/56576716\n data_loader_kwargs_in = dict(batch_size=1, pin_memory=pin_memory, drop_last=True, shuffle=False)\n if torch_dataloader_kwargs:\n data_loader_kwargs_in.update(torch_dataloader_kwargs)\n logger.info('DataLoader[Test] kwargs=' + str(torch_dataloader_kwargs))\n data_loader = self.convert_dataset_to_dataiterator(clean_data, batch_size=1)\n\n # Test the classification accuracy on clean data only, for all labels.\n test_acc, test_n_total, test_n_correct, _, clean_counts = self._eval_acc(data_loader, net)\n\n acc_per_label = \"{\"\n for k in test_n_total.keys():\n acc_per_label += \"{}: {}, \".format(k, 0 if test_n_total[k] == 0 else float(test_n_correct[k]) / float(\n test_n_total[k]))\n acc_per_label += \"}\"\n\n test_data_statistics['clean_accuracy'] = test_acc\n test_data_statistics['clean_n_total'] = test_n_total\n test_data_statistics['clean_per_label_accuracy'] = acc_per_label\n logger.info(\"Accuracy on clean test data: %0.02f\" % (test_data_statistics['clean_accuracy'],))\n logger.info('Per label test accuracy: {}'.format(acc_per_label))\n logger.info('Total per label correct: {}'.format(test_n_correct))\n logger.info('Total per label: {}'.format(test_n_total))\n\n if triggered_data is not None:\n # Test the classification accuracy on triggered data only, for all labels.\n # we set batch_size=1 b/c\n data_loader = self.convert_dataset_to_dataiterator(triggered_data, batch_size=1)\n test_acc, test_n_total, test_n_correct, _, triggered_counts = self._eval_acc(data_loader, net)\n acc_per_label = \"{\"\n for k in test_n_total.keys():\n acc_per_label += \"{}: {}, \".format(k, 0 if test_n_total[k] == 0 else float(test_n_correct[k]) / float(\n test_n_total[k]))\n acc_per_label += \"}\"\n\n test_data_statistics['triggered_accuracy'] = test_acc\n test_data_statistics['triggered_n_total'] = test_n_total\n test_data_statistics['triggered_per_label_accuracy'] = acc_per_label\n\n logger.info(\"Accuracy on triggered test data: %0.02f for n=%s\" %\n (test_data_statistics['triggered_accuracy'], str(test_n_total)))\n\n self.ner_metrics.add_best(self.best_epoch, clean_counts, triggered_counts)\n\n # Write the report for ner_metrics\n self.ner_metrics.write_per_epoch(self.ner_per_epoch_report_filepath)\n self.ner_metrics.write_best_epoch(self.ner_report_filepath)\n\n return test_data_statistics", "def test_model(self, batch_size):\n (_, gen_val, gen_test) = self.dataset.data_loaders(\n batch_size=batch_size,\n split=(0.01, 0.01)\n )\n print('Num Test Batches: ', len(gen_test))\n mean_loss_test, mean_accuracy_test = self.loss_and_acc_test(gen_test)\n print('Test Epoch:')\n print(\n '\\tTest Loss: ', mean_loss_test, '\\n'\n '\\tTest Accuracy: ', mean_accuracy_test * 100\n )", "def test_KNN_test_parameter(params, X_train, X_test, y_train, y_test):", "def 
test_is_trainable(estimator_fn, machine_settings):\n # Setup\n batch_size = 128 # Must be divisible by number of replicas (8 for TPU v2)\n crop_size = 24\n eval_count = 1024\n eval_steps = int(eval_count / batch_size)\n assert eval_steps * batch_size == eval_count\n estimator = estimator_fn(\n micronet.cifar.linear_model.create_model, batch_size, batch_size)\n\n # Replace with lambda?\n def input_fn(params):\n # Only the TPUEstimator needs to pass batch_size to the input_fn.\n if 'batch_size' in params:\n assert params['batch_size'] == batch_size\n del params\n mini_ds = cifar_ds.train_dataset(\n cloud_storage=machine_settings.is_cloud)\n mini_ds = mini_ds.map(\n cifar_ds.preprocess_fn(augment=False, crop_to=crop_size))\n # Take a small amount and repeat so that the test can show training\n # in a smaller amount of steps (so the test runs quickly).\n mini_ds.take(500).repeat()\n return mini_ds.batch(batch_size, drop_remainder=True)\n\n # Test\n # 1. Check that the untrained model predicts randomly.\n #\n # I want the test to pass 99% of the time.\n # For a 1000 trial experiment with success probability of 1% (100 classes),\n # CDF_inverse(0.01) ~= 3\n # CDF_inverse(0.99) ~= 19\n # (from binomial dist calculator:\n # https://www.di-mgt.com.au/binomial-calculator.html)\n # TODO: is it valid to assume a random output from the untrained model?\n results = estimator.evaluate(input_fn, steps=eval_steps)\n assert 3/eval_count < results[micronet.estimator.TOP_1_ACCURACY_KEY] \\\n <= 19/eval_count\n\n # 2. Check that the model can be trained.\n # Using the eval_steps as the max training steps. Could use something else.\n estimator.train(input_fn, max_steps=eval_steps)\n\n # 3. Check that the training has increased the model's accuracy.\n # Results is a dict containing the metrics defined by the model_fn.\n # FIXME 4: I should encapsulate/separate the metric creation so that it\n # is easy to assume that certain metrics are present.\n results = estimator.evaluate(input_fn, steps=eval_steps)\n # We should expect some improvement over the random case, 1/100. 
Running\n # it a few times gave ~4.5%, so using a value a little lower to make sure\n # the test reliably passes (while still being useful).\n assert results[micronet.estimator.TOP_1_ACCURACY_KEY] >= 0.040", "def dt_train_test(dt, xTrain, yTrain, xTest, yTest):\n # train the model\n dt.train(xTrain, yTrain['label'])\n # predict the training dataset\n yHatTrain = dt.predict(xTrain)\n trainAcc = accuracy_score(yTrain['label'], yHatTrain)\n # predict the test dataset\n yHatTest = dt.predict(xTest)\n testAcc = accuracy_score(yTest['label'], yHatTest)\n return trainAcc, testAcc", "def evaluate(self,\n test_loader: Type[torch.utils.data.DataLoader],\n **kwargs: float) -> float:\n # initialize loss accumulator\n test_loss = 0.\n # compute the loss over the entire test set\n with torch.no_grad():\n for data in test_loader:\n if len(data) == 1: # VAE mode\n x = data[0]\n loss = self.svi.step(x.to(self.device), **kwargs)\n else: # VED or cVAE mode\n x, y = data\n loss = self.svi.step(\n x.to(self.device), y.to(self.device), **kwargs)\n test_loss += loss\n\n return test_loss / len(test_loader.dataset)", "def _test(self,model, dataset):\n avg_loss = tfe.metrics.Mean('loss')\n accuracy = tfe.metrics.Accuracy('accuracy')\n\n for (images, labels) in tfe.Iterator(dataset):\n logits = model(images, training=False)\n avg_loss(self._loss(logits, labels))\n accuracy(\n tf.argmax(logits, axis=1, output_type=tf.int64),\n tf.cast(labels, tf.int64))\n print('Test set: Average loss: %.4f, Accuracy: %4f%%\\n' %\n (avg_loss.result(), 100 * accuracy.result()))\n with tf.contrib.summary.always_record_summaries():\n tf.contrib.summary.scalar('loss', avg_loss.result())\n tf.contrib.summary.scalar('accuracy', accuracy.result())", "def test(args, model, lossfn, device, data, target):\n model.eval()\n test_loss = 0\n correct = 0\n data, target = data.to(device), target.to(device)\n output = model(data)\n # Final result will be average of averages of the same size\n test_loss += lossfn(output, target).item()\n ppe.reporting.report({\"val/loss\": test_loss})\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n ppe.reporting.report({\"val/acc\": correct / len(data)})", "def test_dataloader(self, batch_size: Optional[int] = None) -> DataLoader:\n if self.test is not None:\n dataset = TabularDataset(\n task=self.config.task,\n data=self.test,\n categorical_cols=self.config.categorical_cols,\n continuous_cols=self.config.continuous_cols,\n embed_categorical=(not self.do_leave_one_out_encoder()),\n target=self.target,\n )\n return DataLoader(\n dataset,\n batch_size if batch_size is not None else self.batch_size,\n shuffle=False,\n num_workers=self.config.num_workers,\n pin_memory=self.config.pin_memory,\n )", "def test_ann(trainx, trainy, testx, testy):\n\tinput_size = len(trainx.iloc[0])\n\toutput_size = len(np.unique(trainy))\n\ttrain = ClassificationDataSet(input_size, 1, nb_classes=output_size)\n\tfor i in range(len(trainx.index)):\n\t\ttrain.addSample(trainx.iloc[i].values, trainy.iloc[i])\n\n\ttest = ClassificationDataSet(input_size, 1, nb_classes=output_size)\n\tfor i in range(len(testx.index)):\n\t\ttest.addSample(testx.iloc[i].values, testy.iloc[i])\n\ttrain._convertToOneOfMany()\n\ttest._convertToOneOfMany()\n\n\tprint(\"Number of training patterns: \", len(train))\n\tprint(\"Input and output dimensions: \", train.indim, train.outdim)\n\tprint(\"First sample (input, target):\")\n\tprint(train[\"input\"][0], train[\"target\"][0])\n\n\tn_hidden = 3\n\tfnn = 
buildNetwork(train.indim, n_hidden, train.outdim)\n\ttrainer = BackpropTrainer(\n\t fnn, dataset=train, momentum=0.1, verbose=True, weightdecay=0.01)\n\n\tprint(\"# hidden nodes: {}\".format(n_hidden))\n\tfor i in range(25):\n\t trainer.trainEpochs(1)\n\t trnresult = percentError(trainer.testOnClassData(), train[\"target\"])\n\t tstresult = percentError(\n\t trainer.testOnClassData(dataset=test), test[\"target\"])\n\t print(\"epoch: %4d\" % trainer.totalepochs, \" train error: %5.2f%%\" % trnresult, \" test error: %5.2f%%\" % tstresult)\n\tpred = fnn.activateOnDataset(test)\n\tpreds = [y.argmax() for y in pred]\n\tprint(accuracy_score(preds, testy, normalize=True))", "def run_test(\n data: pd.Series,\n test: str,\n data_name: Optional[str] = None,\n alpha: float = 0.05,\n data_kwargs: Optional[Dict] = None,\n *kwargs,\n) -> pd.DataFrame:\n if test == \"all\":\n results = _test_all(\n data=data, data_name=data_name, alpha=alpha, data_kwargs=data_kwargs\n )\n elif test == \"summary\":\n results = _summary_stats(\n data=data, data_name=data_name, data_kwargs=data_kwargs\n )\n elif test == \"white_noise\":\n results = _is_white_noise(\n data=data,\n data_name=data_name,\n alpha=alpha,\n verbose=True,\n data_kwargs=data_kwargs,\n *kwargs,\n )[1]\n elif test == \"stationarity\":\n results = _is_stationary(\n data=data, data_name=data_name, alpha=alpha, data_kwargs=data_kwargs\n )\n elif test == \"adf\":\n results = _is_stationary_adf(\n data=data,\n data_name=data_name,\n alpha=alpha,\n verbose=True,\n data_kwargs=data_kwargs,\n )[1]\n elif test == \"kpss\":\n results = _is_stationary_kpss(\n data=data,\n data_name=data_name,\n alpha=alpha,\n verbose=True,\n data_kwargs=data_kwargs,\n )[1]\n elif test == \"normality\":\n results = _is_gaussian(\n data=data,\n data_name=data_name,\n alpha=alpha,\n verbose=True,\n data_kwargs=data_kwargs,\n )[1]\n else:\n raise ValueError(f\"Tests: '{test}' is not supported.\")\n return results", "def test_evaluate(self):\n # Check build does not raise errors\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n model = self.MODEL(dataset, **self.model_arguments)\n model.fit(training_epochs=50)\n metric = model.evaluate('test')\n self.assertLessEqual(0, metric)\n self.assertGreaterEqual(1, metric)", "def test(self, dataset):\n\n outputs, errors = self.use(dataset)\n\n ## PUT CODE HERE ##\n # I put the code in the \"use\" function, seems better :-)\n\n return outputs, errors", "def test(neuralnet, dataloader):\n neuralnet.eval()\n batch_transform = data.BatchTransform()\n\n idx = 0\n for iteration, batch in enumerate(dataloader):\n with torch.no_grad():\n im = batch[0].requires_grad_(False).to(DEVICE)\n keypts = batch[1].requires_grad_(False).to(DEVICE)\n\n deformed_batch = batch_transform.exe(im, landmarks=keypts)\n im, future_im, mask = deformed_batch['image'], deformed_batch['future_image'], deformed_batch['mask']\n\n future_im_pred, gauss_mu, _ = neuralnet(im, future_im)\n\n predict = future_im_pred.data.cpu().numpy().transpose(0, 2, 3, 1)\n gauss_mu = gauss_mu.data.cpu().numpy()\n # gauss_map = gauss_map.data.cpu().numpy()\n future_im = future_im.data.cpu().numpy().transpose(0, 2, 3, 1)\n\n os.makedirs('testcheck', exist_ok=True)\n fig_path = path.join('testcheck', 'fig_{}.png'.format(iteration))\n utils.savegrid(fig_path, future_im, predict, gauss_mu=gauss_mu, name='deform')\n\n idx += im.shape[0]\n\n neuralnet.train()\n return idx", "def 
test_run(self):\n sut = ExperimentEmail()\n train = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n val = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n outdir = tempfile.mkdtemp()\n\n # Act\n sut.run(train, val, outdir, batch_size=32, epochs=2)", "def test_training(self):\n\t\tpass", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def test(\n self,\n model: Optional[LightningModule] = None,\n test_dataloaders: Optional[Union[DataLoader, List[DataLoader]]] = None,\n ckpt_path: Optional[str] = 'best',\n verbose: bool = True,\n datamodule: Optional[LightningDataModule] = None,\n ):\n # --------------------\n # SETUP HOOK\n # --------------------\n self.verbose_test = verbose\n\n self.logger_connector.set_stage(\"test\")\n\n # If you supply a datamodule you can't supply train_dataloader or val_dataloaders\n if test_dataloaders and datamodule:\n raise MisconfigurationException(\n 'You cannot pass test_dataloaders to trainer.test if you supply a datamodule'\n )\n\n # Attach datamodule to get setup/prepare_data added to model before the call to it below\n self.data_connector.attach_datamodule(model or self.get_model(), datamodule, 'test')\n\n if model is not None:\n results = self.__test_given_model(model, test_dataloaders)\n else:\n results = self.__test_using_best_weights(ckpt_path, test_dataloaders)\n\n self.teardown('test')\n\n return results", "def test(self, network, test_data: BaseDataManager,\n metrics: dict, metric_keys=None,\n verbose=False, prepare_batch=lambda x: x,\n convert_fn=lambda x: x, **kwargs):\n\n kwargs = self._resolve_kwargs(kwargs)\n\n predictor = self.setup(None, training=False, model=network,\n convert_batch_to_npy_fn=convert_fn,\n prepare_batch_fn=prepare_batch, **kwargs)\n\n # return first item of generator\n return next(predictor.predict_data_mgr_cache_all(test_data, 1, metrics,\n metric_keys, verbose))", "def eval_perf_train(model, X_train=None, y_train=None):\n\n # if X_train != None and y_train != None:\n\n y_hat_train = model.predict(X_train)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f'Train Mean Absolute Error: {train_mae:,.2f}')\n print(f'Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n # if X_test != None and y_test != None:\n\n # y_hat_test = model.predict(X_test)\n\n # test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n # test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n # test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n # test_r = metrics.r2_score(y_test, y_hat_test)\n\n # print('Evaluating Performance on Testing Data:\\n')\n # print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n # print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n # print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n # print(f'Test R-Square Value: {round(test_r,2)}')", "def test(self, test_iter, step, corpus_type, id):\n\n self.model.eval()\n stats = Statistics()\n if not os.path.exists(self.args.result_path):\n os.makedirs(self.args.result_path)\n if not 
os.path.exists(self.args.story_path):\n os.makedirs(self.args.story_path)\n can_path = self.args.result_path + corpus_type + '.' + id + '_step%d.candidate' % step\n gold_path = self.args.result_path + corpus_type + '.' + id + '_step%d.gold' % step\n story_path = self.args.story_path + corpus_type + '.' + id + '.story'\n with open(story_path, 'w') as save_story:\n with open(can_path, 'w') as save_pred:\n with open(gold_path, 'w') as save_gold:\n with torch.no_grad():\n for batch in test_iter:\n src = batch.src\n labels = batch.labels\n segs = batch.segs\n clss = batch.clss\n mask = batch.mask\n mask_cls = batch.mask_cls\n weight = batch.weight\n index = batch.index\n\n pred = []\n\n sents_vec, sent_scores, mask, cluster_weight = self.model(src, segs, clss, mask, mask_cls)\n loss = self.loss(sent_scores, labels.float())\n weight_loss = self.weight_loss(cluster_weight, weight)\n loss = (loss * mask.float()).sum()\n total_loss = loss + weight_loss * 10\n batch_stats = Statistics(float(total_loss.cpu().data.numpy()), len(labels))\n stats.update(batch_stats)\n\n sent_scores = sent_scores + mask.float()\n sent_scores = sent_scores.cpu().data.numpy()\n cluster_weight = cluster_weight.cpu().data.numpy()\n selected_ids = np.argsort(-sent_scores, 1)\n cluster_weight = np.argsort(cluster_weight)\n # print(selected_ids)\n # selected_ids = np.sort(selected_ids,1)\n cluster_num = len(cluster_weight)\n for i, idx in enumerate(selected_ids):\n rank = np.where(cluster_weight == i)[0][0]\n\n if rank <= max(cluster_num // 6, 6):\n for j in range(5):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n elif rank <= max(cluster_num // 3, 10):\n for j in range(3):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n elif rank <= max(cluster_num * 2 // 3, 15):\n for j in range(2):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n else:\n sen_ind = selected_ids[i][0]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n\n gold_summary = (batch.tgt_str[0].strip())\n pred.sort(key=lambda x: x[0])\n for i in range(len(pred)):\n save_story.write(pred[i][1].strip() + '\\n')\n if i == 0:\n save_pred.write(pred[i][1].strip())\n else:\n save_pred.write('<q> ' + pred[i][1].strip())\n save_gold.write(gold_summary)\n for sent in gold_summary.split('<q>'):\n save_story.write('@highlight {}\\n'.format(sent))\n if self.args.test_txt:\n return stats\n else:\n rouges = calculate_rouge(can_path, gold_path)\n logger.info('Rouges at step %d \\n%s' % (step, rouge_results_to_str(rouges)))\n self._report_step(0, step, valid_stats=stats)\n return stats, rouges", "def test_fit():\n args = get_layer('fit', 'manual', 'temporal', False, False, window=2, step_size=3)\n run_layer(*args)", "def test(self,dataset):\n outputs = self.use(dataset)\n \n costs = np.ones((len(outputs),1))\n # Compute classification error\n for xy,pred,cost in zip(dataset,outputs,costs):\n x,y = xy\n if y == pred[0]:\n cost[0] = 0\n\n return outputs,costs", "def test(model, data_loader, num_train_batches, epoch, test_mloss, test_rloss, test_acc, directory):\r\n print('===> Evaluate mode')\r\n\r\n # Switch to evaluate mode\r\n model.eval()\r\n\r\n if args.cuda:\r\n # When we wrap a Module in DataParallel for multi-GPUs\r\n model = model.module\r\n\r\n loss = 0\r\n margin_loss = 0\r\n recon_loss = 0\r\n\r\n correct = 0\r\n\r\n num_batches 
= len(data_loader)\r\n\r\n global_step = epoch * num_train_batches + num_train_batches\r\n\r\n start_time = timer()\r\n\r\n for data, target in data_loader:\r\n with torch.no_grad():\r\n batch_size = data.size(0)\r\n target_indices = target\r\n target_one_hot = utils.one_hot_encode(target_indices, length=args.num_classes)\r\n assert target_one_hot.size() == torch.Size([batch_size, 10])\r\n\r\n target = target_one_hot\r\n\r\n if args.cuda:\r\n data = data.to(args.device)\r\n target = target.to(args.device)\r\n target_indices.to(args.device)\r\n\r\n # Output predictions\r\n output, reconstruction = model(data, target_indices, False) # output from DigitCaps (out_digit_caps)\r\n\r\n # Sum up batch loss\r\n t_loss, m_loss, r_loss = loss_func(\r\n output, target, args.regularization_scale, reconstruction, data, args.device, batch_size)\r\n loss += t_loss.data\r\n margin_loss += m_loss.data\r\n recon_loss += r_loss.data\r\n\r\n # Count number of correct predictions\r\n # v_magnitude shape: [128, 10, 1, 1]\r\n v_magnitude = torch.sqrt((output**2).sum(dim=2, keepdim=True))\r\n # pred shape: [128, 1, 1, 1]\r\n pred = v_magnitude.data.max(1, keepdim=True)[1].cpu()\r\n correct += pred.eq(target_indices.view_as(pred)).sum()\r\n\r\n\r\n # Get the reconstructed images of the last batch\r\n if args.use_reconstruction_loss:\r\n reconstruction = model.decoder(output, target_indices, False)\r\n # Input image size and number of channel.\r\n # By default, for MNIST, the image width and height is 28x28 and 1 channel for black/white.\r\n image_width = args.input_width\r\n image_height = args.input_height\r\n image_channel = args.num_conv_in_channels\r\n recon_img = reconstruction.view(-1, image_channel, image_width, image_height)\r\n assert recon_img.size() == torch.Size([batch_size, image_channel, image_width, image_height])\r\n\r\n # Save the image into file system\r\n utils.save_image(recon_img, directory / ('recons_image_test_{}_{}.png'.format(epoch, global_step)))\r\n utils.save_image(data, directory /\r\n ('original_image_test_{}_{}.png'.format(epoch, global_step)))\r\n\r\n end_time = timer()\r\n\r\n # Log test losses\r\n loss /= num_batches\r\n margin_loss /= num_batches\r\n recon_loss /= num_batches\r\n\r\n # Log test accuracies\r\n num_test_data = len(data_loader.dataset)\r\n accuracy = correct / num_test_data\r\n accuracy_percentage = float(correct) * 100.0 / float(num_test_data)\r\n\r\n test_mloss.write('%.6f \\n' % margin_loss)\r\n test_rloss.write('%.6f \\n' % recon_loss)\r\n test_acc.write('%.4f \\n' % accuracy_percentage)\r\n\r\n # Print test losses and accuracy\r\n print('Test: [Loss: {:.6f},' \\\r\n '\\tMargin loss: {:.6f},' \\\r\n '\\tReconstruction loss: {:.6f}]'.format(\r\n loss,\r\n margin_loss,\r\n recon_loss if args.use_reconstruction_loss else 0))\r\n print('Test Accuracy: {}/{} ({:.2f}%)\\n'.format(\r\n correct, num_test_data, accuracy_percentage))\r\n\r\n\r\n global avg_testing_time_per_epoch\r\n avg_testing_time_per_epoch = (\r\n avg_testing_time_per_epoch * (epoch - 1) + end_time - start_time) / epoch\r\n\r\n global best_acc\r\n global best_acc_epoch\r\n if accuracy_percentage > best_acc:\r\n best_acc = accuracy_percentage\r\n best_acc_epoch = epoch\r\n test_loader = data_loader\r\n utils.dump(utils.make_full_checkpoint_obj(locals(), globals()), directory / 'trained_model/FP32_model')", "def run_valid(self, dataset, attribution, logger, other=None):\n shuffle = self.model_config['shuffle'] if 'shuffle' in self.model_config else True\n\n model_class = self.model_config.model\n 
optim_class = self.model_config.optimizer\n stopper_class = self.model_config.early_stopper\n clipping = self.model_config.gradient_clipping\n\n loss_fn = get_loss_func(self.dataset_config['task_type'], self.model_config.exp_name)\n shuffle = self.model_config['shuffle'] if 'shuffle' in self.model_config else True\n\n train_loader, valid_loader, features_scaler, scaler = dataset.get_train_loader(self.model_config['batch_size'],\n shuffle=shuffle)\n\n model = model_class(dim_features=dataset.dim_features, dim_target=dataset.dim_target, model_configs=self.model_config, dataset_configs=self.dataset_config)\n net = ExplainerNetWrapper(model, attribution, dataset_configs=self.dataset_config, model_config=self.model_config,\n loss_function=loss_fn)\n optimizer = optim_class(model.parameters(),\n lr=self.model_config['learning_rate'], weight_decay=self.model_config['l2'])\n scheduler = build_lr_scheduler(optimizer, model_configs=self.model_config, num_samples=dataset.num_samples)\n\n train_loss, train_metric, val_loss, val_metric, _, _, _ = net.train(train_loader=train_loader, valid_loader=valid_loader,\n optimizer=optimizer, scheduler=scheduler,\n clipping=clipping, scaler=scaler,\n early_stopping=stopper_class,\n logger=logger)\n\n if other is not None and 'model_path' in other.keys():\n save_checkpoint(path=other['model_path'], model=model, scaler=scaler)\n\n return train_metric, val_metric", "def postprocess_finetuning_metrics(\n valid_metrics_dict: _MetricsType,\n test_metrics_dict: _MetricsType,\n accuracy_name: str,\n finetuning_fn_name: str,\n) -> collections.OrderedDict[str, Any]:\n # Find the best finetuning epoch using the validation metrics.\n valid_baseline_metrics = valid_metrics_dict[_BASELINE_METRICS]\n valid_finetuning_metrics = valid_metrics_dict[finetuning_fn_name]\n num_finetuning_epochs = len(valid_finetuning_metrics) - 1\n best_epoch = 0\n best_valid_accuracies_mean = np.mean(valid_baseline_metrics[accuracy_name])\n for idx in range(1, num_finetuning_epochs + 1):\n current_valid_accuracies_mean = np.mean(\n valid_finetuning_metrics[f'epoch_{idx}'][accuracy_name])\n if current_valid_accuracies_mean > best_valid_accuracies_mean:\n best_epoch = idx\n best_valid_accuracies_mean = current_valid_accuracies_mean\n # Extract the test accuracies at the best finetuning epoch.\n test_baseline_metrics = test_metrics_dict[_BASELINE_METRICS]\n test_finetuning_metrics = test_metrics_dict[finetuning_fn_name]\n test_accuracies_at_best_epoch_mean = np.mean(\n test_baseline_metrics[accuracy_name]\n )\n if best_epoch > 0:\n test_accuracies_at_best_epoch_mean = np.mean(\n test_finetuning_metrics[f'epoch_{best_epoch}'][accuracy_name]\n )\n # Compute the number of clients whose test accuracy hurts after finetuning.\n num_total_test_clients = len(test_baseline_metrics[accuracy_name])\n num_test_clients_hurt_after_finetuning = 0\n if best_epoch > 0:\n for client_i in range(num_total_test_clients):\n finetuning_accuracy = test_finetuning_metrics[f'epoch_{best_epoch}'][\n accuracy_name\n ][client_i]\n baseline_accuracy = test_baseline_metrics[accuracy_name][client_i]\n if baseline_accuracy > finetuning_accuracy:\n num_test_clients_hurt_after_finetuning += 1\n fraction_clients_hurt = num_test_clients_hurt_after_finetuning / float(\n num_total_test_clients\n )\n # Create the postprocessed metrics dictionary.\n postprocessed_metrics = collections.OrderedDict()\n postprocessed_metrics[_BASELINE_METRICS] = collections.OrderedDict()\n postprocessed_metrics[_BASELINE_METRICS][f'valid_{accuracy_name}_mean'] = 
(\n np.mean(valid_baseline_metrics[accuracy_name])\n )\n postprocessed_metrics[_BASELINE_METRICS][f'test_{accuracy_name}_mean'] = (\n np.mean(test_baseline_metrics[accuracy_name])\n )\n postprocessed_metrics[_BASELINE_METRICS]['test_num_eval_examples_mean'] = (\n np.mean(test_baseline_metrics[_NUM_TEST_EXAMPLES])\n )\n postprocessed_metrics[_BASELINE_METRICS][\n 'test_num_finetune_examples_mean'\n ] = np.mean(test_finetuning_metrics[_NUM_FINETUNE_EXAMPLES])\n postprocessed_metrics[finetuning_fn_name] = collections.OrderedDict()\n postprocessed_metrics[finetuning_fn_name][\n 'best_finetuning_epoch'\n ] = best_epoch\n postprocessed_metrics[finetuning_fn_name][\n f'valid_{accuracy_name}_at_best_epoch_mean'\n ] = best_valid_accuracies_mean\n postprocessed_metrics[finetuning_fn_name][\n f'test_{accuracy_name}_at_best_epoch_mean'\n ] = test_accuracies_at_best_epoch_mean\n postprocessed_metrics[finetuning_fn_name][\n 'fraction_clients_hurt_at_best_epoch'\n ] = fraction_clients_hurt\n postprocessed_metrics[_RAW_METRICS_BEFORE_PROCESS] = collections.OrderedDict(\n valid=valid_metrics_dict, test=test_metrics_dict\n )\n return postprocessed_metrics", "def test(classifier, data, labels):\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": data},\n y=labels,\n num_epochs=1,\n shuffle=False)\n eval_results = classifier.evaluate(input_fn=eval_input_fn)\n eval_results[\"F-Score\"] = 2 * eval_results[\"precision\"] * eval_results[\"recall\"] / (eval_results[\"precision\"] + eval_results[\"recall\"])\n# print(eval_results)\n return eval_results", "def test_regression(test_data, model, criterion, batch_size, device, collate_fn=None): \n \n # Set model to evaluation mode\n model.eval()\n test_loss = 0\n\n # Create data loader\n data = DataLoader(test_data, batch_size=batch_size, collate_fn=collate_fn)\n \n # Iterate through data by batch of observations\n for feature, target_class in data:\n \n # Load data to specified device\n feature, target_class = feature.to(device), target_class.to(device)\n \n # Set no update to gradients\n with torch.no_grad():\n \n # Make predictions\n output = model(feature)\n \n # Calculate loss for given batch\n loss = criterion(output, target_class)\n \n # Calculate global loss\n test_loss += loss.item()\n \n return test_loss / len(test_data), np.sqrt(test_loss / len(test_data))", "def test_model(epoch):\n model.eval()\n test_metrics = {\"loss\": [], \"acc\": []}\n timer = Timer()\n for batch_i, (X, y) in enumerate(test_dataloader):\n batch_i += 1\n image_sequences = Variable(X.to(device), requires_grad=False)\n labels = Variable(y, requires_grad=False).to(device)\n\n with torch.no_grad():\n # Reset LSTM hidden state\n model.lstm.reset_hidden_state()\n # Get sequence predictions\n predictions = model(image_sequences)\n\n # Compute metrics\n loss = criterion(predictions, labels)\n acc = (predictions.detach().argmax(1) == labels).cpu().numpy().mean()\n\n # Keep track of loss and accuracy\n test_metrics[\"loss\"].append(loss.item())\n test_metrics[\"acc\"].append(acc)\n\n # Determine approximate time left\n batches_done = batch_i - 1\n batches_left = len(test_dataloader) - batches_done\n time_left = datetime.timedelta(seconds=batches_left * timer.seconds())\n time_iter = round(timer.seconds(), 3)\n timer.reset()\n\n # Log test performance\n logger.info(\n f'Testing - [Epoch: {epoch}/{cfg.train.num_epochs}] [Batch: {batch_i}/{len(test_dataloader)}] [Loss: {np.mean(test_metrics[\"loss\"]):.3f}] [Acc: {np.mean(test_metrics[\"acc\"]):.3f}] [ETA: {time_left}] [Iter time: 
{time_iter}s/it]'\n )\n\n writer.add_scalar(\"test/loss\", np.mean(test_metrics[\"loss\"]), epoch)\n writer.add_scalar(\"test/acc\", np.mean(test_metrics[\"acc\"]), epoch)\n\n model.train()", "def test(model, test_loader, device):\n model.eval()\n test_loss = 0\n accuracy = 0\n with torch.no_grad():\n for inputs, labels in test_loader:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n output = model.forward(inputs)\n\n # Calculate accuracy\n ps = torch.exp(output)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n\n print(f\"Accuracy on test set is: {accuracy/len(test_loader):.3f}\")", "def runtest(params_fc):\n\n npz = {}\n\n fold, cost = params_fc\n dset = np.load(DSET)\n\n idx_tr = dset['split'] != fold\n idx_ts = dset['split'] == fold\n\n lgt_tr, gt_tr = dset['wlogit'][idx_tr], dset['gt'][idx_tr]\n lgt_ts = dset['wlogit'][idx_ts]\n tinv = ut.calib(lgt_tr, gt_tr)\n npz['tinv'] = tinv\n\n wrank_tr, srank_tr = dset['wrank'][idx_tr], dset['srank'][idx_tr]\n wrank_ts, srank_ts = dset['wrank'][idx_ts], dset['srank'][idx_ts]\n\n entr_tr, entr_ts = ut.entropy(lgt_tr, tinv), ut.entropy(lgt_ts, tinv)\n wcost_tr, scost_tr = ut.cost(wrank_tr, srank_tr, cost)\n wcost_ts, scost_ts = ut.cost(wrank_ts, srank_ts, cost)\n npz['wcost_tr'], npz['scost_tr'] = wcost_tr, scost_tr\n npz['wcost_ts'], npz['scost_ts'] = wcost_ts, scost_ts\n npz['entr_tr'] = entr_tr\n\n rew_tr = wcost_tr - scost_tr\n mtog = po.fitmetric(entr_tr, rew_tr)\n npz['mtog'] = np.stack(mtog)\n\n npz['metric_tr'] = np.interp(entr_tr, *mtog)\n npz['metric_ts'] = np.interp(entr_ts, *mtog)\n\n np.savez_compressed(SPATH % (fold, cost), **npz)\n print(\"Completed fold %d, cost %d\" % (fold, cost))", "def test_faster_rcnn_train_one_epoch(config, dataset):\n writer = MagicMock()\n\n # XXX This is just a hot fix to prevent a mysterious folder such as:\n # <MagicMock name='mock.logdir' id='140420520377936'> showed up after\n # running this test.\n writer.logdir = tmp_name\n\n kfp_writer = MagicMock()\n checkpointer = MagicMock()\n estimator = FasterRCNN(\n config=config,\n writer=writer,\n checkpointer=checkpointer,\n kfp_writer=kfp_writer,\n logdir=\"/tmp\",\n no_cuda=True,\n )\n estimator.writer = writer\n estimator.kfp_writer = kfp_writer\n estimator.checkpointer = checkpointer\n estimator.device = torch.device(\"cpu\")\n train_dataset = dataset\n is_distributed = False\n train_sampler = FasterRCNN.create_sampler(\n is_distributed=is_distributed, dataset=train_dataset, is_train=True\n )\n train_loader = dataloader_creator(\n config, train_dataset, train_sampler, TRAIN, is_distributed\n )\n params = [p for p in estimator.model.parameters() if p.requires_grad]\n optimizer, lr_scheduler = FasterRCNN.create_optimizer_lrs(config, params)\n accumulation_steps = config.train.get(\"accumulation_steps\", 1)\n epoch = 1\n estimator.train_one_epoch(\n optimizer=optimizer,\n data_loader=train_loader,\n epoch=epoch,\n lr_scheduler=lr_scheduler,\n accumulation_steps=accumulation_steps,\n )\n writer.add_scalar.assert_called_with(\n \"training/lr\", config.optimizer.args.get(\"lr\"), epoch\n )", "def test(self, test_loader):\n\n self.model.eval()\n with torch.no_grad():\n return self.tester(test_loader, verbose=False)", "def test_model(model, dataloader, criterion, optimizer):\n if dataloader is None:\n return None\n\n since = time.time()\n\n # Final testing phase\n print('Testing')\n print('-' * 10)\n\n model.eval() # Set model to evaluate 
mode\n\n running_loss = 0.0\n running_corrects = 0\n\n # Iterate over data.f\n metric_pred, metric_true = [], []\n for inputs, labels in tqdm(dataloader,\"Testing\"):\n inputs = inputs.to(device).float()\n labels = labels.to(device).long()\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(False):\n # Get model outputs and calculate loss\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n\n _, preds = torch.max(outputs, 1)\n\n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n metric_pred.append(preds)\n metric_true.append(labels.data)\n\n test_loss = running_loss / len(dataloader.dataset)\n test_acc = running_corrects.double() / len(dataloader.dataset)\n\n print('{} Loss: {:.4f}, Acc: {:.4f}'.format(\"test\", test_loss, test_acc))\n\n print()\n\n time_elapsed = time.time() - since\n print('Testing complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n\n metric_true = torch.cat(metric_true, 0).cpu()\n metric_pred = torch.cat(metric_pred, 0).cpu()\n\n print(classification_report(metric_true,metric_pred,target_names=TARGET_NAMES,digits=3))\n\n cm = confusion_matrix(metric_true, metric_pred).astype(np.float32)\n for i in range(cm.shape[0]):\n _sum = sum(cm[i])\n for j in range(cm.shape[1]):\n cm[i, j] = cm[i, j] * 100 / _sum\n\n disp = ConfusionMatrixDisplay(cm, display_labels=TARGET_NAMES)\n disp.plot(xticks_rotation='vertical', cmap='Blues', values_format='.0f')\n\n results = classification_report(metric_true,metric_pred,target_names=TARGET_NAMES, output_dict=True, digits=5)\n return test_acc, results", "def test(self):\n for data_tier in self.data_tiers:\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.floor(tot*0.2))\n test_features = np.array(self.preprocessed_data[data_tier]['features'][p:])\n trend_test_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][p:])\n avg_test_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][p:])\n accuracy_trend = self.clf_trend[data_tier].score(test_features, trend_test_classifications)\n accuracy_avg = self.clf_avg[data_tier].score(test_features, avg_test_classifications)\n self.logger.info('The accuracy of %s trend classifier for data tier %s is %.3f', self.name, data_tier, accuracy_trend)\n self.logger.info('The accuracy of %s avg regressor for data tier %s is %.3f', self.name, data_tier, accuracy_avg)", "def Test(self):\n print('Testing:')\n # set mode eval\n torch.cuda.empty_cache()\n self.network.eval()\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='test', transforms = transform)\n test_loader = DataLoader(dataset,\n batch_size=params.test_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n # prepare test data\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0\n test_size = 1124\n if test_size % self.params.test_batch != 0:\n total_batch = test_size // self.params.test_batch + 1\n else:\n total_batch = test_size // self.params.test_batch\n\n # test for one epoch\n for batch_idx, batch in enumerate(test_loader):\n self.pb.click(batch_idx, total_batch)\n image, label, name = batch['image'], batch['label'], batch['label_name']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n pred = image_cuda\n pred = 
pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original%d.jpg\" % batch_idx, img_grid)\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Test_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original_label%d.png\" % batch_idx, label)\n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(\"\\t\")\n print(accuracy_new/total_batch)", "def test_train_with_dev_dataset(task_dataset, mocker):\n wandb_log = mocker.patch.object(wandb, 'log')\n\n probe = nn.Linear(N_DIMS_PER_REP, N_UNIQUE_FEATS)\n before = probe.weight.data.clone()\n\n learning.train(probe,\n task_dataset,\n dev_dataset=task_dataset,\n epochs=EPOCHS,\n also_log_to_wandb=True)\n after = probe.weight.data\n\n assert not before.equal(after)\n\n expected = []\n for _ in range(EPOCHS):\n expected.extend([mocker.call({'train loss': mocker.ANY})] *\n len(task_dataset))\n expected.extend([mocker.call({'dev loss': mocker.ANY})])\n assert wandb_log.call_args_list == expected", "def get_test_routine(\n self,\n ) -> Callable[\n [\n torch.utils.data.Dataset,\n argparse.Namespace,\n torch.nn.Module,\n Progress,\n TaskID,\n ],\n Tuple[Dict[str, float], pd.DataFrame],\n ]:\n pass", "def test_validation(data=None, weight_file=constants.TRAINED_WEIGHTS):\n # TODO(neuberg): Flesh out.", "def test_train_with_early_stopping_and_dev_dataset(task_dataset, mocker):\n wandb_log = mocker.patch.object(wandb, 'log')\n\n early_stopping = learning.EarlyStopping(patience=0)\n # Cannot possible go lower! 
So we should stop after PATIENCE steps.\n early_stopping(float('-inf'))\n\n probe = nn.Linear(N_DIMS_PER_REP, N_UNIQUE_FEATS)\n before = probe.weight.data.clone()\n\n learning.train(probe,\n task_dataset,\n dev_dataset=task_dataset,\n epochs=EPOCHS,\n stopper=early_stopping,\n also_log_to_wandb=True)\n after = probe.weight.data\n\n assert not before.equal(after)\n\n expected = [\n mocker.call({'train loss': mocker.ANY}),\n ] * len(task_dataset)\n expected.append(mocker.call({'dev loss': mocker.ANY}))\n assert wandb_log.call_args_list == expected", "def get_test(self, even=None):\n\n self.get_train(even)", "def test_eval(model, test_set):\n num_test_batch = len(test_set)\n test_loss = np.zeros((num_test_batch, 1), dtype=float)\n test_acc = np.zeros((num_test_batch, 1), dtype=float)\n for ibatch, batch in enumerate(test_set):\n result = model.test_on_batch({'input':batch[0]}, {'fp1':batch[1], 'fp2':batch[1], 'fp3':batch[1], 'ave':batch[1]})\n test_loss[ibatch] = result[0]\n test_acc[ibatch] = result[-1]\n return np.mean(test_loss), np.mean(test_acc)", "def testNN(self, nn, testLoader):\n self.nnMapping[nn].eval()\n # check if total prediction is correct\n correct = 0\n total = 0\n # check if each single prediction is correct\n singleCorrect = 0\n singleTotal = 0\n with torch.no_grad():\n for data, target in testLoader:\n output = self.nnMapping[nn](data.to(self.device))\n if target.shape == output.shape[:-1]:\n pred = output.argmax(dim=-1) # get the index of the max value\n elif target.shape == output.shape:\n pred = (output >= 0.5).int()\n else:\n print(f'Error: none considered case for output with shape {output.shape} v.s. label with shape {target.shape}')\n sys.exit()\n target = target.to(self.device).view_as(pred)\n correctionMatrix = (target.int() == pred.int()).view(target.shape[0], -1)\n correct += correctionMatrix.all(1).sum().item()\n total += target.shape[0]\n singleCorrect += correctionMatrix.sum().item()\n singleTotal += target.numel()\n\n accuracy = 100. * correct / total\n singleAccuracy = 100. 
* singleCorrect / singleTotal\n return accuracy, singleAccuracy", "def compute_test():\n model.eval()\n sets = list(features.keys())\n for dset, loaders in zip(sets, [train_loaders, val_loaders, test_loaders]):\n final_specific_loss = 0\n final_total_loss = 0\n for loader in loaders:\n loader_total_loss = 0\n loader_specific_loss = 0\n for data in loader:\n output = model(data.to(device))\n specific_loss = specific_loss_torch_geom(output, (data.pos, data.y),\n data.batch, batch_sizes[dset]).detach()\n loader_specific_loss += specific_loss\n loader_total_loss += torch.mean(specific_loss)\n # Average the loss over each loader\n loader_specific_loss /= len(loader)\n loader_total_loss /= len(loader)\n # Average the loss over the different loaders\n final_specific_loss += loader_specific_loss / len(loaders)\n final_total_loss += loader_total_loss / len(loaders)\n del output, loader_specific_loss\n\n print(\"Test set results \", dset, \": loss= {:.4f}\".format(final_total_loss))\n print(dset, \": \", final_specific_loss)\n print(\"Results in log scale\", np.log10(final_specific_loss.detach().cpu()),\n np.log10(final_total_loss.detach().cpu().numpy()))\n if args.wandb:\n wandb.run.summary[\"test results\"] = np.log10(final_specific_loss.detach().cpu())\n # free unnecessary data\n\n\n final_specific_numpy = np.log10(final_specific_loss.detach().cpu())\n del final_total_loss, final_specific_loss\n torch.cuda.empty_cache()\n return final_specific_numpy", "def test_dataset(self):\n self.assertIsInstance(self.dataset, LazyDataset)\n\n # Not loaded\n self.assertIsNone(self.dataset._training)\n self.assertIsNone(self.dataset._testing)\n self.assertIsNone(self.dataset._validation)\n self.assertFalse(self.dataset._loaded)\n self.assertFalse(self.dataset._loaded_validation)\n\n # Load\n try:\n self.dataset._load()\n except (EOFError, IOError):\n self.skipTest('Problem with connection. 
Try this test again later.')\n\n self.assertIsInstance(self.dataset.training, TriplesFactory)\n self.assertIsInstance(self.dataset.testing, TriplesFactory)\n self.assertTrue(self.dataset._loaded)\n\n if self.autoloaded_validation:\n self.assertTrue(self.dataset._loaded_validation)\n else:\n self.assertFalse(self.dataset._loaded_validation)\n self.dataset._load_validation()\n\n self.assertIsInstance(self.dataset.validation, TriplesFactory)\n\n self.assertIsNotNone(self.dataset._training)\n self.assertIsNotNone(self.dataset._testing)\n self.assertIsNotNone(self.dataset._validation)\n self.assertTrue(self.dataset._loaded)\n self.assertTrue(self.dataset._loaded_validation)\n\n self.assertEqual(self.dataset.num_entities, self.exp_num_entities)\n self.assertEqual(self.dataset.num_relations, self.exp_num_relations)\n\n num_triples = sum(\n triples_factory.num_triples for\n triples_factory in (self.dataset._training, self.dataset._testing, self.dataset._validation)\n )\n if self.exp_num_triples_tolerance is None:\n self.assertEqual(self.exp_num_triples, num_triples)\n else:\n self.assertAlmostEqual(self.exp_num_triples, num_triples, delta=self.exp_num_triples_tolerance)\n\n # Test caching\n start = timeit.default_timer()\n _ = self.dataset.training\n end = timeit.default_timer()\n # assert (end - start) < 1.0e-02\n self.assertAlmostEqual(start, end, delta=1.0e-02, msg='Caching should have made this operation fast')", "def test_classification(model, test_loader, criterion, cfg, file):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n model.eval()\n\n end = time.time()\n final_result = []\n\n with torch.no_grad():\n for step, (inputs, labels, ids, chunk_nb,\n split_nb) in enumerate(test_loader):\n data_time.update(time.time() - end)\n val_batch = inputs.cuda()\n val_label = labels.cuda()\n outputs = model(val_batch)\n loss = criterion(outputs, val_label)\n\n for i in range(outputs.size(0)):\n string = \"{} {} {} {} {}\\n\".format(ids[i], \\\n str(outputs.data[i].cpu().numpy().tolist()), \\\n str(int(labels[i].cpu().numpy())), \\\n str(int(chunk_nb[i].cpu().numpy())), \\\n str(int(split_nb[i].cpu().numpy())))\n final_result.append(string)\n\n prec1, prec5 = accuracy(outputs.data, val_label, topk=(1, 5))\n losses.update(loss.item(), val_batch.size(0))\n top1.update(prec1.item(), val_batch.size(0))\n top5.update(prec5.item(), val_batch.size(0))\n batch_time.update(time.time() - end)\n end = time.time()\n if step % cfg.CONFIG.LOG.DISPLAY_FREQ == 0 and cfg.DDP_CONFIG.GPU_WORLD_RANK == 0:\n print('----Testing----')\n print_string = 'Epoch: [{0}][{1}/{2}]'.format(\n 0, step + 1, len(test_loader))\n print(print_string)\n print_string = 'data_time: {data_time:.3f}, batch time: {batch_time:.3f}'.format(\n data_time=data_time.val, batch_time=batch_time.val)\n print(print_string)\n print_string = 'loss: {loss:.5f}'.format(loss=losses.avg)\n print(print_string)\n print_string = 'Top-1 accuracy: {top1_acc:.2f}%, Top-5 accuracy: {top5_acc:.2f}%'.format(\n top1_acc=top1.avg, top5_acc=top5.avg)\n print(print_string)\n if not os.path.exists(file):\n os.mknod(file)\n with open(file, 'w') as f:\n f.write(\"{}, {}\\n\".format(top1.avg, top5.avg))\n for line in final_result:\n f.write(line)", "def test_multiple_calls_to_fit(\n self,\n train_dataloader: DataLoader,\n model: ComposerModel,\n max_duration: Time[int],\n ):\n # Note that callbacks are tested seperately in tests/callbacks/test_callbacks.py\n # To ensure that they support 
multiple calls of Event.INIT and Event.FIT\n trainer = Trainer(\n model=model,\n max_duration=max_duration,\n train_dataloader=train_dataloader,\n )\n\n # Train once\n trainer.fit()\n\n # Train again.\n trainer.fit(duration=max_duration)\n\n assert trainer.state.timestamp.get(max_duration.unit) == 2 * max_duration", "def test_model(net, data_loader):\n net.eval()\n running_loss = 0.0\n with torch.no_grad():\n for data in data_loader:\n X = data['X']\n y_d = data['y_descreen']\n outputs = net(X)\n loss = criterion(outputs, y_d)\n running_loss += loss\n return running_loss", "def build_newstest_finetune(self):\n # Note that this function is purposefully similar to build_newscomment_only\n # The two datasets have very similar structure and it would just be more\n # confusing to refactor code, creating multiple overlapping paths.\n logging.info('Building newstest finetune dataset')\n logging.info(self.configs[NEWSTEST])\n builder = tfds.builder(WMT_BASE_DATASET_NAME,\n config=self.configs[NEWSTEST],\n data_dir=self.data_dir)\n self.default_builder_obj = builder\n shard_spec = self.build_shard_spec()\n logging.info('Training on TFDS dataset %s with split %s',\n WMT_BASE_DATASET_NAME, 'train' + shard_spec)\n train_data = builder.as_dataset(split='train' + shard_spec,\n shuffle_files=self.shuffle_train_files)\n eval_data = self.default_eval_builder(builder, shard_spec)\n return train_data, eval_data", "def test_step(s1_batch, s2_batch, y_batch, lables_batch, writer=None):\n feed_dict = {\n my_model.sentence_one_word: s1_batch,\n my_model.sentence_two_word: s2_batch,\n my_model.y_true: lables_batch,\n my_model.y: y_batch,\n my_model.is_train: False\n }\n step, summaries, loss, acc, f1, correct_num, y, yhat= sess.run(\n [global_step, dev_summary_op, my_model.loss, my_model.accuracy, my_model.F1, my_model.correct_num, my_model.y, my_model.yhat],\n feed_dict)\n # print(y)\n # print(final_output)\n time_str = datetime.datetime.now().isoformat()\n # print(\"loss and acc in test_batch:\")\n # print(\"{}: step {}, loss {:g}, mse {:g}, se {:g}\".format(time_str, step, loss, mse, se))\n if writer:\n writer.add_summary(summaries, step)\n return correct_num, y, yhat # y, final_output 计算整体的均方误差", "def test(model_params, dataset_test, testing_params, log_directory, device, cuda_available=True,\n metric_fns=None):\n # DATA LOADER\n test_loader = DataLoader(dataset_test, batch_size=testing_params[\"batch_size\"],\n shuffle=False, pin_memory=True,\n collate_fn=imed_loader_utils.imed_collate,\n num_workers=0)\n\n # LOAD TRAIN MODEL\n fname_model = os.path.join(log_directory, \"best_model.pt\")\n print('\\nLoading model: {}'.format(fname_model))\n model = torch.load(fname_model, map_location=device)\n if cuda_available:\n model.cuda()\n model.eval()\n\n # CREATE OUTPUT FOLDER\n path_3Dpred = os.path.join(log_directory, 'pred_masks')\n if not os.path.isdir(path_3Dpred):\n os.makedirs(path_3Dpred)\n\n # METRIC MANAGER\n metric_mgr = imed_metrics.MetricManager(metric_fns)\n\n # UNCERTAINTY SETTINGS\n if (testing_params['uncertainty']['epistemic'] or testing_params['uncertainty']['aleatoric']) and \\\n testing_params['uncertainty']['n_it'] > 0:\n n_monteCarlo = testing_params['uncertainty']['n_it']\n testing_params['uncertainty']['applied'] = True\n print('\\nComputing model uncertainty over {} iterations.'.format(n_monteCarlo))\n else:\n testing_params['uncertainty']['applied'] = False\n n_monteCarlo = 1\n\n for i_monteCarlo in range(n_monteCarlo):\n preds_npy, gt_npy = run_inference(test_loader, model, model_params, 
testing_params, path_3Dpred,\n cuda_available, i_monteCarlo)\n metric_mgr(preds_npy, gt_npy)\n\n # COMPUTE UNCERTAINTY MAPS\n if n_monteCarlo > 1:\n imed_utils.run_uncertainty(ifolder=path_3Dpred)\n\n metrics_dict = metric_mgr.get_results()\n metric_mgr.reset()\n print(metrics_dict)\n return metrics_dict", "def test(self) -> tf.contrib.data.Dataset:\n return self.__test_dataset", "def eval(model, loss_fn, dataloader, epoch):\n\n # Set the model into test mode\n model.eval()\n\n test_loss = 0\n correct = 0\n total = 0\n datacount = len(dataloader)\n \n # check global variable `best_accuracy`\n global best_accuracy\n\n with torch.no_grad():\n for batch_idx, (test_batch, labels_batch) in enumerate(dataloader):\n\n # move the data onto device\n test_batch, labels_batch = test_batch.to(device), labels_batch.to(device)\n\n # compute the model output\n outputs = model(test_batch)\n loss = loss_fn(outputs, labels_batch)\n \n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += labels_batch.size(0)\n correct += predicted.eq(labels_batch).sum().item()\n \n # log the test_loss\n writer.add_scalar('test/loss', test_loss/(batch_idx+1), (datacount * (epoch+1)) + (batch_idx+1))\n writer.add_scalar('test/accuracy', 100.*correct/total, (datacount * (epoch+1)) + (batch_idx+1))\n\n progress_bar(batch_idx, len(dataloader), 'Test Loss: %.3f | Test Acc: %.3f%% (%d/%d)'\n % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n current_loss = test_loss/len(dataloader)\n # save checkpoint\n acc = 100. * correct/total\n if acc > best_accuracy:\n print(\"Saving the model.....\")\n save_path = \"/home/htut/Desktop/Knowledge_Distillation_Pytorch/checkpoints/teachers/resnet/resnet50_acc:{:.3f}_loss:{:.3f}.pt\".format(acc, current_loss)\n torch.save(model.state_dict(), save_path)\n \n best_accuracy = acc", "def test(xtest, ytest, neural_net):\n loss, accuracy = neural_net.evaluate(xtest, ytest, verbose=0)\n return accuracy", "def train_ch6(net_fn, train_iter, test_iter, num_epochs, lr,\n device=d2l.try_gpu()):\n device_name = device._device_name\n strategy = tf.distribute.OneDeviceStrategy(device_name)\n with strategy.scope():\n optimizer = tf.keras.optimizers.SGD(learning_rate=lr)\n loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n net = net_fn()\n net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])\n callback = TrainCallback(net, train_iter, test_iter, num_epochs,\n device_name)\n net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback])\n return net", "def runner_decrator(cls):\n\n def custom_build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n \"\"\"\n Create evaluator(s) for a given dataset.\n This uses the special metadata \"evaluator_type\" associated with each builtin dataset.\n For your own dataset, you can simply create an evaluator manually in your\n script and do not have to worry about the hacky if-else logic here.\n \"\"\"\n dump_train = cfg.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)\n\n def custom_test_with_TTA(cls, cfg, model):\n # In the end of training, run an evaluation with TTA\n # Only support some R-CNN models.\n logger.info(\"Running inference with test-time augmentation ...\")\n model = GeneralizedRCNNWithTTA(cfg, model)\n res = cls.test(cfg, model, output_folder=os.path.join(cfg.OUTPUT_DIR, \"inference_TTA\"))\n res = OrderedDict({k + \"_TTA\": v for k, v in res.items()})\n return res\n\n cls.build_evaluator = 
classmethod(custom_build_evaluator)\n cls.test_with_TTA = classmethod(custom_test_with_TTA)\n\n return cls", "def test(model, dataloader):\n model.eval()\n device = model.device\n time_start = time.time()\n batch_time = 0.0\n accuracy = 0.0\n all_prob, all_labels = [], []\n \n with torch.no_grad():\n for (batch_seqs, batch_seq_masks, batch_seq_segments, batch_labels) in dataloader:\n batch_start = time.time()\n seqs = batch_seqs.to(device) \n masks = batch_seq_masks.to(device)\n segments = batch_seq_segments.to(device)\n labels = batch_labels.to(device)\n\n _, _, probabilities = model(seqs, masks, segments, labels)\n accuracy += correct_predictions(probabilities, labels)\n batch_time += time.time() - batch_start\n all_prob.extend(probabilities[:, 1].cpu().numpy())\n all_labels.extend(batch_labels)\n batch_time /= len(dataloader)\n total_time = time.time() - time_start\n accuracy /= (len(dataloader.dataset))\n return batch_time, total_time, accuracy, roc_auc_score(all_labels, all_prob)", "def test(model, data_loader, criterion, loss_vector, accuracy_vector,\n data_fraction=1, device='cpu'):\n n_trials_to_test = round(data_fraction * len(data_loader))\n # Set model to evaluation mode\n model.eval()\n # No need for gradient computations\n torch.no_grad()\n # Running collection of loss and outcome for each data sample\n val_loss, correct = 0, 0\n start_time = time.time()\n for i, (data, target) in enumerate(data_loader, 1):\n data = data.to(device)\n target = target.to(device)\n output = model(data)\n val_loss += criterion(output, target).data.item()\n pred = output.data.max(1)[1]\n correct += pred.eq(target.data).cpu().sum()\n if not i % 1000:\n elapsed_time = time.time() - start_time\n start_time = time.time()\n print(str(i) + ' out of ' + str(n_trials_to_test) + ' examples: '\n + str(elapsed_time) + ' seconds')\n if i >= n_trials_to_test:\n break\n\n val_loss /= n_trials_to_test\n loss_vector.append(val_loss)\n\n accuracy = correct.to(torch.float32) / n_trials_to_test\n accuracy_vector.append(accuracy)", "def evaluate_training_on_testing(net_name, dobj, dir_path, t_start, batch_size=32, generator=g.DataGeneratorMultInput ,testing_files=None, **kwargs):\n opt_arg, kwargs = filter_keys(evaluate_net_defaults(), kwargs)\n \n wiki_data = {}\n for k, v in opt_arg.items():\n wiki_data[k] = str(v)\n \n t_string = date_to_file_string(t_start)\n \n ###\n \n if testing_files == None:\n global testing_file_names\n testing_files = testing_file_names\n \n tmp_files = []\n \n for f in testing_files:\n if os.path.isfile(os.path.join(dir_path, f)):\n tmp_files.append(f)\n \n testing_files = tmp_files\n \n ###\n \n print(\"Now loading the last model\")\n \n net_last = keras.models.load_model(os.path.join(dir_path, net_name + '.hf5'), custom_objects=custom_layers.get_custom_objects())\n \n print(\"Now loading the best model\")\n \n #Load networks\n if not opt_arg['best_epoch'] == 0:\n net_best = keras.models.load_model(os.path.join(dir_path, net_name + '_epoch_' + str(opt_arg['best_epoch']) + '.hf5'), custom_objects=custom_layers.get_custom_objects())\n else:\n net_best = None\n \n print(\"Now getting the data\")\n \n #Run predict generator on the test data for each net.\n tmp_prediction_paths_last = []\n tmp_prediction_paths_best = []\n for f in testing_files:\n tmp_prediction_paths_last.append(os.path.join(dir_path, os.path.splitext(f)[0] + '_predictions_last.hf5'))\n if not net_best == None:\n tmp_prediction_paths_best.append(os.path.join(dir_path, os.path.splitext(f)[0] + '_predictions_best.hf5'))\n \n 
dobj.set_file_path(f)\n dobj.unload_all()\n #dobj.get_set()\n print(\"dobj.shape: {}\".format(dobj.shape))\n dobj.get_formatted_data('testing', 'test_data')\n dobj.get_formatted_data('testing', 'test_labels')\n dobj.get_formatted_data('testing', 'test_snr_calculated')\n \n store_test_results(net_last, dobj, tmp_prediction_paths_last[-1], batch_size=batch_size, generator=generator)\n if not net_best == None:\n store_test_results(net_best, dobj, tmp_prediction_paths_best, batch_size=batch_size, generator=generator)\n \n prediction_path_last = os.path.join(dir_path, net_name + '_predictions_last_epoch_full_testing_' + t_string + '.hf5')\n join_test_results(tmp_prediction_paths_last, prediction_path_last, delete_copied_files=True)\n prediction_path_best = ''\n if not net_best == None:\n prediction_path_best = os.path.join(dir_path, net_name + '_predictions_best_epoch_full_testing_' + t_string + '.hf5')\n join_test_results(tmp_prediction_paths_best, prediction_path_best, delete_copied_files=True)\n \n #Make SNR plots\n SNR_plot_path_last = os.path.join(dir_path, net_name + '_snr_plot_last_epoch_full_testing_' + t_string + '.png')\n \n plot_true_and_calc_from_file(prediction_path_last, dobj, SNR_plot_path_last, show=opt_arg['show_snr_plot'], net_name=net_name + ' last epoch')\n \n SNR_plot_path_best = ''\n \n if not net_best == None:\n SNR_plot_path_best = os.path.join(dir_path, net_name + '_snr_plot_best_epoch_full_testing_' + t_string + '.png')\n \n plot_true_and_calc_from_file(prediction_path_best, dobj, SNR_plot_path_best, show=opt_arg['show_snr_plot'], net_name=net_name + ' best epoch')\n \n #Make false alarm plots\n false_alarm_plot_path_last = os.path.join(dir_path, net_name + '_false_alarm_plot_last_epoch_full_testing_' + t_string + '.png')\n \n tmp_false_alarm_path_last = plot_false_alarm(dobj, prediction_path_last, false_alarm_plot_path_last, show=opt_arg['show_false_alarm'])\n \n false_alarm_plot_prob_path_last = os.path.join(dir_path, net_name + '_false_alarm_plot_prob_last_epoch_full_testing_' + t_string + '.png')\n \n tmp_false_alarm_prob_path_last = plot_false_alarm_prob(dobj, prediction_path_last, false_alarm_plot_prob_path_last, show=opt_arg['show_false_alarm'])\n \n false_alarm_plot_path_best = ''\n \n false_alarm_plot_prob_path_best = ''\n \n tmp_false_alarm_path_best = ''\n \n tmp_false_alarm_prob_path_best = ''\n \n if not net_best == None:\n false_alarm_plot_path_best = os.path.join(dir_path, net_name + '_false_alarm_plot_best_epoch_full_testing_' + t_string + '.png')\n \n false_alarm_plot_prob_path_best = os.path.join(dir_path, net_name + '_false_alarm_plot_prob_best_epoch_full_testing_' + t_string + '.png')\n \n tmp_false_alarm_path_best = plot_false_alarm(dobj, prediction_path_best, false_alarm_plot_path_best, show=opt_arg['show_false_alarm'])\n \n tmp_false_alarm_prob_path_best = plot_false_alarm_prob(dobj, prediction_path_best, false_alarm_plot_prob_path_best, show=opt_arg['show_false_alarm'])\n \n #Make sensitivity plots\n snr_range = dobj.get_file_properties()['snr']\n \n sensitivity_plot_path_last = os.path.join(dir_path, net_name + '_sensitivity_plot_last_epoch_full_testing_' + t_string + '.png')\n \n sensitivity_plot_prob_path_last = os.path.join(dir_path, net_name + '_sensitivity_plot_prob_last_epoch_full_testing_' + t_string + '.png')\n \n plot_sensitivity(dobj, prediction_path_last, tmp_false_alarm_path_last, sensitivity_plot_path_last, bins=(snr_range[0]+1, snr_range[1], 1), show=opt_arg['show_sensitivity_plot'])\n \n 
plot_sensitivity_prob_from_pred_file(prediction_path_last, sensitivity_plot_prob_path_last, bins=(snr_range[0]+1, snr_range[1], 1))\n #plot_sensitivity_prob(dobj, prediction_path_last, tmp_false_alarm_prob_path_last, sensitivity_plot_prob_path_last, show=opt_arg['show_sensitivity_plot'])\n \n sensitivity_plot_path_best = ''\n \n sensitivity_plot_prob_path_best = ''\n \n if not net_best == None:\n sensitivity_plot_path_best = os.path.join(dir_path, net_name + '_sensitivity_plot_best_epoch_full_testing_' + t_string + '.png')\n \n sensitivity_plot_prob_path_best = os.path.join(dir_path, net_name + '_sensitivity_plot_prob_best_epoch_full_testing_' + t_string + '.png')\n \n plot_sensitivity(dobj, prediction_path_best, tmp_false_alarm_path_best, sensitivity_plot_path_best, bins=(snr_range[0], snr_range[1], 1), show=opt_arg['show_sensitivity_plot'])\n \n plot_sensitivity_prob_from_pred_file(prediction_path_best, sensitivity_plot_prob_path_best, bins=(snr_range[0]+1, snr_range[1], 1))\n #plot_sensitivity_prob(dobj, prediction_path_best, tmp_false_alarm_prob_path_best, sensitivity_plot_prob_path_best, show=opt_arg['show_sensitivity_plot'])\n \n return((SNR_plot_path_last, false_alarm_plot_path_last, false_alarm_plot_prob_path_last, sensitivity_plot_path_last, sensitivity_plot_prob_path_last, SNR_plot_path_best, false_alarm_plot_path_best, false_alarm_plot_prob_path_best, sensitivity_plot_path_best, sensitivity_plot_prob_path_best))", "def train_and_evaluate(model, train_data, val_data, optimizer, scheduler, params, model_dir, restore_dir=None):\n # reload weights from restore_dir if specified\n if restore_dir is not None:\n model = BertForSequenceTagging.from_pretrained(tagger_model_dir)\n \n best_val_f1 = 0.0\n patience_counter = 0\n\n for epoch in range(1, params.epoch_num + 1):\n # Run one epoch\n logging.info(\"Epoch {}/{}\".format(epoch, params.epoch_num))\n\n # Compute number of batches in one epoch\n params.train_steps = params.train_size // params.batch_size\n params.val_steps = params.val_size // params.batch_size\n\n # data iterator for training\n train_data_iterator = data_loader.data_iterator(train_data, shuffle=True)\n\n # Train for one epoch on training set\n train_epoch(model, train_data_iterator, optimizer, scheduler, params)\n\n # data iterator for evaluation\n # train_data_iterator = data_loader.data_iterator(train_data, shuffle=False)\n val_data_iterator = data_loader.data_iterator(val_data, shuffle=False)\n\n # Evaluate for one epoch on training set and validation set\n # params.eval_steps = params.train_steps\n # train_metrics = evaluate(model, train_data_iterator, params, mark='Train') # callback train f1\n params.eval_steps = params.val_steps\n val_metrics = evaluate(model, val_data_iterator, params, mark='Val')\n \n val_f1 = val_metrics['f1']\n improve_f1 = val_f1 - best_val_f1\n if improve_f1 > 1e-5: \n logging.info(\"- Found new best F1\")\n best_val_f1 = val_f1\n model.save_pretrained(model_dir)\n if improve_f1 < params.patience:\n patience_counter += 1\n else:\n patience_counter = 0\n else:\n patience_counter += 1\n\n # Early stopping and logging best f1\n if (patience_counter >= params.patience_num and epoch > params.min_epoch_num) or epoch == params.epoch_num:\n logging.info(\"Best val f1: {:05.2f}\".format(best_val_f1))\n break", "def test(self, model, dl_test, test_verbose=True, return_acc=True):\n\n loss_test = []\n acc_test = []\n for batch_idx, batch in enumerate(dl_test):\n model.eval()\n with torch.no_grad():\n loss, acc = model.test_step(batch, batch_idx)\n 
loss_test.append(loss.item())\n acc_test.append(acc)\n\n avg_loss_test = round(sum(loss_test) / len(loss_test), 2)\n avg_acc_test = round(sum(acc_test) / len(acc_test), 2)\n if test_verbose:\n print(f\"loss_test={avg_loss_test}\\t acc_test={avg_acc_test}\")\n if return_acc:\n return avg_acc_test", "def get_validation_fn(\n test_dataset: tf.data.Dataset,\n model_fn: Callable[[], tf.keras.models.Model],\n loss_fn: Callable[[], tf.keras.losses.Loss],\n metrics_fn: Callable[[], tf.keras.metrics.Metric],\n) -> Callable[[], tf.data.Dataset]:\n\n def compiled_model() -> tf.keras.Model:\n val_model = model_fn()\n val_model.compile(\n loss=loss_fn(), optimizer=tf.keras.optimizers.Adam(), metrics=metrics_fn()\n )\n return val_model\n\n test_dataset = _convert_fn(test_dataset)\n\n def validation_fn(\n trained_model: tff.learning.Model,\n ) -> Callable[[], tf.data.Dataset]:\n val_model = compiled_model()\n trained_model_weights = tff.learning.ModelWeights(\n trainable=list(trained_model.trainable),\n non_trainable=list(trained_model.non_trainable),\n )\n\n trained_model_weights.assign_weights_to(val_model)\n metrics = val_model.evaluate(test_dataset, verbose=0)\n return dict(\n zip(val_model.metrics_names, val_model.evaluate(test_dataset, verbose=0))\n )\n\n return validation_fn", "def finetune(ft_ds, model, task, epochs=10, eval_ds=None):\n\n print('==========FINETUNE==========')\n\n # Filter out undesired examples with excluded_label\n ds = ft_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.finetune_preprocess)\n ds = ds.shuffle(1000)\n ds = ds.batch(FLAGS.finetune_bs)\n\n # loss, metrics, optimizers\n train_loss= tf.keras.metrics.Mean(name='train_loss')\n train_sup_acc = tf.keras.metrics.Accuracy(name='train_supervised_accuracy')\n criterion_sup = tf.nn.softmax_cross_entropy_with_logits \n optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) \n for epoch in range(epochs): \n train_loss.reset_states()\n train_sup_acc.reset_states()\n for x in ds:\n with tf.GradientTape() as tape:\n image = x['image']\n labels = x[task['name']]\n out = model(image, mode='supervised', sup_layers=1, training=True)\n # print(tf.math.argmax(out, axis=-1))\n metrics.update_supervised_accuracy(train_sup_acc, labels, out)\n loss = criterion_sup(tf.one_hot(labels, depth=task['num_classes']), out)\n loss = tf.math.reduce_mean(loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(\n filter(lambda gv: gv[0] is not None, zip(gradients, model.trainable_variables))\n )\n train_loss.update_state(loss)\n print('supervised loss')\n print(train_loss.result())\n print('supervised accuracy')\n print(train_sup_acc.result())\n\n # Evaluate results on eval_ds if possible\n if eval_ds: \n evaluate(eval_ds, model, task)", "def test_generator(self, test_generator, dimension_test):\n return self.model.evaluate_generator(generator=test_generator, steps=dimension_test//self.batch_size,\n max_queue_size=10, workers=1, use_multiprocessing=True, verbose=1)", "def test_overfit(model, train, FLAGS):\n epochs = 100\n test_size = 32\n steps_per_epoch = 10\n train.question, train.paragraph, train.question_length, train.paragraph_length, train.answer = train[:test_size]\n with tf.Session() as session:\n session.run(tf.global_variables_initializer())\n for epoch in range(epochs):\n epoch_start = timer()\n for step in range(steps_per_epoch):\n feed_dict = model.fill_feed_dict(*train[:test_size], is_training=True)\n fetch_dict = {\n 'step': tf.train.get_global_step(),\n 'loss': 
model.loss,\n 'train': model.train\n }\n result = session.run(fetch_dict, feed_dict)\n loss = result['loss']\n\n if (step == 0 and epoch == 0):\n print(f'Entropy - Result: {loss:.2f}, Expected (approx.): {2*np.log(FLAGS.max_paragraph_length):.2f}')\n if step == steps_per_epoch-1:\n print(f'Cross entropy: {loss:.2f}')\n train.length = test_size\n prediction, truth = multibatch_prediction_truth(session, model, train, 1)\n overfit_f1 = f1(prediction, truth)\n print(f'F1: {overfit_f1:.2f}')\n global_step = tf.train.get_global_step().eval()\n print(f'Epoch took {timer() - epoch_start:.2f} s (step: {global_step})')", "def test(self, test=False): \n if test == True:\n if os.path.exists(self.student_save_path):\n checkpoint = torch.load(self.student_save_path, map_location=self.device)\n else:\n raise ValueError('No file with the pretrained model selected')\n\n self.student_model.load_state_dict(checkpoint)\n self.student_model.eval()\n\n running_acc = 0\n with torch.no_grad():\n for data, label in self.testloader:\n data, label = data.to(self.device), label.to(self.device)\n\n student_logits, *student_activations = self.student_model(data)\n\n running_acc += utils.accuracy(student_logits.data, label)\n\n print(f\"Test accuracy: {running_acc / len(self.testloader)}\")\n return running_acc / len(self.testloader)", "def batch_fit(self, train_loader: torch.utils.data.DataLoader,\n test_loader: torch.utils.data.DataLoader,\n train_size: int, test_size: int, epochs: int = 1,\n calc_mapk: bool = True):\n\n for epoch in range(epochs):\n stats = {'epoch': epoch+1}\n\n print('Training begins...')\n train_loss = self._training(train_loader, train_size)\n stats['train_loss'] = train_loss\n\n print('Validation begins...')\n if calc_mapk:\n print('validation with mapk')\n val_loss, val_mapk = self._validation(\n test_loader, test_size, calc_mapk)\n stats['val_mapk'] = val_mapk\n else:\n print('validation without mapk')\n val_loss = self._validation(\n test_loader, test_size, calc_mapk)\n stats['val_loss'] = val_loss\n print(stats)\n\n self.metrics.append(stats)", "def train_and_evaluate(model, train_dataloader, dev_dataloader, optimizer, loss_fn, metrics, incorrect, params, model_dir,\n restore_file=None):\n # reload weights from restore_file if specified\n if restore_file is not None:\n restore_path = os.path.join(args.model_dir, args.restore_file + '.pth.tar')\n logging.info(\"Restoring parameters from {}\".format(restore_path))\n utils.load_checkpoint(restore_path, model, optimizer)\n\n best_dev_acc = 0.0\n\n for epoch in range(params.num_epochs):\n # Run one epoch\n # if (epoch) % params.save_summary_steps == 0:\n logging.info(\"Epoch {}/{}\".format(epoch + 1, params.num_epochs))\n\n # compute number of batches in one epoch (one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, metrics, params)\n\n # Evaluate for one epoch on validation set\n dev_metrics, incorrect_samples = evaluate(model, loss_fn, dev_dataloader, metrics, incorrect, params)\n\n dev_acc = dev_metrics['accuracy']\n is_best = dev_acc >= best_dev_acc\n\n # Save weights\n utils.save_checkpoint({'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'optim_dict' : optimizer.state_dict()},\n is_best=is_best,\n checkpoint=model_dir)\n\n # If best_eval, best_save_path\n if is_best:\n logging.info(\"- Found new best accuracy\")\n best_dev_acc = dev_acc\n\n # Save best val metrics in a json file in the model directory\n best_json_path = os.path.join(model_dir, \"metrics_dev_best_weights.json\")\n 
utils.save_dict_to_json(dev_metrics, best_json_path)\n\n best_csv_path = os.path.join(model_dir, \"incorrect_best_samples.csv\")\n utils.save_incorrect_to_csv(incorrect_samples, best_csv_path)\n\n # Save latest val metrics in a json file in the model directory\n last_json_path = os.path.join(model_dir, \"metrics_dev_last_weights.json\")\n utils.save_dict_to_json(dev_metrics, last_json_path)\n\n last_csv_path = os.path.join(model_dir, \"incorrect_last_samples.csv\")\n utils.save_incorrect_to_csv(incorrect_samples, last_csv_path)" ]
[ "0.6524617", "0.6314485", "0.63126904", "0.62246484", "0.61073464", "0.60626286", "0.60052073", "0.5928098", "0.5889206", "0.58809704", "0.586239", "0.5848877", "0.58438563", "0.58372283", "0.5764127", "0.5758687", "0.5740815", "0.5740264", "0.5721871", "0.5714371", "0.57139164", "0.57102334", "0.5700626", "0.5679571", "0.56786263", "0.56668127", "0.56589437", "0.5631658", "0.5620171", "0.5613468", "0.5609307", "0.5606436", "0.5605758", "0.5604223", "0.55923575", "0.55890775", "0.5585175", "0.557004", "0.5554949", "0.5536652", "0.5535005", "0.5531697", "0.55309415", "0.5526756", "0.5521458", "0.5516659", "0.55063665", "0.549519", "0.5487147", "0.5484253", "0.54822093", "0.54812014", "0.5481115", "0.547708", "0.546017", "0.5460021", "0.54567635", "0.5450717", "0.5445845", "0.54429823", "0.5439071", "0.54380655", "0.5427044", "0.5425412", "0.5424052", "0.54175705", "0.539225", "0.53873765", "0.53848207", "0.53787816", "0.5372253", "0.53701013", "0.53677976", "0.5363462", "0.53501266", "0.5343621", "0.5337888", "0.5335896", "0.53357", "0.53327364", "0.5330385", "0.5329867", "0.5327578", "0.53270596", "0.5326907", "0.53113955", "0.52982277", "0.52942514", "0.5290682", "0.5285304", "0.52848303", "0.5281094", "0.52808064", "0.5277496", "0.52730554", "0.5272435", "0.52705044", "0.52696675", "0.52687544", "0.52678084" ]
0.6274846
3
This creates the basic model, which needs only few adjustements by the other configurations, in order to work for other datasets and/or detection neural networks.
def base_model_config(): BASE_CONFIG_FILENAME = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs", "base_config.json") with open(BASE_CONFIG_FILENAME, 'r') as fp: cfg = edict(json.load(fp, encoding="utf8")) return cfg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['D_adv','D_cls', 'G_A','G_B', 'cycle_A','G_adv','reg','idt']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n visual_names_A = ['real_A','A','mask_A', 'fake_B','B','mask_B', 'rec_A']\n #visual_names_B = ['real_B', 'fake_A', 'rec_B']\n # if identity loss is used, we also visualize idt_B=G_A(B) ad idt_A=G_A(B)\n # if self.isTrain and self.opt.lambda_identity > 0.0:\n # visual_names_A.append('idt_B')\n # #visual_names_B.append('idt_A')\n\n # combine visualizations for A and B\n self.visual_names = visual_names_A #+ visual_names_B\n # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>.\n \n\n # define networks (both Generators and discriminators)\n # The naming is different from those used in the paper.\n # Code (vs. paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n self.netG_A=[]\n self.netG_B=[]\n self.netG_Amask=[]\n self.netG_Bmask=[]\n if self.isTrain:\n self.model_names += ['G_A', 'G_Amask', 'G_B', 'G_Bmask', 'D', 'Dadv']\n else: # during test time, only load Gs\n self.model_names = ['G_A', 'G_Amask', 'G_B', 'G_Bmask']\n for i in range(opt.num_class):\n tG_A, tG_Amask = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n \n self.netG_A.append(tG_A)\n self.netG_Amask.append(tG_Amask)\n tG_B, tG_Bmask = networks.define_G(opt.output_nc, opt.input_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netG_B.append(tG_B)\n self.netG_Bmask.append(tG_Bmask)\n\n self.netD= networks.define_D(opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids,opt.num_class)\n self.netDadv = networks.define_D(opt.output_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids, 1)\n \n\n if self.isTrain:\n if opt.lambda_identity > 0.0: # only works when input and output images have the same number of channels\n assert(opt.input_nc == opt.output_nc)\n # create image buffer to store previously generated images\n # self.fake_A_pool = ImagePool(opt.pool_size)\n # create image buffer to store previously generated images\n # self.fake_B_pool = ImagePool(opt.pool_size)\n # define loss functions\n # define GAN loss.\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionGAN_D = networks.GANLoss('multi-label').to(self.device)\n self.criterionCycle = torch.nn.L1Loss()\n self.criterionIdt = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizers_G=[]\n for i in range(opt.num_class):\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_A[i].parameters(\n ), self.netG_B[i].parameters()), lr=opt.lr, betas=(opt.beta1, 0.999)) \n self.optimizers_G.append(self.optimizer_G)\n \n self.optimizer_D = torch.optim.Adam(self.netD.parameters(\n ), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers=self.optimizers_G+[self.optimizer_D]", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n # specify the training losses you want to print out. 
The training/test scripts will call <BaseModel.get_current_losses>\n self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']\n # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>\n self.visual_names = ['real_A', 'fake_B', 'real_B']\n # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>\n if self.isTrain:\n self.model_names = ['G', 'D']\n else: # during test time, only load G\n self.model_names = ['G']\n\n # Set TPN_enabled to true if opt.TPN is defined\n if opt.TPN:\n self.TPN_enabled = True\n else:\n self.TPN_enabled = False\n\n # Conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc\n discr_input_nc = opt.input_nc + opt.output_nc\n\n # If TPN is enabled, switch to the U-Net with TPN architecture\n if self.TPN_enabled:\n opt.netG = 'unet_256_TPN'\n discr_input_nc +=1 # Additional Channel for Time Input\n\n # define networks (both generator and discriminator)\n self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.isTrain: # define a discriminator; \n self.netD = networks.define_D(discr_input_nc, opt.ndf, opt.netD,\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if self.TPN_enabled:\n self.loss_names = ['G_GAN', 'G_L1', 'G_TPN', 'D_real', 'D_fake']\n\n # Store final gamma value and then set it to 0\n self.final_gamma = deepcopy(opt.gamma)\n opt.gamma = 0\n\n # Initiliaze m and c to None\n self.update_m = None\n self.update_c = None\n\n # Setup TPN if set to True\n print(\"\\nSetting up TPN\\n\")\n opt_TPN = deepcopy(opt) # copy train options and change later\n opt_TPN.model = 'time_predictor'\n opt_TPN.name = opt.TPN\n opt_TPN.netD = 'time_input'\n opt_TPN.ndf = 16 # Change depending on the ndf size used with the TPN model specified\n # hard-code some parameters for TPN test phase\n opt_TPN.display_id = -1 # no visdom display;\n opt_TPN.isTrain = False\n print(\"Options TPN: {}\\n\\n\".format(opt_TPN))\n self.TPN = create_model(opt_TPN) # create a model given opt_TPN.model and other options\n self.TPN.setup(opt_TPN) # regular setup: load\n\n if self.isTrain:\n # define loss functions\n self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)\n self.criterionL1 = torch.nn.L1Loss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n # Check if lambda_L2 is in range [0,1]\n assert (0 <= self.opt.lambda_L2 <= 1)", "def build_model(self):\n self.g12 = G12(conv_dim=self.g_conv_dim)\n init_weights(self.g12, init_type='normal')\n self.g21 = G21(conv_dim=self.g_conv_dim)\n init_weights(self.g21, init_type='normal')\n self.d1 = D1(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d1, init_type='normal')\n self.d2 = D2(conv_dim=self.d_conv_dim, use_labels=self.use_labels)\n init_weights(self.d2, init_type='normal')\n self.dreid = DSiamese(class_count=self.num_classes_market)\n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + 
list(self.d2.parameters())\n dr_params = list(self.dreid.parameters())\n\n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n self.dr_optimizer = optim.Adam(dr_params, self.lr, [self.beta1, self.beta2])\n\n if torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()\n self.dreid.cuda()", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)", "def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, map_location=device))", "def build_model(self):\r\n self.images, self.labels = self.dataloader.get_model_inputs()\r\n\r\n model = SimpleModel(self.images, self.labels, output_dim=F.output_dim, scope='source_regressor')\r\n self.out, _ = model.get_model()\r\n self.get_loss()", "def build_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 
'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model.summary()\n return model", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def build_model():", "def create_model():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--DISC_LR', type=float, default=1e-4)\r\n parser.add_argument('--GEN_LR', type=float, default=1e-3)\r\n parser.add_argument('--GEN_BETA1', type=float, default=0.9)\r\n parser.add_argument('--GEN_BETA2', type=float, default=0.999)\r\n parser.add_argument('--IMAGE_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_SIZE', type=int, default=None)\r\n parser.add_argument('--QUES_EMBED_SIZE', type=int, default=2048)\r\n parser.add_argument('--WORD_EMBED_SIZE', type=int, default=512)\r\n parser.add_argument('--VOCAB_SIZE', type=int, default=1004)\r\n args, task_args = parser.parse_known_args()\r\n override_if_not_in_args('--max_steps', '1000', task_args)\r\n override_if_not_in_args('--batch_size', '64', task_args)\r\n override_if_not_in_args('--eval_set_size', '370', task_args)\r\n override_if_not_in_args('--eval_interval_secs', '2', task_args)\r\n override_if_not_in_args('--log_interval_secs', '2', task_args)\r\n override_if_not_in_args('--min_train_eval_rate', '2', task_args)\r\n\r\n return Model(args.DISC_LR, args.GEN_LR, args.GEN_BETA1, args.GEN_BETA2,\r\n args.IMAGE_SIZE, args.QUES_EMBED_SIZE, args.WORD_EMBED_SIZE,\r\n args.QUES_SIZE, args.VOCAB_SIZE), task_args", "def model_setup(self):\n self.DNN = DgganMLP(self.settings.hidden_size)\n self.D = DgganMLP(self.settings.hidden_size)\n self.G = Generator(self.settings.hidden_size)", 
"def main():\n logger.info(\"=> creating model ...\")\n logger.info(\"Classes: %s\", cfg.classes)\n\n value_scale = 255\n mean = [0.485, 0.456, 0.406]\n mean = [item * value_scale for item in mean]\n std = [0.229, 0.224, 0.225]\n std = [item * value_scale for item in std]\n gray_folder = os.path.join(cfg.result_path, 'gray')\n color_folder = os.path.join(cfg.result_path, 'color')\n\n test_transform = pt_transform.Compose([pt_transform.Normalize(mean=mean, std=std, is_train=False)])\n\n if cfg.data_root[-1] == \"/\":\n val_list = cfg.data_root + cfg.val_list\n else:\n val_list = cfg.data_root + '/' + cfg.val_list\n\n test_data = pt_dataset.SemData(\n split='val', data_root=cfg.data_root,\n data_list=val_list,\n transform=test_transform)\n\n test_loader = ds.GeneratorDataset(test_data, column_names=[\"data\", \"label\"],\n shuffle=False)\n test_loader.batch(1)\n colors = numpy.loadtxt(cfg.color_txt).astype('uint8')\n\n from src.model import cpnet\n\n CPNet = cpnet.CPNet(\n prior_channels=256,\n proir__size=60,\n am_kernel_size=11,\n pretrained=True,\n pretrained_path=cfg.pretrain_path,\n deep_base=True\n )\n\n ms_checkpoint = load_checkpoint(cfg.ckpt)\n load_param_into_net(CPNet, ms_checkpoint, strict_load=True)\n CPNet.set_train(False)\n test(test_loader, test_data.data_list, CPNet, cfg.classes, mean, std, cfg.base_size, cfg.test_h,\n cfg.test_w, cfg.scales, gray_folder, color_folder, colors)\n if cfg.split != 'test':\n cal_acc(test_data.data_list, gray_folder, cfg.classes)", "def eval_model(config):\n print 'Model directory: %s' % config.model_output\n print 'Running model: %s' % config.model_type\n if config.model_type == 'fully_connected_mlp':\n from models.fully_connected_mlp import model_struct\n elif config.model_type == 'fully_connected_mlp_2l':\n from models.fully_connected_mlp_2l import model_struct\n elif config.model_type == 'fully_connected_conv':\n from models.fully_connected_conv import model_struct\n elif config.model_type == 'vgg_feature_model':\n from models.vgg_feature_model import model_struct\n else:\n raise Exception\n\n # Prepare model training\n dt_stamp = re.split(\n '\\.', str(datetime.now()))[0].\\\n replace(' ', '_').replace(':', '_').replace('-', '_')\n dt_dataset = config.model_type + '_' + dt_stamp + '/'\n config.train_checkpoint = os.path.join(\n config.model_output, dt_dataset) # timestamp this run\n config.summary_dir = os.path.join(\n config.train_summaries, config.model_output, dt_dataset)\n dir_list = [config.train_checkpoint, config.summary_dir]\n [make_dir(d) for d in dir_list]\n\n # Prepare model inputs\n train_data = os.path.join(config.tfrecord_dir, 'train.tfrecords')\n validation_data = os.path.join(config.tfrecord_dir, 'val.tfrecords')\n feat_mean = 0 # np.mean(np.load(config.mean_file)['feat_list'])\n\n # Prepare data on CPU\n with tf.device('/cpu:0'):\n train_images, train_labels = inputs(\n tfrecord_file=train_data,\n batch_size=config.train_batch,\n num_feats=config.n_features,\n sample=config.sample['train'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n feat_mean_value=feat_mean)\n val_images, val_labels = inputs(\n tfrecord_file=validation_data,\n batch_size=1,\n num_feats=config.n_features,\n sample=config.sample['val'],\n max_pixels_per_image=config.max_pixels_per_image,\n im_size=config.resize,\n model_input_shape=config.resize,\n train=config.data_augmentations,\n num_epochs=config.epochs,\n 
feat_mean_value=feat_mean)\n tf.summary.image('validation images', tf.cast(val_labels, tf.float32))\n\n # Prepare model on GPU\n with tf.device('/gpu:0'):\n with tf.variable_scope('cnn') as scope:\n\n model = model_struct()\n train_mode = tf.get_variable(name='training', initializer=True)\n model.build(\n features=train_images,\n output_categories=len(config.labels.keys()),\n train_mode=train_mode, batchnorm=config.batch_norm)\n\n # Prepare the cost function\n cost = softmax_cost(\n model.res_logits, train_labels, ratio=config.ratio,\n label_reshape=[\n config.batch_size * config.max_pixels_per_image])\n train_op = tf.train.AdamOptimizer(config.lr).minimize(cost)\n\n tf.summary.scalar(\"cost\", cost)\n\n train_score = correlation(\n model.prob, train_labels) # training accuracy\n tf.summary.scalar(\"training correlation\", train_score)\n\n # Setup validation op\n if validation_data is not False:\n scope.reuse_variables()\n # Validation graph is the same as training except no batchnorm\n val_model = model_struct()\n val_model.build(\n features=val_images,\n output_categories=len(config.labels.keys()))\n\n # Calculate validation accuracy\n val_pred = tf.cast(\n tf.reshape(\n tf.argmax(\n val_model.prob, axis=1),\n [1, config.resize[0], config.resize[1], 1]),\n tf.float32)\n tf.summary.image('validation prediction', val_pred)\n val_score = correlation(\n val_model.prob, tf.reshape(\n val_labels, [np.prod(config.resize), 1]))\n tf.summary.scalar(\"validation correlation\", val_score)\n\n # Set up summaries and saver\n saver = tf.train.Saver(\n tf.global_variables(), max_to_keep=config.keep_checkpoints)\n summary_op = tf.summary.merge_all()\n\n # Initialize the graph\n sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))\n\n # Need to initialize both of these if supplying num_epochs to inputs\n sess.run(tf.group(tf.global_variables_initializer(),\n tf.local_variables_initializer()))\n summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph)\n\n # Set up exemplar threading\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Start training loop\n np.save(config.train_checkpoint, config)\n step, val_max, losses = 0, 0, []\n train_acc = 0\n try:\n while not coord.should_stop():\n start_time = time.time()\n _, loss_value, train_acc = sess.run([train_op, cost, train_score])\n losses.append(loss_value)\n duration = time.time() - start_time\n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n\n if step % 100 == 0 and step % 10 == 0:\n if validation_data is not False:\n _, val_acc, val_frame = sess.run(\n [train_op, val_score, val_pred])\n\n np.save(\n os.path.join(\n config.model_output, '%s_val_image' % step),\n val_frame)\n else:\n val_acc = -1 # Store every checkpoint\n\n # Summaries\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Training status and validation accuracy\n format_str = (\n '%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training r = %s | '\n 'Validation r = %s | logdir = %s')\n print (format_str % (\n datetime.now(), step, loss_value,\n config.train_batch / duration, float(duration),\n train_acc, val_acc, config.summary_dir))\n\n # Save the model checkpoint if it's the best yet\n if val_acc > val_max:\n saver.save(\n sess, os.path.join(\n config.train_checkpoint,\n 'model_' + str(step) + '.ckpt'), global_step=step)\n\n # Store the new max validation accuracy\n val_max = val_acc\n\n else:\n # Training status\n format_str = 
('%s: step %d, loss = %.2f (%.1f examples/sec; '\n '%.3f sec/batch) | Training F = %s')\n print (format_str % (datetime.now(), step, loss_value,\n config.train_batch / duration,\n float(duration), train_acc))\n # End iteration\n step += 1\n\n except tf.errors.OutOfRangeError:\n print('Done training for %d epochs, %d steps.' % (config.epochs, step))\n finally:\n coord.request_stop()\n\n dt_stamp = get_dt() # date-time stamp\n np.save(\n os.path.join(\n config.tfrecord_dir, '%straining_loss' % dt_stamp), losses)\n coord.join(threads)\n sess.close()", "def build_model():\n mdl = Sequential()\n\n # normalization\n mdl.add(Lambda(lambda x: x/128. - 1, input_shape=IMAGE_SHAPE, name=\"input\"))\n\n # trim image\n mdl.add(Lambda(lambda x: x[:, 10:-10, :, :]))\n\n # convolutions\n mdl.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(48, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Flatten())\n\n mdl.add(Dense(128, activation='relu'))\n mdl.add(Dense(64, activation='relu'))\n mdl.add(Dense(1, name=\"output\"))\n\n mdl.summary()\n\n return mdl", "def create_model(self):\n # Create the generator and discriminators\n self.generator_lungs = self.generator_model()\n self.generator_organs = self.generator_model()\n\n self.disc_lungs = self.discriminator_model_lungs()\n self.disc_organs = self.discriminator_model_organs()\n\n # Initialize the optimizer and backend\n self.generator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5)\n self.set_backend = tf.keras.backend.set_floatx('float32')\n\n # Create the summary writer\n self.create_summary_writer()\n print('Models are created.')\n return self", "def build_models(config):\n inputs = Input(shape=(config['patch_height'], config['patch_width'], config['depth'], config['channel']),\n name='patchimg')\n\n kernelinitfun = keras.initializers.RandomNormal(mean=0.0, stddev=0.1, seed=None)\n activationfun = 'relu'\n # kernelinitfun = 'glorot_normal'\n\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv1_1')(inputs)\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv1_2')(x)\n # x = Dropout(0.3)(x)\n x = BatchNormalization(name='bn1')(x)\n x = Activation(activationfun)(x)\n x = MaxPooling3D(name='mp1', strides=(2, 2, 1))(x)\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv2_1')(x)\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv2_2')(x)\n # x = Dropout(0.2)(x)\n x = BatchNormalization(name='bn2')(x)\n x = Activation(activationfun)(x)\n x = MaxPooling3D(name='mp2', strides=(2, 2, 1))(x)\n x = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv3_1')(x)\n x = Conv3D(32, (3, 3, 3), 
strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv3_2')(x)\n # x = Dropout(0.5)(x)\n x = BatchNormalization(name='bn3')(x)\n x = Activation(activationfun)(x)\n x = MaxPooling3D(name='mp3', strides=(2, 2, 1))(x)\n x = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv4_1')(x)\n x = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv4_2')(x)\n # x = Dropout(0.5)(x)\n x = BatchNormalization(name='bn4')(x)\n x = Activation(activationfun)(x)\n\n x4 = Flatten(name='aux_fx')(x)\n\n source_classifier = Dropout(0.5)(x4)\n source_classifier = Dense(512, activation='softmax', name=\"mo1\")(source_classifier)\n source_classifier = Dropout(0.5)(source_classifier)\n source_classifier = Dense(128, activation='softmax', name=\"mo2\")(source_classifier)\n # source_classifier = Dropout(0.3)(source_classifier)\n source_classifier = Dense(1, name=\"mo\")(source_classifier)\n\n domain_classifier = Dense(32, activation='linear', name=\"do4\")(x4)\n domain_classifier = BatchNormalization(name=\"do5\")(domain_classifier)\n domain_classifier = Activation(\"elu\", name=\"do6\")(domain_classifier)\n domain_classifier = Dropout(0.5)(domain_classifier)\n\n domain_classifier = Dense(2, activation='softmax', name=\"do\")(domain_classifier)\n\n adamop = keras.optimizers.Adam(learning_rate=1e-5, beta_1=0.9, beta_2=0.999, amsgrad=False)\n comb_model = Model(inputs=inputs, outputs=[source_classifier, domain_classifier])\n comb_model.compile(optimizer=adamop,\n loss={'mo': 'mae', 'do': 'categorical_crossentropy'},\n loss_weights={'mo': 1, 'do': 2}, metrics=['accuracy'], )\n\n source_classification_model = Model(inputs=inputs, outputs=[source_classifier])\n source_classification_model.compile(optimizer=adamop,\n loss={'mo': 'mae'}, metrics=['accuracy'], )\n\n domain_classification_model = Model(inputs=inputs, outputs=[domain_classifier])\n domain_classification_model.compile(optimizer=adamop,\n loss={'do': 'categorical_crossentropy'}, metrics=['accuracy'])\n\n embeddings_model = Model(inputs=inputs, outputs=[x4])\n embeddings_model.compile(optimizer=adamop, loss='categorical_crossentropy', metrics=['accuracy'])\n\n return comb_model, source_classification_model, domain_classification_model, embeddings_model", "def __setup_model(self, **kwargs):\n self.model_architecture = kwargs['model_architecture'].upper()\n self.model = Classifier.IMAGENET_MODELS[self.model_architecture](\n pretrained=True\n )\n\n if 'input_size' in kwargs: # Loading from a checkpoint\n self.input_size = kwargs['input_size']\n self.model.current_epoch = kwargs['current_epoch']\n\n else: # No checkpoint, will be creating a new classifier for the model\n # The number of features coming from the feature detector CNN\n if 'ALEXNET' in self.model_architecture:\n self.input_size = self.model.classifier[1].in_features\n elif 'VGG' in self.model_architecture:\n self.input_size = self.model.classifier[0].in_features\n elif 'DENSENET' in self.model_architecture:\n self.input_size = self.model.classifier.in_features\n\n # Freeze the feature detector parameters to prevent backpropagating\n # through them.\n for param in self.model.parameters():\n param.requires_grad = False\n\n self.model.current_epoch = 1\n\n self.output_size = kwargs['output_size']\n self.hidden_layers = kwargs['hidden_layers']\n self.learn_rate = kwargs['learn_rate']\n self.drop_p = kwargs['drop_p']\n\n self.model.class_to_idx = kwargs['class_to_idx']\n 
self.model.classifier = Network(self.input_size,\n self.output_size,\n self.hidden_layers,\n self.drop_p)\n\n if 'model_state_dict' in kwargs: # load the state from checkpoint\n self.model.load_state_dict(kwargs['model_state_dict'])\n\n self.criterion = nn.NLLLoss()\n self.optimizer = optim.Adam(self.model.classifier.parameters(),\n lr=self.learn_rate)\n\n if 'optimizer_state_dict' in kwargs: # load the state from checkpoint\n self.optimizer.load_state_dict(kwargs['optimizer_state_dict'])", "def config_and_train(self, sys_args):\n \n self.run_config_function(sys_args)\n self.set_model_name('vgg_16')\n self.set_trainable_and_exclude_scopes(constants.checkpoint_exclude_scopes,\n constants.trainable_scopes)\n self.set_optimizer('sgd')\n self.set_max_number_of_steps(6000)\n self.train_or_eval_net(sys_args)", "def build_model(train_inputs,train_labels,model_params,model_mode='classification',\n model_type='naive_bayes'):\n if model_mode == \"classification\":\n if model_type == \"naive_bayes\":\n model = GaussianNB()\n if model_type == \"knn\":\n model = KNeighborsClassifier(n_neighbors=50)\n if model_type == \"svm\":\n model = SVC(kernel='poly', degree =27, coef0 =1, C=5)\n if model_type == \"decision_tree\":\n model = DecisionTreeClassifier(min_samples_split=45,min_samples_leaf=45,criterion=\"gini\")\n #model = RandomForestClassifier(n_estimators=500, n_jobs=-1)\n\n if model_mode == \"regression\":\n if model_type == \"knn\":\n model = KNeighborsRegressor()\n if model_type == \"svm\":\n model = SVR()\n if model_type == \"decision_tree\":\n model = DecisionTreeRegressor()\n\n\n model.fit(train_inputs, train_labels)\n # for name, score in zip(train_inputs.columns,model.feature_importances_):\n # print(name, score)\n\n return model", "def main():\n setup_keras()\n\n args = parse()\n\n train_settings = common.load_settings(args.settings_path, default_conf_name='train.yml')\n train_settings['store'] = args.store\n\n feature_settings = common.load_settings(args.settings_path, default_conf_name='feature.yml')\n model_settings = common.load_settings(args.settings_path, default_conf_name=train_settings['model_conf'])\n\n train_df, val_df = load_training_data(dict(train_settings, **feature_settings))\n assert train_df.shape[0] > val_df.shape[0] * 4.5, f'training data {train_df.shape[0]} should be much larger than validation {val_df.shape[0]}'\n\n sample_featurizer = AudioFeature(feature_settings)\n\n if args.load_name:\n model_name = args.load_name\n print('Loading existing model', model_name)\n m = keras.models.load_model(model_name)\n else:\n t = datetime.datetime.now().strftime('%Y%m%d-%H%M')\n model_name = f\"model-{model_settings['model']}_hop{feature_settings['hop_length']}_{t}\"\n m = models.build(dict(model_settings, **feature_settings))\n m.summary()\n\n output_dir = os.path.join(args.model_store, model_name)\n\n print(f\"Training model: '{model_name}'\", json.dumps(train_settings, indent=1))\n\n combined_settings = dict(train_settings, **model_settings, **feature_settings)\n\n h = train_model(output_dir, train_df, val_df,\n model=m,\n sample_featurizer=sample_featurizer,\n settings=combined_settings)", "def _create_model(self):\n ref = 0 if self.m_cfg['configs']['recursive'] else -1\n out_t, l_t, models = [], [], []\n in_t = [tf.keras.Input(batch_size=self.m_cfg['configs']['batch'],\n shape=self.m_cfg['configs']['patch'])]\n for level in np.arange(self.levels):\n if not self.m_cfg['configs']['recursive'] or not level:\n lat, res, layers = self._set_level_ops(in_t[-1], level)\n opt = 
self._inst_optimizer()\n self.opt += [opt]\n curr_layers = sum(layers, [])\n vars = sum(list(map(lambda l: l.variables, curr_layers)), [])\n self.vars.append(vars)\n elif self.m_cfg['configs']['recursive']:\n lat, res, layers = self._set_level_ops(in_t[-1], level, layers)\n\n out_t += [res]\n l_t += [lat]\n in_t += [tf.keras.layers.Subtract()([in_t[ref], out_t[-1]])]\n\n inputs, outputs = in_t[0], [in_t[:-1], l_t, out_t]\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.loss = Losses(self.m_cfg['configs']['loss']).value", "def construct_model():\n # model = Sequential()\n # model.add(Dense(units=64, activation='relu', input_dim=100))\n # model.add(Dense(units=10, activation='softmax'))\n # model.compile(loss='categorical_crossentropy',\n # optimizer='sgd',\n # metrics=['accuracy'])\n # return model\n\n model = Sequential()\n # Input Layer\n model.add(Conv2D(64, 3, data_format='channels_last', activation='relu', padding='same',\n input_shape=(img_width, img_height, 3)))\n model.add(MaxPool2D(pool_size=2, strides=2))\n # Hidden Layer 1\n model.add(Conv2D(64, 3, activation='relu', padding='same'))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 2\n model.add(Conv2D(128, 3, activation='relu', padding='same'))\n model.add(Conv2D(128, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 3\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n\n # Fully Connected Layer\n model.add(Flatten())\n # 512 Neuron Layer\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.5))\n # Output Layer\n model.add(Dense(num_of_classes))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model", "def initialize_model(model_name, num_classes, feature_extract, verbose=False):\n\n model_ft = None\n\n if model_name == \"resnet\":\n \"\"\" Resnet18\n \"\"\"\n model_ft = models.resnet18(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"alexnet\":\n \"\"\" Alexnet\n \"\"\"\n model_ft = models.alexnet(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"vgg\":\n \"\"\" VGG11_bn\n \"\"\"\n model_ft = models.vgg11_bn(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"squeezenet\":\n \"\"\" Squeezenet\n \"\"\"\n with warnings.catch_warnings(): # temporarily suppress warnings about deprecated functions\n warnings.simplefilter(\"ignore\")\n model_ft = models.squeezenet1_0(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))\n model_ft.num_classes = num_classes\n\n elif model_name == \"densenet\":\n \"\"\" Densenet\n \"\"\"\n model_ft = models.densenet121(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = 
nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"inception\":\n \"\"\" Inception v3\n Be careful, expects (299,299) sized images and has auxiliary output\n \"\"\"\n model_ft = models.inception_v3(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n # Handle the auxilary net\n num_ftrs = model_ft.AuxLogits.fc.in_features\n model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)\n # Handle the primary net\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n else: # Unreachable\n exit()\n\n # Gather the parameters to be optimized\n params_to_update = list(filter(lambda p: p.requires_grad, model_ft.parameters()))\n\n # Print model info\n if verbose:\n print()\n print(model_ft)\n print()\n print(\"Params to learn:\")\n for name, param in model_ft.named_parameters():\n if param.requires_grad:\n print('\\t', name)\n\n return model_ft, params_to_update", "def createModel(self):\n outputs, inputs = baseUNet(self.input_shape,\n self.conv_depth,\n self.n_classes,\n self.init_w,\n self.dropout)\n \n if self.regression == True:\n outputs = Lambda(getPropOfGround)(outputs)\n \n model = Model(inputs = inputs,outputs = outputs)\n \n model.compile(optimizer = self.optimizer,\n loss=self.loss_function,\n metrics=self.metrics)\n\n if self.old_weights != None:\n model.set_weights(self.old_weights)\n self.model = model", "def setup_model(self):\r\n\r\n logging.info(\"Setup the models.\")\r\n\r\n logging.info(\"{} model\".format(self.base_network_name))\r\n if self.base_network_name.lower().startswith(\"resnet\"):\r\n base_model, classifier = getattr(setops_models, self.base_network_name)(\r\n num_classes=80,\r\n avgpool_kernel=self.avgpool_kernel\r\n )\r\n else:\r\n base_model = getattr(setops_models, self.base_network_name)()\r\n classifier = getattr(setops_models, self.classifier_name)(num_classes=80)\r\n\r\n if self.init_inception:\r\n logging.info(\"Initialize inception model using Amit's networks.\")\r\n\r\n checkpoint = torch.load(self.resume_path)\r\n\r\n base_model = Inception3(aux_logits=False, transform_input=True)\r\n base_model.load_state_dict(\r\n {k: v for k, v in checkpoint[\"state_dict\"].items() if k in base_model.state_dict()}\r\n )\r\n classifier.load_state_dict(\r\n {k: v for k, v in checkpoint[\"state_dict\"].items() if k in classifier.state_dict()}\r\n )\r\n\r\n setops_model_cls = getattr(setops_models, self.sets_network_name)\r\n setops_model = setops_model_cls(\r\n input_dim=2048,\r\n S_latent_dim=self.ops_latent_dim, S_layers_num=self.ops_layer_num,\r\n I_latent_dim=self.ops_latent_dim, I_layers_num=self.ops_layer_num,\r\n U_latent_dim=self.ops_latent_dim, U_layers_num=self.ops_layer_num,\r\n block_cls_name=self.sets_block_name, basic_block_cls_name=self.sets_basic_block_name,\r\n dropout_ratio=self.setops_dropout,\r\n )\r\n\r\n if self.resume_path:\r\n logging.info(\"Resuming the models.\")\r\n models_path = Path(self.resume_path)\r\n if self.base_network_name.lower().startswith(\"resnet\"):\r\n base_model.load_state_dict(\r\n torch.load(sorted(models_path.glob(\"networks_base_model_{}*.pth\".format(self.resume_epoch)))[-1])\r\n )\r\n classifier.load_state_dict(\r\n torch.load(sorted(models_path.glob(\"networks_classifier_{}*.pth\".format(self.resume_epoch)))[-1])\r\n )\r\n\r\n setops_models_paths = sorted(models_path.glob(\"networks_setops_model_{}*.pth\".format(self.resume_epoch)))\r\n if len(setops_models_paths) > 0:\r\n setops_model.load_state_dict(\r\n torch.load(setops_models_paths[-1]).state_dict()\r\n 
)\r\n\r\n return base_model, classifier, setops_model", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def train_joint(configs: Dict, time_identifier: str, model_name: str, base_model_name: str, train_specific: bool = True,\n train_specific_epoch: int = 20, train_general_epoch: int = 40, specific_weight: Optional[Dict] = None,\n general_weight: Optional[str] = None, train_general: bool = False,\n fine_tune: bool = True, fine_tune_epoch: int = 20, input_channels: int = 1, **kwargs) -> None:\n # Init the logger\n logger = GlobalLogger().get_logger()\n logger.info(\"Training the general model.\")\n use_features = [AudioFeatures.MFCC, AudioFeatures.SPECS, AudioFeatures.MELSPECS]\n total_fold = configs['dataset']['k_fold']\n # Train the specific model, this usually happens when there is no previous training\n\n if train_specific:\n logger.info(\"Training the specific model with {}.\".format(base_model_name))\n for specific_feature in use_features:\n logger.info(\n \"Training the specific model with feature {} in {} epochs.\".format(specific_feature.name,\n train_specific_epoch))\n # Init the weight saving directory\n save_dir = os.path.join(configs['weight']['weight_dir'], time_identifier, specific_feature.value)\n create_dir(save_dir)\n # Get fold\n\n for current_fold, (train_dataloader, test_dataloader) in enumerate(\n zip(prepare_dataloader([specific_feature], configs[\"dataset\"], DatasetMode.TRAIN),\n prepare_dataloader([specific_feature], configs[\"dataset\"], DatasetMode.TEST))):\n\n # If not running on GPU\n model = Registers.model[base_model_name](input_shape=(128, 157))\n model = model.cuda()\n\n # Init the criterion, CE by default\n criterion = nn.CrossEntropyLoss()\n # Init the optimizer, SGD by default\n optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n\n for current_epoch in range(1, train_specific_epoch + 1):\n # Setting the model to train mode\n model.train()\n # Get the length of the dataloader\n length = len(train_dataloader)\n # Init the loss\n running_loss = 0.0\n # Init the timer\n current_time = time.time()\n # Create the tqdm bar\n bar = tqdm(range(length))\n bar.set_description(\n \"Training using feature {}, for fold {}/{}, epoch {}\".format(specific_feature, current_fold,\n total_fold,\n current_epoch))\n # Running one batch\n for iteration, data in enumerate(train_dataloader):\n feature, label = data[specific_feature], data[AudioFeatures.LABEL]\n\n if input_channels != 1:\n feature = torch.cat([feature] * input_channels, dim=1)\n\n # Get features and set them to cuda\n feature = feature.cuda()\n label = label.cuda()\n # Set the optimizer to zero\n optimizer.zero_grad()\n # Go through one epoch\n output = model(feature)\n # Calculate the loss\n loss = criterion(output, label)\n # Back propagation\n loss.backward()\n # Update the optimizer\n optimizer.step()\n # Sum up the losses\n running_loss += loss.item()\n # Visualize the loss\n bar.set_postfix(loss=running_loss / (iteration + 1))\n # Update the bar\n bar.update(1)\n # Training finish, close the bar\n bar.close()\n # Calculate the final loss\n losses = running_loss / length\n # Time the time past\n now_time = time.time()\n # Write logs\n logger.info(\n \"Finish training feature {}, for fold {}/{}, epoch {}, time cost {}s ,with loss {}\".format(\n specific_feature,\n current_fold,\n total_fold,\n current_epoch,\n now_time - current_time,\n losses))\n # Re-init the timer\n current_time = time.time()\n # Going into eval mode\n correct = 0\n total = 
0\n # Get the length of the test dataloader\n length = len(test_dataloader)\n # Init the bar\n bar_test = tqdm(range(length))\n bar_test.set_description(\n \"Testing using feature {}, for fold {}/{}, epoch {}\".format(specific_feature, current_fold,\n total_fold,\n current_epoch))\n # Set the model to evaluation mode\n model.eval()\n # Do not record the gradiant\n with torch.no_grad():\n # Running one batch\n for data in test_dataloader:\n # Get the features\n feature, label = data[specific_feature], data[AudioFeatures.LABEL]\n\n if input_channels != 1:\n feature = torch.cat([feature] * input_channels, dim=1)\n\n feature = feature.cuda()\n label = label.cuda()\n # Running the model\n output = model(feature)\n # Normalize the output to one-hot mode\n _, predicted = torch.max(func.softmax(output, dim=1), 1)\n # Record the size\n total += label.size(0)\n # Record the correct output\n correct += (predicted == label).sum().item()\n # Calculate the accuracy\n acc = correct / total\n # Visualize the accuracy\n bar_test.set_postfix(acc=acc)\n # Update the bar\n bar_test.update(1)\n # Calculate the accuracy\n final = correct / total\n # Close the bar\n bar_test.close()\n # Time the timer\n now_time = time.time()\n # Write the log\n logger.info(\n \"Finish testing feature {}, for fold {}/{}, epoch {}, time cost {}s ,with acc {}\".format(\n specific_feature,\n current_fold,\n total_fold,\n current_epoch,\n now_time - current_time,\n final))\n # Save the weight to the directory\n save_name = os.path.join(save_dir, \"fold{}_{}-epoch{}-loss{}-acc{}.pth\").format(current_fold,\n total_fold,\n current_epoch,\n losses,\n final)\n torch.save(model.state_dict(), save_name)\n # Write the log\n logger.info(\"Saving weight to {}\".format(save_name))\n\n logger.info(\"Finish training for fold {} of {}.\".format(current_fold, specific_feature.name))\n logger.info(\"Finish training for feature {}.\".format(specific_feature.name))\n logger.info(\"Finishing training for all features.\")\n\n else:\n logger.info(\"Skip training the specific features.\")\n\n if train_general:\n # Init the saving directory\n save_dir = os.path.join(configs['weight']['weight_dir'], time_identifier, \"General\")\n create_dir(save_dir)\n logger.info(\"Training general models.\")\n # Getting the dataloader from the generator\n for current_fold, (train_dataloader, test_dataloader) in enumerate(\n zip(prepare_dataloader(use_features, configs[\"dataset\"], DatasetMode.TRAIN),\n prepare_dataloader(use_features, configs[\"dataset\"], DatasetMode.TEST))):\n # Send the model to GPU\n model = Registers.model[model_name](input_shape=(128, 157))\n model.cuda()\n\n # Init the criterion, CE by default\n criterion = nn.CrossEntropyLoss()\n # Init the optimizer, SGD by default\n optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n\n # Load weight\n if train_specific:\n # Load weight from all separated directories\n for specific_feature in use_features:\n # By default the bast accuracy weight is used\n weight_file = get_best_acc_weight(os.path.join(configs['weight']['weight_dir'], time_identifier),\n total_fold, current_fold, specific_feature)\n # Load weights\n if specific_feature == AudioFeatures.SPECS:\n model.extractor_spec.load_state_dict(torch.load(weight_file), strict=False)\n if specific_feature == AudioFeatures.MELSPECS:\n model.extractor_mel.load_state_dict(torch.load(weight_file), strict=False)\n if specific_feature == AudioFeatures.MFCC:\n model.extractor_mfcc.load_state_dict(torch.load(weight_file), strict=False)\n # Write the 
logs\n logger.info(\"Load weight {} for {}.\".format(weight_file, specific_feature.value))\n else:\n assert specific_weight is not None\n assert AudioFeatures.MFCC in specific_weight.keys()\n assert AudioFeatures.MELSPECS in specific_weight.keys()\n assert AudioFeatures.SPECS in specific_weight.keys()\n # Load weight from all separated directories\n for specific_feature in use_features:\n # By default the bast accuracy weight is used\n weight_file = get_best_acc_weight(\n os.path.join(configs['weight']['weight_dir'], specific_weight[specific_feature]),\n total_fold, current_fold, specific_feature)\n # Load weights\n if specific_feature == AudioFeatures.SPECS:\n model.extractor_spec.load_state_dict(torch.load(weight_file), strict=False)\n if specific_feature == AudioFeatures.MELSPECS:\n model.extractor_mel.load_state_dict(torch.load(weight_file), strict=False)\n if specific_feature == AudioFeatures.MFCC:\n model.extractor_mfcc.load_state_dict(torch.load(weight_file), strict=False)\n # Write the logs\n logger.info(\"Load weight {} for {}.\".format(weight_file, specific_feature.value))\n # Unfrozen weights\n for params in model.extractor_spec.parameters():\n params.requires_grad = False\n for params in model.extractor_mfcc.parameters():\n params.requires_grad = False\n for params in model.extractor_mel.parameters():\n params.requires_grad = False\n logger.info(\"In mode training, freeze the extractor layers.\")\n\n # Running epoch\n for current_epoch in range(1, train_general_epoch + 1):\n # Setting the model to train mode\n model.train()\n # Get the length of the dataloader\n length = len(train_dataloader)\n # Init the loss\n running_loss = 0.0\n # Init the timer\n current_time = time.time()\n # Create the tqdm bar\n bar = tqdm(range(length))\n bar.set_description(\n \"Training general model for fold {}/{}, epoch {}\".format(current_fold,\n total_fold,\n current_epoch))\n # Running one batch\n for iteration, data in enumerate(train_dataloader):\n # Get features and set them to cuda\n spec, mel, mfcc, label = data[AudioFeatures.SPECS], data[AudioFeatures.MELSPECS], data[\n AudioFeatures.MFCC], data[AudioFeatures.LABEL]\n\n if input_channels != 1:\n spec = torch.cat([spec] * input_channels, dim=1)\n mel = torch.cat([mel] * input_channels, dim=1)\n mfcc = torch.cat([mfcc] * input_channels, dim=1)\n\n spec = spec.cuda()\n mel = mel.cuda()\n mfcc = mfcc.cuda()\n label = label.cuda()\n # Set the optimizer to zero\n optimizer.zero_grad()\n # Go through one epoch\n output = model(mfcc, spec, mel)\n # Calculate the loss\n loss = criterion(output, label)\n # Back propagation\n loss.backward()\n # Update the optimizer\n optimizer.step()\n # Sum up the losses\n running_loss += loss.item()\n # Visualize the loss\n bar.set_postfix(loss=running_loss / (iteration + 1))\n # Update the bar\n bar.update(1)\n # Training finish, close the bar\n bar.close()\n # Calculate the final loss\n losses = running_loss / length\n # Time the time past\n now_time = time.time()\n # Write logs\n logger.info(\n \"Finish training general model, for fold {}/{}, epoch {}, time cost {}s ,with loss {}\".format(\n current_fold,\n total_fold,\n current_epoch,\n now_time - current_time,\n losses))\n # Re-init the timer\n current_time = time.time()\n # Going into eval mode\n correct = 0\n total = 0\n # Get the length of the test dataloader\n length = len(test_dataloader)\n # Init the bar\n bar_test = tqdm(range(length))\n bar_test.set_description(\n \"Testing general model for fold {}/{}, epoch {}\".format(current_fold,\n total_fold,\n 
current_epoch))\n # Set the model to evaluation mode\n model.eval()\n # Do not record the gradiant\n with torch.no_grad():\n # Running one batch\n for data in test_dataloader:\n # Get the features\n spec, mel, mfcc, label = data[AudioFeatures.SPECS], data[AudioFeatures.MELSPECS], data[\n AudioFeatures.MFCC], data[AudioFeatures.LABEL]\n if input_channels != 1:\n spec = torch.cat([spec] * input_channels, dim=1)\n mel = torch.cat([mel] * input_channels, dim=1)\n mfcc = torch.cat([mfcc] * input_channels, dim=1)\n spec = spec.cuda()\n mel = mel.cuda()\n mfcc = mfcc.cuda()\n label = label.cuda()\n # Running the model\n output = model(mfcc, spec, mel)\n # Normalize the output to one-hot mode\n _, predicted = torch.max(func.softmax(output, dim=1), 1)\n # Record the size\n total += label.size(0)\n # Record the correct output\n correct += (predicted == label).sum().item()\n # Calculate the accuracy\n acc = correct / total\n # Visualize the accuracy\n bar_test.set_postfix(acc=acc)\n # Update the bar\n bar_test.update(1)\n # Calculate the accuracy\n final = correct / total\n # Close the bar\n bar_test.close()\n # Time the timer\n now_time = time.time()\n # Write the log\n logger.info(\n \"Finish testing general model, for fold {}/{}, epoch {}, time cost {}s ,with acc {}\".format(\n current_fold,\n total_fold,\n current_epoch,\n now_time - current_time,\n final))\n # Save the weight to the directory\n save_name = os.path.join(save_dir, \"fold{}_{}-epoch{}-loss{}-acc{}.pth\").format(current_fold,\n total_fold,\n current_epoch, losses,\n final)\n torch.save(model.state_dict(), save_name)\n # Write the log\n logger.info(\"Saving weight to {}\".format(save_name))\n logger.info(\"Finish training the general model for fold {}.\".format(current_fold))\n else:\n logger.info(\"Skip training the general model.\")\n if fine_tune:\n # Init the saving directory\n save_dir = os.path.join(configs['weight']['weight_dir'], time_identifier, \"Fine_tune\")\n create_dir(save_dir)\n logger.info(\"Fine-tune general models.\")\n\n # Getting the dataloader from the generator\n for current_fold, (train_dataloader, test_dataloader) in enumerate(\n zip(prepare_dataloader(use_features, configs[\"dataset\"], DatasetMode.TRAIN),\n prepare_dataloader(use_features, configs[\"dataset\"], DatasetMode.TEST))):\n # Send the model to GPU\n model = Registers.model[model_name](input_shape=(128, 157))\n model.cuda()\n\n # Init the criterion, CE by default\n criterion = nn.CrossEntropyLoss()\n # Init the optimizer, SGD by default\n optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)\n\n if train_general:\n\n # By default the bast accuracy weight is used\n weight_file = get_best_acc_weight(os.path.join(configs['weight']['weight_dir'], time_identifier),\n total_fold, current_fold, \"General\")\n # Load weights\n model.load_state_dict(torch.load(weight_file), strict=True)\n # Write the logs\n logger.info(\"Load weight {} for fine-tuning.\".format(weight_file))\n else:\n assert general_weight is not None\n # By default the bast accuracy weight is used\n weight_file = get_best_acc_weight(os.path.join(configs['weight']['weight_dir'], general_weight),\n total_fold, current_fold, \"General\")\n # Load weights\n model.load_state_dict(torch.load(weight_file), strict=True)\n # Write the logs\n logger.info(\"Load weight {} for fine-tuning.\".format(weight_file))\n\n # If fine-tune, the parameters in extractor should be unfreeze\n for params in model.extractor_spec.parameters():\n params.requires_grad = True\n for params in 
model.extractor_mfcc.parameters():\n params.requires_grad = True\n for params in model.extractor_mel.parameters():\n params.requires_grad = True\n\n logger.info(\"In mode fine-tune, unfreeze the extractor layers.\")\n # Running epoch\n for current_epoch in range(1, fine_tune_epoch + 1):\n # Setting the model to train mode\n model.train()\n # Get the length of the dataloader\n length = len(train_dataloader)\n # Init the loss\n running_loss = 0.0\n # Init the timer\n current_time = time.time()\n # Create the tqdm bar\n bar = tqdm(range(length))\n bar.set_description(\n \"Fine-tuning general model, for fold {}/{}, epoch {}\".format(current_fold, total_fold,\n current_epoch))\n # Running one batch\n for iteration, data in enumerate(train_dataloader):\n # Get features and set them to cuda\n spec, mel, mfcc, label = data[AudioFeatures.SPECS], data[AudioFeatures.MELSPECS], data[\n AudioFeatures.MFCC], data[AudioFeatures.LABEL]\n\n if input_channels != 1:\n spec = torch.cat([spec] * input_channels, dim=1)\n mel = torch.cat([mel] * input_channels, dim=1)\n mfcc = torch.cat([mfcc] * input_channels, dim=1)\n\n spec = spec.cuda()\n mel = mel.cuda()\n mfcc = mfcc.cuda()\n label = label.cuda()\n\n # Set the optimizer to zero\n optimizer.zero_grad()\n # Go through one epoch\n output = model(mfcc, spec, mel)\n # Calculate the loss\n loss = criterion(output, label)\n # Back propagation\n loss.backward()\n # Update the optimizer\n optimizer.step()\n # Sum up the losses\n running_loss += loss.item()\n # Visualize the loss\n bar.set_postfix(loss=running_loss / (iteration + 1))\n # Update the bar\n bar.update(1)\n # Training finish, close the bar\n bar.close()\n # Calculate the final loss\n losses = running_loss / length\n # Time the time past\n now_time = time.time()\n # Write logs\n logger.info(\n \"Finish fine-tune general model, for fold {}/{}, epoch {}, time cost {}s ,with loss {}\".format(\n current_fold,\n total_fold,\n current_epoch,\n now_time - current_time,\n losses))\n # Re-init the timer\n current_time = time.time()\n # Going into eval mode\n correct = 0\n total = 0\n # Get the length of the test dataloader\n length = len(test_dataloader)\n # Init the bar\n bar_test = tqdm(range(length))\n bar_test.set_description(\n \"Testing general model, for fold {}/{}, epoch {}\".format(current_fold,\n total_fold,\n current_epoch))\n # Set the model to evaluation mode\n model.eval()\n # Do not record the gradiant\n with torch.no_grad():\n # Running one batch\n for data in test_dataloader:\n # Get the features\n spec, mel, mfcc, label = data[AudioFeatures.SPECS], data[AudioFeatures.MELSPECS], data[\n AudioFeatures.MFCC], data[AudioFeatures.LABEL]\n\n if input_channels != 1:\n spec = torch.cat([spec] * input_channels, dim=1)\n mel = torch.cat([mel] * input_channels, dim=1)\n mfcc = torch.cat([mfcc] * input_channels, dim=1)\n\n spec = spec.cuda()\n mel = mel.cuda()\n mfcc = mfcc.cuda()\n label = label.cuda()\n # Running the model\n output = model(mfcc, spec, mel)\n # Normalize the output to one-hot mode\n _, predicted = torch.max(func.softmax(output, dim=1), 1)\n # Record the size\n total += label.size(0)\n # Record the correct output\n correct += (predicted == label).sum().item()\n # Calculate the accuracy\n acc = correct / total\n # Visualize the accuracy\n bar_test.set_postfix(acc=acc)\n # Update the bar\n bar_test.update(1)\n # Calculate the accuracy\n final = correct / total\n # Close the bar\n bar_test.close()\n # Time the timer\n now_time = time.time()\n # Write the log\n logger.info(\n \"Finish testing 
general model, for fold {}/{}, epoch {}, time cost {}s ,with acc {}\".format(\n current_fold,\n total_fold,\n current_epoch,\n now_time - current_time,\n final))\n # Save the weight to the directory\n save_name = os.path.join(save_dir, \"fold{}_{}-epoch{}-loss{}-acc{}.pth\").format(current_fold,\n total_fold,\n current_epoch, losses,\n final)\n torch.save(model.state_dict(), save_name)\n # Write the log\n logger.info(\"Saving weight to {}\".format(save_name))\n logger.info(\"Finish fine-tuning fold {}.\".format(current_fold))\n logger.info(\"Finish fine-tuning the model.\")\n else:\n logger.info(\"Skip fine-tune the model.\")", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n \"\"\"\n Astrous Pyrimid Pooling. 
Decoder\n \"\"\"\n with tf.variable_scope('ASPP'):\n feature_map_shape = x.get_shape().as_list()\n\n # global average pooling\n # feature 맵의 height, width를 평균을 낸다.\n feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n rate1 = conv2d(x, 256, [1, 1], name='rate1')\n rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n net = conv2d(concated, 256, [1, 1], name='net')\n\n logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n pred = tf.argmax(logits, axis=3)\n pred = tf.expand_dims(pred, dim=3)\n\n return logits, pred", "def build_examples():\n build_models([\n \"VGG_16\",\n \"VGG_19\",\n \"RESNET_50\",\n \"MOBILENET\",\n #\"INCEPTION_V3\",\n #\"INCEPTION_RESNET\",\n #\"DENSENET_121\",\n #\"DENSENET_169\",\n #\"DENSENET_201\"])\n ])", "def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model", "def _prepare_models(self):\n if self.freeze_layers is not None:\n self._set_freeze_layers()\n self._load_weight_if_possible()\n print(self.keras_model.summary())\n self.show_configuration()", "def build(self):\n\n self.W_AA = self.init([self.n_atom_input_feat, self.n_hidden_AA])\n self.b_AA = model_ops.zeros(shape=[\n self.n_hidden_AA,\n ])\n\n self.W_PA = self.init([self.n_pair_input_feat, self.n_hidden_PA])\n self.b_PA = model_ops.zeros(shape=[\n self.n_hidden_PA,\n ])\n\n self.W_A = self.init([self.n_hidden_A, self.n_atom_output_feat])\n self.b_A = model_ops.zeros(shape=[\n self.n_atom_output_feat,\n ])\n\n self.trainable_weights = [\n self.W_AA, self.b_AA, self.W_PA, self.b_PA, self.W_A, self.b_A\n ]\n if self.update_pair:\n self.W_AP = self.init([self.n_atom_input_feat * 2, self.n_hidden_AP])\n self.b_AP = 
model_ops.zeros(shape=[\n self.n_hidden_AP,\n ])\n\n self.W_PP = self.init([self.n_pair_input_feat, self.n_hidden_PP])\n self.b_PP = model_ops.zeros(shape=[\n self.n_hidden_PP,\n ])\n\n self.W_P = self.init([self.n_hidden_P, self.n_pair_output_feat])\n self.b_P = model_ops.zeros(shape=[\n self.n_pair_output_feat,\n ])\n\n self.trainable_weights.extend(\n [self.W_AP, self.b_AP, self.W_PP, self.b_PP, self.W_P, self.b_P])", "def create_basic_cnn_model(num_classes: int):\n model = Sequential()\n\n # Convolutional + spooling layers\n model.add(Conv2D(64, (5, 5), input_shape=(config.ROI_IMG_SIZE['HEIGHT'], config.ROI_IMG_SIZE['WIDTH'], 1)))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n model.add(Conv2D(32, (5, 5), padding='same'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n model.add(Flatten())\n\n # Dropout\n model.add(Dropout(0.5, seed=config.RANDOM_SEED, name=\"Dropout_1\"))\n\n # FC\n model.add(Dense(1024, activation='relu', name='Dense_2'))\n\n # Output\n if num_classes == 2:\n model.add(Dense(1, activation='sigmoid', kernel_initializer=\"random_uniform\", name='Output'))\n else:\n model.add(Dense(num_classes, activation='softmax', kernel_initializer=\"random_uniform\", name='Output'))\n\n # Print model details if running in debug mode.\n if config.verbose_mode:\n print(model.summary())\n\n return model", "def make_NN(n_hidden, n_epoch, labelsdict, lr, device, model_name, trainloader, validloader, train_data, pretrain, finetune_whole, custom_model):\n if custom_model == 2:\n # Use custom two-layer convolution model\n print(\"Using Two-Layer CNN\")\n model = TwoLayerConvNet()\n elif custom_model == 5:\n print(\"Using Five-Layer CNN\")\n # Use custom five-layer convolution model\n model = FiveLayerConvNet()\n else:\n # Import NN model (either pretrained or not)\n model = getattr(models, model_name)(pretrained=pretrain)\n \"\"\" ===================================================================================== \"\"\"\"\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER FREEZE THE PARAMETERS OR NOT (WILBERT ARISTO) \"\"\"\n # If we do not need to finetune whole model, freeze parameters that we don't need to re-train\n if not finetune_whole:\n for param in model.parameters():\n param.requires_grad = False\n \"\"\" ===================================================================================== \"\"\"\"\n\n n_out = len(labelsdict)\n\n \"\"\" CHANGED LAST LAYER TO model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Make classifier\n n_in = next(model.fc.modules()).in_features\n model.fc = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n optimizer = optim.Adam(model.fc.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\"\n else:\n # Make classifier\n n_in = next(model.classifier.modules()).in_features\n model.classifier = NN_Classifier(input_size=n_in, output_size=n_out, hidden_layers=n_hidden)\n \n \"\"\" IMPLEMENTATION OF finetune_whole ARGUMENT TO EITHER OPTIMIZE ALL PARAMETERS OR JUST THE LAST LAYER'S PARAMS (WILBERT ARISTO) \"\"\"\n # Define optimizer\n if finetune_whole:\n optimizer = optim.Adam(model.parameters(), lr = lr)\n else:\n 
optimizer = optim.Adam(model.classifier.parameters(), lr = lr)\n \"\"\" ============================================================================================================================ \"\"\"\"\n \"\"\" ============================================================================================================================ \"\"\"\"\n\n # Define criterion\n criterion = nn.NLLLoss() \n\n model.to(device)\n start = time.time()\n\n epochs = n_epoch\n steps = 0 \n running_loss = 0\n print_every = 40\n for e in range(epochs):\n model.train()\n for images, labels in trainloader:\n images, labels = images.to(device), labels.to(device)\n\n steps += 1\n\n optimizer.zero_grad()\n\n output = model.forward(images)\n loss = criterion(output, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n # Eval mode for predictions\n model.eval()\n\n # Turn off gradients for validation\n with torch.no_grad():\n test_loss, accuracy = validation(model, validloader, criterion, device)\n\n print(\"Epoch: {}/{} - \".format(e+1, epochs),\n \"Training Loss: {:.3f} - \".format(running_loss/print_every),\n \"Validation Loss: {:.3f} - \".format(test_loss/len(validloader)),\n \"Validation Accuracy: {:.3f}\".format(accuracy/len(validloader)))\n\n running_loss = 0\n\n # Make sure training is back on\n model.train()\n \n \"\"\" CHANGED LAST LAYER TO model.fc IF WE ARE USING RESNET MODEL (WILBERT ARISTO) \"\"\"\n if \"resnet\" in model_name:\n # Add model info \n model.fc.n_in = n_in\n model.fc.n_hidden = n_hidden\n model.fc.n_out = n_out\n model.fc.labelsdict = labelsdict\n model.fc.lr = lr\n model.fc.optimizer_state_dict = optimizer.state_dict\n model.fc.model_name = model_name\n model.fc.class_to_idx = train_data.class_to_idx\n else:\n # Add model info \n model.classifier.n_in = n_in\n model.classifier.n_hidden = n_hidden\n model.classifier.n_out = n_out\n model.classifier.labelsdict = labelsdict\n model.classifier.lr = lr\n model.classifier.optimizer_state_dict = optimizer.state_dict\n model.classifier.model_name = model_name\n model.classifier.class_to_idx = train_data.class_to_idx\n \"\"\" ============================================================================================================================ \"\"\"\"\n\n print('model:', model_name, '- hidden layers:', n_hidden, '- epochs:', n_epoch, '- lr:', lr)\n print(f\"Run time: {(time.time() - start)/60:.3f} min\")\n return model\n\n# Define function to save checkpoint\ndef save_checkpoint(model, path):\n checkpoint = {'c_input': model.classifier.n_in,\n 'c_hidden': model.classifier.n_hidden,\n 'c_out': model.classifier.n_out,\n 'labelsdict': model.classifier.labelsdict,\n 'c_lr': model.classifier.lr,\n 'state_dict': model.state_dict(),\n 'c_state_dict': model.classifier.state_dict(),\n 'opti_state_dict': model.classifier.optimizer_state_dict,\n 'model_name': model.classifier.model_name,\n 'class_to_idx': model.classifier.class_to_idx\n }\n torch.save(checkpoint, path)\n \n# Define function to load model\ndef load_model(path):\n cp = torch.load(path)\n \n # Import pre-trained NN model \n model = getattr(models, cp['model_name'])(pretrained=True)\n \n # Freeze parameters that we don't need to re-train \n for param in model.parameters():\n param.requires_grad = False\n \n # Make classifier\n model.classifier = NN_Classifier(input_size=cp['c_input'], output_size=cp['c_out'], \\\n hidden_layers=cp['c_hidden'])\n \n # Add model info \n model.classifier.n_in = cp['c_input']\n 
model.classifier.n_hidden = cp['c_hidden']\n model.classifier.n_out = cp['c_out']\n model.classifier.labelsdict = cp['labelsdict']\n model.classifier.lr = cp['c_lr']\n model.classifier.optimizer_state_dict = cp['opti_state_dict']\n model.classifier.model_name = cp['model_name']\n model.classifier.class_to_idx = cp['class_to_idx']\n model.load_state_dict(cp['state_dict'])\n \n return model", "def make_model():\n # create the base pre-trained model\n base_model = efn.EfficientNetB0(input_shape=(img_width, img_height, 3), include_top=False)\n # add a global spatial average pooling layer\n x = base_model.output\n x = GlobalAveragePooling2D()(x)\n predictions = Dense(num_classes, activation=\"softmax\")(x)\n model = Model(inputs=base_model.input, outputs=predictions)\n\n model.compile(optimizer=\"adam\", loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n return base_model, model", "def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model", "def model(self, hyperparams, test_mode=False):\n run_doc = OrderedDict() # Document important hyperparameters\n run_start_time = time.time()\n run_id = str(uuid4())\n # TODO: Not ideal: Loads from memory every time. Use generator?\n train_data, train_targets, test_data, test_targets = \\\n self.data_loader(dataset=hyperparams['dataset'], size=hyperparams['dataset_size'])\n run_doc['dataset'] = hyperparams['dataset']\n run_doc['data_size'] = len(train_targets)\n # Visualization tools\n if config.INPUT_DEBUG:\n image_analysis(image=train_data[0, :, :, :], label=train_targets[0, :])\n # Input shape comes from image shape\n img_width = train_data[0].shape[0]\n img_height = train_data[0].shape[1]\n num_channels = train_data[0].shape[2]\n input_shape = (img_width, img_height, num_channels)\n run_doc['input_shape'] = '(%d, %d, %d)' % input_shape\n input_tensor = Input(shape=input_shape, dtype='float32', name='input_image')\n try: # Model creation is in separate file\n x, run_doc = custom_model(input_tensor, params=hyperparams, run_doc=run_doc)\n except ValueError as e:\n if not test_mode: # If not testing, ignore error causing models\n return {'loss': 100, 'status': STATUS_OK}\n else:\n raise e\n # Final layer classifies into 4 possible actions\n output = layers.Dense(4, activation='softmax')(x)\n # File names for the model and logs\n log_file = os.path.join(self._logs_dir, run_id)\n model_file = os.path.join(self._models_dir, run_id + '.h5')\n # Add some callbacks so we can track progress using Tensorboard\n callbacks = [keras.callbacks.EarlyStopping('val_loss', patience=config.TRAIN_PATIENCE, mode=\"min\")]\n if not test_mode: # Don't save models/logs if in testing mode\n callbacks += [keras.callbacks.TensorBoard(log_dir=log_file),\n keras.callbacks.ModelCheckpoint(model_file, save_best_only=True)]\n # Choice of optimizer and optimization parameters\n if hyperparams['optimizer'] == 'sgd':\n optimizer = optimizers.SGD(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'rmsprop':\n optimizer = optimizers.RMSprop(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'nadam':\n optimizer = optimizers.Nadam(lr=hyperparams[\"learning_rate\"],\n schedule_decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n elif hyperparams['optimizer'] == 'adam':\n optimizer = 
optimizers.Adam(lr=hyperparams[\"learning_rate\"],\n decay=hyperparams[\"decay\"],\n clipnorm=hyperparams[\"clipnorm\"])\n # Save optimizer parameters to run doc\n run_doc['optimizer'] = hyperparams['optimizer']\n run_doc['opt_learning_rate'] = hyperparams[\"learning_rate\"]\n run_doc['opt_decay'] = hyperparams[\"decay\"]\n run_doc['opt_clipnorm'] = hyperparams[\"clipnorm\"]\n # Create and compile the model\n model = Model(input_tensor, output)\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n # Print out model summary and store inside run documentation as list of strings\n model.summary()\n run_doc['model_summary'] = []\n model.summary(print_fn=(lambda a: run_doc['model_summary'].append(a)))\n # Fit the model to the datasets\n self.log.info(\"Fitting model (eval %d of %d) ...\" % (self._eval_idx + 1, self._max_eval))\n self._eval_idx += 1\n model.fit(x=train_data, y=train_targets,\n batch_size=hyperparams['batch_size'],\n epochs=hyperparams['epochs'],\n validation_data=(test_data, test_targets),\n callbacks=callbacks,\n verbose=1)\n val_loss, val_acc = model.evaluate(x=test_data, y=test_targets, verbose=2)\n self.log.info(\" .... Completed!\")\n self.log.info(\" -- Evaluation time %ds\" % (time.time() - run_start_time))\n self.log.info(\" -- Total time %ds\" % (time.time() - self._start_time))\n # Save training parameters to run doc\n run_doc['batch_size'] = hyperparams['batch_size']\n run_doc['epochs'] = hyperparams['epochs']\n run_doc['val_loss'] = val_loss\n run_doc['val_acc'] = val_acc\n # Results are used to pick best pirate\n self._results[run_id] = val_loss\n # Save run_doc to pickle file in model directory\n run_doc_file_name = run_id + '.pickle'\n if not test_mode: # Don't save docs if in testing mode\n with open(os.path.join(self._models_dir, run_doc_file_name), 'wb') as f:\n pickle.dump(run_doc, f)\n self.log.info('Run Dictionary %s' % str(run_doc))\n # Delete the session to prevent GPU memory from getting full\n keras.backend.clear_session()\n # Optimizer minimizes validation loss\n return {'loss': val_loss, 'status': STATUS_OK}", "def _build_all_models(self):\r\n self.output_tensors = {}\r\n self.loss_terms = {}\r\n self.metrics = {}\r\n\r\n def _build_datasource_summaries(data_sources, mode):\r\n \"\"\"Register summary operations for input data from given data sources.\"\"\"\r\n with tf.variable_scope('%s_data' % mode):\r\n for data_source_name, data_source in data_sources.items():\r\n tensors = data_source.output_tensors\r\n for key, tensor in tensors.items():\r\n summary_name = '%s/%s' % (data_source_name, key)\r\n shape = tensor.shape.as_list()\r\n num_dims = len(shape)\r\n if num_dims == 4: # Image data\r\n if shape[1] == 1 or shape[1] == 3:\r\n self.summary.image(summary_name, tensor,\r\n data_format='channels_first')\r\n elif shape[3] == 1 or shape[3] == 3:\r\n self.summary.image(summary_name, tensor,\r\n data_format='channels_last')\r\n # TODO: fix issue with no summary otherwise\r\n elif num_dims == 2:\r\n self.summary.histogram(summary_name, tensor)\r\n else:\r\n logger.debug('I do not know how to create a summary for %s (%s)' %\r\n (summary_name, tensor.shape.as_list()))\r\n\r\n def _build_train_or_test(mode):\r\n data_sources = self._train_data if mode == 'train' else self._test_data\r\n\r\n # Build model\r\n output_tensors, loss_terms, metrics = self.build_model(data_sources, mode=mode)\r\n\r\n # Record important tensors\r\n self.output_tensors[mode] = output_tensors\r\n self.loss_terms[mode] = loss_terms\r\n 
self.metrics[mode] = metrics\r\n\r\n # Create summaries for scalars\r\n if mode == 'train':\r\n for name, loss_term in loss_terms.items():\r\n self.summary.scalar('loss/%s/%s' % (mode, name), loss_term)\r\n for name, metric in metrics.items():\r\n self.summary.scalar('metric/%s/%s' % (mode, name), metric)\r\n\r\n # Build the main model\r\n if len(self._train_data) > 0:\r\n _build_datasource_summaries(self._train_data, mode='train')\r\n _build_train_or_test(mode='train')\r\n logger.info('Built model.')\r\n\r\n # Print no. of parameters and lops\r\n flops = tf.profiler.profile(\r\n options=tf.profiler.ProfileOptionBuilder(\r\n tf.profiler.ProfileOptionBuilder.float_operation()\r\n ).with_empty_output().build())\r\n logger.info('------------------------------')\r\n logger.info(' Approximate Model Statistics ')\r\n logger.info('------------------------------')\r\n logger.info('FLOPS per input: {:,}'.format(flops.total_float_ops / self._batch_size))\r\n logger.info(\r\n 'Trainable Parameters: {:,}'.format(\r\n np.sum([np.prod(v.shape.as_list()) for v in tf.trainable_variables()])\r\n )\r\n )\r\n logger.info('------------------------------')\r\n\r\n # If there are any test data streams, build same model with different scope\r\n # Trainable parameters will be copied at test time\r\n if len(self._test_data) > 0:\r\n _build_datasource_summaries(self._test_data, mode='test')\r\n with tf.variable_scope('test'):\r\n _build_train_or_test(mode='test')\r\n logger.info('Built model for live testing.')\r\n\r\n if self._enable_live_testing:\r\n self._tester._post_model_build() # Create copy ops to be run before every test run\r", "def set_vanilla_model(self):\n logging.debug(\"Setting vanilla model\")\n # Build model\n\n ## Embedding Layer\n word_embedding_layer = self.embed_word()\n pos_embedding_layer = self.embed_pos()\n\n ## Deep layers\n latent_layers = self.stack_latent_layers(self.num_of_latent_layers)\n\n ## Dropout\n dropout = Dropout(self.pred_dropout)\n\n ## Prediction\n predict_layer = self.predict_classes()\n\n ## Prepare input features, and indicate how to embed them\n inputs_and_embeddings = [(Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"word_inputs\"),\n word_embedding_layer),\n (Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"predicate_inputs\"),\n word_embedding_layer),\n (Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"postags_inputs\"),\n pos_embedding_layer),\n ]\n\n ## Concat all inputs and run on deep network\n output = predict_layer(dropout(latent_layers(merge([embed(inp)\n for inp, embed in inputs_and_embeddings],\n mode = \"concat\",\n concat_axis = -1))))\n\n # Build model\n self.model = Model(input = map(itemgetter(0), inputs_and_embeddings),\n output = [output])\n\n # Loss\n self.model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n self.model.summary()\n\n # Save model json to file\n self.save_model_to_file(os.path.join(self.model_dir, \"model.json\"))", "def __init__(self, conf):\n self.model_conf = conf[\"model\"]\n self.epochs = self.model_conf.getint(\"n_epochs\")\n self.epoch = self.model_conf.getint(\"epoch_start\")\n self.batch_size = self.model_conf.getint(\"batch_size\")\n self.criterion = nn.CrossEntropyLoss()\n self.device = torch.device(self.model_conf.get('device'))\n #self.model = (\n # eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n #)\n self.model = nn.DataParallel(\n eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n 
)\n total_params = sum(p.numel() for p in self.model.parameters())\n print(\"Created model {}: {} parameters\"\n .format(self.model_conf.get('name'), total_params))\n if self.model_conf.get(\"optim\") == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n momentum=self.model_conf.getfloat(\"momentum\"),\n weight_decay=self.model_conf.getfloat(\"weight_decay\"))\n elif self.model_conf.get(\"optim\") == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n betas=json.loads(self.model_conf.get(\"betas\")))\n else:\n raise ValueError('Only SGD is supported')\n\n if self.model_conf.get(\"checkpoint\") is not None:\n self.load_checkpoint(self.model_conf.get(\"checkpoint\"))\n\n self.checkpoints_path = conf.get(\"paths\", \"checkpoints\")\n self.results_path = conf.get(\"paths\", \"results\")\n self.best_accuracy = 0\n self.train_size = None\n self.valid_size = None\n self.iteration_print_freq = conf.getint(\"log\", \"iteration_print_freq\")", "def __create_model(self, classes):\r\n # self._model = model_zoo.get_model(model_name, classes=classes, pretrained_base=True)\r\n # self._model = model_zoo.get_model(model_name, classes=classes, pretrained=True)\r\n # self._model.reset_class(classes, reuse_weights=[cname for cname in classes if cname in self._model.classes])\r\n if self._model is None or classes != self.classes:\r\n model_name = 'ssd_{}_{}_custom'.format(self.img_size, self.backbone)\r\n self._model = model_zoo.get_model(model_name, classes=classes, pretrained=False, pretrained_base=True,\r\n root=self.temp_path)\r\n with warnings.catch_warnings(record=True):\r\n warnings.simplefilter(\"always\")\r\n self._model.initialize()\r\n self._model.collect_params().reset_ctx(self.ctx)\r\n _, _, _ = self._model(mx.nd.zeros((1, 3, self.img_size, self.img_size), self.ctx))\r\n\r\n self._model.reset_class(classes)\r\n self.classes = classes", "def model_setup(params):\n n_classes = len(classes_config.training_ids)\n if general_config.model_id == constants.ssdlite:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list)\n elif general_config.model_id == constants.ssd:\n model = resnet_ssd.SSD300(n_classes=n_classes)\n elif general_config.model_id == constants.ssd_modified:\n model = SSDLite.SSD_Head(n_classes=n_classes, k_list=anchor_config.k_list,\n out_channels=params.out_channels, width_mult=params.width_mult)\n model.to(general_config.device)\n\n return model", "def config_task(self) -> None:\n weights = self.hyperparams[\"weights\"]\n\n if self.hyperparams[\"model\"] == \"unet\":\n self.model = smp.Unet(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=self.hyperparams[\"backbone\"],\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n )\n elif self.hyperparams[\"model\"] == \"fcn\":\n self.model = FCN(\n in_channels=self.hyperparams[\"in_channels\"],\n classes=self.hyperparams[\"num_classes\"],\n num_filters=self.hyperparams[\"num_filters\"],\n )\n else:\n raise ValueError(\n f\"Model type '{self.hyperparams['model']}' is not valid. 
\"\n f\"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'.\"\n )\n\n if self.hyperparams[\"loss\"] == \"ce\":\n ignore_value = -1000 if self.ignore_index is None else self.ignore_index\n\n class_weights = None\n if isinstance(self.class_weights, torch.Tensor):\n class_weights = self.class_weights.to(dtype=torch.float32)\n elif hasattr(self.class_weights, \"__array__\") or self.class_weights:\n class_weights = torch.tensor(self.class_weights, dtype=torch.float32)\n\n self.loss = nn.CrossEntropyLoss(\n ignore_index=ignore_value, weight=class_weights\n )\n elif self.hyperparams[\"loss\"] == \"jaccard\":\n self.loss = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hyperparams[\"num_classes\"]\n )\n elif self.hyperparams[\"loss\"] == \"focal\":\n self.loss = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=self.ignore_index, normalized=True\n )\n else:\n raise ValueError(\n f\"Loss type '{self.hyperparams['loss']}' is not valid. \"\n f\"Currently, supports 'ce', 'jaccard' or 'focal' loss.\"\n )\n\n if self.hyperparams[\"model\"] != \"fcn\":\n if weights and weights is not True:\n if isinstance(weights, WeightsEnum):\n state_dict = weights.get_state_dict(progress=True)\n elif os.path.exists(weights):\n _, state_dict = utils.extract_backbone(weights)\n else:\n state_dict = get_weight(weights).get_state_dict(progress=True)\n self.model.encoder.load_state_dict(state_dict)\n\n # Freeze backbone\n if self.hyperparams.get(\"freeze_backbone\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.encoder.parameters():\n param.requires_grad = False\n\n # Freeze decoder\n if self.hyperparams.get(\"freeze_decoder\", False) and self.hyperparams[\n \"model\"\n ] in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.decoder.parameters():\n param.requires_grad = False", "def __init__(self, model_type, model_cfg, training_cfg):\n super().__init__()\n self.save_hyperparameters()\n\n self.model_cfg = model_cfg\n self.training_cfg = training_cfg\n \n if model_type == \"ConvLSTM\":\n self.model = Conv_LSTM(input_dim=self.model_cfg[\"input_channels\"],\n output_dim=self.model_cfg[\"output_channels\"],\n hidden_dims=self.model_cfg[\"hidden_channels\"],\n big_mem=self.model_cfg[\"big_mem\"],\n num_layers=self.model_cfg[\"n_layers\"],\n kernel_size=self.model_cfg[\"kernel\"],\n memory_kernel_size=self.model_cfg[\"memory_kernel\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n baseline=self.training_cfg[\"baseline\"],\n layer_norm_flag=self.model_cfg[\"layer_norm\"],\n img_width=self.model_cfg[\"img_width\"],\n img_height=self.model_cfg[\"img_height\"],\n peephole=self.model_cfg[\"peephole\"])\n elif model_type == \"AutoencLSTM\":\n self.model = AutoencLSTM(input_dim=self.model_cfg[\"input_channels\"],\n output_dim=self.model_cfg[\"output_channels\"],\n hidden_dims=self.model_cfg[\"hidden_channels\"],\n big_mem=self.model_cfg[\"big_mem\"],\n num_layers=self.model_cfg[\"n_layers\"],\n kernel_size=self.model_cfg[\"kernel\"],\n memory_kernel_size=self.model_cfg[\"memory_kernel\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n baseline=self.training_cfg[\"baseline\"],\n layer_norm_flag=self.model_cfg[\"layer_norm\"],\n img_width=self.model_cfg[\"img_width\"],\n img_height=self.model_cfg[\"img_height\"],\n peephole=self.model_cfg[\"peephole\"])\n elif model_type == \"ConvTransformer\":\n self.model = ENS_Conv_Transformer(num_hidden=self.model_cfg[\"num_hidden\"],\n output_dim=self.model_cfg[\"output_channels\"],\n 
depth=self.model_cfg[\"depth\"],\n dilation_rate=self.model_cfg[\"dilation_rate\"],\n num_conv_layers=self.model_cfg[\"num_conv_layers\"],\n kernel_size=self.model_cfg[\"kernel_size\"],\n img_width=self.model_cfg[\"img_width\"],\n non_pred_channels=self.model_cfg[\"non_pred_channels\"],\n num_layers_query_feat=self.model_cfg[\"num_layers_query_feat\"],\n in_channels=self.model_cfg[\"in_channels\"],\n baseline=self.training_cfg[\"baseline\"])\n self.baseline = self.training_cfg[\"baseline\"]\n self.future_training = self.training_cfg[\"future_training\"]\n self.learning_rate = self.training_cfg[\"start_learn_rate\"]\n self.training_loss = get_loss_from_name(self.training_cfg[\"training_loss\"])\n self.test_loss = get_loss_from_name(self.training_cfg[\"test_loss\"])", "def model_setup(self):\n self.input_a = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_A\")\n self.input_b = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_B\")\n\n self.fake_pool_A = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_A\")\n self.fake_pool_B = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_B\")\n\n self.global_step = slim.get_or_create_global_step()\n\n self.num_fake_inputs = 0\n\n self.learning_rate = tf.placeholder(tf.float32, shape=[], name=\"lr\")\n\n inputs = {\n 'images_a': self.input_a,\n 'images_b': self.input_b,\n 'fake_pool_a': self.fake_pool_A,\n 'fake_pool_b': self.fake_pool_B,\n }\n\n outputs = model.get_outputs(\n inputs, network=self._network_version, skip=self._skip)\n\n self.prob_real_a_is_real = outputs['prob_real_a_is_real']\n self.prob_real_b_is_real = outputs['prob_real_b_is_real']\n self.fake_images_a = outputs['fake_images_a']\n self.fake_images_b = outputs['fake_images_b']\n self.prob_fake_a_is_real = outputs['prob_fake_a_is_real']\n self.prob_fake_b_is_real = outputs['prob_fake_b_is_real']\n\n self.cycle_images_a = outputs['cycle_images_a']\n self.cycle_images_b = outputs['cycle_images_b']\n\n self.prob_fake_pool_a_is_real = outputs['prob_fake_pool_a_is_real']\n self.prob_fake_pool_b_is_real = outputs['prob_fake_pool_b_is_real']", "def create_model(input_shape, n_classes, optimizer='rmsprop', fine_tune=0, n_model=1):\r\n # Pretrained convolutional layers are loaded using the Imagenet weights.\r\n # Include_top is set to False, in order to exclude the model's fully-connected layers.\r\n if n_model == 4:\r\n conv_base = VGG19(include_top=False,\r\n weights='imagenet', \r\n input_shape=input_shape)\r\n else:\r\n conv_base = VGG16(include_top=False,\r\n weights='imagenet', \r\n input_shape=input_shape)\r\n \r\n # Defines how many layers to freeze during training.\r\n # Layers in the convolutional base are switched from trainable to non-trainable\r\n # depending on the size of the fine-tuning parameter.\r\n if fine_tune > 0:\r\n for layer in conv_base.layers[:-fine_tune]:\r\n layer.trainable = False\r\n else:\r\n for layer in conv_base.layers:\r\n layer.trainable = False\r\n\r\n # Create a new 'top' of the model (i.e. 
fully-connected layers).\r\n # This is 'bootstrapping' a new top_model onto the pretrained layers.\r\n top_model = conv_base.output\r\n top_model = Flatten(name=\"flatten\")(top_model)\r\n if n_model == 1 or n_model == 2:\r\n top_model = Dense(4096, activation='relu')(top_model)\r\n top_model = Dense(1072, activation='relu')(top_model)\r\n\r\n\r\n if n_model == 3 or n_model == 4:\r\n top_model = Dense(4096, activation='relu')(top_model)\r\n top_model = Dense(1024, activation='relu')(top_model)\r\n top_model = Dense(256, activation='relu')(top_model)\r\n top_model = Dense(64, activation='relu')(top_model)\r\n\r\n if n_model == 5: \r\n top_model = Dense(4096, activation='relu')(top_model)\r\n top_model = Dense(2048, activation='relu')(top_model)\r\n top_model = Dense(1024, activation='relu')(top_model)\r\n top_model = Dense(512, activation='relu')(top_model)\r\n top_model = Dense(256, activation='relu')(top_model)\r\n top_model = Dense(128, activation='relu')(top_model)\r\n top_model = Dense(64, activation='relu')(top_model)\r\n top_model = Dense(32, activation='relu')(top_model)\r\n \r\n top_model = Dropout(0.2)(top_model)\r\n output_layer = Dense(n_classes, activation='softmax')(top_model)\r\n \r\n # Group the convolutional base and new fully-connected layers into a Model object.\r\n model = Model(inputs=conv_base.input, outputs=output_layer)\r\n\r\n # Compiles the model for training.\r\n model.compile(optimizer=optimizer, \r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n \r\n return model", "def build_model(self):\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n if self.config.optimizer == 'sgd':\n self.optimizer = tf.keras.optimizers.SGD(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'rms':\n self.optimizer = tf.keras.optimizers.RMSprop(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adam':\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adagrad':\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=self.config.learning_rate)\n elif self.config.optimizer == 'adadelta':\n self.optimizer = tf.keras.optimizers.Adadelta(learning_rate=self.config.learning_rate)\n else:\n raise NotImplementedError(\"No support for %s optimizer\" % self.config.optimizer)\n \n if self.config.optimizer in ['rms', 'adagrad', 'adadelta']:\n with tf.device('cpu:0'):\n self.model.def_parameters()\n else:\n self.model.def_parameters()\n\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)", "def buildFirstModel():\n model = build(IMAGE_HEIGHT, IMAGE_WIDTH, 3, y.shape[1], finalAct=\"sigmoid\")\n opt = Adam(lr=INIT_LE, decay=INIT_LE / EPOCHS)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=opt, metrics=[\"acc\"])", "def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"))\n cfg.merge_from_list(args.opts)\n\n # configs for training\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TRAIN = (\"vidor_small_train\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TRAIN = (\"vidor_small_10imgs_train\",)\n else:\n cfg.DATASETS.TRAIN = (\"vidor_large_train\",)\n # cfg.DATALOADER.NUM_WORKERS = 2\n if not args.eval_only:\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\") # Let training initialize from model 
zoo\n factor = 4\n cfg.SOLVER.IMS_PER_BATCH = 16 * factor\n cfg.SOLVER.BASE_LR = 0.0001 * factor # finetune using 10x smaller base_lr\n cfg.SOLVER.MAX_ITER = 270000 // factor \n cfg.SOLVER.STEPS = [210000 // factor, 250000 // factor]\n # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # default: 512\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 78\n\n # configs for testing\n # cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TEST = (\"vidor_small_val\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TEST = (\"vidor_small_10imgs_val\",)\n else:\n cfg.DATASETS.TEST = (\"vidor_large_val\",)\n # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n\n # cfg.OUTPUT_DIR = './output/train_vidor_with_pseudo_labels'\n \n \n if not args.eval_only:\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "def build_model():\n model_weights = np.load(WEIGHTS_PATH, encoding='latin1').item()\n model = Sequential()\n model.add(InputLayer(batch_input_shape=(1, None, 1)))\n\n filter_parameters = [\n {'name': 'conv1', 'num_filters': 16, 'padding': 32,\n 'kernel_size': 64, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv2', 'num_filters': 32, 'padding': 16,\n 'kernel_size': 32, 'conv_strides': 2,\n 'pool_size': 8, 'pool_strides': 8},\n\n {'name': 'conv3', 'num_filters': 64, 'padding': 8,\n 'kernel_size': 16, 'conv_strides': 2},\n\n {'name': 'conv4', 'num_filters': 128, 'padding': 4,\n 'kernel_size': 8, 'conv_strides': 2},\n\n {'name': 'conv5', 'num_filters': 256, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2,\n 'pool_size': 4, 'pool_strides': 4},\n\n {'name': 'conv6', 'num_filters': 512, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv7', 'num_filters': 1024, 'padding': 2,\n 'kernel_size': 4, 'conv_strides': 2},\n\n {'name': 'conv8_2', 'num_filters': 401, 'padding': 0,\n 'kernel_size': 8, 'conv_strides': 2},\n ]\n\n for x in filter_parameters:\n model.add(ZeroPadding1D(padding=x['padding']))\n model.add(Conv1D(x['num_filters'],\n kernel_size=x['kernel_size'],\n strides=x['conv_strides'],\n padding='valid'))\n weights = model_weights[x['name']]['weights'].reshape(model.layers[-1].get_weights()[0].shape)\n biases = model_weights[x['name']]['biases']\n\n model.layers[-1].set_weights([weights, biases])\n\n if 'conv8' not in x['name']:\n gamma = model_weights[x['name']]['gamma']\n beta = model_weights[x['name']]['beta']\n mean = model_weights[x['name']]['mean']\n var = model_weights[x['name']]['var']\n\n model.add(BatchNormalization())\n model.layers[-1].set_weights([gamma, beta, mean, var])\n model.add(Activation('relu'))\n if 'pool_size' in x:\n model.add(MaxPooling1D(pool_size=x['pool_size'],\n strides=x['pool_strides'],\n padding='valid'))\n\n #\n return Model(inputs=model.input, outputs=model.get_layer('activation_7').output)", "def create_org_model( width=28, \r\n height=28, channel=1, verbose=True,epochs=10):\r\n input1 = Input(\r\n shape=(\r\n width,\r\n height,\r\n channel,\r\n ), name='concat_input')\r\n conv1 = Conv2D(32, kernel_size=5, activation='relu', padding='same')\r\n conv2 = Conv2D(32, kernel_size=5, activation='relu', padding='same')\r\n conv3 = Conv2D(64, kernel_size=3, activation='relu', padding='same')\r\n conv4 = Conv2D(64, kernel_size=3, activation='relu', padding='same')\r\n dense1 = Dense(256, activation='relu')\r\n predict = Dense(10, activation='softmax')\r\n\r\n 
conv1o = conv1(input1)\r\n conv2o = conv2(conv1o)\r\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv2o)\r\n drop1 = Dropout(.25)(pool1)\r\n conv3o = conv3(drop1)\r\n conv4o = conv4(conv3o)\r\n pool2 = MaxPooling2D(pool_size=(2, 2), strides=(2,2))(conv4o)\r\n drop2 = Dropout(.25)(pool2)\r\n drop2f = Flatten()(drop2)\r\n fc1 = dense1(drop2f)\r\n softmax1 = predict(fc1)\r\n\r\n drop2_2 = Input(shape=(7,7,64), name='concat_input') \r\n drop2f_2 = Flatten()(drop2_2)\r\n fc1_2 = dense1(drop2f_2)\r\n softmax1_2 = predict(fc1_2)\r\n\r\n mlp = Model(input1, softmax1)\r\n optimizer = tf.keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)\r\n mlp.compile(\r\n loss='sparse_categorical_crossentropy',\r\n optimizer=optimizer,\r\n metrics=['accuracy'])\r\n\r\n\r\n mlp.load_weights(model_dir+'complete_model.h5')\r\n\r\n for layer in mlp.layers:\r\n layer.trainable = False\r\n\r\n feature_model = Model(input1, drop2)\r\n predict_model = Model(drop2_2, softmax1_2)\r\n\r\n return feature_model, predict_model, mlp", "def main():\n\n args = get_arguments()\n\n w, h = map(int, args.input_size.split(','))\n\n config_path = os.path.join(os.path.dirname(args.restore_from),'opts.yaml')\n with open(config_path, 'r') as stream:\n config = yaml.load(stream)\n\n args.model = config['model']\n print('ModelType:%s'%args.model)\n print('NormType:%s'%config['norm_style'])\n gpu0 = args.gpu\n batchsize = args.batchsize\n\n model_name = os.path.basename( os.path.dirname(args.restore_from) )\n #args.save += model_name\n\n if not os.path.exists(args.save):\n os.makedirs(args.save)\n confidence_path = os.path.join(args.save, 'submit/confidence')\n label_path = os.path.join(args.save, 'submit/labelTrainIds')\n label_invalid_path = os.path.join(args.save, 'submit/labelTrainIds_invalid')\n for path in [confidence_path, label_path, label_invalid_path]:\n if not os.path.exists(path):\n os.makedirs(path)\n\n if args.model == 'DeepLab':\n model = DeeplabMulti(num_classes=args.num_classes, use_se = config['use_se'], train_bn = False, norm_style = config['norm_style'])\n elif args.model == 'Oracle':\n model = Res_Deeplab(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_ORC\n elif args.model == 'DeeplabVGG':\n model = DeeplabVGG(num_classes=args.num_classes)\n if args.restore_from == RESTORE_FROM:\n args.restore_from = RESTORE_FROM_VGG\n\n if args.restore_from[:4] == 'http' :\n saved_state_dict = model_zoo.load_url(args.restore_from)\n else:\n saved_state_dict = torch.load(args.restore_from)\n\n try:\n model.load_state_dict(saved_state_dict)\n except:\n model = torch.nn.DataParallel(model)\n model.load_state_dict(saved_state_dict)\n model.eval()\n model.cuda(gpu0)\n\n testloader = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(h, w), resize_size=(w, h), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n scale = 1.25\n testloader2 = data.DataLoader(DarkZurichDataSet(args.data_dir, args.data_list, crop_size=(round(h*scale), round(w*scale) ), resize_size=( round(w*scale), round(h*scale)), mean=IMG_MEAN, scale=False, mirror=False, set=args.set),\n batch_size=batchsize, shuffle=False, pin_memory=True, num_workers=4)\n\n\n if version.parse(torch.__version__) >= version.parse('0.4.0'):\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)\n else:\n interp = nn.Upsample(size=(1080, 1920), mode='bilinear')\n\n sm = torch.nn.Softmax(dim = 1)\n 
log_sm = torch.nn.LogSoftmax(dim = 1)\n kl_distance = nn.KLDivLoss( reduction = 'none')\n prior = np.load('./utils/prior_all.npy').transpose((2,0,1))[np.newaxis, :, :, :]\n prior = torch.from_numpy(prior)\n for index, img_data in enumerate(zip(testloader, testloader2) ):\n batch, batch2 = img_data\n image, _, name = batch\n image2, _, name2 = batch2\n\n inputs = image.cuda()\n inputs2 = image2.cuda()\n print('\\r>>>>Extracting feature...%04d/%04d'%(index*batchsize, args.batchsize*len(testloader)), end='')\n if args.model == 'DeepLab':\n with torch.no_grad():\n output1, output2 = model(inputs)\n output_batch = interp(sm(0.5* output1 + output2))\n\n heatmap_batch = torch.sum(kl_distance(log_sm(output1), sm(output2)), dim=1)\n\n output1, output2 = model(fliplr(inputs))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs\n\n output1, output2 = model(inputs2)\n output_batch += interp(sm(0.5* output1 + output2))\n output1, output2 = model(fliplr(inputs2))\n output1, output2 = fliplr(output1), fliplr(output2)\n output_batch += interp(sm(0.5 * output1 + output2))\n del output1, output2, inputs2\n ratio = 0.95\n output_batch = output_batch.cpu() / 4\n # output_batch = output_batch *(ratio + (1 - ratio) * prior)\n output_batch = output_batch.data.numpy()\n heatmap_batch = heatmap_batch.cpu().data.numpy()\n elif args.model == 'DeeplabVGG' or args.model == 'Oracle':\n output_batch = model(Variable(image).cuda())\n output_batch = interp(output_batch).cpu().data.numpy()\n\n output_batch = output_batch.transpose(0,2,3,1)\n score_batch = np.max(output_batch, axis=3)\n output_batch = np.asarray(np.argmax(output_batch, axis=3), dtype=np.uint8)\n\n threshold = 0.3274\n for i in range(output_batch.shape[0]):\n output_single = output_batch[i,:,:]\n output_col = colorize_mask(output_single)\n output = Image.fromarray(output_single)\n\n name_tmp = name[i].split('/')[-1]\n dir_name = name[i].split('/')[-2]\n save_path = args.save + '/' + dir_name\n if not os.path.isdir(save_path):\n os.mkdir(save_path)\n output.save('%s/%s' % (save_path, name_tmp))\n print('%s/%s' % (save_path, name_tmp))\n output_col.save('%s/%s_color.png' % (save_path, name_tmp.split('.')[0]))\n\n # heatmap_tmp = heatmap_batch[i,:,:]/np.max(heatmap_batch[i,:,:])\n # fig = plt.figure()\n # plt.axis('off')\n # heatmap = plt.imshow(heatmap_tmp, cmap='viridis')\n # fig.colorbar(heatmap)\n # fig.savefig('%s/%s_heatmap.png' % (save_path, name_tmp.split('.')[0]))\n\n if args.set == 'test' or args.set == 'val':\n # label\n output.save('%s/%s' % (label_path, name_tmp))\n # label invalid\n output_single[score_batch[i, :, :] < threshold] = 255\n output = Image.fromarray(output_single)\n output.save('%s/%s' % (label_invalid_path, name_tmp))\n # conficence\n\n confidence = score_batch[i, :, :] * 65535\n confidence = np.asarray(confidence, dtype=np.uint16)\n print(confidence.min(), confidence.max())\n iio.imwrite('%s/%s' % (confidence_path, name_tmp), confidence)\n\n return args.save", "def run_defaultmodel(cls):\n\n model_params = {\n\n 'width': 200,\n\n 'height': 100,\n\n 'pop_total': 700,\n\n 'entrances': 3,\n\n 'entrance_space': 2,\n\n 'entrance_speed': .1,\n\n 'exits': 2,\n\n 'exit_space': 1,\n\n 'speed_min': .1,\n\n 'speed_desire_mean': 1,\n\n 'speed_desire_std': 1,\n\n 'separation': 2,\n\n 'batch_iterations': 900,\n\n 'do_save': True,\n\n 'do_ani': False,\n\n }\n\n # Run the model\n\n Model(model_params).batch()", "def build_modules(self):\n self.backbone = 
Backbone(\n self.configs['backbone'],\n freeze_backbone=self.configs['freeze_backbone'],\n freeze_batchnorm=True\n )\n\n backbone_channel_sizes = get_backbone_channel_sizes(self.backbone)\n\n self.fpn = FeaturePyramidNetwork(\n backbone_channel_sizes=backbone_channel_sizes,\n min_feature_level=self.configs['min_feature_level'],\n max_feature_level=self.configs['max_feature_level'],\n feature_size=self.configs['pyramid_feature_size']\n )\n\n self.shared_conv_model = SharedConvModel(\n input_feature_size=self.configs['pyramid_feature_size'],\n feature_size=self.configs['shared_conv_feature_size'],\n num_layers=self.configs['shared_conv_num_layers']\n )\n\n if self.configs['shared_conv_num_layers'] > 0:\n shared_conv_output_size = self.configs['shared_conv_feature_size']\n else:\n shared_conv_output_size = self.configs['pyramid_feature_size']\n\n self.ofn = ObjectFinderNetwork(\n input_feature_size=shared_conv_output_size,\n feature_size=self.configs['finder_feature_size'],\n num_layers=self.configs['finder_num_layers']\n )\n\n self.ofn_loss_fn\n\n # self.classification_model = ClassificationModel()\n #\n # self.regression_model = RegressionModel()", "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n 
callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def build_model(encoders):\n\n # Pclass\n input_pclass_size = len(encoders['pclass_encoder'].classes_)\n input_pclass = Input(shape=(\n input_pclass_size if input_pclass_size != 2 else 1,), name=\"input_pclass\")\n\n # Sex\n input_sex_size = len(encoders['sex_encoder'].classes_)\n input_sex = Input(\n shape=(input_sex_size if input_sex_size != 2 else 1,), name=\"input_sex\")\n\n # Age\n input_age = Input(shape=(10,), name=\"input_age\")\n\n # Siblings/Spouses Aboard\n input_siblings_spouses_aboard_size = len(\n encoders['siblings_spouses_aboard_encoder'].classes_)\n input_siblings_spouses_aboard = Input(shape=(\n input_siblings_spouses_aboard_size if input_siblings_spouses_aboard_size != 2 else 1,), name=\"input_siblings_spouses_aboard\")\n\n # Parents/Children Aboard\n input_parents_children_aboard_size = len(\n encoders['parents_children_aboard_encoder'].classes_)\n input_parents_children_aboard = Input(shape=(\n input_parents_children_aboard_size if input_parents_children_aboard_size != 2 else 1,), name=\"input_parents_children_aboard\")\n\n # Fare\n input_fare = Input(shape=(10,), name=\"input_fare\")\n\n # Combine all the inputs into a single layer\n concat = concatenate([\n input_pclass,\n input_sex,\n input_age,\n input_siblings_spouses_aboard,\n input_parents_children_aboard,\n input_fare\n ], name=\"concat\")\n\n # Multilayer Perceptron (MLP) to find interactions between all inputs\n hidden = Dense(256, activation=\"relu\", name=\"hidden_1\",\n kernel_regularizer=l2(1e-3))(concat)\n hidden = BatchNormalization(name=\"bn_1\")(hidden)\n hidden = Dropout(0.0, name=\"dropout_1\")(hidden)\n\n for i in range(2-1):\n hidden = Dense(64, activation=\"relu\", name=\"hidden_{}\".format(\n i+2), kernel_regularizer=l2(1e-3))(hidden)\n hidden = BatchNormalization(name=\"bn_{}\".format(i+2))(hidden)\n hidden = Dropout(0.0, name=\"dropout_{}\".format(i+2))(hidden)\n\n output = Dense(1, activation=\"sigmoid\", name=\"output\",\n kernel_regularizer=None)(hidden)\n\n # Build and compile the model.\n model = Model(inputs=[\n input_pclass,\n input_sex,\n input_age,\n input_siblings_spouses_aboard,\n input_parents_children_aboard,\n input_fare\n ],\n outputs=[output])\n model.compile(loss=\"binary_crossentropy\",\n optimizer=AdamWOptimizer(learning_rate=0.1,\n weight_decay=0.05))\n\n return model", "def build_model(self):\n \n # initalizing generators\n self.g12 = G12(conv_dim=self.numGenFilter, domainA_channels = self.domainA_channels, domainB_channels = self.domainB_channels)\n self.g21 = G21(conv_dim=self.numGenFilter, domainA_channels = self.domainA_channels, domainB_channels = self.domainB_channels)\n \n # initializing discriminators\n self.d1 = D1(conv_dim=self.numDiscFilter, domainA_channels = self.domainA_channels, use_labels=self.use_labels)\n self.d2 = D2(conv_dim=self.numDiscFilter, domainB_channels = self.domainB_channels, use_labels=self.use_labels)\n \n\n g_params = list(self.g12.parameters()) + list(self.g21.parameters())\n d_params = list(self.d1.parameters()) + list(self.d2.parameters())\n \n self.g_optimizer = optim.Adam(g_params, self.lr, [self.beta1, self.beta2])\n self.d_optimizer = optim.Adam(d_params, self.lr, [self.beta1, self.beta2])\n \n if 
torch.cuda.is_available():\n self.g12.cuda()\n self.g21.cuda()\n self.d1.cuda()\n self.d2.cuda()", "def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return", "def construct_model(self):\n # Set the placeholder for the input episode\n self.inputa = tf.placeholder(tf.float32) # episode train images\n self.inputb = tf.placeholder(tf.float32) # episode test images\n self.labela = tf.placeholder(tf.float32) # episode train labels\n self.labelb = tf.placeholder(tf.float32) # episode test labels\n\n with tf.variable_scope('meta-model', reuse=None) as training_scope:\n # construct the model weights\n self.ss_weights = ss_weights = self.construct_resnet_ss_weights()\n self.weights = weights = self.construct_resnet_weights()\n self.fc_weights = fc_weights = self.construct_fc_weights()\n\n # Load base epoch number from 
FLAGS\n num_updates = FLAGS.train_base_epoch_num\n\n def task_metalearn(inp, reuse=True):\n \"\"\"The function to process one episode in a meta-batch.\n Args:\n inp: the input episode.\n reuse: whether reuse the variables for the normalization.\n Returns:\n A serious outputs like losses and accuracies.\n \"\"\"\n # Seperate inp to different variables\n inputa, inputb, labela, labelb = inp\n # Generate empty list to record losses\n lossa_list = [] # Base train loss list\n lossb_list = [] # Base test loss list\n\n # Embed the input images to embeddings with ss weights\n emb_outputa = self.forward_resnet(inputa, weights, ss_weights, reuse=reuse) # Embed episode train \n emb_outputb = self.forward_resnet(inputb, weights, ss_weights, reuse=True) # Embed episode test \n\n # Run the first epoch of the base learning\n # Forward fc layer for episode train \n outputa = self.forward_fc(emb_outputa, fc_weights)\n # Calculate base train loss\n lossa = self.loss_func(outputa, labela)\n # Record base train loss\n lossa_list.append(lossa)\n # Forward fc layer for episode test\n outputb = self.forward_fc(emb_outputb, fc_weights)\n # Calculate base test loss\n lossb = self.loss_func(outputb, labelb)\n # Record base test loss\n lossb_list.append(lossb) \n # Calculate the gradients for the fc layer \n grads = tf.gradients(lossa, list(fc_weights.values()))\n gradients = dict(zip(fc_weights.keys(), grads))\n # Use graient descent to update the fc layer\n fast_fc_weights = dict(zip(fc_weights.keys(), [fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fc_weights.keys()]))\n \n for j in range(num_updates - 1):\n # Run the following base epochs, these are similar to the first base epoch\n lossa = self.loss_func(self.forward_fc(emb_outputa, fast_fc_weights), labela)\n lossa_list.append(lossa)\n lossb = self.loss_func(self.forward_fc(emb_outputb, fast_fc_weights), labelb)\n lossb_list.append(lossb) \n grads = tf.gradients(lossa, list(fast_fc_weights.values()))\n gradients = dict(zip(fast_fc_weights.keys(), grads))\n fast_fc_weights = dict(zip(fast_fc_weights.keys(), [fast_fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fast_fc_weights.keys()]))\n\n # Calculate final episode test predictions\n outputb = self.forward_fc(emb_outputb, fast_fc_weights)\n # Calculate the final episode test loss, it is the loss for the episode on meta-train \n final_lossb = self.loss_func(outputb, labelb)\n # Calculate the final episode test accuarcy\n accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))\n\n # Reorganize all the outputs to a list\n task_output = [final_lossb, lossb_list, lossa_list, accb]\n\n return task_output\n\n # Initial the batch normalization weights\n if FLAGS.norm is not 'None':\n unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)\n\n # Set the dtype of the outputs\n out_dtype = [tf.float32, [tf.float32]*num_updates, [tf.float32]*num_updates, tf.float32]\n\n # Run two episodes for a meta batch using parallel setting\n result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), \\\n dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)\n # Seperate the outputs to different variables\n lossb, lossesb, lossesa, accsb = result\n\n # Set the variables to output from the tensorflow graph\n self.total_loss = total_loss = tf.reduce_sum(lossb) / tf.to_float(FLAGS.meta_batch_size)\n self.total_accuracy = total_accuracy = tf.reduce_sum(accsb) / 
tf.to_float(FLAGS.meta_batch_size)\n self.total_lossa = total_lossa = [tf.reduce_sum(lossesa[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]\n self.total_lossb = total_lossb = [tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]\n\n # Set the meta-train optimizer\n optimizer = tf.train.AdamOptimizer(self.meta_lr)\n self.metatrain_op = optimizer.minimize(total_loss, var_list=list(ss_weights.values()) + list(fc_weights.values()))\n\n # Set the tensorboard \n self.training_summaries = []\n self.training_summaries.append(tf.summary.scalar('Meta Train Loss', (total_loss / tf.to_float(FLAGS.metatrain_epite_sample_num))))\n self.training_summaries.append(tf.summary.scalar('Meta Train Accuracy', total_accuracy))\n for j in range(num_updates):\n self.training_summaries.append(tf.summary.scalar('Base Train Loss Step' + str(j+1), total_lossa[j]))\n for j in range(num_updates):\n self.training_summaries.append(tf.summary.scalar('Base Val Loss Step' + str(j+1), total_lossb[j]))\n\n self.training_summ_op = tf.summary.merge(self.training_summaries)\n\n self.input_val_loss = tf.placeholder(tf.float32)\n self.input_val_acc = tf.placeholder(tf.float32)\n self.val_summaries = []\n self.val_summaries.append(tf.summary.scalar('Meta Val Loss', self.input_val_loss))\n self.val_summaries.append(tf.summary.scalar('Meta Val Accuracy', self.input_val_acc))\n self.val_summ_op = tf.summary.merge(self.val_summaries)", "def setupNetwork(self):\n\t\tin_layer = Input(shape=(28, ))\n\t\td1 = Dense(40, activation='relu')(in_layer)\n\t\td2 = Dense(10, activation='relu')(d1)\n\t\tout = Dense(1, activation='sigmoid')(d2)\n\n\t\tself.model = tf.keras.Model(inputs=in_layer, outputs=out)", "def model_setup(self):\n self.DNN = SganMLP(self.settings.number_of_bins)\n self.D = SganMLP(self.settings.number_of_bins)\n self.G = Generator()", "def build_model():\n with tf.name_scope('placeholders'):\n real_data_int = tf.placeholder(tf.int32, shape=[None, picture_size])\n x_true = 2 * ((tf.cast(real_data_int, tf.float32) / 255.) 
- .5)\n z = tf.placeholder(tf.float32, [None, input_dim])\n if use_JL:\n JL = tf.placeholder(tf.float32, [d_last_layer_size, JL_dim])\n P_non_normalized = tf.placeholder(tf.float32, [JL_dim, n_projections])\n P_non_normalized_SWD = tf.placeholder(tf.float32, [picture_size, n_projections])\n else:\n JL = None\n P_non_normalized = tf.placeholder(tf.float32, [d_last_layer_size, n_projections])\n P_non_normalized_SWD = tf.placeholder(tf.float32, [picture_size, n_projections])\n\n x_generated = generator(z, n_features_first=n_features_first_g,\n n_features_reduction_factor=n_features_reduction_factor, min_features=64,\n BN=BN, power=power, extra_layer=extra_layer_g,\n init_method=init_method, n_features_image=n_features_image)\n\n d_pred_true, d_last_true = discriminator(x_true, reuse=False, n_features_last=n_features_last_d,\n n_features_increase_factor=n_features_reduction_factor,\n min_features=min_features, d_BN=d_BN, power=power,\n n_features_image=n_features_image, init_method=init_method)\n d_pred_gen, d_last_gen = discriminator(x_generated, reuse=True, n_features_last=n_features_last_d,\n n_features_increase_factor=n_features_reduction_factor,\n min_features=min_features, d_BN=d_BN, power=power,\n n_features_image=n_features_image, init_method=init_method)\n\n # define generator loss (big part taken from SWG)\n with tf.name_scope('g_loss'):\n # apply the Johnson-Lindenstrauss map, if wanted, to the flattened array\n if use_JL:\n JL_true = tf.matmul(d_last_true, JL)\n JL_gen = tf.matmul(d_last_gen, JL)\n else:\n JL_true = d_last_true\n JL_gen = d_last_gen\n\n # next project the samples (images). After being transposed, we have tensors\n # of the format: [[projected_image1_proj1, projected_image2_proj1, ...],\n # [projected_image1_proj2, projected_image2_proj2, ...],...]\n # Each row has the projections along one direction. This makes it easier for the sorting that follows.\n # first normalize the random normal vectors to lie in the sphere\n P = tf.nn.l2_normalize(P_non_normalized, axis=0)\n\n projected_true = tf.transpose(tf.matmul(JL_true, P))\n projected_fake = tf.transpose(tf.matmul(JL_gen, P))\n\n sorted_true, true_indices = tf.nn.top_k(input=projected_true, k=batch_size)\n sorted_fake, fake_indices = tf.nn.top_k(input=projected_fake, k=batch_size)\n\n # For faster gradient computation, we do not use sorted_fake to compute\n # loss. 
Instead we re-order the sorted_true so that the samples from the\n # true distribution go to the correct sample from the fake distribution.\n\n # It is less expensive (memory-wise) to rearrange arrays in TF.\n # Flatten the sorted_true from dim [n_projections, batch_size].\n flat_true = tf.reshape(sorted_true, [-1])\n\n # Modify the indices to reflect this transition to an array.\n # new index = row + index\n rows = np.asarray([batch_size * np.floor(i * 1.0 / batch_size) for i in range(n_projections * batch_size)])\n rows = rows.astype(np.int32)\n flat_idx = tf.reshape(fake_indices, [-1, 1]) + np.reshape(rows, [-1, 1])\n\n # The scatter operation takes care of reshaping to the rearranged matrix\n shape = tf.constant([batch_size * n_projections])\n rearranged_true = tf.reshape(tf.scatter_nd(flat_idx, flat_true, shape), [n_projections, batch_size])\n\n generator_loss = tf.reduce_mean(tf.square(projected_fake - rearranged_true))\n\n # get the sliced Wasserstein distance (SWD) (since SWD and JLSWD are not comparable)\n with tf.name_scope('SWD'):\n P_SWD = tf.nn.l2_normalize(P_non_normalized_SWD, axis=0)\n\n projected_true_SWD = tf.transpose(tf.matmul(x_true, P_SWD))\n projected_fake_SWD = tf.transpose(tf.matmul(x_generated, P_SWD))\n\n sorted_true_SWD, true_indices_SWD = tf.nn.top_k(input=projected_true_SWD, k=batch_size)\n sorted_fake_SWD, fake_indices_SWD = tf.nn.top_k(input=projected_fake_SWD, k=batch_size)\n\n flat_true_SWD = tf.reshape(sorted_true_SWD, [-1])\n flat_idx_SWD = tf.reshape(fake_indices_SWD, [-1, 1]) + np.reshape(rows, [-1, 1])\n\n rearranged_true_SWD = tf.reshape(tf.scatter_nd(flat_idx_SWD, flat_true_SWD, shape),\n [n_projections, batch_size])\n\n SWD = tf.reduce_mean(tf.square(projected_fake_SWD - rearranged_true_SWD))\n\n # define the discriminator loss\n with tf.name_scope('d_loss'):\n d_true_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_pred_true), logits=d_pred_true)\n d_fake_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_pred_gen), logits=d_pred_gen)\n discriminator_loss = tf.reduce_mean(d_true_loss + d_fake_loss)\n\n with tf.name_scope('g_optimizer'):\n generator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n g_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)\n g_train = g_optimizer.minimize(generator_loss, var_list=generator_vars)\n\n with tf.name_scope('d_optimizer'):\n discriminator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n d_optimizer = tf.train.AdamOptimizer(learning_rate, beta1=0.5)\n d_train = d_optimizer.minimize(discriminator_loss, var_list=discriminator_vars)\n\n return real_data_int, z, x_generated, JL, P_non_normalized, P_non_normalized_SWD, SWD, g_train, d_train", "def pre_dl_models():\n keras.applications.xception.Xception(include_top=True, weights='imagenet', input_tensor=None,\n input_shape=None, pooling=None, classes=1000)\n\n # keras.applications.vgg16.VGG16(include_top=True, weights='imagenet', input_tensor=None,\n # input_shape=None, pooling=None, classes=1000)\n\n # keras.applications.vgg19.VGG19(include_top=True, weights='imagenet', input_tensor=None,\n # input_shape=None, pooling=None, classes=1000)\n\n # keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet', input_tensor=None,\n # input_shape=None, pooling=None, classes=1000)\n\n # keras.applications.inception_v3.InceptionV3(include_top=True, weights='imagenet', input_tensor=None,\n # input_shape=None, pooling=None, classes=1000)", "def 
make_model():\n \n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu',\n input_shape=(150, 150, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu'))\n model.add(layers.Dense(37, activation='softmax'))\n \n #model.add(layers.Dense(1, activation='sigmoid'))\n \n\n model.compile(loss='sparse_categorical_crossentropy',\n optimizer=optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n return model", "def main(configuration_path, mode):\n config = toml.load(configuration_path)\n train_conf = read_config(config)\n\n click.echo(\"\\n Train config:\")\n print(train_conf, \"\\n\")\n\n # create databunch\n data = create_databunch(\n data_path=train_conf[\"data_path\"],\n fourier=train_conf[\"fourier\"],\n batch_size=train_conf[\"batch_size\"],\n )\n\n # get image size\n train_conf[\"image_size\"] = data.train_ds[0][0][0].shape[1]\n\n # define architecture\n arch = define_arch(\n arch_name=train_conf[\"arch_name\"], img_size=train_conf[\"image_size\"]\n )\n\n if mode == \"train\":\n if train_conf[\"normalize\"] == \"mean\":\n train_conf[\"norm_factors\"] = get_normalisation_factors(data)\n # check out path and look for existing model files\n check_outpath(train_conf[\"model_path\"], train_conf)\n\n click.echo(\"Start training of the model.\\n\")\n\n # define_learner\n learn = define_learner(data, arch, train_conf)\n\n # load pretrained model\n if train_conf[\"pre_model\"] != \"none\":\n learn.create_opt()\n load_pre_model(learn, train_conf[\"pre_model\"])\n\n # Train the model, except interrupt\n # train_conf[\"comet_ml\"] = True\n try:\n if train_conf[\"comet_ml\"]:\n learn.comet.experiment.log_parameters(train_conf)\n with learn.comet.experiment.train():\n learn.fit(train_conf[\"num_epochs\"])\n else:\n learn.fit(train_conf[\"num_epochs\"])\n except KeyboardInterrupt:\n pop_interrupt(learn, train_conf)\n\n end_training(learn, train_conf)\n\n if train_conf[\"inspection\"]:\n after_training_plots(train_conf, rand=True)\n\n if mode == \"fine_tune\":\n click.echo(\"Start fine tuning of the model.\\n\")\n\n # define_learner\n learn = define_learner(\n data,\n arch,\n train_conf,\n )\n\n # load pretrained model\n if train_conf[\"pre_model\"] == \"none\":\n click.echo(\"Need a pre-trained modle for fine tuning!\")\n return\n\n learn.create_opt()\n load_pre_model(learn, train_conf[\"pre_model\"])\n\n # Train the model, except interrupt\n try:\n learn.fine_tune(train_conf[\"num_epochs\"])\n except KeyboardInterrupt:\n pop_interrupt(learn, train_conf)\n\n end_training(learn, train_conf)\n if train_conf[\"inspection\"]:\n after_training_plots(train_conf, rand=True)\n\n if mode == \"lr_find\":\n click.echo(\"Start lr_find.\\n\")\n if train_conf[\"normalize\"] == \"mean\":\n train_conf[\"norm_factors\"] = get_normalisation_factors(data)\n\n # define_learner\n learn = define_learner(data, arch, train_conf, lr_find=True)\n\n # load pretrained model\n if train_conf[\"pre_model\"] != \"none\":\n learn.create_opt()\n load_pre_model(learn, train_conf[\"pre_model\"])\n\n learn.lr_find()\n\n # save loss plot\n plot_lr_loss(\n learn,\n train_conf[\"arch_name\"],\n Path(train_conf[\"model_path\"]).parent,\n skip_last=5,\n 
output_format=train_conf[\"format\"],\n )\n\n if mode == \"plot_loss\":\n click.echo(\"Start plotting loss.\\n\")\n\n # define_learner\n learn = define_learner(data, arch, train_conf, plot_loss=True)\n # load pretrained model\n if Path(train_conf[\"model_path\"]).exists:\n load_pre_model(learn, train_conf[\"model_path\"], plot_loss=True)\n else:\n click.echo(\"Selected model does not exist.\")\n click.echo(\"Exiting.\\n\")\n sys.exit()\n\n plot_lr(\n learn, Path(train_conf[\"model_path\"]), output_format=train_conf[\"format\"]\n )\n plot_loss(\n learn, Path(train_conf[\"model_path\"]), output_format=train_conf[\"format\"]\n )", "def build_model(self):\n self.model = models.Sequential()\n self.model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.add(layers.MaxPooling2D((2, 2)))\n self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n self.model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss='mse', metrics=['mae'])\n self.model.add(layers.Flatten())\n self.model.add(layers.Dense(64, activation='relu'))\n self.model.add(layers.Dense(10, activation='softmax'))\n self.model.compile(optimizer='rmsprop',\n loss='categorical_crossentropy',\n metrics=['accuracy'])", "def _set_model(self):\n print(\"Setting up model...\")\n # Encoder\n inputs = Input(batch_shape=(None,) + self.input_shape)\n\n baseEncoder = self.createEncoder(inputs)\n baseEncoder = Dropout(self.drop)(baseEncoder)\n\n # Instantiate encoder layers\n Q_z_mean = Dense(self.latent_dim)\n Q_z_log_var = Dense(self.latent_dim)\n\n # Parameters for continous latent distribution\n z_mean = Q_z_mean(baseEncoder)\n z_log_var = Q_z_log_var(baseEncoder)\n self.encoder =Model(inputs, z_mean)\n\n # Sample from latent distributions\n\n encoding = Lambda(self._sampling_normal, output_shape=(self.latent_dim,))([z_mean, z_log_var])\n \n G_0 = Dense(8*self.kernel_init)(encoding)\n G_0 = Dropout(self.drop)(G_0)\n baseDecoder = self.createDecoder(G_0)\n\n self.model =Model(inputs, baseDecoder)\n # Store latent distribution parameters\n self.z_mean = z_mean\n self.z_log_var = z_log_var\n\n\n # Compile models\n #self.opt = RMSprop()\n self.model.compile(optimizer=self.opt, loss=self._vae_loss)\n self.model.summary()\n print(\"Completed model setup.\")", "def build_model(self) -> nn.Module:\n pass", "def _regular_build(self):\n # This overwrites define_model, is that ok?\n self.define_model = tf.make_template(self.define_model.__name__, #pylint: disable=E1101\n self.define_model,\n create_scope_now_=True)\n\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n\n def _build(mode):\n outputs, losses, others = self.define_model(data_source=self.dataset[mode], mode=mode)\n self.outputs[mode] = outputs\n self.losses[mode] = losses\n self.otters[mode] = others\n if mode == 'train':\n self._build_optimizer()\n\n # TODO Move clean and summary to proper section\n self.summary_ops = {}\n if self._train_model:\n _build('train')\n summary = []\n for idx, loss in enumerate(self.losses['train']):\n summary.append(\n tf.summary.scalar(name='train/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['train']):\n summary.append(\n tf.summary.scalar(name='train/otter_{}'.format(idx), tensor=element))\n self.summary_ops['train'] = tf.summary.merge(summary)\n\n if self._validate_model:\n _build('validation')\n summary = []\n for idx, loss in enumerate(self.losses['validation']):\n 
summary.append(\n tf.summary.scalar(name='val/loss_{}'.format(idx), tensor=loss))\n for idx, element in enumerate(self.otters['validation']):\n summary.append(\n tf.summary.scalar(name='val/otter_{}'.format(idx), tensor=element))\n self.summary_ops['validation'] = tf.summary.merge(summary)\n\n self.writer = tf.summary.FileWriter(self.output_path,\n self.session.graph)\n self.saver = tf.train.Saver()\n # TODO Add routine to save\n logging.info('Model construction complete.')", "def train_init():\n np.random.seed(seed)\n tf.random.set_random_seed(seed)\n random.seed(seed)\n\n name = str(seed)\n desc = MNMDescriptor(5, inp_dict, outp_dict, name=name)\n desc = recursive_creator(desc, 0, 0, seed)\n hypers = {}\n for hyper in hyps:\n hypers[hyper] = np.random.choice(hyps[hyper])\n\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, lr=hypers[\"lr\"], opt=hypers[\"opt\"], random_seed=seed)\n if intelligent_training == 2:\n loss_weights = model.sequential_training(hypers[\"btch_sz\"], iter_lim // 50, conv_param, proportion, iter_lim, display_step=-1)\n else:\n loss_weights = model.autoset_training(hypers[\"btch_sz\"], iter_lim//50, conv_param, proportion, iter_lim, display_step=-1, incr=incr, decr=decr, scaling=scale)\n\n\n # ####### Save model characteristics.\n\n model.descriptor.save(path=\"\")\n model.save_weights(path=\"\")\n\n results = evaluate_model(model)\n\n np.save(\"hypers\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", hypers)\n\n np.save(\"orig_results\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", results)\n\n np.save(\"loss_weights\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", loss_weights)", "def __init__(self, resultDir: str, modelName: str, x_train, y_train_oh, x_dev, y_dev_oh, x_test, y_test_oh, drop1, drop2, drop3):\n\t\tself.resultDir = resultDir\n\t\tself.modelName = modelName\n\t\tself.x_train = x_train\n\t\tself.x_dev = x_dev\n\t\tself.x_test = x_test\n\t\tself.y_train_oh = y_train_oh\n\t\tself.y_dev_oh = y_dev_oh\n\t\tself.y_test_oh = y_test_oh\n\n\t\tself.drop1 = drop1\n\t\tself.drop2 = drop2\n\t\tself.drop3 = drop3\n\t\t\n\t\tself.model = Sequential()\n\n\t\tself.model.add(Dense(500, activation='relu', input_shape=(1361,)))\n\t\tself.model.add(Dropout(self.drop1))\n\n\t\tself.model.add(Dense(500, activation='relu'))\n\t\tself.model.add(Dropout(self.drop2))\n\n\t\tself.model.add(Dense(256, activation='relu'))\n\t\tself.model.add(Dropout(self.drop3))\n\n\t\tself.model.add(Dense(256, activation='softmax'))\n\n\t\tself.model.compile(loss='categorical_crossentropy', metrics=['categorical_accuracy'], optimizer='adam')\n\t\tprint(\"Model summary\\n\")\n\t\tprint(self.model.summary())", "def build_model():\n pretrained_model = VGG16(input_shape=(fixed_size[0], fixed_size[1], 3), weights='imagenet', include_top=False)\n # We will not train the layers imported.\n for layer in pretrained_model.layers:\n layer.trainable = False\n transfer_learning_model = Sequential()\n transfer_learning_model.add(pretrained_model)\n transfer_learning_model.add(Flatten())\n transfer_learning_model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n transfer_learning_model.add(Dropout(0.5))\n transfer_learning_model.add(Dense(3, activation='softmax'))\n transfer_learning_model.summary()\n opt = 
Adam(learning_rate=.0003)\n transfer_learning_model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n return transfer_learning_model", "def train(**kwargs):\n\n # Roll out the parameters\n patch_size = kwargs[\"patch_size\"]\n image_data_format = kwargs[\"image_data_format\"]\n generator_type = kwargs[\"generator_type\"]\n dset = kwargs[\"dset\"]\n batch_size = kwargs[\"batch_size\"]\n n_batch_per_epoch = kwargs[\"n_batch_per_epoch\"]\n nb_epoch = kwargs[\"nb_epoch\"]\n model_name = kwargs[\"model_name\"]\n save_weights_every_n_epochs = kwargs[\"save_weights_every_n_epochs\"]\n visualize_images_every_n_epochs = kwargs[\"visualize_images_every_n_epochs\"]\n use_mbd = kwargs[\"use_mbd\"]\n label_smoothing = kwargs[\"use_label_smoothing\"]\n label_flipping_prob = kwargs[\"label_flipping_prob\"]\n use_l1_weighted_loss = kwargs[\"use_l1_weighted_loss\"]\n prev_model = kwargs[\"prev_model\"]\n discriminator_optimizer = kwargs[\"discriminator_optimizer\"]\n n_run_of_gen_for_1_run_of_disc = kwargs[\"n_run_of_gen_for_1_run_of_disc\"]\n MAX_FRAMES_PER_GIF = kwargs[\"MAX_FRAMES_PER_GIF\"]\n\n # batch_size = args.batch_size\n # n_batch_per_epoch = args.n_batch_per_epoch\n # nb_epoch = args.nb_epoch\n # save_weights_every_n_epochs = args.save_weights_every_n_epochs\n # generator_type = args.generator_type\n # patch_size = args.patch_size\n # label_smoothing = False\n # label_flipping_prob = False\n # dset = args.dset\n # use_mbd = False\n\n # Check and make the dataset\n # If .h5 file of dset is not present, try making it\n if not os.path.exists(\"../../data/processed/%s_data.h5\" % dset):\n print(\"dset %s_data.h5 not present in '../../data/processed'!\" % dset)\n if not os.path.exists(\"../../data/%s/\" % dset):\n print(\"dset folder %s not present in '../../data'!\\n\\nERROR: Dataset .h5 file not made, and dataset not available in '../../data/'.\\n\\nQuitting.\" % dset)\n return\n else:\n if not os.path.exists(\"../../data/%s/train\" % dset) or not os.path.exists(\"../../data/%s/val\" % dset) or not os.path.exists(\"../../data/%s/test\" % dset):\n print(\"'train', 'val' or 'test' folders not present in dset folder '../../data/%s'!\\n\\nERROR: Dataset must contain 'train', 'val' and 'test' folders.\\n\\nQuitting.\" % dset)\n return\n else:\n print(\"Making %s dataset\" % dset)\n subprocess.call(['python3', '../data/make_dataset.py', '../../data/%s' % dset, '3'])\n print(\"Done!\")\n\n epoch_size = n_batch_per_epoch * batch_size\n\n init_epoch = 0\n\n if prev_model:\n print('\\n\\nLoading prev_model from', prev_model, '...\\n\\n')\n prev_model_latest_gen = sorted(glob.glob(os.path.join('../../models/', prev_model, '*gen*.h5')))[-1]\n prev_model_latest_disc = sorted(glob.glob(os.path.join('../../models/', prev_model, '*disc*.h5')))[-1]\n prev_model_latest_DCGAN = sorted(glob.glob(os.path.join('../../models/', prev_model, '*DCGAN*.h5')))[-1]\n # Find prev model name, epoch\n model_name = prev_model_latest_DCGAN.split('models')[-1].split('/')[1]\n init_epoch = int(prev_model_latest_DCGAN.split('epoch')[1][:5]) + 1\n\n # Setup environment (logging directory etc), if no prev_model is mentioned\n general_utils.setup_logging(model_name)\n\n # img_dim = X_full_train.shape[-3:]\n img_dim = (256, 256, 3)\n\n # Get the number of non overlapping patch and the size of input image to the discriminator\n nb_patch, img_dim_disc = data_utils.get_nb_patch(img_dim, patch_size, image_data_format)\n\n try:\n\n # Create optimizers\n opt_dcgan = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, 
epsilon=1e-08)\n\n if discriminator_optimizer == 'sgd':\n opt_discriminator = SGD(lr=1E-3, momentum=0.9, nesterov=True)\n elif discriminator_optimizer == 'adam':\n opt_discriminator = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n # Load generator model\n generator_model = models.load(\"generator_unet_%s\" % generator_type,\n img_dim,\n nb_patch,\n use_mbd,\n batch_size,\n model_name)\n\n generator_model.compile(loss='mae', optimizer=opt_discriminator)\n\n # Load discriminator model\n discriminator_model = models.load(\"DCGAN_discriminator\",\n img_dim_disc,\n nb_patch,\n use_mbd,\n batch_size,\n model_name)\n\n discriminator_model.trainable = False\n\n DCGAN_model = models.DCGAN(generator_model,\n discriminator_model,\n img_dim,\n patch_size,\n image_data_format)\n\n if use_l1_weighted_loss:\n loss = [l1_weighted_loss, 'binary_crossentropy']\n else:\n loss = [l1_loss, 'binary_crossentropy']\n\n loss_weights = [1E1, 1]\n DCGAN_model.compile(loss=loss, loss_weights=loss_weights, optimizer=opt_dcgan)\n\n discriminator_model.trainable = True\n discriminator_model.compile(loss='binary_crossentropy', optimizer=opt_discriminator)\n\n # Load prev_model\n if prev_model:\n generator_model.load_weights(prev_model_latest_gen)\n discriminator_model.load_weights(prev_model_latest_disc)\n DCGAN_model.load_weights(prev_model_latest_DCGAN)\n\n # Load and rescale data\n print('\\n\\nLoading data...\\n\\n')\n X_full_train, X_sketch_train, X_full_val, X_sketch_val = data_utils.load_data(dset, image_data_format)\n check_this_process_memory()\n print('X_full_train: %.4f' % (X_full_train.nbytes/2**30), \"GB\")\n print('X_sketch_train: %.4f' % (X_sketch_train.nbytes/2**30), \"GB\")\n print('X_full_val: %.4f' % (X_full_val.nbytes/2**30), \"GB\")\n print('X_sketch_val: %.4f' % (X_sketch_val.nbytes/2**30), \"GB\")\n\n # Losses\n disc_losses = []\n gen_total_losses = []\n gen_L1_losses = []\n gen_log_losses = []\n\n # Start training\n print(\"\\n\\nStarting training\\n\\n\")\n for e in range(nb_epoch):\n # Initialize progbar and batch counter\n # progbar = generic_utils.Progbar(epoch_size)\n batch_counter = 0\n gen_total_loss_epoch = 0\n gen_L1_loss_epoch = 0\n gen_log_loss_epoch = 0\n start = time.time()\n for X_full_batch, X_sketch_batch in data_utils.gen_batch(X_full_train, X_sketch_train, batch_size):\n # Create a batch to feed the discriminator model\n X_disc, y_disc = data_utils.get_disc_batch(X_full_batch,\n X_sketch_batch,\n generator_model,\n batch_counter,\n patch_size,\n image_data_format,\n label_smoothing=label_smoothing,\n label_flipping_prob=label_flipping_prob)\n # Update the discriminator\n disc_loss = discriminator_model.train_on_batch(X_disc, y_disc)\n # Create a batch to feed the generator model\n X_gen_target, X_gen = next(data_utils.gen_batch(X_full_train, X_sketch_train, batch_size))\n y_gen = np.zeros((X_gen.shape[0], 2), dtype=np.uint8)\n y_gen[:, 1] = 1\n # Freeze the discriminator\n discriminator_model.trainable = False\n # Train generator\n for _ in range(n_run_of_gen_for_1_run_of_disc-1):\n gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])\n gen_total_loss_epoch += gen_loss[0]/n_run_of_gen_for_1_run_of_disc\n gen_L1_loss_epoch += gen_loss[1]/n_run_of_gen_for_1_run_of_disc\n gen_log_loss_epoch += gen_loss[2]/n_run_of_gen_for_1_run_of_disc\n X_gen_target, X_gen = next(data_utils.gen_batch(X_full_train, X_sketch_train, batch_size))\n gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])\n # Add losses\n gen_total_loss_epoch += 
gen_loss[0]/n_run_of_gen_for_1_run_of_disc\n gen_L1_loss_epoch += gen_loss[1]/n_run_of_gen_for_1_run_of_disc\n gen_log_loss_epoch += gen_loss[2]/n_run_of_gen_for_1_run_of_disc\n # Unfreeze the discriminator\n discriminator_model.trainable = True\n # Progress\n # progbar.add(batch_size, values=[(\"D logloss\", disc_loss),\n # (\"G tot\", gen_loss[0]),\n # (\"G L1\", gen_loss[1]),\n # (\"G logloss\", gen_loss[2])])\n print(\"Epoch\", str(init_epoch+e+1), \"batch\", str(batch_counter+1), \"D_logloss\", disc_loss, \"G_tot\", gen_loss[0], \"G_L1\", gen_loss[1], \"G_log\", gen_loss[2])\n batch_counter += 1\n if batch_counter >= n_batch_per_epoch:\n break\n gen_total_loss = gen_total_loss_epoch/n_batch_per_epoch\n gen_L1_loss = gen_L1_loss_epoch/n_batch_per_epoch\n gen_log_loss = gen_log_loss_epoch/n_batch_per_epoch\n disc_losses.append(disc_loss)\n gen_total_losses.append(gen_total_loss)\n gen_L1_losses.append(gen_L1_loss)\n gen_log_losses.append(gen_log_loss)\n check_this_process_memory()\n print('Epoch %s/%s, Time: %.4f' % (init_epoch + e + 1, init_epoch + nb_epoch, time.time() - start))\n # Save images for visualization\n if (e + 1) % visualize_images_every_n_epochs == 0:\n data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model, batch_size, image_data_format,\n model_name, \"training\", init_epoch + e + 1, MAX_FRAMES_PER_GIF)\n # Get new images from validation\n X_full_batch, X_sketch_batch = next(data_utils.gen_batch(X_full_val, X_sketch_val, batch_size))\n data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model, batch_size, image_data_format,\n model_name, \"validation\", init_epoch + e + 1, MAX_FRAMES_PER_GIF)\n # Plot losses\n data_utils.plot_losses(disc_losses, gen_total_losses, gen_L1_losses, gen_log_losses, model_name, init_epoch)\n # Save weights\n if (e + 1) % save_weights_every_n_epochs == 0:\n gen_weights_path = os.path.join('../../models/%s/gen_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n generator_model.save_weights(gen_weights_path, overwrite=True)\n disc_weights_path = os.path.join('../../models/%s/disc_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n discriminator_model.save_weights(disc_weights_path, overwrite=True)\n DCGAN_weights_path = os.path.join('../../models/%s/DCGAN_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n DCGAN_model.save_weights(DCGAN_weights_path, overwrite=True)\n\n except KeyboardInterrupt:\n pass", "def _build_model(self):\n\n with tf.variable_scope(self.name):\n # adds placeholders, data_normalization and data_noise if desired. 
Also adds a placeholder for dropout probability\n self.layer_in_x, self.layer_in_y = self._build_input_layers()\n\n # create core multi-layer perceptron\n mlp_output_dim = 2 * self.ndim_y * self.n_centers + self.n_centers\n core_network = MLP(\n name=\"core_network\",\n input_layer=self.layer_in_x,\n output_dim=mlp_output_dim,\n hidden_sizes=self.hidden_sizes,\n hidden_nonlinearity=self.hidden_nonlinearity,\n output_nonlinearity=None,\n weight_normalization=self.weight_normalization,\n dropout_ph=self.dropout_ph if self.dropout else None\n )\n\n core_output_layer = core_network.output_layer\n\n # slice output of MLP into three equally sized parts for loc, scale and mixture weights\n slice_layer_locs = L.SliceLayer(core_output_layer, indices=slice(0, self.ndim_y * self.n_centers), axis=-1)\n slice_layer_scales = L.SliceLayer(core_output_layer, indices=slice(self.ndim_y * self.n_centers, 2 * self.ndim_y * self.n_centers), axis=-1)\n slice_layer_weights = L.SliceLayer(core_output_layer, indices=slice(2 * self.ndim_y * self.n_centers, mlp_output_dim), axis=-1)\n\n # locations mixture components\n self.reshape_layer_locs = L.ReshapeLayer(slice_layer_locs, (-1, self.n_centers, self.ndim_y))\n self.locs = L.get_output(self.reshape_layer_locs)\n\n # scales of the mixture components\n reshape_layer_scales = L.ReshapeLayer(slice_layer_scales, (-1, self.n_centers, self.ndim_y))\n self.softplus_layer_scales = L.NonlinearityLayer(reshape_layer_scales, nonlinearity=tf.nn.softplus)\n self.scales = L.get_output(self.softplus_layer_scales)\n\n # weights of the mixture components\n self.logits = L.get_output(slice_layer_weights)\n self.softmax_layer_weights = L.NonlinearityLayer(slice_layer_weights, nonlinearity=tf.nn.softmax)\n self.weights = L.get_output(self.softmax_layer_weights)\n\n # # put mixture components together\n self.y_input = L.get_output(self.layer_in_y)\n self.cat = cat = Categorical(logits=self.logits)\n self.components = components = [MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale\n in zip(tf.unstack(self.locs, axis=1), tf.unstack( self.scales, axis=1))]\n self.mixture = mixture = Mixture(cat=cat, components=components, value=tf.zeros_like(self.y_input))\n\n # regularization\n self._add_softmax_entropy_regularization()\n self._add_l1_l2_regularization(core_network)\n\n # tensor to store samples\n self.samples = mixture.sample() #TODO either use it or remove it\n\n # tensor to compute probabilities\n if self.data_normalization:\n self.pdf_ = mixture.prob(self.y_input) / tf.reduce_prod(self.std_y_sym)\n self.log_pdf_ = mixture.log_prob(self.y_input) - tf.reduce_sum(tf.log(self.std_y_sym))\n else:\n self.pdf_ = mixture.prob(self.y_input)\n self.log_pdf_ = mixture.log_prob(self.y_input)\n\n # symbolic tensors for getting the unnormalized mixture components\n if self.data_normalization:\n self.scales_unnormalized = self.scales * self.std_y_sym\n self.locs_unnormalized = self.locs * self.std_y_sym + self.mean_y_sym\n else:\n self.scales_unnormalized = self.scales\n self.locs_unnormalized = self.locs\n\n # initialize LayersPowered --> provides functions for serializing tf models\n LayersPowered.__init__(self, [self.softmax_layer_weights, self.softplus_layer_scales, self.reshape_layer_locs,\n self.layer_in_y])", "def create_model(self) -> None:\n self._model = create_model_with_temperature_scaling(self.config)", "def _create_base_model(self, modality):\n\n if modality == \"RGB\":\n in_channels = 3\n elif modality == \"Flow\":\n in_channels = 10\n elif modality == \"Audio\":\n 
in_channels = 1\n\n model_dir = os.path.dirname(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n )\n model_dir = os.path.join(model_dir, \"weights\")\n\n is_audio = True if modality == \"Audio\" else False\n\n if \"vgg\" in self.base_model_name:\n base_model = VGG(self.cfg.model.vgg.type, modality, in_channels)\n elif \"resnet\" in self.base_model_name:\n base_model = Resnet(self.cfg.model.resnet.depth, modality, in_channels)\n elif self.base_model_name == \"bninception\":\n pretrained = \"kinetics\" if modality == \"Flow\" else \"imagenet\"\n base_model = bninception(\n in_channels,\n modality,\n model_dir=model_dir,\n pretrained=pretrained,\n is_audio=is_audio,\n attend=self.use_attention,\n )\n\n return base_model", "def train(model_name, batch_size, steps_per_epoch, epochs, validation_steps, \n model_file=None, save_path=None):\n \n print(\"- Loading configuration...\")\n if model_name in models_default_params:\n default_params = models_default_params[model_name]\n else:\n print(\"Error: the model '{}' has not been implemented\".format(model_name))\n return\n custom_objects = default_params['custom_objects']\n patch_size = default_params['patch_size']\n if save_path is None:\n save_path = default_params['default_path']\n if os.path.isfile(save_path):\n print(\"Warning: {} is an existing file and will be overwritten.\".format(save_path))\n print(\"- Configuration loaded.\")\n \n print(\"- Loading datasets...\")\n train_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Training/RGB/\",\n y_directory = \"datasets/Potsdam/Training/Labels/\",\n patch_size = patch_size)\n val_gen = preprocess.load_dataset(batch_size, x_directory = \"datasets/Potsdam/Validation/RGB/\",\n y_directory = \"datasets/Potsdam/Validation/Labels/\",\n patch_size = patch_size)\n print(\"- Data loaded.\")\n \n print(\"- Initialising model...\")\n if(model_file is not None): # Further train existing model\n model = keras.models.load_model(model_file, custom_objects=custom_objects)\n else: # Create new model\n if model_name == 'fcn':\n model = fcn.make_fcn_resnet((patch_size, patch_size, channels), nb_labels, \n use_pretraining=False, freeze_base=False)\n elif model_name == 'pspnet':\n model = pspnet.build_pspnet(nb_classes=nb_labels, resnet_layers=50,\n input_shape=patch_size)\n elif model_name == 'mobilenetv2':\n model = mobilenetv2.MobileNetv2((patch_size, patch_size, channels), nb_labels) \n\n model.compile(\n optimizer = optimizers.Adam(lr = 0.00001),\n loss = losses.categorical_crossentropy,\n metrics = [metrics.categorical_accuracy]) \n model.summary() \n print(\"- Model initialised.\")\n \n tensorboard = callbacks.TensorBoard(log_dir='./logs')\n csv_logger = callbacks.CSVLogger('logs/training.csv')\n checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,\n save_weights_only=True,\n save_best_only=True)\n \n print(\"- Starting training.\")\n model.fit_generator(\n generator=train_gen,\n steps_per_epoch=steps_per_epoch,\n epochs=epochs,\n validation_data=val_gen,\n validation_steps=validation_steps,\n # callbacks=[checkpoint, csv_logger]\n )\n print(\"- Training complete.\")\n \n model.save(save_path)\n print(\"- Model saved to {}\".format(save_path))", "def load(self, name=\"\"):\n\n self.constructed = True\n if name == \"\":\n name = \"/home/unai/Escritorio/MultiNetwork/model/model\"\n\n network_descriptors = {\"Generic\": GenericDescriptor, \"Decoder\": DecoderDescriptor, \"Discrete\": DiscreteDescriptor, \"Convolution\": ConvolutionDescriptor}\n\n if not 
os.path.isfile(name):\n print(\"Error at loading the model\")\n return None\n\n f = open(name, \"r+\")\n\n lines = f.readlines()\n\n i = 0\n while lines[i] != \"\\n\": # Each component is stored in a line\n ident, n_inp, kind, n_hidden, layers, init, act, cond_rand, taking, producing, depth, reachable, belows = lines[i][:-1].split(\"_\")\n kwargs = {}\n if int(ident[1:]) > self.last_net:\n self.last_net = int(ident[1:])\n\n self.reachable[ident] = reachable.split(\",\")\n self.comps_below[ident] = belows.split(\",\")\n\n if \"onv\" in kind: # Not working right now\n filters, sizes, layers, strides = layers.split(\"*\")\n sizes = sizes.split(\",\")\n s = np.array([[int(sz) for sz in szs.split(\"/\")] for szs in sizes])\n desc = network_descriptors[kind](int(inp), int(outp), int(n_inp), layers.split(\",\"), filters.split(\",\"), [int(x) for x in strides.split(\",\")], s, [int(x) for x in act.split(\",\")], [int(x) for x in init.split(\",\")], kwargs)\n else:\n if len(kwargs) > 0: # Not working right now\n kwargs = kwargs.split(\"-\")\n kwargs[0] = [int(x) for x in kwargs[0].split(\".\") if len(x) > 0]\n kwargs[1] = [int(x) for x in kwargs[1].split(\".\") if len(x) > 0]\n if len(cond_rand) > 0:\n cond_rand = cond_rand.split(\"-\")\n cond_rand[0] = [int(x) for x in cond_rand[0].split(\",\") if len(x) > 0]\n cond_rand[1] = [int(x) for x in cond_rand[1].split(\",\") if len(x) > 0]\n kwargs[\"conds\"] = cond_rand\n desc = network_descriptors[kind](int(taking.split(\",\")[0]), int(producing.split(\",\")[0]), int(n_inp), int(n_hidden), [int(x) for x in layers.split(\",\") if x != \"-1\"], init_functions[[int(x) for x in init.split(\",\") if x != \"-1\"]],\n act_functions[[int(x) for x in act.split(\",\") if x != \"-1\"]], **kwargs)\n\n # print(\"ident\", ident, \"n_inp\", n_inp, \"kind\", kind, \"inp\", inp, \"outp\", outp, \"layers\", layers, \"init\", init, \"act\", act, \"taking\", taking, \"producing\", producing, \"depth\", depth, \"kwargs\", kwargs)\n net = NetworkComp(desc, InOut(size=int(taking.split(\",\")[0]), data_type=taking.split(\",\")[1]), InOut(data_type=producing.split(\",\")[1], size=int(producing.split(\",\")[0])), int(depth))\n\n self.add_net(net, ident)\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Inputs\n\n ident, size, kind, depth = lines[i].split(\"_\")\n\n self.inputs[ident] = ModelComponent(None, InOut(size=int(size), data_type=kind), int(depth))\n i += 1\n\n i += 1\n\n while lines[i] != \"\\n\": # Outputs\n\n ident, size, kind, depth, belows = lines[i].split(\"_\")\n\n self.outputs[ident] = ModelComponent(InOut(size=int(size), data_type=kind), None, int(depth))\n self.comps_below[ident] = belows.split(\",\")\n i += 1\n\n i += 1\n\n while i < len(lines): # Connections\n name, inp, outp, kind, size = lines[i].split(\"_\")\n\n if int(name[1:]) > self.last_con:\n self.last_con = int(name[1:])\n\n self.connections[name] = Connection(inp, outp, InOut(kind, int(size)), name)\n i += 1\n self.update_below()", "def build_model(\n config: Mapping, cardinalities: Mapping[str, int]\n) -> keras.Model:\n\n model_config = config['model']\n if isinstance(model_config, str):\n model = keras.models.load_model(\n model_config, custom_objects={\n 'loss_fn': _create_loss(config['loss'])\n }\n )\n\n return model\n\n features = Features(config['features'])\n inputs_all = []\n\n # Constituents of different types\n constituent_types = [\n key for key in sorted(model_config.keys()) # Ensure order\n if key not in {'head', 'load_weights'}\n ]\n outputs_constituents = []\n for constituent_type 
in constituent_types:\n inputs_numerical = keras.Input(\n shape=(None, len(features.numerical(constituent_type))),\n ragged=True, name=f'{constituent_type}_numerical'\n )\n inputs_categorical = OrderedDict()\n for feature in features.categorical(constituent_type):\n inputs_categorical[feature] = keras.Input(\n shape=(None,), ragged=True, name=feature\n )\n inputs_all.append(inputs_numerical)\n inputs_all.extend(inputs_categorical.values())\n\n outputs = _apply_deep_set(\n inputs_numerical, inputs_categorical,\n model_config[constituent_type], cardinalities, constituent_type\n )\n outputs_constituents.append(outputs)\n\n # Head\n inputs_global_numerical = keras.Input(\n shape=(len(features.numerical('global')),),\n name='global_numerical'\n )\n inputs_global_categorical = OrderedDict()\n for feature in features.categorical('global'):\n inputs_global_categorical[feature] = keras.Input(\n shape=(None,), name=feature\n )\n embeddings_global = {\n feature: Embedding(\n cardinalities[feature],\n model_config['head']['embeddings'][feature],\n name=feature + '_embeddings'\n )(inputs)\n for feature, inputs in inputs_global_categorical.items()\n }\n inputs_all.append(inputs_global_numerical)\n inputs_all.extend(inputs_global_categorical.values())\n inputs_head = Concatenate(name='head_concatenate')(\n [inputs_global_numerical]\n + [\n embeddings_global[feature]\n for feature in inputs_global_categorical.values()\n ]\n + outputs_constituents\n )\n outputs = _apply_dense_from_config(\n inputs_head, model_config['head'], name_prefix='head_'\n )\n\n outputs = Dense(1, name='head_dense_output')(outputs) # Output unit\n model = keras.Model(inputs=inputs_all, outputs=outputs, name='full')\n\n model.compile(\n optimizer=_create_optimizer(config.get('optimizer', None)),\n loss=_create_loss(config['loss'])\n )\n if 'load_weights' in model_config:\n # Normally, a saved model should be loaded\n # keras.models.load_model at the beginning of thsi function.\n # However, this is currently not supported for models that use\n # ragged tensors [1]. As a workaround, construct the model anew\n # and then load saved weights. The path to weights would\n # usually be \"{model_directory}/variables/variables\", with the\n # \".index\" file extension stripped off. 
This doesn't restore\n # the state of the optimizer.\n # [1] https://github.com/tensorflow/tensorflow/issues/41034\n model.load_weights(model_config['load_weights'])\n return model", "def __init__(self, config):\n super().__init__()\n self.model_list = []\n self.model_name_list = []\n for key in config[\"Models\"]:\n model_config = config[\"Models\"][key]\n freeze_params = False\n pretrained = None\n if \"freeze_params\" in model_config:\n freeze_params = model_config.pop(\"freeze_params\")\n if \"pretrained\" in model_config:\n pretrained = model_config.pop(\"pretrained\")\n model = BaseModel(model_config)\n if pretrained is not None:\n load_pretrained_params(model, pretrained)\n if freeze_params:\n for param in model.parameters():\n param.trainable = False\n self.model_list.append(self.add_sublayer(key, model))\n self.model_name_list.append(key)", "def cnn_model(model_name, img_size, weights):\n input_size = (img_size, img_size, 3)\n if model_name == \"xception\":\n baseModel = Xception(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"iv3\":\n baseModel = InceptionV3(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"irv2\":\n baseModel = InceptionResNetV2(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"resnet\":\n baseModel = ResNet50(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"nasnet\":\n baseModel = NASNetLarge(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"ef0\":\n baseModel = EfficientNetB0(\n input_size,\n weights=\"imagenet\",\n include_top=False\n )\n elif model_name == \"ef5\":\n baseModel = EfficientNetB5(\n input_size,\n weights=\"imagenet\",\n include_top=False\n )\n\n headModel = baseModel.output\n headModel = GlobalAveragePooling2D()(headModel)\n headModel = Dense(\n 512,\n activation=\"relu\",\n kernel_initializer=\"he_uniform\",\n name=\"fc1\")(\n headModel\n )\n headModel = Dropout(0.4)(headModel)\n predictions = Dense(\n 2,\n activation=\"softmax\",\n kernel_initializer=\"he_uniform\")(\n headModel\n )\n model = Model(inputs=baseModel.input, outputs=predictions)\n\n model.load_weights(weights)\n print(\"Weights loaded...\")\n model_lstm = Model(\n inputs=baseModel.input,\n outputs=model.get_layer(\"fc1\").output\n )\n\n for layer in baseModel.layers:\n layer.trainable = True\n\n optimizer = Nadam(\n lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004\n )\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"]\n )\n return model_lstm", "def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)", "def build_model(self):\n if self.args.network_type == 'unet':\n self.shared = models.Unet(self.args)\n else:\n raise NotImplementedError(f'Network type '\n f'`{self.args.network_type}` is not '\n f'defined')\n self.controller = models.Controller(self.args)\n\n if self.args.num_gpu == 1:\n self.shared.cuda()\n self.controller.cuda()\n elif self.args.num_gpu > 1:\n raise NotImplementedError('`num_gpu > 1` is in progress')", "def train_model(self):\n self.logger.info('Loading the data...')\n train_data = self.load_data(split=\"train\")\n dev_data = self.load_data(split=\"dev\")\n 
self.config.best_model = os.path.join(self.config.output_dir,\"best_model\")\n self.logger.info('Training the model, outputdir=%s...,best_model=%s' % (self.config.output_dir,self.config.best_model))\n\n train_params = {\n \"overwrite_output_dir\" : True,\n \"reprocess_input_data\": True,\n \"learning_rate\" : self.config.learning_rate,\n \"num_train_epochs\" : self.config.num_train_epochs,\n \"train_batch_size\" : self.config.train_batch_size,\n \"eval_batch_size\" : self.config.eval_batch_size,\n \"gradient_accumulation_steps\": self.config.gradient_accumulation_steps,\n \"use_early_stopping\" : self.config.early_stopping,\n \"fp16\" : False,\n \"classification_report\" : True,\n \"evaluate_during_training\" : True,\n \"evaluate_during_training_verbose\" : True,\n \"best_model_dir\": self.config.best_model,\n \"save_model_every_epoch\" : self.config.save_model_every_epoch,\n \"save_steps\" : self.config.save_steps,\n \"save_optimizer_and_scheduler\" : self.config.save_optimizer_and_scheduler,\n \"save_best_model\": True,\n }\n\n ## train the model \n self.model.train_model(\n train_data,\n eval_data=dev_data,\n output_dir=self.config.output_dir,\n show_running_loss=False,\n args=train_params,\n )\n\n ## backing up the config and create pointer to best model \n with open(os.path.join(self.config.best_model,\"trainer_config.json\"),'w') as mconfig:\n mconfig.write(json.dumps(self.config.__dict__))\n self.config.existing_model = self.config.best_model", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n\n self.loss_names = ['G_SH']\n self.visual_names = ['input', 'pr_SH', 'gt_SH']\n self.model_names = ['G1']\n\n if not opt.no_brightness:\n self.loss_names += ['G_BA', 'G_BC']\n self.visual_names += ['pr_BA', 'gt_BA']\n self.model_names += ['G3']\n\n if opt.reg_LTM:\n self.loss_names += ['LTMReg']\n\n self.light_res = opt.light_res\n\n\n # Intrinsic network\n if opt.latent_Ls or opt.latent_Lt:\n netG1name = 'unet_256_latent_inL'\n else:\n netG1name = 'unet_256_latent'\n\n input_nc = opt.input_nc\n if opt.in_Ls:\n input_nc += 1\n if opt.in_Lt:\n input_nc += 1\n\n if opt.LTM:\n self.dim_LTM = self.light_res**2\n if self.opt.enc_LTM:\n self.dim_LTM = opt.dim_LTM\n use_hidden = True if not opt.enc_ill_hid==-1 else False\n self.enc_LTM = networks.init_net(networks.IlluminationEncoder(self.light_res**2, opt.enc_ill_hid, self.dim_LTM, use_hidden), opt.init_type, opt.init_gain, self.gpu_ids)\n\n self.netG1 = networks.define_G(input_nc, self.dim_LTM, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, True, self.gpu_ids)\n\n else:\n if opt.no_latent_color:\n output_nc = 3\n else:\n output_nc = 1\n self.netG1 = networks.define_G(input_nc, output_nc, opt.ngf, netG1name, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n\n # Brightness network\n g3_input_nc = 3\n if opt.cas and opt.cat_In:\n g3_input_nc = g3_input_nc + opt.input_nc\n if not opt.cas:\n if opt.in_Ls:\n g3_input_nc += 1\n if opt.in_Lt:\n g3_input_nc += 1\n self.netG3 = networks.define_G(g3_input_nc, 1, opt.ngf, 'resnet_9blocks_latent', opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, False, self.gpu_ids)\n if self.isTrain:\n # define loss functions\n self.criterionS = torch.nn.MSELoss()\n self.criterionBA = torch.nn.MSELoss()\n # self.criterionBP = torch.nn.MSELoss()\n self.criterionBC = torch.nn.MSELoss()\n self.criterionReg = torch.nn.MSELoss()\n # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.\n 
self.optimizer_G1 = torch.optim.Adam(self.netG1.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n # self.optimizer_G2 = torch.optim.Adam(self.netG2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_G3 = torch.optim.Adam(self.netG3.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G1)\n # self.optimizers.append(self.optimizer_G2)\n self.optimizers.append(self.optimizer_G3)", "def initModel(self):\n input_shape = (self.params[\"nb_features\"],)\n x = input_tensor = Input(input_shape)\n x = Dense(self.params[\"nb_neurons\"], activation=\"relu\")(x)\n for i in range(2, self.params[\"nb_layers\"] + 1):\n x = Dense(self.params[\"nb_neurons\"], activation=\"relu\")(x)\n if self.params[\"dropout\"]:\n x = Dropout(self.params[\"dropout\"])(x)\n x = output_tensor = Dense(4)(x)\n model = Model(input_tensor, output_tensor)\n return model", "def __init__(self, opt):\n BaseModel.__init__(self, opt)\n self.loss_names = ['G_GAN', 'D_real', 'D_fake', 'Feat', 'VGG', 'SSIM', 'PSNR']\n self.visual_names = ['fake_B', 'real_B']\n if self.isTrain:\n self.model_names = ['G', 'D']\n else:\n self.model_names = ['G']\n\n self.netG = generator.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG, opt.norm,\n not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids,\n not opt.no_transp_conv,\n opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,\n opt.n_blocks_local)\n\n if self.isTrain:\n self.netD = discriminator.define_D(opt.input_nc + opt.output_nc, opt.ndf, 'pix2pixHD_multiscale',\n opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids,\n not (opt.gan_mode == 'lsgan'), opt.num_D)\n\n self.criterionGAN = loss.GANLoss(opt.gan_mode, multiscale_D=opt.netD == 'pix2pixHD_multiscale').to(\n self.device)\n self.criterionVGG = loss.VGGLoss().to(self.device)\n self.criterionFeat = loss.FeatureMatchingLoss(opt.n_layers_D, opt.num_D)\n\n self.criterionSSIM = loss.SkimageLoss(partial(ssim, multichannel=True))\n self.criterionPSNR = loss.SkimageLoss(psnr)\n\n if opt.netG.startswith('pix2pixHD') and (opt.n_epochs_fix_global > 0):\n params_dict = dict(self.netG.named_parameters())\n netG_params = []\n for key, value in params_dict.items():\n if key.startswith('model' + str(opt.n_local_enhancers)):\n netG_params += [value]\n else:\n netG_params = self.netG.parameters()\n\n self.optimizer_G = torch.optim.Adam(netG_params, lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n if opt.load_pretrain:\n pretrained_path = '' if not self.isTrain else opt.load_pretrain\n self.load_network(self.netG, 'G', opt.epoch, pretrained_path)\n if self.isTrain:\n self.load_network(self.netD, 'D', opt.epoch, pretrained_path)\n\n self.real_A = None\n self.real_B = None\n self.fake_A = None\n self.fake_B = None\n self.loss_D_real = None\n self.loss_D_fake = None\n self.loss_D = None\n self.loss_G_GAN = None\n self.loss_Feat = None\n self.loss_VGG = None\n self.loss_G = None\n self.loss_SSIM = None\n self.loss_PSNR = None", "def build(self):\n input_shape_img = (None, None, 3)\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(None, 4))\n shared_layers = self.cnn_model.nn_base(img_input, trainable=True)\n num_anchors = len(self.C.anchor_scales) * len(self.C.anchor_ratios)\n \n output_region_proposal = self.region_proposal_net(shared_layers, num_anchors)\n 
output_classifier = self.classifier(shared_layers,\n self.cnn_model.classifier_layers, \n roi_input, self.C.num_roi, \n num_class=len(self.class_count), trainable=True)\n \n self.model_region_proposal = Model(img_input, output_region_proposal[:2])\n self.model_classifier = Model([img_input, roi_input], output_classifier)\n self.model_all = Model([img_input, roi_input], output_region_proposal[:2] + output_classifier)\n\n optimizer = Adam(lr=1e-5)\n self.model_region_proposal.compile(optimizer=optimizer, \n loss=[losses.rpn_loss_cls(num_anchors), \n losses.rpn_loss_regr(num_anchors)])\n self.model_classifier.compile(optimizer=optimizer, \n loss=[losses.class_loss_cls, \n losses.class_loss_regr(len(self.class_count)-1)], \n metrics={'dense_class_{}'.format(len(self.class_count)): 'accuracy'})\n self.model_all.compile(optimizer='sgd', loss='mae')\n\n # print(self.model_all.summary())\n plot_model(self.model_region_proposal, show_shapes=True, to_file='./frcnn/images/region_proposal.png')\n plot_model(self.model_classifier, show_shapes=True, to_file='./frcnn/images/classifier.png')\n plot_model(self.model_all, show_shapes=True, to_file='./frcnn/images/model_all.png')", "def load_model(self):\n print(\"=============start loading models=============\")\n # load models from basemodel and fine-tune layers\n base_model = DenseNet(reduction=0.5, classes=1000, weights_path=BASE_WEIGHT_DIR)\n base_model.layers.pop()\n base_model.layers.pop()\n x4 = Dense(6, activation='relu')(base_model.layers[-1].output)\n o = Activation('softmax')(x4)\n\n model = Model(inputs=base_model.input, outputs=[o])\n model.load_weights(WEIGHT_DIR)\n\n self.model = model\n print(\"=============finish loading models=============\")", "def BuildModel(ANNSetup,model):\n\n if(isinstance(ANNSetup.Activ,str)):\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), activation=ANNSetup.Activ, kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation=ANNSetup.Activ))\n else:\n model.add(Dense(ANNSetup.Neurons[0], kernel_regularizer=l2(ANNSetup.Regu), kernel_initializer=Winit(ANNSetup.Winit), input_dim=ANNSetup.InputDim))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n if(ANNSetup.Dropout != None):\n model.add(Dropout(ANNSetup.Dropout))\n for i in range(1,len(ANNSetup.Neurons)):\n if(i == len(ANNSetup.Neurons)-1):\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit), activation='sigmoid'))\n else:\n model.add(Dense(ANNSetup.Neurons[i], kernel_initializer=Winit(ANNSetup.Winit)))\n model.add(LeakyReLU(alpha=ANNSetup.Activ))\n\n return model", "def create_model(self, input_shape, num_actions, mode, args, model_name='q_network'):\n assert (mode in (\"linear\", \"duel\", \"dqn\"))\n with tf.variable_scope(model_name):\n input_data = Input(shape=input_shape, name=\"input\")\n if mode == \"linear\":\n # #version 4 elu:\n # flatten_hidden = Flatten(name=\"flatten\")(input_data)\n # FC_1 = Dense(512, activation='elu', name='FC1-elu')(flatten_hidden)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(FC_1)\n # FC_3 = Dense(512, activation='elu', name='FC3-elu')(FC_2)\n # FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n # 
output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n #version 4 elu:\n flatten_hidden = Flatten(name=\"flatten\")(input_data)\n FC_1 = Dense(1024, activation='elu', name='FC1-elu')(flatten_hidden)\n FC_2 = Dense(1024, activation='elu', name='FC2-elu')(FC_1)\n FC_3 = Dense(1024, activation='elu', name='FC3-elu')(FC_2)\n FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n else:\n if not (args.recurrent):\n # # # version 1:\n # h1 = Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\")(input_data)\n # h2 = Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\")(h1)\n # h3 = Convolution2D(64, (3, 3), strides=1, activation=\"relu\", name=\"conv3\")(h2)\n # context = Flatten(name=\"flatten\")(h3)\n\n # # version 2:\n # conv1 = Convolution2D(1, (5, 5), strides=1, activation=\"elu\", name=\"conv1\")(input_data)\n # flatten = Flatten(name=\"flatten\")(conv1)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(flatten)\n # context = Dense(512, activation='elu', name='FC4-elu')(FC_2)\n\n # version 3:\n conv1 = Convolution2D(32, (2, 2), strides=1, activation=\"relu\", name=\"conv1\")(input_data)\n flatten = Flatten(name=\"flatten\")(conv1)\n FC_2 = Dense(128, activation='relu', name='FC2-relu')(flatten)\n FC_3 = Dense(128, activation='relu', name='FC3-relu')(FC_2)\n context = Dense(128, activation='elu', name='FC4-elu')(FC_3)\n\n\n\n # else:\n # print('>>>> Defining Recurrent Modules...')\n # input_data_expanded = Reshape((input_shape[0], input_shape[1], input_shape[2], 1),\n # input_shape=input_shape)(input_data)\n # input_data_TimeDistributed = Permute((3, 1, 2, 4), input_shape=input_shape)(input_data_expanded)\n # h1 = TimeDistributed(Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\"), \\\n # input_shape=(args.num_frames, input_shape[0], input_shape[1], 1))(\n # input_data_TimeDistributed)\n # h2 = TimeDistributed(Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\"))(h1)\n # h3 = TimeDistributed(Convolution2D(64, (2, 2), strides=1, activation=\"relu\", name=\"conv3\"))(h2)\n # flatten_hidden = TimeDistributed(Flatten())(h3)\n # hidden_input = TimeDistributed(Dense(512, activation='relu', name='flat_to_512'))(flatten_hidden)\n # if not (args.a_t):\n # context = LSTM(512, return_sequences=False, stateful=False, input_shape=(args.num_frames, 512))(\n # hidden_input)\n # else:\n # if args.bidir:\n # hidden_input = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # all_outs = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # else:\n # all_outs = LSTM(512, return_sequences=True, stateful=False,\n # input_shape=(args.num_frames, 512))(hidden_input)\n # # attention\n # attention = TimeDistributed(Dense(1, activation='tanh'))(all_outs)\n # # print(attention.shape)\n # attention = Flatten()(attention)\n # attention = Activation('softmax')(attention)\n # attention = RepeatVector(512)(attention)\n # attention = Permute([2, 1])(attention)\n # sent_representation = merge([all_outs, attention], mode='mul')\n # context = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(512,))(sent_representation)\n # # print(context.shape)\n\n if mode == \"dqn\":\n h4 = Dense(512, activation='elu', name=\"fc\")(context)\n output = Dense(num_actions, 
name=\"output\")(h4)\n # elif mode == \"duel\":\n # value_hidden = Dense(512, activation='relu', name='value_fc')(context)\n # value = Dense(1, name=\"value\")(value_hidden)\n # action_hidden = Dense(512, activation='relu', name='action_fc')(context)\n # action = Dense(num_actions, name=\"action\")(action_hidden)\n # action_mean = Lambda(lambda x: tf.reduce_mean(x, axis=1, keep_dims=True), name='action_mean')(\n # action)\n # output = Lambda(lambda x: x[0] + x[1] - x[2], name='output')([action, value, action_mean])\n model = Model(inputs=input_data, outputs=output)\n print(model.summary())\n return model", "def config1() :\n data_name = \"titanic\" ### in data/input/\n model_class = 'AutoML' ### ACTUAL Class name for model_sklearn.py\n n_sample = 1000\n\n def post_process_fun(y): ### After prediction is done\n return int(y)\n\n def pre_process_fun(y): ### Before the prediction is done\n return int(y)\n\n\n model_dict = {'model_pars': {\n ### LightGBM API model #######################################\n 'model_class': model_class\n ,'model_pars' : {\n 'total_time_limit' : 20,\n 'algorithms' : 'auto',\n 'results_path' : root_repo + f'/data/output/{data_name}/{os_get_function_name()}/automl_1',\n 'eval_metric' : 'auto'\n\n # mode='Explain',\n # ml_task='auto', model_time_limit=None, algorithms='auto', train_ensemble=True,\n # stack_models='auto', eval_metric='auto', validation_strategy='auto', explain_level='auto',\n # golden_features='auto', features_selection='auto', start_random_models='auto',\n # hill_climbing_steps='auto', top_models_to_improve='auto', verbose=1, random_state=1234)\n }\n\n , 'post_process_fun' : post_process_fun ### After prediction ##########################################\n , 'pre_process_pars' : {'y_norm_fun' : pre_process_fun , ### Before training ##########################\n\n\n ### Pipeline for data processing ##############################\n 'pipe_list': [\n #### coly target prorcessing\n {'uri': 'source/prepro.py::pd_coly', 'pars': {}, 'cols_family': 'coly', 'cols_out': 'coly', 'type': 'coly' },\n\n\n {'uri': 'source/prepro.py::pd_colnum_bin', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'colnum_bin', 'type': '' },\n {'uri': 'source/prepro.py::pd_colnum_binto_onehot', 'pars': {}, 'cols_family': 'colnum_bin', 'cols_out': 'colnum_onehot', 'type': '' },\n\n #### catcol INTO integer, colcat into OneHot\n {'uri': 'source/prepro.py::pd_colcat_bin', 'pars': {}, 'cols_family': 'colcat', 'cols_out': 'colcat_bin', 'type': '' },\n # {'uri': 'source/prepro.py::pd_colcat_to_onehot', 'pars': {}, 'cols_family': 'colcat_bin', 'cols_out': 'colcat_onehot', 'type': '' },\n\n\n ### Cross_feat = feat1 X feat2\n # {'uri': 'source/prepro.py::pd_colcross', 'pars': {}, 'cols_family': 'colcross', 'cols_out': 'colcross_pair', 'type': 'cross'},\n\n\n #### Example of Custom processor\n #{'uri': THIS_FILEPATH + '::pd_col_myfun', 'pars': {}, 'cols_family': 'colnum', 'cols_out': 'col_myfun', 'type': '' }, \n\n\n ],\n }\n },\n\n 'compute_pars': { 'metric_list': ['accuracy_score','average_precision_score']\n\n ,'mlflow_pars' : None # {} ### Not empty --> use mlflow\n },\n\n 'data_pars': { 'n_sample' : n_sample,\n\n 'download_pars' : None,\n\n\n 'cols_input_type' : cols_input_type_1,\n ### family of columns for MODEL #########################################################\n # \"colnum\", \"colnum_bin\", \"colnum_onehot\", \"colnum_binmap\", #### Colnum columns\n # \"colcat\", \"colcat_bin\", \"colcat_onehot\", \"colcat_bin_map\", #### colcat columns\n # 'colcross_single_onehot_select', 
\"colcross_pair_onehot\", 'colcross_pair', #### colcross columns 'coldate', 'coltext',\n 'cols_model_group': [ 'colnum_bin',\n 'colcat_bin',\n # 'coltext',\n # 'coldate',\n #'colcross_pair',\n \n ### example of custom\n # 'col_myfun'\n ]\n\n ### Filter data rows ##################################################################\n ,'filter_pars': { 'ymax' : 2 ,'ymin' : -1 }\n\n }\n }\n\n ##### Filling Global parameters ############################################################\n model_dict = global_pars_update(model_dict, data_name, config_name=os_get_function_name() )\n return model_dict", "def build_model(self):\n input_pencil = tf.keras.Input((128,128,3))\n # generator's output\n gen_image = self.gan_generator.model(input_pencil)\n # generator's output\n x = self.gan_discriminator.model([input_pencil,gen_image])\n model = tf.keras.Model(input_pencil,[x,gen_image])\n # compiling the model\n model.compile(loss=['hinge', 'mae'], optimizer = self.optimizer,loss_weights=[1,100], metrics=['accuracy'])\n self.model = model", "def create_model():\n model = Sequential()\n\n model.add(Lambda(lambda x: x/127.5-1.0, input_shape=INPUT_SHAPE))\n\n # Now we are going to add some Convulation Layers identical to paper\n\n model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(48, (5, 5), activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n\n # And now finally we will Flatten our layers and eventually use Fully Connected Layers to reduce features.\n\n model.add(Dropout(0.4))\n model.add(Flatten())\n\n model.add(Dense(256, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(100, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(25, activation='elu'))\n model.add(Dense(1))\n\n model.summary()\n\n return model", "def build_model_mobilenet(num_classes):", "def config(self):\n\n train_dataset = RandomClassificationDataset()\n eval_dataset = RandomClassificationDataset()\n\n return {\n 'model':\n SimpleModel(),\n 'train_dataloader':\n DataLoader(\n dataset=train_dataset,\n batch_size=4,\n sampler=dist.get_sampler(train_dataset),\n ),\n 'eval_dataloader':\n DataLoader(\n dataset=eval_dataset,\n sampler=dist.get_sampler(eval_dataset),\n ),\n 'max_duration':\n '2ep',\n 'autoresume':\n True,\n 'loggers': [],\n }", "def build_model(self):\n self.G = Generator(self.g_conv_dim)\n self.D = Discriminator(self.d_conv_dim, self.c_dim)\n self.generator = Generator(self.g_conv_dim).train(False)\n\n self.G = nn.DataParallel(self.G)\n self.D = nn.DataParallel(self.D)\n\n # For Adam (Unofficial)\n # self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n # For RMSprop(Official)\n self.g_optimizer = torch.optim.RMSprop(self.G.parameters(), lr=0.0001)\n self.d_optimizer = torch.optim.RMSprop(self.D.parameters(), lr=0.0001)\n\n self.accumulate(self.generator, self.G.module, 0)\n # self.print_network(self.G, 'G')\n # self.print_network(self.D, 'D')\n \n self.G.to(self.device)\n self.D.to(self.device)\n self.generator.to(self.device)\n\n # weight init\n self.G.apply(self.weights_init)\n self.D.apply(self.weights_init)\n 
self.generator.apply(self.weights_init)", "def init_model(model_filename, doGPU):\n # set model attributes list\n ##print(\"Model-dataset =\", model_ds_name)\n ##if model_ds_name == 'modelRAP':\n ## model_labels = loader_rapdataset_yiqiang.ATTRIBUTES\n ##elif model_ds_name == 'modelPETA':\n ## model_labels = loader_peta_dataset.ATTRIBUTES\n ##elif model_ds_name == 'modelRAPPETA':\n ## model_labels = [peta_label for rap_label,peta_label in loader_rap_plus_peta_dataset.ATTRIBUTES]\n ##else:\n ## print(\"ERROR: unknown model-dataset.\")\n ## sys.exit()\n model_labels = loader_rap_plus_peta_dataset.ATTRIBUTES\n assert (len(model_labels) == 49)\n\n # create model\n person.NO_ATTRIBUTES = len(model_labels) #TODO-elo: ugly, attr. nbr should be a parameter of person.Net.__init__()\n net = person.Net()\n if doGPU:\n net = person.Net().cuda()\n\n # load model\n print('loading model \"' + model_filename + '\"')\n person.load_model(net, model_filename)\n\n return net, model_labels", "def build_model(num_classes=43):\n model = models.Sequential()\n model.add(layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n\n model.add(layers.Conv2D(128, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n \n\n model.add(layers.Flatten())\n model.add(layers.Dense(num_classes, activation='softmax'))\n model.summary()\n\n return model", "def setup_to_transfer_learn(model, base_model):\n for layer in base_model.layers:\n layer.trainable = False\n model.compile(optimizer=Adam(lr=0.001), loss='categorical_crossentropy', metrics=['categorical_accuracy', f1_score])", "def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 
256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())" ]
[ "0.72591865", "0.7110485", "0.7058212", "0.70567673", "0.7052484", "0.7031042", "0.7014822", "0.7008694", "0.6998083", "0.6991139", "0.6976989", "0.6958172", "0.6947366", "0.69150037", "0.69144577", "0.6914014", "0.68923914", "0.6875319", "0.6874616", "0.6869344", "0.6869033", "0.6856122", "0.68444127", "0.6842929", "0.6836625", "0.6834208", "0.68285924", "0.6801924", "0.6801277", "0.67952096", "0.6792389", "0.6786373", "0.6769579", "0.67628115", "0.6759937", "0.67556095", "0.6755118", "0.67448795", "0.67340714", "0.6728676", "0.6728442", "0.67229474", "0.670632", "0.6702987", "0.6701704", "0.66961163", "0.66807765", "0.6674469", "0.6666368", "0.66624504", "0.6662278", "0.6652094", "0.6649602", "0.66433287", "0.66345245", "0.6633533", "0.66316307", "0.6631386", "0.6630117", "0.66236764", "0.6597157", "0.6595606", "0.6593125", "0.65928286", "0.65879", "0.65871", "0.65845954", "0.6583203", "0.6570983", "0.6570957", "0.6568241", "0.6561804", "0.65562373", "0.65534955", "0.65531963", "0.65512943", "0.6548037", "0.6540513", "0.65403026", "0.65380365", "0.6534887", "0.65345323", "0.6533627", "0.65274256", "0.65245825", "0.65236723", "0.652118", "0.6519294", "0.65119904", "0.65095204", "0.65094775", "0.650939", "0.65069896", "0.6502718", "0.65012836", "0.64988273", "0.64926267", "0.64921033", "0.64898485", "0.6489395", "0.64829373" ]
0.0
-1
This function is useful for merging a basic config with external configs. The external config's constants overwrite those of the basic config.
def cook_config(ext_config_filename):
    mc = base_model_config()
    with open(ext_config_filename, "r") as fp:
        ext_mc = edict(json.load(fp, encoding="utf8"))
    for s in ext_mc.keys():
        mc[s] = ext_mc[s]
    # mc.ANCHOR_BOX = set_anchors(mc)
    # print(np.max(np.square(np.array(set_anchors_testing(mc)) - np.array(set_anchors(mc)))))
    # mc.ANCHORS = len(mc.ANCHOR_BOX)
    # H, W, C = _get_output_shape(mc)
    # mc.MODEL_OUTPUT_SHAPE = [H, W, mc.ANCHOR_PER_GRID]
    return mc
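For illustration, a minimal sketch of the overwrite-merge behaviour described by the query above, written with plain dicts so it is self-contained (the config keys, values, and file path here are invented for the example and are not taken from the dataset):

import json
import tempfile

def merge_external_config(base_cfg, ext_config_filename):
    # Start from a copy of the base config so the original is untouched.
    merged = dict(base_cfg)
    # Load the external config and let its constants overwrite the base ones.
    with open(ext_config_filename, "r") as fp:
        ext_cfg = json.load(fp)
    for key, value in ext_cfg.items():
        merged[key] = value
    return merged

# Illustrative values only.
base = {"LEARNING_RATE": 0.01, "BATCH_SIZE": 20, "KEEP_PROB": 0.5}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump({"BATCH_SIZE": 8, "KEEP_PROB": 0.75}, tmp)
    path = tmp.name

merged = merge_external_config(base, path)
print(merged["BATCH_SIZE"])     # 8    -> external constant wins
print(merged["LEARNING_RATE"])  # 0.01 -> base constant kept

The loop mirrors the `for s in ext_mc.keys(): mc[s] = ext_mc[s]` pattern in `cook_config` above, just without the `edict` wrapper.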
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _merge(config, env):\n if 'common' in config and env in config:\n c = config['common'].copy()\n c.update(config[env])\n elif env in config.keys():\n c = config[env]\n elif 'common' in config.keys():\n c = config['common']\n else:\n c = config\n return c", "def base_config():\n return deepcopy(__CONFIG)", "def merge_jupyter_config_data(self, config, in_config):\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n self.log.debug(f\"\"\"[lite][config][merge] ..... {in_config}\"\"\")\n\n config = config or {}\n in_config = in_config or {}\n\n for k, v in in_config.items():\n if k in [DISABLED_EXTENSIONS, FEDERATED_EXTENSIONS]:\n config[k] = [*config.get(k, []), *v]\n elif k in [SETTINGS_OVERRIDES]:\n config[k] = config.get(k, {})\n for pkg, pkg_config in v.items():\n config[k][pkg] = config[k].get(pkg, {})\n config[k][pkg].update(pkg_config)\n else:\n config[k] = v\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n return config", "def merge_jupyter_config_data(self, config, in_config):\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n self.log.debug(f\"\"\"[lite][config][merge] ..... {in_config}\"\"\")\n\n config = config or {}\n in_config = in_config or {}\n\n for k, v in in_config.items():\n if k in [DISABLED_EXTENSIONS, FEDERATED_EXTENSIONS]:\n config[k] = [*config.get(k, []), *v]\n elif k in [SETTINGS_OVERRIDES]:\n config[k] = config.get(k, {})\n for pkg, pkg_config in v.items():\n config[k][pkg] = config[k].get(pkg, {})\n config[k][pkg].update(pkg_config)\n else:\n config[k] = v\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n return config", "def merge_config(self_config, indict):\n\n self_config.merge(indict)\n patch_config(self_config, indict)", "def replace_config(a, b):\n a.update(b)\n return a", "def concat_config(config, new_config):\n for new_path in new_config:\n if new_path not in config:\n config[new_path] = new_config[new_path]\n else:\n config[new_path][0] = config[new_path][0] or new_config[new_path][0]\n for filename in config[new_path]:\n if filename != 0:\n if filename in new_config[new_path]:\n for opt in config[new_path][filename]:\n if opt in new_config[new_path][filename]:\n new_config[new_path][filename][opt]\\\n .update(config[new_path][filename][opt])\n else:\n new_config[new_path][filename][opt] = \\\n config[new_path][filename][opt]\n else:\n new_config[new_path][filename] = config[new_path][filename]\n return config", "def test_config_merging():\n toml = StringIO(\n dedent(\n \"\"\"\\\n [tool.vulture]\n exclude = [\"toml_exclude\"]\n ignore_decorators = [\"toml_deco\"]\n ignore_names = [\"toml_name\"]\n make_whitelist = false\n min_confidence = 10\n sort_by_size = false\n verbose = false\n paths = [\"toml_path\"]\n \"\"\"\n )\n )\n cliargs = [\n \"--exclude=cli_exclude\",\n \"--ignore-decorators=cli_deco\",\n \"--ignore-names=cli_name\",\n \"--make-whitelist\",\n \"--min-confidence=20\",\n \"--sort-by-size\",\n \"--verbose\",\n \"cli_path\",\n ]\n result = make_config(cliargs, toml)\n expected = dict(\n paths=[\"cli_path\"],\n exclude=[\"cli_exclude\"],\n ignore_decorators=[\"cli_deco\"],\n ignore_names=[\"cli_name\"],\n make_whitelist=True,\n min_confidence=20,\n sort_by_size=True,\n verbose=True,\n )\n assert result == expected", "def spread_default_parameters(config, dev_cfg):\n def_cfg = config.get('DEFAULT')\n if def_cfg is None:\n return\n\n for (key, value) in def_cfg.items():\n if key not in dev_cfg:\n dev_cfg[key] = value", "def _merge_two_config(user_cfg, 
default_cfg):\n if type(user_cfg) is not edict:\n return\n for key, val in user_cfg.iteritems():\n # Since user_cfg is a sub-file of default_cfg\n if not default_cfg.has_key(key):\n raise KeyError('{} is not a valid config key'.format(key))\n\n if type(default_cfg[key]) is not type(val):\n if isinstance(default_cfg[key], np.ndarray):\n val = np.array(val, dtype=default_cfg[key].dtype)\n else:\n raise ValueError(\n 'Type mismatch ({} vs. {}) '\n 'for config key: {}'.format(type(default_cfg[key]),\n type(val), key))\n # Recursive merge config\n if type(val) is edict:\n try:\n _merge_two_config(user_cfg[key], default_cfg[key])\n except:\n print 'Error under config key: {}'.format(key)\n raise\n else:\n default_cfg[key] = val", "def merge_config(config_dict, template_dict):\n\n # Turn off interpolation so what gets merged is the symbolic name\n # (such as WEEWX_ROOT), and not its interpolated value. \n csave, config_dict.interpolation = config_dict.interpolation, False\n tsave, template_dict.interpolation = template_dict.interpolation, False\n\n # Merge new stuff from the template:\n weeutil.weeutil.conditional_merge(config_dict, template_dict)\n \n config_dict.interpolation = csave\n template_dict.interpolation = tsave\n\n # Finally, update the version number:\n config_dict['version'] = template_dict['version']\n\n return config_dict", "def append_common(envin, content):\n # This is the original libconfig.h. However, in case somebody (like\n # pbdagcon) builds libpbdata in-place, we need to drop a copy of\n # libconfig.h wherever pbdata is actually built, which we will not\n # know until later. This can all be cleared up later, when we are\n # more clear about where things are built.\n libconfig_h = os.path.abspath(os.path.join(os.getcwd(), 'libconfig.h'))\n content += \"\"\"\nLIBCONFIG_H:=%s\n# Use PREFIX dir, if available.\nINCLUDES += ${PREFIX_INC}\nLIBS += ${PREFIX_LIB}\n\"\"\"%libconfig_h\n env = dict(envin)\n # Some extra defs.\n if 'PREFIX' in envin:\n PREFIX = envin['PREFIX']\n setenv(env, 'PREFIX_INC', os.path.join(PREFIX, 'include'))\n setenv(env, 'PREFIX_LIB', os.path.join(PREFIX, 'lib'))\n poss = [\n 'SH_LIB_EXT',\n 'EXTRA_LDFLAGS',\n 'PREFIX_LIB', 'PREFIX_INC',\n ]\n vals = ['%-20s := %s' %(k, v) for k,v in sorted(env.items()) if k in poss]\n return '\\n'.join([''] + vals + ['']) + content", "def get_config(self):\n config = {\n }\n base_config = super(MatrixConcat, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "def merge_config(config, opts):\n for key, value in opts.items():\n if \".\" not in key:\n if isinstance(value, dict) and key in config:\n config[key].update(value)\n else:\n config[key] = value\n else:\n sub_keys = key.split('.')\n assert (\n sub_keys[0] in config\n ), \"the sub_keys can only be one of global_config: {}, but get: \" \\\n \"{}, please check your running command\".format(\n config.keys(), sub_keys[0])\n cur = config[sub_keys[0]]\n for idx, sub_key in enumerate(sub_keys[1:]):\n if idx == len(sub_keys) - 2:\n cur[sub_key] = value\n else:\n cur = cur[sub_key]\n return config", "def get_rllib_full_config(self):\n return merged_dict(self.get_default_config(), self.get_config())", "def merge(self, other_config):\n # Make a copy of the current attributes in the config object.\n config_options = copy.copy(self._user_provided_options)\n\n # Merge in the user provided options from the other config\n config_options.update(other_config._user_provided_options)\n\n # Return a new config object with the merged properties.\n return 
Config(**config_options)", "def _load_common_config(self, config: Dict[str, Any]) -> Dict[str, Any] :\n # Log level\n if 'loglevel' in self.args.loglevel:\n config.update({'verbosity': self.args.loglevel})\n else:\n config.update({'verbosity': 0})\n logging.basicConfig(\n level=logging.INFO if config['verbosity'] < 1 else logging.DEBUG,\n format= '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n set_loggers(config['verbosity'])\n logger.info('Verbosity set to %s', config['verbosity'])\n\n # Add dynamic whitelist if found\n if 'dynamic_whitelist' in self.args and self.args.dynamic_whitelist:\n config['pairlist'] = {\n 'method': 'VolumePairList',\n 'config': {'number_assets': self.args.dynamic_whitelist}\n }\n logger.warning(\n 'Parameter --dynamic-whitelist has been deprecated, '\n 'and will be completely replaced by the whitelist dict in the future. '\n 'For now: using dynamically generated whitelist based on VolumePairList. '\n '(not applicable with Backtesting and Hyperopt)'\n )\n if self.args.db_url and self.args.db_url != constant.DEFAULT_DB_PROD_URL:\n config.update({'db_url': self.args.db_url})\n logger.info('Parameter --db-url detected ...')\n\n if config.get('dry_run', False):\n logger.info('Dry run is enabled')\n if config.get('db_url') in [None, constant.DEFAULT_DB_PROD_URL]:\n # Default to in-memory db for dry_run if not specified\n config['db_url'] = constant.DEFAULT_DB_DRYRUN_URL\n else:\n if not config.get('db_url', None):\n config['db_url'] = constant.DEFAULT_DB_PROD_URL\n logger.info('Dry run is disabled')\n\n if config.get('forcebuy_enable', False):\n logger.warning('`forcebuy` RPC message enabled.')\n\n # Setting max_open_trades to infinite if -1\n if config.get('max_open_trades') == -1:\n config['max_open_trades'] = float('inf')\n\n logger.info(f'Using DB: \"{config[\"db_url\"]}\"')\n\n # Check if the exchange set by the user is supported\n self.check_exchange(config)\n\n return config", "def reset_cfg():\n _C.merge_from_other_cfg(_CFG_DEFAULT)", "def merge_new_overrides():\n # Take the dex config as is:\n new_doc = {'config': copy.deepcopy(DEFINES['dex_config'])}\n # Convert old dex certs.web.secret to https-tls volume/volumeMounts\n mount = {'mountPath': get_httpstls_mount(), 'name': 'https-tls'}\n vol = {'secret': {'secretName': get_httpstls_secret(),\n 'defaultMode': DEFAULT_HTTPSTLS_MODE},\n 'name': 'https-tls'}\n # Take 'extra' volumes and mounts that may exist in old dex\n # This is expected to be the WAD certificate\n volumes = []\n volumeMounts = []\n if 'volumes' in DEFINES:\n volumes = copy.deepcopy(DEFINES['volumes'])\n if 'volumeMounts' in DEFINES:\n volumeMounts = copy.deepcopy(DEFINES['volumeMounts'])\n\n # only add volumes/mounts if 'extra' was specified, or\n # if there was non-default mount\n if volumes or 'tls_secret' in DEFINES:\n volumes.append(vol)\n if volumeMounts or 'dex_https_tlsCert' in DEFINES:\n volumeMounts.append(mount)\n if volumes:\n new_doc['volumes'] = volumes\n if volumeMounts:\n new_doc['volumeMounts'] = volumeMounts\n return new_doc", "def test_load_configuration_extends_the_previous():\n config.load_configuration(extended_configuration_path)\n assert config.get('test.nested.path.value') == 'overriding test value'", "def init_config(self):\n super().init_config()\n for param in self.parameters():\n if param.name == 'source':\n continue\n self.add_config_item(param.name,\n saver=lambda p=param: getattr(p, \"value\"),\n loader=lambda x, p=param: setattr(p, \"value\", x),\n default=param.default)", "def merge_configs(config1, 
config2, additional_keys = []):\n\t\tmerged_config = copy.deepcopy(config1)\n\t\t\n\t\tfor key in list(set([\n\t\t\t\t\"nicks\",\n\t\t\t\t\"directories\",\n\t\t\t\t\"files\",\n\t\t\t\t\"folders\",\n\t\t\t\t\"x_expressions\",\n\t\t\t\t\"scale_factors\",\n\t\t\t\t\"weights\",\n\t\t\t\t\"x_bins\",\n\t\t\t\t\"y_bins\",\n\t\t\t\t\"z_bins\",\n\t\t\t\t\"tree_draw_options\",\n\t\t\t\t\"proxy_prefixes\",\n\t\t\t\t\"histogram_to_scale_nicks\",\n\t\t\t\t\"integral_histogram_nicks\",\n\t\t\t\t\"scale_by_inverse_integrals\",\n\t\t\t\t\"add_nicks\",\n\t\t\t\t\"add_result_nicks\",\n\t\t\t\t\"sum_nicks\",\n\t\t\t\t\"sum_result_nicks\",\n\t\t\t\t\"stacks\",\n\t\t\t\t\"markers\",\n\t\t\t\t\"colors\",\n\t\t\t\t\"labels\",\n\t\t\t\t\"legend_markers\",\n\t\t\t\t\"shape_nicks\",\n\t\t\t\t\"yield_nicks\",\n\t\t\t\t\"shape_yield_nicks\"\n\t\t] + additional_keys)):\n\t\t\tif key in merged_config or key in config2:\n\t\t\t\tmerged_config.setdefault(key, []).extend(config2.get(key, []))\n\t\t\n\t\tfor key in [\n\t\t\t\t\"analysis_modules\",\n\t\t]:\n\t\t\tfor item in config2.get(key, []):\n\t\t\t\tif not item in merged_config.get(key, []):\n\t\t\t\t\tmerged_config.setdefault(key, []).append(item)\n\t\t\n\t\tfor key, value in config2.iteritems():\n\t\t\tif not key in merged_config:\n\t\t\t\tmerged_config[key] = value\n\t\t\n\t\treturn merged_config", "def merge_configs(configFile:str, oldSampleFile:str, newSampleFile:str, unsafeAttributesFile:str, filetype:str):\n upgrade_config(configFile, oldSampleFile, newSampleFile, unsafeAttributesFile, filetype)", "def test_config_overwrites():\n basepath = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", '..'))\n temppath = '/tmp/'\n\n conf = core.Config(datapath=temppath)\n\n assert conf.basepath.lower() == basepath.lower()\n assert conf.datapath.lower() == temppath.lower()", "def build_configs():", "def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. 
OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")", "def _set_default_concat_config(concat):\n concat = concat or []\n if not isinstance(concat, list):\n concat = [concat]\n for c in concat:\n c['target'] = c.get('target', '__broken_target__')\n c['concat_columns'] = c.get('concat_columns', [])\n c['delimiter'] = c.get('delimiter', ' ')\n c['concat_values'] = {}\n\n return concat", "def update_config(self, config):\n # add follower public folder to the CKAN's list of public folders\n here = os.path.dirname(__file__)\n public_dir = os.path.join(here, 'public')\n if config.get('extra_public_paths'):\n config['extra_public_paths'] += ',' + public_dir\n else:\n config['extra_public_paths'] = public_dir\n # add follower template folder to the CKAN's list of template folders\n template_dir = os.path.join(here, 'templates')\n if config.get('extra_template_paths'):\n config['extra_template_paths'] += ',' + template_dir\n else:\n config['extra_template_paths'] = template_dir", "def test_config_merging_missing():\n toml = StringIO(\n dedent(\n \"\"\"\\\n [tool.vulture]\n verbose = true\n ignore_names = [\"name1\"]\n \"\"\"\n )\n )\n cliargs = [\n \"cli_path\",\n ]\n result = make_config(cliargs, toml)\n assert result[\"verbose\"] is True\n assert result[\"ignore_names\"] == [\"name1\"]", "def update_config_external_template(config):\r\n\r\n # best parameters from the paper\r\n config['train_batch_size'] = 16384\r\n config['lr'] = 3e-4\r\n config['sgd_minibatch_size'] = 4096\r\n config['num_sgd_iter'] = 4\r\n config['rollout_fragment_length'] = 100\r\n\r\n # run ID to communicate to the http trainer\r\n config['run_uid'] = '_setme'\r\n\r\n # stable baselines accepts full episodes\r\n config[\"batch_mode\"] = \"complete_episodes\"\r\n\r\n # stable baselines server address\r\n config[\"http_remote_port\"] = \"http://127.0.0.1:50001\"\r\n\r\n # no gpus, stable baselines might use them\r\n config['num_gpus'] = 0\r\n\r\n # set trainer class\r\n config['_trainer'] = \"External\"\r\n config['_policy'] = \"PPO\"\r\n\r\n # tuned\r\n config['num_envs_per_worker'] = 10\r\n config['num_workers'] = 3\r\n return config", "def handle_config_inited(app, config):\n\n def handle_legacy(new, orig):\n if getattr(config, new) is None and getattr(config, orig) is not None:\n config[new] = config[orig]\n\n # copy over deprecated configuration names to new names (if any)\n handle_legacy('confluence_publish_allowlist', 'confluence_publish_subset')\n handle_legacy('confluence_purge_from_root', 'confluence_purge_from_master')\n handle_legacy('confluence_root_homepage', 'confluence_master_homepage')\n handle_legacy('confluence_space_key', 'confluence_space_name')", "def _build_config() -> None:\n\n global CONFIGURATION\n\n configuration_common = {\n 'SERVICE_HOST': os.getenv('SERVICE_HOST', '0.0.0.0'),\n 'SERVICE_PORT': os.getenv('SERVICE_PORT', 8080),\n 'FILE_NAME': os.getenv('FILE_NAME', 
'example'),\n }\n\n CONFIGURATION = {\n **configuration_common\n }", "def platform_config_update(config):\n global remote_port_map\n config[\"port_map\"] = remote_port_map.copy()\n config[\"caps_table_idx\"] = 0", "def update_global_config(self, config, **kwargs):\n pass", "def update(self, other: Mapping[str, Any]) -> None:\n self._config.update(self._flatten_dict(other))", "def extend(clself, other):\n clself._cfg_def.extend(other._cfg_def)\n for key, optdef in clself._cfg_def.options.iteritems():\n setattr(clself, key, optdef)", "def test_confiure_read_merge(self):\n class TestBase(pyperry.Base):\n def _config(cls):\n cls.configure('read', poop='smells')\n\n class Test(TestBase):\n def _config(cls):\n cls.configure('read', foo='bar')\n\n self.assertEqual(Test.adapter_config['read']['foo'], 'bar')\n self.assertEqual(Test.adapter_config['read']['poop'], 'smells')\n\n class Test2(Test):\n def _config(cls):\n cls.configure('read', { 'poop': 'stanks' })\n\n self.assertEqual(Test2.adapter_config['read']['poop'], 'stanks')\n self.assertEqual(Test.adapter_config['read']['poop'], 'smells')", "def combine_pax_configs(config, overrides):\n # TODO: we should soon be able to get this from pax, but let's wait a while to prevent incompatibilties\n for section_name, stuff in overrides.items():\n config.setdefault(section_name, {})\n config[section_name].update(stuff)\n return config", "def createMergedConfigFile(self):\n # Read config data\n if os.path.isfile(self.config_file):\n with open(self.config_file, 'r') as stream:\n try:\n cfg = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n if debug:\n print(\"Using Config file: \" + self.config_file)\n else:\n if debug:\n print(\"Config file does not exist: \" + self.config_file)\n exit(1)\n\n # If project namespace was not in the config file, set a default\n if (cfg is not None\n and 'generic' in cfg\n and 'project_namespace' in cfg['generic']\n and cfg['generic']['project_namespace'] is not None\n and len(cfg['generic']['project_namespace']) > 0):\n if debug:\n print(\"Using specified namespace\")\n else:\n conf_dir = os.path.dirname(self.config_file)\n cmd = \"cd \" + conf_dir + ' && basename `git rev-parse --show-toplevel`'\n try:\n result_bytes = subprocess.check_output(cmd,\n timeout=300,\n shell=True)\n project_namespace = result_bytes.decode('UTF-8').rstrip()\n if debug:\n print(\"Derived namespace from git: \" + project_namespace)\n except subprocess.CalledProcessError as e:\n if debug:\n print(\"Error deriving project namespace from git: \", e.output)\n sys.exit(1)\n # Insert the project_namespace into the config data\n if cfg is None:\n cfg = {}\n if 'generic' not in cfg:\n cfg['generic'] = {}\n cfg['generic']['project_namespace'] = project_namespace\n\n # Confirm project namespace\n if debug:\n print(\"Project Namespace: \" + cfg['generic']['project_namespace'])\n\n # Read overrides\n override_file_data = {}\n if os.path.isfile(self.override_file):\n with open(self.override_file, 'r') as stream:\n try:\n override_file_data = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n\n # Created merged data\n self.config_data = cfg\n # print(\"Applying override_file_data: \" + str(override_file_data))\n if override_file_data is not None:\n self.config_data = merge(self.config_data, override_file_data)\n\n # Ensure parent directory for merged file exists\n directory = Path(self.merged_file).parent\n if not os.path.exists(directory):\n os.makedirs(directory)\n # Created merged file\n with open(self.merged_file, 'w') 
as out_file:\n yaml.dump(self.config_data, out_file)", "def combine_config(user, default):\n # currently supports nesting only the first level of dictionaries\n # I feel like this could be written a lot better but....\n default_config = dict(**default)\n user_config = dict(**user)\n for k in default_config.keys():\n if user_config.get(k, None) == None:\n user_config[k] = default_config[k]\n else: # that dict already exists, check and make sure it's values do as well\n if type(user_config[k]) == dict:\n for k2 in default_config[k].keys():\n if user_config[k].get(k2, None) == None:\n user_config[k][k2] = default_config[k][k2]\n return user_config", "def _use_custom_config(self, standard_conf_path):\n conf_filename = os.path.basename(standard_conf_path)\n custom_conf_expected_path = CUSTOM_CONFIG_DIR + '/' + self._get_tempdir() + '/' + conf_filename\n shutil.copy(custom_conf_expected_path,\n self._get_tempdir() + '/' + standard_conf_path)", "def read_config(self, config):\n try:\n newconfig = ConfigObj(config, interpolation=False,\n configspec=self._configspec)\n except ConfigObjError as e:\n raise ConfigError(e)\n newconfig = self._validate(newconfig)\n self._config.merge(newconfig)\n logger.info(\"Loaded additional config: {0}\".format(config))", "def get_config(self):\n config = {\n 'multichannel': self._multichannel,\n 'complex_part': self._complex_part\n }\n base_config = super().get_config()\n return {**base_config, **config}", "def merge_config(user: dict, default: dict) -> dict:\n\n if isinstance(user, dict) and isinstance(default, dict):\n for kk, vv in default.items():\n if kk not in user:\n user[kk] = vv\n else:\n user[kk] = merge_config(user[kk], vv)\n\n return user", "def _fillConfig(config, bare=False):\n if not bare:\n configFile = CONFIG_STRUCTURE.format(\n sup2Sub=config.sup2Sub,\n handBrake=config.handBrake,\n java=config.java,\n mkvExtract=config.mkvExtract,\n mkvMerge=config.mkvMerge,\n bFrames=config.bFrames,\n audioFallback=config.audioFallback,\n language=config.language,\n sorting=config.sorting,\n sortingReverse=config.sortingReverse,\n x264Speed=config.x264Speed,\n bq1080=config.quality['bq']['1080'],\n bq720=config.quality['bq']['720'],\n bq480=config.quality['bq']['480'],\n hq1080=config.quality['hq']['1080'],\n hq720=config.quality['hq']['720'],\n hq480=config.quality['hq']['480'],\n uq1080=config.quality['uq']['1080'],\n uq720=config.quality['uq']['720'],\n uq480=config.quality['uq']['480'],\n )\n else:\n configFile = CONFIG_STRUCTURE_BARE.format(\n sup2Sub=config.sup2Sub,\n handBrake=config.handBrake,\n java=config.java,\n mkvExtract=config.mkvExtract,\n mkvMerge=config.mkvMerge\n )\n\n return configFile", "def includeme(config):", "def test_config_overwrite(self):\n inc = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.inc\", False, True)\n ini = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.ini\", False, True)\n\n self.assertEquals(inc, ini)", "def add_config(self):\n\n config = {\n 'invert_byte': InvertByte,\n 'invert_word': InvertWord,\n 'invert_double_word': InvertDoubleWord,\n 'and_byte': AndByte,\n 'and_word': AndWord,\n 'and_double_word': AndDoubleWord,\n 'or_byte': OrByte,\n 'or_word': OrWord,\n 'or_double_word': OrDoubleWord,\n 'exclusive_or_byte': ExclusiveOrByte,\n 'exclusive_or_word': ExclusiveOrWord,\n 'exclusive_or_double_word': ExclusiveOrDoubleWord\n }\n\n return config", "def apply_config_defaults():\n\n # don't worry about broken settings, validate_config() will 
take\n # care of them\n\n if 'pre_action_callbacks' not in nori.cfg:\n nori.cfg['pre_action_callbacks'] = [\n (pre_action_drupal_readonly, [], {})\n ]\n\n if 'post_action_callbacks' not in nori.cfg:\n nori.cfg['post_action_callbacks'] = [\n (post_action_drupal_readonly, [], {}, True)\n ]\n\n if 'source_type' not in nori.cfg:\n nori.cfg['source_type'] = 'generic'\n\n if 'source_query_func' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_func'] = generic_db_query\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_func'] = drupal_db_query\n\n if 'source_query_defaulter' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_defaulter'] = (\n apply_generic_arg_defaults\n )\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_defaulter'] = None\n\n if 'source_query_validator' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_validator'] = validate_generic_args\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_validator'] = validate_drupal_args\n\n if 'source_template_change_callbacks' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_template_change_callbacks'] = []\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_template_change_callbacks'] = [\n (drupal_timestamp_callback, [], {})\n ]\n\n if 'source_global_change_callbacks' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_global_change_callbacks'] = []\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_global_change_callbacks'] = [\n (drupal_cache_callback, [], {})\n ]\n\n if 'dest_type' not in nori.cfg:\n nori.cfg['dest_type'] = 'generic'\n\n if 'dest_query_func' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_func'] = generic_db_query\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_func'] = drupal_db_query\n\n if 'dest_query_defaulter' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_defaulter'] = (\n apply_generic_arg_defaults\n )\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_defaulter'] = None\n\n if 'dest_query_validator' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_validator'] = validate_generic_args\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_validator'] = validate_drupal_args\n\n if 'dest_template_change_callbacks' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_template_change_callbacks'] = []\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_template_change_callbacks'] = [\n (drupal_timestamp_callback, [], {})\n ]\n\n if 'dest_global_change_callbacks' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_global_change_callbacks'] = []\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_global_change_callbacks'] = [\n (drupal_cache_callback, [], {})\n ]\n\n if 'templates' not in nori.core.cfg:\n return\n if not isinstance(nori.core.cfg['templates'],\n nori.core.MAIN_SEQUENCE_TYPES):\n return\n\n for i, template in enumerate(nori.core.cfg['templates']):\n if not isinstance(nori.core.cfg['templates'][i],\n nori.core.MAPPING_TYPES):\n continue\n\n if T_MULTIPLE_KEY not in template:\n 
nori.core.cfg['templates'][i][T_MULTIPLE_KEY] = False\n\n if T_S_QUERY_ARGS_KEY in template:\n args_t = template[T_S_QUERY_ARGS_KEY]\n defaulter = nori.core.cfg['source_query_defaulter']\n if (isinstance(args_t, tuple) and len(args_t) >= 2 and\n isinstance(args_t[0], nori.core.MAIN_SEQUENCE_TYPES) and\n isinstance(args_t[1], nori.core.MAPPING_TYPES) and\n defaulter and callable(defaulter)):\n defaulter(args_t[0], args_t[1])\n\n if T_TO_D_FUNC_KEY not in template:\n nori.core.cfg['templates'][i][T_TO_D_FUNC_KEY] = None\n\n if T_S_NO_REPL_KEY not in template:\n nori.core.cfg['templates'][i][T_S_NO_REPL_KEY] = False\n\n if T_S_CHANGE_CB_KEY not in template:\n nori.core.cfg['templates'][i][T_S_CHANGE_CB_KEY] = []\n\n if T_D_QUERY_ARGS_KEY in template:\n args_t = template[T_D_QUERY_ARGS_KEY]\n defaulter = nori.core.cfg['dest_query_defaulter']\n if (isinstance(args_t, tuple) and len(args_t) >= 2 and\n isinstance(args_t[0], nori.core.MAIN_SEQUENCE_TYPES) and\n isinstance(args_t[1], nori.core.MAPPING_TYPES) and\n defaulter and callable(defaulter)):\n defaulter(args_t[0], args_t[1])\n\n if T_TO_S_FUNC_KEY not in template:\n nori.core.cfg['templates'][i][T_TO_S_FUNC_KEY] = None\n\n if T_D_NO_REPL_KEY not in template:\n nori.core.cfg['templates'][i][T_D_NO_REPL_KEY] = False\n\n if T_D_CHANGE_CB_KEY not in template:\n nori.core.cfg['templates'][i][T_D_CHANGE_CB_KEY] = []\n\n if T_KEY_MODE_KEY not in template:\n nori.core.cfg['templates'][i][T_KEY_MODE_KEY] = 'all'\n\n if T_KEY_LIST_KEY not in template:\n nori.core.cfg['templates'][i][T_KEY_LIST_KEY] = []", "def load_configs(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\t# Get root default config.\n\t\t# TODO: change default_cnf so it places whatever the values are at this stage of the build.\n\t\tconfigs = [('defaults', StringIO(default_cnf)), os.path.expanduser('~/.shutit/config'), os.path.join(self.host['shutit_path'], 'config'), 'configs/build.cnf']\n\t\t# Add the shutit global host- and user-specific config file.\n\t\t# Add the local build.cnf\n\t\t# Get passed-in config(s)\n\t\tfor config_file_name in self.build['extra_configs']:\n\t\t\trun_config_file = os.path.expanduser(config_file_name)\n\t\t\tif not os.path.isfile(run_config_file):\n\t\t\t\tshutit_global.shutit_global_object.shutit_print('Did not recognise ' + run_config_file + ' as a file - do you need to touch ' + run_config_file + '?')\n\t\t\t\tshutit_global.shutit_global_object.handle_exit(exit_code=0)\n\t\t\tconfigs.append(run_config_file)\n\t\t# Image to use to start off. 
The script should be idempotent, so running it\n\t\t# on an already built image should be ok, and is advised to reduce diff space required.\n\t\tif self.action['list_configs'] or self.loglevel <= logging.DEBUG:\n\t\t\tmsg = ''\n\t\t\tfor c in configs:\n\t\t\t\tif isinstance(c, tuple):\n\t\t\t\t\tc = c[0]\n\t\t\t\tmsg = msg + ' \\n' + c\n\t\t\t\tself.log(' ' + c,level=logging.DEBUG)\n\n\t\t# Interpret any config overrides, write to a file and add them to the\n\t\t# list of configs to be interpreted\n\t\tif self.build['config_overrides']:\n\t\t\t# We don't need layers, this is a temporary configparser\n\t\t\toverride_cp = ConfigParser.RawConfigParser()\n\t\t\tfor o_sec, o_key, o_val in self.build['config_overrides']:\n\t\t\t\tif not override_cp.has_section(o_sec):\n\t\t\t\t\toverride_cp.add_section(o_sec)\n\t\t\t\toverride_cp.set(o_sec, o_key, o_val)\n\t\t\toverride_fd = StringIO()\n\t\t\toverride_cp.write(override_fd)\n\t\t\toverride_fd.seek(0)\n\t\t\tconfigs.append(('overrides', override_fd))\n\n\t\tself.config_parser = self.get_configs(configs)\n\t\tself.get_base_config()", "def _overwrite_with_config(self, new_cfg):\n for section in new_cfg.sections():\n for key, val in new_cfg.items(section):\n self.config.set(section, key, val)", "def upgrade_config(configFile:str, oldSampleFile:str, newSampleFile:str, unsafeAttributesFile:str, filetype:str):\n\n #If config file is not present then abort merging.\n if not os.path.isfile(configFile):\n Log.error(f'config file {configFile} does not exist')\n raise Exception(f'ERROR: config file {configFile} does not exist')\n\n Log.info(f'config file {str(configFile)} upgrade started.')\n\n # old sample file\n conf_old_sample = filetype + oldSampleFile\n cs_conf_old_sample = S3CortxConfStore(config=conf_old_sample, index=conf_old_sample)\n\n # new sample file\n conf_new_sample = filetype + newSampleFile\n cs_conf_new_sample = S3CortxConfStore(config=conf_new_sample, index=conf_new_sample)\n conf_new_sample_keys = cs_conf_new_sample.get_all_keys()\n\n # unsafe attribute file\n conf_unsafe_file = filetype + unsafeAttributesFile\n cs_conf_unsafe_file = S3CortxConfStore(config=conf_unsafe_file, index=conf_unsafe_file)\n conf_unsafe_file_keys = cs_conf_unsafe_file.get_all_keys()\n\n # active config file\n conf_file = filetype + configFile\n cs_conf_file = S3CortxConfStore(config=conf_file, index=conf_file)\n conf_file_keys = cs_conf_file.get_all_keys()\n\n #logic to determine which keys to merge.\n keys_to_overwrite = []\n for key in conf_new_sample_keys:\n #If key is marked for unsafe then do not modify/overwrite.\n if key in conf_unsafe_file_keys:\n continue\n #if key not present active config file then add it\n # (this will also add and hence effectively overwrite keys removed in above [] handing\n # and hence will always result in overwrite for these keys from the new sample file).\n if key not in conf_file_keys:\n keys_to_overwrite.append(key)\n #if key is not unsafe and value is not changed by user then overwrite it.\n elif cs_conf_file.get_config(key) == cs_conf_old_sample.get_config(key):\n keys_to_overwrite.append(key)\n #if user has changed the value of the key then skip it.\n else:\n continue\n\n cs_conf_file.merge_config(source_index=conf_new_sample, keys_to_include=keys_to_overwrite)\n cs_conf_file.save_config()\n Log.info(f'config file {str(configFile)} upgrade completed')", "def include(name):\n env.configs = name.split(' ') + env.configs", "def get_full_config(self):\n\n raise Exception(\"Child classes must override get_full_config()\")", "def 
_augment_pipeline_cfg(self):", "def overwrite(cls, config_file_overwrite: str):\n conf_overwrite: dict = GC.read_conf(config_file_overwrite)\n for sec, attr in conf_overwrite.items():\n for key, val in attr.items():\n try:\n _ = GC.conf[sec][key]\n GC.conf[sec][key] = val\n except KeyError:\n print(\"Overwrite config file has section/key that \"\n \"don't exist in base config!!! Abort!!!\")\n sys.exit(1)", "def get_default_config(cls):\n default = super(LSHNearestNeighborIndex, cls).get_default_config()\n\n lf_default = plugin.make_config(get_lsh_functor_impls())\n default['lsh_functor'] = lf_default\n\n di_default = plugin.make_config(get_descriptor_index_impls())\n default['descriptor_index'] = di_default\n\n hi_default = plugin.make_config(get_hash_index_impls())\n default['hash_index'] = hi_default\n default['hash_index_comment'] = \"'hash_index' may also be null to \" \\\n \"default to a linear index built at \" \\\n \"query time.\"\n\n h2u_default = plugin.make_config(get_key_value_store_impls())\n default['hash2uuids_kvstore'] = h2u_default\n\n return default", "def config():", "def config():", "def __build_empty_config(self):\n\n self.__config.add_section('IN_OUT')\n self.__config['IN_OUT']['source'] = 'Set Source Directory'\n self.__config['IN_OUT']['destination'] = 'Set Destination Directory'\n self.__save_config()\n\n self.__is_dirty = False\n self.__default = True", "def cascade_config(run):\n conf=read_config(run)\n conf['run']=run\n\n conf['lens_conf']=read_config(conf['lcat_vers'])\n conf['source_conf']=read_config(conf['scat_vers'])\n conf['cosmo_conf']=read_config(conf['lens_conf']['cosmo_vers'])\n conf['mask_conf']=read_config(conf['lens_conf']['mask_vers'])\n\n lc=conf['lens_conf']['cosmo_vers']\n sc=conf['source_conf']['cosmo_vers']\n if lc != sc:\n raise ValueError(\"cosmo mismatch: '%s' '%s'\" % (lc,sc))\n\n lc=conf['lens_conf']['mask_vers']\n sc=conf['source_conf']['mask_vers']\n\n # OK for one of them to be none\n if lc != 'mask-none' and sc != 'mask-none':\n if lc != sc:\n raise ValueError(\"mask version: '%s' '%s'\" % (lc,sc))\n\n return conf", "def test_merge_configparser(self):\n cd = ConfigDict.from_dict({\n 'a': 1,\n 'b': {\n 'c': 2,\n 'd': 3,\n }\n })\n\n schema = Schema({\n 'a': Coerce(int),\n 'z': basestring,\n 'b': {\n 'c': Coerce(int)\n }\n }, extra=True)\n cd.register_trigger(\n SchemaTrigger(schema)\n )\n\n cfg = ConfigParser()\n cfg.read_string(u\"\"\"\n [main]\n a = 11\n z = 99\n\n [b]\n c = 22\n \"\"\")\n\n cd.merge_configparser(cfg)\n cd.configure()\n\n self.assertEquals(cd.a,11)\n self.assertEquals(cd.z, '99')\n self.assertEquals(cd.b.c, 22)\n self.assertEquals(cd.b.d, 3)", "def _config_helper(config_key, required_dict, config, filename, defaults):\n this_type = config.get(config_key)\n if this_type not in required_dict:\n config[config_key] = defaults.get(config_key)\n print(f\"{config_key} {this_type} does not exist. Using default of \"\n f\"{defaults.get(config_key)} instead.\")\n\n new_type = config.get(config_key)\n for c in required_dict.get(new_type):\n if c not in config:\n config[c] = defaults.get(c)\n print(f\"{config_key} is {new_type} however {c} not specified \"\n f\"in {filename}. 
Default is {defaults.get(c)}.\")", "def _load_raw_configs(override_path, defaults, overrides):\n if override_path:\n path = override_path\n else:\n search_dirs = [\".\", _locate_config_dir()]\n path = _locate_config_file(search_dirs)\n\n with open(path) as f:\n parsed_configs = yaml.safe_load(f)\n\n out = []\n try:\n items = parsed_configs[\"items\"]\n for config_dict in items:\n # Legacy fix for renamed key. TODO: Remove this after a while.\n if \"copy_type\" in config_dict:\n config_dict[\"install_method\"] = config_dict[\"copy_type\"]\n del config_dict[\"copy_type\"]\n\n # Name this config (since we may override the local_path).\n config_dict[\"name\"] = config_dict[\"local_path\"]\n\n nones = {key: None for key in Config._fields}\n combined = strif.dict_merge(nones, defaults, config_dict, overrides)\n log.debug(\"raw, combined config: %r\", combined)\n\n try:\n out.append(combined)\n except TypeError as e:\n raise ConfigError(\"error in config value: %s: %s\" % (e, config_dict))\n except ValueError as e:\n raise ConfigError(\"error reading config file: %s\" % e)\n\n return out", "def default_configs(cls):\n config = super().default_configs()\n config.update({\"model\": \"openie\"})\n return config", "def __initConfiguration(self):\n conf = configparser.ConfigParser()\n with open(self.configFile, \"r\") as f:\n conf.readfp(f)\n self.orgConf = conf\n # check additionalSection\n adSection = self.additionalSection\n if adSection in conf:\n adSection = conf[adSection]\n self.conf = {}\n for i in [self.CLIENT_ID, self.CLIENT_SECRET, self.AUTHZ_ENDPOINT,\n self.TOKEN_ENDPOINT, self.REDIRECT_URI, self.SCOPE]:\n if adSection != None and i in adSection:\n self.conf[i] = adSection[i]\n else:\n self.conf[i] = conf[\"DEFAULT\"][i]", "def __set_special_config_values(cfg: __Config, config: dict) -> \"__Config\":\n cfg.file_name_plane_masks = lambda i: str(i) + config['file_name_plane_mask_suf']\n cfg.file_name_planercnn_image = lambda i: str(i) + config['file_name_planercnn_image_suf']\n cfg.dir_results = f\"{cfg.edge_detection_type}\" # will be the output folder, create in data dir\n cfg.image_size = tuple(int(x) for x in config['image_size'].split(\" \"))\n return cfg", "def load_config():\n nested_config = {\n 'ALIASES': {},\n 'COLOR': {\n 'INFO': None,\n 'DEBUG': None,\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red'\n },\n 'TREES': {},\n }\n config = {}\n\n for filename in reversed(get_config_paths()):\n parsed_config = parse_config_file(filename)\n\n for key in nested_config:\n nested_config[key].update(parsed_config.pop(key, {}))\n\n config.update(parsed_config)\n\n config.update(nested_config)\n\n return config", "def config_func(tools, index, device_id, config_old: {}, config_new: {}):\n\n # This is an example of a firmware upgrade requiring a configuration migration\n\n # Firmware 01.03.XX to 01.04.XX configuration migration.\n\n # GENERAL section, no changes\n config_new[\"general\"] = config_old[\"general\"]\n\n # LOG section, error_frames added\n config_new[\"log\"] = config_old[\"log\"]\n config_new[\"log\"][\"error_frames\"] = {\"state\": 0}\n \n # RTC section, no changes\n config_new[\"rtc\"] = config_old[\"rtc\"]\n\n # SECONDARY PORT section, no changes\n config_new['secondaryport'] = config_old['secondaryport']\n\n # CAN sections, remote_frames added, filter moved\n for can_x in [\"can_1\", \"can_2\"]:\n config_new[can_x] = config_old[can_x]\n config_new[can_x][\"filter\"] = {\"remote_frames\": 0, \"id\": config_old[can_x][\"filter\"]}\n\n # LIN sections, before 
optional, now mandatory\n for lin_x in [\"lin_1\", \"lin_2\"]:\n if lin_x in config_old:\n config_new[lin_x] = config_old[lin_x]\n\n # CONNECT section, server->request_style now mandatory\n if \"connect\" in config_old:\n config_new[\"connect\"] = config_old[\"connect\"]\n\n # Add mandatory \"request_style\" if not already set\n if \"s3\" in config_new[\"connect\"]:\n if \"server\" in config_new[\"connect\"][\"s3\"]:\n if \"request_style\" not in config_new[\"connect\"][\"s3\"][\"server\"]:\n config_new[\"connect\"][\"s3\"][\"server\"][\"request_style\"] = 0\n\n return config_new", "def init_config(self):\n # self.config.read(self.cnfgfile)\n if not self.config.has_section(VERSION_SECTION):\n self.config.add_section(VERSION_SECTION)", "def _get_initial_config(self):\r\n config = GeneralConfiguration()\r\n caching_config = CacheBaseyearConfiguration()\r\n config.merge(caching_config)\r\n return config", "def test_replace_namespaced_build_config(self):\n pass", "def update_config(update):\n global _config\n new_config = copy.deepcopy(_config)\n _update_dict_recursive(new_config, update)\n logging.config.dictConfig(new_config)\n _configure_ulog_bridge()\n _config = new_config", "def merge(self, newer_config, **kwargs):\n kwargs['merge'] = True\n logger.debug('from parent merge: %s', kwargs)\n return self.update(newer_config, **kwargs)", "def init_config(self):\n pass", "def setup_config():\n if CONFIG.get(\"environment\", \"server\") == 'production':\n return 'config.ProductionConfig'\n else:\n return 'config.TestingConfig'", "def merge_external_params_with_configs(configs, hparams=None, kwargs_dict=None):\n\n if kwargs_dict is None:\n kwargs_dict = {}\n if hparams:\n kwargs_dict.update(hparams.values())\n for key, value in kwargs_dict.items():\n tf.logging.info(\"Maybe overwriting %s: %s\", key, value)\n # pylint: disable=g-explicit-bool-comparison\n if value == \"\" or value is None:\n continue\n # pylint: enable=g-explicit-bool-comparison\n elif _maybe_update_config_with_key_value(configs, key, value):\n continue\n elif _is_generic_key(key):\n _update_generic(configs, key, value)\n else:\n tf.logging.info(\"Ignoring config override key: %s\", key)\n return configs", "def set_config_defaults(config):\n new_config = config.copy()\n\n new_config.setdefault(\"window_title\", \"Materials Cloud Tool\")\n new_config.setdefault(\n \"page_title\",\n \"<PLEASE SPECIFY A PAGE_TITLE AND A WINDOW_TITLE IN THE CONFIG FILE>\",\n )\n\n new_config.setdefault(\"custom_css_files\", {})\n new_config.setdefault(\"custom_js_files\", {})\n new_config.setdefault(\"templates\", {})\n\n return new_config", "def setup(self):\n\n default_config = self.read()\n\n self.write(default_config)", "def loadconfigtable(ui, extname, configtable):\n for section, items in sorted(configtable.items()):\n knownitems = ui._knownconfig.setdefault(section, itemregister())\n knownkeys = set(knownitems)\n newkeys = set(items)\n for key in sorted(knownkeys & newkeys):\n msg = b\"extension '%s' overwrite config item '%s.%s'\"\n msg %= (extname, section, key)\n ui.develwarn(msg, config=b'warn-config')\n\n knownitems.update(items)", "def mergeConfig(self):\n config = \\\n \"from Configuration.DataProcessing.Merge import mergeProcess\\nprocess = mergeProcess(\\n \"\n config += \",\".join(self.merge_inputs)\n config += \",\\n\"\n config += \" output_file = \\\"%s\\\",\\n\" % os.path.basename(self.lfn)\n config += \" output_lfn = \\\"%s\\\"\\n) \" % self.lfn\n return config", "def configure(new_config: Mapping):\n config.update(new_config)", 
"def add_over(self, override: ItemConfig) -> None:\n self.all_conf = lazy_conf.concat(self.all_conf, override.all_conf)\n\n for vers_id, styles in override.versions.items():\n our_styles = self.versions.setdefault(vers_id, {})\n for sty_id, style in styles.items():\n if sty_id not in our_styles:\n our_styles[sty_id] = style\n else:\n our_styles[sty_id] = lazy_conf.concat(our_styles[sty_id], style)", "def writeConfig(quickLogger, commonDictionary, analysisDictionary = {}, likelihoodDictionary = {}, plotDictionary = {}, curveDictionary = {}):\n \n basename = commonDictionary['base']\n\n config = ConfigParser.RawConfigParser()\n config.read(basename+'.cfg')\n if(not config.has_section('common')):\n config.add_section('common')\n\n for variable, value in commonDictionary.iteritems():\n config.set('common', variable, value)\n quickLogger.info(\"wrote common config to \"+basename+\".cfg.\")\n\n if(analysisDictionary):\n if(config.has_section('quickAnalysis')):\n quickLogger.info(\"quickAnalysis config exists, overwriting...\") \n else:\n config.add_section('quickAnalysis') \n for variable, value in analysisDictionary.iteritems():\n config.set('quickAnalysis', variable, value)\n quickLogger.info(\"wrote quickAnalysis config to \"+basename+\".cfg.\")\n\n if(likelihoodDictionary):\n if(config.has_section('quickLike')):\n quickLogger.info(\"quickLike config exists, overwriting...\") \n else:\n config.add_section('quickLike') \n for variable, value in likelihoodDictionary.iteritems():\n config.set('quickLike', variable, value)\n quickLogger.info(\"wrote quickLikeconfig to \"+basename+\".cfg.\")\n\n if(plotDictionary):\n if(config.has_section('quickPlot')):\n quickLogger.info(\"quickPlot config exists, overwriting...\") \n else:\n config.add_section('quickPlot') \n for variable, value in plotDictionary.iteritems():\n config.set('quickPlot', variable, value)\n quickLogger.info(\"wrote quickPlot config to \"+basename+\".cfg.\")\n\n if(curveDictionary):\n if(config.has_section('quickCurve')):\n quickLogger.info(\"quickCurve config exists, overwriting...\") \n else:\n config.add_section('quickCurve') \n for variable, value in curveDictionary.iteritems():\n config.set('quickCurve', variable, value)\n quickLogger.info(\"wrote quickCurve config to \"+basename+\".cfg.\")\n\n with open(basename+'.cfg', 'wb') as configfile:\n config.write(configfile)", "def override_config(config_nm, usr_conf, flags_dict):\n # check all mandatory params are here:\n for p, v in mandatory_dict.iteritems():\n if not hasattr(usr_conf, p):\n print('User should supply path for %s: %s' % (p, v))\n exit(-1)\n user_params = getdefflags(config_nm)\n # check if optional user defined params are listed\n # if not just use the specified params\n for p, v in flags_dict.iteritems():\n if p in user_params:\n if not hasattr(usr_conf, p):\n setattr(usr_conf, p, v)\n\n return usr_conf", "def update_config(config_file, config_base=None):\n if config_base is None:\n config_base = def_config_file\n assert(os.path.isfile(config_base))\n if not os.path.isfile(config_file):\n shutil.copy(config_base, config_file)\n cp = CisConfigParser()\n cp.read(config_file)\n miss = []\n if platform._is_win: # pragma: windows\n miss += update_config_windows(cp)\n with open(config_file, 'w') as fd:\n cp.write(fd)\n for sect, opt, desc in miss: # pragma: windows\n warnings.warn((\"Could not locate option %s in section %s.\"\n + \"Please set this in %s to: %s\")\n % (opt, sect, config_file, desc))", "def merge_config(log_conf: LogConf, conf: Config) -> Config:\n 
#pylint: disable=too-many-locals\n\n name = conf.name # take individual conf value, ignore common log_conf value\n filename = _ITEM_OR_DEFAULT(log_conf.filename, conf.filename)\n logger_level = _ITEM_OR_DEFAULT(log_conf.logger_level, conf.logger_level)\n log_fmt = _ITEM_OR_DEFAULT(log_conf.log_fmt, conf.log_fmt)\n log_datefmt = _ITEM_OR_DEFAULT(log_conf.log_datefmt, conf.log_datefmt)\n log_level = _ITEM_OR_DEFAULT(log_conf.log_level, conf.log_level)\n log_enabled = _ITEM_OR_DEFAULT(log_conf.log_enabled, conf.log_enabled)\n cout_fmt = _ITEM_OR_DEFAULT(log_conf.cout_fmt, conf.cout_fmt)\n cout_datefmt = _ITEM_OR_DEFAULT(log_conf.cout_datefmt, conf.cout_datefmt)\n cout_level = _ITEM_OR_DEFAULT(log_conf.cout_level, conf.cout_level)\n cout_enabled = _ITEM_OR_DEFAULT(log_conf.cout_enabled, conf.cout_enabled)\n propagate = _ITEM_OR_DEFAULT(log_conf.propagate, conf.propagate)\n log_dir = _ITEM_OR_DEFAULT(log_conf.log_dir, conf.log_dir)\n sub_dir = _ITEM_OR_DEFAULT(log_conf.sub_dir, conf.sub_dir)\n override_allowed = conf.override_allowed # take individual conf value, ignore common log_conf value\n\n n_conf: Config = Config(name, filename, logger_level, log_fmt, log_datefmt, log_level, log_enabled, cout_fmt,\n cout_datefmt, cout_level, cout_enabled, propagate, log_dir, sub_dir, override_allowed)\n\n return n_conf", "def merge_configurations(config_list):\n current_config = {}\n for config in config_list:\n current_config = merge(current_config, config)\n\n return current_config", "def _initConfig(self):\n from tg import config as tg_config\n\n # Set config defaults\n config = DEFAULT_CONFIG.copy()\n temp_verbose = config[\"verbose\"]\n\n # Configuration file overrides defaults\n default_config_file = os.path.abspath(DEFAULT_CONFIG_FILE)\n config_file = tg_config.get('wsgidav.config_path', default_config_file)\n fileConf = self._readConfigFile(config_file, temp_verbose)\n config.update(fileConf)\n\n if not useLxml and config[\"verbose\"] >= 1:\n print(\n \"WARNING: Could not import lxml: using xml instead (slower). 
Consider installing lxml from http://codespeak.net/lxml/.\")\n from wsgidav.dir_browser import WsgiDavDirBrowser\n from tracim.lib.webdav.tracim_http_authenticator import TracimHTTPAuthenticator\n from wsgidav.error_printer import ErrorPrinter\n from tracim.lib.webdav.utils import TracimWsgiDavDebugFilter\n\n config['middleware_stack'] = [\n WsgiDavDirBrowser,\n TracimHTTPAuthenticator,\n ErrorPrinter,\n TracimWsgiDavDebugFilter,\n ]\n\n config['provider_mapping'] = {\n config['root_path']: Provider(\n # TODO: Test to Re enabme archived and deleted\n show_archived=False, # config['show_archived'],\n show_deleted=False, # config['show_deleted'],\n show_history=False, # config['show_history'],\n manage_locks=config['manager_locks']\n )\n }\n\n config['domaincontroller'] = TracimDomainController(presetdomain=None, presetserver=None)\n\n return config", "def build_config(self, config):\n \n config.setdefaults(\n 'Network', {'IP': '192.168.1.16', 'port': 8000}\n )\n config.setdefaults(\n 'Camera', {'ISO': 100, 'Shutter': 5000, 'Aperture': 4, 'Zoom': 45}\n )\n config.setdefaults(\n 'Admin', {'Logging Path': gs.AUVSI_BASE_FOLDER}\n )\n config.setdefaults(\n 'CV', {'image_rescaling': 0.25}\n )\n \n #\n # Disable multi touch emulation with the mouse.\n #\n from kivy.config import Config\n Config.set('input', 'mouse', 'mouse,disable_multitouch')", "def conf():\n global config\n return config", "def merge_config(a, b):\n for key, b_value in b.items():\n if not isinstance(b_value, dict):\n a[key] = b_value\n else:\n a_value = a.get(key)\n if a_value is not None and isinstance(a_value, dict):\n merge_config(a_value, b_value)\n else:\n a[key] = b_value\n return a", "def set_config(config_name, host, port, core=''):\n global CONFIGS\n CONFIGS[config_name] = {'host': host, 'port': port, 'core': core}", "def merge_algorithm_configs(\n cls,\n config1: AlgorithmConfigDict,\n config2: PartialAlgorithmConfigDict,\n _allow_unknown_configs: Optional[bool] = None,\n ) -> AlgorithmConfigDict:\n config1 = copy.deepcopy(config1)\n if \"callbacks\" in config2 and type(config2[\"callbacks\"]) is dict:\n deprecation_warning(\n \"callbacks dict interface\",\n \"a class extending rllib.algorithms.callbacks.DefaultCallbacks; \"\n \"see `rllib/examples/custom_metrics_and_callbacks.py` for an example.\",\n error=True,\n )\n\n if _allow_unknown_configs is None:\n _allow_unknown_configs = cls._allow_unknown_configs\n return deep_update(\n config1,\n config2,\n _allow_unknown_configs,\n cls._allow_unknown_subkeys,\n cls._override_all_subkeys_if_type_changes,\n cls._override_all_key_list,\n )", "def _merge_with_default_values(self, cr, uid, external_session, ressource, vals, sub_mapping_list, defaults=None, context=None):\n if not defaults: return vals\n for key in defaults:\n if not key in vals:\n vals[key] = defaults[key]\n return vals", "def get_default_config(self):\n return config.read(pathlib.Path(__file__).parent / \"ext.conf\")", "def config(self, **kw):\n group = kw.pop('group', None)\n for k, v in kw.items():\n CONF.set_override(k, v, group)", "def _add_roa_configs(CONFIG):\n CONFIG.declare(\n 'level_coef',\n ConfigValue(\n default=0.5,\n domain=PositiveFloat,\n description='The coefficient in the regularization main problem'\n 'represents how much the linear approximation of the MINLP problem is trusted.',\n ),\n )\n CONFIG.declare(\n 'solution_limit',\n ConfigValue(\n default=10,\n domain=PositiveInt,\n description='The solution limit for the regularization problem since it does not need to be solved to 
optimality.',\n ),\n )\n CONFIG.declare(\n 'reduce_level_coef',\n ConfigValue(\n default=False,\n description='Whether to reduce level coefficient in ROA single tree when regularization problem is infeasible.',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'use_bb_tree_incumbent',\n ConfigValue(\n default=False,\n description='Whether to use the incumbent solution of branch & bound tree in ROA single tree when regularization problem is infeasible.',\n domain=bool,\n ),\n )\n CONFIG.declare(\n 'sqp_lag_scaling_coef',\n ConfigValue(\n default='fixed',\n domain=In(['fixed', 'variable_dependent']),\n description='The coefficient used to scale the L2 norm in sqp_lag.',\n ),\n )", "def _init_config_(self):\n self._config= {}", "def _map_merge(dest: \"BaseContainer\", src: \"BaseContainer\") -> None:\n from omegaconf import AnyNode, DictConfig, OmegaConf, ValueNode\n\n assert isinstance(dest, DictConfig)\n assert isinstance(src, DictConfig)\n src_type = src._metadata.object_type\n src_ref_type = get_ref_type(src)\n assert src_ref_type is not None\n\n # If source DictConfig is:\n # - an interpolation => set the destination DictConfig to be the same interpolation\n # - None => set the destination DictConfig to None\n if src._is_interpolation() or src._is_none():\n dest._set_value(src._value())\n _update_types(node=dest, ref_type=src_ref_type, object_type=src_type)\n return\n\n dest._validate_merge(value=src)\n\n def expand(node: Container) -> None:\n rt = node._metadata.ref_type\n val: Any\n if rt is not Any:\n if is_dict_annotation(rt):\n val = {}\n elif is_list_annotation(rt):\n val = []\n else:\n val = rt\n elif isinstance(node, DictConfig):\n val = {}\n else:\n assert False\n\n node._set_value(val)\n\n if (\n src._is_missing()\n and not dest._is_missing()\n and is_structured_config(src_ref_type)\n ):\n # Replace `src` with a prototype of its corresponding structured config\n # whose fields are all missing (to avoid overwriting fields in `dest`).\n src = _create_structured_with_missing_fields(\n ref_type=src_ref_type, object_type=src_type\n )\n\n if (dest._is_interpolation() or dest._is_missing()) and not src._is_missing():\n expand(dest)\n\n for key, src_value in src.items_ex(resolve=False):\n src_node = src._get_node(key, validate_access=False)\n dest_node = dest._get_node(key, validate_access=False)\n\n if isinstance(dest_node, DictConfig):\n dest_node._validate_merge(value=src_node)\n\n missing_src_value = _is_missing_value(src_value)\n\n if (\n isinstance(dest_node, Container)\n and OmegaConf.is_none(dest, key)\n and not missing_src_value\n and not OmegaConf.is_none(src_value)\n ):\n expand(dest_node)\n\n if dest_node is not None and dest_node._is_interpolation():\n target_node = dest_node._dereference_node(\n throw_on_resolution_failure=False\n )\n if isinstance(target_node, Container):\n dest[key] = target_node\n dest_node = dest._get_node(key)\n\n if (\n dest_node is None\n and is_structured_config(dest._metadata.element_type)\n and not missing_src_value\n ):\n # merging into a new node. 
Use element_type as a base\n dest[key] = DictConfig(content=dest._metadata.element_type, parent=dest)\n dest_node = dest._get_node(key)\n\n if dest_node is not None:\n if isinstance(dest_node, BaseContainer):\n if isinstance(src_value, BaseContainer):\n dest_node._merge_with(src_value)\n elif not missing_src_value:\n dest.__setitem__(key, src_value)\n else:\n if isinstance(src_value, BaseContainer):\n dest.__setitem__(key, src_value)\n else:\n assert isinstance(dest_node, ValueNode)\n assert isinstance(src_node, ValueNode)\n # Compare to literal missing, ignoring interpolation\n src_node_missing = src_value == \"???\"\n try:\n if isinstance(dest_node, AnyNode):\n if src_node_missing:\n node = copy.copy(src_node)\n # if src node is missing, use the value from the dest_node,\n # but validate it against the type of the src node before assigment\n node._set_value(dest_node._value())\n else:\n node = src_node\n dest.__setitem__(key, node)\n else:\n if not src_node_missing:\n dest_node._set_value(src_value)\n\n except (ValidationError, ReadonlyConfigError) as e:\n dest._format_and_raise(key=key, value=src_value, cause=e)\n else:\n from omegaconf import open_dict\n\n if is_structured_config(src_type):\n # verified to be compatible above in _validate_merge\n with open_dict(dest):\n dest[key] = src._get_node(key)\n else:\n dest[key] = src._get_node(key)\n\n _update_types(node=dest, ref_type=src_ref_type, object_type=src_type)\n\n # explicit flags on the source config are replacing the flag values in the destination\n flags = src._metadata.flags\n assert flags is not None\n for flag, value in flags.items():\n if value is not None:\n dest._set_flag(flag, value)", "def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": None,\n \"attribute\": None,\n \"index_annotation\": None,\n }\n )\n return config" ]
[ "0.6710672", "0.6409883", "0.6342374", "0.6342374", "0.63211066", "0.6320828", "0.6272454", "0.62308407", "0.6153629", "0.61424506", "0.6138934", "0.61278063", "0.61195934", "0.6030474", "0.6018767", "0.6000672", "0.5999597", "0.5980619", "0.5938822", "0.589723", "0.5872071", "0.58610404", "0.58540285", "0.58501107", "0.58302385", "0.5814946", "0.5795102", "0.5774249", "0.57648593", "0.57631916", "0.5748822", "0.57360244", "0.5732761", "0.57299536", "0.5727199", "0.5719739", "0.57176954", "0.5710071", "0.5698773", "0.56960243", "0.569376", "0.56866056", "0.5686559", "0.567311", "0.56685656", "0.5645375", "0.56042296", "0.5596703", "0.55900604", "0.558737", "0.55857414", "0.55832857", "0.5579821", "0.5576427", "0.5573239", "0.55514693", "0.55414957", "0.5538859", "0.5538859", "0.552275", "0.55116343", "0.55107766", "0.5510201", "0.5507144", "0.5491898", "0.54812837", "0.5477624", "0.5477577", "0.5475094", "0.5474356", "0.5467918", "0.5467405", "0.5461995", "0.545647", "0.5451992", "0.5442342", "0.5437696", "0.543689", "0.54326975", "0.5432482", "0.54186785", "0.54150635", "0.5414569", "0.5413705", "0.5413621", "0.54014635", "0.5399308", "0.5397138", "0.53949404", "0.539269", "0.5390034", "0.5383816", "0.53837705", "0.5383187", "0.53789485", "0.53730536", "0.5370168", "0.5370053", "0.5366841", "0.53571707", "0.5356688" ]
0.0
-1
This function returns the default anchors given the image shape and the number of anchors per grid point. The grid has width and height equal to those of the final layer's output.
def set_anchors(mc):
  # H, W: spatial dimensions of the network's final output grid; B: anchors per grid point.
  H, W, C = _get_output_shape(mc)
  B = mc.ANCHOR_PER_GRID
  # Scale the normalized initial anchor shapes to pixel units.
  X = np.array(mc.INITIAL_ANCHOR_SHAPES)
  X[:,0] *= mc.IMAGE_WIDTH
  X[:,1] *= mc.IMAGE_HEIGHT
  # Anchor (width, height) pairs, tiled over every cell of the H x W grid.
  anchor_shapes = np.reshape(
      [X] * H * W,
      (H, W, B, 2)
  )
  # x coordinates of the anchor centers, evenly spaced across the image width.
  center_x = np.reshape(
      np.transpose(
          np.reshape(
              np.array([np.arange(1, W+1)*float(mc.IMAGE_WIDTH)/(W+1)]*H*B), 
              (B, H, W)
          ),
          (1, 2, 0)
      ),
      (H, W, B, 1)
  )
  # y coordinates of the anchor centers, evenly spaced across the image height.
  center_y = np.reshape(
      np.transpose(
          np.reshape(
              np.array([np.arange(1, H+1)*float(mc.IMAGE_HEIGHT)/(H+1)]*W*B),
              (B, W, H)
          ),
          (2, 1, 0)
      ),
      (H, W, B, 1)
  )
  # Stack into an (H*W*B, 4) array of [center_x, center_y, width, height].
  anchors = np.reshape(
      np.concatenate((center_x, center_y, anchor_shapes), axis=3),
      (-1, 4)
  )

  return anchors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_grid_anchors(grid_sizes, strides, cell_anchors):\n anchors = []\n assert cell_anchors is not None\n\n for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):\n grid_height, grid_width = size\n stride_height, stride_width = stride\n\n # For output anchor, compute [x_center, y_center, x_center, y_center]\n shifts_x = np.arange(0, grid_width) * stride_width\n shifts_y = np.arange(0, grid_height) * stride_height\n shift_x, shift_y = np.meshgrid(shifts_y, shifts_x)\n shift_x = shift_x.reshape(-1)\n shift_y = shift_y.reshape(-1)\n shifts = np.stack((shift_x, shift_y, shift_x, shift_y), axis=1)\n\n # For every (base anchor, output anchor) pair,\n # offset each zero-centered base anchor by the center of the output anchor.\n anchors.append(\n (shifts.reshape((-1, 1, 4)) + base_anchors.reshape((1, -1, 4))).reshape(-1, 4)\n )\n\n return anchors", "def create_cell_anchors():\n k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL\n scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE\n aspect_ratios = cfg.RETINANET.ASPECT_RATIOS\n anchor_scale = cfg.RETINANET.ANCHOR_SCALE\n A = scales_per_octave * len(aspect_ratios)\n anchors = {}\n for lvl in range(k_min, k_max + 1):\n # create cell anchors array\n stride = 2. ** lvl\n cell_anchors = np.zeros((A, 4))\n a = 0\n for octave in range(scales_per_octave):\n octave_scale = 2 ** (octave / float(scales_per_octave))\n for aspect in aspect_ratios:\n anchor_sizes = (stride * octave_scale * anchor_scale, )\n anchor_aspect_ratios = (aspect, )\n cell_anchors[a, :] = generate_anchors(\n stride=stride, sizes=anchor_sizes,\n aspect_ratios=anchor_aspect_ratios)\n a += 1\n anchors[lvl] = cell_anchors\n return anchors", "def _get_anchor_grid(self, width, height, batch_size):\n anchors = tf.cast(self._anchors, dtype = self.dtype)\n anchors = tf.reshape(anchors, [1, -1])\n anchors = tf.repeat(anchors, width*height, axis = 0)\n anchors = tf.reshape(anchors, [1, width, height, self._num, -1])\n anchors = tf.repeat(anchors, batch_size, axis = 0)\n return anchors", "def get_all_anchors(stride=None, sizes=None):\n if stride is None:\n stride = cfg.ANCHOR.ANCHOR_STRIDE\n if sizes is None:\n sizes = cfg.ANCHOR.ANCHOR_SIZES\n # Generates a NAx4 matrix of anchor boxes in (x1, y1, x2, y2) format. 
Anchors\n # are centered on stride / 2, have (approximate) sqrt areas of the specified\n # sizes, and aspect ratios as given.\n cell_anchors = generate_anchors(\n stride,\n scales=np.array(sizes, dtype=np.float) / stride,\n ratios=np.array(cfg.ANCHOR.ANCHOR_RATIOS, dtype=np.float))\n # anchors are intbox here.\n # anchors at featuremap [0,0] are centered at fpcoor (8,8) (half of stride)\n\n max_size = cfg.DATA.MAX_SIZE\n field_size = int(np.ceil(max_size / stride))\n shifts = np.arange(0, field_size) * stride\n shift_x, shift_y = np.meshgrid(shifts, shifts)\n shift_x = shift_x.flatten()\n shift_y = shift_y.flatten()\n shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()\n # Kx4, K = field_size * field_size\n K = shifts.shape[0]\n\n A = cell_anchors.shape[0]\n field_of_anchors = (\n cell_anchors.reshape((1, A, 4)) +\n shifts.reshape((1, K, 4)).transpose((1, 0, 2)))\n field_of_anchors = field_of_anchors.reshape((field_size, field_size, A, 4))\n # FSxFSxAx4\n # Many rounding happens inside the anchor code anyway\n # assert np.all(field_of_anchors == field_of_anchors.astype('int32'))\n field_of_anchors = field_of_anchors.astype('float32')\n field_of_anchors[:, :, :, [2, 3]] += 1\n return field_of_anchors", "def get_anchors(self, image_shape):\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]", "def anchors(self, img_shape, dtype=np.float32):\n return np_methods.ssd_anchors_all_layers(img_shape,\n self.params.feat_shapes,\n self.params.anchor_sizes,\n self.params.anchor_ratios,\n self.params.anchor_steps,\n self.params.anchor_offset,\n dtype)", "def generate_anchors(self):\n self.anchors = np.zeros((self.anchor_num, 4), dtype=np.float32)\n size = self.stride * self.stride\n count = 0\n for r in self.ratios:\n ws = int(math.sqrt(size * 1. 
/ r))\n hs = int(ws * r)\n\n for s in self.scales:\n w = ws * s\n h = hs * s\n self.anchors[count][:] = [-w * 0.5, -h * 0.5, w * 0.5, h * 0.5][:]\n count += 1", "def get_anchor_points(self):\n rows, cols = np.where(self.overlap_mask)\n self.anchor_points = tuple(zip(rows, cols))[:: self.sampling_int]\n print(\"# of anchors: {}\".format(len(self.anchor_points)))", "def _mkanchors(ws, hs, x_ref, y_ref):\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n\n anchors = np.hstack(\n (\n x_ref - 0.5 * (ws - 1),\n y_ref - 0.5 * (hs - 1),\n x_ref + 0.5 * (ws - 1),\n y_ref + 0.5 * (hs - 1)\n )\n )\n return anchors", "def _mkanchors(ws, hs, x_ctr, y_ctr):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1), y_ctr - 0.5 * (hs - 1), x_ctr + 0.5 * (ws - 1), y_ctr + 0.5 * (hs - 1)))\n return anchors", "def _mkanchors(ws, hs, x_ctr, y_ctr):\n\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1)))\n return anchors", "def _mkanchors(ws, hs, x_ctr, y_ctr):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack(\n (\n x_ctr - 0.5 * (ws - 1),\n y_ctr - 0.5 * (hs - 1),\n x_ctr + 0.5 * (ws - 1),\n y_ctr + 0.5 * (hs - 1),\n )\n )\n return anchors", "def _generate_anchors(point, sizes, aspect_ratios, layout, beta, include_depth):\n\n distance = point[2]\n base_size = sizes[0]\n scales = sizes[1:] / base_size\n # beta = 8\n scales = (beta/distance)*scales\n\n center = (point[0], point[1])\n anchor = np.array([center[0] - base_size/2.0, center[1] - base_size/2.0,\n center[0] + base_size/2.0, center[1] + base_size/2.0],\n dtype=np.float)\n\n anchors = _ratio_enum(anchor, aspect_ratios)\n anchors = np.vstack(\n [_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]\n )\n\n all_anchors = np.empty((0,4))\n for l in layout:\n new_anchors = _shift_anchors(anchors, l)\n all_anchors = np.vstack((all_anchors, new_anchors))\n\n if int(include_depth)==1:\n # Add the distance as the 5th element to all anchors\n new_shape = (all_anchors.shape[0], all_anchors.shape[1]+1)\n new_anchors = np.ones(new_shape) * distance\n new_anchors[:,:-1] = all_anchors\n all_anchors = new_anchors\n\n return all_anchors", "def make_anchors(self, img_shape, dtype=np.float32):\n return anchor_utils.ssd_anchors_all_layers(img_shape,\n self.params.feat_shapes,\n self.params.anchor_sizes,\n self.params.anchor_ratios,\n self.params.anchor_steps,\n self.params.anchor_offset,\n dtype)", "def adjust_regular_roi_anchors(bounds: QRectF, anchors: list):\n for point in anchors:\n off = point.boundingRect().width() / 2\n if point.position == AnchorPosition.LEFT:\n point.setPos(bounds.left() - off, bounds.top() - off + bounds.height() / 2)\n elif point.position == AnchorPosition.RIGHT:\n point.setPos(bounds.right() - off, bounds.top() - off + bounds.height() / 2)\n elif point.position == AnchorPosition.TOP:\n point.setPos(bounds.left() - off + bounds.width() / 2, bounds.top() - off)\n elif point.position == AnchorPosition.TOP_LEFT:\n point.setPos(bounds.left() - off, bounds.top() - off)\n elif point.position == AnchorPosition.TOP_RIGHT:\n point.setPos(bounds.right() - off, bounds.top() - off)\n elif point.position == AnchorPosition.BOTTOM:\n point.setPos(bounds.left() - off + bounds.width() / 2, bounds.bottom() - off)\n elif point.position == AnchorPosition.BOTTOM_LEFT:\n point.setPos(bounds.left() - off, bounds.bottom() - off)\n elif point.position == 
AnchorPosition.BOTTOM_RIGHT:\n point.setPos(bounds.right() - off, bounds.bottom() - off)", "def generate_anchors(base_size=16, feat_stride=16, anchor_scales=(8, 16, 32), anchor_ratios=(0.5, 1, 2)):\n anchors = generate_base_anchors(base_size=base_size, ratios=np.array(anchor_ratios), scales=np.array(anchor_scales))\n A = anchors.shape[0]\n shift_x = np.arange(0, IM_SCALE // feat_stride) * feat_stride\n shift_x, shift_y = np.meshgrid(shift_x, shift_x)\n shifts = np.stack([shift_x, shift_y, shift_x, shift_y], -1)\n all_anchors = shifts[:, :, None] + anchors[None, None]\n return all_anchors", "def center_image_grid_anchors(image_grid):\n for image in image_grid:\n center_image_anchor(image)", "def generate_anchors_info():\n original_height, original_width = 512, 640\n input_anchor = Anchor(\n min_level=2,\n max_level=6,\n num_scales=1,\n aspect_ratios=[1.0, 2.0, 0.5],\n anchor_size=8,\n image_size=(_IMAGE_SIZE.value, _IMAGE_SIZE.value))\n anchor_boxes = input_anchor.multilevel_boxes\n for key in anchor_boxes:\n anchor_boxes[key] = anchor_boxes[key].numpy()\n\n scale = min(_IMAGE_SIZE.value / original_height,\n _IMAGE_SIZE.value / original_width)\n image_info = np.array([[[original_height, original_width],\n [_IMAGE_SIZE.value, _IMAGE_SIZE.value],\n [scale, scale], [0, 0]]])\n\n return anchor_boxes, image_info", "def anchors(self):\n dims = self.dims\n anchors = []\n for peak in self:\n possible_anchors = []\n for combination in combinations(range(dims), 2):\n spins = [peak[i] for i in combination]\n if any(s.res_num is None or s.atom is None for s in spins):\n continue\n res_nums = [spin.res_num for spin in spins]\n atoms = [spin.atom for spin in spins]\n elements = [atom[0] for atom in atoms]\n positions = [atom[1:] for atom in atoms]\n same_res_num = res_nums[0] == res_nums[1]\n valid_pairs = [set(('H', 'N')), set(('H', 'C'))]\n is_proton_heavy_pair = set(elements) in valid_pairs\n same_position = all(c[0] == c[1] for c in zip(*positions))\n if same_res_num and is_proton_heavy_pair and same_position:\n if '' in positions and set(elements) != set(('H', 'N')):\n # One of the atom names must have been 'H', 'N' or 'C'\n # Of these, only the amide proton anchor is valid\n continue\n if elements[0] == 'H':\n possible_anchors.append(combination)\n else:\n possible_anchors.append(combination[::-1])\n if len(possible_anchors) > 1:\n pa_sets = [set(pa) for pa in possible_anchors]\n overlap = set.intersection(*pa_sets)\n if overlap:\n # Ambiguous, overlapping anchors\n continue\n for poss_anc in possible_anchors:\n if poss_anc not in anchors:\n anchors.append(poss_anc)\n anchors = tuple(anchors)\n return anchors", "def anchor_target_layer(rpn_cls_score, gt_boxes, im_info, _feat_stride, all_anchors, num_anchors):\n A = num_anchors\n total_anchors = all_anchors.shape[0]\n K = total_anchors / num_anchors\n\n # allow boxes to sit over the edge by a small amount\n _allowed_border = 0\n\n # map of shape (..., H, W)\n height, width = rpn_cls_score.shape[1:3]\n\n # only keep anchors inside the image\n inds_inside = np.where(\n (all_anchors[:, 0] >= -_allowed_border) &\n (all_anchors[:, 1] >= -_allowed_border) &\n (all_anchors[:, 2] < im_info[1] + _allowed_border) & # width\n (all_anchors[:, 3] < im_info[0] + _allowed_border) # height\n )[0]\n\n # keep only inside anchors\n anchors = all_anchors[inds_inside, :]\n\n # label: 1 is positive, 0 is negative, -1 is dont care\n labels = np.empty((len(inds_inside),), dtype=np.float32)\n labels.fill(-1)\n\n # overlaps between the anchors and the gt boxes\n # overlaps 
(ex, gt)\n overlaps = bbox_overlaps(\n np.ascontiguousarray(anchors, dtype=np.float),\n np.ascontiguousarray(gt_boxes, dtype=np.float))\n argmax_overlaps = overlaps.argmax(axis=1)\n max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]\n gt_argmax_overlaps = overlaps.argmax(axis=0)\n gt_max_overlaps = overlaps[gt_argmax_overlaps,\n np.arange(overlaps.shape[1])]\n gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]\n\n if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels first so that positive labels can clobber them\n # first set the negatives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # fg label: for each gt, anchor with highest overlap\n labels[gt_argmax_overlaps] = 1\n\n # fg label: above threshold IOU\n labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1\n\n if cfg.TRAIN.RPN_CLOBBER_POSITIVES:\n # assign bg labels last so that negative labels can clobber positives\n labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0\n\n # subsample positive labels if we have too many\n num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)\n fg_inds = np.where(labels == 1)[0]\n if len(fg_inds) > num_fg:\n disable_inds = npr.choice(\n fg_inds, size=(len(fg_inds) - num_fg), replace=False)\n labels[disable_inds] = -1\n\n # subsample negative labels if we have too many\n num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)\n bg_inds = np.where(labels == 0)[0]\n if len(bg_inds) > num_bg:\n disable_inds = npr.choice(\n bg_inds, size=(len(bg_inds) - num_bg), replace=False)\n labels[disable_inds] = -1\n\n bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)\n bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])\n\n bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\n # only the positive ones have regression targets\n bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)\n\n bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)\n if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:\n # uniform weighting of examples (given non-uniform sampling)\n num_examples = np.sum(labels >= 0)\n positive_weights = np.ones((1, 4)) * 1.0 / num_examples\n negative_weights = np.ones((1, 4)) * 1.0 / num_examples\n else:\n assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &\n (cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))\n positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /\n np.sum(labels == 1))\n negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /\n np.sum(labels == 0))\n bbox_outside_weights[labels == 1, :] = positive_weights\n bbox_outside_weights[labels == 0, :] = negative_weights\n\n # map up to original set of anchors\n labels = _unmap(labels, total_anchors, inds_inside, fill=-1)\n bbox_targets = _unmap(bbox_targets, total_anchors, inds_inside, fill=0)\n bbox_inside_weights = _unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)\n bbox_outside_weights = _unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)\n\n # labels\n labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)\n labels = labels.reshape((1, 1, A * height, width))\n rpn_labels = labels\n\n # bbox_targets\n bbox_targets = bbox_targets \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_targets = bbox_targets\n # bbox_inside_weights\n bbox_inside_weights = bbox_inside_weights \\\n .reshape((1, height, width, A * 4))\n\n rpn_bbox_inside_weights = bbox_inside_weights\n\n # bbox_outside_weights\n bbox_outside_weights = bbox_outside_weights \\\n .reshape((1, height, width, A 
* 4))\n\n rpn_bbox_outside_weights = bbox_outside_weights\n return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights", "def get_anchors(self, featmap_sizes, img_metas, device='cuda'):\n num_imgs = len(img_metas)\n\n # since feature map sizes of all images are the same, we only compute\n # anchors for one time\n multi_level_anchors = self.anchor_generator.grid_anchors(\n featmap_sizes, device)\n anchor_list = [multi_level_anchors for _ in range(num_imgs)]\n\n # for each image, we compute valid flags of multi level anchors\n valid_flag_list = []\n for img_id, img_meta in enumerate(img_metas):\n multi_level_flags = self.anchor_generator.valid_flags(\n featmap_sizes, img_meta['pad_shape'], device)\n valid_flag_list.append(multi_level_flags)\n\n return anchor_list, valid_flag_list", "def _mkanchors(ws, ctr):\n ws = ws[:, np.newaxis]\n anchors = np.hstack(\n (\n ctr - 0.5 * ws,\n ctr + 0.5 * ws,\n )\n )\n return anchors", "def rpn_anchor_boxes(image_size, *args, **kwargs):\n anchor_boxes = generate_anchor_boxes(image_size, *args, **kwargs)\n valid_ab_indices = valid_anchor_boxes(anchor_boxes, image_size)\n return anchor_boxes, valid_ab_indices", "def generate_anchors(base_size, ratios, scales, rotations):\n num_anchors = len(ratios) * len(scales) * len(rotations)\n # initialize output anchors\n anchors = np.zeros((num_anchors, 5))\n # scale base_size\n anchors[:, 2:4] = base_size * np.tile(scales, (2, len(ratios) * len(rotations))).T\n # compute areas of anchors\n areas = anchors[:, 2] * anchors[:, 3]\n # correct for ratios\n anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales) * len(rotations)))\n anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales) * len(rotations))\n # add rotations\n anchors[:, 4] = np.tile(np.repeat(rotations, len(scales)), (1, len(ratios))).T[:, 0]\n # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)\n anchors[:, 0:3:2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1:4:2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n return anchors", "def generate_anchor_boxes(image_size, feature_map_size, sizes, scales):\n image_height, image_width = image_size\n fm_height, fm_width = feature_map_size\n height_stride = int(image_height / fm_height)\n width_stride = int(image_width / fm_width)\n\n # Compose horizontal and vertical positions into grid and reshape result into (-1, 2)\n y_centers = np.arange(0, image_height, height_stride)\n x_centers = np.arange(0, image_width, width_stride)\n centers = np.dstack(np.meshgrid(y_centers, x_centers)).reshape((-1, 2))\n\n # Creates anchor boxes pyramid. 
Somewhat vectorized version of itertools.product\n r_scales = np.repeat([scales], len(sizes), axis=0).ravel()\n r_sides = np.repeat([sizes], len(scales), axis=1).ravel()\n ab_pyramid = np.transpose([r_sides / (r_scales ** .5),\n r_sides * (r_scales ** .5)]).astype(int)\n\n # Creates combinations of all anchor boxes centers and sides\n r_centers = np.repeat(centers, len(ab_pyramid), axis=0)\n r_ab_pyramid = np.repeat([ab_pyramid], len(centers), axis=0).reshape((-1, 2))\n return np.hstack((r_centers, r_ab_pyramid))", "def __create_anchors(self, sizes, aspects):\n k = len(sizes) * len(aspects)\n img_anchors = []\n for i in sizes:\n for j in aspects:\n img_anchors.append(\n [0, 0, 2 * i * j[0] / (j[0] + j[1]), 2 * i * j[1] / (j[0] + j[1])])\n\n self.anchors = np.asarray(img_anchors)", "def generate_all_anchors(self):\n self.feature_sizes = [int(np.round(self.resolution/stride)) for stride in self.strides]\n \n #generate all anchors for each level of the FPN\n all_anchors = [self.generate_feature_level_base_anchors(size=size) for size in self.sizes]\n all_anchors = [self.shift_and_duplicate(layer_anchors, feature_size, stride) for layer_anchors, feature_size, stride in zip(all_anchors, self.feature_sizes, self.strides)]\n all_anchors = tf.concat(all_anchors, axis=0)\n\n return all_anchors", "def generate_anchors(scales=(32,), aspect_ratios=(0.5, 1, 2), dtype=np.float32):\n scales = np.array(scales)\n aspect_ratios = np.array(aspect_ratios, dtype=dtype)\n h_ratios = np.sqrt(aspect_ratios)\n w_ratios = 1 / h_ratios\n\n ws = (w_ratios[:, None] * scales[None, :]).reshape(-1)\n hs = (h_ratios[:, None] * scales[None, :]).reshape(-1)\n\n base_anchors = np.stack([-ws, -hs, ws, hs], axis=1) / 2\n return base_anchors", "def generate_anchors(base_size=16, ratios=None, scales=None):\n\n if ratios is None:\n ratios = np.array([0.5, 1, 2])\n\n if scales is None:\n scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])\n\n num_anchors = len(ratios) * len(scales)\n\n # initialize output anchors\n anchors = np.zeros((num_anchors, 4))\n\n # scale base_size\n anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T\n\n # compute areas of anchors\n areas = anchors[:, 2] * anchors[:, 3]\n\n # correct for ratios\n anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))\n anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))\n\n # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)\n anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n\n return anchors", "def generate_anchors(base_size=16, ratios=None, scales=None):\n\n if ratios is None:\n ratios = np.array([0.5, 1, 2])\n\n if scales is None:\n scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])\n\n num_anchors = len(ratios) * len(scales)\n\n # initialize output anchors\n anchors = np.zeros((num_anchors, 4))\n\n # scale base_size\n anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T\n\n # compute areas of anchors\n areas = anchors[:, 2] * anchors[:, 3]\n\n # correct for ratios\n anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))\n anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))\n\n # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)\n anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n\n return anchors", "def _generate_anchors(base_size, scales, aspect_ratios):\n anchor = np.array([1, 1, base_size, base_size], dtype=np.float) - 0.5\n 
anchors = _ratio_enum(anchor, aspect_ratios)\n anchors = np.vstack(\n [_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]\n )\n return torch.from_numpy(anchors)", "def get_anchors(self):\n self.anchors_dic = {}\n meta = self.get_metadata()\n lines = meta.split(\"|\")\n for line in lines:\n data= line.split()\n anchor_name = data[0]\n # appending anchor in dictionary with its coordinates \n self.anchors_dic[anchor_name] = (data[1], data[2], data[3])", "def valid_anchor_boxes(anchor_boxes, image_size):\n img_height, img_width = image_size\n y, x, height, width = np.transpose(anchor_boxes)\n\n # TODO(Mocurin) Optimize?\n # Indicator matrix\n indicators = np.array([y - height // 2 >= 0,\n x - width // 2 >= 0,\n y + height // 2 <= img_height,\n x + width // 2 <= img_width]).transpose()\n\n # Get indices of anchor boxes inside image\n return np.flatnonzero(np.all(indicators, axis=1, keepdims=False))", "def generate_feature_level_base_anchors(self, size):\n \n anchors = np.zeros((self.n_anchors, 4)) \n #scale base size at different scales\n anchors[:, 2:] = size * np.tile(self.scales, (2, len(self.ratios))).T\n # get different combinations of aspect ratios\n areas = anchors[:, 2] * anchors[:, 3]\n anchors[:, 2] = np.sqrt(areas / np.repeat(self.ratios, len(self.scales)))\n anchors[:, 3] = anchors[:, 2] * np.repeat(self.ratios, len(self.scales))\n \n # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)\n anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T\n anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T\n \n #self.base_anchors = tf.cast(anchors, dtype=tf.float32)\n return anchors", "def __build_anchors(anchor_parameters, features):\n anchors = [\n layers.Anchors(\n size=anchor_parameters.sizes[i],\n stride=anchor_parameters.strides[i],\n ratios=anchor_parameters.ratios,\n scales=anchor_parameters.scales,\n name='anchors_{}'.format(i)\n )(f) for i, f in enumerate(features)\n ]\n\n return keras.layers.Concatenate(axis=1, name='anchors')(anchors)", "def __call__(self, loc, scores, anchors, img_size):\n anchors = bbox.loc2bbox(anchor)", "def generate_anchors(base_size = 16, ratios = [0.5, 1, 2.0], \n scales = 2**np.arange(4,7)):\n n_anchors = len(ratios)*len(scales)\n\n base_anchor = np.array([int(base_size*0.5), \n int(base_size*0.5), \n base_size, \n base_size], np.float32) \n # First, generate a list of anchors of the appropriate area:\n scaled_anchors = np.tile(base_anchor,(len(scales), 1))\n final_anchors = np.zeros((len(scales),len(ratios), 4), np.float32)\n for s, i in zip(scales, xrange(len(scales))):\n scaled_anchors[i,2:] *= s\n for r, j in zip(ratios, xrange(len(ratios))):\n t = np.copy(scaled_anchors[i])\n t[2] *= np.sqrt(r)\n t[3] *= 1./np.sqrt(r)\n final_anchors[i,j] = np.round(t)\n return np.reshape(final_anchors, (n_anchors, 4))", "def numpy_select_label_anchors_minmax(_ground_truths, \n _anchors, \n _positive_threshold = 0.7, \n _negative_threshold = 0.3,\n _n_targets= 128):\n \n # Prune anchors that sit over the edge of the images\n \n\n\n _possible_anchors = _anchors \n\n # Compute the IoU for these anchors with the ground truth boxes:\n iou = numpy_IoU_minmax(_ground_truths, _possible_anchors)\n\n\n\n\n # For each ground truth, we select the anchor that has the highest overlap:\n \n\n _best_anchors = np.argmax(iou, axis=-1)\n _best_anchor_iou = np.max(iou, axis=1)\n\n\n gt_id = np.arange(0, len(_best_anchors))\n\n\n _best_anchor_indexs = [gt_id, _best_anchors]\n \n # Additionally, select anchors with an IOU greater than the positive 
threshold:\n _positive_anchors = np.where(iou > 0.7)\n \n # For negative anchors, we need to make sure each anchor has an IoU with all \n # ground truth that is at most 0.3\n _worst_anchors = np.max(iou.T, axis=1)\n\n _neg_anchors = np.where(_worst_anchors < 0.3)\n\n # print _neg_anchors[0][0:5]\n # print _neg_anchors[1][0:5]\n\n # We need to keep track of the label for each anchor, as well as it's matched\n # ground truth box\n\n labels = np.full(iou.shape, -1)\n\n labels[_positive_anchors] = 1\n # Assign the negative labels first, so it doesn't clobber the best IoU if they \n # are all really low\n labels[:,_neg_anchors] = 0\n\n labels[_best_anchor_indexs] = 1\n\n # Now, we know where the anchors are positive and negative (or don't care)\n # We create a list of anchors (positive and negative) \n\n # Gather the positive labels:\n pos_gt, pos_anchors = np.where(labels == 1)\n n_pos_labels = len(pos_gt)\n\n neg_gt, neg_anchors = np.where(labels == 0)\n n_neg_labels = len(neg_gt)\n\n # Downselect:\n\n if n_pos_labels > (_n_targets / 2):\n pos_labels = np.random.choice(len(pos_gt), size=(int(_n_targets/2)), replace=False)\n pos_gt = pos_gt[pos_labels]\n pos_anchors = pos_anchors[pos_labels]\n n_pos_labels = len(pos_gt)\n pass\n if n_neg_labels > (_n_targets - n_pos_labels):\n neg_labels = np.random.choice(len(neg_gt), size=(_n_targets - n_pos_labels), replace=False)\n neg_gt = neg_gt[neg_labels]\n neg_anchors = neg_anchors[neg_labels]\n print neg_labels.shape\n\n # Join everything together:\n _gt = np.concatenate([pos_gt, neg_gt])\n _matched_anchors = np.concatenate([pos_anchors, neg_anchors])\n _labels = np.zeros((_n_targets))\n _labels[0:len(pos_gt)] = 1\n\n return _labels, _gt, _matched_anchors", "def activations_to_bboxes(actn, anchors, grid_sizes):\n #taken from fastai\n anchors = anchors.type(torch.float64)\n actn_offsets = torch.tanh(actn)\n\n actn_centers = actn_offsets[:, :2]/2 * grid_sizes + anchors[:, :2]\n actn_hw = (actn_offsets[:, 2:]/2+1) * anchors[:, 2:]\n\n return hw2corners(actn_centers, actn_hw)", "def forward(self, image, dtype=torch.float32):\n image_shape = image.shape[2:]\n\n if image_shape == self.last_shape and image.device in self.last_anchors:\n return self.last_anchors[image.device]\n\n if self.last_shape is None or self.last_shape != image_shape:\n self.last_shape = image_shape\n\n if dtype == torch.float16:\n dtype = np.float16\n else:\n dtype = np.float32\n\n boxes_all = []\n for stride in self.strides:\n boxes_level = []\n for scale, ratio in itertools.product(self.scales, self.ratios):\n if image_shape[1] % stride != 0:\n raise ValueError('input size must be divided by the stride.')\n base_anchor_size = self.anchor_scale * stride * scale\n anchor_size_x_2 = base_anchor_size * ratio[0] / 2.0\n anchor_size_y_2 = base_anchor_size * ratio[1] / 2.0\n\n x = np.arange(stride / 2, image_shape[1], stride)\n y = np.arange(stride / 2, image_shape[0], stride)\n xv, yv = np.meshgrid(x, y)\n xv = xv.reshape(-1)\n yv = yv.reshape(-1)\n\n # y1,x1,y2,x2\n boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,\n yv + anchor_size_y_2, xv + anchor_size_x_2))\n boxes = np.swapaxes(boxes, 0, 1)\n boxes_level.append(np.expand_dims(boxes, axis=1))\n # concat anchors on the same level to the reshape NxAx4\n boxes_level = np.concatenate(boxes_level, axis=1)\n boxes_all.append(boxes_level.reshape([-1, 4]))\n\n anchor_boxes = np.vstack(boxes_all)\n\n anchor_boxes = torch.from_numpy(anchor_boxes.astype(dtype)).to(image.device)\n anchor_boxes = anchor_boxes.unsqueeze(0)\n\n # save 
it for later use to reduce overhead\n self.last_anchors[image.device] = anchor_boxes\n return anchor_boxes", "def get_tiled_anchors_for_shape(self, width, height):\n anchors = tf.expand_dims(self.anchors, axis=0)\n feat_height = tf.cast(tf.ceil(height/self.feat_stride), tf.int32)\n feat_width = tf.cast(tf.ceil(width/self.feat_stride), tf.int32)\n anchor_shape = [feat_height * feat_width, 1, 1]\n anchors = tf.tile(anchors, tf.stack(anchor_shape))\n\n x = tf.range(0.0, feat_width * self.feat_stride, self.feat_stride)\n y = tf.range(0.0, feat_height * self.feat_stride, self.feat_stride)\n\n X, Y = tf.meshgrid(x, y)\n X = tf.expand_dims(X, 2)\n Y = tf.expand_dims(Y, 2)\n\n shift = tf.reshape(tf.concat([Y, X, tf.zeros_like(X), tf.zeros_like(X)], 2), [-1, 1, 4])\n\n shift = tf.tile(shift, [1, self.num_anchors, 1])\n\n anchors = tf.cast(anchors, tf.float32) + shift\n return tf.reshape(anchors, [-1, 4])", "def generate_base_anchors(base_size=16, ratios=[0.5, 1, 2], scales=2 ** np.arange(3, 6)):\n base_anchor = np.array([1, 1, base_size, base_size]) - 1\n ratio_anchors = _ratio_enum(base_anchor, ratios)\n anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales) for i in range(ratio_anchors.shape[0])])\n return anchors", "def pad_anchors(input_anchors, n_tiles_x=32, n_tiles_y=32, \n step_size_x=16, step_size_y=16):\n n_input_anchors = len(input_anchors)\n \n # Map the main anchors to cover every n pixels:\n _list_of_anchors = np.tile(input_anchors, reps=(n_tiles_x,n_tiles_y,1,1))\n \n for i in xrange(n_tiles_x):\n _list_of_anchors[i,:,:,0] += step_size_x*i\n for i in xrange(n_tiles_y):\n _list_of_anchors[:,i,:,1] += step_size_y*i\n\n # Flatten the result into a list of anchors (final shape [n_anchors, 4])\n return np.reshape(_list_of_anchors, (n_input_anchors*n_tiles_x*n_tiles_y, 4))", "def center(self, anchors):\n # self.anchors_ = boxes[np.random.choice(n, self.k, replace=True)]\n if isinstance(anchors, list):\n anchors = np.array(anchors)\n self.anchors_ = anchors", "def adjust_anchors(self):\n pass", "def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. 
A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # TODO: If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argmax(overlaps, axis=0)\n rpn_match[gt_iou_argmax] = 1\n # 3. Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE - np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i]]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox", "def create_pos_overlap_metric(anchor_boxes):\n y, x, h, w = np.transpose(anchor_boxes)\n y0 = y - h // 2\n x0 = x - w // 2\n y1 = y + h // 2\n x1 = x + w // 2\n\n def 
pos_overlap(gt_boxes):\n pos_overlaps = []\n for gt_box in gt_boxes:\n gt_y0, gt_x0, gt_y1, gt_x1 = gt_box\n gt_area = (gt_x1 - gt_x0) * (gt_y1 - gt_y0)\n int_y0 = np.maximum(gt_y0, y0)\n int_x0 = np.maximum(gt_x0, x0)\n int_y1 = np.minimum(gt_y1, y1)\n int_x1 = np.minimum(gt_x1, x1)\n int_area = np.maximum(0, int_x1 - int_x0) * np.maximum(0, int_y1 - int_y0)\n pos_overlaps.append(int_area / gt_area)\n # Group by anchor boxes\n pos_overlaps = np.transpose(pos_overlaps)\n # Get max metric index\n gt_indices = np.argmax(pos_overlaps, axis=1)\n # Choose max metric\n pos_overlaps = np.squeeze(np.take_along_axis(pos_overlaps, gt_indices[:, np.newaxis], axis=1))\n # Take respective ground-truth boxes. No reason to return indices, at least in RPN\n gt_boxes = np.take(gt_boxes, gt_indices, axis=0)\n return pos_overlaps, gt_boxes\n return pos_overlap", "def mk_anchor(hs, ws, h_c, w_c):\n ws = ws[:, np.newaxis]\n hs = hs[:, np.newaxis]\n anchors = np.hstack((w_c - 0.5 * ws, h_c - 0.5 * hs, w_c + 0.5 * ws, h_c + 0.5 * hs))\n return anchors", "def generate_pyramid_anchors(scales, ratios, feature_shapes, feature_strides,\n anchor_stride):\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n anchors = []\n for i in range(len(scales)):\n anchors.append(generate_anchors(scales[i], ratios, feature_shapes[i],\n feature_strides[i], anchor_stride))\n return np.concatenate(anchors, axis=0)", "def anchor_and_clip(image):\n\n\t# Offsets for approximate in-game solitaire window size at 1600x900 game window size\n\tmax_x = 1074\n\tmax_y = 675\n\n\tcorner = cv2.imread(\"card_back/anchor/anchor.png\")\n\tresult = cv2.matchTemplate(image, corner, cv2.TM_SQDIFF)\n\tx, y = cv2.minMaxLoc(result)[2]\n\tx += 3\n\ty += 2\n\n\tcrop_image = image[y:y + max_y, x:x + max_x]\n\treturn x, y, crop_image", "def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):\n n_boxes, n_image_source = gt_boxes.shape[:2]\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, n_image_source, 4))\n\n # master boxes\n master_gt_boxes = gt_boxes[:, 0, :]\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. 
A crowd box is given a negative class ID.\n # crowd_ix = np.where(gt_class_ids < 0)[0]\n # if crowd_ix.shape[0] > 0:\n # # Filter out crowds from ground truth class IDs and boxes\n # non_crowd_ix = np.where(gt_class_ids > 0)[0]\n # crowd_boxes = master_gt_boxes[crowd_ix]\n # gt_class_ids = gt_class_ids[non_crowd_ix]\n # master_gt_boxes = master_gt_boxes[non_crowd_ix]\n # # Compute overlaps with crowd boxes [anchors, crowds]\n # crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)\n # crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n # no_crowd_bool = (crowd_iou_max < 0.001)\n # else:\n # # All anchors don't intersect a crowd\n # no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = utils.compute_overlaps(anchors, master_gt_boxes)\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n # rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n rpn_match[anchor_iou_max < 0.3] = -1\n # 2. Set an anchor for each GT box (regardless of IoU value).\n # If multiple anchors have the same IoU match all of them\n gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:,0]\n rpn_match[gt_iou_argmax] = 1\n # 3. 
Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n ix = 0 # index into rpn_bbox\n # TODO: use box_refinement() rather than duplicating the code here\n for i, a in zip(ids, anchors[ids]):\n # Anchor\n a_h = a[2] - a[0]\n a_w = a[3] - a[1]\n a_center_y = a[0] + 0.5 * a_h\n a_center_x = a[1] + 0.5 * a_w\n for idx_image_source in range(n_image_source):\n # Closest gt box (it might have IoU < 0.7)\n gt = gt_boxes[anchor_iou_argmax[i], idx_image_source]\n\n # Convert coordinates to center plus width/height.\n # GT Box\n gt_h = gt[2] - gt[0]\n gt_w = gt[3] - gt[1]\n gt_center_y = gt[0] + 0.5 * gt_h\n gt_center_x = gt[1] + 0.5 * gt_w\n\n\n # Compute the bbox refinement that the RPN should predict.\n rpn_bbox[ix, idx_image_source] = [\n (gt_center_y - a_center_y) / a_h,\n (gt_center_x - a_center_x) / a_w,\n np.log(gt_h / a_h),\n np.log(gt_w / a_w),\n ]\n # Normalize\n rpn_bbox[ix, idx_image_source] /= config.RPN_BBOX_STD_DEV\n ix += 1\n\n return rpn_match, rpn_bbox", "def anchor_matching_nms(self, anchors, targets, box_cls):\n gt_classes = []\n gt_anchors_deltas = []\n keep_nms_list = []\n anchors = Boxes.cat(anchors) # Rx4\n\n box_cls = [permute_to_N_HWA_K(x, self.num_classes) for x in box_cls]\n\n for img_idx, targets_per_image in enumerate(targets):\n match_quality_matrix = pairwise_iou(targets_per_image.gt_boxes, anchors)\n gt_matched_idxs, anchor_labels = self.matcher(match_quality_matrix)\n\n box_cls_per_image = [box_cls_per_level[img_idx] for box_cls_per_level in box_cls]\n box_cls_per_image = torch.cat(box_cls_per_image, dim=0)\n keep_nms = torch.zeros_like(box_cls_per_image).sum(dim=1)\n has_gt = len(targets_per_image) > 0\n if has_gt:\n # ground truth box regression\n matched_gt_boxes = targets_per_image.gt_boxes[gt_matched_idxs]\n gt_anchors_reg_deltas_i = self.box2box_transform.get_deltas(\n anchors.tensor, matched_gt_boxes.tensor\n )\n\n gt_classes_i = targets_per_image.gt_classes[gt_matched_idxs]\n # Anchors with label 0 are treated as background.\n gt_classes_i[anchor_labels == 0] = self.num_classes\n # Anchors with label -1 are ignored.\n gt_classes_i[anchor_labels == -1] = -1\n\n for instance_idxs in range(len(targets_per_image.gt_classes)):\n valid_idx = ((gt_matched_idxs == instance_idxs) & (anchor_labels == 1))\n if len(box_cls_per_image[valid_idx, gt_classes_i[valid_idx]]) == 0:\n continue\n max_id = torch.argmax(box_cls_per_image[valid_idx, gt_classes_i[valid_idx]])\n keep_id = torch.where(valid_idx)[0]\n keep_id = keep_id[max_id]\n keep_nms[keep_id] = 1\n keep_nms = (keep_nms == 1)\n else:\n gt_classes_i = torch.zeros_like(gt_matched_idxs) + self.num_classes\n gt_anchors_reg_deltas_i = torch.zeros_like(anchors.tensor)\n\n gt_classes.append(gt_classes_i)\n 
gt_anchors_deltas.append(gt_anchors_reg_deltas_i)\n keep_nms_list.append(keep_nms)\n\n return torch.stack(gt_classes), torch.stack(gt_anchors_deltas), torch.stack(keep_nms_list)", "def get_targets(\n self,\n anchor_list,\n valid_flag_list,\n gt_bboxes_list,\n img_metas,\n gt_bboxes_ignore_list=None,\n gt_labels_list=None,\n label_channels=1,\n unmap_outputs=True,\n ):\n\n num_imgs = len(img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n concat_anchor_list = []\n concat_valid_flag_list = []\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n concat_anchor_list.append(torch.cat(anchor_list[i]))\n concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))\n\n # compute targets for each image\n if gt_bboxes_ignore_list is None:\n gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n if gt_labels_list is None:\n gt_labels_list = [None for _ in range(num_imgs)]\n results = multi_apply(\n self._get_targets_single,\n concat_anchor_list,\n concat_valid_flag_list,\n gt_bboxes_list,\n gt_bboxes_ignore_list,\n gt_labels_list,\n img_metas,\n label_channels=label_channels,\n unmap_outputs=unmap_outputs)\n\n (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds,\n valid_neg_inds, sampling_result) = results\n\n # Due to valid flag of anchors, we have to calculate the real pos_inds\n # in origin anchor set.\n pos_inds = []\n for i, single_labels in enumerate(labels):\n pos_mask = (0 <= single_labels) & (\n single_labels < self.num_classes)\n pos_inds.append(pos_mask.nonzero().view(-1))\n\n gt_inds = [item.pos_assigned_gt_inds for item in sampling_result]\n return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n gt_inds)", "def k_anchors(true_entity, sen_len, idx):\n anchors = []\n anchor_labels = []\n cls_ids = []\n sample_indexes = []\n\n entity_tag = False\n\n # type 1: C\n anchors.append([idx, idx])\n an_label, cls_id = get_anchor_label(\n idx, idx, true_entity, sen_len)\n anchor_labels.append(an_label)\n cls_ids.append(cls_id)\n if (an_label == 1):\n entity_tag = True\n\n # type 2: CD\n anchors.append([idx, idx + 1])\n an_label, cls_id = get_anchor_label(\n idx, idx + 1, true_entity, sen_len)\n anchor_labels.append(an_label)\n cls_ids.append(cls_id)\n if (an_label == 1):\n entity_tag = True\n\n # type 3: BCD\n anchors.append([idx - 1, idx + 1])\n an_label, cls_id = get_anchor_label(\n idx - 1, idx + 1, true_entity, sen_len)\n anchor_labels.append(an_label)\n cls_ids.append(cls_id)\n if (an_label == 1):\n entity_tag = True\n\n # type 4: BCDE\n anchors.append([idx - 1, idx + 2])\n an_label, cls_id = get_anchor_label(\n idx - 1, idx + 2, true_entity, sen_len)\n anchor_labels.append(an_label)\n cls_ids.append(cls_id)\n if (an_label == 1):\n entity_tag = True\n\n # type 5: ABCDE\n anchors.append([idx - 2, idx + 2])\n an_label, cls_id = get_anchor_label(\n idx - 2, idx + 2, true_entity, sen_len)\n anchor_labels.append(an_label)\n cls_ids.append(cls_id)\n\n # type 6: ABCDEF\n anchors.append([idx - 2, idx + 3])\n an_label, cls_id = get_anchor_label(\n idx - 2, idx + 3, true_entity, sen_len)\n anchor_labels.append(an_label)\n cls_ids.append(cls_id)\n\n if (an_label == 1):\n entity_tag = True\n\n if (entity_tag == True):\n # add other entities as negs\n sample_indexes = list(range(idx*5, (idx+1)*5))\n #print(\"*******word idx\", idx, \"sample indexes:\", sample_indexes)\n\n return anchors, anchor_labels, cls_ids, sample_indexes", "def anchor_pairs(self):\n # TODO unit test for this method\n def _anchors(given_anchor):\n 
if given_anchor is not None:\n yield given_anchor\n else:\n yield from anchors.Anchor\n for src_anch in _anchors(self.orig_anchor):\n for dest_anch in _anchors(self.dest_anchor):\n yield (src_anch, dest_anch)", "def generate_anchor_base(self,base_size=16, ratios=[0.5, 1, 2],\n anchor_scales=[8, 16, 32]):\n py = base_size / 2.\n px = base_size / 2.\n\n anchor_base = np.zeros((len(ratios) * len(anchor_scales), 4),\n dtype=np.float32)\n for i in six.moves.range(len(ratios)):\n for j in six.moves.range(len(anchor_scales)):\n h = base_size * anchor_scales[j] * np.sqrt(ratios[i])\n w = base_size * anchor_scales[j] * np.sqrt(1. / ratios[i])\n\n index = i * len(anchor_scales) + j\n anchor_base[index, 0] = py - h / 2.\n anchor_base[index, 1] = px - w / 2.\n anchor_base[index, 2] = py + h / 2.\n anchor_base[index, 3] = px + w / 2.\n return anchor_base", "def get_atss_targets(\n self,\n anchor_list,\n valid_flag_list,\n gt_bboxes_list,\n img_metas,\n gt_bboxes_ignore_list=None,\n gt_labels_list=None,\n label_channels=1,\n unmap_outputs=True,\n ):\n num_imgs = len(img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n num_level_anchors_list = [num_level_anchors] * num_imgs\n\n # concat all level anchors and flags to a single tensor\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n anchor_list[i] = torch.cat(anchor_list[i])\n valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n # compute targets for each image\n if gt_bboxes_ignore_list is None:\n gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n if gt_labels_list is None:\n gt_labels_list = [None for _ in range(num_imgs)]\n (\n all_anchors,\n all_labels,\n all_label_weights,\n all_bbox_targets,\n all_bbox_weights,\n pos_inds_list,\n neg_inds_list,\n ) = multi_apply(\n self._get_target_single,\n anchor_list,\n valid_flag_list,\n num_level_anchors_list,\n gt_bboxes_list,\n gt_bboxes_ignore_list,\n gt_labels_list,\n img_metas,\n label_channels=label_channels,\n unmap_outputs=unmap_outputs,\n )\n # no valid anchors\n if not all(labels is not None for labels in all_labels):\n return None\n # sampled anchors of all images\n num_total_pos = sum(max(inds.numel(), 1) for inds in pos_inds_list)\n num_total_neg = sum(max(inds.numel(), 1) for inds in neg_inds_list)\n # split targets to a list w.r.t. 
multiple levels\n anchors_list = images_to_levels(all_anchors, num_level_anchors)\n labels_list = images_to_levels(all_labels, num_level_anchors)\n valid_label_mask = self.get_valid_label_mask(img_metas=img_metas, all_labels=all_labels)\n valid_label_mask = [i.to(anchor_list[0].device) for i in valid_label_mask]\n if len(valid_label_mask) > 0:\n valid_label_mask = images_to_levels(valid_label_mask, num_level_anchors)\n\n label_weights_list = images_to_levels(all_label_weights, num_level_anchors)\n bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors)\n bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors)\n return (\n anchors_list,\n labels_list,\n label_weights_list,\n bbox_targets_list,\n bbox_weights_list,\n valid_label_mask,\n num_total_pos,\n num_total_neg,\n )", "def _generate_anchors(self, sizes, ratios, step, alloc_size, offsets):\n assert len(sizes) == 2, \"SSD requires sizes to be (size_min, size_max)\"\n anchors = []\n for i in range(alloc_size[0]):\n for j in range(alloc_size[1]):\n cy = (i + offsets[0]) * step\n cx = (j + offsets[1]) * step\n # ratio = ratios[0], size = size_min or sqrt(size_min * size_max)\n r = ratios[0]\n anchors.append([cx, cy, sizes[0] / 2, sizes[0] / 2])\n # size = sizes[0], ratio = ...\n for r in ratios[1:]:\n sr = np.sqrt(r)\n w = sizes[0] * sr\n h = sizes[0] / sr\n anchors.append([cx, cy, w, h])\n if self._index > 0:\n anchors.append([cx, cy, sizes[1], sizes[1]])\n return np.array(anchors).reshape(1, 1, alloc_size[0], alloc_size[1], -1)", "def box_3d_to_anchor(boxes_3d, ortho_rotate=False):\n\n boxes_3d = np.asarray(boxes_3d).reshape(-1, 7)\n\n fc.check_box_3d_format(boxes_3d)\n\n num_anchors = len(boxes_3d)\n anchors = np.zeros((num_anchors, 6))\n\n # Set x, y, z\n anchors[:, [0, 1, 2]] = boxes_3d[:, [0, 1, 2]]\n\n # Dimensions along x, y, z\n box_l = boxes_3d[:, [3]]\n box_w = boxes_3d[:, [4]]\n box_h = boxes_3d[:, [5]]\n box_ry = boxes_3d[:, [6]]\n\n # Rotate to nearest multiple of 90 degrees\n if ortho_rotate:\n half_pi = np.pi / 2\n box_ry = np.round(box_ry / half_pi) * half_pi\n\n cos_ry = np.abs(np.cos(box_ry))\n sin_ry = np.abs(np.sin(box_ry))\n\n # dim_x, dim_y, dim_z\n anchors[:, [3]] = box_l * cos_ry + box_w * sin_ry\n anchors[:, [4]] = box_h\n anchors[:, [5]] = box_w * cos_ry + box_l * sin_ry\n\n return anchors", "def _generate_anchors(stride, sizes):\n anchor = np.array([0, stride], dtype=np.float)\n anchors = _scale_enum(anchor, sizes)\n return torch.from_numpy(anchors)", "def yolo_head_base(features, anchors, num_classes, input_shape):\n\n dtype = K.dtype(features)\n num_anchors = len(anchors)\n\n grid, grid_shape = construct_grid(features)\n\n # Reshape anchors and features\n anchors_shape = [1, 1, 1, num_anchors, 2] # batch, height, width, num_anchors, box_params\n anchors_tensor = K.reshape(K.constant(anchors), anchors_shape)\n features_shape = [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5]\n features = K.reshape(features, features_shape)\n\n # Adjust predictions to each spatial grid point and anchor size.\n box_xy = (K.sigmoid(features[..., :2]) + grid) / K.cast(grid_shape[::-1], dtype)\n box_wh = K.exp(features[..., 2:4]) * anchors_tensor / K.cast(input_shape[::-1], dtype)\n\n return grid, features, box_xy, box_wh", "def _generate_anchors(self, sizes, ratios, step, alloc_size, offsets):\n assert len(sizes) == 2, \"SSD requires sizes to be (size_min, size_max)\"\n anchors = []\n for i in range(alloc_size[0]):\n for j in range(alloc_size[1]):\n cy = (i + offsets[0]) * step\n 
cx = (j + offsets[1]) * step\n # ratio = ratios[0], size = size_min or sqrt(size_min * size_max)\n r = ratios[0]\n anchors.append([cx, cy, sizes[0], sizes[0]])\n anchors.append([cx, cy, sizes[1], sizes[1]])\n # size = sizes[0], ratio = ...\n for r in ratios[1:]:\n sr = np.sqrt(r)\n w = sizes[0] * sr\n h = sizes[0] / sr\n anchors.append([cx, cy, w, h])\n return np.array(anchors).reshape(1, 1, alloc_size[0], alloc_size[1], -1)", "def label_anchors(anchors, anchor_is_untruncated, gt_classes, gt_bboxes, background_id, iou_low_threshold=0.41, iou_high_threshold=0.61):\n n = anchors.shape[0]\n k = gt_bboxes.shape[0]\n \n # Compute the IoUs of the anchors and ground truth boxes\n tiled_anchors = np.tile(np.expand_dims(anchors, 1), (1, k, 1))\n tiled_gt_bboxes = np.tile(np.expand_dims(gt_bboxes, 0), (n, 1, 1))\n\n tiled_anchors = tiled_anchors.reshape((-1, 4))\n tiled_gt_bboxes = tiled_gt_bboxes.reshape((-1, 4))\n\n ious, ioas, iogs = iou_bbox(tiled_anchors, tiled_gt_bboxes)\n ious = ious.reshape(n, k)\n ioas = ioas.reshape(n, k)\n iogs = iogs.reshape(n, k)\n\n # Label each anchor based on its max IoU\n max_ious = np.max(ious, axis=1)\n max_ioas = np.max(ioas, axis=1)\n max_iogs = np.max(iogs, axis=1)\n \n best_gt_bbox_ids = np.argmax(ious, axis=1)\n\n labels = -np.ones((n), np.int32)\n positive_idx = np.where(max_ious >= iou_high_threshold)[0]\n negative_idx = np.where(max_ious < iou_low_threshold)[0]\n labels[positive_idx] = 1\n labels[negative_idx] = 0\n \n # Truncated anchors are always ambiguous\n ignore_idx = np.where(anchor_is_untruncated==0)[0]\n labels[ignore_idx] = -1\n\n bboxes = gt_bboxes[best_gt_bbox_ids]\n\n classes = gt_classes[best_gt_bbox_ids]\n classes[np.where(labels<1)[0]] = background_id\n\n max_ious[np.where(anchor_is_untruncated==0)[0]] = -1\n max_ioas[np.where(anchor_is_untruncated==0)[0]] = -1\n max_iogs[np.where(anchor_is_untruncated==0)[0]] = -1\n\n return labels, bboxes, classes, max_ious, max_ioas, max_iogs", "def create_overlap_metric(anchor_boxes):\n y, x, h, w = np.transpose(anchor_boxes)\n ab_area = w * h\n y0 = y - h // 2\n x0 = x - w // 2\n y1 = y + h // 2\n x1 = x + w // 2\n\n def overlap(gt_boxes):\n overlaps = []\n for gt_box in gt_boxes:\n gt_y0, gt_x0, gt_y1, gt_x1 = gt_box\n int_y0 = np.maximum(gt_y0, y0)\n int_x0 = np.maximum(gt_x0, x0)\n int_y1 = np.minimum(gt_y1, y1)\n int_x1 = np.minimum(gt_x1, x1)\n int_area = np.maximum(0, int_x1 - int_x0) * np.maximum(0, int_y1 - int_y0)\n overlaps.append(int_area / ab_area)\n overlaps = np.transpose(overlaps)\n gt_indices = np.argmax(overlaps, axis=1)\n overlaps = np.squeeze(np.take_along_axis(overlaps, gt_indices[:, np.newaxis], axis=1))\n gt_boxes = np.take(gt_boxes, gt_indices, axis=0)\n return overlaps, gt_boxes\n return overlap", "def anchor_preds(self, preds, train_anchor_inds, image_offset):\n assert train_anchor_inds.size(1) == 4\n tai = train_anchor_inds.data.clone()\n tai[:, 0] -= image_offset\n train_regions = gather_nd(preds, tai)\n class_preds = train_regions[:, :2]\n box_preds = train_regions[:, 2:]\n return class_preds, box_preds", "def anchor_position(self):\n return self._anchor_x, self._anchor_y", "def load_anchors_blocks(path):\t\t\n\t#TODO : automatization\n\tleft = Building_Block(abbrev=\"l\", num_atoms=2,origin=0, para_pos=0, para_angle=0, meta_pos=0 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = 0,complexity=1, path=path+\"/anchor_small_left.xyz\")\n\tright = Building_Block(abbrev=\"r\", num_atoms=2,origin=0, para_pos=0, para_angle=0., meta_pos=0 , meta_angle = 0., 
ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/anchor_small_right.xyz\")\n\t\n\tanchors = [left,right]\n\n\treturn anchors", "def auto_set_anchors(self, kpt=(0.0, 0.0, 0.0)):\n ik = self.find_k(kpt)\n psi = self.get_psi_k(ik)[:, :] * self.occ[ik][None, :]\n psi_Dagger = psi.T.conj()\n self.cols = scdm(psi_Dagger, self.nwann)\n if self.sort_cols:\n self.cols = np.sort(self.cols)\n print(f\"The eigenvalues at anchor k: {self.get_eval_k(ik)}\")\n print(f\"anchor_kpt={kpt}. Selected columns: {self.cols}.\")", "def _set_anchor_center(img):\n img.anchor_x = int(img.width / 2)\n img.anchor_y = int(img.height / 2)", "def build_rpn_targets(anchors, gt_class_ids, gt_boxes, config):\n # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral\n rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)\n # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]\n rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4), dtype=np.float32)\n\n # can happen if all items cropped out or image with no items\n if (gt_class_ids == 0).all():\n return rpn_match, rpn_bbox\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = np.where(gt_class_ids < 0)[0]\n if crowd_ix.shape[0] > 0:\n # Filter out crowds from ground truth class IDs and boxes\n non_crowd_ix = np.where(gt_class_ids > 0)[0]\n crowd_boxes = gt_boxes[crowd_ix]\n gt_class_ids = gt_class_ids[non_crowd_ix]\n gt_boxes = gt_boxes[non_crowd_ix]\n # Compute overlaps with crowd boxes [anchors, crowds]\n crowd_overlaps = box_utils.compute_overlaps(anchors, crowd_boxes)\n crowd_iou_max = np.amax(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n else:\n # All anchors don't intersect a crowd\n no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)\n\n # Compute overlaps [num_anchors, num_gt_boxes]\n overlaps = box_utils.compute_overlaps(from_numpy(anchors),\n from_numpy(gt_boxes))\n overlaps = overlaps.cpu().numpy()\n\n # Match anchors to GT Boxes\n # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.\n # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.\n # Neutral anchors are those that don't match the conditions above,\n # and they don't influence the loss function.\n # However, don't keep any GT box unmatched (rare, but happens). Instead,\n # match it to the closest anchor (even if its max IoU is < 0.3).\n #\n\n # 1. Set negative anchors first. They get overwritten below if a GT box is\n # matched to them. Skip boxes in crowd areas.\n anchor_iou_argmax = np.argmax(overlaps, axis=1)\n anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]\n rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1\n\n # 2. Set an anchor for each GT box (regardless of IoU value).\n gt_iou_argmax = np.argmax(overlaps, axis=0)\n rpn_match[gt_iou_argmax] = 1\n\n # 3. 
Set anchors with high overlap as positive.\n rpn_match[anchor_iou_max >= 0.7] = 1\n\n # Subsample to balance positive and negative anchors\n # Don't let positives be more than half the anchors\n ids = np.where(rpn_match == 1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)\n if extra > 0:\n # Reset the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # Same for negative proposals\n ids = np.where(rpn_match == -1)[0]\n extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -\n np.sum(rpn_match == 1))\n if extra > 0:\n # Rest the extra ones to neutral\n ids = np.random.choice(ids, extra, replace=False)\n rpn_match[ids] = 0\n\n # For positive anchors, compute shift and scale needed to transform them\n # to match the corresponding GT boxes.\n ids = np.where(rpn_match == 1)[0]\n boxes = box_utils.box_refinement(from_numpy(anchors[ids]),\n from_numpy(gt_boxes[anchor_iou_argmax[ids]]))\n boxes = boxes.cpu().numpy()\n rpn_bbox[:len(boxes)] = boxes\n\n # Normalize\n rpn_bbox /= np.array(config.RPN_BBOX_STD_DEV, dtype=np.float32)\n\n return rpn_match, rpn_bbox", "def generate_anchors(\n stride=16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)\n):\n return _generate_anchors(\n stride,\n np.array(sizes, dtype=np.float) / stride,\n np.array(aspect_ratios, dtype=np.float),\n )", "def num_anchors_per_location(self):\n pass", "def align(self, image, landmark_indices, anchor_points, size=96):\n\n detected = self.detect(image)\n coords = self.find_landmarks(image, detected)\n in_points = coords[landmark_indices]\n in_points = in_points.astype('float32')\n out_points = anchor_points * size\n warp_mat = cv2.getAffineTransform(in_points, out_points)\n warp_dst = cv2.warpAffine(image, warp_mat, (size, size))\n\n return warp_dst", "def get_anchor_pos(self, anchor) -> Vec:\n x, y = self.pos\n w, h = self.size\n\n # faster and prettier than if/elif chains\n rct = {\n Anchor.top_left: Vec(x, y),\n Anchor.top: Vec(x + (w / 2), y),\n Anchor.top_right: Vec(x + (w / 2), y + h),\n Anchor.middle_left: Vec(x, y + (h / 2)),\n Anchor.middle: Vec(x + (w / 2), y + (h / 2)),\n Anchor.middle_right: Vec(x + w, y + (h / 2)),\n Anchor.bottom_left: Vec(x, y + h),\n Anchor.bottom: Vec(x + (w / 2), y + h),\n Anchor.bottom_right: Vec(x + w, y + h)\n }\n\n if anchor in rct:\n return rct[anchor]\n return rct[Anchor.top_left]", "def base_anchor(base_size=16, ratios=(0.5, 1, 2), scales=(8, 16, 32)):\n base_anchor = np.array([0, 0, base_size, base_size]).reshape((1, 4))\n ratio_anchors = ratio_enum(base_anchor, np.array(ratios))\n anchors = np.vstack([scale_enum(ratio_anchors[i, :], np.array(scales)) for i in range(len(ratio_anchors))])\n return totensor(anchors).float()", "def im_detections(model, im, anchors):\n k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL\n A = cfg.RETINANET.SCALES_PER_OCTAVE * len(cfg.RETINANET.ASPECT_RATIOS)\n inputs = {}\n inputs['data'], inputs['im_info'] = _get_image_blob(im)\n cls_probs, box_preds = [], []\n for lvl in range(k_min, k_max + 1):\n suffix = 'fpn{}'.format(lvl)\n cls_probs.append(core.ScopedName('retnet_cls_prob_{}'.format(suffix)))\n box_preds.append(core.ScopedName('retnet_bbox_pred_{}'.format(suffix)))\n for k, v in inputs.items():\n workspace.FeedBlob(core.ScopedName(k), v.astype(np.float32, copy=False))\n\n workspace.RunNet(model.net.Proto().name)\n scale = inputs['im_info'][0, 2]\n cls_probs = workspace.FetchBlobs(cls_probs)\n box_preds = workspace.FetchBlobs(box_preds)\n\n # here the boxes_all are [x0, 
y0, x1, y1, score]\n boxes_all = defaultdict(list)\n\n cnt = 0\n for lvl in range(k_min, k_max + 1):\n # create cell anchors array\n stride = 2. ** lvl\n cell_anchors = anchors[lvl]\n\n # fetch per level probability\n cls_prob = cls_probs[cnt]\n box_pred = box_preds[cnt]\n cls_prob = cls_prob.reshape((\n cls_prob.shape[0], A, int(cls_prob.shape[1] / A),\n cls_prob.shape[2], cls_prob.shape[3]))\n box_pred = box_pred.reshape((\n box_pred.shape[0], A, 4, box_pred.shape[2], box_pred.shape[3]))\n cnt += 1\n\n if cfg.RETINANET.SOFTMAX:\n cls_prob = cls_prob[:, :, 1::, :, :]\n\n cls_prob_ravel = cls_prob.ravel()\n # In some cases [especially for very small img sizes], it's possible that\n # candidate_ind is empty if we impose threshold 0.05 at all levels. This\n # will lead to errors since no detections are found for this image. Hence,\n # for lvl 7 which has small spatial resolution, we take the threshold 0.0\n th = cfg.RETINANET.INFERENCE_TH if lvl < k_max else 0.0\n candidate_inds = np.where(cls_prob_ravel > th)[0]\n if (len(candidate_inds) == 0):\n continue\n\n pre_nms_topn = min(cfg.RETINANET.PRE_NMS_TOP_N, len(candidate_inds))\n inds = np.argpartition(\n cls_prob_ravel[candidate_inds], -pre_nms_topn)[-pre_nms_topn:]\n inds = candidate_inds[inds]\n\n inds_5d = np.array(np.unravel_index(inds, cls_prob.shape)).transpose()\n classes = inds_5d[:, 2]\n anchor_ids, y, x = inds_5d[:, 1], inds_5d[:, 3], inds_5d[:, 4]\n scores = cls_prob[:, anchor_ids, classes, y, x]\n\n boxes = np.column_stack((x, y, x, y)).astype(dtype=np.float32)\n boxes *= stride\n boxes += cell_anchors[anchor_ids, :]\n\n if not cfg.RETINANET.CLASS_SPECIFIC_BBOX:\n box_deltas = box_pred[0, anchor_ids, :, y, x]\n else:\n box_cls_inds = classes * 4\n box_deltas = np.vstack(\n [box_pred[0, ind:ind + 4, yi, xi]\n for ind, yi, xi in zip(box_cls_inds, y, x)]\n )\n pred_boxes = (\n box_utils.bbox_transform(boxes, box_deltas)\n if cfg.TEST.BBOX_REG else boxes)\n pred_boxes /= scale\n pred_boxes = box_utils.clip_tiled_boxes(pred_boxes, im.shape)\n box_scores = np.zeros((pred_boxes.shape[0], 5))\n box_scores[:, 0:4] = pred_boxes\n box_scores[:, 4] = scores\n\n for cls in range(1, cfg.MODEL.NUM_CLASSES):\n inds = np.where(classes == cls - 1)[0]\n if len(inds) > 0:\n boxes_all[cls].extend(box_scores[inds, :])\n\n # Combine predictions across all levels and retain the top scoring by class\n detections = []\n for cls, boxes in boxes_all.items():\n cls_dets = np.vstack(boxes).astype(dtype=np.float32)\n # do class specific nms here\n keep = box_utils.nms(cls_dets, cfg.TEST.NMS)\n cls_dets = cls_dets[keep, :]\n out = np.zeros((len(keep), 6))\n out[:, 0:5] = cls_dets\n out[:, 5].fill(cls)\n detections.append(out)\n\n detections = np.vstack(detections)\n # sort all again\n inds = np.argsort(-detections[:, 4])\n detections = detections[inds[0:cfg.TEST.DETECTIONS_PER_IM], :]\n boxes = detections[:, 0:4]\n scores = detections[:, 4]\n classes = detections[:, 5]\n return boxes, scores, classes", "def decode_boxes(raw_boxes, anchors):\n boxes = np.zeros_like(raw_boxes)\n\n x_center = raw_boxes[..., 0] / x_scale * anchors[:, 2] + anchors[:, 0]\n y_center = raw_boxes[..., 1] / y_scale * anchors[:, 3] + anchors[:, 1]\n\n w = raw_boxes[..., 2] / w_scale * anchors[:, 2]\n h = raw_boxes[..., 3] / h_scale * anchors[:, 3]\n\n boxes[..., 0] = y_center - h / 2. # ymin\n boxes[..., 1] = x_center - w / 2. # xmin\n boxes[..., 2] = y_center + h / 2. # ymax\n boxes[..., 3] = x_center + w / 2. 
# xmax\n\n for k in range(num_keypoints):\n offset = 4 + k*2\n keypoint_x = raw_boxes[..., offset] / x_scale * anchors[:, 2] + anchors[:, 0]\n keypoint_y = raw_boxes[..., offset + 1] / y_scale * anchors[:, 3] + anchors[:, 1]\n boxes[..., offset] = keypoint_x\n boxes[..., offset + 1] = keypoint_y\n\n return boxes", "def get_valid_anchor_weights_in_flattened_image(true_image_shapes, height,\n width):\n\n indices = tf.reshape(tf.range(height * width), [1, -1])\n batch_size = tf.shape(true_image_shapes)[0]\n batch_indices = tf.ones((batch_size, 1), dtype=tf.int32) * indices\n\n y_coords, x_coords, _ = row_col_channel_indices_from_flattened_indices(\n batch_indices, width, 1)\n\n max_y, max_x = true_image_shapes[:, 0], true_image_shapes[:, 1]\n max_x = _to_float32(tf.expand_dims(max_x, 1))\n max_y = _to_float32(tf.expand_dims(max_y, 1))\n\n x_coords = _to_float32(x_coords)\n y_coords = _to_float32(y_coords)\n\n valid_mask = tf.math.logical_and(x_coords < max_x, y_coords < max_y)\n\n return _to_float32(valid_mask)", "def generate_anchor(input_size, stride, anchor_scale, anchor_aspect):\n assert len(anchor_scale) == len(anchor_aspect)\n h, w = input_size\n hs, ws = h // stride, w // stride\n S_fmap = hs * ws\n total_anchor_size = []\n for ab_scale, aspect_ratio in zip(anchor_scale, anchor_aspect):\n for a in aspect_ratio:\n S_ab = S_fmap * ab_scale\n ab_w = np.floor(np.sqrt(S_ab))\n ab_h =ab_w * a\n total_anchor_size.append([ab_w, ab_h])\n return total_anchor_size", "def get_boxes_v2(output, img_size, anchors):\n bias_w, bias_h = anchors\n \n w_img, h_img = img_size[1], img_size[0]\n grid_w, grid_h, num_boxes = output.shape[:3]\n\n offset_x = \\\n np.tile(np.arange(grid_w)[:, np.newaxis], (grid_h, 1, num_boxes))\n offset_y = np.transpose(offset_x, (1, 0, 2))\n boxes = output.copy()\n boxes[:, :, :, 0] = (offset_x + logistic(boxes[:, :, :, 0])) / grid_w\n boxes[:, :, :, 1] = (offset_y + logistic(boxes[:, :, :, 1])) / grid_h\n boxes[:, :, :, 2] = np.exp(boxes[:, :, :, 2]) * bias_w / grid_w\n boxes[:, :, :, 3] = np.exp(boxes[:, :, :, 3]) * bias_h / grid_h\n\n boxes[:, :, :, [0, 2]] *= w_img\n boxes[:, :, :, [1, 3]] *= h_img\n\n return boxes", "def create_fixed_generator(anchor_boxes, valid_indices,\n lower_threshold, upper_threshold,\n ratio=1., metric='iou', minibatch_size=256, seed=42):\n assert minibatch_size <= len(valid_indices), 'Minibatch length must be greater than valid regions number'\n assert metric in _metrics.keys(), 'Only available metrics are \\'iou\\', \\'positive_overlap\\' and \\'overlap\\''\n valid_ab = anchor_boxes[valid_indices]\n compute_metric = _metrics[metric](valid_ab)\n neg_samples = floor(minibatch_size / (1 + ratio))\n pos_samples = ceil(neg_samples * ratio)\n targets_shape = (len(anchor_boxes), 5)\n random_generator = np.random.default_rng(seed=seed)\n\n def targets_generator(gt_boxes):\n metrics, gt_boxes = compute_metric(gt_boxes)\n neg_ind = np.flatnonzero(metrics < lower_threshold)\n pos_ind = np.flatnonzero(metrics > upper_threshold)\n\n if len(neg_ind) > neg_samples:\n neg_ind = random_generator.choice(neg_ind, neg_samples, replace=False)\n elif len(neg_ind) < neg_samples:\n neg_ind = np.argpartition(metrics, neg_samples)[:neg_samples]\n if len(pos_ind) > pos_samples:\n pos_ind = random_generator.choice(pos_ind, pos_samples, replace=False)\n elif len(pos_ind) < pos_samples:\n pos_ind = np.argpartition(metrics, len(metrics) - pos_samples)[-pos_samples:]\n labels = np.full_like(metrics, -1, dtype='int')\n labels[pos_ind] = 1\n labels[neg_ind] = 0\n\n deltas = 
np.full_like(gt_boxes, 0, dtype='float')\n deltas[pos_ind] = compute_deltas(valid_ab[pos_ind], gt_boxes[pos_ind])\n\n targets = np.zeros(targets_shape, dtype='float')\n targets[:, 0] = -1\n targets[valid_indices] = np.hstack([labels[:, np.newaxis], deltas])\n # Since there is no way to give a loss function two tensors,\n # we have to make one, containing all required labels\n return targets\n return targets_generator", "def calc_rpn_label_regr(img_data, width, height, resized_width, resize_height, downsampling_ratio,\n anchor_box_sizes, anchor_box_ratios, rpn_max_overlap, rpn_min_overlap):\n num_anchors = len(anchor_box_sizes) * len(anchor_box_ratios)\n num_anchor_ratios = len(anchor_box_ratios)\n num_bboxes = len(img_data[\"bboxes\"])\n # get the output feature map size based on the model architecture downsampling ratio\n fm_width, fm_height = resized_width / downsampling_ratio, resize_height / downsampling_ratio\n\n # stores the label of each anchor, indicating whether this anchor contains an object or not\n y_rpn_label = np.zeros((fm_height, fm_width, num_anchors))\n # stores the validness of each anchor, indicating whether this anchor has a label or not\n y_is_box_valid = np.zeros((fm_height, fm_width, num_anchors))\n # stores the delta regressions of each anchor,\n # [delta_center_x, delta_center_y, delta_width, delta_height]\n y_rpn_regr = np.zeros((fm_height, fm_width, num_anchors * 4))\n\n # number of anchors that one bounding box contains\n num_anchors_for_bbox = np.zeros(num_bboxes).astype(int)\n # the best anchor that one bounding box contains\n # [ feature_map_row_pixel_index, feature_map_column_pixel_index, anchor_ratio_index, anchor_size_index ]\n best_anchor_for_bbox = -1 * np.ones((num_bboxes, 4)).astype(int)\n # the best iou that one bounding box intersects with anchors\n best_iou_for_bbox = np.zeros(num_bboxes).astype(np.float32)\n # the best anchor regression target that one bounding box contains\n # [ delta_center_x, delta_center_y, delta_width, delta_height ]\n best_delta_for_bbox = np.zeros((num_bboxes, 4)).astype(np.float32)\n\n # convert bounding boxes in original images to that in resized images\n # columns: [ x1, x2, y1, y2 ]\n gta = np.zeros((num_bboxes, 4))\n for index, bbox in enumerate(img_data[\"bboxes\"]):\n gta[index, 0] = bbox[\"x1\"] * (resized_width * 1.0 / width)\n gta[index, 1] = bbox[\"x2\"] * (resized_width * 1.0 / width)\n gta[index, 2] = bbox[\"y1\"] * (resized_height * 1.0 / height)\n gta[index, 3] = bbox[\"y2\"] * (resized_height * 1.0 / height)\n\n # we start to iterate each combination of anchors\n for anchor_size_idx in range(len(anchor_box_sizes)):\n for anchor_ratio_idx in range(num_anchor_ratios):\n # first we determine the (width, height) of the anchor\n anchor_width = anchor_box_sizes[anchor_size_idx] * anchor_box_ratios[anchor_ratio_idx][0]\n anchor_height = anchor_box_sizes[anchor_size_idx] * anchor_box_ratios[anchor_ratio_idx][1]\n\n # then we traverse the feature map plane\n for ix in range(fm_width):\n # the anchor coordinates in resized image input\n anchor_x1 = downsampling_ratio * (ix + 0.5) - anchor_width / 2\n anchor_x2 = downsampling_ratio * (ix + 0.5) + anchor_width / 2\n\n if anchor_x1 < 0 or anchor_x2 > resized_width:\n continue\n\n for jy in range(fm_height):\n # the anchor coordinates in resized image input\n anchor_y1 = downsampling_ratio * (yj + 0.5) - anchor_height / 2\n anchor_y2 = downsampling_ratio * (yj + 0.5) + anchor_height / 2\n\n if anchor_y1 < 0 or anchor_y2 > resized_height:\n continue\n\n # ok, until now we 
get the specific anchor in resized image \n # (anchor_x1, anchor_x2, anchor_y1, anchor_y2)\n current_anchor_coord = [ anchor_x1, anchor_y1, anchor_x2, anchor_y2 ]\n\n anchor_label = \"neg\"\n best_iou_for_anchor = 0.0\n\n for bbox_idx in range(num_bboxes):\n current_bbox_coord = [ gta[bbox_idx, 0], gta[bbox_idx, 2], gta[bbox_idx, 1], gta[bbox_idx, 3] ]\n current_iou = iou.iou(current_bbox_coord, current_anchor_coord)\n\n # calculate regression target\n center_bbox_x = (gta[bbox_idx, 0] + gta[bbox_idx, 1]) / 2.0\n center_bbox_y = (gta[bbox_idx, 2] + gta[bbox_idx, 3]) / 2.0\n center_anchor_x = (anchor_x1 + anchor_x2) / 2.0\n center_anchor_y = (anchor_y1 + anchor_y2) / 2.0\n bbox_width = gta[bbox_idx, 1] - gta[bbox_idx, 0]\n bbox_height = gta[bbox_idx, 3] - gta[bbox_idx, 2]\n\n delta_center_x = (center_bbox_x - center_anchor_x) / anchor_width\n delta_center_y = (center_bbox_y - center_anchor_y) / anchor_height\n delta_width = np.log(bbox_width / anchor_width)\n delta_height = np.log(bbox_height / anchor_height)\n\n # we should help non-background bounding box find a best anchor\n if img_data[\"bboxes\"][bbox_idx][\"class\"] != \"bg\":\n if current_iou > best_iou_for_bbox[bbox_idx]:\n best_anchor_for_bbox[bbox_num] = [jy, ix, anchor_ratio_idx, anchor_size_idx]\n best_iou_for_bbox[bbox_num] = current_ious\n best_delta_for_bbox[bbox_num, :] = [delta_center_x, delta_center_y, delta_width, delta_height]\n\n # if the current iou surpasses the upper threshold, we will set the anchor\n # label to be true\n if current_iou > rpn_max_overlap:\n anchor_label = \"pos\"\n num_anchors_for_bbox[bbox_num] += 1\n # we should find the best regression target\n if current_iou > best_iou_for_anchor:\n best_iou_for_anchor = current_iou\n best_regr = (delta_center_x, delta_center_y, delta_width, delta_height)\n\n # if the current iou is in between lower and upper threshold, we will not\n # set the anchor label\n if current_ious > rpn_min_overlap and current_ious < rpn_max_overlap:\n if anchor_label != \"pos\":\n anchor_label = \"neutral\"\n\n # determine the classification target\n if anchor_label == \"neg\":\n y_is_box_valid[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 1\n y_rpn_label[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 0\n elif anchor_label == \"neutral\":\n y_is_box_valid[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 0\n y_rpn_label[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 0\n elif anchor_label == \"pos\":\n y_is_box_valid[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 1\n y_rpn_label[jy, ix, num_anchor_ratios * anchor_size_idx + anchor_ratio_idx] = 1\n start = 4 * (num_anchor_ratios * anchor_size_idx + anchor_ratio_idx)\n y_rpn_regr[jy, ix, start: start + 4] = best_regr\n\n\n # maybe some ground truth bounding box has no anchors iou more than upper threshold,\n # we should assign the best anchor for the ground truth\n for idx in range(num_anchors_for_bbox.shape[0]):\n if num_anchors_for_bbox[idx] == 0:\n if best_anchor_for_bbox[idx, 0] == -1:\n continue\n jy, ix, ratio_index, size_index = best_anchor_for_bbox[idx, :]\n y_is_box_valid[jy, ix, num_anchor_ratios * size_index + ratio_index] = 1\n y_rpn_label[jy, ix, num_anchor_ratios * size_index + ratio_index] = 1\n start = 4 * (num_anchor_ratios * size_index + ratio_index)\n y_rpn_regp[jy, ix, start: start + 4] = best_delta_for_bbox[idx, :]\n\n y_rpn_label = np.expand_dims(y_rpn_label, axis=0)\n y_is_box_valid = np.expand_dims(y_is_box_valid, 
axis=0)\n\n positives = np.where(np.logical_and(y_rpn_label[0, :, :, :] == 1, y_is_box_valid[0, :, :, :] == 1))\n negatives = np.where(np.logical_and(y_rpn_label[0, :, :, :] == 0, y_is_box_valid[0, :, :, :] == 1))\n\n num_positives = len(positives[0])\n num_negatives = len(negatives[0])\n\n # normally the rpn has more negatives than positives, so we close some negatives, and limit the\n # total number\n num_regions = 256\n\n if num_positives > num_regions / 2:\n sampled_positives = random.sample(range(num_positives), num_positives - num_regions / 2)\n y_is_box_valid[0, positives[0][sampled_positives], positives[1][sampled_positives], positives[2][sampled_positives]] = 0\n num_positives = num_regions / 2\n\n if num_negatives + num_positives > num_regions:\n sampled_negatives = random.sample(range(num_negatives), num_negatives + num_positives - num_regions)\n y_is_box_valid[0, negatives[0][sampled_negatives], negatives[1][sampled_negatives], negatives[2][sampled_negatives]] = 0\n num_negatives = num_regions - num_positives\n\n # the result rpn classification labels, for the last axis, the first half part indicates whether\n # this anchor is a sample of not(contribute to the loss), the second half part indicates the\n # true labels\n result_rpn_labels = np.concatenate([y_is_box_valid, y_rpn_label], axis=3)\n # the result rpn regression targets, for the last axis, the first half part indicates whether the\n # (index + half length) postision should contribute to the regression loss, you know only the\n # anchors containing objects calculate the loss\n result_rpn_regr = np.concatenate([np.repeat(y_rpn_label, 4, axis=3), y_rpn_regr], axis=3)\n\n return np.copy(result_rpn_labels), np.copy(result_rpn_regr)", "def build_anchor_generator(anchor_config):\n\n if 'anchor_generator_stride' in anchor_config:\n config = anchor_config.anchor_generator_stride\n ag = AnchorGeneratorStride(\n sizes=list(config.sizes),\n anchor_strides=list(config.strides),\n anchor_offsets=list(config.offsets),\n rotations=list(config.rotations),\n match_threshold=config.matched_threshold,\n unmatch_threshold=config.unmatched_threshold,\n class_id=config.class_name)\n return ag\n elif 'anchor_generator_range' in anchor_config:\n config = anchor_config.anchor_generator_range\n ag = AnchorGeneratorRange(\n sizes=list(config.sizes),\n anchor_ranges=list(config.anchor_ranges),\n rotations=list(config.rotations),\n match_threshold=config.matched_threshold,\n unmatch_threshold=config.unmatched_threshold,\n class_id=config.class_name)\n return ag\n else:\n raise ValueError(\" unknown anchor generator type\")", "def build_targets(pred_boxes, pred_conf, pred_cls, target, anchors, num_anchors, num_classes, grid_size, ignore_thres, img_dim):\n nB = target.size(0)\n nA = num_anchors\n nC = num_classes\n nG = grid_size\n mask = torch.zeros(nB, nA, nG, nG)\n conf_mask = torch.ones(nB, nA, nG, nG)\n tx = torch.zeros(nB, nA, nG, nG)\n ty = torch.zeros(nB, nA, nG, nG)\n tw = torch.zeros(nB, nA, nG, nG)\n th = torch.zeros(nB, nA, nG, nG)\n tconf = torch.ByteTensor(nB, nA, nG, nG).fill_(0)\n tcls = torch.ByteTensor(nB, nA, nG, nG, nC).fill_(0)\n\n nGT = 0\n nCorrect = 0\n for b in range(nB):\n for t in range(target.shape[1]):\n if target[b, t].sum() == 0:\n # pad\n continue\n nGT += 1\n # Convert to position relative to box\n gx = target[b, t, 1] * nG\n gy = target[b, t, 2] * nG\n gw = target[b, t, 3] * nG\n gh = target[b, t, 4] * nG\n # Get grid box indices\n gi = int(gx)\n gj = int(gy)\n # Get shape of gt box\n gt_box = torch.FloatTensor(\n 
np.array([0, 0, gw, gh])).unsqueeze(0)\n # Get shape of anchor box\n anchor_shapes = torch.FloatTensor(np.concatenate(\n (np.zeros((len(anchors), 2)), np.array(anchors)), 1))\n\n # Calculate iou between gt and anchor shapes\n # 1 on 3\n anch_ious = bbox_iou(gt_box, anchor_shapes)\n # Where the overlap is larger than threshold set mask to zero (ignore)\n conf_mask[b, anch_ious > ignore_thres, gj, gi] = 0\n # Find the best matching anchor box\n\n best_n = np.argmax(anch_ious)\n # Get ground truth box\n gt_box = torch.FloatTensor(\n np.array([gx, gy, gw, gh])).unsqueeze(0)\n # Get the best prediction\n pred_box = pred_boxes[b, best_n, gj, gi].unsqueeze(0)\n # Masks\n mask[b, best_n, gj, gi] = 1\n conf_mask[b, best_n, gj, gi] = 1\n # Coordinates\n tx[b, best_n, gj, gi] = gx - gi\n ty[b, best_n, gj, gi] = gy - gj\n # Width and height\n tw[b, best_n, gj, gi] = math.log(\n gw / anchors[best_n][0] + 1e-16)\n th[b, best_n, gj, gi] = math.log(\n gh / anchors[best_n][1] + 1e-16)\n # One-hot encoding of label\n target_label = int(target[b, t, 0])\n tcls[b, best_n, gj, gi, target_label] = 1\n tconf[b, best_n, gj, gi] = 1\n\n # Calculate iou between ground truth and best matching prediction\n iou = bbox_iou(gt_box, pred_box, x1y1x2y2=False)\n pred_label = torch.argmax(pred_cls[b, best_n, gj, gi])\n score = pred_conf[b, best_n, gj, gi]\n if iou > 0.5 and pred_label == target_label and score > 0.5:\n nCorrect += 1\n\n return nGT, nCorrect, mask, conf_mask, tx, ty, tw, th, tconf, tcls", "def get_bboxes_from_deltas(anchors, deltas):\n all_anc_width = anchors[:, :, 3] - anchors[:, :, 1]\n all_anc_height = anchors[:, :, 2] - anchors[:, :, 0]\n all_anc_ctr_x = anchors[:, :, 1] + 0.5 * all_anc_width\n all_anc_ctr_y = anchors[:, :, 0] + 0.5 * all_anc_height\n #\n all_bbox_width = tf.exp(deltas[:, :, 3]) * all_anc_width\n all_bbox_height = tf.exp(deltas[:, :, 2]) * all_anc_height\n all_bbox_ctr_x = (deltas[:, :, 1] * all_anc_width) + all_anc_ctr_x\n all_bbox_ctr_y = (deltas[:, :, 0] * all_anc_height) + all_anc_ctr_y\n #\n y1 = all_bbox_ctr_y - (0.5 * all_bbox_height)\n x1 = all_bbox_ctr_x - (0.5 * all_bbox_width)\n y2 = all_bbox_height + y1\n x2 = all_bbox_width + x1\n #\n return tf.stack([y1, x1, y2, x2], axis=2)", "def tf_box_3d_to_anchor(boxes_3d):\n\n boxes_3d = tf.reshape(boxes_3d, [-1, 7])\n\n anchors_x = boxes_3d[:, 0]\n anchors_y = boxes_3d[:, 1]\n anchors_z = boxes_3d[:, 2]\n\n # Dimensions along x, y, z\n box_l = boxes_3d[:, 3]\n box_w = boxes_3d[:, 4]\n box_h = boxes_3d[:, 5]\n box_ry = boxes_3d[:, 6]\n\n # Ortho rotate\n half_pi = np.pi / 2\n box_ry = tf.round(box_ry / half_pi) * half_pi\n cos_ry = tf.abs(tf.cos(box_ry))\n sin_ry = tf.abs(tf.sin(box_ry))\n\n anchors_dimx = box_l * cos_ry + box_w * sin_ry\n anchors_dimy = box_h\n anchors_dimz = box_w * cos_ry + box_l * sin_ry\n\n anchors = tf.stack([anchors_x, anchors_y, anchors_z,\n anchors_dimx, anchors_dimy, anchors_dimz],\n axis=1)\n\n return anchors", "def preprocess_true_boxes(true_boxes, anchors, image_size, num_classes):\n height, width = image_size\n num_anchors = len(anchors)\n # Downsampling factor of 5x 2-stride max_pools == 32.\n # TODO: Remove hardcoding of downscaling calculations.\n assert height % 32 == 0, 'Image sizes in YOLO_v2 must be multiples of 32.'\n assert width % 32 == 0, 'Image sizes in YOLO_v2 must be multiples of 32.'\n conv_height = height // 32\n conv_width = width // 32\n num_box_params = true_boxes.shape[1] - 1 + num_classes\n detectors_mask = np.zeros(\n (num_anchors, 1, conv_height, conv_width), dtype=np.float32)\n 
matching_true_boxes = np.zeros(\n (num_anchors, num_box_params, conv_height, conv_width),\n dtype=np.float32)\n\n for box in true_boxes:\n # scale box to convolutional feature spatial dimensions\n box_class = box[4:5].astype(np.int32)\n box_class_one_hot = np.zeros(num_classes)\n box_class_one_hot[box_class] = 1\n\n box = box[0:4] * np.array(\n [conv_width, conv_height, conv_width, conv_height])\n i = min(conv_height - 1, max(np.floor(box[1]).astype('int'), 0))\n j = min(conv_width - 1, max(np.floor(box[0]).astype('int'), 0))\n best_iou = 0\n best_anchor = 0\n\n for k, anchor in enumerate(anchors):\n # Find IOU between box shifted to origin and anchor box.\n box_maxes = box[2:4] / 2.\n box_mins = -box_maxes\n anchor_maxes = (anchor / 2.)\n anchor_mins = -anchor_maxes\n\n intersect_mins = np.maximum(box_mins, anchor_mins)\n intersect_maxes = np.minimum(box_maxes, anchor_maxes)\n intersect_wh = np.maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[0] * intersect_wh[1]\n box_area = box[2] * box[3]\n anchor_area = anchor[0] * anchor[1]\n iou = intersect_area / (box_area + anchor_area - intersect_area)\n if iou > best_iou:\n best_iou = iou\n best_anchor = k\n\n if best_iou > 0:\n detectors_mask[best_anchor, :, i, j] = 1\n adjusted_box = np.array(\n [\n box[0] - j,\n box[1] - i,\n np.log(box[2] / anchors[best_anchor][0]),\n np.log(box[3] / anchors[best_anchor][1])\n ],\n dtype=np.float32)\n adjusted_box = np.concatenate((adjusted_box, box_class_one_hot))\n matching_true_boxes[best_anchor, :, i, j] = adjusted_box\n return detectors_mask, matching_true_boxes", "def _shift_anchors(anchors, direction):\n new_anchors = deepcopy(anchors)\n if direction == 'center':\n pass\n\n elif direction == 'top':\n heights = new_anchors[:,3] - new_anchors[:,1] + 1\n heights = heights[:,np.newaxis]\n new_anchors[:,[1,3]] = new_anchors[:,[1,3]] - heights/2\n\n elif direction == 'bottom':\n heights = new_anchors[:,3] - new_anchors[:,1] + 1\n heights = heights[:,np.newaxis]\n new_anchors[:,[1,3]] = new_anchors[:,[1,3]] + heights/2\n\n elif direction == 'right':\n widths = new_anchors[:,2] - new_anchors[:,0] + 1\n widths = widths[:,np.newaxis]\n new_anchors[:,[0,2]] = new_anchors[:,[0,2]] + widths/2\n\n elif direction == 'left':\n widths = new_anchors[:,2] - new_anchors[:,0] + 1\n widths = widths[:,np.newaxis]\n new_anchors[:,[0,2]] = new_anchors[:,[0,2]] - widths/2\n\n return new_anchors", "def generate_all_locations(grid, shape):", "def align(self, image, landmark_indices, anchor_points, size=96):\n # Detect face in image and find landmarks\n box = self.detect(image)\n landmarks = self.find_landmarks(image, box)\n\n # Select three points in the landmarks(Eyes and nose)\n points_in_image = landmarks[landmark_indices]\n points_in_image = points_in_image.astype('float32')\n # Generate the normalized output size\n output_size = anchor_points * size\n\n # Calculates the 2 \\times 3 matrix of an affine transform\n affine_transf = cv2.getAffineTransform(points_in_image, output_size)\n\n # Transforms the source image using the specified matrix\n transformed_img = cv2.warpAffine(image, affine_transf, (size, size))\n\n return transformed_img", "def get_im_proposals(point, sizes=(64, 128, 256, 512), aspect_ratios=(0.5, 1, 2),\n layout=['center'], beta=8, include_depth=0):\n anchors = _generate_anchors(point,\n np.array(sizes, dtype=np.float),\n np.array(aspect_ratios, dtype=np.float),\n layout, \n beta,\n include_depth=include_depth)\n\n anchors = _filter_anchors(anchors)\n\n return anchors", "def 
iou_with_anchors(anchors_min, anchors_max, box_min, box_max):\r\n len_anchors = anchors_max - anchors_min\r\n int_xmin = np.maximum(anchors_min, box_min)\r\n int_xmax = np.minimum(anchors_max, box_max)\r\n inter_len = np.maximum(int_xmax - int_xmin, 0.)\r\n union_len = len_anchors - inter_len + box_max - box_min\r\n # print inter_len,union_len\r\n jaccard = np.divide(inter_len, union_len)\r\n return jaccard", "def test_anchor_point(self):\n nb_points = 5\n points = np.array([[1, 2], [2, 1], [3, 7], [7, 2]]) # example of points\n\n anchor_point = convex_hull.lowest_coordinate(points) # anchor point\n right_anchor_point = [2, 1] # the right anchor points\n\n self.assertTrue((anchor_point == right_anchor_point).all())", "def iou_with_anchors(anchors_min, anchors_max, box_min, box_max):\n len_anchors = anchors_max - anchors_min\n int_xmin = np.maximum(anchors_min, box_min)\n int_xmax = np.minimum(anchors_max, box_max)\n inter_len = np.maximum(int_xmax - int_xmin, 0.)\n union_len = len_anchors - inter_len + box_max - box_min\n # print inter_len,union_len\n jaccard = np.divide(inter_len, union_len)\n return jaccard", "def set_projectors_with_anchors(self, anchors):\n self.projectors = []\n for k, ibands in anchors.items():\n if self.wfn_anchor is None:\n ik = self.find_k(k)\n for iband in ibands:\n self.projectors.append(self.get_psi_k(ik)[:, iband])\n else:\n for iband in ibands:\n #print(\"adding anchor\")\n self.projectors.append(self.wfn_anchor[tuple(k)][ :, iband])\n assert len(\n self.projectors\n ) == self.nwann, \"The number of projectors != number of wannier functions\"", "def filter_anchors(anchors, classification_targets, regression_targets, img_width=640, img_height=640):\n anchor_centers = tf.transpose(tf.stack([(anchors[:, 0] + anchors[:, 2]) / 2, (anchors[:, 1] + anchors[:, 3]) / 2]))\n \n outside_wdith_indices = tf.math.logical_or(tf.math.greater_equal(anchor_centers[:, 0], img_width), tf.math.less_equal(anchor_centers[:, 0], 0.))\n outside_height_indices = tf.math.logical_or(tf.math.greater_equal(anchor_centers[:, 0], img_height), tf.math.less_equal(anchor_centers[:, 0], 0.))\n ignore_indices = tf.math.logical_or(outside_wdith_indices, outside_height_indices)\n \n #update\n if tf.shape(ignore_indices)[0]!=0:\n classification_targets = tf.tensor_scatter_nd_update(classification_targets, ignore_indices, tf.constant(-1, shape=tf.shape(ignore_indices)[0], dtype=tf.float32))\n regression_targets = tf.tensor_scatter_nd_update(regression_targets, ignore_indices, tf.constant(-1, shape=tf.shape(ignore_indices)[0], dtype=tf.float32))\n\n return (classification_targets, regression_targets)", "def anchor_inside_flags(flat_anchors,\n valid_flags,\n img_shape,\n allowed_border=0):\n img_h, img_w = img_shape[:2]\n if allowed_border >= 0:\n inside_flags = valid_flags & \\\n (flat_anchors[:, 0] >= -allowed_border) & \\\n (flat_anchors[:, 1] >= -allowed_border) & \\\n (flat_anchors[:, 2] < img_w + allowed_border) & \\\n (flat_anchors[:, 3] < img_h + allowed_border)\n else:\n inside_flags = valid_flags\n return inside_flags", "def process_predictions_and_anchors(self, anchor_list, valid_flag_list,\n cls_scores, bbox_preds, img_metas,\n gt_bboxes_ignore_list):\n num_imgs = len(img_metas)\n assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n # anchor number of multi levels\n num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n num_level_anchors_list = [num_level_anchors] * num_imgs\n\n anchor_list_ = []\n valid_flag_list_ = []\n # concat all level anchors and flags to a 
single tensor\n for i in range(num_imgs):\n assert len(anchor_list[i]) == len(valid_flag_list[i])\n anchor_list_.append(torch.cat(anchor_list[i]))\n valid_flag_list_.append(torch.cat(valid_flag_list[i]))\n\n # compute targets for each image\n if gt_bboxes_ignore_list is None:\n gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n\n num_levels = len(cls_scores)\n cls_score_list = []\n bbox_pred_list = []\n\n mlvl_cls_score_list = [\n cls_score.permute(0, 2, 3, 1).reshape(\n num_imgs, -1, self.num_base_priors * self.cls_out_channels)\n for cls_score in cls_scores\n ]\n mlvl_bbox_pred_list = [\n bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n self.num_base_priors * 4)\n for bbox_pred in bbox_preds\n ]\n\n for i in range(num_imgs):\n mlvl_cls_tensor_list = [\n mlvl_cls_score_list[j][i] for j in range(num_levels)\n ]\n mlvl_bbox_tensor_list = [\n mlvl_bbox_pred_list[j][i] for j in range(num_levels)\n ]\n cat_mlvl_cls_score = torch.cat(mlvl_cls_tensor_list, dim=0)\n cat_mlvl_bbox_pred = torch.cat(mlvl_bbox_tensor_list, dim=0)\n cls_score_list.append(cat_mlvl_cls_score)\n bbox_pred_list.append(cat_mlvl_bbox_pred)\n return (anchor_list_, valid_flag_list_, num_level_anchors_list,\n cls_score_list, bbox_pred_list, gt_bboxes_ignore_list)", "def generate(self, objective, nb_anchor_points=10, nb_samples=1000):\n # No checks are made for duplicate points here. We could try to include something to ensure that the points\n # are somehow separated from each other.\n points = np.array([self.manifold.rand() for i in range(nb_samples)])\n\n if self.matrix_to_vector_transform is not None:\n # Transform the sampled matrix points in vectors\n points = np.array([self.matrix_to_vector_transform(points[i]) for i in range(nb_samples)])\n\n scores = objective(points)[0][:, 0]\n\n anchor_points = points[np.argsort(scores)[:min(len(scores), nb_anchor_points)], :]\n\n return anchor_points", "def _get_target_single(self,\n flat_anchors,\n valid_flags,\n num_level_anchors,\n gt_bboxes,\n gt_bboxes_ignore,\n gt_labels,\n img_meta,\n label_channels=1,\n unmap_outputs=True):\n inside_flags = self.anchor_inside_flags(flat_anchors, valid_flags,\n img_meta['img_shape'][:2],\n self.train_cfg.allowed_border)\n if not inside_flags.any():\n return (None,) * 7\n # assign gt and sample anchors\n anchors = flat_anchors[inside_flags, :]\n\n num_level_anchors_inside = self.get_num_level_anchors_inside(\n num_level_anchors, inside_flags)\n assign_result = self.assigner.assign(anchors, num_level_anchors_inside,\n gt_bboxes, gt_bboxes_ignore,\n gt_labels)\n\n sampling_result = self.sampler.sample(assign_result, anchors,\n gt_bboxes)\n\n num_valid_anchors = anchors.shape[0]\n bbox_targets = torch.zeros_like(anchors)\n bbox_weights = torch.zeros_like(anchors)\n labels = anchors.new_full((num_valid_anchors,),\n self.num_classes,\n dtype=torch.long)\n label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n pos_inds = sampling_result.pos_inds\n neg_inds = sampling_result.neg_inds\n if len(pos_inds) > 0:\n pos_bbox_targets = self.bbox_coder.encode(\n sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n bbox_targets[pos_inds, :] = pos_bbox_targets\n bbox_weights[pos_inds, :] = 1.0\n if gt_labels is None:\n # Only rpn gives gt_labels as None\n # Foreground is the first class since v2.5.0\n labels[pos_inds] = 0\n else:\n labels[pos_inds] = gt_labels[\n sampling_result.pos_assigned_gt_inds]\n if self.train_cfg.pos_weight <= 0:\n label_weights[pos_inds] = 1.0\n else:\n label_weights[pos_inds] = 
self.train_cfg.pos_weight\n if len(neg_inds) > 0:\n label_weights[neg_inds] = 1.0\n\n # map up to original set of anchors\n if unmap_outputs:\n num_total_anchors = flat_anchors.size(0)\n anchors = self.unmap(anchors, num_total_anchors, inside_flags)\n labels = self.unmap(\n labels, num_total_anchors, inside_flags, fill=self.num_classes)\n label_weights = self.unmap(label_weights, num_total_anchors,\n inside_flags)\n bbox_targets = self.unmap(bbox_targets, num_total_anchors, inside_flags)\n bbox_weights = self.unmap(bbox_weights, num_total_anchors, inside_flags)\n\n return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n pos_inds, neg_inds)" ]
[ "0.71884936", "0.7036838", "0.70205647", "0.692611", "0.6890038", "0.6888356", "0.68680495", "0.6852056", "0.68242455", "0.6721483", "0.6713985", "0.6680048", "0.6649211", "0.6587107", "0.6582483", "0.6504692", "0.6504201", "0.65007716", "0.64910555", "0.6475948", "0.64250964", "0.6394033", "0.6307537", "0.6254617", "0.6244812", "0.62378734", "0.62375695", "0.6226987", "0.6209553", "0.6209553", "0.6194187", "0.6147876", "0.61377496", "0.61355245", "0.61320245", "0.61298996", "0.6101787", "0.6072845", "0.6057052", "0.6040109", "0.60259473", "0.59782416", "0.5968743", "0.5888694", "0.58724105", "0.5870127", "0.58482534", "0.58429104", "0.58187217", "0.5788331", "0.5787055", "0.57686347", "0.5766759", "0.5760458", "0.57263315", "0.5701905", "0.5685284", "0.5650548", "0.56492674", "0.56094724", "0.5596735", "0.55952746", "0.5593034", "0.5577508", "0.55716306", "0.5563381", "0.5557359", "0.5517143", "0.55085367", "0.5502636", "0.54365575", "0.5433314", "0.5414815", "0.53940415", "0.53881866", "0.5382773", "0.53789014", "0.5369317", "0.5357586", "0.5349648", "0.5325385", "0.5320151", "0.5317211", "0.5311283", "0.529909", "0.52892536", "0.52833277", "0.5276376", "0.52626264", "0.52508545", "0.5199651", "0.51989514", "0.5193229", "0.5185147", "0.5178092", "0.51655775", "0.5130996", "0.51066", "0.51036614", "0.509842" ]
0.73078704
0
test create new user
def test_create_user(self):
    url = reverse('rest_register')
    data = {
        'username': "tommy",
        'email': "[email protected]",
        'password1': "thisPass",
        'password2': "thisPass",
    }
    resp = self.client.post(url, data)
    self.assertEqual(resp.status_code, 201)
    self.assertIn('key', resp.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_create_user(self):\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n \"password_repeat\": \"supersecret\",\n }\n res = self.post(url=\"/users\", data=data)\n self.assertEqual(res.status_code, 200)\n self.assertIn(b\"Created user.\", res.data)\n\n user = User.query.filter_by(id=6).first()\n self.assertTrue(user)\n self.assertEqual(user.firstname, \"John\")\n self.assertEqual(user.lastname, \"Doe\")\n self.assertFalse(user.is_verified)", "def test_create_user(self):\n first_name = \"b\"\n last_name = \"b\"\n username = \"b\"\n email = \"b\"\n password = \"b\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, username, email, password)\n self.assertTrue(result)\n\n user = User.objects.get(username=username)\n self.assertEqual(first_name, user.first_name)\n self.assertEqual(last_name, user.last_name)\n self.assertEqual(username, user.username)\n self.assertEqual(email, user.email)\n self.assertEqual(password, user.testdata.password)\n self.assertEqual(username, user.testdata.username)\n self.assertEqual(email, user.testdata.email)\n self.assertNotEqual(user.authtests, None)", "def test_add_user(self):\n pass", "def test_create_user(self):\n user = User(\"Gideon Bamuleseyo\", \"[email protected]\", \"secret\")\n self.assertEqual(user.name, \"Gideon Bamuleseyo\")\n self.assertEqual(user.email, \"[email protected]\")\n self.assertEqual(user.password, \"secret\")", "def test_create_user(self):\n \n new_user = {\"username\": \"beny1976\", \"vocab_count\": 0, \"name\": \"beny rood\", \"sex\": \"male\", \"dob\": \"18/10/1979\"}\n msg = app.create_user(predefined_user=new_user)\n self.assertTrue(msg != \"\")", "def test_able_to_create_a_user():\n response = api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])", "def test_good_user_creation(self):\n data = json.dumps({\n \"username\" : \"mark\", \"email\" : \"[email protected]\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 201)", "def test_createuser():\n url = baseUrl + userurl\n payload = user_payload\n logging.info(\"Create a user: %s\" % payload)\n r = requests.post(url, data=json.dumps(payload), headers=header)\n assert r.status_code == 201\n resp = r.text\n assert resp == 'Success'", "def test_create_user(self):\n url = reverse('create_user')\n data = {\n 'first_name': 'Jimbo',\n 'email': '[email protected]',\n 'password': 'jimboland',\n 'postal_code': 'jimbo',\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(User.objects.get().first_name, 'Jimbo')", "def test_create_user(self):\n self.login()\n res = self.submit()\n\n assert res.status_code == 200", "def test_create(self):\n userValue = {'name': 'User Test 1',\n 'login': 'usertest1',\n 'user_profile_id': self.user_profile2.id,\n }\n Users = self.env['res.users']\n user_test = Users.create(userValue)\n newUser = self.env['res.users'].browse(user_test.id)\n self.assertEqual(userValue['name'], newUser['name'])", "def 
test_create_user(self):\n self.assertIsInstance(\n User.objects.create_user(username=\"username\", email=\"[email protected]\", password=\"password\"), User)", "def test_create_simple_user(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'password231',\n 'name': 'vasia'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n u = get_user_model().objects.get(**res.data)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n self.assertTrue(u.check_password(self.payload['password']))\n self.assertEqual(u.email, self.payload['email'])", "def test_user_creation(self):\n self.assertTrue(User.objects.exists())", "def test_new_user(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"1c6cd9c1-ca4c-41fe-b369-912075a5d3ce\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_contact_email_primary\": \"[email protected]\",\n \"lis_person_sourcedid\": \"new_user\",\n },\n passport,\n )\n\n self.assertEqual(\"new_user\", new_user.public_username)\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"[email protected]\", new_user.email)\n self.assertEqual(\"new_user@consumer\", new_user.username)\n self.assertEqual(user_count + 1, get_user_model().objects.count())", "def test_user_creation(self):\r\n \r\n self.assertIsInstance(self.user, User)", "def test_create_user_exists(self):\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'test123'\n }\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user(self):\n url = reverse('rest_register')\n data = {\n 'email': '[email protected]',\n 'password1': 'notshortpassword',\n 'password2': 'notshortpassword'\n }\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertEqual(User.objects.get().email, '[email protected]')", "def test_create_new_user(self):\n\n\t\tdata = {'username': u'Test_User',\n\t\t\t\t\t'password': u'test',\n\t\t\t\t\t'work': u'88 7th Avenue, New York, NY, United States',\n\t\t\t\t\t'home': u'152 Lexington Avenue, New York, NY, United States',\n\t\t\t\t\t'homeLngLat': u'-73.98199699999998 40.743772',\n\t\t\t\t\t'workLngLat': u'-74.0014936 40.7396046'}\n\n\t\t# Add Test_User to the database\n\t\tserver.create_new_user(data)\n\n\t\tnew_user = db.session.query(User).filter(User.username=='Test_User').one()\n\n\t\t# new_user would return none if it did not exist in the db\n\t\tself.assertTrue(new_user, 'Test_User was not sucessfully added to db.')\n\t\tself.assertNotEqual(new_user.password, 'password', 'Password likely not hashed before stored in db.')", "def test_new_user(self):\n json_resp = make_user(self.client)\n # check api response\n self.assertEqual(json_resp['status'], 'user registered')\n self.assertEqual(json_resp['username'], 'Dan')\n # check that user is in database\n self.assertEqual(User.query.count(), 1)\n\n # check malformed query\n resp = self.client.post('/user/',\n headers=api_headers(),\n data=json.dumps({'username': 'Dan'}))\n json_resp = json.loads(resp.data.decode('utf-8'))\n # 
check api response\n self.assertEqual(resp.status, '400 BAD REQUEST')\n self.assertEqual(json_resp['status'], 'missing fields')\n self.assertEqual(json_resp['missing'], ['email', 'password'])", "def create_test_user():\n return User.objects.create(username='test_username', password='test_password')", "def test_create_user(self):\n #open the django admin page.\n self.selenium.get(\n '%s%s' % (self.live_server_url, \"/admin\")\n )\n\n #fill in login information of admin\n username = self.selenium.find_element_by_id(\"id_username\")\n username.send_keys(\"admin\")\n password = self.selenium.find_element_by_id(\"id_password\")\n password.send_keys(\"admin\")\n\n #locate login button and click it.\n self.selenium.find_element_by_xpath('//input[@value=\"Inloggen\"]').click()\n self.selenium.get(\n '%s%s' % (self.live_server_url, \"/admin/auth/user/add/\")\n )\n\n # Fill the create user form with username and password\n self.selenium.find_element_by_id(\"id_username\").send_keys(\"test\")\n self.selenium.find_element_by_id(\"id_password1\").send_keys(\"test1234\")\n self.selenium.find_element_by_id(\"id_password2\").send_keys(\"test1234\")\n\n # Forms can be submitted directly by calling its method submit\n self.selenium.find_element_by_id(\"user_form\").submit()\n self.assertIn(\"Change user\", self.selenium.title)", "def test_create_user(self):\n url = reverse('signup')\n data = {'username': 'ctest', 'name': 'name', 'password': 'ctest12345', 'bio': 'bio',\n 'phoneNumber': '9382593895', 'city': 'tehran', 'email': '[email protected]', 'device': 'android'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_user_exists(self):\n\n payload = {\n 'email': '[email protected]',\n 'password': 'test11',\n 'name': \"test name\"\n }\n\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create(self, client):\n count = get_user_model().objects.count()\n data = factory.build(dict, FACTORY_CLASS=UserFactory)\n url = reverse('users:create')\n response = client.post(url, data)\n assert response.status_code == 302\n\n user = get_user_model().objects.last()\n assert user.username == data['username']\n assert user.birthday == datetime.datetime.strptime(data['birthday'], '%Y-%m-%d').date()\n assert get_user_model().objects.count() == count + 1", "def test_create_valid_user_success(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'test11',\n 'name': \"test name\"\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n user = get_user_model().objects.get(**res.data)\n self.assertTrue(user.check_password, payload['password'])\n self.assertNotIn('password', res.data)", "def test_create_user(self):\n email = '[email protected]'\n password = 'testPass'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n\n self.assertEqual(user.email, email)\n self.assertEqual(user.role, Role.PLAYER)\n self.assertTrue(user.check_password(password))\n self.assertTrue(user.is_active)\n self.assertFalse(user.is_staff)", "def test_create_valid_user_success(self):\n payload = {\n 'email': \"[email protected]\",\n 'password': 'testing004',\n 'name': 'Test name'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n user = get_user_model().objects.get(**res.data)\n 
self.assertTrue(user.check_password(payload['password']))\n self.assertNotIn('password', res.data)", "def test_create_valid_user_success(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass',\n 'name': 'TestName'\n }\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n \n user = get_user_model().objects.get(**res.data)\n \n self.assertTrue(user.check_password(payload['[email protected]', \n 'testpass']))\n self.assertNotIn('testpass', res.data)", "def test_user_exists(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'testpass',\r\n 'name': 'Maks'\r\n }\r\n create_user(**payload)\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_creation(self):\n user = UserModel.objects.create_user(\n username=\"saimer\"\n )\n self.assertEqual(user.email, \"\")\n self.assertEqual(user.username, \"saimer\")\n self.assertFalse(user.has_usable_password())", "def test_create_user(self):\n self.assertEqual(self.new_user.first_name, 'John')\n self.assertEqual(self.new_user.last_name, 'Doe')\n self.assertEqual(self.new_user.username, 'john_doe')\n self.assertEqual(self.new_user.email, '[email protected]')\n self.assertTrue(self.new_user.check_password('test_password'))\n self.assertFalse(self.new_user.is_staff)\n self.assertFalse(self.new_user.is_superuser)\n self.assertFalse(self.new_user.is_active)\n\n with self.assertRaises(ValueError):\n User.objects.create_user(\n first_name='', last_name='', username='', email='', bio='', password=''\n )", "def test_user_exists(self):\n payload = {'email': '[email protected]','password': 'testpass'}\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_valid_user_success(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_201_CREATED\n user = get_user_model().objects.get(**res.data)\n assert user.check_password(payload['password'])\n assert 'password' not in res.data", "def test_add_new_user(self):\n\n result = self.client.post(\"/add_new_user\",\n data={\"user_name\": \"rachel\", \"password\": \"123\", \"email\": \"[email protected]\"},\n follow_redirects=True)\n self.assertIn(\"<p>Please sign in</p>\", result.data)", "def test_create_valid_user_success(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'testpass1234',\r\n 'name': 'Tester'\r\n }\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\r\n\r\n user = get_user_model().objects.get(**res.data)\r\n self.assertTrue(user.check_password(payload['password']))\r\n\r\n self.assertNotIn('password', res.data)", "def test_admin_create_user(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Summer Love has been registered')\n self.assertEqual(resp.status_code, 201)", "def 
test_user_exists(self):\n payload = {\n 'email': '[email protected]',\n 'password': '123PassW0rd',\n 'name': 'Test Name'\n }\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEquals(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_valid_user_success(self):\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'test123'\n }\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n user = get_user_model().objects.get(**res.data)\n self.assertTrue(user.check_password(payload['password']))\n self.assertNotIn('password', res.data)", "def test_add_new_user_to_db(self):\n\n test_user = 'test_first_user'\n test_password = 'liamNees0n_T4k3n'\n user_object = User(username=test_user, password=test_password)\n db.session.add(user_object)\n db.session.commit()\n self.assertEqual(user_object.username, 'test_first_user')", "def test_user_exists(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass123'\n }\n create_user(**payload)\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_exists(self):\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test',\n 'last_name': 'JustUser'\n }\n sample_user(\n payload['user']['email'], payload['user']['password'],\n payload['user']['is_doctor'], payload['user']['is_hospital_admin']\n ),\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_valid_user_success(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass123',\n 'name': 'Haider'\n }\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n user = get_user_model().objects.get(**res.data)\n self.assertTrue(user.check_password(payload['password']))\n self.assertNotIn('password', res.data)", "def test_create_user_valid(self):\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n user = get_user_model().objects.get(**res.data)\n\n self.assertTrue(user.check_password(self.mock_user['password']))\n self.assertNotIn('password', res.data)", "def setUp(self):\n self.new_user = User.objects.create_user(first_name='John', last_name='Doe', username='john_doe', email='[email protected]', bio='I am new here.', password='test_password', website='example.com', social_media={\n 'facebook':'Facebook link',\n 'Dribble': 'Dribble link',\n })", "def test_create_user(self):\n user = User(email=\"[email protected]\", password=\"testpassword\")\n\n self.assertEqual(user.email, \"[email protected]\")\n self.assertNotEqual(user.password, \"testpassword\")\n self.assertFalse(user.confirmed)\n self.assertIsNone(user.confirmed_at)\n self.assertIsNotNone(user.created_at)\n self.assertIsNotNone(user.confirmation_token)", "def test_user_creation(self):\n username = 'Smith'\n password = 'password'\n email = '[email protected]'\n new_user = User.objects.create_user(username)\n new_user.set_password(password)\n new_user.email = email\n new_user.save()\n new_profile = Profile()\n new_profile.user = new_user\n new_profile.save()\n self.assertEqual(username, new_user.username)\n self.assertEqual(email, 
new_user.email)\n self.assertTrue(authenticate(username=new_user.username, password=password))", "def test_create_user(self):\n User.objects.create_user(username='abc', password='abcpass', email='[email protected]')\n user_obj = User.objects.get(username='abc')\n self.assertTrue(user_obj.email, \"[email protected]\")\n self.assertEqual(str(user_obj), \"abc\")", "def test_create_valied_user_success(self):\n # requirments for creating user\n payload = {\n 'email': '[email protected]',\n 'password': 'abcd1234',\n 'name': 'Test name'\n }\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Check if statuscode returns a HTTP201 exception when created\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Test that the user is actually created\n # response.data is a dic responce like our payload\n # but with an additional id field\n user = get_user_model().objects.get(**response.data)\n # this will assert that the password is true\n self.assertTrue(user.check_password(payload['password']))\n # Ensure that password is not returned in the request\n # because it is a potential security voulnarability\n self.assertNotIn('password', response.data)", "def test_registering_user(self):\n new_user = self.app\n new_user.create_user()\n client = app.test_client(self)\n response = client.post('/login', follow_redirects=True)\n self.assertEqual(response.status_code, 200)", "def test_user_existence(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'Testpass12',\n 'name': 'Test Name'\n }\n get_user_model().objects.create_user(**credentials)\n\n # Check that this is a bad request since the user does already exists.\n response = self.client.post(URL_CREATE_USER, credentials)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def setUp(self):\n self.new_user = User('JosphatOtieno','jose@otis45')", "def test_user_exists(self):\n payload = {'email': '[email protected]', 'password': 'password'}\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_api_can_create_users(self):\n res = self.client().post('/api/v1/user/', data = self.req)\n self.assertEquals(res.status_code, 200)\n self.assertIn('mary', str(res.data))", "def test_create_new_user(client):\n user = \"[email protected]\"\n payload = {\n \"email\": user,\n \"password\": \"1234\"\n }\n\n response = client.post(\"/auth/register\", json=payload)\n assert response.status_code == 201\n assert response.json()[\"email\"] == user", "def test_create_user(self):\n data = {\n 'email': '[email protected]',\n 'password': 'somepassword'\n }\n\n response = self.client.post(self.create_url, data, format='json')\n\n # And that we're returning a 201 created code.\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n # Additionally, we want to return the username and email upon successful creation.\n self.assertEqual(response.data['email'], data['email'])\n self.assertFalse('password' in response.data)", "def test_user_creation(self):\n self.assertEqual(CustomUser.objects.count() - self.user_count, 2)\n self.assertEqual(self.new_student_user.username, 'Student')\n self.assertTrue(self.new_student_user.password, 'student12345')\n self.assertEqual(self.new_instructor_user.username, 'Instructor')\n self.assertTrue(self.new_instructor_user.password, 'instructor12345')", "def test_create_user_same_username(self):\n first_name = \"a\"\n 
last_name = \"a\"\n username = \"a\"\n email = \"a\"\n password = \"a\"\n\n manager = UserManager()\n result = manager.create(first_name, last_name, username, email, password)\n self.assertFalse(result)", "def test_register_new_user(self):\n with self.client:\n response = self.client.post(\n url_for('register'),\n data=dict(\n first_name='Admin',\n last_name='Admin',\n email='[email protected]',\n password='admin2016',\n confirm_password='admin2016'\n ),\n follow_redirects=True\n )\n self.assertEqual(response.status_code, 200)", "def test_should_create_user_when_give_password_and_username(self):\n data = {'username': 'zhe_xu', 'password': '123'}\n response = self.client.post('/api/users', data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(User.objects.count(), 1)\n self.assertTrue(check_password('123', User.objects.get().password))\n self.assertNotEqual('123', User.objects.get().password)", "def test_create_user_user_exists(self):\n create_mock_user(**self.mock_user)\n\n res = self.client.post(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def setUp(self):\n self.new_user = User(\"Juma\",\"12345\")", "def test_create_defined_user(self):\r\n self._auto_auth(\r\n username='robot', password='test',\r\n email='[email protected]', full_name=\"Robot Name\"\r\n )\r\n\r\n # Check that the user has the correct info\r\n user = User.objects.get(username='robot')\r\n self.assertEqual(user.username, 'robot')\r\n self.assertTrue(user.check_password('test'))\r\n self.assertEqual(user.email, '[email protected]')\r\n\r\n # Check that the user has a profile\r\n user_profile = UserProfile.objects.get(user=user)\r\n self.assertEqual(user_profile.name, \"Robot Name\")\r\n\r\n # By default, the user should not be global staff\r\n self.assertFalse(user.is_staff)", "def create_user(self):\n User.objects.create_user('test', '[email protected]', 'testing')", "def test_create_valid_user(self):\n\n credentials = {\n 'email': '[email protected]',\n 'password': 'Testpass12',\n 'name': 'Test Name'\n }\n response = self.client.post(URL_CREATE_USER, credentials)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Check that the object has actually been created properly.\n user = get_user_model().objects.get(**response.data)\n self.assertTrue(user.check_password(credentials['password']))\n\n # Check that the HTTP response does not include the password.\n self.assertNotIn('password', response.data)", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_create_user_page(self):\n url = reverse('admin:core_user_add')\n res = self.client.get(url)\n\n self.assertEqual(res.status_code, 200)", "def test_check_user(self):\n self.new_user.save_user()\n test_user = User(\"Test\", \"user\", \"test\", \"walIas15\")\n test_user.save_user()\n test_user.check_user(\"test\", \"walIas15\")", "def test_create_user(self):\n data = {\n 'username': 'foobar',\n 'email': '[email protected]',\n 'password': 'somepassword'\n }\n\n request = self.factory.post(self.create_url, data, format='json')\n view = 
UserViewSet.as_view({\"post\": \"create\"})\n response = view(request)\n self.assertEqual(User.objects.count(), 2)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['username'], data['username'])\n self.assertEqual(response.data['email'], data['email'])\n self.assertFalse('password' in response.data)", "def test_create_new_user(self):\n data = {\n 'username': 'John',\n 'email': '[email protected]',\n 'password': 'test123!',\n 'phone': '1234567890',\n 'first_name': 'Chuck',\n 'last_name': 'Norris',\n 'gender': \"M\",\n 'birthdate': \"1999-11-11\",\n }\n\n response = self.client.post(\n reverse('user-list'),\n data,\n format='json',\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(json.loads(response.content)['phone'], '1234567890')\n\n user = User.objects.get(email=\"[email protected]\")\n activation_token = ActionToken.objects.filter(\n user=user,\n type='account_activation',\n )\n\n self.assertEqual(1, len(activation_token))", "def test_signup(self):\n response = self.client.post('/user/', {\n 'username': 'aseem123', 'password': 'passwrodaosida123'\n })\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def test_create_user_with_email_success(self):\n email = '[email protected]'\n password = 'azerty'\n user = get_user_model().objects.create_user(\n email = email,\n password = password\n )\n\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def setUp(self):\n self.new_user = User(username=\"Hey\")\n self.new_user.save()", "def test_create_user(self):\r\n self._auto_auth()\r\n self.assertEqual(User.objects.count(), 1)\r\n self.assertTrue(User.objects.all()[0].is_active)", "def test_user_is_really_create():\n response = api_helper.get_user(user_name=pytest.test_user.name)\n assert response.status_code == 200\n response_json_data = response.json()[\"data\"]\n assert len(response_json_data) == 1\n check_user_data_in_response(response_json_data[0])\n pytest.test_user.id = response_json_data[0]['id']", "def setUp(self):\n self. 
user = User.objects.create_user(username='fredbob',\n first_name='Fred',\n last_name='Bob',\n email='[email protected]',\n password='foobar')", "def setUp(self):\n self.new_user = User(\"Hamisi\",\"python\")", "def test_user_create(self):\n\n # Creates event\n event = {\n \"clientId\": 2,\n \"username\": \"user\" + randstr(),\n \"pwd\": \"password\",\n \"nameLast\": \"User\",\n \"nameFirst\": \"Joe\",\n \"email\": \"[email protected]\" + randstr(),\n \"phone\": \"123-4567\",\n \"profilePicturePath\": \"/\",\n \"timezoneDefault\": \"EST\",\n \"languageDefault\": \"English\"\n }\n\n # Generates expected value\n expected = {\n 'statusCode': 200,\n 'body': '{\"success\": true, \"apicode\": \"OK\", \"apimessage\": \"User successfully created.\", \"apidataset\": {\"message\": \"User successfully created!\"}}'\n }\n\n # Invokes\n actual = handler.user_create(event=event, context=None)\n\n # Validates response\n self.assertEqual(expected, actual)", "def test_user_exists(self):\n # requirments for creating user\n payload = {\n 'email': '[email protected]',\n 'password': 'abcd1234',\n 'name': 'Test',\n }\n\n # call the create function above\n create_user(**payload)\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Check if statuscode returns a HTTP400 bad request\n # becos user already exist\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_with_email_successfull(self):\n email = '[email protected]'\n password = 'fghdjdkri'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_user_signup_valid(self):\n\n u = User.signup(\n username=\"testuser\",\n email=\"[email protected]\",\n password=\"HASHED_PASSWORD\",\n image_url=User.image_url.default.arg\n )\n\n db.session.commit()\n\n u.id = 9999\n\n user = User.query.get(u.id)\n username = user.username\n\n self.assertEqual(username, \"testuser\")", "def test_create_user_with_email_successful(self):\n email = \"[email protected]\"\n password = \"loremIpsumDolor\"\n user = get_user_model().objects.create_user(\n email=email,\n password=password,\n first_name=\"Hakan\",\n last_name=\"Yalcinkaya\",\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def create_testuser(app, created_models, verbosity, **kwargs):\n if not settings.DEBUG:\n return\n try:\n auth_models.User.objects.get(username='test')\n except auth_models.User.DoesNotExist:\n print '*' * 80\n print 'Creating test user -- login: test, password: test'\n print '*' * 80\n assert auth_models.User.objects.create_superuser('test', '[email protected]', 'test')\n else:\n print 'Test user already exists'", "def setUp(self):\n account_models.User.objects.create_user(email='[email protected]', password='WhoAmI', username='aov1')", "def setup_test_user(self):\n self.setup_test_tenant()\n self.test_user = rand_name('test_user_')\n self.test_password = rand_name('pass_')\n self.test_email = self.test_user + '@testmail.tm'\n resp, self.user = self.client.create_user(self.test_user,\n self.test_password,\n self.tenant['id'],\n self.test_email)\n self.users.append(self.user)", "def test_create_user_object():\n from .scripts.initializedb import create_user_object\n user_object = create_user_object(\"test\", \"test\", \"test\")\n assert isinstance(user_object, User)", "def setUp(self):\n self.user = 
User.objects.create_user(username='Marry', email='[email protected]', password='secret')\n self.user.first_name = 'Marry'\n self.user.last_name = 'Tomson'\n self.user.save()", "def setUp(self):\n self.new_user = User(username='burens', password='12345')", "def users_create():", "def test_user():\n user_data = {\n \"name\": \"Brad\",\n \"username\": \"brad345\",\n \"email\": \"[email protected]\",\n \"password\": \"facebook\",\n \"location\": {\n \"city\": \"Philadelphia\",\n \"state\": \"Pennsylvania\",\n \"country\": \"United States\"\n }\n }\n return UserFactory.create_user(user_data)", "def create_test_user(self):\n user = User.objects.create_user(\n username='[email protected]', password='password')\n user.groups.add(self.group)\n user.user_permissions.add(p('wagtailadmin.access_admin'))\n user.save()\n return user", "def test_create_user_whit_email_successfull(self):\n email = '[email protected]'\n password = 'pass123'\n user = get_user_model().objects.create_user(\n email=email,\n password=password\n )\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.UserDetails), 1)" ]
[ "0.9118798", "0.9118798", "0.9118798", "0.8665263", "0.85160905", "0.84581566", "0.8421159", "0.8413208", "0.84032094", "0.8297671", "0.8266993", "0.82470185", "0.82303685", "0.821916", "0.8214676", "0.8203459", "0.81913304", "0.817495", "0.8174881", "0.8171025", "0.81610763", "0.8159575", "0.8154422", "0.81356037", "0.8132863", "0.8123375", "0.8114264", "0.8112461", "0.81074226", "0.8104734", "0.81005675", "0.8085181", "0.8079479", "0.8074248", "0.80550194", "0.8047637", "0.8042489", "0.80412817", "0.8040879", "0.803351", "0.8032466", "0.8017892", "0.80165213", "0.7997706", "0.7993867", "0.7988993", "0.79880625", "0.79696375", "0.7953411", "0.7946349", "0.79448044", "0.7941185", "0.7926936", "0.79168326", "0.78968483", "0.7895528", "0.7894193", "0.7889952", "0.7878759", "0.7875682", "0.78624624", "0.7855325", "0.7854874", "0.7844239", "0.7836511", "0.78212184", "0.78099835", "0.78039294", "0.7797497", "0.7781747", "0.7781747", "0.7781747", "0.7781747", "0.7776628", "0.77726936", "0.777099", "0.7756712", "0.7747291", "0.77397877", "0.7739548", "0.7739486", "0.7733395", "0.77332985", "0.7730516", "0.77214986", "0.77169454", "0.7708076", "0.77055484", "0.7701529", "0.7701249", "0.76985776", "0.7696019", "0.76919895", "0.76916045", "0.768571", "0.76835865", "0.7678119", "0.7672358", "0.76683134", "0.76659834" ]
0.7802782
68
Authorization header credentials are not valid
def bad_credentials(self, token = True): # Should not match up with anything in the database bad_creds = str(uuid.uuid4()) if token: return self.credentials(HTTP_AUTHORIZATION=("Token %s" % bad_creds)) else: return self.credentials(HTTP_AUTHORIZATION=("Basic %s" % bad_creds))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unauthorized():\n return HttpError(401)", "def authenticate_header(self, request):\n return \"Api key authentication failed.\"", "def test_authorization_header_empty(self, get_key_secret):\r\n request = Request(self.environ)\r\n request.authorization = \"bad authorization header\"\r\n request.body = self.get_request_body()\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'failure',\r\n 'description': 'OAuth verification error: Malformed authorization header',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def test_authorization_header_empty(self, _get_key_secret):\n request = Request(self.environ)\n request.authorization = \"bad authorization header\"\n request.body = self.get_request_body()\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': 'OAuth verification error: Malformed authorization header',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def test_headers(self):\n token = 'abc123'\n requests.get(self.url, auth=BearerAuth(token))\n self.assertEqual(httpretty.last_request().headers['Authorization'], 'Bearer {}'.format(token))", "def test_authorization_header_not_present(self, get_key_secret):\r\n request = Request(self.environ)\r\n request.body = self.get_request_body()\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'failure',\r\n 'description': 'OAuth verification error: Malformed authorization header',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def test_authorization_header_not_present(self, _get_key_secret):\n request = Request(self.environ)\n request.body = self.get_request_body()\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': 'OAuth verification error: Malformed authorization header',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def auth_error():\n return unauthorized('Invalid credentials')", "def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)", "def validate_auth_header(headers):\n if current_app.auth_db is None:\n raise NoAuthenticationDatabaseException\n\n if \"Authorization\" not in headers:\n raise NoAuthHeaderException\n\n auth_header = headers[\"Authorization\"].split(\" \")\n\n if len(auth_header) < 2 or auth_header[0] != \"Bearer\":\n raise InvalidAuthHeaderException\n\n token = auth_header[1]\n\n decoded = current_app.authenticator.decode_token(token)\n\n g.client_data = decoded\n\n if datetime.datetime.utcnow() > decoded.expiration:\n raise ExpiredTokenException\n\n database_token = current_app.auth_db.lookup_token(decoded.token_id)\n\n if database_token != decoded:\n raise InvalidTokenException\n\n return 
decoded", "def _require_login(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + str(self.token))", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})", "def test_error_find_invalid_authorization_header(self, test_client):\n url = '/api/v1/auth/me'\n headers = {\n 'Authorization': 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9'\n '.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ'\n '.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c '\n }\n response = test_client.get(url, headers=headers)\n\n assert response.status_code == 422\n assert response.json['msg'] == \"Signature verification failed\"", "def authenticate():\n return Response('Not Authorized', 401, {'WWW-Authenticate': 'Basic realm=\"api\"'})", "def invalid_auth_token_header():\n headers = '{\"Host\":\"$host\",\"User-Agent\":\"$user_agent\",\"Date\":\"DATE\",'\n headers += '\"Accept\": \"application/json\",\"Accept-Encoding\": \"gzip\",'\n headers += '\"X-Project-ID\": \"$project_id\",'\n headers += '\"X-Auth-Token\": \"InvalidToken\"}'\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host,\n project_id=CFG.project_id,\n user_agent=CFG.user_agent)", "def authenticate():\n return Response(\n 'Could not verify your credentials for that url', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def test_unhappy_path_unauthorized(self):\n\n response = self.client.post(self.url)\n expected_data = {\"detail\": \"Authentication credentials were not provided.\"}\n\n self.assertDictEqual(response.data, expected_data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def authenticate(self):\n resp = Response(None, 401)\n abort(401, description='Please provide proper credentials', response=resp)", "def test_unhappy_path_unauthorized(self):\n\n response = self.client.get(self.url)\n expected_data = {\"detail\": \"Authentication credentials were not provided.\"}\n\n self.assertDictEqual(response.data, expected_data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_error_find_no_authentication_header(self, test_client):\n url = '/api/v1/auth/me'\n response = test_client.get(url)\n\n assert response.status_code == 401\n assert response.json['msg'] == 'Missing Authorization Header'", "def test_auth_required(self):\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_required(self):\n res = self.client.get(RECIPE_URL)\n self.assertEqual(res.status_code,status.HTTP_401_UNAUTHORIZED)", "def authenticate():\n return abort(401)", "def test_invalid_token_admin(self):\n invalid_token = {\n \"Content-Type\" : \"application/json\",\n \"x-access-token\" : \"eyJ0eXAiOiJK6MTUyNjczNzQ5Nvm2LkbWLZF2RuD32FBvgG8KyM\"}\n response = self.app.get(\n '/api/v3/users',\n headers=invalid_token)\n self.assertEqual(response.status_code, 401)", "def test_auth_required(self):\n res = self.client.get(RECIPE_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_required(self):\n res = self.client.get(RECIPE_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def auth_key(event):\n headers = event.get('header')\n if not headers:\n raise RestException(\"Headers are missing\", 400)\n auth = headers.get('Authorization')\n if not auth:\n raise RestException('Header Authorization is missing', 400)\n if not 
auth.lower().startswith('bearer '):\n raise RestException(\"Authorization missing Bearer keyword\", 400)\n auth = auth.replace('Bearer ', '')\n auth = auth.replace('bearer ', '')\n return auth.strip()", "def authenticate(self):\n abort(\n 401,\n description=self.exception,\n www_authenticate=(\"WWW-Authenticate\", 'Basic realm=\"%s\"' % __package__),\n )", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def test_authorization_is_enforced(self):\n new_client = APIClient()\n res = new_client.get('/bucketlists/', kwargs={'pk': 2}, format=\"json\")\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_required_auth(self):\n res = self.client.get(MOVIES_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_required(self):\n\n res = self.client.get(QUIZZES_URL)\n\n self.assertTrue(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def _auth_headers(self):\n if self.token_str:\n return {'Authorization': 'Bearer {}'.format(self.token_str)}\n else:\n return {}", "def test_user_list_get_with_invalid_auth(client):\n\n response = client.get(\n \"/users\",\n headers={\"Accept\": \"application/vnd.api+json\", \"Authorization\": \"abcdefg\"},\n )\n assert response.status_code == 422\n assert get_content_type(response) == \"application/vnd.api+json\"\n assert json.loads(response.data.decode()) == {\n \"errors\": [\n {\n \"status\": 422,\n \"title\": \"Unprocessable Entity\",\n \"detail\": \"Bad Authorization header. 
Expected value 'Bearer <JWT>'\",\n }\n ]\n }", "def test_auth_required(self):\n\n res = self.client.get(SERVICES_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def testAuthorizationMalformedClientSecretInHeader(self):\n client = getTestPasswordClient('malformedSecret')\n client.secret = b'malformedSecret\\xFF\\xFF'\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n self._addAuthenticationToRequestHeader(request, client)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, MalformedParameterError('client_secret'),\n msg='Expected the token resource to reject a '\n 'request with a malformed Authorization header.')", "def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login 
with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def unauthorized():\n return {'errors': ['Unauthorized']}, 401", "def authenticate():\r\n return Response(\r\n 'Could not verify your access level for that URL.\\n'\r\n 'You have to login with proper credentials', 401,\r\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "async def authorization(request):\n # Decode tokens, load/check users and etc\n # ...\n # in the example we just ensure that the authorization header exists\n return request.headers.get(\"authorization\", \"\")", "def _filter_headers(self):\n return [\"Authorization\"]", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials.', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def __init__(self,Authorization: str = Header(None)):\n if Authorization:\n if re.match(r\"Bearer\\s\",Authorization) and len(Authorization.split(' ')) == 2 and Authorization.split(' ')[1]:\n self._TOKEN = Authorization.split(' ')[1]\n # verified token and check if token is revoked\n raw_token = self._verified_token(encoded_token=self._TOKEN)\n # if connection redis is available check token revoke\n self._is_redis_available()\n self._check_token_is_revoked(raw_token['jti'])\n else:\n raise HTTPException(status_code=422,detail=\"Bad Authorization header. 
Expected value 'Bearer <JWT>'\")", "def authenticate():\n\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n get_auth_headers())", "def authenticate():\n return Response(\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def test_authentication_required(self):\n self.client.logout()\n response = self.client.get(self.path, content_type=JSON_CONTENT_TYPE)\n assert response.status_code == 401", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n\treturn Response(\n\t'Could not verify your access level for that URL.\\n'\n\t'You have to login with proper credentials', 401,\n\t{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response('Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def test_auth_required(self):\n res = self.client.get(RECIPES_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_token_format(self):\n bearer_token =self.download.get_authorization()\n bearer = bearer_token.split(' ')[0]\n self.assertEqual('Bearer', bearer)", "def user_must_authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authorized(fn):\n\n def _wrap(*args, **kwargs):\n if 'Authorization' not in request.headers:\n # Unauthorized\n print(\"No token in header\")\n abort(401)\n\n\n if key not in request.headers['Authorization']:\n # Unauthorized\n print(\"Key not in auth 
header\")\n abort(401)\n\n return fn(*args, **kwargs)\n return _wrap", "def test_unauthorized_access(flask_test_client, http_method, endpoint):\n response = flask_test_client.open(\n method=http_method, path=endpoint, headers=get_headers()\n )\n assert response.status == \"401 UNAUTHORIZED\"\n assert response.content_type == \"application/json\"\n assert response.json[\"message\"] == \"Access token is invalid or expired.\"", "def forget(self, request):\n return [('WWW-Authenticate', 'Bearer realm=\"%s\"' % self.realm)]", "def testAuthorizationClientAuthInHeader(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n self._addAuthenticationToRequestHeader(request, self._VALID_CLIENT)\n newAuthToken = 'tokenWithAuthInHeader'\n self._TOKEN_FACTORY.expectTokenRequest(newAuthToken, self._TOKEN_RESOURCE.authTokenLifeTime,\n self._VALID_CLIENT, self._VALID_SCOPE)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self._TOKEN_FACTORY.assertAllTokensRequested()\n self.assertValidTokenResponse(\n request, result, newAuthToken,\n self._TOKEN_RESOURCE.authTokenLifeTime, expectedScope=self._VALID_SCOPE)", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_loggin_required(self):\n response = self.client.get(RESGATE_URL)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials',\n 401,\n {\n 'WWW-Authenticate': 'Basic realm=\"Login Required\"'\n }\n )", "def api_auth_validate(request, access_key):\n if not request.is_json:\n return {'error' : 'Bad request, payload must be JSON', 'code' : 400}\n if not 'working_repo' in session:\n return {'error' : 'Operation requires authentication', 'code': 401}\n if session['working_repo'] != access_key:\n return {'error' : 'Not authorized for this operation', 'code' : 403}\n \n return True", "def fresh_jwt_required(self) -> None:\n if not self._TOKEN:\n raise HTTPException(status_code=401,detail=\"Missing Authorization Header\")\n\n if self.get_raw_jwt()['type'] != 'access':\n raise HTTPException(status_code=422,detail=\"Only access tokens are allowed\")\n\n if not self.get_raw_jwt()['fresh']:\n raise HTTPException(status_code=401,detail=\"Fresh token required\")", "def auth_isok(self):\n # pylint: disable=W0603\n global KEY\n return_value = False\n if KEY is None:\n return_value = True\n elif self.headers.get('Authorization') == 'Basic ' + KEY:\n return_value = True\n return return_value", "def authenticate():\n return Response(\n '', 401, {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate(self):\n return Response(\n 'Could not verify your access level for that URL.\\nYou have to login with proper credentials',\n 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def assertHttpUnauthorized(self, resp):\r\n return self.assertEqual(resp.status_code, 401)", "def test_retrieve_user_unauthorized(self):\n # HTTP GET Request\n response = self.client.get(ME_URL)\n\n # If you call the URL without authorization\n self.assertEqual(response.status_code, 
status.HTTP_401_UNAUTHORIZED)", "def test_authorization(self):\n res = self.get(url=\"/products/1/pricehistory\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)\n res = self.get(url=\"/products/1/pricehistory\", role=\"user\")\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.UnauthorizedAccess)", "def token_header(token):\n message = '{token}:ignored'.format(token=token)\n return {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}" ]
[ "0.7151656", "0.6980444", "0.69387716", "0.6932599", "0.69088596", "0.6879544", "0.6865807", "0.68371814", "0.6741195", "0.6717008", "0.67152405", "0.668238", "0.66528887", "0.66374475", "0.66084486", "0.65807706", "0.6570014", "0.6560585", "0.6551", "0.65419096", "0.65318316", "0.6519331", "0.6512592", "0.6506085", "0.64875823", "0.64875823", "0.64867264", "0.6466971", "0.6436504", "0.64360565", "0.6430462", "0.6415947", "0.64036644", "0.63697", "0.6364254", "0.63523257", "0.63480544", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.63440454", "0.6343098", "0.63265854", "0.6323362", "0.63032305", "0.62958276", "0.62958276", "0.62918425", "0.6284504", "0.62689465", "0.6256831", "0.6253584", "0.6249941", "0.6249941", "0.6249941", "0.6249941", "0.6249941", "0.6249941", "0.6249941", "0.6249941", "0.6234878", "0.6214468", "0.6210138", "0.62013453", "0.6187005", "0.6172058", "0.6169424", "0.616382", "0.61599344", "0.6150139", "0.6146447", "0.61314404", "0.6127301", "0.61270875", "0.6114946", "0.61095655", "0.6108782", "0.6106697", "0.61045736", "0.61006725", "0.6093408", "0.60835916", "0.6083509" ]
0.63877547
33
Returns a base64 encoded image
def get_image_base64(path): with open(path, 'r') as img: return base64.b64encode(img.read())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def base64(self):\n image = self.png.getvalue()\n return base64.encodestring(image).decode('utf-8')", "def b64_image(self) -> bytes:\n buffer = BytesIO()\n self.image.save(buffer, \"PNG\") \n im_b64 = base64.b64encode(buffer.getvalue())\n im_b64 = b\"data:image/png;base64,\" + im_b64\n return im_b64", "def encode_image(image):\n return base64.b64encode(image).decode('ascii')", "def image_to_base64str(image):\n file_bytes = image.file.read()\n base64_img_str = 'data:image;base64, '\n base64_img_str += str(base64.b64encode(file_bytes), 'utf-8')\n return base64_img_str", "def _get_image(x):\n return b64encode(x).decode('ascii')", "def encode(output_image_path):\n with open(output_image_path, 'rb') as image_file:\n encoded_string = base64.b64encode(image_file.read()).decode('utf-8')\n return encoded_string", "def getbase64(nparr,):\n if type(nparr) == type({}):\n nparr = nparr['img']\n im = Image.fromarray(nparr)\n buf = BytesIO()\n im.save(buf,format=\"JPEG\")\n return base64.b64encode(buf.getvalue()).decode('ascii')", "def img_to_base64(img):\n with io.BytesIO() as output:\n img.save(output, format=\"PNG\")\n img_string = base64.b64encode(output.getvalue())\n return img_string.decode(\"utf-8\")", "def image_to_base64(image, format='JPEG'):\n in_mem_file = io.BytesIO()\n image.save(in_mem_file, format=format)\n # reset file pointer to start\n in_mem_file.seek(0)\n img_bytes = in_mem_file.read()\n base64_bstr = base64.b64encode(img_bytes)\n return base64_bstr.decode('ascii')", "def base64_string(self) -> global___Expression:", "def save_img_base64(_preds):\n img = Image.fromarray(_preds)\n buff = BytesIO()\n img.save(buff, format=\"JPEG\")\n return base64.b64encode(buff.getvalue())", "def base64_encode_image(inArray):\n imgDat = [base64_encode_array(inArray).decode(\"utf-8\")]\n imgType = str(inArray.dtype)\n imgShape = inArray.shape\n return json.dumps([ imgDat, imgType, imgShape ])", "def image_to_base64(pixbuf, activity):\n _file_name = os.path.join(get_path(activity, 'instance'), 'imagetmp.png')\n if pixbuf != None:\n pixbuf.save(_file_name, \"png\")\n _base64 = os.path.join(get_path(activity, 'instance'), 'base64tmp')\n _cmd = \"base64 <\" + _file_name + \" >\" + _base64\n subprocess.check_call(_cmd, shell=True)\n _file_handle = open(_base64, 'r')\n _data = _file_handle.read()\n _file_handle.close()\n return _data", "def get_image(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT data FROM image WHERE id = '{self.image_id}'\")\n image = cursor.fetchone()\n cursor.close()\n return b64encode(image['data']).decode('utf-8')", "def get_image_base64_str(self, message: ImageMessage) -> str:\n return ImageContentProcessor.binary_img_to_base64_str(self._core.get_message_content(str(message.id)).content)", "def base64_to_image(base64_image):\n return Image.open(io.BytesIO(base64.b64decode(base64_image)))", "def base64ify(image_data: bytes):\n # Convert the avatar to base64.\n mimetype = imghdr.what(None, image_data)\n if not mimetype:\n raise ValueError(\"Invalid image type\")\n\n b64_data = base64.b64encode(image_data).decode()\n return \"data:{};base64,{}\".format(mimetype, b64_data)", "def encode_image(self, image):\n\t\t# Encode in Base64 and print encoded string for copying\n\t\twith open(image, 'rb') as image:\n\t\t\tprint(\"[+] Image has been encoded. 
Copy this string:\\n\")\n\t\t\timg_64 = '<img src=\"data:image/png;base64,{}\">'.format(base64.b64encode(image.read()).decode('ascii'))\n\t\t\tprint(img_64 + \"\\n\")\n\t\t\tprint(\"[+] End of encoded string.\")", "def data64(self) -> str:\n return Image.encode64(self.data)", "def encodedImage(imageFile):\n imageFile = \"\".join([METRICS_PATH, imageFile])\n encoded = base64.b64encode(open(imageFile, 'rb').read())\n return 'data:image/jpg;base64,{}'.format(encoded.decode())", "def prepare_output(image: np.ndarray) -> str:\n response_image = Image.fromarray(np.uint8(image * 255))\n buffer = BytesIO()\n response_image.save(buffer, \"PNG\")\n encoded = base64.b64encode(buffer.getvalue())\n return \"data:image/png;base64,\" + str(encoded)[2:-1]", "def formatImage(imgData):\n imgstr = re.search(b'base64,(.*)', imgData).group(1)\n with open('output.png','wb') as output:\n output.write(base64.decodebytes(imgstr))", "def img2base64str(image, ext=\".png\"):\n assert image.shape == (256, 256, 3)\n assert ext in {\".png\", \".jpg\"}\n\n buffer = cv2.imencode('.png', image)[1]\n return b64encode(buffer)", "def convert_photo(link):\n\n image = open(link, \"rb\") #Open binary file in read-only mode\n image_read = image.read()\n image_base64 = base64.b64encode(image_read)\n\n return image_base64", "def process_image(self, base64_string: str) -> str:\n self.convert_base64_to_image(base64_string)\n self.corp_image()\n self.change_image_pixels()\n return self.image_to_string()", "def encode(pixels):\n # save the image to a bytes buffer\n buffered = BytesIO()\n image = Image.fromarray(pixels.astype('uint8'))\n image = image.convert('RGB')\n image.save(buffered, format=\"PNG\")\n\n # decode the bytes as a string\n img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')\n\n return img_str", "def convert_to_base64(image_file):\n with open(image_file, 'rb') as f:\n jpeg_bytes = base64.b64encode(f.read()).decode('utf-8')\n predict_request = '{\"instances\" : [{\"b64\": \"%s\"}]}' % jpeg_bytes\n # Write JSON to file\n with open(OUTPUT_FILE, 'w') as f:\n f.write(predict_request)\n return predict_request", "def np_to_base64(img_np):\n img = Image.fromarray(img_np.astype('uint8'), 'RGB')\n buffered = BytesIO()\n img.save(buffered, format=\"PNG\")\n return u\"data:image/png;base64,\" + base64.b64encode(buffered.getvalue()).decode(\"ascii\")", "def picture_base64(self) -> str:\n return self.properties.get(MessageField.PICTURE.value)", "def convertImage(img):\n return '\\\\includegraphicsdata{%s}' % \":\".join([\n 'data',\n img.contentType,\n \"base64,%s\" % img.data.encode(\"base64\").replace(\"\\n\", \"\"),\n ])", "def encode(self, image) -> bytes:\n raise NotImplementedError()", "def read_image(filepath: pathlib.Path) -> str:\n with open(filepath, \"rb\") as f:\n image_base64: str = base64.b64encode(f.read()).decode(\"utf-8\")\n\n return image_base64", "def get_Base64(self):\n\n return base64_with_linebreaks(self.get_DER())", "def convert_base64_to_image(self, image_in_base64):\n image_in_base64 = str(image_in_base64).replace('data:image/jpeg;base64,', '')\n image_data = base64.b64decode(image_in_base64)\n\n # Save image as image file\n with open(self.captcha_image_filename, 'wb') as file:\n file.write(image_data)", "def read_file_content_as_base64(file_path: str) -> str:\n file = open(file_path, \"rb\")\n image_base64 = base64.b64encode(file.read()).decode(\"utf-8\")\n file.close()\n\n return image_base64", "def _encode_base64(data: str) -> str:\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = 
str(ebytes, \"utf-8\")\n return estring", "def b64raster(self):\n r = self.craster()\n if r:\n if len(r) == 1:\n return r\n return b64encode(r)\n else:\n return None", "def base_64_to_img(base_64_string):\r\n # convert image into np array\r\n return cv2.imdecode(\r\n np.frombuffer(base64.b64decode(base_64_string.split(\";base64,\").pop()), np.uint8),\r\n cv2.IMREAD_COLOR)", "def get_body(self):\n from matplotlib.backends.backend_agg import \\\n FigureCanvasAgg as FigureCanvas\n\n canvas = FigureCanvas(self._body)\n png_output = BytesIO()\n canvas.print_png(png_output)\n data = png_output.getvalue()\n\n data_uri = base64.b64encode(data).decode('utf-8')\n return '<img title=\"{}\" src=\"data:image/png;base64,{}\">'.format(\n self.key, data_uri)", "def base64_encode(data):\n return base64.encodestring(data);", "def get_image_uri(self):\n return \"data:image/png;base64,\" + \\\n self.browser.get_screenshot_as_base64()", "def _encode_img(self, file_path):\n import memcache\n filename = file_path.rpartition(os.sep)[2]\n cache_file = \"%s_cache\" % file_path\n cached_image = memcache.get('%s%s' % (memcache.version, cache_file))\n if cached_image is None:\n image = open(file_path)\n cached_image = \"data:image;base64,%s\"%base64.b64encode(image)\n memcache.set('%s%s' % (memcache.version, cache_file), cached_image, 300)\n return cached_image", "def convert_to_image(self, frame, base64_encode=False):\n #NOTE: tuple (85010, 1) ndarray --> data reduction\n img_buf_arr = cv2.imencode(\".jpeg\", frame)[1]\n if base64_encode:\n img_buf_arr = b\"data:image/jpeg;base64,\" + base64.b64encode(img_buf_arr)\n return img_buf_arr\n return bytes(img_buf_arr)", "def write_image(image_base64: str, filepath: pathlib.Path):\n with open(filepath, \"wb\") as f:\n f.write(base64.b64decode(image_base64))", "def to_internal_value(self, data):\n if isinstance(data, str) and data.startswith('data:image'):\n # Found image is encoded, and must be decoded\n format, imgstr = data.split(';base64,')\n ext = format.split('/')[-1] # Extract file extension\n id = uuid.uuid4()\n data = ContentFile(base64.b64decode(imgstr), name = id.urn[9:] + '.' + ext)\n return super(Base64ImageField, self).to_internal_value(data)", "def decode_base64(data):\n\n image = None\n try:\n image = base64.decodestring(data)\n except:\n print \"Could not decode base64 image from json\"\n\n return image", "def base64(s):\n return b64encode(s,'[]').replace('=','_')", "def base64(path, filename):\n print(uc.base64(path, filename))", "def obimg():\n # The client might make a call to get a pic for an object which might\n # not have one. 
Better to return a blank than an error in that case.\n imgdat = B64ENCTRANSPARENT4X4PNG\n try:\n dsType = dbacc.reqarg(\"dt\", \"string\", required=True)\n dsId = dbacc.reqarg(\"di\", \"string\", required=True)\n inst = dbacc.cfbk(dsType, \"dsId\", dsId)\n if inst:\n picfldmap = {\"Point\": \"pic\"}\n imgdat = inst[picfldmap[dsType]]\n imgdat = base64.b64decode(imgdat)\n except ValueError as e:\n return util.serve_value_error(e)\n return util.respond(imgdat, mimetype=\"image/png\")", "def data_2_base64(data: np.ndarray) -> str:\n bytes_io = io.BytesIO()\n np.save(bytes_io, data, allow_pickle=False)\n return base64.b64encode(zlib.compress(bytes_io.getvalue())).decode('utf-8')", "def get_base64_data(self):\n if not self.is_base64_data():\n raise AttributeError(\"tag 'base64_data' not set\")\n return self._value", "def img_file_to_b64str(filename, urlsafe=False):\n # Open image file as byte\n file_byte = open(filename, 'rb').read()\n\n if urlsafe:\n file_base64 = base64.urlsafe_b64encode(file_byte)\n else:\n file_base64 = base64.standard_b64encode(file_byte)\n\n file_base64_string = file_base64.decode('utf-8')\n\n return file_base64_string", "def img_stream_to_b64str(stream, urlsafe=False):\n if urlsafe:\n stream_base64 = base64.urlsafe_b64encode(stream)\n else:\n stream_base64 = base64.standard_b64encode(stream)\n\n stream_base64_string = stream_base64.decode('utf-8')\n\n return stream_base64_string", "def encode(self):\n return base64.b64encode(self.content).decode('ascii')", "def storeImageFromBase64(self, data64: str):\n self.data64 = data64\n self._processImageStore()", "def figure_to_base64str(fig: matplotlib.figure.Figure) -> str:\n buf = io.BytesIO()\n fig.savefig(buf, bbox_inches='tight', format='png')\n return base64.b64encode(buf.getbuffer().tobytes()).decode('ascii')", "def CanvasToPngString(canvas):\n\n temp_file_name = \"temp_io.png\"\n canvas.Print(temp_file_name) \n image = open(temp_file_name, 'r')\n\n data_uri = image.read().encode(\"base64\")\n img_html_src = \"data:image/png;base64,%s\" % data_uri\n\n image.close()\n os.remove(temp_file_name)\n\n return img_html_src", "def fn_base64(self, value):\n if isinstance(value, str):\n value = value.encode()\n return base64.b64encode(value).decode()", "def my_base64encode(s):\n return base64.b64encode(s).decode(\"utf-8\")", "def send_image(image: PIL.Image.Image):\n import base64\n import io\n\n image = image.convert(\"RGB\")\n buffer = io.BytesIO()\n image.save(buffer, format=\"PNG\")\n image_b64 = base64.b64encode(buffer.getvalue())\n send(\"image\", image_b64.decode(\"utf-8\"))", "def base64Encode(input, addNewlines = False):\n base64Str = base64.b64encode(input)\n if not type(base64Str) is str:\n base64Str = \"\".join(map(chr, base64Str))\n \n if not addNewlines:\n return base64Str\n\n result = \"\"\n i = 0\n while i < len(base64Str):\n result += base64Str[i:i + 64] + \"\\n\"\n i += 64\n return result", "def _encode_image(image_array, fmt):\n from PIL import Image # pylint: disable=g-import-not-at-top\n pil_image = Image.fromarray(image_array)\n image_io = io.BytesIO()\n pil_image.save(image_io, format=fmt)\n return image_io.getvalue()", "def base64_decode_image(inStr):\n imgDat, imgType, imgShape = json.loads(inStr)\n imgDat = bytes(imgDat, encoding=\"utf-8\")\n\n imgDat = base64_decode_array(imgDat, imgType)\n imgDat = imgDat.reshape(imgShape)\n return imgDat", "def base64encode(self, value):\n\n return value.encode(\"base64\")[:-1].replace(\"\\n\", \"\")", "def pil_to_b64(im, enc_format='png', verbose=False, **kwargs):\n\n 
buff = BytesIO()\n im.save(buff, format=enc_format, **kwargs)\n encoded = base64.b64encode(buff.getvalue()).decode(\"utf-8\")\n\n return encoded", "def generate_json_from_base64_image(base64_image):\n # Content image\n content_json_obj = {'content': base64_image}\n\n # Detection type.\n feature_json_obj = [{'type': get_detection_type(5),\n 'maxResults': 50,\n }\n ]\n\n # Now add it to the request\n request_list = []\n request_list.append({\n 'features': feature_json_obj,\n 'image': content_json_obj,\n })\n\n # To json\n data = json.dumps({'requests': request_list})\n\n return data", "def base64_data(cls, val):\n return cls('base64_data', val)", "def encode(self, rosMsg):\r\n if not isinstance(rosMsg, sensor_msgs.msg.Image):\r\n raise TypeError('Given object is not a sensor_msgs.msg.Image '\r\n 'instance.')\r\n\r\n # Convert to PIL Image\r\n pil = Image.fromstring(\r\n ImageConverter._ENCODINGMAP_ROS_TO_PY[rosMsg.encoding],\r\n (rosMsg.width, rosMsg.height),\r\n rosMsg.data,\r\n 'raw',\r\n ImageConverter._ENCODINGMAP_ROS_TO_PY[rosMsg.encoding],\r\n 0,\r\n 1)\r\n\r\n # Save to StringIO\r\n img = StringIO()\r\n pil.save(img, 'PNG')\r\n return img", "def deserialise_image(data):\n if \"data:image\" in data:\n data = data[data.find(\",\") + 1:]\n\n return Image.open(io.BytesIO(base64.urlsafe_b64decode(data)))", "def embed_image_pred(image):\n image_pil2 = Image.fromarray((255 * image).astype('uint8'))\n #image_pil2 = image_pil.resize((256, 256))\n string_buf2 = StringIO.StringIO()\n image_pil2.save(string_buf2, format='png')\n data = string_buf2.getvalue().encode('base64').replace('\\n', '')\n return 'data:image/png;base64,' + data", "def getImgContentFile(img):\n format, imgstr = img.split(';base64,')\n ext = format.split('/')[-1]\n file = ContentFile(base64.b64decode(imgstr), name='temp.' 
+ ext)\n return file", "def picture_bytes(self):\n return bytearray(self.picture_base64[\"data\"][\"data\"])", "def raw_image(self):\n return self._image", "def convert_str_to_image(image_string):\n image = image_string.partition('base64,')[2]\n img_data = base64.b64decode(image)\n return img_data", "def base64_encode(text):\n if not isinstance(text, (bytes, bytearray)):\n text = bytes(text.encode())\n encode = base64.b64encode(text)\n return encode.decode('ascii')", "def file_as_base64(path):\n with open(path, \"rb\") as file:\n return base64.b64encode(file.read())", "def get_output_image(self, o_type=\"Image\"):\n o_type = o_type.lower()\n if o_type == \"name\":\n return self.output_image_name\n if o_type == \"image\":\n return self.output_image\n elif o_type == \"base64\":\n image_data = self.get_output_image(o_type=\"String\")\n encoded_image_data = base64.b64encode(image_data)\n return encoded_image_data.decode()\n elif o_type == \"string\":\n img_io = BytesIO()\n self.output_image.save(img_io, \"PNG\")\n img_io.seek(0)\n return img_io.getvalue()", "def thumb64(self) -> str:\n return Image.encode64(self.thumb)", "def to_image_data(data):\n \n # removing image\n if not data:\n return u''\n\n # image path (not changed)\n if data[0:5] != u'data:':\n return None\n \n # TODO: better MIME handling\n mime = data[5:data.index(u';')].lower()\n img = data[data.index(u',') + 1:].decode('base64')\n \n return mime, img", "def base64encode(self, item: str) -> bytes:\n b = self[item]\n b = b if isinstance(b, bytes) else b.encode()\n return base64.b64encode(b)", "def base64_to_pil(img_base64):\n image_data = re.sub('^data:image/.+;base64,', '', img_base64)\n img = Image.open(BytesIO(base64.b64decode(image_data))).convert('RGB')\n return img", "def to_representation(self, value):\n if value:\n data = default_storage.open(value.path).read()\n encoded=base64.b64encode(data).decode(\"utf-8\")\n return encoded", "def app_logo_img():\n return base64.b64decode(\n b\"iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEX/TQBcNTh/AAAAAXRSTlPM0jRW/QAAAApJREFUeJxjYgAAAAYAAzY3fKgAAAAASUVORK5CYII=\"\n )", "def b64enc(data: bytes) -> str:\n\n return base64.standard_b64encode(data).decode(\"utf-8\")", "def save_b64_image_to_png(filename, b64str):\n import base64\n\n imgdata = base64.b64decode(b64str.split(\",\")[1])\n with open(filename, \"wb\") as f:\n f.write(imgdata)", "def data64(self, value: str) -> None:\n self.data = Image.decode64(value)", "def encode_base64(self, i):\n return base64.b64encode(struct.pack('!L', self.transcode(i)), self.extra_chars)[:6]", "def image(self):\n # type: () -> string_types\n return self._image", "def base64_to_numpy_image(b64: str) -> np.ndarray:\n image = np.array(Image.open(BytesIO(base64.b64decode(b64))))\n return image", "def make_image_bytes(shape: Sequence[int], fmt: str = 'JPEG') -> bytes:\n image = fake_feature_generator.generate_image_np(*shape)\n return encode_image(image, fmt=fmt)", "def send_image(image_base64):\n\n url = \"url\"\n data = image_base64\n files = {\"photo\": { \"base64\": data.decode('utf-8')}}\n\n try:\n r = requests.post(url, json=files)\n except requests.exceptions.RequestException as e:\n print(e)\n\n with open('/home/krozanit/Desktop/somefile.txt', 'w') as the_file:\n the_file.write(r.text)\n print(r.text)\n return r.text", "def gen_broadlink_base64_from_raw(data, repeat=0):\n return b64encode(bytes(gen_broadlink_from_raw(data, repeat)))", "def encodeFrame(frame):\n return base64.b64encode(frame)", "def get_image(self):\n return 
self.process_raw_image(self.get_raw_image())", "def export_image(self, params: Dict[str, str]) -> bytes:\n response = requests.post(self.export_url, data=params)\n self.export_output = response.content\n return self.export_output", "def convertdataTOimage(data):\n data = data.partition(\",\")[2]\n padding = len(data)%4\n data += \"=\"*padding\n image = Image.open(BytesIO(b64decode(data)))\n return image", "def img_stream_to_b64_dataurl(stream):\n\n # Check image file type\n filetype = getWebImgType_stream(stream)\n if not filetype:\n return False\n\n # Start convert\n b64str = img_stream_to_b64str(stream, urlsafe=False)\n b64url = \"data:image/%s;base64,%s\" % (filetype, b64str)\n\n return b64url", "def base64_to_PIL(string):\n try:\n base64_data = base64.b64decode(string)\n img = Image.open(BytesIO(base64_data)).convert('RGB')\n return img\n except:\n return None", "def base64_filter(val, indent=2):\n if isinstance(val, Undefined):\n return \"\"\n s = json.dumps(val).encode(\"utf-8\")\n return b64encode(s).decode(\"utf-8\")", "def data(self):\n if self._data is None:\n return BinaryData(strencoding=\"base64\")\n return self._data" ]
[ "0.869036", "0.848181", "0.792245", "0.7919407", "0.78182817", "0.7813553", "0.7783526", "0.7704936", "0.7685311", "0.76038045", "0.7507104", "0.74403137", "0.7422337", "0.7361644", "0.73501134", "0.73389935", "0.7311974", "0.7309352", "0.7265232", "0.722809", "0.7165373", "0.7146901", "0.706896", "0.70437247", "0.7020166", "0.700006", "0.69758147", "0.68695486", "0.6861234", "0.6832082", "0.6820809", "0.68009114", "0.6781356", "0.6752789", "0.6725488", "0.66982436", "0.6680185", "0.66746336", "0.6673837", "0.6655878", "0.6589131", "0.65675515", "0.6548436", "0.65457106", "0.64872754", "0.6482789", "0.6474112", "0.6449936", "0.64379835", "0.6413432", "0.6373233", "0.6342577", "0.63347405", "0.63325125", "0.6306772", "0.6287446", "0.6287431", "0.62843466", "0.6282223", "0.6273832", "0.62709403", "0.62631047", "0.62458014", "0.62061644", "0.6182028", "0.6175362", "0.61752343", "0.6163665", "0.6129837", "0.6129768", "0.6116129", "0.61115867", "0.61087793", "0.6101655", "0.6096396", "0.60715836", "0.6048688", "0.60424685", "0.60386044", "0.5985095", "0.59824425", "0.5974936", "0.5974795", "0.5961669", "0.5953909", "0.5949824", "0.594156", "0.5940441", "0.5935909", "0.59213305", "0.58870274", "0.5881872", "0.5878355", "0.5871177", "0.58656806", "0.5848836", "0.58426225", "0.58285284", "0.58252877", "0.5822598" ]
0.8013179
2
Pretty prints a dictionary object
def pretty_print(data): print json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pretty_print(dictionary: dict):\n return json.dumps(dictionary, indent=4)", "def _format_dict(self, dict_, indent=0):\n prefix = indent*\" \"*4\n output = \"{\\n\"\n for key, val in sorted(dict_.items()):\n if isinstance(val, dict):\n rval = self._format_dict(val, indent+1)\n else:\n rval = repr(val)\n output += prefix + \" \"*4 + repr(key) + \" : \" + rval + \",\\n\"\n output += prefix + \"}\"\n return output", "def pretty_dict(d):\n return '{%s}' % ', '.join('%r: %r' % (k, v)\n for k, v in sorted(d.items(), key=repr))", "def pretty_repr(self, num_spaces=4):\n\n def pretty_dict(x):\n if not isinstance(x, dict):\n return repr(x)\n rep = ''\n for key, val in x.items():\n rep += f'{key}: {pretty_dict(val)},\\n'\n if rep:\n return '{\\n' + _indent(rep, num_spaces) + '}'\n else:\n return '{}'\n\n return f'FrozenDict({pretty_dict(self._dict)})'", "def ppdict(d):\n print '{'\n keys=d.keys()\n keys.sort()\n for k in keys:\n spacing=\" \" * (16-(len(repr(k))+1))\n print \"%s:%s%s,\" % (repr(k),spacing,repr(d[k]))\n print '}'", "def pretty_print(d, indent=0):\n for key, value in d.items():\n print('\\t' * indent + str(key) + \":\")\n if isinstance(value, dict):\n pretty_print(value, indent + 1)\n else:\n print('\\t' * (indent + 1) + str(value))", "def prettyPrintDictHelper_ (d, stream, indent, pretty_print=True, indent_additive=4) :\r\n \r\n # Base case, empty table\r\n entries = len(d)\r\n if entries==0 :\r\n stream.write(\"{ }\")\r\n return\r\n\r\n # Recursive case\r\n stream.write(\"{\")\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n ii=0\r\n keys = d.keys()\r\n keys.sort()\r\n for key in keys : # Sorted order on keys\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n stream.write(repr(key)+\":\")\r\n value = d[key]\r\n specialStream_(value, stream, indent, pretty_print, indent_additive)\r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n ii += 1\r\n \r\n if pretty_print : indentOut_(stream, indent) \r\n stream.write(\"}\")", "def pretty_repr(x: Any, num_spaces: int = 4) -> str:\n\n if isinstance(x, FrozenDict):\n return x.pretty_repr()\n else:\n\n def pretty_dict(x):\n if not isinstance(x, dict):\n return repr(x)\n rep = ''\n for key, val in x.items():\n rep += f'{key}: {pretty_dict(val)},\\n'\n if rep:\n return '{\\n' + _indent(rep, num_spaces) + '}'\n else:\n return '{}'\n\n return pretty_dict(x)", "def pretty(d, indent=0):\n\tret_str = ''\n\tfor key, value in d.items():\n\n\t\tif isinstance(value, collections.Mapping):\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\n'\n\t\t\tret_str = ret_str + pretty(value, indent + 1)\n\t\telse:\n\t\t\tret_str = ret_str + '\\n' + '\\t' * indent + str(key) + '\\t' * (indent + 1) + ' => ' + str(value) + '\\n'\n\n\treturn ret_str", "def print_dict(dictionary, format_=None):\n\n format_ = format_ or DEFAULT\n\n if format_ == TEXT:\n for key, value in iter(sorted(dictionary.items())):\n print(\"%s = %s\" % (key, value))\n elif format_ == DOCKERENV:\n for key, value in iter(sorted(dictionary.items())):\n print(\"%s=%s\" % (key, value))\n elif format_ == BASH:\n for key, value in iter(sorted(dictionary.items())):\n print(\"export %s=%s\" % (key, value))\n elif format_ == JSON:\n print(json.dumps(dictionary))\n elif format_ == NAME_VALUE_DICT:\n print(\"[\")\n for key, value in iter(sorted(dictionary.items())):\n print('{\"name\": \"%s\", \"value\": \"%s\"},' % (key, value))\n print(\"]\")", "def 
json_pretty_print(dictionary):\n return json.dumps(dictionary, sort_keys=True,\n indent=2, separators=(',', ': '))", "def dict_pretty_print(D: dict, indent_lvl=0):\n print(\"Using 3 decimal places.\")\n base_indent = indent_lvl * \" \"\n indent = (indent_lvl+2)*\" \"\n print(f\"{base_indent}\" + \"{\")\n for key, value in D.items():\n print(f\"{indent}{key}: \", end=\"\")\n if type(value) is dict:\n print(\"\")\n dict_pretty_print(value, indent_lvl + 2)\n else:\n print(f\"{value:.3f}\")\n print(f\"{base_indent}\" + \"}\")", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def _pretty_print(value, indent=''):\n keys = list(value.keys())\n keys.sort()\n for k in keys:\n v = value[k]\n if type(v) == dict:\n print(\"%s%s:\"%(indent, k))\n _pretty_print(v, indent+' ')\n elif type(v) == str:\n if '\\n' in v:\n print(indent+'%s: |'%k)\n for l in v.split('\\n'):\n print(indent+' '+l)\n else:\n print(\"%s%s: %s\"%(indent, k, v))\n else:\n dump = yaml.dump(v)\n # #1617\n # newer versions of python-yaml append the '...' document end\n # syntax. as YAML functions fine w/o it, and as it is\n # confusing to users who are just getting a single scalar, we\n # strip it\n if dump.endswith('\\n...\\n'):\n dump = dump[:-4]\n \n sys.stdout.write(\"%s%s: %s\"%(indent, k, dump))", "def nice_dict_format(d):\n return ''.join([key+\": \"+str(d[key])+\"\\n\" for key in list(d.keys())])", "def _pretty_print(self, json_dict):\n if self.prettyprint:\n return \"\\n\" + json.dumps(json_dict, indent=self.indent)\n return json.dumps(json_dict)", "def print_dict(data):\n print data", "def prettyPrintODictHelper_ (d, stream, indent, pretty_print=True, indent_additive=4) :\r\n global OTabRepr\r\n # Base case, empty table\r\n entries = len(d)\r\n if entries==0 :\r\n stream.write(OTabEmpty[OTabRepr]) # \"o{ }\"\r\n return\r\n\r\n # Recursive case\r\n stream.write(OTabLeft[OTabRepr]) # \"o{\"\r\n if pretty_print: stream.write('\\n')\r\n\r\n # Iterate through, printing each element\r\n ii=0\r\n keys = d.keys()\r\n for key in keys : # Insertion order on keys\r\n if pretty_print : indentOut_(stream, indent+indent_additive)\r\n if OTabRepr == 0 :\r\n stream.write(\"(\"+repr(key)+\", \")\r\n else :\r\n stream.write(repr(key)+\":\")\r\n value = d[key]\r\n specialStream_(value, stream, indent, pretty_print, indent_additive)\r\n if OTabRepr == 0 :\r\n stream.write(\")\")\r\n \r\n if entries>1 and ii!=entries-1 :\r\n stream.write(\",\")\r\n if pretty_print: stream.write('\\n')\r\n ii += 1\r\n \r\n if pretty_print : indentOut_(stream, indent) \r\n stream.write(OTabRight[OTabRepr]) # \"}\"\r", "def printDict(self):\n print str(self)", "def format_dict(dictionary, depth=0):\n tab = \" \" * 4\n string = \"{\\n\"\n for key, val in dictionary.items():\n string += depth * tab \n string += \"{}: \".format(key)\n if type(val) is dict:\n string += format_dict(val, depth + 1)\n \n else:\n if type(val) is str:\n fmt = \"'{}'\\n\"\n else:\n fmt = \"{}\\n\"\n string += fmt.format(val)\n string += (depth) * tab + '}\\n'\n return string", "def prettyPrint(self):\n import pprint\n pp = pprint.PrettyPrinter(indent=4)\n x=pp.pformat(self.__dict__)\n print x\n return", "def pprint(self):\n import json\n return json.dumps(OrderedDict(self.items()), indent=4)", "def format_dictionary(dct, indent=4):\n return json.dumps(dct, indent=indent, sort_keys=True)", "def print_dict(self):\n print(self.__dict__)", "def print_dictionary(\n d, nested_level=0, output=sys.stdout, spacing=' ', separator=None,\n):\n if separator:\n print(separator, 
file=output)\n\n if type(d) == dict:\n print('%s{' % (nested_level * spacing), file=output)\n for k, v in list(d.items()):\n if hasattr(v, '__iter__'):\n print('%s%s:' % ((nested_level + 1) * spacing, k), file=output)\n print_dictionary(v, nested_level + 1, output)\n else:\n print(\n '%s%s: %s' % ((nested_level + 1) * spacing, k, v),\n file=output\n )\n print('%s}' % (nested_level * spacing), file=output)\n elif type(d) == list:\n print('%s[' % (nested_level * spacing), file=output)\n for v in d:\n if hasattr(v, '__iter__'):\n print_dictionary(v, nested_level + 1, output)\n else:\n print('%s%s' % ((nested_level + 1) * spacing, v), file=output)\n print('%s]' % (nested_level * spacing), file=output)\n else:\n print('%s%s' % (nested_level * spacing, d), file=output)", "def recursive_dict_key_print(dict_in, spacer=\"\"):\n if type(dict_in) is not dict:\n return\n next_spacer = spacer + \" \"\n for key, value in dict_in.items():\n try:\n print(spacer, f\"{key} : {value.shape}\")\n except(AttributeError):\n print(spacer, key)\n recursive_dict_key_print(value, next_spacer)", "def print_dict_items(my_dict):\n print(\"Printing dictionary\", my_dict, \"in readable form\")\n for (key, value) in my_dict.items():\n print(\"Key =\", key, \"has value =\", value)", "def print_dict_items(my_dict):\n print(\"Printing dictionary\", my_dict, \"in readable form\")\n for (key, value) in my_dict.items():\n print(\"Key =\", key, \"has value =\", value)", "def render_dict(dict):\n\t\treturn str.encode(str(dict))", "def print_object(dict_to_print, *, name='', uppercase=False):\n string = '' if name == '' else name.ljust(10)\n for key, value in dict_to_print.items():\n string += f'{key.upper() if uppercase else key}: {\"\" if value < 0 else \" \"}{float(value):.4}'.ljust(\n len(key) + 10)\n\n print(string)", "def __str__(self):\n try:\n delim = ', ' if len(self) < 8 else ',\\n '\n s = delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())\n return '{' + s + '}'\n except Exception:\n return dict.__repr__(self)", "def _format_dict(self, dict):\n\n result = \"\"\n for k, v in dict.items():\n result += \"\\n{0}: {1}\".format(k.capitalize(), v)\n\n return result", "def pretty_print(data, indent=4):\n if type(data) == dict:\n print(json.dumps(data, indent=indent, sort_keys=True))\n else:\n print(data)", "def printdict(input_dict):\n for key in input_dict:\n print key, \":\", input_dict[key]", "def print_dd_dict( self, ):\n print( self._dd_dict )", "def simple_formatter(entry, fp, indent=0):\n for key, value in six.iteritems(entry):\n if isinstance(value, dict):\n print('{}{}:'.format(' ' * indent, key))\n simple_formatter(value, fp, indent + 1)\n else:\n print('{}{}: {}'.format(' ' * indent, key, value), file=fp)", "def pprint(obj):\n return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))", "def pprint(obj):\n return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': '))", "def __repr__(self):\n return repr(dict([(k, v) for k, v in self.iteritems()]))", "def pprint(self,obj):\n return(json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')))", "def dict_json_print_beauty(json_dict=dict, encode='utf-8'):\n # type: (dict, str)->None\n print(json.dumps(json_dict, encoding=encode, ensure_ascii=False, indent=4))", "def __repr__(self, indent=2):\n return pprint.pformat(self.to_dict(), indent=indent)", "def _to_string(self) -> str:\n\n string_list = []\n for key, value in self.__dict__.items():\n if isinstance(value, dict):\n string_list.append(key)\n 
string_list.extend('\\n'.join([\"Key: {:24}\\tValue: {}\".format(_key, _value) for _key, _value in value.items()]))\n else:\n string_list.append(\"Key: {:24}\\tValue: {}\\n\".format(key, value))\n return ''.join(string_list)", "def printDict(myDict):\n for key in myDict:\n print(f\"Version: --> {myDict[key]['version']} \")\n print(f\"Accuracy: --> {myDict[key]['accuracy']}\")\n print(f\"Time --> {myDict[key]['time_per_target']}\")\n print(f\"Penalty --> {myDict[key]['target_w_penalty']}\")\n print(f\"ID --> {myDict[key]['assessed_by']}\")\n print(f\"# --> {myDict[key]['attempt']}\")\n\n print()", "def print(self):\n for fiction in self.fictions:\n print(fiction.__dict__)", "def json_dump_dict(dictionary):\n\n print(json.dumps(dictionary, indent=4, ensure_ascii=False).encode(\"utf8\").decode())\n\n return", "def __str__(self):\n if len(self.keys()):\n return '{' + repr(self.keys()[0]) + ':' + repr(self[self.keys()[0]]) + ', ...'\n else:\n return super(FSDict, self).__str__()", "def pretty_print(name, input, val_width=40, key_width=0):\n\n # root\n pretty_str = name + ': {\\n'\n\n # determine key width\n for key in input.keys(): key_width = max(key_width, len(str(key)) + 4)\n\n # cycle keys\n for key in input.keys():\n\n val = input[key]\n\n # round values to 3 decimals..\n if type(val) == np.ndarray: val = np.round(val, 3).tolist()\n\n # difficult formatting\n val_str = str(val)\n if len(val_str) > val_width:\n val_str = pprint.pformat(val, width=val_width, compact=True)\n val_str = val_str.replace('\\n', '\\n{tab}')\n tab = ('{0:' + str(4 + key_width) + '}').format('')\n val_str = val_str.replace('{tab}', tab)\n\n # more difficult formatting\n format_str = '{0:' + str(4) + '}{1:' + str(key_width) + '} {2:' + str(val_width) + '}\\n'\n pretty_str += format_str.format('', key + ':', val_str)\n\n # close root object\n pretty_str += '}'\n\n return pretty_str", "def print_dict(dictionary):\n for x,y in dictionary.items():\n print(x, y)", "def dict_json_printer(json_dict=dict, encode='utf-8'):\n # type: (dict, str)->None\n print(json.dumps(json_dict, encoding=encode, ensure_ascii=False))", "def display(self):\r\n\t\tfor key, value in self.__dict__.items():\r\n\t\t\tprint(key.upper(), value, sep=': ')\r\n\r\n\t\tprint(\"\")", "def pretty(d, indent=0):\n sp = \" \"\n t = \"\"\n \n if isinstance(d, dict):\n l = len(d)\n c = 0\n t += \"<type 'dict'>:{\\n\"\n for key, value in d.items():\n t += sp * (indent + 1) + \"'\" + str(key) + \"':\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \"}\"\n elif isinstance(d, list):\n l = len(d)\n c = 0\n t += \"<type 'list'>:[\\n\"\n for value in d:\n t += sp * (indent + 1) + str(c) + \":\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \"]\"\n elif isinstance(d, tuple):\n l = len(d)\n c = 0\n t += \"<type 'tuple'>:(\\n\"\n for value in d:\n t += sp * (indent + 1) + str(c) + \":\" + pretty(value, indent + 1)\n \n if c + 1 < l:\n t += \",\"\n \n t += \"\\n\"\n c += 1\n t += sp * indent + \")\"\n else:\n t += str(type(d)) + \":'\" + str(d) + \"'\"\n \n return t", "def formatall(obj):\n result = \"\"\n if isinstance(obj, list):\n# i = 0\n for obj in obj:\n #printf(\">>> [%d] >>> \", i)\n result += format(obj)\n result += \"\\n\"\n# i += 1\n return result\n if isinstance(obj, dict):\n for key, value in obj.items():\n result += \"%-15s : \" % key\n result += format(value)\n result += \"\\n\"\n return result\n return format(obj)", "def 
print_dict_tree(d, max_depth=None, indent=0):\n def _recurse(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict) and indent != max_depth:\n print(); _recurse(value, indent + 1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))\n \n return _recurse(d)", "def print_dictionary(dictionary: dict):\n for key in dictionary:\n print(key, dictionary[key])", "def __repr__(self):\r\n return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])", "def print_dict(obj, fields=None, formatters=None, mixed_case_fields=False,\n normalize_field_names=False, property_label=\"Property\",\n value_label=\"Value\", table_label=None, print_header=True,\n print_border=True, wrap=0, out=sys.stdout):\n formatters = formatters or {}\n mixed_case_fields = mixed_case_fields or []\n if not fields:\n if isinstance(obj, dict):\n fields = sorted(obj.keys())\n else:\n fields = [name for name in dir(obj)\n if (not name.startswith(\"_\")\n and not callable(getattr(obj, name)))]\n\n pt = prettytable.PrettyTable([property_label, value_label], caching=False)\n pt.align = \"l\"\n for field_name in fields:\n if field_name in formatters:\n data = formatters[field_name](obj)\n else:\n field = field_name\n if normalize_field_names:\n if field not in mixed_case_fields:\n field = field_name.lower()\n field = field.replace(\" \", \"_\").replace(\"-\", \"_\")\n\n if isinstance(obj, dict):\n data = obj.get(field, \"\")\n else:\n data = getattr(obj, field, \"\")\n\n # convert dict to str to check length\n if isinstance(data, (dict, list)):\n data = json.dumps(data)\n if wrap > 0:\n data = textwrap.fill(str(data), wrap)\n # if value has a newline, add in multiple rows\n # e.g. fault with stacktrace\n if (data and isinstance(data, str)\n and (r\"\\n\" in data or \"\\r\" in data)):\n # \"\\r\" would break the table, so remove it.\n if \"\\r\" in data:\n data = data.replace(\"\\r\", \"\")\n lines = data.strip().split(r\"\\n\")\n col1 = field_name\n for line in lines:\n pt.add_row([col1, line])\n col1 = \"\"\n else:\n if data is None:\n data = \"-\"\n pt.add_row([field_name, data])\n\n table_body = pt.get_string(header=print_header,\n border=print_border) + \"\\n\"\n\n table_header = \"\"\n\n if table_label:\n table_width = table_body.index(\"\\n\")\n table_header = make_table_header(table_label, table_width)\n table_header += \"\\n\"\n\n if table_header:\n out.write(encodeutils.safe_encode(table_header).decode())\n out.write(encodeutils.safe_encode(table_body).decode())", "def print_data(d, indent=0):\n prefix = indent * ' '\n for k in sorted(d):\n v = d[k]\n k = prefix + str(k)\n if isinstance(v, dict):\n print(k)\n print_data(v, indent + 1)\n else:\n if k.endswith('cent'):\n v = ' '.join(\n str(tuple(int(j) if j.is_integer() else j for j in i))\n for i in v\n )\n elif isinstance(v, np.ndarray):\n v = str(v).replace('\\n', '')\n print(k, '=', v)", "def print_json_tree(d, indent=0):\n for key, value in d.items():\n print(' ' * indent + str(key), end=' ')\n if isinstance(value, dict):\n print(); print_json_tree(value, indent+1)\n else:\n print(\":\", str(type(d[key])).split(\"'\")[1], \"-\", str(len(str(d[key]))))", "def print_id_keyed_dict(d):\n newdoc_string=\"==========================================================\"\n for key,value in d.items():\n print(newdoc_string)\n if isinstance(key,ObjectId):\n print('ObjectId string of document=',str(key))\n else:\n print('WARNING: key is not object id as it shoudl be. 
It is->',\n key,' of type ',type(key))\n print(newdoc_string)\n if type(value)==dict:\n print(json_util.dumps(value,indent=2))\n else:\n print(value)", "def __repr__(self, *args, **kwargs):\n result ='{'\n for (k, v) in self.items(*args, **kwargs):\n result += repr(k) + \": \" + repr(v) + \",\"\n\n result = result[:-1] + '}'\n return result", "def printt(dictionnary):\n for key, value in dictionnary.iteritems():\n print('{key}, size: {size}, {values}'.format(key=key, \n size=len(value), values=value[0:4]))", "def debug_repr(self) -> str:\n repr_string = \"{}(Confi):\\n\".format(self.__class__.__name__)\n items = list(self.entries.items())\n items.sort(key = lambda item: item[0])\n indent = ' ' * 4\n for key, entry in items:\n repr_string += f\"{indent}{key}: {repr(entry.value)}\\n\"\n return repr_string", "def pprint(*d):\n i = 0\n while i < len(d):\n print(pretty(d[i]))\n i += 1", "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def __str__(self):\n try:\n delim = ', ' if len(self) < 8 else ',\\n '\n s = delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())\n return '{' + s + '}'\n except Exception:\n return defaultdict.__repr__(self)", "def _format_instance(d, style=None):\n pt = PrettyTable(['Property', 'Value'], caching=False)\n pt.align = 'l'\n for k, v in sorted(d.items()):\n # convert dict to str to check length\n if isinstance(v, (dict, list)):\n v = json.dumps(v)\n # if value has a newline, add in multiple rows\n # e.g. fault with stacktrace\n if v and isinstance(v, six.string_types) and (r'\\n' in v or '\\r' in v):\n # '\\r' would break the table, so remove it.\n if '\\r' in v:\n v = v.replace('\\r', '')\n lines = v.strip().split(r'\\n')\n col1 = k\n for line in lines:\n pt.add_row([col1, line])\n col1 = ''\n else:\n if v is None:\n v = '-'\n pt.add_row([k, v])\n\n if style == 'html':\n output = '<b>Instance details</b>'\n output += pt.get_html_string(attributes={\n 'border': 1,\n 'style': 'border-width: 1px; border-collapse: collapse;'\n })\n else:\n output = 'Instance details:\\n'\n output += pt.get_string()\n return output", "def __str__(self):\n if len(self.__keys) == 0:\n return '{}'\n output = '{'\n fmt = '{}: {}, '\n for key, val in zip(self.__keys, self.__vals):\n output += fmt.format(repr(key), repr(val))\n return output[:-2] + '}'", "def __repr__(self):\n res = \"{\"\n for k in self.keys():\n res+=\" '\"+str(k)+\"':\"+str(self[k])+\",\"\n res=res[:-1]+\" }\"\n return res", "def Pretty_print(summary):\n print('Length of the dictionary: {}'.format(len(summary)))\n print('Word', ' ', 'Count')\n print('-------------------------')\n # Sort the dictionary by value\n for word, count in sorted(summary.items(), key=lambda kv: kv[1], reverse=True):\n print(\"{:17} {:5}\".format(word, count))", "def print_pretty(self, data):\n length = max(map(lambda x: len(x), data.keys()))\n print '+-------------------------------------+'\n print '| Company Name | Year | Month | Value |'\n print '+-------------------------------------+'\n for key, value in data.items():\n print '| %s | %s | %s | %s |' % (key, \\\n value['year'], value['month'], value['value'])\n print '+-------------------------------------+'", "def format_dict(kv_list):\n return '\\n'.join(['{} - {}'.format(key, value) for\n key, value in kv_list])", "def _pretty_json_dump(d):\n return json.dumps(d, sort_keys=True, indent=3)", "def echo_dict(data,\n no_color,\n key_color='green',\n spaces=None,\n value_color='blue'):\n if not spaces:\n spaces = get_max_key(data)\n\n for key, value in 
data.items():\n title = '{spaces}{key}: '.format(\n spaces=' ' * (spaces - len(key)),\n key=key\n )\n wrapper = TextWrapper(\n width=(82 - spaces),\n subsequent_indent=' ' * (spaces + 3)\n )\n\n if isinstance(value, dict):\n echo_dict(value, no_color, key_color, spaces, value_color)\n else:\n click.echo(\n ''.join([\n style_string(title, no_color, fg=key_color),\n wrapper.fill(\n style_string(\n str(value), no_color, fg=value_color\n )\n )\n ])\n )", "def dump(self):\n\n d = OrderedDict()\n d[\"Predicates\"] = self.predicates\n d[\"Initial State\"] = self.init\n d[\"Goal State\"] = self.goal\n d[\"Actions\"] = self.actions\n #d[\"Types\"] = self.types\n d[\"Parent Types\"] = self.parent_types\n #d[\"Objects\"] = self.objects\n d[\"Obj -> Type Mapping\"] = self.obj_to_type\n #d[\"Type -> Obj Mapping\"] = self.type_to_obj\n\n for k, v in d.items():\n print(\"*** %s ***\" % k)\n if isinstance(v, dict):\n if len(v) == 0:\n print(\"\\t<no items>\")\n for k, val in v.items():\n print(\"\\t%s -> %s\" % (k, str(val)))\n elif hasattr(v, '__iter__'):\n if len(v) == 0:\n print(\"\\tNone\")\n elif k == \"Actions\":\n for action in self.actions:\n action.dump(lvl=1)\n else:\n print(\"\\t\" + \"\\n\\t\".join([str(item) for item in v]))\n else:\n print(\"\\t\" + str(v))\n print(\"\")", "def _render_dict_to_string(self, adict):\n alist = [ \"%s:%s\" % (self._render_thing(k), \n self._render_thing(adict[k])\n ) for k in adict.keys()]\n return \",\".join(self._render_row(alist))", "def to_string(obj):\n if isinstance(obj, dict):\n str_obj = '{'\n for key, value in obj.items():\n str_obj += Parser.parse_text(key)+': '+Parser.parse_text(value)+'\\n'\n return str_obj + '\\b}'\n else:\n return Parser.parse_text(obj)", "def pprint(self):\n return pformat(repr(self))", "def pprint(object, stream=None):\r\n printer = PrettyPrinter(stream=stream)\r\n printer.pprint(object)", "def encode_pretty_printed_json(json_object):\n\n return _pretty_encoder.encode(json_object).encode(\"ascii\")", "def pretty_print_app_info(info_dict):\n print(json.dumps(info_dict, sort_keys=True,\n indent=4, separators=(',', ': ')))", "def dump_pretty(thing):\n print(json.dumps(thing, indent=1, default=convert_for_json))", "def pprint(self):\n print(self.pprint_str())", "def _DictToString(self, value_dict, str_length=5):\n\n def FormatValue(v, value_format, str_length):\n if isinstance(v, (int, float)):\n return value_format % v\n else:\n return str(v).rjust(str_length)\n\n text = []\n blank = '--'.rjust(str_length)\n\n if self._show_label:\n text.append(' '.join(k.rjust(str_length) for k in self._node_labels))\n\n if not self._precision:\n value_format = '%% %dd' % str_length\n else:\n value_format = '%% %d.%df' % (str_length, self._precision)\n\n text.append(' '.join(\n [FormatValue(value_dict[k], value_format, str_length)\n if k in value_dict else blank for k in self._node_labels]))\n\n return '\\n'.join(text)", "def print_pairs(self, d, level=0):\n for k, v in d.iteritems():\n if type(v) is dict:\n self._write('%s%s :\\n' % (\"\\t\" * level, k.upper()))\n self.print_pairs(v, level + 1)\n elif k == \"output\":\n self._write('%s%s :\\n' % (\"\\t\" * level, k.upper()))\n self._write('%s\\n' % v)\n else:\n self._write('%s%s : %s\\n' % (\"\\t\" * level, k.upper(), v))", "def print_friendly_JSON_object(JSON_object):\n formatted_string = json.dumps(JSON_object, sort_keys=True, indent=4)\n print(formatted_string)", "def print_raw_json(raw):\n # type: (dict) -> None\n print(json.dumps(raw, ensure_ascii=False, indent=2, sort_keys=True))", "def 
print_json(obj):\n print(json.dumps(obj, indent=2))", "def display_dict() -> None:\n for key in ascii_dict:\n print(key, ': ')\n for line in ascii_dict[key]:\n print(line)", "def my_pprint(obj, intend = 0):\n if isinstance(obj, dict):\n for key, value in obj.items():\n print(intend*\" \"+str(key)+\" : \")\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, list):\n for value in obj:\n my_pprint(value, intend = intend + 4)\n print()\n elif isinstance(obj, bytes):\n print(\"<binary data>\")\n \n else:\n try:\n print(intend*\" \"+str(obj))\n except UnicodeDecodeError:\n print(intend*\" \"\"<?>\")", "def to_str(self) -> str:\n return pprint.pformat(self.dict())", "def format_pagerank_dict(self, a_dict):\n raise NotImplementedError()", "def __repr__(self):\n return str(dict(self))", "def format_dict(\n d: typing.Mapping[TTextType, TTextType]\n) -> typing.Iterator[TViewLine]:\n\n return format_pairs(d.items())", "def pretty_print_result_map(results: dict) -> None:\n print(json.dumps({k: map_res(v) for k, v in results.items()}, indent=2))", "def __repr__(self):\n return self.pretty_print(self.__dict__)", "def get_dict_str(d: dict) -> str:\n\treturn str({str(u): str(v) for u, v in d.items()})", "def testPrettyPrintJSON(self):\n test_dict = {'test': [{'dict1': {'key1': 'val1'}, 'dict2': None}]}\n expected_string = ('{\\n \"test\": [\\n {\\n \"dict1\": {\\n'\n ' \"key1\": \"val1\"\\n }, \\n'\n ' \"dict2\": null\\n }\\n ]\\n}\\n')\n self.assertEqual(expected_string, utils.PrettyPrintJSON(test_dict))", "def prettify(tree, indent=0):\n for key, value in six.iteritems(tree):\n if key == FILE_MARKER:\n if value:\n print((' ' * indent + str(value)))\n else:\n print((' ' * indent + str(key)))\n if isinstance(value, dict):\n prettify(value, indent+1)\n else:\n print((' ' * (indent+1) + str(value)))", "def run_print_dict_examples():\n print()\n print_dict_keys(NAME_DICT)\n print()\n print_dict_items(NAME_DICT)" ]
[ "0.7756763", "0.77491796", "0.7593614", "0.75883794", "0.747514", "0.74257255", "0.74077237", "0.7393802", "0.7355092", "0.7287501", "0.72772837", "0.7269156", "0.7246214", "0.72203445", "0.72087824", "0.71886367", "0.7185559", "0.71673584", "0.7134078", "0.7124751", "0.7095388", "0.7086884", "0.7052595", "0.7047233", "0.7023657", "0.7000053", "0.69855046", "0.69855046", "0.69805044", "0.69541097", "0.69249123", "0.6923098", "0.6912201", "0.6908027", "0.6890332", "0.6881322", "0.68712527", "0.68712527", "0.6840846", "0.68130773", "0.67873317", "0.6745515", "0.6743674", "0.6743416", "0.66926706", "0.66919565", "0.66755134", "0.66612136", "0.6642415", "0.66253", "0.6624792", "0.6623687", "0.6617636", "0.65821666", "0.6564034", "0.65630186", "0.6540417", "0.6525092", "0.65145683", "0.6505454", "0.6500538", "0.64945954", "0.6492798", "0.6492531", "0.64908886", "0.6467483", "0.64518195", "0.6451499", "0.6446019", "0.64407146", "0.6436832", "0.6407729", "0.6388712", "0.63677603", "0.6364576", "0.63588923", "0.6339154", "0.6327574", "0.6324713", "0.6322996", "0.63226616", "0.6319129", "0.6315445", "0.6305759", "0.63017696", "0.62966305", "0.6285193", "0.6278505", "0.62684995", "0.62670755", "0.62650454", "0.62629133", "0.62560296", "0.62534976", "0.62534", "0.6242399", "0.6229885", "0.62296844", "0.62260884", "0.6204383" ]
0.65422326
56
Create and return an author for testing
def create_author(user_dict, author_dict): user = User.objects.create_user(**user_dict) user.save() author_dict['user'] = user author = Author.objects.create(**author_dict) author.save() return (user, author)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample_author(first_name='testfirstname', last_name='testlastname'):\n return Author.objects.create(first_name=first_name, last_name=last_name)", "def create_author(name):\n return Author.objects.create(name=name)", "def create(self, validated_data):\n return Author.objects.get_or_create_author(validated_data['author'])", "def author(self):\n return User(None, self.get_data(\"author\"))", "def author(self) -> 'User': # stub\n return self._author", "def new_author():\n author = Author(name=request.args.get('name', ''))\n author.save()\n return 'Saved :)'", "def create_author(conn, author):\n\tsql = ''' INSERT INTO authorScopus(id,givenname,surname,initials,orcid)\n\t\t\t VALUES(?,?,?,?,?) '''\n\tcur = conn.cursor()\n\tcur.execute(sql, author)\n\treturn cur.lastrowid", "def test_add_author_logged(self):\n self.client.force_authenticate(user=self.user)\n\n data = {\n 'name': 'Donald Knuth',\n }\n\n request = self.client.post(reverse('author-create'), data)\n\n self.assertEqual(request.status_code, status.HTTP_201_CREATED)", "def author(self) -> \"api.User\":\n raise NotImplementedError", "def get_author(self):\n return self.author", "def get_author(self):\n return self.author", "def author(self) -> GitLabUser:\n return GitLabUser.from_data(self.data['author'],\n self._token,\n self.data['author']['id'])", "def generate_author():\n return author_surnames[random.randint(0, len(author_surnames) - 1)] + \" \" + author_lastnames[random.randint(0, len(author_surnames) - 1)]", "def author(self):\r\n return self.user", "def test_create_authors(self):\n payload = {\n 'first_name': 'testname1',\n 'last_name': 'testname2',\n 'nickname': 'testnick1'\n }\n\n res = self.client.post(reverse('authors'), payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n author = Author.objects.get(id=res.data['id'])\n\n for key in payload.keys():\n self.assertEqual(payload[key], getattr(author, key))", "def perform_create(self, serializer):\r\n serializer.save(author=self.request.user)", "def perform_create(self, serializer):\n serializer.save(author=self.request.user)", "def addAuthor(author):\n author_dict = dict()\n # author_dict['id'] = \"{}/api/{}\".format(DOMAIN, author.id)\n author_dict['id'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n author_dict['host'] = \"{}/api/\".format(author.host_url)\n author_dict['displayName'] = author.username\n author_dict['github'] = author.github_url\n author_dict['url'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n\n # Optional Attributes\n if author.github_url:\n author_dict['github'] = author.github_url\n if author.user.first_name:\n author_dict['firstName'] = author.user.first_name\n if author.user.last_name:\n author_dict['lastName'] = author.user.last_name\n if author.user.email:\n author_dict['email'] = author.user.email\n if author.bio:\n author_dict['bio'] = author.bio\n\n return author_dict", "def test_entry_author(factories, userid, name):\n annotation = factories.Annotation(userid=userid)\n\n feed = atom.feed_from_annotations(\n [annotation], \"atom_url\", lambda _: \"annotation url\"\n )\n\n assert feed[\"entries\"][0][\"author\"][\"name\"] == name", "def test_author_registration_with_existing_username(self) -> None:\n\n # Create a fake test Author\n author: Author = create_author()\n\n # Construct POST request with taken username\n response: Response = self.client.post(BASE_URL + '/create/', {\n 'password': 'abcd1432',\n 'bio': fake.text(120),\n 'email': fake.email(),\n 'username': author.username,\n 'first_name': 
fake.first_name(),\n })\n\n data: typing.Dict[typing.Any, typing.Any] = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)\n self.assertEqual(data, {\n 'detail': f\"User '{author.username}' already exists.\"\n })", "def author(self):\n return self._author", "def author(self):\n return self._author", "def author(self):\n return self._author", "def test_add_author_notes(self):\n metadata = Metadata(DataSource.CONTENT_CAFE)\n content = self.data_file(\"author_notes.html\")\n self.http.queue_requests_response(200, 'text/html', content=content)\n self.api.add_author_notes(metadata, self.identifier, self.args)\n\n [notes] = metadata.links\n eq_(Hyperlink.AUTHOR, notes.rel)\n assert 'Brenda researched turtles' in notes.content\n\n # We incidentally figured out the book's title.\n eq_(\"Franklin's Christmas Gift\", metadata.title)", "def createRemoteAuthor2(author, author_id):\n author_dict = dict()\n author_dict['id'] = \"{}/api/author/{}\".format(DOMAIN, author_id)\n author_dict['host'] = author.get('host')\n author_dict['displayName'] = author.get('displayName')\n author_dict['github'] = author.get('github')\n author_dict['url'] = author.get('url')\n author_dict['friends'] = author.get('friends')\n # Optional Attributes\n if author.get('github_url'):\n author_dict['github'] = author.get('github_url')\n if author.get('firstName'):\n author_dict['firstName'] = author.get('firstName')\n if author.get('lastName'):\n author_dict['lastName'] = author.get('lastName')\n if author.get('email'):\n author_dict['email'] = author.get('email')\n if author.get('bio'):\n author_dict['bio'] = author.get('bio')\n\n return author_dict", "def author(self) -> str:\n return pulumi.get(self, \"author\")", "def author(self):\n return self._data.get('author', None)", "def author(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"author\")", "def testgetEBookAuthor(self):\r\n ebook1 = ElectronicResources()\r\n ebook1.setEBookAuthor('Harry Bosch')\r\n self.assertEqual(ebook1.getEBookAuthor(), 'Harry Bosch')", "def author(name):\n print(\"Author:\", name)", "def author(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"author\")", "def author(self):\n return self._changeset.get('author', None)", "def test_commit_author(repository: Repository) -> None:\n (repository.path / \"a\").touch()\n\n author = pygit2.Signature(\"Katherine\", \"[email protected]\")\n repository.commit(message=\"empty\", author=author)\n\n head = repository.head.commit\n assert author.name == head.author.name and author.email == head.author.email", "def getAuthor(self):\n return self.bookAuthor", "def __insert_author(self, author):\n\n try:\n cur = self.conn.cursor()\n query = 'INSERT INTO author(name) VALUES(?)'\n cur.execute(query, (author,))\n self.conn.commit()\n return self.get_author_by_name(author)\n\n except IntegrityError:\n return False", "def test_get_all_authors(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(self.user_author, format='json', HTTP_AUTHORIZATION='Token ' +token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def author(self) -> str:\n return self._author", "def author(self) -> str:\n return self._author", "def remoteAddAuthor(author):\n author_dict = dict()\n author_dict['id'] = author.get('id')\n author_dict['host'] = author.get('host')\n author_dict['displayName'] = author.get('displayName')\n author_dict['github'] = author.get('github')\n author_dict['url'] = author.get('url')\n\n # Optional 
Attributes\n if author.get('github_url'):\n author_dict['github'] = author.get('github_url')\n if author.get('firstName'):\n author_dict['firstName'] = author.get('firstName')\n if author.get('lastName'):\n author_dict['lastName'] = author.get('lastName')\n if author.get('email'):\n author_dict['email'] = author.get('email')\n if author.get('bio'):\n author_dict['bio'] = author.get('bio')\n\n return author_dict", "def test_add_author_unlogged(self):\n data = {\n 'name': 'Linus Torvalds'\n }\n\n request = self.client.post(reverse('author-create'), data)\n\n self.assertEqual(request.status_code, status.HTTP_403_FORBIDDEN)", "def test_search_author(self):\n\n\t\titem_id = mock_item(title='Dummy Title', author='Made Up Author')[0]\n\n\t\titem = models.search('Made')[0]\n\t\tself.assertEqual(item['id'], item_id)", "def create(self, request):\n article = request.data.get('article', {})\n serializer = self.serializer_class(data=article)\n serializer.is_valid(raise_exception=True)\n serializer.save(author=request.user.profile)\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)", "def test_search_by_author(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.search_by_author(\"George Orwell\"), 0)\n s1.add_resource(b1)\n self.assertEqual(s1.search_by_author(\"George Orwell\"), 1)", "def create_article(self):\n user = self.create_a_user()\n article = Article.objects.create(\n title=self.title,\n description=self.description,\n body=self.body, author=user.profile)\n article.save()\n return article", "def setUp(self):\n self.user = User.objects.create_user(**USER)\n self.user_a = User.objects.create_user(**USER_A)\n self.user_b = User.objects.create_user(**USER_B)\n self.author = Author.objects.create(\n user = self.user,\n displayname=\"Jimmy\",\n github_username = GITHUB_USERNAME,\n bio = BIO,\n host = HOST)\n self.author_a = Author.objects.create(\n user = self.user_a,\n displayname=\"Bobby\",\n github_username = GITHUB_USERNAME,\n bio = BIO,\n host = HOST)\n self.author_b = Author.objects.create(\n user = self.user_b,\n displayname=\"Drake\",\n github_username = GITHUB_USERNAME,\n bio = BIO,\n host = HOST)\n\n c.token_credentials(self.author)", "def author(self, author):\n self._author = author", "def sign_up_entry(self, author, password):\n try:\n cur = self.conn.cursor()\n author_dict = self.__insert_author(author)\n author_id = author_dict['author_id']\n self.__insert_password(author_id, password)\n self.conn.commit()\n return self.get_author_by_id(author_id)\n\n except TypeError:\n return False", "def test_author_initials(self):\n inv_search = 'author:\"polyakov, a* m*\"'\n spi_search = 'find a a m polyakov'\n self._compare_searches(inv_search, spi_search)", "def test_author_filtering(self):\n # Get a valid author\n entry = Entry.objects.get(id=1)\n params = {\"author\": entry.first_author.id}\n\n self._test_filtering(**params)", "def _get_first_author(self):\n if not len(self.get_authors()):\n return ''\n return self.get_authors()[0]", "def test_2_addautor(self):\n for nome, email, grupo in ((\"Autor 1\", \"[email protected]\", \"grupo 1\"),\n (\"Autor 2\", \"[email protected]\", \"\")):\n self.app.addAutor(nome=nome,\n email=email,\n grupo=grupo)", "def __init__(self, author, title):\r\n\r\n self.author = author\r\n self.title = title", "def test_author_string(self):\n self.assertNotEqual(self.module.__author__, '???')", "def test_author_simplest(self):\n invenio_search = 'author:ellis'\n 
spires_search = 'find a ellis'\n self._compare_searches(invenio_search, spires_search)", "def post(self, request):\n data = request.data\n serializer = self.serializer_class(data=data)\n if serializer.is_valid():\n serializer.save(author=self.request.user)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def test_author_simple(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a ellis, j'\n self._compare_searches(invenio_search, spires_search)", "def get_author(self):\n return self._get_property(core.SVN_PROP_REVISION_AUTHOR)", "def test_authors():\n assert(hasattr(tekel, '__authors__'))", "def author(self, value):\n self._set_attr('author', value)", "def getAuthorByID(id: int) -> Author:\n if not id:\n abort(400)\n author = Author.query.get(id)\n if not author:\n abort(404, \"Author is not found\")\n return author.serialize()", "def create_book(title, author, completion):\n return Book.objects.create(title=title, author=author, completion=completion)", "def addSetAuthor(self,val):\n self.bookAuthor = val", "def get_article_author(self, article_webpage):\n pass", "def created_by(self):\n membership = UnitMembershipFactory(\n unit=self.unit, role=models.UnitMembershipRole.OWNER\n )\n return membership.user", "def creator(self):\n return User(None, self.get_data(\"creator\"), **self._new_session_args)", "def createUser(self):\n if self.user:\n return self.user\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import CurrentUserProvider\n properties = {'account': CurrentUserProvider(),\n 'status': 'valid', 'is_developer': self.dev_test}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def test_retrieve_authors(self):\n sample_author()\n sample_author()\n\n res = self.client.get(reverse('authors'))\n authors = Author.objects.all()\n serializer = AuthorSerializer(authors, many=True)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def __init__(self, body: str, author: str):\n self.body = body\n self.author = author", "def test_get_authors_from_data(self):\n responses.add(\n responses.GET,\n \"https://openlibrary.org/authors/OL382982A\",\n json={\n \"name\": \"George Elliott\",\n \"personal_name\": \"George Elliott\",\n \"last_modified\": {\n \"type\": \"/type/datetime\",\n \"value\": \"2008-08-31 10:09:33.413686\",\n },\n \"remote_ids\": {\n \"isni\": \"000111\",\n },\n \"key\": \"/authors/OL453734A\",\n \"type\": {\"key\": \"/type/author\"},\n \"id\": 1259965,\n \"revision\": 2,\n },\n status=200,\n )\n results = self.connector.get_authors_from_data(self.work_data)\n result = list(results)[0]\n self.assertIsInstance(result, models.Author)\n self.assertEqual(result.name, \"George Elliott\")\n self.assertEqual(result.openlibrary_key, \"OL453734A\")\n self.assertEqual(result.isni, \"000111\")", "def test_escalation_of_an_article_with_author(self):\n token = self.user2.token()\n self.client.credentials(\n HTTP_AUTHORIZATION='Bearer ' + token)\n resp = self.escalate_an_article_successfully()\n self.assertEqual(resp.status_code, status.HTTP_400_BAD_REQUEST)", "def getRemoteAuthor(author_id):\n servers = Server.objects.all()\n for server in servers:\n if server.username and server.password:\n host = server.hostname\n if not host.endswith(\"/\"):\n host = host + \"/\"\n server_api = \"{}author/{}\".format(host, author_id)\n print('Request:')\n print(server_api)\n 
try:\n r = requests.get(server_api, auth=(server.username, server.password))\n print(r)\n if r.status_code in [200, 201]:\n return createRemoteAuthor2(r.json(), author_id)\n except Exception as e:\n print(e)\n return None", "def __init__(self, body: str, author: str):\n\t\tself.body = body\n\t\tself.author = author", "def fromdict(cls, author_info: dict) -> 'Author':\n return cls(fullname=author_info.get('fullname'))", "def test_create(client):\n rv = create(client, reponame='Michael', url='https://github.com/Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'\n assert json.loads(rv.data.decode())['url'] == 'https://github.com/Michael'", "def test_get_specific_authors_profile(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(reverse(\"profiles:profile\", kwargs={\n 'username':self.register_data['user']['username'],\n }), format='json', HTTP_AUTHORIZATION='Token ' +token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def author(self, author: str):\n\n self._author = author", "def __add_author(self, key_name, others_names, personal_information):\n for name in others_names:\n self.author_to_authorID[name] = (key_name, personal_information)", "def get_author(self, attribute_name, default=None):\n return getattr(self, '%s__author' % attribute_name, default)", "def creator(self):\n return self._creator", "def creator(self):\n return self._creator", "def author(self, author):\n\n self._author = author", "def author(self, author):\n\n self._author = author", "def set_author (self, author):\n self.author = author", "def test_user_can_change_as_author(self):\n self.assertTrue(self.story.user_can_change(self.user1))", "def addNewAuthor(name: str, birth: str):\n if not name or not checkDate(birth):\n abort(400)\n author = Author(name=name, birth=birth)\n db.session.add(author)\n db.session.commit()\n app.logger.info(f\"New author with id: {author.id} added\")", "def set_author(self, author):\n self.author = author\n self.opf.author = author", "def Author():\n return html.Div(id=\"AuthorBox\", children=[\n html.Div(id=\"AuthorImage\", children=[\n html.Img(id=\"AImg\", src=app.get_asset_url('profile_dummy.png'))\n ]),\n html.Div(id=\"AuthorData\", children=[\n html.H1(\"Author Information\"),\n html.P(\"Name: name\"),\n html.P(\"Origin: place\"),\n html.P(\"Born: date, place\"),\n html.P(\"Date of death: date\"),\n html.P(\"Occupation: occupation\"),\n html.P(\"Publishing date of book: date\"),\n html.Br(),\n html.A(\"Link to Wikidata\", href='http://www.google.com', target=\"_blank\")\n ])\n ])", "def perform_create(self, serializer):\n org = self.kwargs['org_guid']\n serializer.validated_data['organization'] = Organization.objects.get(\n org_guid=org)\n serializer.validated_data['author'] = self.request.user\n return super(OrganizationNoteListView, self).perform_create(serializer)", "def test_cherrypick_author(repository: Repository) -> None:\n main = repository.head\n branch = repository.heads.create(\"branch\")\n author = pygit2.Signature(\"author\", \"[email protected]\")\n\n repository.checkout(branch)\n (repository.path / \"a\").touch()\n repository.commit(author=author)\n\n repository.checkout(main)\n repository.cherrypick(branch.commit)\n\n assert author.email == repository.head.commit.author.email", "def creator(self) -> str:\n return pulumi.get(self, \"creator\")", "def creator(self) -> str:\n return pulumi.get(self, \"creator\")", "def author(self):\n\n for item in 
self.metadata:\n if item.tag.localname == \"creator\":\n if 'file-as' in item.tag:\n return item.tag['file-as']\n else:\n return item.tag.text", "def add_author_node(a, nodes, retval, size=0):\n if a.id not in nodes:\n nodes[a.id] = len(nodes)\n retval[\"nodes\"].append({\"id\": str(a.id), \"title\": a.name, \"size\": size})", "def get_author(patch):\n\n\tauthor = patch.attributes['author'].value\n\tif author in authormap:\n\t\tauthor = authormap[author]\n\tif not len(author):\n\t\tauthor = \"darcs-fast-export <darcs-fast-export>\"\n\t# add missing name\n\telif not \">\" in author:\n\t\tauthor = \"%s <%s>\" % (author.split('@')[0], author)\n\t# avoid double quoting\n\telif author[0] == '\"' and author[-1] == '\"':\n\t\tauthor = author[1:-1]\n\t# name after email\n\telif author[-1] != '>':\n\t\tauthor = author[author.index('>')+2:] + ' ' + author[:author.index('>')+1]\n\treturn author.encode('utf-8')", "def createOtherUser(self, email):\n from soc.models.user import User\n from soc.modules.seeder.logic.providers.user import FixedUserProvider\n properties = {'account': FixedUserProvider(value=email), 'status': 'valid'}\n self.user = seeder_logic.seed(User, properties=properties)\n return self.user", "def create_account(self, short_name, author_name=None, author_url=None,\n replace_token=True):\n response = self._telegraph.method('createAccount', values={\n 'short_name': short_name,\n 'author_name': author_name,\n 'author_url': author_url\n })\n\n if replace_token:\n self._telegraph.access_token = response.get('access_token')\n\n return response", "def creator(self, creator):\n self._creator = creator", "def parse_author(self, response):\n i = AuthorItem()\n i['name'] = response.xpath('//h3[@class=\"author-title\"]/text()').extract_first().strip()\n i['birth_date'] = response.xpath('//span[@class=\"author-born-date\"]/text()').extract_first()\n birth_location = response.xpath('//span[@class=\"author-born-location\"]/text()').extract_first()\n if birth_location:\n i['birth_location'] = birth_location.replace('in ', '')\n i['description'] = response.xpath('//div[@class=\"author-description\"]/text()').extract_first().strip()\n i['url'] = response.url\n return i", "def set_author(self, **kwargs):\n self.author_name = kwargs.get('name')\n self.author_url = kwargs.get('url')\n self.author_icon = kwargs.get('icon_url')", "def test_find_first_author(self):\n inv_search = 'firstauthor:ellis'\n spi_search = 'find fa ellis'\n self._compare_searches(inv_search, spi_search)" ]
[ "0.79695106", "0.7857562", "0.7657986", "0.746278", "0.71505", "0.7013657", "0.6913632", "0.6895028", "0.68653756", "0.6783304", "0.6783304", "0.66965", "0.6666494", "0.66540325", "0.6537066", "0.64950055", "0.64649695", "0.64644116", "0.6461421", "0.6401893", "0.63745886", "0.63745886", "0.63745886", "0.63187766", "0.6296003", "0.62883323", "0.6214415", "0.6202189", "0.61968625", "0.6191612", "0.61798036", "0.6139476", "0.6138959", "0.61362803", "0.6119709", "0.61044985", "0.6103059", "0.6103059", "0.609866", "0.608228", "0.60554665", "0.60243547", "0.5997057", "0.5994019", "0.591355", "0.590206", "0.5877489", "0.5876957", "0.58753407", "0.58467704", "0.5838141", "0.5838008", "0.58365077", "0.5821965", "0.5811594", "0.58098555", "0.5789639", "0.57866806", "0.57766914", "0.57717395", "0.57535046", "0.5752219", "0.57467437", "0.5738041", "0.5722272", "0.57165575", "0.5711795", "0.57084334", "0.5701698", "0.5701289", "0.56952906", "0.56915784", "0.5683564", "0.56807303", "0.5669362", "0.5662511", "0.5658746", "0.56580216", "0.5655612", "0.5655612", "0.5652671", "0.5652671", "0.56432796", "0.5625825", "0.5624948", "0.5616431", "0.5615768", "0.5612111", "0.5608777", "0.56024724", "0.56024724", "0.5589069", "0.55843705", "0.5580549", "0.55783194", "0.5577873", "0.55757064", "0.5566047", "0.55535805", "0.55534977" ]
0.7088164
5
Returns a list of created posts for the given author
def create_multiple_posts(author, num, ptext = TEXT, visibility = ACL_DEFAULT): posts = [] for i in range(num): posts.append(Post.objects.create(content = ptext, author = author, visibility=visibility)) return posts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queryset(self):\r\n\r\n user = get_object_or_404(User, username=self.kwargs.get('username'))\r\n return Post.objects.filter(author=user).order_by('-date_posted')", "def get_queryset(self):\n id = self.kwargs['pk']\n target_author=get_object_or_404(Author, pk = id)\n return Post.objects.filter(author=target_author)", "def author_posts(request, author_id):\n id = int(author_id)\n user = myUser.objects.get(user_id=id)\n if user.is_admin:\n posts = Post.objects.select_related('author').order_by('-modified')\n else:\n posts = Post.objects.select_related('author').filter(author_id=id).order_by('-modified')\n\n return render(request, 'posts/authors.html',\n {'posts': posts})", "def postCreate(post):\n post_list = list()\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n # visible_to = list(post.visibleTo)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list", "def author_articles(self):\n return ArticlePage.objects.live().filter(author=self).order_by('-date')", "def get_queryset(self):\n author = self.kwargs['author']\n target_author = get_object_or_404(Blog, author=author)\n return Blog.objects.filter(author=target_author)", "def recent_posts(self):\n\n try:\n jsondoc = json.load(urllib.urlopen(\"http://reddit.com/user/%s.json\" % self.username))\n except:\n raise self.DoesNotExist\n \n posts = []\n for item in jsondoc['data']['children']:\n if item['kind'] == 't1':\n posts.append(Comment(item['data']))\n elif item['kind'] == 't3':\n posts.append(item['data'])\n\n return posts", "def get_quotes_for_author(self, author: str) -> List[Quote]:\n params = (f'%{author}%',)\n query = '''\n SELECT *\n FROM quotes\n WHERE author LIKE ?\n ORDER BY created_at DESC\n '''\n\n ret = self.__execute_query(query, params)\n\n return self.__build_quotes_from_query_result(ret.fetchall())", "def postList(posts):\n post_list = list()\n for post in posts:\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list", "def get_posts(self):\n return Post.select().where (Post.user == self)", "def getPosts():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT username FROM users WHERE 
id = ?\", [user_id])\n name = cur.fetchall()[0][0]\n cur.execute(\"SELECT * FROM posts WHERE name IN (SELECT following FROM followers WHERE user = ?) OR name = ?\", (name, name))\n posts = cur.fetchall()\n return posts", "def get_queryset(self):\n user: User = self.request.user\n following_users = user.profile.following.all()\n return Post.objects.filter(author__in=following_users).order_by('created')", "def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])", "def getAuthor(self):\n\t\tself.authorList = [submission.author for submission in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.authorList", "def get_posts_for_user(account_pk):\n where = \"WHERE account_pk = ?\"\n values = (account_pk, )\n orders = \"ORDER BY time DESC\"\n return Post.select_many(where, orders, values)", "def getMyPosts():\n \n cur, user_id = initialise(3)\n cur.execute(\"SELECT username FROM users WHERE id = ?\", [user_id])\n name = cur.fetchall()[0][0]\n cur.execute(\"SELECT * FROM posts WHERE name = ?\", [name])\n posts = cur.fetchall()\n return posts", "def get_posts(self):\n return self.blog_posts.all()", "def get_all_posts(self):\n cur = self.conn.cursor()\n\n query = 'SELECT blog.blog_id as id, blog.title as title, ' \\\n 'blog.subtitle as subtitle, ' \\\n 'blog.content as content, blog.date as date, ' \\\n 'author.name as author ' \\\n 'FROM blog, author ' \\\n 'WHERE blog.author_id = author.author_id ' \\\n 'ORDER BY blog_id DESC '\n\n posts = []\n cur.execute(query)\n\n for row in cur.fetchall():\n posts.append(dict(row))\n\n return posts", "def list_posts(request):\n if request.method == 'POST':\n category = request.POST.get('category', False)\n posts = Post.objects.select_related('author')\\\n .filter(category=category)\\\n .order_by('-modified')\n # import pdb; pdb.set_trace()\n return render(request, 'posts/index.html',\n {'posts': posts})\n\n posts = Post.objects.select_related('author').order_by('-modified')\n likes = Likes.objects.select_related('post')\n\n return render(request, 'posts/index.html',\n {'posts': posts})", "def remotePostList(host, posts, public):\n post_list = list()\n posts = posts.get('posts')\n for post in posts:\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('source')\n comments = remoteCommentList(post)\n count = post.get('count')\n next = \"{}/api/posts/{}/comments\".format(DOMAIN, id)\n if host.endswith(\"/\"):\n host = host[:-1]\n source = \"{}/posts/{}\".format(host, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 
'origin': origin,\n 'source': source, 'count': count, 'next': next}\n post_list.append(post_dict)\n return post_list", "def get_posts(self): #return list of posts that are associated with this blog_id\n return Post.find_posts_for_blog_id(self.blog_id) #this will return a list of posts objects", "def addAuthor2():\n\n author_list = list()\n\n authors = Author.objects.all()\n\n for author in authors:\n author_dict = dict()\n author_dict['id'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n author_dict['host'] = \"{}/api/\".format(author.host_url)\n author_dict['displayName'] = author.username\n author_dict['url'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n\n author_list.append(author_dict)\n\n return author_list", "def feed(request):\n followers = request.user.profile.followers.values_list('pk', flat=True)\n posts = Post.objects.filter(author_id__in=followers)\n\n return render(request,\n 'posts/feed.html',\n {'posts': posts})", "def posts_for_feed():\n user_id = session.get('user_id')\n friend_posts = Post.query.join(Friend, db.and_(Post.user_id == Friend.user_2,\n Friend.active == True)).outerjoin(Comment, db.and_(Comment.post_id == Post.post_id,\n Comment.active == True)).filter(Friend.user_1 == user_id,\n Post.active == True).order_by(Post.post_id.desc()).all()\n\n post_list = []\n for post in friend_posts:\n post_list.append(post.to_dict_for_json())\n\n resp = make_response(jsonify(post_list), 200)\n return resp", "def get_posts():\n url = app.config['POSTS_ENDPOINT']\n response = requests.get(url, params={})\n if response.status_code == 200:\n return parse_posts(response.json())\n raise RuntimeError('Error in retrieving posts.')", "def get_posts(self, published=False) -> Type[QuerySet]:\n categories = self.get_descendant_categories()\n posts = Post.objects.filter(categories__in=categories)\n if published:\n posts = posts.filter(published__lte=timezone.now())\n return posts", "def users_posts():\n\n user_id = session.get('user_id')\n posts = Post.query.outerjoin(Comment, db.and_(Comment.post_id == Post.post_id, \n Comment.active == True)).filter(Post.user_id == user_id,\n Post.active == True).order_by(Post.post_id.desc()).all()\n post_list = []\n for post in posts:\n post_list.append(post.to_dict_for_json())\n\n resp = make_response(jsonify(post_list), 200)\n\n return resp", "def task_fetch_posts(\n author_id,\n count=28,\n posts_out='data/posts_data.xlsx'):\n\n # Create query instances for posts\n post_query = Query(PostParser)\n\n # Query posts data\n post_data = post_query.query_all(POSTS_QUERY_HASH_PARAM, {\n \"id\": author_id,\n \"first\": 50,\n }, count)\n logger.info(\"Count of posts data: %d\" % len(post_data))\n\n # Save the posts data\n post_data_df = pd.DataFrame(post_data)\n post_data_df.to_excel(posts_out, encoding='utf-8', index=False)\n logger.info(\"Save the posts data to %s.\" % posts_out)", "def published_posts(self) -> Type[QuerySet]:\n return Post.objects.filter(published__lt=timezone.now()).order_by('-published')", "def add(self, author, post):\n if not author in self.authors:\n self.authors.append(author)\n self.posts[author].append(post)\n return", "def get_all_posts_json():\n\n posts = [\n {\n \"postId\": post.post_id,\n \"postPrompt\" : crud.get_prompt_by_prompt_id(post.prompt_id),\n \"postText\": post.post_text,\n \"location\": post.user_facing_location,\n \"dateCreated\": post.created_at,\n \"toneQualities\": crud.get_tone_qualities_by_post_id(post.post_id),\n }\n for post in crud.get_post_by_user_id(session['user_id'])\n ]\n\n return 
jsonify(posts)", "def get_posts(request):\n posts = Post.objects.order_by(\"created_date\")\n return render(request, \"blogposts.html\", {\"posts\": posts})", "def get_posts(self):\r\n postList = []\r\n for tag in self.setting.imgurTags:\r\n try:\r\n req = requests.get('%s%s' % (self.setting.tagLink, tag), headers=self.setting.imgurHeaders)\r\n for post in req.json()['data']['items']:\r\n p = self.json_to_post(post, tag)\r\n if p is not None:\r\n postList.append(p)\r\n except Exception as e:\r\n self.logger.log(logger.LogLevel.CRITICAL, 'imgur.get_posts exception(%s): %s' % (tag, e))\r\n break\r\n return postList", "def get_posts():\n\n error_on_unauthorized()\n \n posts = Post.query.order_by(Post.id)\n total_num = posts.count()\n\n if total_num == 0:\n return jsonify(total=0, uploads=[])\n\n try:\n count = int(request.args.get('max', total_num))\n page = int(request.args.get('page', 1))\n origin = request.args.get('origin', None)\n\n if count <= 0 or page <= 0:\n raise APIError(422, \"Query parameters out of range\")\n\n if origin is not None:\n posts = posts.filter(User.origin == origin)\n\n begin = (page - 1) * count\n end = min(begin + count, total_num)\n \n return jsonify(total=total_num, posts=[p.to_dict() for p in posts.all()[begin:end]]), 200\n except ValueError:\n raise APIError(422, \"Invalid query parameter\")", "def books_by_author(self, author):\n request_url = \"%s?author=%s\" % (self.API_URL, author)\n json_data = self.make_request(request_url)\n if not json_data:\n return []\n books = []\n for book in json_data['docs']:\n books.append(book['title_suggest'])\n return books", "def get_posts():\n\n\tposts = []\n\n\trows = db().select(db.survey.ALL, orderby=~db.survey.created_on)\n\tfor i, r in enumerate(rows):\n\t\t\n\t\t\tt = dict(\n\t\t\t\tuser_email = r.user_email,\n\t\t\t\tuser_name = get_user_name_from_email(r.user_email),\n\t\t\t\tquestion = r.question,\n\t\t\t\tcreated_on = r.created_on,\n\t\t\t\topt1 = r.opt1,\n\t\t\t\topt2 = r.opt2,\n\t\t\t\topt3 = r.opt3,\n\t\t\t\topt4 = r.opt4,\n\t\t\t\tres1 = r.res1,\n\t\t\t\tres2 = r.res2,\n\t\t\t\tres3 = r.res3,\n\t\t\t\tres4 = r.res4,\n\t\t\t\t#created_on_human = humanize.naturaltime(r.created_on),\n\t\t\t\tupdated_on = r.updated_on,\n\t\t\t\t#updated_on_human = r.updated_on_human,\n\t\t\t\tid = r.id,\n\t\t\t)\n\t\t\tposts.append(t)\n\n\tlogged_in = auth.user_id is not None\n\temail = None\n\tif logged_in:\n\t\temail = auth.user.email\n\n\treturn response.json(dict(\n\t\tposts=posts,\n\t\tlogged_in=logged_in,\n\t\temail=email,\n\t))", "def get_authors(self, blogid=1):\n return self.execute('wp.getAuthors', blogid, self.username, self.password)", "def get_queryset(self):\n return Post.objects.filter(pub_date__lte=timezone.now())", "def get_queryset(self):\n return Post.objects.filter(pub_date__lte=timezone.now())", "async def get_reblogged_by(db, author: str, permlink: str):\n post_id = await _get_post_id(db, author, permlink)\n assert post_id, \"post not found\"\n sql = \"\"\"SELECT name FROM hive_accounts\n JOIN hive_feed_cache ON id = account_id\n WHERE post_id = :post_id\"\"\"\n names = await db.query_col(sql, post_id=post_id)\n names.remove(author)\n return names", "def get_queryset(self):\n return Post.objects.filter(\n user_id=self.kwargs['user_id'], status='published', visibility='public'\n )", "def authors(self):\n user_ids = set(r.author.id for r in self.history())\n return User.query.find({'_id': {'$in': list(user_ids)}}).all()", "def _get_participants_with_posts(self, participants):\n participants_with_posts = []\n 
for p in participants:\n posts = p.get_posts_from_qtnre_answer_date(self._days)\n if len(posts) > 0:\n participants_with_posts.append(p)\n\n return participants_with_posts", "def test_return_list_of_posts(self):\n self.create_new_user()\n self.create_new_posts()\n response = self.c.get('/wall/',\n content_type=\"application/json\")\n\n assert 200 == response.status_code\n assert 2 == len(response.json()['data']['posts'])\n assert response.json()['data']['posts'][0]['message'].startswith('All animals are equal')\n assert response.json()['data']['posts'][1]['message'].startswith('War is peace')", "def queryset(self, request, queryset):\n # 返回文章queryset里面 所有指定作者的文章\n author_id = self.value()\n if author_id:\n return queryset.filter(author__id=author_id)\n else:\n return queryset", "def commentList(post):\n comments = Comment.objects.all().filter(post=post).order_by('-published')\n remote_comments = RemoteComment.objects.all().filter(post=post).order_by('published')\n comment_list = list()\n\n if comments:\n for comment in comments:\n comment_dict = dict()\n comment_dict['author'] = addAuthor(comment.author)\n comment_dict['comment'] = comment.comment\n comment_dict['contentType'] = comment.contentType\n comment_dict['published'] = comment.published\n comment_dict['id'] = comment.id\n comment_list.append(comment_dict)\n if remote_comments:\n for remote in remote_comments:\n remote_dict = dict()\n server = remote.server\n r = requests.get(remote.author, auth=(server.username, server.password))\n if r.status_code == 200:\n author = remoteAddAuthor(r.json())\n remote_dict['author'] = author\n remote_dict['comment'] = remote.comment\n remote_dict['contentType'] = remote.contentType\n remote_dict['published'] = remote.published\n remote_dict['id'] = remote.id\n comment_list.append(remote_dict)\n else:\n continue\n\n comment_list = sorted(comment_list, key=lambda k: k['published'], reverse=True)\n\n return comment_list", "def assertAuthorsInPosts(context, authors, posts):\n cross_check(context, authors, posts)", "def filter_posts(request):\n if request.is_ajax():\n id_user = int(request.POST.get('id_user'))\n if id_user>0:\n return render(request, \"posts_list.html\",\n {\"posts\": Post.objects.filter(\n author_id=id_user).order_by('date_pub'),\n })\n else:\n return render(request, \"posts_list.html\",\n {\"posts\": Post.objects.all().order_by('date_pub'),\n })\n return None", "def get_recent_posts(self, request, count):\n if request.has_permission('edit'):\n return DBSession.query(Post).filter_by(blog=self).order_by('created desc').slice(0, count).all()\n else:\n return DBSession.query(Post).filter_by(blog=self, published=True).order_by('created desc').slice(0, count).all()", "def get_queryset(self):\n return Post.objects.filter(published_date__isnull=True).order_by('created_date')", "def get_queryset(self):\n user = self.request.user\n return Task.objects.filter(author=user)", "def list_posts(params, db_conn):\n\n skip = params.get('skip') or 0\n limit = params.get('limit') or 10\n params = omit(params, ('skip', 'limit',))\n query = (r.table(post_schema['tablename'])\n .filter(params)\n .order_by(r.asc('created'))\n .skip(skip)\n .limit(limit))\n return list(query.run(db_conn))", "def get_all_posts(self, *fields):\n if fields:\n posts = self.collection.find(projection=fields)\n else:\n posts = self.collection.find()\n\n for post in posts.sort('created_datetime', -1):\n yield BlogPost(\n title=post['title'],\n content=post['content'],\n created_datetime=post['created_datetime']\n )", "def 
posts_list(request):\n\n # recupera posts\n posts = Post.objects.select_related(\"owner\").filter(Q(publish_at__lte=now())).all()\n categorias = Categoria.objects.all()\n\n # prepara el contexto de la plantilla\n context = {\n 'post_objects': posts,\n 'categoria_objects': categorias\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/inicio.html', context)", "def getReposts(owner_id=None, post_id=None, offset=None, count=None):\n params = {\n 'owner_id': owner_id,\n 'post_id': post_id,\n 'offset': offset,\n 'count': count\n }\n result = call('wall.getReposts', **params)\n return parse_response(result)", "def get_queryset(self):\n return Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')", "def get_queryset(self):\n return Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')", "def get_posts():\n db = psycopg2.connect(\"dbname=forum\")\n c = db.cursor()\n query = \"SELECT content, time FROM posts ORDER BY time DESC\"\n c.execute(query)\n rows = c.fetchall()\n POSTS = rows\n db.close()\n return POSTS", "def post_create(faker_obj, profile_obj, tag_list, num=3):\n for i in range(num):\n obj = faker_obj\n title = obj.sentence(nb_words=random.randint(5, 10))\n author = User.objects.get(id=profile_obj)\n body = \" \".join(obj.paragraphs(nb=random.randint(8, 20)))\n status = \"published\"\n post = Post.objects.create(title=title, author=author, body=body, status=status)\n post.tags.add(\", \".join(random.sample(tag_list, 1)))\n print(\n \"Created post title:'{}' for user '{}'\".format(post.title, author.username)\n )\n create_comment_list(obj, post)", "def getAuthors(self):\n authors = []\n for each in self.context.getAuthors():\n title = each['title']\n firstname = each['firstname']\n middlename = each['middlename']\n lastname = each['lastname']\n author = Author(title, firstname, middlename, lastname)\n authors.append(author)\n return authors", "def generatePosts(self,**kwargs):\n oldestTimeSoFar = None\n while True:\n if oldestTimeSoFar is None:\n items = self.getPosts(**kwargs)\n else:\n items = self.getPosts(before_time=oldestTimeSoFar,**kwargs)\n if not items:\n return\n for item in items:\n yield item\n oldestTimeSoFar = item['published_at']\n time.sleep(0.5)", "def get_queryset(self):\n\t\treturn Post.objects.order_by('-pub_date')[:5]", "def get(self):\n return get_all_posts()", "def select_posts(self, subreddit_name=None, daterange=None, utc=True, include_removed=True):\n subreddit_name = subreddit_name.lower()\n \n query = self.session.query(Post)\n if subreddit_name is not None:\n query = query.filter(Post.subreddit.has(name=subreddit_name))\n if daterange is not None:\n if utc:\n if daterange[0] is not None:\n query = query.filter(Post.created_utc >= daterange[0])\n if daterange[1] is not None:\n query = query.filter(Post.created_utc <= daterange[1])\n else:\n if daterange[0] is not None:\n query = query.filter(Post.created >= daterange[0])\n if daterange[1] is not None:\n query = query.filter(Post.created <= daterange[1])\n if not include_removed:\n query = query.filter(Post.selftext != \"[removed]\")\n\n return [self._post_entry_to_model(entry) for entry in query.all()]", "def own(self, request, pk=None):\n queryset = self.get_queryset()\n queryset = queryset.filter(author=request.user)\n serializer = PostSerializer(queryset, many=True, context={\"request\": request})\n return Response(serializer.data, status=status.HTTP_200_OK)", "def get_posts(wp):\n from wordpress_xmlrpc.methods.posts import 
GetPosts\n\n all_posts = []\n\n offset = 0\n increment = 20\n while True:\n posts = wp.call(GetPosts({'number': increment, 'offset': offset, 'post_type': 'post'}))\n if len(posts) == 0:\n break # no more posts returned\n for post in posts:\n all_posts.append(post)\n\n offset = offset + increment\n\n return all_posts", "def retrieves_posts_on_home(self: User, from_id: Optional[str]) -> List[Post]:\n def _filter_post(p):\n return sees_post(self, p, context_home_or_profile=True)\n\n return get_page(\n mongoengine_model=Post,\n extra_query_args={},\n extra_filter_func=_filter_post,\n from_id=from_id,\n page_count=HomePostsPageSize\n )", "def get_authors(self, instance):\n\n # Get Authors in the specified order\n author_order = Author.objects \\\n .filter(dataset_id=instance.id) \\\n .order_by('order')\n\n # Put in a list\n authors = [a.author for a in author_order]\n\n # Return a list of person urls\n serializers = PersonSerializer(authors, many=True, context={'request': self.context['request']}).data\n return [p[\"url\"] for p in serializers]", "def posts_get():\n title_like = request.args.get(\"title_like\")\n body_like = request.args.get(\"body_like\")\n\n posts = session.query(Post)\n if title_like:\n if body_like:\n posts = posts.filter(\n Post.title.contains(title_like), Post.body.contains(body_like))\n else:\n posts = posts.filter(Post.title.contains(title_like))\n posts = posts.all()\n\n data = json.dumps([post.as_dictionary() for post in posts])\n return Response(data, 200, mimetype=\"application/json\")", "def load_posts(post_ids, current_user_id=None):\r\n logging.warn(\"Ids===={}\".format(post_ids))\r\n\r\n # If list is not used, or any call that trigger __iter__ will end up with the query syntax\r\n # rather than the data itself.\r\n #posts_query = Post.objects.filter(id__in=post_ids).limit(100).allow_filtering()\r\n #post_counters = list(PostCounter.objects.filter(id__in=post_ids).limit(100).allow_filtering())\r\n\r\n post_objects = []\r\n # ok ,\r\n for post_id in post_ids:\r\n p = Post.objects.get(id=post_id)\r\n\r\n try:\r\n pc = PostCounter.objects.get(id=post_id) #filter(lambda x: x.id == post.id, post_counters)\r\n stats = pc._as_dict()\r\n del stats['id']\r\n p.__dict__['statistics'] = stats\r\n except DoesNotExist, dne:\r\n pass\r\n\r\n if current_user_id is not None:\r\n try:\r\n pv = PostVote.objects.get(post_id=post_id, user_id=current_user_id)\r\n p.__dict__['upvoted'] = True\r\n except DoesNotExist, dne:\r\n pass\r\n post_objects.append(p)\r\n\r\n return post_objects", "def new_posts(self, number_posts=5) -> Type[QuerySet]:\n return self.published_posts()[:number_posts]", "def getSubmissionsByUser(self, i):\r\n return [(ind,sub) for ind, sub in enumerate(self.submissions) if sub.authorId == i]", "def get_user_posts(request):\n if request.method == \"POST\":\n token = request.data.get('token')\n post_id = request.data.get('post_id')\n type_ = request.data.get('type')\n\n if Token.objects.filter(key=token).exists():\n token = get_object_or_404(Token, key=token)\n if post_id == -1:\n posts = Post.objects.all().order_by(\"-date\")[:PAGE_OFFSET]\n elif type_ == 'old':\n posts = Post.objects.filter(pk__lt=post_id).order_by(\"-date\")[:PAGE_OFFSET]\n else: # 'new'\n posts = reversed(Post.objects.filter(pk__gt=post_id).order_by(\"date\")[:PAGE_OFFSET])\n\n serializer = PostSerializer(posts, many=True, context={'user_id': token.user_id})\n return Response({\"success\": 29,\n \"post\": serializer.data})\n else:\n return Response({\"error\": 17})", "def get_posts(subreddits, 
limit, user_agent=default_user_agent):\n all_posts = []\n\n for subreddit in subreddits:\n print(subreddit)\n data_url = 'https://www.reddit.com/r/{}.json?limit={}'.format(subreddit, limit)\n response_data = requests.get(data_url, headers = {'User-agent': user_agent})\n\n posts = response_data.json()['data']['children']\n\n all_posts.extend(posts)\n\n return all_posts", "def articles(self):\n articles = Post.objects.live().descendant_of(self)\n articles = articles.order_by('-date')\n\n return articles", "def remotePostCreate(host, post):\n post = post.get('posts')[0]\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('origin')\n count = post.get('count')\n comments = remoteCommentList(post)\n source = \"{}/api/posts/{}\".format(DOMAIN, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin, 'count': count,\n 'source': source}\n return post_dict", "def fetch_posts():\n get_chain_address = F\"{CONNECTED_NODE_ADDRESS}/chain\"\n response = requests.get(get_chain_address)\n if response.status_code == 200:\n content = []\n chain = json.loads(response.content)\n for block in chain[\"chain\"]:\n for tx in block[\"transactions\"]:\n tx[\"index\"] = block[\"index\"]\n tx[\"hash\"] = block[\"previous_hash\"]\n content.append(tx)\n \n global posts \n posts = sorted(content,\n key=lambda k: k['timestamp'],\n reverse=True)", "def _filter_posts(posts):\n\n return filter(_filter_post, posts)", "def author_profile(request, pk):\n author = get_object_or_404(User, pk=pk)\n profileposts = ProfilePost.objects.filter(user=author).filter(published_date__lte=timezone.now()\n ).order_by('-published_date').all()\n return render(request, 'profile.html', {\"profile\": author, 'profileposts': profileposts})", "def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries", "def queryset(self, request):\n if request.user.is_superuser:\n return Entry.objects.all()\n return Entry.objects.filter(author=request.user)", "def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]", "def render_posts(self, **params):\n\n if \"user_posts\" in params:\n posts = params['user_posts']\n else:\n posts = Post.get_all()\n\n rendered_posts = \"\"\n for post in posts:\n rendered_posts += self.render_post(post, **params)\n\n self.render(\"blog/blog.html\", rendered_posts=rendered_posts)", "def retrieves_posts_on_profile(self, profile_user, from_id):\n def _filter_post(p):\n return sees_post(self, p, context_home_or_profile=False)\n\n return get_page(\n mongoengine_model=Post,\n extra_query_args={\n 'author': profile_user\n },\n extra_filter_func=_filter_post,\n from_id=from_id,\n page_count=ProfilePostsPageSize\n )", "def add_new_posts(last_updated=None):\n for blog in Blog.objects.all():\n try:\n document = feedparser.parse(blog.feed_url)\n except:\n print \"error 
parsing\"\n continue\n\n if last_updated is None:\n print(\"- Adding %i articles from %s\" % (len(document['entries']), blog.title))\n\n for entry in document['entries']:\n # now we create a new post\n post = Post()\n post.blog = blog\n post.title = entry['title']\n\n if 'summary' in entry:\n post.content = entry['summary']\n if 'content' in entry:\n post.content = entry['content']\n\n post.link = entry['link']\n post.save()\n else:\n # TODO: only parse from a date\n pass", "def by_post_id(cls, post_id):\n return cls.all().filter('post_id =', post_id).order('-created').fetch(limit=20)", "def add_authors(self, author_data, instance):\n for idx, author in enumerate(author_data):\n Author.objects.create(dataset=instance, order=idx, author=author)", "def get_user_posts(self, request):\n post_objects = Post.objects.filter(liked_users__id=request.user.id)\n avg_user_liked_post_weight = self.__avg_user_interested_post_weight(post_objects)\n queryset = self.__user_interested_post_filter(avg_user_liked_post_weight)\n context = {'user':request.user}\n serializer = PostSerializer(queryset, many=True, context=context)\n return Response({'data': serializer.data}, status=status.HTTP_200_OK)", "def get_queryset(self):\n return Post.objects.order_by('-posted')[:5]", "def get_user_posts(user_id: str) -> List[dict]:\n url = f'https://jsonplaceholder.typicode.com/posts?userId={user_id}'\n\n json_data = [{}]\n\n response = get(url)\n\n if response.status_code == 200:\n json_data = response.json()\n\n return json_data", "def recent_posts(self, horizon: int=30) -> Type[QuerySet]:\n delta = timezone.timedelta(horizon)\n start_date = timezone.now() - delta\n return self.published_posts().filter(published__gte=start_date)", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_authors = \"\"\"\n SELECT aut.name, COUNT(lg.id) AS views\n FROM articles AS art\n JOIN log AS lg ON art.slug = SUBSTRING(lg.path,10)\n AND lg.status = '200 OK'\n JOIN authors AS aut ON aut.id = art.author\n GROUP BY aut.name\n ORDER BY views desc; \"\"\"\n c.execute(query_popular_authors)\n authors = from_db_cursor(c)\n db.close()\n return authors", "def get_public_posts(server_posts):\n public_list = server_posts\n servers = Server.objects.all()\n\n for server in servers:\n if server.username and server.password:\n host = server.hostname\n if not host.endswith(\"/\"):\n host = host + \"/\"\n server_api = \"{}posts\".format(host)\n try:\n s = requests.Session()\n # https://stackoverflow.com/questions/15431044/can-i-set-max-retries-for-requests-request\n retries = Retry(total=5,\n backoff_factor=0.1,\n status_forcelist=[500, 502, 503, 504])\n\n s.mount('http://', HTTPAdapter(max_retries=retries))\n s.mount('https://', HTTPAdapter(max_retries=retries))\n\n r = s.get(server_api, auth=(server.username, server.password))\n\n if r.status_code == 200:\n posts = remotePostList(server.hostname, r.json(), public_list)\n public_list.extend(posts)\n public_list = sorted(public_list, key=lambda k: k['published'], reverse=True)\n public_list = [next(v) for k, v in groupby(public_list, lambda d: d[\"id\"])]\n\n except:\n print('error')\n return public_list", "def note_list(request):\n user = request.user\n notes = Note.objects.filter(author=user)\n serializer = NoteSerializer(notes, many=True)\n return Response(serializer.data)", "def post_list(request):\n # Only show the posts that have been published\n posts = Post.objects.filter(date_published__isnull=False)\n return render(request,\n 
'blog/post_list.html',\n {'posts': posts}\n )", "def get_publications_by_author(cached_list, cached_set, author_name):\n publications = { 'dblp': [], 'cdblp': [] }\n author = DBLPQuery.author_distinct(cached_list, cached_set, author_name)\n\n if author['dblp'].__contains__('publications'):\n publications['dblp'] = author['dblp']['publications']\n# for pub in author['dblp']['publications']:\n# print(pub)\n\n if author['cdblp'].__contains__('publications'):\n publications['cdblp'] = author['cdblp']['publications']\n# for pub in author['cdblp']['publications']:\n# print(pub)\n return publications", "def fetch_posts():\n get_chain_address = \"{}/chain\".format(CONNECTED_NODE_ADDRESS)\n response = requests.get(get_chain_address)\n if response.status_code == 200:\n content = []\n chain = json.loads(response.content)\n for pos, block in enumerate(chain[\"chain\"]):\n if pos ==0:\n pass\n else:\n for tx in list(block[\"transactions\"].values()):\n tx[\"index\"] = block[\"index\"]\n tx[\"hash\"] = block[\"previous_hash\"]\n content.append(tx)\n\n global posts\n posts = sorted(content, key=lambda k: k['timestamp'],\n reverse=True)", "def find_all(cls):\n return [AuthorModel(a['name'], str(a['_id']))\n for a in cls.db.newsdb.find()]", "def get_post(id, check_author=True):\r\n cur = get_db().cursor()\r\n cur.execute(\r\n 'SELECT p.id, title, body, created, author_id, username'\r\n ' FROM novel.post p JOIN novel.user u ON p.author_id = u.id'\r\n ' WHERE p.id = %s',id )\r\n\r\n post = cur.fetchone()\r\n if post is None:\r\n abort(404, \"Post id {0} doesn't exist.\".format(id))\r\n\r\n if check_author and post['author_id'] != g.user['id']:\r\n abort(403)\r\n\r\n return post", "def get_authors(draft):\n authors = []\n for a in draft.authors.all():\n initial = ''\n prefix, first, middle, last, suffix = a.person.name_parts()\n if first:\n initial = first + '. '\n entry = '%s%s <%s>' % (initial,last,a.address)\n authors.append(entry)\n return authors" ]
[ "0.6751652", "0.6719958", "0.6679745", "0.64851755", "0.6355382", "0.63139474", "0.6307772", "0.62711054", "0.6250475", "0.62041193", "0.61662126", "0.6099204", "0.6088763", "0.60696316", "0.6060949", "0.59918046", "0.59400725", "0.58352584", "0.58273214", "0.58035225", "0.5796463", "0.57482696", "0.5721598", "0.5719084", "0.570795", "0.5680417", "0.56639403", "0.56167555", "0.561523", "0.5614806", "0.5576493", "0.55551785", "0.55338323", "0.55220014", "0.55172426", "0.55164045", "0.5512481", "0.5498714", "0.5498714", "0.5495253", "0.5490945", "0.54843426", "0.54787844", "0.5477818", "0.54723835", "0.54678166", "0.54526806", "0.5438409", "0.54363", "0.54340863", "0.54081845", "0.53999686", "0.53910625", "0.53699154", "0.53653896", "0.5359778", "0.5359778", "0.53491837", "0.5345605", "0.5293267", "0.5293007", "0.52893615", "0.5276139", "0.5274806", "0.52643985", "0.52452725", "0.5240756", "0.52343684", "0.52230096", "0.5214948", "0.5206232", "0.520611", "0.5200782", "0.51883006", "0.5187968", "0.518241", "0.51766264", "0.51641", "0.5146495", "0.51431316", "0.51377815", "0.51375824", "0.5133609", "0.5128775", "0.5128434", "0.5127672", "0.51274824", "0.5127473", "0.5123646", "0.5112072", "0.51075804", "0.5092214", "0.5090441", "0.50806296", "0.5074247", "0.5073417", "0.50698906", "0.5066606", "0.5066024", "0.5056531" ]
0.6749558
1
Test to ensure that all authors added to the relationship are in the returned data. Called after a retrieve relationship test has passed.
def authors_in_relation(context, data, authors):
    guids = [a.id for a in authors]
    guids = map(lambda x: str(x).replace('-', ''), guids)

    for guid in guids:
        context.assertTrue(unicode(guid) in data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_retrieve_authors(self):\n sample_author()\n sample_author()\n\n res = self.client.get(reverse('authors'))\n authors = Author.objects.all()\n serializer = AuthorSerializer(authors, many=True)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_known_related_objects_identity_preservation(self):\n self.assertIs(self.aldous, self.brave_new_world.author)", "def test_retrieve_books(self):\n book = sample_book(publisher=self.publisher)\n book.author.add(sample_author())\n # book.publisher.add(sample_publisher())\n\n res = self.client.get(reverse('books'))\n books = Book.objects.all()\n serializer = BookSerializer(books, many=True)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_get_all_authors(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(self.user_author, format='json', HTTP_AUTHORIZATION='Token ' +token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_authors_from_data(self):\n responses.add(\n responses.GET,\n \"https://openlibrary.org/authors/OL382982A\",\n json={\n \"name\": \"George Elliott\",\n \"personal_name\": \"George Elliott\",\n \"last_modified\": {\n \"type\": \"/type/datetime\",\n \"value\": \"2008-08-31 10:09:33.413686\",\n },\n \"remote_ids\": {\n \"isni\": \"000111\",\n },\n \"key\": \"/authors/OL453734A\",\n \"type\": {\"key\": \"/type/author\"},\n \"id\": 1259965,\n \"revision\": 2,\n },\n status=200,\n )\n results = self.connector.get_authors_from_data(self.work_data)\n result = list(results)[0]\n self.assertIsInstance(result, models.Author)\n self.assertEqual(result.name, \"George Elliott\")\n self.assertEqual(result.openlibrary_key, \"OL453734A\")\n self.assertEqual(result.isni, \"000111\")", "def test_list_all_authors(self):\n response = self.client.get(reverse('authors') + '?page=2')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n self.assertTrue(response.context['is_paginated'] is True)\n self.assertTrue(len(response.context['author_list']) == 3)", "def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]", "def test_create_authors(self):\n payload = {\n 'first_name': 'testname1',\n 'last_name': 'testname2',\n 'nickname': 'testnick1'\n }\n\n res = self.client.post(reverse('authors'), payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n author = Author.objects.get(id=res.data['id'])\n\n for key in payload.keys():\n self.assertEqual(payload[key], getattr(author, key))", "def relationships(self):", "def test_authors():\n assert(hasattr(tekel, '__authors__'))", "def test_get_relationship_templates(self):\n pass", "def authors(self):\n authors = [\n n.people for n in self.pymbake_person_relationship.all()\n ]\n\n return authors", "def test_get_related_nodes(self):\n pass", "def test_retrieve_l_organizations(self):\n pass", "def test_get_specific_authors_profile(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(reverse(\"profiles:profile\", kwargs={\n 'username':self.register_data['user']['username'],\n }), format='json', HTTP_AUTHORIZATION='Token ' +token)\n 
self.assertEqual(response.status_code, status.HTTP_200_OK)", "def find_relations_among_authors():\n for book in books:\n if len(books[book]) > 1:\n for i in range(len(books[book])):\n known_relations[books[book][i]] = books[book][:i] + books[book][i+1:]", "def test_author_sorted_articles(self):\n\n self.make_test('articles', ArticleListSerializer, 'author:articles')", "def author_ManyToMany_entry_check(): #Old it was OneToMany before adding multiple authors\n import itertools\n entry_author_ids = itertools.chain(*Entry.objects.all().values_list('author_ids', flat=True))\n entry_author_ids_set = set(entry_author_ids)\n user_ids = set(User.objects.all().values_list('id',flat=True))\n\n author_id_not_in_user = entry_author_ids_set - user_ids\n\n if author_id_not_in_user:\n return (\"Error: There are entries without a correct cross relation with user: {}\"\n .format(\",\".join(str(s) for s in author_id_not_in_user)))\n else:\n return \"OK\"", "def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))", "def test_book_related(self):\n client = APIClient()\n client.login(username=self.students[0].username, password=\"salam*123\")\n response = client.get(\"/books/4/related/\")\n json = response.json()\n self.assertEqual(json[\"count\"], 2)\n self.assertEqual(json[\"results\"][0][\"id\"], 5)\n self.assertEqual(json[\"results\"][1][\"id\"], 2)", "def test_author_list_equality_with_valid_authentication(self) -> None:\n\n # Set the Authorization header to the appropriate\n # format as the rest_framework expects using utils.\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(\n self.super_author.get_key()\n ))\n\n response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(data, self.serialized_data, msg=data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def assertAuthorsInPosts(context, authors, posts):\n cross_check(context, authors, posts)", "def test_author_filtering(self):\n # Get a valid author\n entry = Entry.objects.get(id=1)\n params = {\"author\": entry.first_author.id}\n\n self._test_filtering(**params)", "def test_add_followers(self):\n pass", "def add_authors(self, author_data, instance):\n for idx, author in enumerate(author_data):\n Author.objects.create(dataset=instance, order=idx, author=author)", "def test_retrieve_publishers(self):\n sample_publisher()\n sample_publisher()\n\n res = self.client.get(reverse('publishers'))\n publishers = Publisher.objects.all()\n serializer = PublisherSerializer(publishers, many=True)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_create_edition_from_data(self):\n work = models.Work.objects.create(title=\"Hello\")\n responses.add(\n responses.GET,\n \"https://openlibrary.org/authors/OL382982A\",\n json={\"hi\": \"there\"},\n status=200,\n )\n with patch(\n \"bookwyrm.connectors.openlibrary.Connector.get_authors_from_data\"\n ) as mock:\n mock.return_value = []\n result = self.connector.create_edition_from_data(work, self.edition_data)\n self.assertEqual(result.parent_work, work)\n self.assertEqual(result.title, \"Sabriel\")\n self.assertEqual(result.isbn_10, \"0060273224\")\n self.assertEqual(result.description, self.edition_data[\"description\"][\"value\"])\n self.assertEqual(result.languages[0], \"English\")\n self.assertEqual(result.publishers[0], \"Harper Trophy\")\n self.assertEqual(result.pages, 491)\n self.assertEqual(result.subjects[0], \"Fantasy.\")\n 
self.assertEqual(result.physical_format, \"Hardcover\")", "def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])", "def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))", "def addAuthor2():\n\n author_list = list()\n\n authors = Author.objects.all()\n\n for author in authors:\n author_dict = dict()\n author_dict['id'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n author_dict['host'] = \"{}/api/\".format(author.host_url)\n author_dict['displayName'] = author.username\n author_dict['url'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n\n author_list.append(author_dict)\n\n return author_list", "def test_individuals(self):\n individuals = Individual.query().all()\n print len(individuals)\n print individuals\n assert len(individuals) == 2", "def test_author_sorted_topics(self):\n\n self.make_test('topics', TopicListSerializer, 'author:topics')", "def test_addOrganization(self):\r\n #fetch the object form the datastore\r\n org_obj = db.GqlQuery(\"SELECT * FROM Organization\")\r\n organization = addOrganization(org_obj.run().next())\r\n #view it as a dict\r\n organization_d = importer.etree_to_dict(organization)\r\n assert [{'name': u'Test Organization'},\r\n {'kind': u'TestOrgKind'},\r\n {'description': u'TestOrgDescription'},\r\n {'location': [{'city': u'Organization City'}, {'country': u'USA'}]},\r\n {'images': [\r\n {'image': [\r\n {'source': u'http://www.testimage.com'},\r\n {'description': u'Description of TestImage'}]}]},\r\n {'maps': [\r\n {'map': [{'source': u'http://maps.google.com'}, {'description': u'Map Description'}]}]},\r\n {'videos': [{u'youtube': u'r_8om4dsEmw'}]},\r\n {'social': [{u'twitter': u'@billgates'}]},\r\n {'citations': [\r\n {'citation': [\r\n {'source': u'http://maps.google.com'},\r\n {'description': u'Map Description'}]}]},\r\n {'external-links': [\r\n {'external-link': [\r\n {'source': u'http://www.google.com'},\r\n {'description': u'Google'}]}]}] in organization_d.values()", "def test_fetch_related_data_valid(self):\n resp = requests.post(\n _CONF[\"re_api_url\"] + \"/api/v1/query_results\",\n params={\"stored_query\": \"ws_fetch_related_data\", \"show_public\": True},\n data=json.dumps({\"obj_key\": \"1:1:1\"}),\n ).json()\n self.assertEqual(resp[\"count\"], 1)\n self.assertEqual(resp[\"has_more\"], False)\n res = resp[\"results\"][0]\n # Check the root object results\n self.assertEqual(res[\"obj\"][\"_key\"], \"1:1:1\")\n self.assertEqual(res[\"obj_type\"][\"_key\"], \"Module.Type1-1.0\")\n # Check the copy results\n self.assertEqual(res[\"copies\"][\"count\"], 1)\n self.assertEqual(len(res[\"copies\"][\"data\"]), 1)\n self.assertEqual(\n 
res[\"copies\"][\"data\"][0][\"data\"][\"_id\"], \"ws_object_version/1:2:1\"\n )\n self.assertEqual(res[\"copies\"][\"data\"][0][\"hops\"], 1)\n self.assertEqual(\n res[\"copies\"][\"data\"][0][\"type\"][\"_id\"], \"ws_type_version/Module.Type1-1.0\"\n )\n # Check the provenance results\n self.assertEqual(res[\"prov\"][\"count\"], 1)\n self.assertEqual(len(res[\"prov\"][\"data\"]), 1)\n self.assertEqual(\n res[\"prov\"][\"data\"][0][\"data\"][\"_id\"], \"ws_object_version/1:3:1\"\n )\n self.assertEqual(res[\"prov\"][\"data\"][0][\"hops\"], 1)\n self.assertEqual(\n res[\"prov\"][\"data\"][0][\"type\"][\"_id\"], \"ws_type_version/Module.Type1-1.0\"\n )\n # Check the ref results\n self.assertEqual(res[\"refs\"][\"count\"], 1)\n self.assertEqual(len(res[\"refs\"][\"data\"]), 1)\n self.assertEqual(\n res[\"refs\"][\"data\"][0][\"data\"][\"_id\"], \"ws_object_version/1:4:1\"\n )\n self.assertEqual(res[\"refs\"][\"data\"][0][\"hops\"], 1)\n self.assertEqual(\n res[\"refs\"][\"data\"][0][\"type\"][\"_id\"], \"ws_type_version/Module.Type1-1.0\"\n )", "def test_get_publication(self):\n pass", "def test_get_all_related(self):\n c1 = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"c1\")\n c2 = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"c2\")\n # if c1 is related to c2\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"c2\"])\n actual_output = api.get_all_related(channel_id=self.the_channel_id, content=c1)\n self.assertEqual(set(expected_output), set(actual_output))\n # then c2 should be related to c1\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"c1\"])\n actual_output = api.get_all_related(channel_id=self.the_channel_id, content=c2)\n self.assertEqual(set(expected_output), set(actual_output))", "def test_related_add_multiple_children(app, testdata):\n # Test language\n docs = testdata[\"documents\"]\n series = testdata[\"series\"]\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(docs[1][\"document_pid\"])\n ser3 = Series.get_record_by_pid(series[0][\"series_pid\"])\n\n assert len(doc1.related.editions) == 0\n assert len(doc2.related.editions) == 0\n assert len(ser3.related.editions) == 0\n\n doc1.related.add_edition(doc2)\n doc1.commit()\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(docs[1][\"document_pid\"])\n ser3 = Series.get_record_by_pid(series[0][\"series_pid\"])\n\n assert len(doc1.related.editions) == 1\n assert len(doc2.related.editions) == 1\n assert len(ser3.related.editions) == 0\n\n doc1.related.add_edition(ser3)\n doc1.commit()\n\n doc1 = Document.get_record_by_pid(docs[0][\"document_pid\"])\n doc2 = Document.get_record_by_pid(docs[1][\"document_pid\"])\n ser3 = Series.get_record_by_pid(series[0][\"series_pid\"])\n\n assert len(doc1.related.editions) == 2\n assert len(doc2.related.editions) == 2\n assert len(ser3.related.editions) == 2", "def authors(self, key, value):\n _authors = self.get(\"authors\", [])\n item = build_ils_contributor(value)\n if item and item not in _authors:\n _authors.append(item)\n try:\n if \"u\" in value:\n other = [\"et al.\", \"et al\"]\n val_u = list(force_list(value.get(\"u\")))\n if [i for i in other if i in val_u]:\n self[\"other_authors\"] = True\n except UnexpectedValue:\n pass\n return _authors", "def test_retrieve_l_organization(self):\n pass", "def 
test_retrieve_ingredients_assigned_to_recipes(self):\n ing1 = Ingredient.objects.create(user=self.user, name='Apples')\n ing2 = Ingredient.objects.create(user=self.user, name='Turkey')\n recipe = Recipe.objects.create(\n user=self.user,\n title='Apple crumble',\n time_minutes=5,\n price=10.0,\n )\n recipe.ingredients.add(ing1)\n\n res = self.client.get(\n reverse('recipe:ingredient-list'),\n {'assigned_only': 1},\n )\n\n ser1 = IngredientSerializer(ing1)\n ser2 = IngredientSerializer(ing2)\n\n self.assertIn(ser1.data, res.data)\n self.assertNotIn(ser2.data, res.data)", "def get_author_data():\n entry = mongo.db.Authors\n output = list()\n look_up_type = None\n if 'name' in request.args:\n look_up_type = 'name'\n print(request.args)\n if len(request.args['name']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['name'].strip('\"')\n name = entry.find({'name': {'$regex': value}})\n if name:\n for author in name:\n output.append({'name': author['name']})\n elif 'booktitle' in request.args:\n look_up_type = 'related_books'\n if len(request.args['booktitle']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['booktitle'].strip('\"')\n related_books = entry.find(\n {'author_books': {'$regex': value}})\n if related_books:\n for related in related_books:\n for title in related['author_books']:\n if value in title:\n output.append(({'related_books': title}))\n if len(output) == 0:\n return render_template('error.html', message=\"No Entries Found\"), 400\n return render_template('gottenAuthors.html', output=output, look_up_type=look_up_type), 200", "def setUpTestData(cls):\n number_of_authors = 13\n\n for author_id in range(number_of_authors):\n Author.objects.create(\n first_name=f'Christian {author_id}',\n last_name=f'Surname {author_id}',\n )", "def testGetRelatedIdentifiers(self):\n try:\n # --- Get related identifiers ---\n pcdcP = PubChemDataCacheProvider(self.__cfgOb, self.__cachePath)\n rD = pcdcP.getRelatedMapping(self.__cidList)\n logger.info(\"rD %r\", rD)\n self.assertGreaterEqual(len(rD), len(self.__cidList))\n #\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def test_dependent_object_import(self):\n # Initialize Importers\n up_manager = ImporterManager(importer=UserProfileImporter())\n company_manger = ImporterManager(importer=CompanyImporter())\n user_manager = ImporterManager(importer=UserImporter())\n\n # Populate leaf models of dependency tree with kv data\n for row,name in enumerate(self.usernames):\n user_manager.update_kvs(field_name='username', value=name, row=row)\n company_manger.update_kvs(field_name='natural_id', value=self.company.natural_id, row=row)\n\n #: Retrieve data associated with kv data\n user_manager.get_available_rows()\n company_manger.get_available_rows()\n\n #: Populate data up the dependency tree with retrieved rows\n for row in range(self.n_objs):\n up_manager.update_kvs('company', company_manger.get_object_or_list(row), row=row)\n up_manager.update_kvs('user', user_manager.get_object_or_list(row), row=row)\n\n #: Retrieve data associated with models depended upon\n up_manager.get_available_rows()\n\n #: Test corresponding UserProfile has been returned\n for row in range(self.n_objs):\n objs = up_manager.get_objs_and_meta(row) #: Returns a list of objects only if manytomany, o/w just 1\n\n self.assertEqual(objs[0].available, True)\n self.assertIsNotNone(objs[0].object)\n 
self.assertIsInstance(objs[0].object, UserProfile)\n self.assertIsNotNone(objs[0].query)\n\n self.assertEqual(objs[0].object.user.username, self.usernames[row])", "def test_many_to_many_prefetch_related(self):\n artist = Artist.objects.create(name=\"Great singer\")\n group = Group.objects.create(name=\"Cool band\")\n\n membership = Membership.objects.create(\n artist=artist,\n group=group,\n invite_reason=\"Need a new drummer\"\n )\n\n membership.delete()\n\n query = Group.objects.filter(id=group.id).prefetch_related(\"members\")\n self.assertEqual(\n query[0].members.count(),\n 0\n )", "def test_is_followed_by(self):\n\n self.u1.followers.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_followed_by(self.u2))\n self.assertFalse(self.u2.is_followed_by(self.u1))", "def test_draft_list_only_display_authors_aids(client, contributor):\n\n AidFactory(name='Is this the real life?', author=contributor)\n AidFactory(name='Is this just fantasy?')\n\n client.force_login(contributor)\n drafts_url = reverse('aid_draft_list_view')\n res = client.get(drafts_url)\n\n content = res.content.decode('utf-8')\n assert 'Is this the real life?' in content\n assert 'Is this just fantasy?' not in content", "def test_retrieve(self):\n users = CalendallUser.objects.all()\n self.check_attrs_helper(users)", "def test_complete_data_schema(self):\n response = self.client.get(self.url)\n data = response.data\n self.assertIn('id', data)\n self.assertIn('first_name', data)\n self.assertIn('last_name', data)\n self.assertIn('aliases', data)\n self.assertIn('movies_as_actor', data)\n self.assertIn('movies_as_director', data)\n self.assertIn('movies_as_producer', data)", "def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)", "def test_organizations_list(self):\n pass", "def any_author_exists(self):\n return bool(self.mapper.count())", "def test_followers_following_list_authorized(self):\n\n # user2 following user1\n # follow = Follows(user_being_followed_id=1, user_following_id=2)\n\n self.u2.following.append(self.u)\n db.session.commit()\n\n with self.client as client:\n\n client.post(\n '/login',\n data = {\n \"username\" : self.u.username,\n \"password\" : \"password\"\n },\n )\n\n response = client.get(\"/users/2/following\")\n html = response.get_data(as_text=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('\"/users/1\"' ,html)\n \n response = client.get(\"/users/1/followers\")\n html = response.get_data(as_text=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn('\"/users/2\"' ,html)", "def test_merge_model_relationships(bf, dataset, organization, assert_in_neo4j):\n person = dataset.create_model(\n \"Person\",\n schema=[ModelProperty(\"name\", data_type=str, title=True, required=True)],\n )\n\n food = dataset.create_model(\n \"Food\", schema=[ModelProperty(\"name\", data_type=str, title=True, required=True)]\n )\n\n color = dataset.create_model(\n \"Color\",\n schema=[ModelProperty(\"name\", data_type=str, title=True, required=True)],\n )\n\n # Relationship type with no \"from\" and \"to\"\n likes = dataset.create_relationship_type(\"Likes\", \"likes\")\n\n # Relationship type with \"from\" and \"to\", but no instances\n dataset.create_relationship_type(\n \"Appreciates\", \"appreciates\", source=person.id, destination=color.id\n )\n\n alice = 
person.create_record({\"name\": \"Alice\"})\n bob = person.create_record({\"name\": \"Bob\"})\n charlie = person.create_record({\"name\": \"Charlie\"})\n\n ice_cream = food.create_record({\"name\": \"Ice Cream\"})\n\n alice_likes_bob = likes.relate(alice, bob)\n bob_likes_charlie = likes.relate(bob, charlie)\n alice_likes_ice_cream = likes.relate(alice, ice_cream)\n\n # At this point we have in the relation_types file\n #\n # ()-[likes]->()\n # (person)-[appreciates]->(color)\n #\n # and in the schemaRelations file\n #\n # (person)-[likes]->(person)\n # (person)-[likes]->(food)\n #\n # The /relationships endpoint on the old service *only* returns things in\n # the relation_types file.\n #\n # But the new service should merge them both together to create all\n # necessary model relationships and stubs:\n #\n # ()-[likes]->()\n # (person)-[appreciates]->(color)\n # (person)-[likes]->(person)\n # (person)-[likes]->(food)\n\n migrate_dataset(\n organization_id=organization.int_id,\n # organization_node_id=organization.id,\n dataset_ids=[dataset.int_id]\n # dataset_node_id=dataset.id,\n )\n\n assert_in_neo4j()\n\n # Drop into raw requests because of\n # https://app.clickup.com/t/426zh9\n relationships = bf._api.concepts.relationships._get(\n bf._api.concepts.relationships._uri(\n \"/{dataset_id}/relationships\", dataset_id=dataset.id\n )\n )\n\n assert sorted(\n [(r[\"from\"] or \"*\", r[\"name\"], r[\"to\"] or \"*\") for r in relationships]\n ) == sorted(\n [\n (\"*\", \"Likes\", \"*\"),\n (person.id, \"Likes\", food.id),\n (person.id, \"Likes\", person.id),\n (person.id, \"Appreciates\", color.id),\n ]\n )", "def save_m2m(self):\n if self.authors.data:\n ids = set(self.authors.data)\n return self.obj.update_authors(ids)\n return False", "def get_authors(self, instance):\n\n # Get Authors in the specified order\n author_order = Author.objects \\\n .filter(dataset_id=instance.id) \\\n .order_by('order')\n\n # Put in a list\n authors = [a.author for a in author_order]\n\n # Return a list of person urls\n serializers = PersonSerializer(authors, many=True, context={'request': self.context['request']}).data\n return [p[\"url\"] for p in serializers]", "def test_get_education(self):\n current_resume = resume.objects.first()\n expected = list(current_resume.get_education())\n case = list(current_resume.education_set.all())\n self.assertEqual(case,expected)", "def test_search_author(self):\n\n\t\titem_id = mock_item(title='Dummy Title', author='Made Up Author')[0]\n\n\t\titem = models.search('Made')[0]\n\t\tself.assertEqual(item['id'], item_id)", "def make_test(self, model_type: str, model_serializer, url_name: str):\n\n for author_id in [1, 2]:\n username = getattr(self, f'author_{author_id}').username\n\n instances = getattr(self, f'{model_type}_by_author_{author_id}')\n instances_data = model_serializer(instances, many=True).data\n\n response = self.client.get(reverse(url_name, kwargs={\n 'username': username\n }))\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data['count'], len(instances_data))", "def test_getResourceRelations(self):\n pass", "def test_search_client_by_author(self, mock_get):\n\n response = isbn_utils.search_by(self.filter_author, self.author)\n self.assertEqual(response.data, json.loads(open(UNIT_TEST_RESOURCES_FOLDER +\n FILE_NAME_AUTHOR_SEARCH_RESPONSE).read())[\"data\"])", "def test_basic_book(self):\n ernest_author = Author.objects.create(FIO=\"Ernest Miller Hemingway\", birthday = \"1899-07-21\")\n create_book 
= Book.objects.create(title=\"The Old Man And The Sea\", yearbook=\"2012-07-07\", short_describe=\"The Old Man and the Sea is the story of an epic battle between an old, experienced fisherman and a large marlin.\")\n create_book.author.add(ernest_author)\n\n create_book = Book.objects.create(title=\" A Farewell to Arms\", yearbook=\"2012-01-31\", short_describe=\"A Farewell to Arms focuses on a romance between Henry and a British nurse, Catherine Barkley, against the backdrop of World War I, cynical soldiers, fighting and the displacement of populations.\")\n create_book.author.add(ernest_author)\n\n create_book = Book.objects.create(title=\"Islands in the Stream\", yearbook=\"2012-02-25\", short_describe=\"The first act, \\\"Bimini\\\", begins with an introduction to the character of Thomas Hudson, a classic Hemingway stoic male figure. Hudson is a renowned American painter who finds tranquility on the island of Bimini, in the Bahamas, a far cry from his usual adventurous lifestyle. Hudson's strict routine of work is interrupted when his three sons arrive for the summer and is the setting for most of the act.\")\n create_book.author.add(ernest_author)", "def test_getorganizations_item(self):\n pass", "def test_getCpfRelations(self):\n pass", "def test_insert_and_fetch_value(self):\n author_kent = Author(\n first_name=\"Arthur\",\n last_name=\"Kent\",\n rating=Decimal(\"4.1\"),\n )\n author_kent.save()\n qs1 = Author.objects.all().values(\"first_name\", \"last_name\")\n self.assertEqual(qs1[0][\"first_name\"], \"Arthur\")\n self.assertEqual(qs1[0][\"last_name\"], \"Kent\")\n # Delete data from Author table.\n Author.objects.all().delete()", "def test_books_exist(self):\n login = self.client.login(username = \"jdoe\", password=\"123\")\n book = Book.objects.create(name = \"Gullivers Travels\", pages=400)\n url = reverse(\"book_list\")\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context[\"book_list\"], [book])", "def test_add_author_notes(self):\n metadata = Metadata(DataSource.CONTENT_CAFE)\n content = self.data_file(\"author_notes.html\")\n self.http.queue_requests_response(200, 'text/html', content=content)\n self.api.add_author_notes(metadata, self.identifier, self.args)\n\n [notes] = metadata.links\n eq_(Hyperlink.AUTHOR, notes.rel)\n assert 'Brenda researched turtles' in notes.content\n\n # We incidentally figured out the book's title.\n eq_(\"Franklin's Christmas Gift\", metadata.title)", "def test_related_resource__reverse(read_only):\n class MemberSerializer(serializers.ModelSerializer):\n class Meta:\n model = MemberWithCustomID\n fields = ('custom_id', 'first_name', 'last_name', 'projects', 'owned_projects')\n read_only_fields = ['projects', 'owned_projects'] if read_only else []\n\n class MemberViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet):\n queryset = MemberWithCustomID.objects.all()\n serializer_class = MemberSerializer\n renderer_classes = [renderers.JSONRenderer]\n parser_classes = [parsers.JSONParser]\n swagger_schema = base.BasicSwaggerAutoSchema\n\n router = routers.DefaultRouter()\n router.register(r'members', MemberViewSet, **compatibility._basename_or_base_name('members'))\n\n generator = OpenAPISchemaGenerator(info=openapi.Info(title=\"\", default_version=\"\"), patterns=router.urls)\n\n swagger = generator.get_schema(request=None, public=True)\n\n response_schema = swagger['paths']['/members/{custom_id}/']['get']['responses']['200']['schema']['properties']\n assert 'id' in 
response_schema['data']['properties']\n assert response_schema['data']['properties']['id']['type'] == 'string'\n assert response_schema['data']['properties']['id']['format'] == 'int64'\n assert 'type' in response_schema['data']['properties']\n assert 'attributes' in response_schema['data']['properties']\n assert list(response_schema['data']['properties']['attributes']['properties'].keys()) == ['first-name', 'last-name']\n assert 'relationships' in response_schema['data']['properties']\n relationships_schema = response_schema['data']['properties']['relationships']['properties']\n assert list(relationships_schema.keys()) == ['projects', 'owned-projects']\n assert relationships_schema['projects']['properties']['data']['items']['properties']['id']['type'] == 'string'\n assert relationships_schema['projects']['properties']['data']['items']['properties']['id']['format'] == 'int32'\n assert relationships_schema['owned-projects']['properties']['data']['items']['properties']['id']['type'] == 'string'\n assert relationships_schema['owned-projects']['properties']['data']['items']['properties']['id']['format'] == \\\n 'int32'", "def test_find_relation_types(self):\n pass", "def test_organizations_read(self):\n pass", "def get_author_citations(updated_redic_list, citedbydict, initial_author_dict, config):\n\n #sorry bout repeated code to get the tags\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n tagvals = {}\n for t in tags:\n try:\n x = config.get(config.get(\"rank_method\", \"function\"), t)\n tagvals[t] = x\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_author_dict\n\n #parse the tags\n mainauthortag = tagify(parse_tag(tagvals['first_author']))\n coauthortag = tagify(parse_tag(tagvals['additional_author']))\n extauthortag = tagify(parse_tag(tagvals['alternative_author_name']))\n if task_get_task_param('verbose') >= 9:\n write_message(\"mainauthortag \"+mainauthortag)\n write_message(\"coauthortag \"+coauthortag)\n write_message(\"extauthortag \"+extauthortag)\n\n author_cited_in = initial_author_dict\n if citedbydict:\n i = 0 #just a counter for debug\n write_message(\"Checking records referred to in new records\")\n for u in updated_redic_list:\n if (i % 1000 == 0):\n mesg = \"Author ref done \"+str(i)+\" of \"+str(len(updated_redic_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n if citedbydict.has_key(u):\n these_cite_k = citedbydict[u]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n authors = get_fieldvalues(u, mainauthortag)\n coauthl = get_fieldvalues(u, coauthortag)\n extauthl = get_fieldvalues(u, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author ref done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n #go through the dictionary again: all keys but search only if new records are cited\n write_message(\"Checking authors in new records\")\n i = 0\n for k in citedbydict.keys():\n if (i % 1000 == 0):\n mesg = \"Author cit done \"+str(i)+\" of \"+str(len(citedbydict.keys()))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n 
these_cite_k = citedbydict[k]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n #do things only if these_cite_k contains any new stuff\n intersec_list = list(set(these_cite_k)&set(updated_redic_list))\n if intersec_list:\n authors = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author cit done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return author_cited_in", "def authors(self):\n return self.properties.get('Authors', ClientValueCollection(SharedWithMeDocumentUser))", "def test_many_to_many_through_self(self):\n through_field = Person._meta.get_field(\"parents\")\n through = through_field.remote_field.through\n\n metadata = MetaData(schema=\"unique\")\n sa_models = construct_models(metadata)\n self.assertEqual(sa_models[through].__table__.schema, \"unique\")", "def test_get_students_for_contact(self):\n pass", "def test_books(self):\r\n link_re = re.compile(r'<(?P<link>[^>]+)>\\; rel=\\\"(?P<rel>[^\\\"]+)\\\"')\r\n\r\n response = self.get_resource('author-test-book',\r\n data=dict(author=self.author.pk))\r\n self.assertTrue(response.has_header(\"Link\"))\r\n self.assertEquals(\r\n response[\r\n \"Link\"], '<%s?page=2&author=5>; rel=\"next\"' % self.reverse('author-test-book')) # nolint\r\n # Get objects by links on Link header\r\n response = self.client.get(link_re.findall(response['Link'])[0][0])\r\n\r\n links = link_re.findall(response['Link'])\r\n\r\n self.assertEquals(links[0][0], '%s?page=3&author=5' %\r\n self.reverse('author-test-book'))\r\n self.assertEquals(links[0][1], 'next')\r\n\r\n self.assertEquals(\r\n links[1][0], '%s?author=5' % self.reverse('author-test-book'))\r\n self.assertEquals(links[1][1], 'previous')\r\n\r\n response = self.get_resource(\r\n 'author-test-book', data={\r\n 'author': self.author.pk, 'adr-max': 0\r\n })\r\n self.assertFalse(response.has_header(\"Link\"))\r\n\r\n response = self.get_resource(\r\n 'author-test-book',\r\n data={\r\n 'author': self.author.pk, 'adr-max': 'all'\r\n })\r\n self.assertEquals(response.status_code, 200)\r\n self.assertFalse(response.has_header(\"Link\"))", "def test_article_exist(self):\n\n article = Article.objects.get(title='Test title')\n self.assertEqual(article.author, 'Roman')", "def sample_author(first_name='testfirstname', last_name='testlastname'):\n return Author.objects.create(first_name=first_name, last_name=last_name)", "def setUp(self):\n\n self.client = get_client()\n self.fake = Faker()\n self.sim = Simulate()\n\n self.generate_authorizations(10)", "def test_post_list_connected_other_users_story(self):\n related_story = create_story(title=\"Test Related Story\", \n summary=\"Test Related Story Summary\",\n byline=\"Test Related Story Byline\",\n status='published',\n author=self.user2)\n self.assertEqual(len(self.story.related_stories.all()), 0)\n post_data = {\n 'relation_type': 'connected',\n 'source': related_story.story_id,\n 'target': self.story.story_id,\n }\n uri = '/api/0.1/stories/%s/related/' % (related_story.story_id)\n self.api_client.client.login(username=self.username, 
password=self.password)\n resp = self.api_client.post(uri, format='json', \n data=post_data)\n self.assertHttpCreated(resp)\n self.assertEqual(StoryRelation.objects.count(), 1)\n created_rel = StoryRelation.objects.get()\n self.assertEqual(created_rel.relation_type,\n post_data['relation_type'])\n self.assertEqual(created_rel.source, related_story)\n self.assertEqual(created_rel.target, self.story)", "def test_user_address_relationship(self):\r\n user = self._create_test_user()\r\n addresses = [self._create_test_address() for x in range(3)]\r\n user.addresses += addresses\r\n self.db.session.commit()\r\n for a in addresses:\r\n assert a in user.addresses", "def test_loaded_data(self):\n users = (0, 0, 1, 1, 2, 3, 4, 5, 5, 6, 7, 8)\n friends = (1, 2, 2, 3, 3, 4, 5, 6, 7, 8, 8, 9)\n\n friendships = []\n for i in range(len(users)):\n friendships.append({'user': users[i], 'friend': friends[i]})\n friendships.append({'user': friends[i], 'friend': users[i]})\n\n self.assertEqual(self.users.friendships, friendships)", "def test_confirm_order_by_reference_wont_work(self):\n\n class Author(Document):\n author = ReferenceField(self.Person)\n\n Author.drop_collection()\n\n person_a = self.Person(name=\"User A\", age=20)\n person_a.save()\n person_b = self.Person(name=\"User B\", age=40)\n person_b.save()\n person_c = self.Person(name=\"User C\", age=30)\n person_c.save()\n\n Author(author=person_a).save()\n Author(author=person_b).save()\n Author(author=person_c).save()\n\n names = [a.author.name for a in Author.objects.order_by(\"-author__age\")]\n assert names == [\"User A\", \"User B\", \"User C\"]", "def test_author_list_equality_with_invalid_authentication(self) -> None:\n\n # Let's check for a request with no authorization\n\n response: Response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(data, {\n 'detail': 'Authentication credentials were not provided.'\n })\n\n # Now lets check with an Author without permissions.\n\n # Select the underprivileged author randomly.\n author: Author = random.choice(self.authors)\n\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(author.get_key()))\n\n response: Response = self.client.get(self.url)\n data: typing.Dict[typing.Any, typing.Any] = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(data, {\n 'detail': 'You do not have permission to perform this action.'\n })", "def test_retrieve_ingredients_assigned_to_recipes(self):\n ingredient1 = Ingredient.objects.create(user=self.user, name='Apple')\n ingredient2 = Ingredient.objects.create(user=self.user, name='Turkey')\n recipe = Recipe.objects.create(user=self.user, title='Apple crumble', time_minutes=5, price=10)\n recipe.ingredients.add(ingredient1)\n res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})\n serializer1 = IngredientSerializer(ingredient1)\n serializer2 = IngredientSerializer(ingredient2)\n self.assertIn(serializer1.data, res.data)\n self.assertNotIn(serializer2.data, res.data)", "def test_collection_author_year_filtering(self):\n # Create a collection\n entries = Entry.objects.filter(id__in=(1, 5, 10, 15))\n collection = CollectionFactory(entries=entries)\n entry = Entry.objects.get(id=1)\n\n # Get a valid collection\n params = {\n \"collection\": collection.id,\n \"author\": entry.first_author.id,\n \"year\": entry.publication_date.year,\n }\n self._test_filtering(**params)", "def 
test_item_is_related_to_list(self) -> None:\n list_ = List.objects.create()\n item = Item()\n item.list = list_\n item.save()\n self.assertIn(item, list_.item_set.all())", "def test_beneficiaries_retrieve_validate_content_that_will_pass(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve', kwargs={'pk': 1})\n response = self.client.get(url)\n\n # serialize all model object data\n beneficiaries = Beneficiary.objects.get(pk=1)\n serializer = BeneficiarySerializer(beneficiaries, many=False)\n self.assertEqual(response.json(), serializer.data)\n self.assertEqual(response.status_code, 200)", "def test_add_relation_types(self):\n pass", "def test_nondependent_object_get(self):\n manager = ImporterManager(importer=UserImporter())\n for row,name in enumerate(self.usernames):\n manager.update_kvs(field_name='username',value=name,row=row)\n\n manager.get_available_rows()\n for i in range(self.n_objs):\n objs: List[RecordData] = manager.get_objs_and_meta(i) #: Returns a list of objects only if manytomany\n self.assertEqual(objs[0].available, True)\n self.assertIsNotNone(objs[0].object)\n self.assertIsInstance(objs[0].object, User)\n self.assertIsNotNone(objs[0].query)\n\n del manager", "def test_conceptional_relations(id, conrel, expected_ids):\n synset = germanet_data.get_synset_by_id(id)\n related = synset.relations[conrel]\n np.testing.assert_equal(sorted([syn.id for syn in related]), sorted(expected_ids))", "def test_refersto_author(self):\n inv_search = 'refersto:author:kitty'\n spi_search = 'find refersto author kitty'\n self._compare_searches(inv_search, spi_search)", "def test_get_people_list(self):\n person_1 = Person(\n first_name='Emilia',\n last_name='Clarke',\n aliases='Emi'\n )\n person_2 = Person(\n first_name='Peter',\n last_name='Dinklage',\n )\n person_3 = Person(\n first_name='Thomas',\n last_name='McCarthy',\n aliases='Thom'\n )\n\n Person.objects.bulk_create([person_1, person_2, person_3])\n\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('count'), Person.objects.count())", "def test_view_recipe_detail(self):\n recipe = sample_recipe(user = self.user)\n recipe.tags.add(sample_tag(user=self.user)) # This is how u add an item on a ManytoManyField\n recipe.ingredients.add(sample_ingredient(user=self.user))\n\n url = detail_url(recipe.id)\n res = self.client.get(url)\n\n serializer = RecipeDetailSerializer(recipe) # since this is not a list function, we dont need many=true\n self.assertEqual(res.data, serializer.data) # test that the response is serialized", "def test_retrieve_ingredients_assigned_to_recipes(self):\n ingredient1 = Ingredients.objects.create(\n user=self.user,\n name='Breakfast'\n )\n ingredient2 = Ingredients.objects.create(user=self.user, name='Dinner')\n recipe = Recipe.objects.create(\n title='biryani',\n time_minutes=30,\n price=250,\n user=self.user\n )\n recipe.ingredients.add(ingredient1)\n\n res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})\n\n serializer1 = IngredientSerializer(ingredient1)\n serializer2 = IngredientSerializer(ingredient2)\n\n self.assertIn(serializer1.data, res.data)\n self.assertNotIn(serializer2.data, res.data)", "def test_create_instructor_complete_data(self):\n response = self.client.post(self.url, data=json.dumps(self.payload_all), content_type='application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, 
msg=response.content.decode())\n self.assertEqual(Instructor.objects.count(), self.qty + 1)\n self.assertTrue(User.objects.filter(email=self.payload_all['email']).exists())\n self.assertTrue(User.objects.filter(username=self.payload_all['email']).exists())\n user_id = User.objects.get(username=self.payload_all['email']).id\n self.assertTrue(Instructor.objects.filter(user_id=user_id).exists())", "def test_one_and_many(self):\n\n mapper(User, users, properties={\n 'orders':relation(Order, lazy=False)\n })\n mapper(Item, items)\n mapper(Order, orders, properties = dict(\n items = relation(Item, secondary=order_items, lazy=False, order_by=items.c.id)\n ))\n\n q = create_session().query(User)\n\n l = q.filter(\"users.id in (7, 8, 9)\")\n\n def go():\n assert fixtures.user_order_result[0:3] == l.all()\n self.assert_sql_count(testing.db, go, 1)", "def test_initialization_of_homework_result_author():\n assert result_1.author == good_student", "def get_author_affiliations(self, author_node, author_group_node):\n ref_ids = author_node.xpath(\".//@refid[contains(., 'af')]\").extract()\n group_affs = author_group_node.xpath(\"string(./affiliation/textfn[1])\").getall()\n if ref_ids:\n affiliations = self._find_affiliations_by_id(author_group_node, ref_ids)\n else:\n affiliations = filter(None, group_affs)\n return affiliations", "def test_fetch_instructors_records(self) -> None:\n SIT: University = University(\n \"/Users/rdshah2005/Desktop/SSW810/Assignment9/SSW-810\")\n expected_result: List[str] = [['98765', 'Einstein, A', 'SFEN'], ['98764', 'Feynman, R', 'SFEN'], [\n '98763', 'Newton, I', 'SFEN'], ['98762', 'Hawking, S', 'SYEN'], ['98761', 'Edison, A', 'SYEN'], ['98760', 'Darwin, C', 'SYEN']]\n computed_results: List[str] = list()\n\n for record in SIT.all_instructors.values():\n computed_results.append(record.fetch_instructor_records())\n\n self.assertEqual(expected_result, computed_results)", "def test_attributes(self):\n u = User.query.filter_by(username=\"jjones\").first()\n assert u.username == \"jjones\"\n assert u.email == \"[email protected]\"\n assert len(u.reviews) == 4\n assert u.email_verified is False\n assert u._email_token_key == 'verify_email'\n assert u._password_token_key == 'reset_password'\n assert u.sentfriendrequests == []\n assert u.receivedfriendrequests == []\n assert u.sentgrouprequests == []\n u2 = User.query.get(1)\n assert u2 in u.friends\n assert type(u.address) == Address" ]
[ "0.719106", "0.6605452", "0.65803057", "0.64502394", "0.6409618", "0.6390426", "0.62981063", "0.6223942", "0.6172895", "0.6143107", "0.6092482", "0.60106426", "0.5917884", "0.587733", "0.5856236", "0.5841362", "0.58273274", "0.5820912", "0.5778369", "0.5765315", "0.574128", "0.5734344", "0.57250065", "0.57065344", "0.56937903", "0.56928355", "0.5686367", "0.56197584", "0.55989265", "0.557787", "0.5577128", "0.55763495", "0.5560648", "0.55581194", "0.55415785", "0.55412674", "0.553819", "0.55294144", "0.5527821", "0.55089194", "0.54961264", "0.54884106", "0.54719615", "0.54713553", "0.54669064", "0.5463743", "0.546007", "0.54572475", "0.5451251", "0.54504204", "0.5446667", "0.544655", "0.54421824", "0.5436696", "0.5434739", "0.5432363", "0.5428308", "0.5425159", "0.5421041", "0.54154795", "0.54126525", "0.5411016", "0.54049754", "0.5398849", "0.5397721", "0.53959286", "0.53900856", "0.53871024", "0.5382152", "0.5361884", "0.5357816", "0.53487843", "0.53459436", "0.53457403", "0.5343612", "0.53329206", "0.53277844", "0.532462", "0.5319612", "0.53189504", "0.5316084", "0.5315843", "0.5311915", "0.5309801", "0.53097564", "0.53079396", "0.53007454", "0.5292835", "0.52914894", "0.52886415", "0.5285108", "0.52824295", "0.5278157", "0.5274698", "0.5274531", "0.52724737", "0.5271929", "0.5268991", "0.5267092", "0.526687" ]
0.7145637
1
Create Friends and Friends of Friends and associated posts
def create_friends(friend, friendors, create_post = True, visibility = ACL_DEFAULT):
    for friendor in friendors:
        friend.add_friend(friendor)
        friendor.add_friend(friend)
        # FriendRelationship.objects.create(friendor = friendor, friend = friend)
        if create_post:
            Post.objects.create(content = TEXT, author = friendor, visibility = visibility)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_friend(request, profile_pk, friend_pk):\n\n profile_object = Profile.objects.get(pk=profile_pk)\n friend_object = profile_object.get_friend_suggestions().get(pk=friend_pk)\n \n profile_object.friends.add(friend_object)\n profile_object.save()\n\n return redirect(reverse('show_profile_page', kwargs={'pk': profile_pk}))", "def create_friend(user_id, friend_user_id):\n\n friend = User_Friend(user_id=user_id, friend_user_id=friend_user_id)\n\n db.session.add(friend)\n db.session.commit()\n\n return friend", "def add_friend(request):\n required_fields = ['source_user_id', 'dest_user_id', 'token']\n\n # Check if the post request contain the required fields\n if set(required_fields) != set(list(request.data.keys())):\n return Response({'error': str('Missing required fields!')}, status=status.HTTP_400_BAD_REQUEST)\n\n # POST Request content\n data = request.data\n\n # Here check if user_id matches the token with the database\n if not db.check_user(data['source_user_id'], data['token']):\n return Response({'error': str('UNAUTHORIZED')}, status=status.HTTP_401_UNAUTHORIZED)\n\n # Send friend request\n if not mock_db.add_friend(data['source_user_id'], data['dest_user_id']):\n return Response({'error': str('Error when adding friend!')},\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n return Response({'status': 'success'})", "def create(self, request):\n friend_obj = Friend.objects.add_friend(\n request.user, # The sender\n get_object_or_404(User, pk=request.data['user_id']), # The recipient\n message=request.data.get('message', '')\n )\n\n return Response(\n FriendshipRequestSerializer(friend_obj).data,\n status.HTTP_201_CREATED\n )", "def share_post(self):\n for friend in random.sample(self._friends, len(self._friends)):\n post: Post = friend.get_random_post()\n if not post:\n continue\n # self._posts.append(post)\n self.write_post(post.tags, friend)\n friend.update_relation(self, SHARE_POST)\n friend.append_share(post, user=self)\n # self.update_relation(friend, SHARE_POST)\n break", "def post(owner_id=None, friends_only=None, from_group=None, message=None,\\\n attachments=None, services=None, signed=None, publish_date=None,\\\n lat=None, long=None, place_id=None, post_id=None, guid=None, mark_as_ads=None):\n params = {\n 'owner_id': owner_id,\n 'friends_only': friends_only,\n 'from_group': from_group,\n 'message': message,\n 'attachments': attachments,\n 'services': services,\n 'signed': signed,\n 'publish_date': publish_date,\n 'lat': lat,\n 'long': long,\n 'place_id': place_id,\n 'post_id': post_id,\n 'guid': guid,\n 'mark_as_ads': mark_as_ads\n }\n result = call('wall.post', **params)\n return parse_response(result)", "def accept(self):\n receiver_friend_list = FriendList.objects.filter(user_id=self.receiver_id)\n sender_friend_list = FriendList.objects.filter(user_id=self.sender_id)\n if(receiver_friend_list.exists()):\n receiver_friend_list = receiver_friend_list[0]\n else:\n receiver_friend_list = FriendList.objects.create(user_id=self.receiver_id)\n\n if(sender_friend_list.exists()):\n sender_friend_list = sender_friend_list[0]\n else:\n sender_friend_list = FriendList.objects.create(user_id=self.sender_id)\n\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender_id)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver_id)\n self.is_active = False\n self.save()", "def post(self):\n\t\tdb = getattr(g, 'db', None)\n\t\tobj = request.get_json()\n\n\t\tif ('username' not in obj) or ('session' not in obj):\n\t\t\treturn 
{'status':'MISSING_PARAMS'}\n\t\telif not authenticate(obj['username'],obj['session']):\n\t\t\treturn {'status':'AUTH_FAIL'}\n\t\telif ('action' not in obj):\n\t\t\treturn {'status':'MISSING_PARAMS'}\n\t\telse:\n\t\t\taction = obj['action']\n\t\t\tif action == 'ADD' and 'friend' in obj:\n\t\t\t\tqry = \"INSERT INTO friends VALUES ((SELECT id FROM profiles WHERE username = %s),\\\n\t\t\t\t\t(SELECT id FROM profiles WHERE username = %s));\"\n\t\t\t\twith db as cur:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tlines = cur.execute(qry, (obj['username'],obj['friend']))\n\n\t\t\t\t\t\tif lines > 0:\n\t\t\t\t\t\t\treturn {'status':'FRIEND_ADDED'}\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\treturn {'status':'QUERY_FAILED'}\n\t\t\t\t\texcept sql.IntegrityError:\n\t\t\t\t\t\treturn {'status':'DUPLICATE_USER'}\n\t\t\t\t\texcept sql.OperationalError:\n\t\t\t\t\t\treturn {'status':'NO_SUCH_USER'}\n\n\t\t\telif action == 'GET':\n\t\t\t\t\"\"\" Retrieve all friends belonging to user. \"\"\"\n\t\t\t\tfriends = [] #accepted, both ends\n\t\t\t\tpending = [] #pending answer from friend\n\n\t\t\t\t# retrieve canonical friends\n\t\t\t\tuserqry = \"SELECT id FROM profiles WHERE username = %s\"\n\t\t\t\tfriendsqry = \"SELECT friend FROM friends WHERE target = ANY(\"+userqry+\")\"\n\t\t\t\tqry = \"SELECT username FROM profiles WHERE id = ANY(\"+friendsqry+\");\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'],))\n\t\t\t\t\tfor friend in cur.fetchall():\n\t\t\t\t\t\tfriends += friend\n\n\t\t\t\t# retrieve pending requests\n\t\t\t\tuserqry = \"SELECT id FROM profiles WHERE username = %s\"\n\t\t\t\tfriendsqry = \"SELECT target FROM friends WHERE friend = ANY(\"+userqry+\")\"\n\t\t\t\tqry = \"SELECT username FROM profiles WHERE id = ANY(\"+friendsqry+\");\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'],))\n\t\t\t\t\tprint \"friends:\"+str(friends)\n\t\t\t\t\tfor req in cur.fetchall():\n\t\t\t\t\t\tif not req[0] in friends:\n\t\t\t\t\t\t\tpending += req\n\n\t\t\t\tif not (len(friends)<=0 and len(pending)<=0):\n\t\t\t\t\treturn {'status':'QUERY_OK', 'friends':friends, 'pending':pending}\n\t\t\t\telse:\n\t\t\t\t\treturn {'status':'NO_FRIENDS'}\n\n\t\t\telif action == 'DELETE' and 'friend' in obj:\n\t\t\t\tqry = \"DELETE FROM friends WHERE target = (SELECT id FROM profiles WHERE username = %s)\\\n\t\t\t\t\tand friend = (SELECT id FROM profiles WHERE username = %s);\"\n\t\t\t\twith db as cur:\n\t\t\t\t\tlines = cur.execute(qry, (obj['username'], obj['friend']))\n\t\t\t\t\tif lines>0:\n\t\t\t\t\t\treturn {'status':'FRIEND_DELETED'}\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn {'status':'QUERY_FAILED'}\n\n\t\t\telse:\n\t\t\t\treturn {'status':'INVALID_ACTION'}", "def addfriend(self, second_user_id):\n second_user = User.objects.get(id=second_user_id)\n new_friendship = Friendship.objects.create(friend_user=self, friend=second_user.gameplanuser)\n new_friendship.save()", "def post(self, request, *args, **kwargs):\n frompath = urlparse(request.DATA.get('from_person')).path\n topath = urlparse(request.DATA.get('to_person')).path\n\n #print(request.DATA)\n if type(frompath) is str and type(topath) is str:\n frompath_elements = frompath.split('/')\n topath_elements = topath.split('/')\n else:\n return Response({'error: invalid data'}, status=status.HTTP_400_BAD_REQUEST)\n\n fromPerson = get_object_or_404(Person, username=frompath_elements[-2])\n toPerson = get_object_or_404(Person, username=topath_elements[-2])\n count = Relationship.objects.filter(from_person=fromPerson, 
to_person=toPerson).count()\n\n #Reject a request to create Relationship with self\n if request.user.person.username == toPerson.username or count > 0:\n return Response({'error: Relationship with self not permitted'}, status=status.HTTP_400_BAD_REQUEST)\n\n if request.user.person.username == fromPerson.username or request.user.is_staff:\n return self.create(request, *args, **kwargs)\n return Response({'error': 'from_user does not match authenticated User'}, status=status.HTTP_400_BAD_REQUEST)", "def create_friend_request():\n if request.method == \"GET\":\n friend_requests = [f.to_dict() for f in g.user.get_friend_requests()]\n return jsonify({'success': True, 'friend_requests': friend_requests})\n\n if request.method == \"POST\":\n # Get recieving user id from request\n json = request.get_json()\n if json is None:\n raise CustomError(400, message=\"No JSON included or Content-Type\"\n \"is not application/json\")\n\n if 'recieving_user_id' not in json:\n raise CustomError(400, message=\"Must include recieving_user_id\")\n\n recieving_user_id = json['recieving_user_id']\n\n # Get the user object\n recieving_user = User.query.get(recieving_user_id)\n if recieving_user is None:\n raise CustomError(\n 404,\n message='User with id: {} was not found.'.format(\n recieving_user_id)\n )\n\n # Check friendship does not already exist\n friendship_exists = Friendship.query.filter(\n (Friendship.actioning_user_id == g.user.id) |\n (Friendship.recieving_user_id == g.user.id),\n (Friendship.actioning_user_id == recieving_user_id) |\n (Friendship.recieving_user_id == recieving_user_id)\n ).first()\n\n if friendship_exists:\n raise CustomError(\n 409,\n message=\"There is either a pending friend request between the\"\n \"two users or the two users are already friends.\"\n )\n\n # Insert friend request\n friend_request = Friendship(g.user, recieving_user)\n db.session.add(friend_request)\n db.session.commit()\n\n return jsonify({'success': True}), 201", "def add_friend():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n friend_to_add = get_id_from_username(request.form['add_user'])\n if not friend_to_add or friend_to_add==user_id:\n return redirect(url_for('message.converse'))\n add_friend_db(user_id, friend_to_add)\n return redirect(url_for('message.converse'))", "def post(self, request, *args, **kwargs):\n\n user_wall_post_comment = self.get_object()\n user_wall_post_comment.likes.add(self.request.user)\n return Response(status=201)", "def react_to_post(self):\n for friend in random.sample(self._friends, len(self._friends)):\n post: Post = friend.get_random_post()\n if not post:\n continue\n attitude = self._interests[random.choice(post.tags)]\n self.update_positive_and_negative_actions(friend, attitude)\n reaction = Reaction(attitude, self.unique_id)\n post.add_reaction(reaction)\n friend.update_relation(self, REACT)\n friend.append_reaction(post, reaction)\n # self.update_relation(friend, REACT)\n break", "def add_friend(self, User):\n if not User in self.friends.all():\n self.friend.add(User)\n #self.save()", "def post_create(faker_obj, profile_obj, tag_list, num=3):\n for i in range(num):\n obj = faker_obj\n title = obj.sentence(nb_words=random.randint(5, 10))\n author = User.objects.get(id=profile_obj)\n body = \" \".join(obj.paragraphs(nb=random.randint(8, 20)))\n status = \"published\"\n post = Post.objects.create(title=title, author=author, body=body, status=status)\n post.tags.add(\", \".join(random.sample(tag_list, 1)))\n print(\n 
\"Created post title:'{}' for user '{}'\".format(post.title, author.username)\n )\n create_comment_list(obj, post)", "def add_relation(request, id):\n user = request.user\n friend = get_object_or_404(User, id=id)\n user.profile.relations.add(friend)\n user.profile.friends.remove(friend)\n messages.success(\n request,\n 'Friend added to your family list'\n )\n return redirect('profiles:my_family')", "def add_direct(request):\n friend = request.POST['friend'].strip()\n\n if userauth_models.User.objects.filter(username=friend).exists():\n friendUser = userauth_models.User.objects.get(username=friend)\n elif userauth_models.User.objects.filter(phone_number=friend):\n friendUser = userauth_models.User.objects.get(phone_number=friend)\n elif userauth_models.User.objects.filter(email=friend):\n friendUser = userauth_models.User.objects.get(email=friend)\n else:\n return HttpResponse(status=403) #no friend :(\n\n threadName = request.user.username + friendUser.username\n\n if models.MessageThread.objects.filter(title=threadName).exists():\n thread = models.MessageThread.objects.get(title=threadName)\n elif models.MessageThread.objects.filter(title=(friendUser.username + \\\n request.user.username)).exists():\n thread = models.MessageThread.objects.get(title=(friendUser.username \\\n + request.user.username))\n else:\n thread = models.MessageThread(title=threadName, psk=threadName, \\\n admin=request.user.username, friend1 = friendUser.username, is_direct=True)\n #thread = models.MessageThread(title=threadName, psk=threadName)\n thread.save()\n\n if not request.user in thread.clients.all():\n thread.clients.add(request.user)\n #thread.clients.add(friendUser)\n channel_layer = get_channel_layer()\n if 'channel_name' in request.session:\n async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])\n \n #if not friendUser in thread.clients.all():\n # thread.clients.add(friendUser)\n # channel_layer = get_channel_layer()\n\n # if 'channel_name' in request.session:\n # async_to_sync(channel_layer.group_add)(thread.hash_id,request.session['channel_name'])\n\n thread_data = serializers.MessageThreadSerializer(thread).data\n\n return HttpResponse(status=200)", "def postCreate(post):\n post_list = list()\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n # visible_to = list(post.visibleTo)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list", "def post(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n args = post_parser.parse_args()\n\n # check circles\n circles = []\n for circle_id in args['circle_ids']:\n found_circle = find_circle(user, circle_id)\n if not found_circle:\n return {'msg': f'Circle {circle_id} is not found'}, 404\n circles.append(found_circle)\n\n # check reshare\n reshared_from = args['reshared_from']\n reshared_from_post = None\n if reshared_from:\n 
reshared_from_post = dangerously_get_post(reshared_from)\n if not reshared_from_post:\n return {\"msg\": f\"Post {reshared_from} is not found\"}, 404\n\n # check media\n media_object_names = args['media_object_names']\n if reshared_from and media_object_names:\n return {'msg': \"Reshared post is not allowed to have media\"}, 400\n\n post = create_post(\n user,\n content=args['content'],\n is_public=args['is_public'],\n circles=circles,\n reshareable=args['reshareable'],\n reshared_from=reshared_from_post,\n media_list=check_media_object_names(media_object_names, MaxPostMediaCount),\n mentioned_users=check_mentioned_user_ids(args['mentioned_user_ids']),\n is_update_avatar=False\n )\n if not post:\n return {\"msg\": f\"Not allowed to reshare post {reshared_from}\"}, 403\n return post, 201", "def add_to_following(sender, instance, created, **kwargs):\r\n sender_= instance.sender\r\n receiver_ = instance.receiver\r\n if instance.status == 'accepted':\r\n sender_.following.add(receiver_.user)", "def test_addFriend(self):\n \n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n # c = Client()\n # c.post(\"/requestFriend\",{'Friend':f.id,'User':u.id})\n # client = Client()\n # response = client.get(\"/requestFriend\")\n # request = response.wsgi_request \n\n #request.POST({'Friend':f.id,'User':u.id})\n #response = self.client.get(reverse('meetup_finder_app:requestFriend'))\n #f.requested_friends.add(u)\n #requestFriend(request)\n data = {'Friend':f.id,'User':u.id}\n #request = self.factory.post('/a/test/path/', data, content_type='application/json')\n # request = self.factory.post('/requestFriend/', data, content_type='application/json')\n # print(request.POST['User'])\n # request.user = self.user\n # requestFriend(request)\n\n # poll_1 = Poll.objects.get(pk=1)\n # self.assertEqual(poll_1.choice_set.get(pk=1).votes, 1)\n\n resp = self.client.post('/addFriend/', {'User': u.id, 'Friend': f.id})\n self.assertEqual(resp.status_code, 302)\n\n self.assertIs(u in f.friends.all(), True)", "def like(self, request, pk=None):\n\n user_wall_post = self.get_object()\n user_wall_post.likes.add(self.request.user)\n to_user = user_wall_post.owner\n from_user = request.user\n\n UserNotification.create_post_friend_liked_notification(from_user, to_user, 'Right', id=pk)\n return Response(status=201)", "def test_requested_friends_asymmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n \n f.requested_friends.add(u)\n self.assertIs(u in f.requested_friends.all(), True)\n self.assertIs(f in u.requested_friends.all(), False)", "def post(self, request, format=None):\n serializer = PostSerializer(data=request.data)\n notification = NotificationViewList()\n if serializer.is_valid():\n\n if self.request.user.is_authenticated():\n try:\n target = User.objects.get(id=request.data['target_id'])\n serializer.save(\n user=User.objects.get(id=self.request.user.id),\n target_name=(target.first_name+' '+target.last_name)\n )\n except User.DoesNotExist:\n serializer.save(\n user=User.objects.get(id=self.request.user.id),\n target_name=''\n )\n data = {}\n data['type'] = 'user'\n if request.data['target_id'] is not None:\n data['user_id'] = request.data['target_id']\n data['firstname'] = User.objects.get(\n id=request.data['target_id']\n ).first_name\n 
data['lastname'] = User.objects.get(\n id=request.data['target_id']\n ).last_name\n else:\n data['user_id'] = None\n json_data = json.dumps(data)\n if request.data['target_id'] is not None:\n notification.add(\n request.user,\n request.data,\n User.objects.filter(\n id__in=Friend.objects.filter(\n from_user=self.request.user.id\n ).values('to_user')),\n ContentType.objects.get(model='post'),\n JSONRenderer().render(serializer.data).decode('utf-8'),\n json_data\n )\n return Response(\n serializer.data,\n status=status.HTTP_201_CREATED\n )\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def dispatch(self, request, *args, **kwargs):\n user_to = User.objects.get(pk=kwargs['pk'])\n user_from = self.request.user\n ###\n if user_to not in wanna_be_friends(user_from):\n friendship = FriendshipInvitation.objects.create(\n from_user=user_from, to_user=user_to, status=\"0\")\n\n notif = Notification.objects.create(sender=user_from,\n receiver=user_to,\n notif_type='friend_request')\n # Aca se ha enviado la solicitud\n else:\n return HttpResponseRedirect(\"/fr_req_fail/\")\n return HttpResponseRedirect(\"/\")", "def create(self, validated_data):\n \"\"\" Create post with a location \"\"\"\n location_data = validated_data.pop('location')\n\n # create a new one or get a old for reference\n this_location = Location.objects.get_or_create(\n **location_data\n )\n\n # pop the photo url's data\n photo_data = validated_data.pop('photo')\n\n # must pop the tags data before it would used to create a post \n tags_data = validated_data.pop('tag')\n # create a instance of this post\n this_post = Post.objects.create(\n location = this_location[0],\n **validated_data)\n\n \"\"\"Associate tag's informatiion to post\"\"\"\n for tag in tags_data:\n this_tag = Tag.objects.get_or_create(name = tag.get('name'))\n print(tag.get('name'))\n print(this_tag)\n # attach this tag to this photos_datapost \n this_post.tag.add(this_tag[0])\n\n \"\"\"Associate the photo url \"\"\"\n for photo in photo_data:\n this_post.photo.create(name = photo.get('name'))\n # return the created post \n this_post.save()\n return this_post", "def posts_for_feed():\n user_id = session.get('user_id')\n friend_posts = Post.query.join(Friend, db.and_(Post.user_id == Friend.user_2,\n Friend.active == True)).outerjoin(Comment, db.and_(Comment.post_id == Post.post_id,\n Comment.active == True)).filter(Friend.user_1 == user_id,\n Post.active == True).order_by(Post.post_id.desc()).all()\n\n post_list = []\n for post in friend_posts:\n post_list.append(post.to_dict_for_json())\n\n resp = make_response(jsonify(post_list), 200)\n return resp", "def write_comment_to_post(self):\n for friend in random.sample(self._friends, len(self._friends)):\n post: Post = friend.get_random_post()\n if not post:\n continue\n attitude = self._interests[random.choice(post.tags)]\n self.update_positive_and_negative_actions(friend, attitude)\n comment = Comment(attitude, self.unique_id)\n post.add_comment(comment)\n friend.update_relation(self, WRITE_COMMENT)\n friend.append_comment(post, comment)\n # self.update_relation(friend, WRITE_COMMENT)\n break", "def posts_create(request):\n if request.method == 'POST':\n form = PostForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('feed')\n\n else:\n form = PostForm()\n\n return render(\n request=request,\n template_name='posts/new.html',\n context={\n 'form': form,\n 'user': request.user,\n 'perfil': request.user.perfil\n }\n )", "def users_create():", "def create(self, 
validated_data):\n\n following = models.FollowingsModel(\n followed = validated_data['followed']\n )\n request = self.context.get('request', None)\n following.follower = request.user\n existings = models.FollowingsModel.objects.filter(followed=following.followed, follower=following.follower)\n if len(existings) == 0:\n following.save()\n return following\n elif following.follower == following.followed:\n raise serializers.ValidationError({'message':'You Cannot follow yourself'})\n\n raise serializers.ValidationError({'message':'You have already followed this user.'})", "def post(self, request):\n\n user_id = request.data.get('user_id')\n followers = request.data.get('followers')\n followings = request.data.get('followings')\n\n if followers:\n follower_objs = UserFollower.objects.filter(user=user_id).first()\n\n follower_serialized = UserFollowerSerializer(follower_objs)\n\n response_json = {\n 'status': True,\n 'message': 'successful',\n 'data': follower_serialized.data\n }\n\n return Response(response_json, status=200)\n\n if followings:\n following_objs = UserFollowing.objects.filter(user=user_id).first()\n\n following_serialized = UserFollowerSerializer(following_objs)\n\n response_json = {\n 'status': True,\n 'message': 'successful',\n 'data': following_serialized.data\n }\n\n return Response(response_json, status=200)\n\n response_json = {\n 'status': False,\n 'message': 'unsuccessful',\n 'data': {}\n }\n\n return Response(response_json, status=200)", "def test_friends_symmetrical(self):\n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n\n u.friends.add(f)\n self.assertIs(u in f.friends.all(), True)\n self.assertIs(f in u.friends.all(), True)", "def accept(self):\n receiver_friend_list = FriendList.objects.get(user=self.receiver)\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender)\n sender_friend_list = FriendList.objects.get(user=self.sender)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver)\n self.is_active = False\n self.save()", "def make_friend(user_id, friend_id):\n # Find out if the user exists\n user_a = user_grab(user_id)\n if user_a is None:\n return \"user not found\", 404\n\n # Find the other user\n user_b = user_grab(friend_id)\n if user_b is None:\n return \"user not found\", 404\n\n # Get their friend list\n friends_current = user_a.get(\"friends\")\n friends_updated = []\n if friends_current is not None:\n for friend in friends_current:\n if friend == friend_id:\n return user_b\n friends_updated = friends_current\n friends_updated.append(str(user_b['_id']))\n api_vars.users.update({'_id': ObjectId(user_id)},\n {'$set': {'friends': friends_updated}})\n return json.dumps(user_b)", "def addFriends(author):\n friends = author.friends.all()\n remote_friends = RemoteFriend.objects.all().filter(author=author)\n friend_list = list()\n if friends:\n for friend in friends:\n friend_dict = {'id': \"{}/api/{}\".format(DOMAIN, friend.id), 'host': friend.host_url,\n 'displayName': friend.username, 'url': \"{}/api/{}\".format(DOMAIN, friend.id)}\n friend_list.append(friend_dict)\n\n if remote_friends:\n for remote in remote_friends:\n friend_dict = {'id': remote.url, 'host': remote.host,\n 'displayName': remote.displayName, 'url': remote.url}\n friend_list.append(friend_dict)\n\n remote = check_remote_friends(author)\n friend_list += remote\n return friend_list", "def add_user_friendships(friend_page, 
acct):\n\n friends_list = [] # becomes a list of User objects\n # with db.session.begin():\n for friend in friend_page.user: # loops over page of 30 friends\n gr_id = int(friend.id.cdata.encode('utf8'))\n gr_url = friend.link.cdata.encode('utf8')\n name = friend.name.cdata.encode('utf8')\n image_url = friend.small_image_url.cdata.encode('utf8')\n\n try:\n # if user is already in db, add friendship only\n existing_user = User.query.filter_by(gr_id=gr_id).one()\n friends_list.append(existing_user)\n except:\n new_user = User(gr_id=gr_id, gr_url=gr_url,\n gr_name=name, image_url=image_url)\n db.session.add(new_user)\n print \"added new friend: \" + friend.name.cdata.encode('utf8')\n friends_list.append(new_user)\n\n print friends_list\n db.session.commit()\n\n # after adding missing users to db, add friendship between authorized account\n # and all friends\n for friend in friends_list:\n\n new_friend = Friendship(user_id=acct.user.user_id, friend_id=friend.user_id)\n old_friend = Friendship(user_id=friend.user_id, friend_id=acct.user.user_id)\n db.session.add(new_friend)\n db.session.add(old_friend)\n print \"Added friendship!\"\n\n db.session.commit()", "def test_request_friend(self):\n self.test_login_user()\n self.test_create_user('b')\n url = reverse('MGA:send_friend_request')\n data = {'id': 2}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def add_friend():\n\n\n user_id = session['user_id']\n add_friend = request.form.get(\"add-friend\")\n friend_id = request.form.get(\"friend_id\")\n friendship = Friendship.add_friend(user_id, friend_id)\n\n print \"This is the friend id\", friend_id\n\n return 'friend added'", "def test_requestFriend(self):\n \n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n # c = Client()\n # c.post(\"/requestFriend\",{'Friend':f.id,'User':u.id})\n # client = Client()\n # response = client.get(\"/requestFriend\")\n # request = response.wsgi_request \n\n #request.POST({'Friend':f.id,'User':u.id})\n #response = self.client.get(reverse('meetup_finder_app:requestFriend'))\n #f.requested_friends.add(u)\n #requestFriend(request)\n data = {'Friend':f.id,'User':u.id}\n #request = self.factory.post('/a/test/path/', data, content_type='application/json')\n # request = self.factory.post('/requestFriend/', data, content_type='application/json')\n # print(request.POST['User'])\n # request.user = self.user\n # requestFriend(request)\n\n # poll_1 = Poll.objects.get(pk=1)\n # self.assertEqual(poll_1.choice_set.get(pk=1).votes, 1)\n\n resp = self.client.post('/requestFriend/', {'User': u.id, 'Friend': f.id})\n self.assertEqual(resp.status_code, 302)\n\n self.assertIs(u in f.requested_friends.all(), True)", "def create_post(self: User, content: str, is_public: bool, circles: List[Circle], reshareable: bool,\n reshared_from: Optional[Post], media_list: List[Media], mentioned_users: List[User],\n is_update_avatar: bool) \\\n -> Union[Post, bool]:\n if not content and not media_list:\n # a post has to have either content or media\n return False\n\n new_post = Post()\n new_post.eid = make_uuid()\n new_post.author = self.id\n if content:\n new_post.content = bleach.clean(content)\n new_post.is_public = is_public\n new_post.circles = circles\n new_post.media_list = media_list\n new_post.is_update_avatar = is_update_avatar\n\n if reshared_from and not reshareable:\n # if resharing 
from a post, this post must also be reshareable, otherwise it's logically wrong\n return False\n\n if reshared_from:\n if media_list:\n # when resharing, only allow content (text), e.g. no media\n return False\n\n if reshared_from.reshared_from:\n # if reshared_from itself is a reshared post, reshare reshared_from's original post\n # reshared_from.reshared_from is LazyReference so need to retrieve the full post\n reshared_from = get_in_post_cache(reshared_from.reshared_from.id)\n\n # same explanation for context_home_or_profile=False\n if not sees_post(self, reshared_from, context_home_or_profile=False):\n return False\n\n if not reshared_from.reshareable:\n return False\n\n new_post.reshared_from = reshared_from\n\n new_post.reshareable = reshareable\n new_post.save()\n\n if reshared_from:\n create_notification(\n self,\n notifying_href=new_post.make_href(),\n notifying_summary=new_post.content,\n notifying_action=NotifyingAction.Reshare,\n notified_href=reshared_from.make_href(),\n notified_summary=reshared_from.content,\n owner=reshared_from.author\n )\n # only cache reshared post\n set_in_post_cache(reshared_from)\n\n mention(\n self,\n notified_href=new_post.make_href(),\n notified_summary=new_post.content,\n mentioned_users=mentioned_users\n )\n\n return new_post", "def on_created_post(sender, instance, created, **kwargs):\n if not created:\n return\n if not issubclass(sender, dillo.models.posts.Post):\n return\n instance.hash_id = instance.id\n instance.save()\n log.debug('Set user %s as follower of own post %i' % (instance.user, instance.id))\n follow(instance.user, instance, actor_only=False)", "def comments(self, request, pk=None):\n\n if request.method == 'GET':\n user_wall_post = self.get_object()\n post_comments = UserWallPostComment.objects.filter(user_wall_post=user_wall_post)\n post_comment_serializer = UserWallPostCommentSerializer(post_comments, many=True)\n return Response(post_comment_serializer.data)\n\n user_wall_post = self.get_object()\n post_comment_serializer = UserWallPostCommentSerializer(data=request.data)\n post_comment_serializer.is_valid(raise_exception=True)\n post_comment_serializer.save(comment_by=self.request.user, user_wall_post=user_wall_post)\n\n to_user = user_wall_post.owner\n from_user = request.user\n UserNotification.create_post_friend_comment_notification(from_user, to_user, 'Right', id=pk)\n return Response(data=post_comment_serializer.data, status=201)", "def post(self):\n\n\t\targs = fb_login_parser.parse_args()\n print args\n if not android_users.find_one({\"fb_id\": args[\"fb_id\"]}):\n android_users.update({\"fb_id\": args[\"fb_id\"]}, {\"$set\": {\n \"user_name\": args[\"user_name\"],\n \"email\": args[\"email\"],\n \"gender\": args[\"gender\"], \n \"date_of_birth\": args[\"date_of_birth\"],\n \"location\": args[\"location\"],\n \"user_friends\": args[\"user_friends\"],}} , upsert=True) \n \n return {\"error\": False,\n \"success\": True,\n \"error_code\": 0,\n \"messege\": \"The user with fb_id {0} and name {1} has been inserted correctly\".\n format(args[\"fb_id\"], args[\"user_name\"] ),}\n \n \n if android_users.find_one({\"fb_id\": args[\"fb_id\"]}):\n if len(android_users.find_one({\"fb_id\": args[\"fb_id\"]}).get(\"user_friends\")) < len(args[\"user_friends\"]):\n android_users.update({\"fb_id\": args[\"fb_id\"]}, {\"$set\": {\n \"user_friends\": args[\"user_friends\"],}} , upsert=False) \n \n \n return {\"error\": False,\n \"success\": True,\n \"error_code\": 0,\n \"messege\": \"The user with fb_id {0} and name {1} has been 
updated with new user_friends\".\n format(args[\"fb_id\"], args[\"user_name\"] ),}\n \n return {\"error\": True,\n \"success\": False,\n \"error_code\": 0, \n \"messege\": \"The user with fb_id {0} and name {1} already exists\".\n format(args[\"fb_id\"], args[\"user_name\"] ),}\n \n return", "def newreply(request, post_id):\n if not request.user.is_authenticated():\n return redirect('/login/?next=%s' % request.path)\n else:\n\n reply = Reply.objects.create(\n creator = request.user,\n created = datetime.datetime.now(),\n body = request.POST.get('mensaje'),)\n post = Post.objects.get(id = post_id)\n post.reply.add(reply) \n return redirect('/home/')", "def create(self, request):\n\n invited_email = request.data.get(\"email\")\n status = request.data.get(\"status\", False)\n if not invited_email:\n return Response(status=rest_status.HTTP_404_NOT_FOUND)\n try:\n invited_user = UserProfile.objects.get(user__email=invited_email)\n except UserProfile.DoesNotExist:\n return Response(status=rest_status.HTTP_404_NOT_FOUND)\n\n user_sending = get_object_or_404(UserProfile, user=request.user)\n\n if user_sending == invited_user:\n return Response(status=rest_status.HTTP_404_NOT_FOUND)\n\n error = \"\"\n try:\n friendship, _created = FriendShip.objects.get_or_create(\n user_1=user_sending, user_2=invited_user, status=status\n )\n if not _created:\n if friendship.status:\n error = _(\"You already are friend with this user\")\n else:\n error = _(\"A pending invitation is already created\")\n except Exception:\n error = _(\n f\"An error occured when user {user_sending.user.email} invited {invited_user.user.email}\"\n )\n\n data = {}\n status = rest_status.HTTP_200_OK\n if error:\n status = rest_status.HTTP_400_BAD_REQUEST\n data[\"message\"] = error\n else:\n serializer = FriendShipSerializer(friendship)\n data[\"message\"] = \"OK\"\n data[\"content\"] = serializer.data\n return Response(data, status=status)", "def post(self):\n\n token = self.request.get('token')\n user = users.get_current_user()\n if not user:\n self.redirect('/')\n user_obj = utils.get_user(user)\n if token: # Adding a token for a user\n if utils.verify_token(token):\n user_obj.auth_token = token\n user_obj.put()\n self.redirect('/privatefeeds')\n else:\n args = constants.TOKEN_ARGS\n auth_url = \"https://trello.com/1/authorize?%s\" % urlencode(args)\n self.render('private.html',\n actions=constants.ACTIONS,\n incorrect_token=True,\n auth_url=auth_url,\n signout=constants.SIGNOUT)\n else: # Adding a board for a user\n board_id = self.request.get('board')\n title = self.request.get('title')\n link = self.request.get('link')\n description = self.request.get('description')\n actions = [x for x in constants.ACTIONS if self.request.get(x)]\n\n if actions:\n get_all = False\n if board_id == 'all':\n get_all = True\n board_id = None\n feed_url = utils.create_feed(\n user,board_id,\n title,link,\n description,actions,\n public_board=False,\n get_all=get_all,\n token=user_obj.auth_token)\n self.render('congrats.html',feed_url=feed_url,signout=constants.SIGNOUT)\n else: # They missed some required info\n user_boards = utils.find_boards(user_obj)\n self.render('private.html',\n actions=constants.ACTIONS,\n check_error=True,\n user_boards=user_boards,\n link=link,\n description=description,\n title=title,\n signout=constants.SIGNOUT)", "def make_post(request):\n if request.user.is_authenticated() and request.POST:\n member = Member.objects.get(user=request.user)\n thread_id = request.POST.get('thread_id', -1)\n content = 
request.POST.get('content', -1)\n if thread_id != -1 and content != -1 and member:\n post = Post()\n post.author = member\n post.thread = Thread.objects.get(pk=thread_id)\n post.content = content\n post.save()\n return HttpResponse(200)\n else:\n return server_error(request)\n else:\n return server_error(request)", "async def add(\n self,\n\t\tuser_id: Optional[int] = None,\n\t\ttext: Optional[str] = None,\n\t\tfollow: Optional[bool] = None,\n\t\t**kwargs\n ) -> friends.AddResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.add\", params)\n model = friends.AddResponse\n return model(**response).response", "def accept_request(request, id):\n f_request = FriendRequest.objects.get(id=id)\n if f_request.to_user == request.user:\n f_request.to_user.profile.friends.add(f_request.from_user)\n f_request.from_user.profile.friends.add(f_request.to_user)\n f_request.delete()\n messages.success(\n request,\n f'Your friend request was successfully accepted'\n )\n return redirect('profiles:my_friends')", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)", "def add_friendship(self, user_id, friend_id):\n if user_id == friend_id:\n # print(\"WARNING: You cannot be friends with yourself\")\n return False\n elif friend_id in self.friendships[user_id] or user_id in self.friendships[friend_id]:\n # print(\"WARNING: Friendship already exists\")\n return False\n else:\n self.friendships[user_id].add(friend_id)\n self.friendships[friend_id].add(user_id)\n\n return True", "def post(self, *args, **kwargs):\n json_data = request.get_json()\n\n relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data()\n\n if 'data' not in json_data:\n raise BadRequest('/data', 'You must provide data with a \"data\" route node')\n if isinstance(json_data['data'], dict):\n if 'type' not in json_data['data']:\n raise BadRequest('/data/type', 'Missing type in \"data\" node')\n if 'id' not in json_data['data']:\n raise BadRequest('/data/id', 'Missing id in \"data\" node')\n if json_data['data']['type'] != related_type_:\n raise InvalidType('/data/type', 'The type field does not match the resource type')\n if isinstance(json_data['data'], list):\n for obj in json_data['data']:\n if 'type' not in obj:\n raise BadRequest('/data/type', 'Missing type in \"data\" node')\n if 'id' not in obj:\n raise BadRequest('/data/id', 'Missing id in \"data\" node')\n if obj['type'] != related_type_:\n raise 
InvalidType('/data/type', 'The type provided does not match the resource type')\n\n self.before_post(args, kwargs, json_data=json_data)\n\n obj_, updated = self._data_layer.create_relationship(json_data,\n model_relationship_field,\n related_id_field,\n kwargs)\n\n qs = QSManager(request.args, self.schema)\n includes = qs.include\n if relationship_field not in qs.include:\n includes.append(relationship_field)\n schema = compute_schema(self.schema, dict(), qs, includes)\n\n if updated is False:\n return '', 204\n\n result = schema.dump(obj_).data\n if result.get('links', {}).get('self') is not None:\n result['links']['self'] = request.path\n self.after_post(result)\n return result, 200", "def write_post(id, args):\n graph = facebook.GraphAPI(id) \n graph.put_object(parent_object='me', connection_name = 'feed', **args)", "def get_user_and_created(cls, update, context):\n data = utils.extract_user_data_from_update(update)\n u, created = cls.objects.update_or_create(user_id=data[\"user_id\"], defaults=data)\n\n if created:\n if context is not None and context.args is not None and len(context.args) > 0:\n payload = context.args[0]\n if str(payload).strip() != str(data[\"user_id\"]).strip(): # you can't invite yourself\n u.deep_link = payload\n u.save()\n\n return u, created", "def create_and_join(request):\n c = {}\n c.update(csrf(request))\n if request.method == 'POST': # If the form has been submitted...\n form = TeamForm(request.POST) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n team = form.save()\n member = request.user.member\n member.team = team\n member.save()\n messages.add_message(request, messages.SUCCESS, 'Team info created!')\n return HttpResponseRedirect(reverse('team_details', args=(team.id,)))\n else:\n form = TeamForm() # An unbound form\n\n return render_to_response(\"teams/create_and_join.html\", {'form': form, 'c':c},\n context_instance=RequestContext(request))", "def test_rejectFriend(self):\n \n u = AppUser(id = 1)\n u.django_user = User.objects.create(username='Testuser')\n u.save()\n f = AppUser(id = 2)\n f.django_user = User.objects.create(username='Testuser2')\n f.save()\n u.requested_friends.add(f)\n # c = Client()\n # c.post(\"/requestFriend\",{'Friend':f.id,'User':u.id})\n # client = Client()\n # response = client.get(\"/requestFriend\")\n # request = response.wsgi_request \n\n #request.POST({'Friend':f.id,'User':u.id})\n #response = self.client.get(reverse('meetup_finder_app:requestFriend'))\n #f.requested_friends.add(u)\n #requestFriend(request)\n data = {'Friend':f.id,'User':u.id}\n #request = self.factory.post('/a/test/path/', data, content_type='application/json')\n # request = self.factory.post('/requestFriend/', data, content_type='application/json')\n # print(request.POST['User'])\n # request.user = self.user\n # requestFriend(request)\n\n # poll_1 = Poll.objects.get(pk=1)\n # self.assertEqual(poll_1.choice_set.get(pk=1).votes, 1)\n\n resp = self.client.post('/rejectFriend/', {'User': u.id, 'Friend': f.id})\n self.assertEqual(resp.status_code, 302)\n\n self.assertIs(f not in u.requested_friends.all(), True)", "def auto_follow_followers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n not_following_back = followers - following\n\n for user_id in not_following_back:\n try:\n t.friendships.create(user_id=user_id, follow=False)\n except Exception as e:\n print(\"error: %s\" % (str(e)))", "def _create_member(self, 
**kwargs):\n category_name = kwargs.pop('category_name', Category.ACTIVE)\n params = {\n 'category': Category.objects.get(name=category_name),\n 'first_payment_month': 8,\n 'first_payment_year': 2015,\n 'has_student_certificate': False,\n 'has_subscription_letter': True,\n 'has_collaborator_acceptance': False,\n }\n params = {k: kwargs.pop(k, v) for k, v in params.items()}\n member = Member.objects.create(**params)\n\n # create the related person\n params = {\n 'membership': member,\n 'nickname': 'test-nick',\n 'picture': 'fake-pic',\n }\n params = {k: kwargs.pop(k, v) for k, v in params.items()}\n Person.objects.create(**params)\n\n assert not kwargs, kwargs # would indicate a misuse of the parameters\n return member", "def create_graph(users, friend_counts):\n ###TODO-- Completed\n G = nx.Graph()\n\n #For Filtering the Nodes\n #print(friend_counts)\n friend_nodes = [friend for friend in friend_counts if friend_counts[friend] > 1]\n candidate_nodes = [user['screen_name'] for user in users]\n\n #print(\"Nodes: \",len(friend_nodes), len(candidate_nodes))\n #Adding Nodes to graph\n G.add_nodes_from(friend_nodes + candidate_nodes)\n\n #Connecting the Nodes with Edges\n for candidate in users:\n for friend in friend_nodes:\n if friend in candidate['friends']:\n G.add_edge(candidate['screen_name'], friend)\n\n return G", "def add(self, redditor: str | praw.models.Redditor, **other_settings: Any):\n data = {\"name\": str(redditor), \"type\": self.relationship}\n data.update(other_settings)\n url = API_PATH[\"friend\"].format(subreddit=self.subreddit)\n self.subreddit._reddit.post(url, data=data)", "def create_post(request):\n user = User.objects.get(email=request.user.email)\n if request.method == \"POST\":\n form = BlogPostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = user\n post.avatar = user.userprofile.avatar.url\n post.save()\n messages.success(request, \"You have posted successfully\")\n return redirect(\"get_posts\")\n else:\n messages.error(request, \"Unable to post at this time\")\n else:\n form = BlogPostForm()\n return render(request, \"blogpostform.html\", {\"form\": form})", "def add_new_post(request):\n\n token = request.data.get('token')\n if Token.objects.filter(key=request.data[\"token\"]).exists():\n text = request.data.get('text', '')\n image = request.data.get('image')\n video = request.data.get('video')\n\n if len(request.data[\"text\"]) < 1:\n return Response({\"error\": 21})\n elif len(request.data[\"text\"]) > 10000:\n return Response({\"error\": 22})\n\n token = get_object_or_404(Token, key=token)\n post = Post.objects.create(permission=request.data[\"permission\"],\n author_id=token.user_id,\n text=request.data[\"text\"])\n if image:\n image_data = b64decode(image)\n post.image = ContentFile(image_data, \"post.png\")\n\n if video:\n video_data = b64decode(request.data[\"video\"])\n post.video = ContentFile(video_data, \"post.mov\")\n post.save()\n\n if \"hashtags\" in request.data and len(request.data[\"hashtags\"]) > 0:\n for hashtag in request.data[\"hashtags\"]:\n PostHashtag.objects.create(post=post,\n hashtag=hashtag)\n\n serializer = PostSerializer(post, context={'user_id': token.user_id})\n return Response({\"success\": 23,\n \"post\": serializer.data})\n else:\n return Response({\"error\": 17})", "def create_post(user_id):\n if CURRENT_USER_KEY not in session or session[CURRENT_USER_KEY] != user_id:\n raise Unauthorized\n\n user = User.query.get_or_404(user_id)\n\n form = PostForm()\n form.muscles.choices = [(m.id, 
m.name) for m in Muscle.query.all()]\n form.equipment.choices = [(e.id, e.name) for e in Equipment.query.all()]\n # import pdb\n # pdb.set_trace()\n if form.validate_on_submit():\n title = form.title.data\n details = form.details.data\n is_private = form.is_private.data\n muscles = form.muscles.data\n equipment = form.equipment.data\n post = Post(title=title, details=form.details.data,\n is_private=form.is_private.data, user_id=user_id)\n db.session.add(post)\n db.session.commit()\n\n # create join table additions\n muscles_to_add = []\n equipment_to_add = []\n for muscle in muscles:\n muscle_post = PostMuscle(post_id=post.id, muscle_id=muscle)\n muscles_to_add.append(muscle_post)\n for choice in equipment:\n equipment_post = PostEquipment(\n post_id=post.id, equipment_id=choice)\n equipment_to_add.append(equipment_post)\n db.session.add_all(muscles_to_add + equipment_to_add)\n db.session.commit()\n flash('New post created!', 'success')\n return redirect(url_for('show_user_profile', user_id=user_id))\n return render_template('add_post.html', form=form, user=user)", "def get(self, request, format=None, limit=20):\n user = User.objects.get(id=self.request.user.id)\n group_post = Post.objects.filter(\n target_type=ContentType.objects.get(\n model='group',\n app_label='group'\n ).id,\n target_id__in=GroupMember.objects.filter(\n user=user\n ).values('group_id')\n )\n event_post = Post.objects.filter(\n target_type=ContentType.objects.get(model='event').id,\n target_id__in=EventMember.objects.filter(\n user=user,\n role__gt=0\n ).values('event_id')\n )\n friend_post = Post.objects.filter(\n target_type=ContentType.objects.get(model='user').id,\n target_id__in=Friend.objects.filter(\n from_user=self.request.user.id).values('to_user'),\n user__in=Friend.objects.filter(\n from_user=self.request.user.id\n ).values('to_user')\n ) | Post.objects.filter(\n target_type=ContentType.objects.get(model='user').id,\n target_id=None,\n user__in=Friend.objects.filter(\n from_user=self.request.user.id\n ).values('to_user')\n )\n user_post = Post.objects.filter(\n user=user,\n target_type=ContentType.objects.get(model='user')\n ) | Post.objects.filter(\n target_id=user.id,\n target_type=ContentType.objects.get(model='user')\n )\n post = (group_post | event_post | friend_post | user_post).order_by(\n '-datetime')[:limit]\n response = self.serializer_class(post, many=True)\n\n return Response(response.data)", "def create_user_profile(sender, **kwargs):\n\n if kwargs['created']:\n UserProfile.objects.create(user=kwargs['instance'])", "def createNewChat(topic, users_logins):\n group_chat_flag = False\n if len(users_logins) > 2:\n group_chat_flag = True\n\n chat = Chat.objects.create(topic=topic, is_group_chat=group_chat_flag)\n chat.save()\n for user_login in users_logins:\n user = User.objects.filter(login=user_login)[0]\n Member.objects.create(chat=chat, user=user).save()", "def add_friend(self, account):\n if not account in self.friends.all():\n self.friends.add(account)\n self.save()", "def create_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def post(self, req):\n error_messages = []\n success_message = ''\n\n # Creamos owner y se lo pasamos al form con un objeto pre-establecido\n post_with_owner = Post()\n post_with_owner.owner = req.user\n post_with_owner.blog = Blog.objects.filter(owner=req.user)[0]\n\n form = PostCreateForm(req.POST, instance=post_with_owner)\n if form.is_valid():\n\n new_post = form.save()\n form = PostCreateForm()\n success_message 
= u'Post guardado con éxito! '\n success_message += u'<a href=\"{0}\">'.format(reverse('post_detail', args=[req.user.username, new_post.pk]))\n success_message += u'(ver post)</a>'\n else:\n error_messages.append(u'Formulario incompleto.')\n\n context = {\n 'form': form,\n 'success_message': success_message\n }\n return render(req, 'posts/new_post.html', context)", "def add_post(request, id):\n template = loader.get_template('topic.html')\n if request.method == \"POST\":\n data = JSONParser().parse(request)\n try:\n user = User.objects.get(username=request.user)\n except ObjectDoesNotExist:\n return HttpResponse(\"You should login to post!\")\n requested_topic = Topic.objects.get(id=data[\"topic_id\"])\n postObject = Post.objects.create(user_id=user.id, topic_id=requested_topic.id,content=data[\"content\"])\n for tag in data[\"tags\"]:\n if len(tag)>0:\n if tag['label'] == '':\n continue\n try:\n tagObject = Tag.objects.get(wikidataID=tag['id'])\n except ObjectDoesNotExist:\n tagObject = Tag.objects.create(wikidataID=tag['id'], name=tag['label'])\n except MultipleObjectsReturned:\n return HttpResponse(\"Multiple tags exist for.\" + tag + \" Invalid State.\")\n\n unique_hidden_tags = list(set(tag['hidden_tags']))\n if unique_hidden_tags:\n tagObject.hidden_tags = unique_hidden_tags\n\n tagObject.save()\n postObject.tags.add(tagObject)\n try:\n topic = Topic.objects.get(id=id)\n serialized_topic = TopicNestedSerializer(topic)\n topic_json = JSONRenderer().render(serialized_topic.data)\n except ObjectDoesNotExist:\n return HttpResponse(\"This topic doesn't exists!\")\n\n hot_topics = Topic.objects.order_by('-updated_at')[:5]\n serialized_hot_topics = HotTopicsSerializer(hot_topics, many=True)\n hot_topics_json = JSONRenderer().render(serialized_hot_topics.data)\n context = {\n 'topic': topic_json,\n 'hot_topics': hot_topics_json\n }\n return HttpResponse(template.render(context, request))", "def add_friends(self, user1_index, user2_index):\n if user1_index >= self.num_users or user2_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user1_index} and {user2_index} were requested.\"\n )\n if self.users_hat[user1_index, user2_index] == 0:\n self.users_hat[user1_index, user2_index] = 1\n elif self.is_verbose():\n self.log(f\"User {user2_index} was already following user {user1_index}\")\n if self.users_hat[user2_index, user1_index] == 0:\n self.users_hat[user2_index, user1_index] = 1\n elif self.is_verbose():\n self.log(f\"User {user1_index} was already following user {user2_index}\")", "def test_post_add_album_contrib_as_not_owner(self):\n complete_add_friends(self.u2.id, self.u3.id)\n\n self.make_logged_in_owner()\n\n # get our manage page with form (use self.u as self.u2 will not obtain the form)\n # using self.u will not affect our test later because we aren't using the client later\n resp = self.client.get(reverse('manage_album', kwargs={'albumid': self.testalbum.id}))\n\n # get and populate form\n myform = resp.context['addcontributorsform']\n data = myform.initial\n data['idname'] = self.u3.id\n\n # construct our post\n self.addcontribpostrequest = self.factory.post(\n reverse(\"add_album_contrib\", kwargs={\"albumid\": self.testalbum.id}), data=data)\n\n self.user_escalate_post_test_helper(self.addcontribpostrequest, self.u2, self.testalbum, self.testalbum.id,\n album.add_contrib, ALBUM_PRIVATE+1)", "def test_accept_friend(self):\n self.test_request_friend()\n url = reverse('MGA:accept_friend_request')\n data = {'id': 2}\n response 
= self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def set_friends(self, name, friend_names):\n\n person = self.nodes[name]\n\n for friend_name in friend_names:\n friend = self.nodes[friend_name]\n\n # Since adjacent is a set, we don't care if we're adding duplicates ---\n # it will only keep track of each relationship once. We do want to\n # make sure that we're adding both directions for the relationship.\n person.adjacent.add(friend)\n friend.adjacent.add(person)", "def post(self):\n args = UpdateLikeList.post_parser.parse_args()\n user_name = args.get('user_name')\n restaurant_name = args.get('restaurant_name')\n #rating = args.get('rating')\n newlike = {\n 'user_name':args.get('user_name'),\n 'restaurant_name':args.get('restaurant_name')\n #'rating':args.get('rating')\n }\n conn = db.create_connection(db.connection_config_dict)\n cursor = conn.cursor()\n\n # To get user's user_id\n user_id = []\n sql_1 = 'SELECT user_id FROM User WHERE user_name = \"{user_name}\"'.format(user_name=user_name)\n print(sql_1)\n cursor.execute(sql_1)\n for u in cursor:\n user_id.append(u)\n print(user_id) \n\n # To get restaurant's restaurant_id\n restaurant_id = []\n sql_2 = 'SELECT restaurant_id FROM Restaurant WHERE name = \"{restaurant_name}\"'.format(restaurant_name=restaurant_name)\n print(sql_2)\n cursor.execute(sql_2)\n for u in cursor:\n restaurant_id.append(u)\n print(restaurant_id)\n\n # Insert new restaurant into LikeList table\n # neo4j may need insert data here\n # user id is user_id[0][0], restaurant id is restaurant_id[0][0].\n sql_3 = \"INSERT INTO LikeList (user_id, restaurant_id) VALUES ({user_id}, {restaurant_id});\".format(user_id=user_id[0][0], restaurant_id=restaurant_id[0][0])\n print(sql_3)\n cursor.execute(sql_3)\n\n conn.commit()\n return newlike, 201", "def createRelationshipsAppContact(d, pIds):\n # Create the number of app contact for the day\n numOfContact = MAX_NUMBER_OF_CONTACT\n\n for _ in range(0, numOfContact):\n # Choose two random people\n randomIndex = randint(0, len(pIds) - 1)\n pId1 = pIds[randomIndex]\n randomIndex = randint(0, len(pIds) - 1)\n pId2 = pIds[randomIndex]\n # Choose the hour/date\n # Verify if it's the same node\n if pId1 == pId2:\n continue\n date = datetime.date.today() - datetime.timedelta(days=randint(0, CONTACT_DAYS_BACKS))\n date = date.strftime(\"%Y-%m-%d\")\n h = randint(0, 23)\n minutes = randint(0, 59)\n if minutes < 10:\n minutes = \"0\" + str(minutes)\n hour = str(h) + \":\" + str(minutes) + \":00\"\n n = 0\n while not (validateDate(d, date, pId1, hour) or not validateDate(d, date, pId2, hour)) \\\n and n < MAX_NUMBER_OF_ATTEMPTS_FOR_VALID_DATE:\n date = datetime.date.today() - datetime.timedelta(days=randint(0, 20))\n date = date.strftime(\"%Y-%m-%d\")\n h = randint(0, 23)\n minutes = randint(0, 59)\n if minutes < 10:\n minutes = \"0\" + str(minutes)\n hour = str(h) + \":\" + str(minutes) + \":00\"\n n = n + 1\n if n == MAX_NUMBER_OF_ATTEMPTS_FOR_VALID_DATE:\n continue\n\n query = (\n \"MATCH (p1:Person) , (p2:Person) \"\n \"WHERE ID(p1) = $pId1 AND ID(p2) = $pId2 \"\n \"MERGE (p1)-[:APP_CONTACT { hour: time($hour) , date: date($date)}]->(p2) \"\n \"MERGE (p1)<-[:APP_CONTACT { hour: time($hour) , date: date($date)}]-(p2)\"\n )\n # Execute the query\n with d.session() as s:\n s.write_transaction(createContact, query, pId1, pId2, hour, date)", "def post(self):\n liked = self.request.get('like')\n unliked = self.request.get('unlike')\n post_id = self.request.get('post_id')\n post = 
Posts.get_by_id(int(post_id))\n user = self.get_active_user()\n user_id = int(user.key().id())\n\n if liked:\n if user_id in post.liked_by:\n self.render_improper_endpoint_access(\"like\")\n else:\n if post.submitter_id != user_id:\n post.liked_by.append(user.key().id())\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(403)\n elif unliked:\n if user_id in post.liked_by:\n index = post.liked_by.index(user_id)\n del post.liked_by[index]\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(500)", "def new_friends(self, G):\r\n H = G.to_undirected() #creates an undirected copy of the original graph\r\n n = nx.preferential_attachment(H) #uses the preferential_attachment method from networkx to create friends\r\n for u, v, p in n:\r\n chance = random.randint(0, 100) #chance is a randomly generated number between 0 and 100\r\n if p >= len(G.edges) and chance >= 90: #creates a new relationship (edge) between two nodes if their preferential\r\n G.add_edge(u, v, weight=random.uniform(-1, 1)) #attachment number is higher than the total number of edges and\r\n else: #chance is greater than 90.\r\n continue\r\n return G", "def main():\n\n access_token = ('')\n\n # Get list of friend id numbers.\n url = ('https://graph.facebook.com/' +\n 'fql?q=SELECT uid2 FROM friend WHERE uid1=me()')\n\n content = simplejson.loads(urllib2.urlopen(url).read())\n content = [i['id'] for i in content['data']]\n\n connections = ['activities', 'adaccounts', 'albums',\n 'apprequests', 'books', 'checkins', 'events',\n 'family', 'feed', 'friendlists', 'friendrequests',\n 'friends', 'games', 'groups', 'home', 'inbox',\n 'interests', 'likes', 'links', 'locations',\n 'messagingfavorites', 'movies', 'music', 'mutualfriends',\n 'notes', 'notifications', 'outbox', 'payments',\n 'permissions', 'photos', 'posts', 'scores',\n 'statuses', 'tagged', 'television', 'updates', 'videos']\n\n for i in content:\n node = {}\n timestamp = datetime.datetime.utcnow().strftime(\"%s.%f\")\n node['timestamp'] = timestamp\n url = ('https://graph.facebook.com/' +\n i +\n '/?access_token=' +\n access_token)\n j = simplejson.loads(urllib2.urlopen(url).read())\n node[i] = [{k: j[k]} for k in j.keys()]\n for k in connections:\n if k == 'mutualfriends':\n url = ('https://graph.facebook.com/me/mutualfriends/' +\n j['id'] +\n '/?access_token=' +\n access_token)\n else:\n url = ('https://graph.facebook.com/' +\n j['id'] +\n '/' +\n k +\n '?access_token='\n + access_token)\n try:\n #print('{0}: {1}; {2}: {3}').format('connection', k, 'URL', url)\n l = simplejson.loads(urllib2.urlopen(url).read())\n node[k] = [m for m in l['data']]\n except urllib2.HTTPError, e:\n pass\n print(node)", "def create_post(category, author, name, content, status):\n return Post.objects.create(category=category, author=author, name=name, content=content, status=status)", "def create_profile(sender, instance, created, **kwargs):\n if created: \n profile, new = UserProfile.objects.get_or_create(user=instance)", "def create(cls, sender, instance, created, **kdws):\n if created:\n username = helpers.make_username(instance.first_name, instance.last_name, instance.email)\n user = User(username=username)\n user.save()\n user = User.objects.get(username=username)\n instance.user = user\n instance.save()", "def post(self):\r\n return create_user(request)", "def create_post(user_id):\n\n user = User.query.get_or_404(user_id)\n title = request.form['title']\n content = request.form['content']\n tag_ids = [int(num) for num in 
request.form.getlist(\"tags\")]\n tags = Tag.query.filter(Tag.id.in_(tag_ids)).all()\n \n new_post = Post(title=title, content=content, user=user, tags=tags)\n db.session.add(new_post)\n db.session.commit()\n\n return redirect(f\"/users/{user_id}\")", "def add_fellows(self, fellows, accomodation):\n wants_accomodation = 'Y' if accomodation.lower() == 'y' else 'N'\n\n for name in fellows:\n fellow_instance = Fellow(name, wants_accomodation)\n fellow_instance.save(self.db)", "def get(self, request, action, id, limit=20):\n user = User.objects.get(id=self.request.user.id)\n group_post = Post.objects.filter(\n target_type=ContentType.objects.get(\n model='group',\n app_label='group').id,\n target_id__in=GroupMember.objects.filter(\n user=user\n ).values('group_id')\n )\n event_post = Post.objects.filter(\n target_type=ContentType.objects.get(\n model='event').id,\n target_id__in=EventMember.objects.filter(\n user=user, role__gt=0\n ).values('event_id')\n )\n friend_post = Post.objects.filter(\n target_type=ContentType.objects.get(\n model='user').id,\n target_id__in=Friend.objects.filter(\n from_user=self.request.user.id\n ).values('to_user'),\n user__in=Friend.objects.filter(\n from_user=self.request.user.id\n ).values('to_user')\n ) | Post.objects.filter(\n target_type=ContentType.objects.get(\n model='user').id,\n target_id=None,\n user__in=Friend.objects.filter(\n from_user=self.request.user.id\n ).values('to_user')\n )\n user_post = Post.objects.filter(\n user=user,\n target_type=ContentType.objects.get(model='user')\n ) | Post.objects.filter(\n target_id=user.id,\n target_type=ContentType.objects.get(model='user')\n )\n if action == 'more':\n post = (group_post | event_post | friend_post | user_post).filter(\n id__lt=id).order_by('-datetime')[:limit]\n if action == 'new':\n post = (group_post | event_post | friend_post | user_post).filter(\n id__gt=id).order_by('-datetime')\n response = self.serializer_class(post, many=True)\n return Response(response.data)", "def add_friend_to_trip(request, trip_id, user_id):\n try:\n trip = Trip.objects.get(pk=trip_id)\n if request.user not in trip.users.all():\n return Response(status=status.HTTP_401_UNAUTHORIZED)\n\n user = User.objects.get(pk=user_id)\n if user in trip.users.all():\n error_message = \"User already associated with trip\"\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n\n trip.users.add(user)\n except Trip.DoesNotExist:\n error_message = \"Trip does not exist\"\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n except User.DoesNotExist:\n error_message = \"User does not exist\"\n return Response(error_message, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response(str(e), status=status.HTTP_400_BAD_REQUEST)\n\n return Response(status=status.HTTP_200_OK)", "def create_test_data(users=5, categories=2, forums=2, topics=1, posts=1):\n create_default_groups()\n create_default_settings()\n\n data_created = {'users': 0, 'categories': 0, 'forums': 0,\n 'topics': 0, 'posts': 0}\n\n # create 5 users\n for u in range(1, users + 1):\n username = \"test%s\" % u\n email = \"test%[email protected]\" % u\n user = User(username=username, password=\"test\", email=email)\n user.primary_group_id = u\n user.activated = True\n user.save()\n data_created['users'] += 1\n\n user1 = User.query.filter_by(id=1).first()\n user2 = User.query.filter_by(id=2).first()\n\n # lets send them a few private messages\n for i in range(1, 3):\n # TODO\n pass\n\n # create 2 categories\n for i in range(1, 
categories + 1):\n category_title = \"Test Category %s\" % i\n category = Category(title=category_title,\n description=\"Test Description\")\n category.save()\n data_created['categories'] += 1\n\n # create 2 forums in each category\n for j in range(1, forums + 1):\n if i == 2:\n j += 2\n\n forum_title = \"Test Forum %s %s\" % (j, i)\n forum = Forum(title=forum_title, description=\"Test Description\",\n category_id=i)\n forum.save()\n data_created['forums'] += 1\n\n for t in range(1, topics + 1):\n # create a topic\n topic = Topic()\n post = Post()\n\n topic.title = \"Test Title %s\" % j\n post.content = \"Test Content\"\n topic.save(post=post, user=user1, forum=forum)\n data_created['topics'] += 1\n\n for p in range(1, posts + 1):\n # create a second post in the forum\n post = Post()\n post.content = \"Test Post\"\n post.save(user=user2, topic=topic)\n data_created['posts'] += 1\n\n return data_created", "def post(self, request):\n\n email = request.data.get('email')\n phone_number = request.data.get('phone_number')\n otp = request.data.get('otp')\n\n # check that otp is correct or not (otp should match with email or phone number\n otp_obj = Otp.objects.filter(Q(email_phone=email) | Q(email_phone=phone_number) & Q(code=otp)).first()\n if not otp_obj:\n response_json = {\n 'status': False,\n 'message': 'otp is incorrect',\n 'data': {}\n }\n\n return Response(response_json, status=400)\n\n # create new user\n request_json = {\n \"username\": request.data.get('username'),\n \"password\": make_password(request.data.get('password')),\n \"email\": email,\n \"phone_number\": phone_number\n }\n\n user_serialized = UserProfileSerializer(data=request_json)\n if not user_serialized.is_valid():\n return validate_error(user_serialized)\n user_serialized.save()\n\n user_obj = UserProfile.objects.filter(id=user_serialized.data.get('id')).first()\n if not user_obj:\n return existence_error('user')\n\n # create following and follower object\n following_obj = UserFollowing.objects.create(user=user_obj)\n follower_obj = UserFollower.objects.create(user=user_obj)\n\n token, created = Token.objects.get_or_create(user=user_obj)\n\n otp_obj.delete()\n\n response_json = {\n 'status': True,\n 'message': 'User successfully registered',\n 'data': 'Token {}'.format(token.key)\n }\n\n return Response(response_json, status=201)", "def teacher_forum_create(request):\n # Deleting admin-typed user session\n # Deleting programmer-typed-user session\n\n # Get the current users\n current_basic_user = get_current_user(request, User, ObjectDoesNotExist)\n\n current_basic_user_profile = get_current_user_profile(\n request,\n User,\n BasicUserProfile,\n ObjectDoesNotExist\n )\n\n # Getting the teacher profile\n current_teacher_profile = get_current_teacher_user_profile(\n request,\n User,\n TeacherUserProfile,\n ObjectDoesNotExist\n )\n\n # forum create post form processing\n empty_input = False\n\n if request.POST.get(\"teacher_forum_create_post_btn\"):\n post_title = request.POST.get(\"post_title\")\n post_content = request.POST.get(\"post_content\")\n\n # check if any of the inputs are empty\n if bool(post_title) == False or post_title == \"\" \\\n or bool(post_content) == False or post_content == \"\":\n empty_input = True\n else:\n new_post = TeacherForumPost(\n teacher=current_teacher_profile,\n course=current_teacher_profile.teacher_course,\n post_title=post_title,\n content=post_content\n )\n new_post.save()\n return HttpResponseRedirect(\n \"/teacher/forum/read/\"+str(new_post.id)+\"/\"\n )\n\n data = {\n 
\"current_basic_user\": current_basic_user,\n \"current_basic_user_profile\": current_basic_user_profile,\n \"current_teacher_profile\": current_teacher_profile,\n \"empty_input\": empty_input,\n }\n\n if \"teacher_user_logged_in\" in request.session:\n return render(request, \"teacher_forum/create.html\", data)\n else:\n return HttpResponseRedirect(\"/\")", "async def send_friend_request(self):\n\n logging.debug(\"Sending friend request to \" + self.username)\n\n if self.is_friend:\n raise ObjectErrors.AlreadyFriends(\n \"You are already friends with \" + self.display_name)\n\n await self.client.request.post(\n \"/user/%s/friendRequest\" % self.id)", "def add_all_friends(twitter, users):\n for u_dict in users:\n u_dict['friends'] = get_friends(twitter,u_dict['screen_name'])", "def post(self, request, pk):\n try:\n follower = request.user\n question = Question.objects.filter(pk=pk).first()\n\n \"\"\"Return HTTP 404 if the question does not exist\"\"\"\n if question is None:\n return JsonResponse({\"error\": \"Question you requested to follow does not exist\"}, status=status.HTTP_404_NOT_FOUND)\n\n \"\"\"Check if the following record already exists, if not create it, but if it does, fail silently\"\"\"\n if not QuestionFollowing.objects.filter(user=follower, question=question).exists():\n QuestionFollowing.objects.create(user=follower, question=question)\n \"\"\"Increment the question's following\"\"\"\n question.followings += 1\n question.save()\n\n return JsonResponse({'status': True}, status=status.HTTP_200_OK)\n except Exception as e:\n print(e)\n # return JsonResponse({'status': False, 'error': str(e)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def fileInsert(friends, chats, posts):\n\n # check if directory 'data' exists and if not make it\n if not os.path.exists('data'):\n os.mkdir('data')\n\n # open a file called 'friends' and output each friend's id\n if friends:\n with open('data/friends', 'w') as f:\n for key in friends:\n f.write(key + '\\n')\n\n # open a file called 'chats' and output each friend's id and sent chat number\n if chats:\n with open('data/chats', 'w') as f:\n for key, val in chats.items():\n f.write(key + ' ' + val + '\\n')\n\n # open a file called 'posts' and output each time and wallpost\n if posts:\n with open('data/posts', 'w') as f:\n for key, val in posts.items():\n f.write(str(key) + ' ' + val[0] + ' ' + val[1] + '\\n')", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n Profile.objects.create(user=instance)" ]
[ "0.6206124", "0.60353947", "0.5980641", "0.5965954", "0.59487104", "0.56830245", "0.56672275", "0.56354594", "0.56294787", "0.55971295", "0.55717593", "0.5568948", "0.5557959", "0.5542263", "0.54640216", "0.54584336", "0.54564565", "0.54361814", "0.5435222", "0.54146063", "0.53731936", "0.5340057", "0.5336341", "0.53200805", "0.5292797", "0.5281593", "0.5280947", "0.5277363", "0.52401006", "0.52210206", "0.52129084", "0.51830494", "0.516921", "0.51648235", "0.51456934", "0.5099094", "0.509384", "0.5083791", "0.50666463", "0.50641453", "0.5061367", "0.5056556", "0.5054988", "0.5031183", "0.49796793", "0.49758655", "0.4950289", "0.49484262", "0.49477518", "0.49246845", "0.49242812", "0.49218783", "0.49218783", "0.49218783", "0.49187094", "0.49119216", "0.49007276", "0.48960778", "0.48803708", "0.4869788", "0.486444", "0.48619267", "0.48544797", "0.4850909", "0.48468497", "0.48459414", "0.48408663", "0.48375288", "0.48166278", "0.48110187", "0.48086375", "0.47943756", "0.47927615", "0.47899294", "0.47871277", "0.47845998", "0.47805908", "0.4776048", "0.4764061", "0.47599494", "0.4756773", "0.47551808", "0.4748661", "0.47428602", "0.4736181", "0.4719965", "0.47195742", "0.47167182", "0.470658", "0.46991926", "0.46991673", "0.46983707", "0.46981922", "0.46976948", "0.46773702", "0.4672507", "0.46714485", "0.46704173", "0.46698475", "0.46698475" ]
0.7941469
0
Takes post author, comment author and creates a post and associated comment
def create_post_with_comment(pauthor, cauthor, visibility, ptext, ctext): post = Post.objects.create(content = ptext, author = pauthor, visibility=visibility) comment = Comment.objects.create(comment = ctext, post = post, author = cauthor) return (post, comment)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_comment(post, author, content):\n return Comment.objects.create(post=post, author=author, content=content)", "def postCreate(post):\n post_list = list()\n comments = commentList(post)\n comment_url = \"{}/api/posts/{}/comments\".format(DOMAIN, post.id)\n visible_to = list()\n visible = post.visibleTo.all()\n if visible:\n for author in visible:\n auth = \"{}/api/author/{}\".format(DOMAIN, author.id)\n visible_to.append(auth)\n\n # visible_to = list(post.visibleTo)\n post_dict = {'author': addAuthor(post.author), 'title': post.title, 'description': post.description,\n 'contentType': post.contentType, 'content': post.content, 'published': post.published,\n 'visibility': post.visibility, 'visibleTo': visible_to, 'unlisted': post.unlisted, 'id': post.id,\n 'comments': comments[:5], 'next': comment_url, 'count': len(comments),\n 'source': \"{}/api/posts/{}\".format(DOMAIN, post.id),\n 'origin': \"{}/api/posts/{}\".format(DOMAIN, post.id)}\n post_list.append(post_dict)\n return post_list", "def post(self, request, pk):\n\n post = Blog.objects.get(pk=int(pk))\n user_id = self.request.session.get('USER_ID')\n\n try:\n user = User.objects.get(pk=user_id)\n except:\n pass\n body = self.request.POST.get('body')\n\n if user_id is None:\n messages.add_message(request, messages.ERROR, \"Please login to add comments.\")\n return HttpResponseRedirect(self)\n\n comments = Comment.objects.create(post=post, author=user, body=body)\n\n d = model_to_dict(post)\n messages.add_message(request, messages.SUCCESS, \"Comment added successfully.\")\n return self.render_to_response(d)", "def createcomment(request, pk):\n issue = get_object_or_404(Issue, pk=pk)\n if request.method == \"POST\":\n form = CommentCreationForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.issue = issue\n comment.author = request.user\n comment.created_at = timezone.now()\n comment.save()\n return redirect('office:issue', pk=pk)\n else:\n form = CommentForm()\n return render(request, 'blog/add_comment_to_post.html', {'form': form})", "def post(self):\n post_id = int(self.request.get('post_id'))\n post = Posts.get_by_id(post_id)\n comment = self.request.get('comment')\n submitter_id = self.get_active_user().key().id()\n\n if submitter_id:\n comment = Comments(post_id=post_id, content=comment,\n submitter_id=submitter_id)\n comment.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(403)", "def add_comment(cls, post_id, user_id, content):\n c = cls(parent=comment_key(),\n post_id=post_id,\n user_id=user_id,\n content=content)\n c.put()", "def make_comments(post, comments):\n for comment in comments:\n try:\n com = RedditComment(reddit_post=post, **comment)\n com.save()\n except Exception as ex:\n print 'comment could not be created'\n print ex", "def post(self, request, *args, **kwargs):\n serializer = CommentSerializer(data=request.data)\n post_pk = self.kwargs['post_pk']\n post = Post.objects.get(pk=post_pk)\n if serializer.is_valid():\n serializer.save(author=request.user, post=post)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def createComment(owner_id=None, post_id=None, from_group=None, message=None,\\\n reply_to_comment=None, attachments=None, sticker_id=None,\\\n guid=None):\n params = {\n 'owner_id': owner_id,\n 'post_id': post_id,\n 'from_group': from_group,\n 'message': message,\n 'reply_to_comment': reply_to_comment,\n 'attachments': attachments,\n 'sticker_id': 
sticker_id,\n 'guid': guid\n }\n result = call('wall.createComment', **params)\n return parse_response(result)", "async def create_reply(*, comment: models.Comment = Depends(resolve_comment), created_comment: CreateComment,\n current_user: models.User = Depends(resolve_current_user), db: Session = Depends(get_db)):\n return crud.create_comment(db, author_id=current_user.id, parent_resub_id=comment.parent_resub_id,\n parent_post_id=comment.parent_post_id, parent_comment_id=comment.id,\n content=created_comment.content)", "def create_post(request):\n if request.method == 'POST':\n title = request.POST['title']\n content = request.POST['content']\n user_id = request.POST['author_id']\n category = request.POST['category']\n\n slug = \"-\".join(list(map(lambda word: word.lower(), title.split())))\n author = User.objects.get(id=int(user_id))\n\n # save info in models\n post = Post()\n post.author = author\n post.category = category\n post.title = title\n post.content = content\n post.slug = slug\n post.save()\n return redirect('post')\n\n return render(request, 'posts/create_post.html')", "def process_comment(request, comment, post):\n\n if request.user.is_authenticated:\n # We already set auth user's name and email in the form's inital vals.\n comment.author = request.user\n\n # Is this a threaded comment?\n if request.POST.get(\"parent_id\"):\n comment.parent = Comment.objects.get(id=request.POST.get(\"parent_id\"))\n\n # If commenter is logged in, override name and email with stored values from User object\n if request.user.is_authenticated:\n comment.name = request.user.get_full_name()\n comment.email = request.user.email\n\n # Set required relationship to Post object\n comment.post = post\n\n # Get commenter's IP and User-Agent string\n # ip = get_ip(request)\n # if ip is not None:\n # comment.ip_address = ip\n comment.user_agent = request.META.get(\"HTTP_USER_AGENT\", \"\")\n\n # Run spam check\n comment.spam = spam_check(comment)\n\n # Strip disallowed HTML tags. 
See tangerine docs to customize.\n comment.body = sanitize_comment(comment.body)\n\n # Call comment approval workflow\n comment.approved = get_comment_approval(comment.email, request.user.is_authenticated)\n if comment.approved:\n messages.add_message(request, messages.SUCCESS, \"Your comment has been posted.\")\n else:\n messages.add_message(request, messages.INFO, \"Your comment has been held for moderation.\")\n\n comment.save()\n\n # Alert post author that comment needs moderation, or that it's been auto-published:\n send_comment_moderation_email(comment)", "def create_comment_immediately_below_post():\n post = create_a_post()\n comment = Comment.create(post=post, body=\"I'm a comment right below a post\")\n comment.save()\n return comment", "def create_post(category, author, name, content, status):\n return Post.objects.create(category=category, author=author, name=name, content=content, status=status)", "def post(self, post_id):\n comment_content = self.request.get(\"comment_content\")\n Post.add_comment(int(post_id), int(\n self.user.get_id()), comment_content)\n self.redirect(\"/blog/\" + post_id + \"/comments\")", "def test_add_comment(self):\n post = PostFactory()\n comment = PostFactory()\n post.add_comment(comment)\n self.assertEqual(comment.title, post.title)\n self.assertTrue(comment.is_comment)\n self.assertEqual(comment.parent_post, post)", "def add(self, author, post):\n if not author in self.authors:\n self.authors.append(author)\n self.posts[author].append(post)\n return", "def create_a_post():\n subj = create_subject()\n post = Post.create(subject=subj, title=\"A great title\", body=\"Just a great day!\")\n post.save()\n return post", "def createPost(content):\n\n cur, user_id, con = initialise(3, True)\n cur.execute(\"INSERT INTO posts (name, content) VALUES ((SELECT username FROM users WHERE id = ?), ?)\", (user_id, content))\n finish(con)", "def setUp(self):\n self.comment = Comment()\n self.comment.comment_description = 'This is a test comment'\n self.post = create_post()\n self.post.user = self.create_user()\n self.post.save()\n self.comment.post = self.post\n self.comment.user = self.user\n self.comment.save()", "def test_comment_creation(self):\n response = self.client.post(reverse('posts:comment_create'),\n data={\n 'post': self.post.id,\n 'user': self.user.id,\n 'comment_description': 'This is a '\n 'test_comment'\n }, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data, {\n 'post': self.post.id,\n 'user': self.user.id,\n 'comment_description': 'This is a '\n 'test_comment'\n })", "def new_comment(self, post_id, comment):\n # *don't* pass in username and password. if you do, that wordpress user's\n # name and url override the ones we provide in the xmlrpc call.\n #\n # also, use '' instead of None, even though we use allow_none=True. 
it\n # converts None to <nil />, which wordpress's xmlrpc server interprets as\n # \"no parameter\" instead of \"blank parameter.\"\n #\n # note that this requires anonymous commenting to be turned on in wordpress\n # via the xmlrpc_allow_anonymous_comments filter.\n return self.proxy.wp.newComment(self.blog_id, '', '', post_id, comment)", "def _add_comment(self, comment, post_id, page_id, parent_comment=None):\n user_id = self._get_or_create_user(comment['from'])\n message = self._clean_message(comment)\n if len(message) > 0:\n columns = '(user, post, page, fb_id, created_time, message, like_count, comment_count'\n values = (user_id, post_id, page_id, comment['id'], comment['created_time'],\n message, comment['like_count'], comment['comment_count'])\n values_placeholder = '(%s,%s,%s,%s,%s,%s,%s,%s'\n if parent_comment is None:\n columns = columns + ')'\n values_placeholder = values_placeholder + ')'\n else:\n columns = columns + ',parent_comment)'\n values = values + (parent_comment,)\n values_placeholder = values_placeholder + ',%s)'\n return self._insert_if_possible('INSERT INTO comment {} VALUES {}'.format(columns, values_placeholder),\n values)\n else:\n return False", "def remotePostCreate(host, post):\n post = post.get('posts')[0]\n author = remoteAddAuthor(post.get('author'))\n title = post.get('title')\n description = post.get('description')\n contentType = post.get('contentType')\n content = post.get('content')\n published = utc.localize(datetime.strptime(post.get('published'), '%Y-%m-%dT%H:%M:%S.%fZ'))\n visibility = post.get('visibility')\n unlisted = post.get('unlisted')\n id = post.get('id')\n origin = post.get('origin')\n count = post.get('count')\n comments = remoteCommentList(post)\n source = \"{}/api/posts/{}\".format(DOMAIN, post.get('id'))\n\n post_dict = {'author': author, 'title': title, 'description': description,\n 'contentType': contentType, 'content': content, 'published': published,\n 'visibility': visibility, 'unlisted': unlisted, 'id': id,\n 'comments': comments, 'origin': origin, 'count': count,\n 'source': source}\n return post_dict", "def create():\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n error = None\n\n if not title:\n error = 'Title is required.'\n\n if error is not None:\n flash(error)\n else:\n db = get_db()\n db.execute(\n 'INSERT INTO post (title, body, author_id)'\n ' VALUES (?, ?, ?)',\n (title, body, g.user['id'])\n )\n db.commit()\n return redirect(url_for('blog.index'))\n\n return render_template('blog/create.html')", "def addPost(self,text,id,url,date):\n self.topComments.append(Post(text,id,url,date))\n return None", "def create_post():\n\n #Get prompt id\n prompt_id = request.form.get('prompt_id')\n\n # Get post text\n post_text = request.form.get('user_post')\n\n # Create post timestamp\n created_at = datetime.now()\n user_facing_date = created_at.strftime(\"%B %d, %Y\")\n\n # Save post and related data to database\n post = crud.create_post(session['user_id'], prompt_id, post_text, session['lat'], session['lng'], session['user_facing_location'], created_at)\n\n return render_template('post_data.html', post=post, user_facing_date=user_facing_date)", "def add_comment_to_post(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == 'POST':\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = CommentForm()\n return render(request, 
'blog/comment_form.html', {'form': form})", "def create(self, comment):\r\n url = self.get_url()\r\n\r\n # when creating commits they don't get wrapped in {\"body\": <comment>}\r\n return http.Request('POST', url, params=comment), parsers.parse_json", "def create_comment(session: Session) -> Comment:\n try:\n regular_user = (\n session.query(User).filter(User.username == \"obnoxioustroll69\").first()\n )\n post = session.query(Post).filter(Post.id == 1).first()\n comment = Comment(\n user_id=regular_user.id,\n post_id=post.id,\n body=\"This post about SQLAlchemy is awful. You didn't even bother to explain how to install Python, which is where I (and so many others) got stuck. Plus, your code doesn't even work!! I cloned your code and it keeps giving me `environment variable` errors... WTF are environment variables?!!?!?\",\n upvotes=2,\n )\n session.add(comment) # Add the Comment\n session.commit() # Commit the change\n LOGGER.success(f\"Created comment {comment} posted by user {regular_user}\")\n return comment\n except IntegrityError as e:\n LOGGER.error(e.orig)", "def post(self, pid, sid, aid, cid):\n return create_comment(pid, sid, aid, cid)", "def create_comment(bid, pid):\n # pylint: disable=unused-argument\n form = CommentForm(request.form)\n if request.method == 'POST':\n if form.validate():\n DB.session.add(Comment(pid, current_user.uid, form.text.data))\n DB.session.commit()\n flash('Comment successfully created!')\n else:\n flash(constants.DEFAULT_SUBMISSION_ERR)\n return redirect(request.referrer)", "def form_valid(self, form):\n # Add logged-in user as author of comment\n form.instance.author = self.request.user\n # Associate comment with blog based on passed id\n form.instance.post = get_object_or_404(Post, pk=self.kwargs['pk'])\n # Call super-class form validation behaviour\n return super(PostCommentCreate, self).form_valid(form)", "def make_post(request):\n if request.user.is_authenticated() and request.POST:\n member = Member.objects.get(user=request.user)\n thread_id = request.POST.get('thread_id', -1)\n content = request.POST.get('content', -1)\n if thread_id != -1 and content != -1 and member:\n post = Post()\n post.author = member\n post.thread = Thread.objects.get(pk=thread_id)\n post.content = content\n post.save()\n return HttpResponse(200)\n else:\n return server_error(request)\n else:\n return server_error(request)", "def post(self, pid, sid, aid):\n return create_comment(pid, sid, aid)", "def post(self):\n post_id = self.request.get('post_id')\n post = Post.get_by_id(int(post_id), parent=blog_key())\n content = self.request.get('comment')\n\n if content:\n comment = Comment(parent=comment_key(),\n content=content,\n user=self.user,\n post=post)\n comment.put()\n\n time.sleep(0.1)\n self.redirect('/blog/%s' % str(post.key().id()))", "def create_post(session: Session) -> Post:\n try:\n admin_user = session.query(User).filter(User.username == \"toddthebod\").first()\n post = Post(\n author_id=admin_user.id,\n slug=\"fake-post-slug\",\n title=\"Fake Post Title\",\n summary=\"A fake post to have some fake comments.\",\n feature_image=\"https://hackersandslackers-cdn.storage.googleapis.com/2021/01/[email protected]\",\n body=\"Cheese slices monterey jack cauliflower cheese dolcelatte cheese and wine fromage frais rubber cheese gouda. Rubber cheese cheese and wine cheeseburger cheesy grin paneer paneer taleggio caerphilly. 
Edam mozzarella.\",\n )\n session.add(admin_user) # Add the user\n session.commit() # Commit the change\n LOGGER.success(f\"Created post {post} published by user {admin_user}\")\n return post\n except IntegrityError as e:\n LOGGER.error(e.orig)", "def create_post():\r\n\r\n # Check for and reject empty username or whinge\r\n if not request.values.get(\"username\") or not request.values.get(\"whinge\"):\r\n print(\"Ignoring request to with empty username or whinge\")\r\n else:\r\n # Form data ok; add to DB\r\n con = get_db()\r\n con.execute(\"INSERT INTO posts (submitter,content,ts) VALUES (?,?,?);\",\r\n (\r\n request.values.get(\"username\"), # form field username -> DB column submitter\r\n request.values.get(\"whinge\"), # form field whinge -> DB column content\r\n time.time()\r\n )\r\n )\r\n con.commit()\r\n con.close()\r\n \r\n # TODO: Handle possibility of failed INSERT\r\n\r\n # Send them back to the main page\r\n return redirect(url_for(\"display_top\"))", "def create_author(user_dict, author_dict):\n user = User.objects.create_user(**user_dict)\n user.save()\n\n author_dict['user'] = user\n author = Author.objects.create(**author_dict)\n author.save()\n\n return (user, author)", "def create_post(bid):\n form = PostForm(request.form)\n if request.method == 'POST':\n if form.validate():\n DB.session.add(\n Post(\n bid,\n current_user.uid,\n form.name.data,\n form.desc.data))\n DB.session.commit()\n flash('Post ({}) successfully created!'.format(form.name.data))\n else:\n flash(constants.DEFAULT_SUBMISSION_ERR)\n return redirect(request.referrer)", "def create_post(request):\n user = User.objects.get(email=request.user.email)\n if request.method == \"POST\":\n form = BlogPostForm(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.author = user\n post.avatar = user.userprofile.avatar.url\n post.save()\n messages.success(request, \"You have posted successfully\")\n return redirect(\"get_posts\")\n else:\n messages.error(request, \"Unable to post at this time\")\n else:\n form = BlogPostForm()\n return render(request, \"blogpostform.html\", {\"form\": form})", "def mutate(parent, info, comment_details):\n\n user = User.find_or_fail(comment_details.user_id)\n post = Post.find_or_fail(comment_details.post_id)\n\n comment = Comments()\n comment.body = comment_details.body\n\n user.comments().save(comment)\n post.comments().save(comment)\n\n return comment", "def post_comment(self, entry, body, **args):\n args.update(entry=entry, body=body)\n return self.fetch(\"/comment\", post_args=args)", "def post(self, request, slug):\n article = ArticleInst.fetch(slug)\n comment = request.data.get('comment', {})\n\n serializer = self.serializer_class(data=comment)\n serializer.is_valid(raise_exception=True)\n status_ = status.HTTP_201_CREATED\n\n try:\n Comment.objects.get(\n article=article,\n body=comment.get('body')\n )\n except Comment.DoesNotExist:\n serializer.save(author=request.user, article=article)\n resp = {'message': 'Comment created'}\n resp['data'] = serializer.data\n\n else:\n resp = {'message': \"Seems you've posted an exact comment before\"}\n status_ = status.HTTP_409_CONFLICT\n return Response(data=resp,\n status=status_\n )", "def reddit_post(data, comments):\n\n sub = None\n try:\n sub = Source.objects.get(name=data['subreddit'])\n except Source.DoesNotExist:\n #This is jank but can be touched up manually\n sub = Source(name=data['subreddit'], url='reddit.com')\n sub.save()\n print 'source added to db with name: ' + data['subreddit']\n \n data['subreddit'] 
= sub\n \n (article, keywords) = scrape_article(data['url'], lambda x: timezone.now()) \n data['text'] = article['text']\n data['date'] = article['date']\n data['headline'] = article['headline']\n\n try:\n post = RedditPost(**data)\n post.save()\n make_reddit_keywords(post, keywords)\n make_comments(post, comments)\n except IntegrityError as ex:\n print ex\n print 'not unique reddit post for ' + data['post_title']", "def post(self):\n comment_id = self.request.get('comment_id')\n post_id = self.request.get('post_id')\n comment = Comment.get_by_id(int(comment_id), parent=comment_key())\n post = Post.get_by_id(int(post_id), parent=blog_key())\n if comment and self.user.key().id() == comment.user.key().id():\n comment.content = self.request.get('content')\n\n have_errors = False\n\n if not comment.content:\n error_content = \"Content is required\"\n have_errors = True\n\n if have_errors:\n self.render(\"edit_comment.html\",\n comment=comment,\n error_content=error_content,\n user=self.user)\n else:\n comment.put()\n time.sleep(0.1)\n\n self.redirect('/blog/%s' % str(post.key().id()))", "def post(request):\n if request.method == \"POST\":\n post = Post()\n post.content = request.POST['content']\n post.author = request.user\n post.save()\n return HttpResponseRedirect(reverse(\"index\"))", "async def createComment(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"createComment\"], *args, **kwargs)", "def handle_comments(self):\r\n comments = Comment.objects.all()\r\n for c in comments:\r\n new = ThreadedComment(\r\n content_type = c.content_type,\r\n object_id = c.object_id,\r\n comment = c.comment,\r\n user = c.user,\r\n date_submitted = c.submit_date,\r\n date_modified = c.submit_date,\r\n date_approved = c.submit_date,\r\n is_public = c.is_public,\r\n ip_address = c.ip_address,\r\n is_approved = not c.is_removed\r\n )\r\n new.save()", "def _create_comment(request, course_key, thread_id=None, parent_id=None):\r\n assert isinstance(course_key, CourseKey)\r\n post = request.POST\r\n\r\n if 'body' not in post or not post['body'].strip():\r\n return JsonError(_(\"Body can't be empty\"))\r\n\r\n course = get_course_with_access(request.user, 'load', course_key)\r\n if course.allow_anonymous:\r\n anonymous = post.get('anonymous', 'false').lower() == 'true'\r\n else:\r\n anonymous = False\r\n\r\n if course.allow_anonymous_to_peers:\r\n anonymous_to_peers = post.get('anonymous_to_peers', 'false').lower() == 'true'\r\n else:\r\n anonymous_to_peers = False\r\n\r\n comment = cc.Comment(\r\n anonymous=anonymous,\r\n anonymous_to_peers=anonymous_to_peers,\r\n user_id=request.user.id,\r\n course_id=course_key.to_deprecated_string(),\r\n thread_id=thread_id,\r\n parent_id=parent_id,\r\n body=post[\"body\"]\r\n )\r\n comment.save()\r\n if post.get('auto_subscribe', 'false').lower() == 'true':\r\n user = cc.User.from_django_user(request.user)\r\n user.follow(comment.thread)\r\n if request.is_ajax():\r\n return ajax_content_response(request, course_key, comment.to_dict())\r\n else:\r\n return JsonResponse(utils.safe_content(comment.to_dict()))", "def post(self):\n\n title = self.request.get(\"title\")\n blogPost = self.request.get(\"blogPost\")\n author = self.request.cookies.get('name')\n\n if title and blogPost:\n\n bp = Blogposts(parent=blog_key(), title=title,\n blogPost=blogPost, author=check_secure_val(author))\n\n bp.put()\n\n self.redirect('/%s' % str(bp.key.integer_id()))\n else:\n error = \"Please submit both a title and a blogpost!\"\n self.render(\"newpost.html\", 
title=title,\n blogPost=blogPost, error=error)", "def create_comment(cls, commentForm, slug):\n comment = CommentModel(\n Name = str(commentForm.name.data),\n Email = str(commentForm.email.data),\n Comment = str(commentForm.comment.data),\n Published = True,\n SlugId = slug\n )\n comment.put()\n return comment", "def comment_on_post_id(comment, obj_id):\n\n if type(comment) != str or type(obj_id) != str:\n logger.error(LOG_INVALID_VARIABLES_STR)\n return False\n\n # Success! Log and return the post id\n comment_id = graph.put_comment(object_id=obj_id, message=comment)[\"id\"]\n logging.info(LOG_COMMENT_SUCCESS.format(comment_id))\n return comment_id", "def add_new_comment(request):\n token = request.data.get('token')\n text = request.data.get('text', '')\n post_id = request.data.get('post_id', '')\n permission = request.data.get('permission')\n\n if Token.objects.filter(key=token).exists():\n if len(text) < 10:\n return Response({\"error\": 24})\n if len(text) > 1000:\n return Response({\"error\": 25})\n\n if type(post_id) is int:\n if Post.objects.filter(pk=post_id).exists():\n token = get_object_or_404(Token, key=token)\n post = get_object_or_404(Post, pk=post_id)\n comment = Comment.objects.create(post=post,\n author_id=token.user_id,\n text=text,\n permission=permission)\n serializer = PostSerializer(post, context={'user_id': token.user_id})\n UserFeed.objects.create(user=post.author,\n action_user=token.user,\n post_comment=comment,\n action=\"PostComment\")\n\n printable = set(string.printable)\n msg = filter(lambda x: x in printable, comment.text) \n message = \"{} commented: {}\".format(token.user.username, msg)\n\n custom = {\n \"post_id\": post.id,\n \"avatar\": UserProfile.objects.get(user=token.user).avatar.url\n }\n\n if post.author != token.user:\n user_notification = UserNotification.objects.get(user=post.author)\n send_notification(custom, message, user_notification)\n\n # check @ for users \n for item in text.split(' '):\n if item and item[0] == '@':\n username = item[1:].lower()\n user = User.objects.filter(username__iexact=username).first()\n if not user or user == token.user:\n continue\n UserFeed.objects.create(user=user,\n action_user=token.user,\n post_comment=comment,\n action=\"PostCommentComment\")\n msg = filter(lambda x: x in printable, comment.text) \n message = \"{} commented: {}\".format(token.user.username, msg)\n user_notification = UserNotification.objects.get(user=user)\n send_notification(custom, message, user_notification)\n\n return Response({\"success\": 26,\n \"post\": serializer.data})\n else:\n return Response({\"error\": 27})\n else:\n return Response({\"error\": 17})", "def form_valid(self, form):\n author = Profile.objects.get(user=self.request.user)\n object = self.get_object()\n content_type = ContentType.objects.get_for_model(object)\n form.instance.author = author\n form.instance.content_type = content_type\n form.instance.object_id = object.pk\n\n return super(CommentCreate, self).form_valid(form)", "def create_author(name):\n return Author.objects.create(name=name)", "def on_created_comment(sender, instance: dillo.models.comments.Comment, created, **kwargs):\n if not created:\n return\n # Extract tags and mentions from text and assign them to the Comment\n tags, mentions = extract_tags_and_mentions(instance.content)\n instance.tags.set(*tags)\n for mentioned_user in mentions:\n log.debug('Mentioning user %s in Comment %i' % (mentioned_user, instance.id))\n dillo.models.mixins.Mentions.objects.create(\n user=mentioned_user,\n 
content_object=instance,\n )\n # TODO(fsiddi) Generate activity about mention\n\n # Generate activity about comment creation\n log.debug('Generating activity about comment creation')\n verb = 'commented'\n if instance.parent_comment:\n verb = 'replied'\n action.send(instance.user, verb=verb, action_object=instance, target=instance.entity)\n log.debug('Set user %s as follower of own comment %i' % (instance.user, instance.id))\n follow(instance.user, instance, actor_only=False)", "def post_process_post(self, post):\r\n post.article = self.rewrite_ob_urls(post.article)\r\n post._commit()\r\n \r\n comments = Comment._query(Comment.c.link_id == post._id, data = True)\r\n for comment in comments:\r\n comment.body = self.rewrite_ob_urls(comment.body)\r\n comment._commit()", "def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(PostCommentCreate, self).get_context_data(**kwargs)\n # Get the blog from id and add it to the context\n context['post'] = get_object_or_404(Post, pk=self.kwargs['pk'])\n return context", "def get_context_data(self, **kwargs):\n # Call the base implementation first to get a context\n context = super(CommentCreate, self).get_context_data(**kwargs)\n # Get the blog from id and add it to the context\n context['post'] = get_object_or_404(Post, pk = self.kwargs['pk'])\n return context", "def test_owner_create_given_blogpost(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n owner = UserFactory.create_batch(2)[1]\r\n app = AppFactory.create(owner=owner)\r\n blogpost = BlogpostFactory.build(app=app, owner=owner)\r\n\r\n assert self.mock_authenticated.id == app.owner.id\r\n assert_not_raises(Exception, getattr(require, 'blogpost').create, blogpost)", "def post_create(faker_obj, profile_obj, tag_list, num=3):\n for i in range(num):\n obj = faker_obj\n title = obj.sentence(nb_words=random.randint(5, 10))\n author = User.objects.get(id=profile_obj)\n body = \" \".join(obj.paragraphs(nb=random.randint(8, 20)))\n status = \"published\"\n post = Post.objects.create(title=title, author=author, body=body, status=status)\n post.tags.add(\", \".join(random.sample(tag_list, 1)))\n print(\n \"Created post title:'{}' for user '{}'\".format(post.title, author.username)\n )\n create_comment_list(obj, post)", "def create_post(self: User, content: str, is_public: bool, circles: List[Circle], reshareable: bool,\n reshared_from: Optional[Post], media_list: List[Media], mentioned_users: List[User],\n is_update_avatar: bool) \\\n -> Union[Post, bool]:\n if not content and not media_list:\n # a post has to have either content or media\n return False\n\n new_post = Post()\n new_post.eid = make_uuid()\n new_post.author = self.id\n if content:\n new_post.content = bleach.clean(content)\n new_post.is_public = is_public\n new_post.circles = circles\n new_post.media_list = media_list\n new_post.is_update_avatar = is_update_avatar\n\n if reshared_from and not reshareable:\n # if resharing from a post, this post must also be reshareable, otherwise it's logically wrong\n return False\n\n if reshared_from:\n if media_list:\n # when resharing, only allow content (text), e.g. 
no media\n return False\n\n if reshared_from.reshared_from:\n # if reshared_from itself is a reshared post, reshare reshared_from's original post\n # reshared_from.reshared_from is LazyReference so need to retrieve the full post\n reshared_from = get_in_post_cache(reshared_from.reshared_from.id)\n\n # same explanation for context_home_or_profile=False\n if not sees_post(self, reshared_from, context_home_or_profile=False):\n return False\n\n if not reshared_from.reshareable:\n return False\n\n new_post.reshared_from = reshared_from\n\n new_post.reshareable = reshareable\n new_post.save()\n\n if reshared_from:\n create_notification(\n self,\n notifying_href=new_post.make_href(),\n notifying_summary=new_post.content,\n notifying_action=NotifyingAction.Reshare,\n notified_href=reshared_from.make_href(),\n notified_summary=reshared_from.content,\n owner=reshared_from.author\n )\n # only cache reshared post\n set_in_post_cache(reshared_from)\n\n mention(\n self,\n notified_href=new_post.make_href(),\n notified_summary=new_post.content,\n mentioned_users=mentioned_users\n )\n\n return new_post", "def post(self):\n current_user = self.authenticate_user()\n\n if not current_user:\n self.redirect(\"/login\")\n else:\n content = self.request.get(\"content\")\n title = self.request.get(\"subject\")\n\n if not content or not title:\n self.render_front(title, content, \"We need both a title and content\")\n else:\n post = Post(title=title, content=content, user=current_user.key)\n post.put()\n\n current_user.posts.append(post.key)\n current_user.put()\n\n self.redirect(\"/post/\" + str(post.key.id()))", "def post(self, user):\n subject = self.request.get(\"subject\")\n content = self.request.get(\"content\")\n\n if subject and content:\n post = Post(subject=subject,\n content=content,\n author=self.user)\n post.put()\n return self.redirect(\"/%s\" % post.key().id())\n else:\n\n error = \"subject and content, please!\"\n return self.render(\"newpost.html\",\n subject=subject,\n content=content,\n error=error)", "def post_create(request):\n\tform = PostForm(request.POST or None, request.FILES or None)\n\tif request.POST:\n\t\tif form.is_valid():\n\t\t\tinstance = form.save(commit=False)\n\t\t\tinstance.user = request.user\n\t\t\tinstance.save()\n\t\t\tmessages.success(request, \"Post created!\")\n\t\t\treturn HttpResponseRedirect(instance.get_absolute_url())\n\t\telse:\n\t\t\tmessages.error(request, \"Sorry! Something went wrong.\", extra_tags=\"\")\n\tcontext = {\n\t\t'title': \"Create Post\",\n\t\t'form' : form,\n\t}\n\treturn render(request, 'post/create.html', context)", "def post(self, req):\n error_messages = []\n success_message = ''\n\n # Creamos owner y se lo pasamos al form con un objeto pre-establecido\n post_with_owner = Post()\n post_with_owner.owner = req.user\n post_with_owner.blog = Blog.objects.filter(owner=req.user)[0]\n\n form = PostCreateForm(req.POST, instance=post_with_owner)\n if form.is_valid():\n\n new_post = form.save()\n form = PostCreateForm()\n success_message = u'Post guardado con éxito! 
'\n success_message += u'<a href=\"{0}\">'.format(reverse('post_detail', args=[req.user.username, new_post.pk]))\n success_message += u'(ver post)</a>'\n else:\n error_messages.append(u'Formulario incompleto.')\n\n context = {\n 'form': form,\n 'success_message': success_message\n }\n return render(req, 'posts/new_post.html', context)", "def create_comment(project_id, session_id, annotation_id, comment_id=None):\n helpers.abort_if_invalid_parameters(project_id, session_id)\n user = helpers.abort_if_unauthorized(Project.query.get(project_id))\n if comment_id:\n helpers.abort_if_unknown_comment(comment_id, annotation_id)\n\n data = helpers.jsonify_request_or_abort()\n\n schema = UserAnnotationCommentSchema()\n helpers.abort_if_errors_in_validation(schema.validate(data))\n # Note: comment_id can be null, which represents that it is a parent\n comment = CommentsModel(data['content'], comment_id, user.id, annotation_id)\n db.session.add(comment)\n db.session.commit()\n\n # Determine which type of comment the response is to: nested or a root comment\n if comment_id:\n _comment = CommentsModel.query.filter_by(parent_id=comment_id).first()\n else:\n _comment = RootComment.query.get(annotation_id)\n parent_user_id = _comment.user_id\n usr = User.query.get(parent_user_id)\n\n if user.id != usr.id:\n InterviewSession.email_commentor(usr, project_id, session_id)\n\n fcm.notify_participants_user_commented(project_id, session_id)\n return custom_response(200, data=schema.dump(comment))", "def post_comment_branch(user, branch_id, comment):\r\n\tfrom browse.models import BranchComment\r\n\tfrom accounts.models import RestaurantBranch\r\n\tbranch = RestaurantBranch.objects.get(id=branch_id)\r\n\tpost, _ = BranchComment.objects.get_or_create(branch=branch, user=user)\r\n\tpost.comment = comment\r\n\tpost.save()", "def create_post(user_id):\n\n user = User.query.get_or_404(user_id)\n title = request.form['title']\n content = request.form['content']\n tag_ids = [int(num) for num in request.form.getlist(\"tags\")]\n tags = Tag.query.filter(Tag.id.in_(tag_ids)).all()\n \n new_post = Post(title=title, content=content, user=user, tags=tags)\n db.session.add(new_post)\n db.session.commit()\n\n return redirect(f\"/users/{user_id}\")", "def add_accomment(request, pk):\n\n actor = get_object_or_404(Actor, pk=pk)\n if request.method == \"POST\":\n form = ActorCommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.actor = actor\n comment.save()\n return redirect('../', pk=actor.pk)\n else:\n form = ActorCommentForm()\n return render(request, {'form': form})", "def createComment(owner_id=None, photo_id=None, message=None, attachments=None,\\\n from_group=None, reply_to_comment=None, sticker_id=None,\\\n access_key=None, guid=None):\n params = {\n 'owner_id': owner_id,\n 'photo_id': photo_id,\n 'message': message,\n 'attachments': attachments,\n 'from_group': from_group,\n 'reply_to_comment': reply_to_comment,\n 'sticker_id': sticker_id,\n 'access_key': access_key,\n 'guid': guid\n }\n result = call('photos.createComment', **params)\n return parse_response(result)", "def post(self):\n subject = self.request.get('subject')\n post_content = self.request.get('post_content')\n submit = self.request.get('submit')\n cancel = self.request.get('cancel')\n user = self.get_active_user()\n created_by = int(user.key().id())\n post_id = self.request.get('post_id')\n\n if not user:\n self.redirect('/login')\n if post_id:\n post = Posts.get_by_id(int(post_id))\n 
else:\n post = None\n\n if cancel == \"cancel\":\n self.redirect('/%s' % str(post.key().id()))\n return\n if (post and post.submitter_id == user.key().id()) or not post:\n if submit == \"submit\" and subject and post_content:\n if post:\n post.subject = subject\n post.content = post_content\n post.put()\n else:\n post = Posts(subject=subject,\n content=post_content,\n submitter_id=created_by)\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.render_newpage(user=user,\n subject=subject,\n post_content=post_content,\n error=\"\"\"Please provide both a subject and a\n post!\"\"\")\n else:\n self.redirect('/login')", "def post_detail(request, pk):\n post = get_object_or_404(Post, pk=pk)\n comments = post.comments.all\n post.views += 1\n post.save()\n \n if request.method == \"POST\":\n comment_form = CommentForm(request.POST)\n if comment_form.is_valid() and request.user:\n new_comment = comment_form.save(commit=False)\n new_comment.post = post\n new_comment.author = request.user\n new_comment.save()\n elif request.user:\n comment_form = CommentForm()\n \n return render(request, \"details.html\", {\"post\": post, \"comments\": comments, \"comment_form\": comment_form})", "def add_comment(article_id):\n\n if 'username' in session:\n user = mongo.db.user.find_one({'username': session['username']})\n \n if request.method == 'POST':\n articles = mongo.db.articles\n article = articles.find_one_and_update({'_id': ObjectId(article_id) },\n {'$push':\n {'comments':\n {'username': session['username'],\n 'date': datetime.utcnow(),\n 'text': request.form.get('comment')\n }\n }\n })\n \n comment = mongo.db.article_comments\n comment.insert_one({'user': user['_id'],\n 'from_user': session['username'],\n 'article': article['_id'],\n 'article_title': article['title'],\n 'date': datetime.utcnow(),\n 'to_user': article['author'],\n 'text': request.form.get('comment')\n })\n \n flash('Your comment has been added.', 'success')\n return redirect(url_for('blog'))\n \n flash('Please login to post a comment.', 'info')\n return redirect(url_for('login'))", "def post_comment_package(user, pkg_id, comment):\r\n\tfrom browse.models import PackageComment\r\n\tfrom browse.models import Package\r\n\tpackage = Package.objects.get(id=pkg_id)\r\n\tpost, _ = PackageComment.objects.get_or_create(package=package, user=user)\r\n\tpost.comment = comment\r\n\tpost.save()", "def form_valid(self, form):\n #Add logged-in user as author of comment\n form.instance.author = self.request.user\n #Associate comment with blog based on passed id\n form.instance.blog=get_object_or_404(Post, pk = self.kwargs['pk'])\n # Call super-class form validation behaviour\n return super(CommentCreate, self).form_valid(form)", "def create_comment_feed(data:dict, id_feed:int, user:User)->comments_models.CommentFeed:\n\tif data.get(\"comment\") is not None:\n\t\taccounts_validations.validate_length(\"Comment\",data.get(\"comment\"),2,255)\n\telse:\n\t\traise ValueError(str(_(\"Comment is required\")))\n\twith transaction.atomic():\n\t\ttry:\n\t\t\tfeed = comments_models.CommentFeed.objects.create(\n\t\t\t\tuser = user,\n\t\t\t\tfeed = feed_models.Feed.objects.get(id=id_feed),\n\t\t\t\tcomment = data.get(\"comment\")\n\t\t\t)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\traise ValueError(str(_(\"An error occurred while saving the comment feed\")))\n\treturn feed", "def post(self):\n title = self.request.get(\"title\")\n body = self.request.get(\"body\")\n\n if title and body:\n\n # create a new Post object and store it in the 
database\n post = Post(\n title=title,\n body=body\n )\n post.put()\n\n # get the id of the new post, so we can render the post's page (via the permalink)\n id = post.key().id()\n self.redirect(\"/blog/%s\" % id)\n else:\n error = \"we need both a title and a body!\"\n #self.render_form(title, body, error)\n self.render(\"newpost.html\", title, body, error)", "def setUp(self):\n user = User.objects.create(username=\"nerd\")\n self.post_name = 'name1'\n self.post = Post(name=self.post_name, owner=user)", "def perform_create(self, serializer):\r\n serializer.save(author=self.request.user)", "def create(cls, headline, text, blog):\n post = cls()\n post.headline = headline\n post.text = text\n post.blog = blog\n post.posted_date = timezone.now()\n try:\n post.save()\n return post\n except(ValueError, IntegrityError, OperationalError):\n return None", "def post(self):\n comment_id = int(self.request.get('comment_id'))\n post_id = self.request.get('post_id')\n comment = Comments.get_by_id(comment_id)\n if comment.submitter_id == self.get_active_user().key().id():\n comment.delete()\n else:\n error(403)\n\n self.redirect('/%s' % post_id)", "def comment(postid):\n context = {}\n if \"username\" not in flask.session:\n raise InvalidUsage('Forbidden', status_code=403)\n\n connection = insta485.model.get_db()\n cursor = connection.execute(\n \"SELECT * FROM comments WHERE postid=:id\", {'id': postid})\n comments = cursor.fetchall()\n ''' \n if bool(comments) is False:\n raise InvalidUsage('Not Found', status_code=404)\n '''\n # User\n logname = flask.session[\"username\"]\n\n if flask.request.method == 'POST':\n data = flask.request.get_json(force=True)\n context['text'] = data['text']\n context['owner'] = logname\n context['owner_show_url'] = '/u/' + logname + '/'\n connection.execute('INSERT INTO comments (owner, postid, text) \\\n VALUES (?,?,?)', (logname, postid, data['text']))\n cursor = connection.execute('SELECT last_insert_rowid() AS id')\n commentid_dic = cursor.fetchone()\n context['commentid'] = commentid_dic['id']\n context['postid'] = postid\n return flask.jsonify(**context), 201\n\n # url\n context[\"url\"] = flask.request.path\n context['comments'] = []\n\n for i in comments:\n one_comment = {}\n one_comment['commentid'] = i['commentid']\n one_comment['owner'] = i['owner']\n one_comment['owner_show_url'] = '/u/' + i['owner'] + '/'\n one_comment['postid'] = postid\n one_comment['text'] = i['text']\n context['comments'].append(one_comment)\n\n return flask.jsonify(**context)", "def test_user_can_reply_to_comment(self):\n token1 = self.create_user(VALID_USER_DATA)\n response = self.create_article(VALID_ARTICLE, token1)\n response = self.create_comment(\n token=token1, \n parentId=0,\n slug=response.data['article']['slug']\n )\n response = self.create_comment(\n token=token1, \n parentId=response.data['comment']['id'],\n slug=response.data['comment']['article']['slug']\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_201_CREATED\n )\n self.assertEqual(\n response.data['comment']['body'],\n VALID_COMMENT['body']\n )", "def post_comment(id):\n \n form = CommentForm()\n title = 'post comment'\n post = Post.query.filter_by(id=id).first()\n\n if post is None:\n\n abort(404)\n\n if form.validate_on_submit():\n comment = form.comment.data\n new_comment = Comments(opinion = comment, user_id = current_user.id, posts_id = post.id)\n new_comment.save_comment()\n return redirect(url_for('main.view_post', id = post.id))\n\n return render_template('comments.html', form = form, title = 
title)", "def perform_create(self, serializer):\n serializer.save(author=self.request.user)", "def comment_to_object(self, comment, post_author_id=None):\n # the message_tags field is different in comment vs post. in post, it's a\n # dict of lists, in comment it's just a list. so, convert it to post style\n # here before running post_to_object().\n comment = dict(comment)\n comment['message_tags'] = {'1': comment.get('message_tags', [])}\n\n obj = self.post_to_object(comment)\n if not obj:\n return obj\n\n obj['objectType'] = 'comment'\n\n match = self.COMMENT_ID_RE.match(comment.get('id', ''))\n if match:\n post_author, post_id, comment_id = match.groups()\n obj['url'] = self.comment_url(post_id, comment_id,\n post_author_id=post_author_id)\n obj['inReplyTo'] = [{'id': self.tag_uri(post_id)}]\n\n return self.postprocess_object(obj)", "def add_comment(request, entry_pk):\n\n blog = get_object_or_404(BlogEntry, pk=entry_pk)\n\n if not request.user.is_authenticated():\n raise PermissionDenied\n\n form = BlogCommentForm(creator=request.user, blog=blog, data=request.POST)\n\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(blog.get_absolute_url())\n\n return single(request, entry_pk=entry_pk, comment_form=form)", "def test_post_creation(authenticated_client, post_dict, user, bogus_request):\n response = authenticated_client.post(reverse('post-list'), data=post_dict)\n\n assert response.status_code == status.HTTP_201_CREATED\n\n user_url = reverse('user-detail', args=(user.pk,), request=bogus_request)\n\n post_json_response = json.loads(response.content)\n\n assert post_json_response['author'] == user_url", "def create_comment(request):\n\n # get data\n in_data = getRequestData(request)\n\n # get the Thread associated with the comments\n mythread = Thread.objects.get(id=in_data.get('mythreadid'))\n\n # save in database\n try:\n comment = Comment(pub_date = datetime.datetime.now(pytz.timezone('US/Eastern')), username = in_data.get('myusername'), text = in_data.get('mytext'), score = 0, thread = mythread )\n comment.save()\n except:\n return HttpResponseBadRequest('Error saving to database!')\n\n return JsonResponse(in_data)", "def create_article(self):\n user = self.create_a_user()\n article = Article.objects.create(\n title=self.title,\n description=self.description,\n body=self.body, author=user.profile)\n article.save()\n return article", "def post(self):\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n # if user enter good subject and content, redirect them to new post page\n if subject and content:\n p = Post(parent = blog_key(), subject = subject, content = content)\n p.put() # store the post element into database\n self.redirect('/blog/%s' % str(p.key().id()))\n # otherwise, render an error page \n else:\n error = \"subject and content, please!\"\n self.render(\"newpost.html\", subject=subject, content=content, error=error)", "def test_user_can_comment_on_article_data(self):\n token1 = self.create_user(VALID_USER_DATA)\n response = self.create_article(VALID_ARTICLE, token1)\n response = self.create_comment(\n token=token1, \n parentId=0,\n slug=response.data['article']['slug']\n )\n\n self.assertEqual(\n response.data['comment']['body'], \n VALID_COMMENT['body']\n )\n self.assertEqual(\n response.status_code,\n status.HTTP_201_CREATED\n )", "def post_comment(request, next=None, using=None):\n # Fill out some initial data fields from an authenticated user, if present\n data = request.POST.copy()\n if request.user.is_authenticated():\n if 
not data.get('name', ''):\n data[\"name\"] = request.user.get_full_name() or request.user.username\n if not data.get('email', ''):\n data[\"email\"] = request.user.email\n\n # Check to see if the POST data overrides the view's next argument.\n next = data.get(\"next\", next)\n\n # Look up the object we're trying to comment about\n ctype = data.get(\"content_type\")\n object_pk = data.get(\"object_pk\")\n model = models.get_model(*ctype.split(\".\", 1))\n target = model._default_manager.using(using).get(pk=object_pk)\n\n\n # Construct the comment form\n form = comments.get_form()(target, data=data)\n\n # Check security information\n if form.security_errors():\n return None\n # Create the comment\n comment = form.get_comment_object()\n comment.ip_address = request.META.get(\"REMOTE_ADDR\", None)\n if request.user.is_authenticated():\n comment.user = request.user\n\n # Signal that the comment is about to be saved\n responses = signals.comment_will_be_posted.send(\n sender = comment.__class__,\n comment = comment,\n request = request\n )\n\n # Save the comment and signal that it was saved\n comment.save()\n message = get_object_or_404(Message, pk = object_pk)\n message.envoyer_commentaire_notification(comment.pk, request.user.username)\n \n signals.comment_was_posted.send(\n sender = comment.__class__,\n comment = comment,\n request = request\n )\n\n comment_list = [comment]\n return render_to_response('comments/list.html', {'comment_list': comment_list},context_instance=RequestContext(request))", "def post(self):\n user_id = get_jwt_identity()\n user = find_user(user_id)\n args = post_parser.parse_args()\n\n # check circles\n circles = []\n for circle_id in args['circle_ids']:\n found_circle = find_circle(user, circle_id)\n if not found_circle:\n return {'msg': f'Circle {circle_id} is not found'}, 404\n circles.append(found_circle)\n\n # check reshare\n reshared_from = args['reshared_from']\n reshared_from_post = None\n if reshared_from:\n reshared_from_post = dangerously_get_post(reshared_from)\n if not reshared_from_post:\n return {\"msg\": f\"Post {reshared_from} is not found\"}, 404\n\n # check media\n media_object_names = args['media_object_names']\n if reshared_from and media_object_names:\n return {'msg': \"Reshared post is not allowed to have media\"}, 400\n\n post = create_post(\n user,\n content=args['content'],\n is_public=args['is_public'],\n circles=circles,\n reshareable=args['reshareable'],\n reshared_from=reshared_from_post,\n media_list=check_media_object_names(media_object_names, MaxPostMediaCount),\n mentioned_users=check_mentioned_user_ids(args['mentioned_user_ids']),\n is_update_avatar=False\n )\n if not post:\n return {\"msg\": f\"Not allowed to reshare post {reshared_from}\"}, 403\n return post, 201", "def post(self):\n modified_content = self.request.get('comment_edit')\n comment_id = self.request.get('comment_id')\n comment = Comments.get_by_id(int(comment_id))\n user = self.get_active_user()\n\n if user.key().id() == comment.submitter_id:\n comment.content = modified_content\n comment.put()\n self.redirect('/%s' % str(comment.post_id))\n else:\n self.error(403)", "def post(self):\n\n subject = self.request.get('subject')\n content = self.request.get('content')\n\n have_errors = False\n\n if not subject:\n error_subject = \"Please write down the subject\"\n have_errors = True\n if not content:\n error_content = \"Content is required\"\n have_errors = True\n\n if have_errors:\n self.render(\"newpost.html\",\n subject=subject,\n content=content,\n 
error_subject=error_subject,\n error_content=error_content,\n user=self.user)\n else:\n post = Post(parent=blog_key(),\n subject=subject,\n content=content,\n user=self.user)\n post.put()\n self.redirect('/blog/%s' % str(post.key().id()))", "def add_comment(request):\n if request.method != 'POST':\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n else:\n img_id = request.POST['id']\n try:\n img = Image.objects.get(pk=img_id)\n except:\n return HttpResponseRedirect(reverse('wainz.views.composite'))\n comment_text = request.POST['comment']\n #TODO sanitize input\n comment = ImageComment()\n comment.submission_date = timezone.now()\n comment.comment_text= comment_text\n comment.image_id = img_id\n comment.submitter_id = int(request.POST['uid'])\n comment.save()\n return rest.rest_success(request, img_id)", "def create_task(author, title, text, **kwargs):\n mc = MathContent(text=text)\n mc.save()\n task = Task(author=author, name=title, content=mc, **kwargs)\n task.save()\n return task" ]
[ "0.7968736", "0.69776434", "0.680873", "0.6670445", "0.6624297", "0.6575736", "0.6529041", "0.6456722", "0.64194477", "0.6396711", "0.6375334", "0.6340401", "0.63245595", "0.6278583", "0.62078166", "0.6168181", "0.6167977", "0.61602026", "0.61574775", "0.6155002", "0.6143866", "0.6140876", "0.6121726", "0.61149126", "0.61007464", "0.60991216", "0.608715", "0.60754097", "0.60615396", "0.6057131", "0.604684", "0.6014429", "0.5994278", "0.59881574", "0.5970347", "0.5945169", "0.59382254", "0.59333473", "0.59317094", "0.59296954", "0.5914984", "0.5883961", "0.585073", "0.5846212", "0.58429784", "0.5842871", "0.582814", "0.58248913", "0.582428", "0.5810658", "0.57994163", "0.57930577", "0.5791682", "0.57531965", "0.5749165", "0.5744627", "0.5742683", "0.5733065", "0.57328683", "0.571094", "0.5699412", "0.56939983", "0.569015", "0.5689146", "0.5688257", "0.56696564", "0.56678504", "0.56668216", "0.5662548", "0.5657867", "0.5655747", "0.5641819", "0.56367755", "0.562576", "0.5625701", "0.561538", "0.55970645", "0.5589118", "0.5561099", "0.5539726", "0.5534758", "0.5530384", "0.5524474", "0.5523618", "0.5521801", "0.55176336", "0.5516592", "0.55107504", "0.54922026", "0.54824126", "0.547513", "0.5469452", "0.5452432", "0.5449303", "0.5448553", "0.5448498", "0.54452384", "0.54398817", "0.5439098", "0.5438888" ]
0.79196894
1
Takes response.data and confirms no repeated guids (No repeated posts)
def assertNoRepeatGuids(context, posts):
    guids = [p['guid'] for p in posts]
    context.assertTrue(len(set(guids)) == len(posts), "Some guids repeated")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_response_reusage_after_replied(self):\n\n post1 = self._create_tweet(\n content=\"I need a foo.\",\n channel=self.inbound,\n demand_matchables=True)\n\n resp1 = Response.objects.upsert_from_post(post1)\n\n support = UserProfile.objects.upsert('Twitter', dict(screen_name='@test2'))\n self._create_tweet(\n user_profile=support,\n content=\"We cant help you right now. Sorry.\",\n channel=self.outbound,\n demand_matchables=True,\n in_reply_to=post1)\n\n post2 = self._create_tweet(\n content=\"I still need a foo.\",\n channel=self.inbound,\n demand_matchables=True)\n resp2 = Response.objects.upsert_from_post(post2)\n self.assertNotEqual(resp1.id, resp2.id)", "def check_medtag_reports(request):\n\n json_resp = {}\n json_resp['count'] = 0\n medtag_arts = Report.objects.all().exclude(institute = 'PUBMED')\n # for el in pubmed_arts:\n # if el.id_report.startswith('PUBMED'):\n json_resp['count'] = medtag_arts.count()\n return JsonResponse(json_resp,safe=False)", "def check_for_duplicate_phone_numbers(d):\n\n print('# This function is under maintenance. Please try again later.')\n return d", "def test_response_reusage(self):\n\n post1 = self._create_db_post(content=\"@test I need a foo.\",\n channel=self.sc.inbound,\n demand_matchables=True,\n user_profile={'screen_name': 'customer'})\n self.assertTrue(self.sc.inbound_channel.is_assigned(post1))\n\n conv1 = self.sc.upsert_conversation(post1)\n post2 = self._create_db_post(content=\"I still need a foo!\",\n channel=self.sc.inbound,\n demand_matchables=True,\n user_profile={'screen_name': 'customer'})\n conv2 = self.sc.upsert_conversation(post2)\n\n resp1 = Response.objects.upsert_from_post(post1)\n resp2 = Response.objects.upsert_from_post(post2)\n self.assertEqual(conv1.id, conv2.id)\n self.assertEqual(resp1.id, resp2.id)\n self.assertTrue(resp2.post_date > resp1.post_date)", "def check_soundcloud_ids_mismatch():\n wiki = pywikibot.Site('en', 'wikipedia')\n category = pywikibot.Category(wiki, CATEGORY)\n pages = pagegenerators.CategorizedPageGenerator(category)\n\n total_pages = 0\n processed = 0\n result = []\n\n for page in pages:\n total_pages += 1\n res = compare_soundcloud_ids(page, wiki)\n\n if res == True:\n # The IDs are the same, nothing to do. The category may contains cached entries\n print('The ID for \"%s\" are the same in both the article and Wikidata.' % page.title())\n processed += 1\n continue\n elif not res:\n print('Skipping %s. It has no SoundCloud ID' % page.title())\n processed += 1\n continue\n\n result.append([res, page.title()])\n\n for ids, title in result:\n # Now we have two IDs (one from article, another from repo).\n # Let us check their associated movie titles in the website\n repoId = ids['repoId']\n wikiId = ids['articleId']\n c_url, response_code1 = check_soundcloud_id(repoId)\n c_url2, response_code2 = check_soundcloud_id(wikiId)\n\n if c_url == c_url2:\n # Both valid\n print('''Both SoundClouds IDs are valid for the title. %s''' % title)\n processed += 1\n elif response_code1 == 404 and response_code1 != response_code2:\n # Handle case\n processed += 1\n elif response_code2 == 404 and response_code2 != response_code1:\n # Handle case\n processed += 1\n else:\n # Handle final case\n pass\n\n print('Finished! Total pages: %s. 
Processed: %s' %(total_pages, processed))", "def check_dataset_duplicate_ids(self, dataset):\n ids = [a['_id'] for a in dataset]\n # Construct list of duplicates\n dupe_ids = [a for n, a in enumerate(ids) \n if a in ids[:n]]\n if len(dupe_ids) > 0:\n # Get list of names for the duplicate pandas\n dupe_names = [a['en.name'] for a in dataset \n if a['_id'] in dupe_ids]\n raise IdError(\"ERROR: duplicate ids for en.names: %s\" \n % str(dupe_names))", "def _process_response(self, resp):\n signals = []\n resp = resp.json()\n fresh_posts = posts = resp['data']\n paging = resp.get(self._paging_field) is not None\n self.logger.debug(\"Facebook response contains %d posts\" % len(posts))\n\n # we shouldn't see empty responses, but we'll protect our necks.\n if len(posts) > 0:\n self.update_freshness(posts)\n fresh_posts = self.find_fresh_posts(posts)\n paging = len(fresh_posts) == self.limit()\n\n # store the timestamp of the oldest fresh post for use in url\n # preparation later.\n if len(fresh_posts) > 0:\n self.prev_stalest = self.created_epoch(fresh_posts[-1])\n\n signals = [FacebookSignal(p) for p in fresh_posts]\n self.logger.debug(\"Found %d fresh posts\" % len(signals))\n\n return signals, paging", "def check_for_duplicate_subject_identifier(self):\n pass", "def check_if_duplicate(self, data):\n\n query = \"SELECT * FROM {} WHERE topic = '{}' AND location = '{}'\\\n \".format(self.table, data['topic'], data['location'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup with same topic at the same venue\\\n already exists'\n\n query = \"SELECT * FROM {} WHERE happening_on = '{}' AND location = '{}'\\\n \".format(self.table, data['happening_on'], data['location'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup happening the same date at the same venue \\\n already exists'\n\n query = \"SELECT * FROM {} WHERE topic = '{}' AND happening_on = '{}'\\\n \".format(self.table, data['topic'], data['happening_on'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup happening the same date with same topic \\\n already exists'\n\n return False, None", "def is_unique(self, id, items):\r\n copies = 0\r\n for i in items:\r\n if type(i) is dict:\r\n if i['id'] == id:\r\n copies = copies + 1\r\n else:\r\n if i.id == id:\r\n copies = copies + 1\r\n if copies >= 2:\r\n return False\r\n else:\r\n return True", "def _raise_if_duplicates(counts: Dict[str, int]) -> None:\n duplicates: List[str] = []\n for nickname, count in counts.items():\n if count > 1:\n duplicates.append(nickname)\n if len(duplicates) > 0:\n # TODO This is not always nickname\n raise ValueError(f'\\'nickname\\' not unique {duplicates}')", "def check_repost_exists(type, id):\n \n try:\n soundcloud.get('/e1/me/{}_reposts/{}'.format(type, id))\n return True\n except HTTPError as e:\n if e.response.status_code == 404:\n db.mark_as_deleted(type, id)\n return False\n else:\n raise", "def similar_random(id):\n result_ids = get_similar(tree, ids, embeds, id, 50)\n\n if result_ids == False:\n return 'ID not found', 404\n\n shuffle(result_ids)\n\n result_ids = result_ids[0:10]\n\n out = {\n 'ids': result_ids\n }\n\n return out, 200", "def test_retrieve_tags_unique(self):\n tag1 = Tag.objects.create(user=self.user, name='Tag1')\n Tag.objects.create(user=self.user, name='Tag2')\n recipe1 = Recipe.objects.create(\n user=self.user,\n title=\"Rec1\",\n time_minutes=20,\n price=Decimal('4.85')\n )\n recipe2 = Recipe.objects.create(\n user=self.user,\n title=\"Rec2\",\n time_minutes=20,\n 
price=Decimal('4.85')\n )\n\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag1)\n\n res = self.client.get(TAGS_URL, {'assigned_only': 1})\n\n serializer1 = TagSerializer(tag1)\n\n self.assertIn(serializer1.data, res.data)\n self.assertEqual(len(res.data), 1) # unique results", "def test_retrieve_tags_assigned_unique(self):\n tag1 = Tag.objects.create(user = self.user,name='Breakfast')\n tag2 = Tag.objects.create(user=self.user,name='Lunch')\n\n recipe1 = Recipe.objects.create(user=self.user,title='Goose Liver on toast',price=5.00,time_minutes=15)\n recipe2 = Recipe.objects.create(user = self.user,title='Egg Benedict',price=5.00,time_minutes=15)\n recipe1.tags.add(tag1)\n recipe2.tags.add(tag1)\n\n res = self.client.get(TAGS_URL,{'assigned_only':1})\n self.assertEqual(len(res.data),1)", "def test_duplicate_entries(self):", "def check_missing_unique_link():\n from mspray.apps.main.utils import queryset_iterator\n\n queryset = SprayDay.objects.filter(spraypoint__isnull=True).only(\n \"pk\", \"location_id\"\n )\n for record in queryset_iterator(queryset):\n add_unique_record(record.pk, record.location_id)\n gc.collect()", "def user_response_to_post(self, request, pk):\n post_objects_count = Post.objects.filter(id=pk, liked_users__id=request.user.id).count()\n post_objects = Post.objects.get(id=pk)\n if post_objects_count !=0:\n post_objects.liked_users.remove(request.user)\n response_msg = \"You disliked the post\"\n else:\n post_objects.liked_users.add(request.user)\n response_msg = \"You have liked the post\"\n return Response({'data': response_msg}, status=status.HTTP_200_OK)", "def test_retrive_tags_assigned_unique(self):\n tag = Tag.objects.create(user=self.user, name=\"Breakfast\")\n Tag.objects.create(user=self.user, name=\"Lunch\")\n recipe1 = Recipe.objects.create(\n title=\"Pancake\",\n making_time_minutes=10,\n price=5.00,\n user=self.user\n )\n recipe2 = Recipe.objects.create(\n title=\"Italian Fried Egg\",\n making_time_minutes=5,\n price=10.00,\n user=self.user\n )\n recipe1.tags.add(tag)\n recipe2.tags.add(tag)\n\n res = self.client.get(TAGS_URL, {'assigned_only': 1})\n self.assertEqual(len(res.data), 1)", "def test_get_posts_missing_ids(client):\n response = client.simulate_get('/page/get_records')\n assert response.status_code == 400", "def test_duplicate_questions(self):\n self.is_authenticated()\n self.post_question()\n response = self.post_question()\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_report_article_more_than_once(self):\n from rest_framework.test import APIClient\n client = APIClient()\n\n response = client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n response = client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result['errors'],'You can only report an article once')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def check_unique_ids(request: Request, policy: RequestPolicy, logger: Logger) -> None:\n seen: Dict[str, int] = {}\n for bundle in request.bundles:\n if bundle.id in seen:\n raise KSR_BUNDLE_UNIQUE_Violation(\n f\"More than one bundle with id {bundle.id}\"\n )\n seen[bundle.id] = 1\n\n _num_bundles = len(request.bundles)\n logger.info(f\"KSR-BUNDLE-UNIQUE: All {_num_bundles} bundles have unique ids\")\n return", "def _get_unique_genres(connection):\n print('---Getting unique 
genres---')\n genreDict = {}\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM shared_genres;\")\n res = cursor.fetchall()\n num_genres = 0\n for genre in res:\n if genre[1] not in genreDict:\n genreDict[genre[1]] = num_genres\n num_genres += 1\n return genreDict", "def assert_json_failure_response_is_username_collision(self, response):\r\n self.assertEqual(400, response.status_code)\r\n payload = json.loads(response.content)\r\n self.assertFalse(payload.get('success'))\r\n self.assertIn('already exists', payload.get('value'))", "def test_retrieve_ingredients_assigned_unique(self):\n\n ingredient = create_sample_ingredient(user=self.user, name=\"Orange\")\n create_sample_ingredient(user=self.user, name='Chocolate')\n\n recipe1 = create_sample_recipe(\n user=self.user,\n title=\"Orange Juice\",\n time_minutes=10,\n price=6.00\n )\n\n recipe2 = create_sample_recipe(\n user=self.user,\n title=\"Orange Pie\",\n time_minutes=40,\n price=20.00\n )\n\n recipe1.ingredients.add(ingredient)\n recipe2.ingredients.add(ingredient)\n\n response = self.client.get(INGREDIENTS_URL, {\"assigned_only\": 1})\n\n self.assertEqual(len(response.data), 1)\n # we will return 1, because we assigned only 1 id to two recipes\n # also here id is in int", "def _validate_duplicate_names(res_data, name, _id=None):\n if _id:\n for data in res_data:\n if data.get(\"name\") == name and data.get(\"id\") != _id:\n return False\n return True\n else:\n for data in res_data:\n if data.get(\"name\") == name:\n return False\n return True", "def get_onehundred_new_posts():\n one_hundred_new_posts = []\n for post in reddit.subreddit(\"all\").new():\n try:\n one_hundred_new_posts.append(post.id)\n submission = reddit.submission(id=post.id)\n except:\n one_hundred_new_posts.remove(post.id)\n return one_hundred_new_posts", "def test_retrieve_tags_assigned_unique(self):\n tag = Tag.objects.create(user=self.user, name='Breakfast')\n Tag.objects.create(user=self.user, name='Lunch')\n\n recipe1 = Recipe.objects.create(\n title='Pancakes',\n time_minutes=10,\n price=5.00,\n user=self.user\n )\n recipe1.tags.add(tag)\n\n recipe2 = Recipe.objects.create(\n title='Porridge',\n time_minutes=15,\n price=12.00,\n user=self.user\n )\n recipe2.tags.add(tag)\n\n res = self.client.get(TAGS_URL, {'assigned_only': 1})\n\n self.assertEqual(len(res.data), 1)", "def test_retrieve_tags_assigned_unique(self):\n\n tag = Tag.objects.create(user=self.user, name='Breakfast')\n Tag.objects.create(user=self.user, name='Lunch')\n\n recipe1 = Recipe.objects.create(\n title='Pancakes',\n time_minutes=5,\n price=3.00,\n user=self.user\n )\n recipe2 = Recipe.objects.create(\n title='Porridge',\n time_minutes=3,\n price=1.00,\n user=self.user\n )\n\n recipe1.tags.add(tag)\n recipe2.tags.add(tag)\n\n response = self.client.get(URL_TAGS, {'assigned_only': True})\n\n self.assertEqual(len(response.data), 1)", "def test_duplicated_gaitid(self):\n idaa_index = 6\n\n upload_program = program.ProgramUpload(idaa_program=self.idaa_json['value'][idaa_index]['fields'],\n msr_country_codes_list=msr_country_codes_list, msr_gaitid_list=msr_gaitid_list, duplicated_gaitids=self.duplicated_gaitids\n )\n\n self.assertFalse(upload_program.is_valid())\n self.assertTrue(upload_program.has_discrepancy('duplicate_gaitid'))", "def TestQueryIDs(l):\r\n good = 0\r\n empty = 0\r\n bad = 0\r\n for i in l:\r\n try:\r\n jsonstring = pyclassyfire.client.get_results(int(i), 'json')\r\n Class = json.loads(jsonstring)\r\n if len(Class['entities']) == 0:\r\n empty += 1\r\n elif 
len(Class['entities']) > 1:\r\n print(\"WHAT?!\")\r\n sys.exit()\r\n else:\r\n print(i)\r\n good += 1\r\n except Exception as e:\r\n print(e)\r\n bad += 1\r\n\r\n print(\"good\",good)\r\n print(\"bad\",bad)\r\n print(\"empty\",empty)\r\n return None", "def test_identify_duplicates_3(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"L5\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def test_identify_duplicates_4(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)", "def test_random_id(self):\r\n ids = \\\r\n \"R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969\".split(\r\n )\r\n assert random_id(ids, {}) in ids\r\n # just test we got something from the list, don't add stochastic test\r", "def is_article_duplicate(cls, article):\n return cls.db.hkeys(\"article_map\").count(article.link) != 0", "def test_duplicate_ids():\n assert query_row(db_conf, 'osm_buildings', 51001)['type'] == 'way'\n assert query_row(db_conf, 'osm_buildings', -51001)['type'] == 'mp'\n assert query_row(db_conf, 'osm_buildings', 51011)['type'] == 'way'\n assert query_row(db_conf, 'osm_buildings', -51011)['type'] == 'mp'", "def test_identify_duplicates_1(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 2\n ticket2.type = \"replace\"\n ticket2.phage_id = \"L5\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def check_unique(self):\n pass", "def valid_in_response(self):\n return self._repeatable[1] is not None", "def valid_multiple_in_response(self):\n return self._repeatable[1] is True", "def test_identify_duplicates_6(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = 1\n ticket1.type = \"replace\"\n ticket1.phage_id = \"Trixie\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = 1\n ticket2.type = \"replace\"\n ticket2.phage_id = \"Trixie\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 1)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 1)", "def _store_response_for_duplicates(self, message):\n\n key = (message.remote, message.mid)\n if key in self._recent_messages:\n self._recent_messages[key] = message", "def medtag_reports(request):\n\n json_resp = {}\n 
json_resp['usecase'] = []\n reps = Report.objects.all()\n for r in reps:\n if not r.id_report.startswith('PUBMED_') and not str(r.name_id) in json_resp['usecase']:\n json_resp['usecase'].append(str(r.name_id))\n return JsonResponse(json_resp)", "def test_identify_duplicates_2(self):\n\n ticket1 = ticket.ImportTicket()\n ticket1.id = \"none\"\n ticket1.type = \"replace\"\n ticket1.phage_id = \"none\"\n\n ticket2 = ticket.ImportTicket()\n ticket2.id = \"none\"\n ticket2.type = \"replace\"\n ticket2.phage_id = \"none\"\n\n null_set = set([\"none\"])\n list_of_tickets = [ticket1, ticket2]\n id_dupes, phage_id_dupes = \\\n tickets.identify_duplicates(list_of_tickets, null_set=null_set)\n with self.subTest():\n self.assertEqual(len(id_dupes), 0)\n with self.subTest():\n self.assertEqual(len(phage_id_dupes), 0)", "def is_duplicate(kml, collection):\n\t\n\tresults = [ i for i in collection.find({'date': kml['date']}) ]\n\tif results:\n\t\tprint('\\nDuplicate found! %s\\n' % item)\n\t\treturn True\n\telse:\n\t\treturn False", "def test_detect_duplicate_upload_items(duplicate_items: List[JSONDict]) -> None:\n\n with pytest.raises(ValidationError) as e:\n invalid_collection = UploadCollection(items=duplicate_items) # noqa: F841\n\n assert e.value.errors() == [\n {\n \"loc\": (\"items\",),\n \"msg\": \"Duplicate item guids detected: ['http://www.crimsonhexagon.com/post1']\",\n \"type\": \"value_error\",\n }\n ]", "def check_PUBMED_reports(request):\n\n json_resp = {}\n json_resp['count'] = 0\n pubmed_arts = Report.objects.filter(institute = 'PUBMED')\n for el in pubmed_arts:\n if el.id_report.startswith('PUBMED'):\n json_resp['count'] += 1\n return JsonResponse(json_resp,safe=False)", "def test_retrieve_ingredients_assigned_unique(self):\n\n ingredient = Ingredient.objects.create(user=self.user, name='Eggs')\n Ingredient.objects.create(user=self.user, name='Parmaggiano Cheese')\n\n recipe1 = Recipe.objects.create(\n title='Eggs Benedict',\n time_minutes=30,\n price=12.00,\n user=self.user\n )\n recipe2 = Recipe.objects.create(\n title='Coriander Eggs on Toast',\n time_minutes=20,\n price=5.00,\n user=self.user\n )\n recipe1.ingredients.add(ingredient)\n recipe2.ingredients.add(ingredient)\n\n response = self.client.get(URL_INGREDIENTS, {'assigned_only': True})\n\n self.assertEqual(len(response.data), 1)", "def is_repetition(self):\n return self.id == 1", "def verify_object(self, data):\n rv = self.get(data[self.id_field])\n result = not is_404(rv)\n if result:\n for key, value in data:\n if not in_response(rv, value):\n return False\n return result", "def verify_media(self):\n self.check_dataset_duplicate_ids(self.media)", "def test_post_duplicate_question(self):\n self.post_question(self.valid_question2)\n\n\n response = self.post_question(self.valid_question2)\n self.assertEqual(response.status_code, 400)", "def test_empty_similar_results(self):\n query = [{\n \"object_name\": \"Assessment\",\n \"type\": \"ids\",\n \"filters\": {\n \"expression\": {\n \"op\": {\"name\": \"similar\"},\n \"object_name\": \"Assessment\",\n \"ids\": [\"-1\"],\n },\n },\n }]\n response = self.client.post(\n \"/query\",\n data=json.dumps(query),\n headers={\"Content-Type\": \"application/json\"},\n )\n\n self.assertListEqual(\n response.json[0][\"Assessment\"][\"ids\"],\n [],\n )", "def test_duplicate_ids2():\n assert query_row(db_conf, 'osm_buildings', 51001)['type'] == 'way'\n assert query_row(db_conf, 'osm_buildings', -51001) == None\n assert query_row(db_conf, 'osm_buildings', -51011)['type'] == 'mp'\n assert 
query_row(db_conf, 'osm_buildings', 51011) == None", "def see_reels(self, reels):\n if not isinstance(reels, list):\n # In case of only one reel as input\n reels = [reels]\n\n story_seen = {}\n now = int(time.time())\n for i, story in enumerate(sorted(reels, key=lambda m: m['taken_at'], reverse=True)):\n story_seen_at = now - min(i + 1 + random.randint(0, 2), max(0, now - story['taken_at']))\n story_seen[\n '{0!s}_{1!s}'.format(story['id'], story['user']['pk'])\n ] = [\n '{0!s}_{1!s}'.format(story['taken_at'], story_seen_at)\n ]\n\n data = self.json_data({\n 'reels': story_seen,\n '_csrftoken': self.token,\n '_uuid': self.uuid,\n '_uid': self.user_id\n })\n data = self.generate_signature(data)\n return self.session.post('https://i.instagram.com/api/v2/' + 'media/seen/', data=data).ok", "def hash_entries(entries):\n d = dict()\n for e in entries:\n uri = e[\"uri\"]\n domain = re.match(\"^/view\\d*/(.*)$\", uri).group(1)\n if domain:\n visitor_id = e[\"visitor_id\"]\n if d.has_key(domain):\n store_page_entries = d[domain]\n store_page_entries.append(visitor_id)\n else:\n d[domain] = [visitor_id]\n print \"Retrieved {0} unique domains.\".format(len(d))\n return d", "def list_should_not_contain_duplicates(self,list_,msg=None):\r\n if not isinstance(list_,list):\r\n list_= list(list_)\r\n dupes = []\r\n for item in list_:\r\n if item not in dupes:\r\n count = list_.count(item)\r\n if count >1:\r\n logger.info(\" '%s' found %d times\" %(item,count))\r\n dupes.append(item)\r\n if dupes:\r\n raise AssertionError(msg or '%s found multiple times' %seq2str(dupes))", "def generate_missing_and_dupes():\n missing = []\n possible_dupes = defaultdict(list)\n for a in Attachment.objects.all():\n path = a.attachment.path\n if not os.path.exists(path):\n missing.append(a)\n continue\n with open(path, 'rb') as f:\n hasher = hashlib.md5()\n hasher.update(f.read())\n file_hash = hasher.hexdigest()\n possible_dupes[file_hash].append(a)\n real_dupes = {k: v for k, v in possible_dupes.items() if len(v) > 1}\n return missing, real_dupes", "def checkSync(self, data):\n print data,\"ASdaskjdgkajsg\"\n if \"sync\" in data and data[\"sync\"] and \"id\" not in data:\n event_count = Event.query.filter(Event.title == data[\"title\"])\\\n .count()\n if event_count:\n raise ValidationError('Exists')", "def test_unique_id_2():\n ids = []\n ids.append(tasks.add(Task('one')))\n ids.append(tasks.add(Task('two')))\n ids.append(tasks.add(Task('three')))\n\n uid = tasks.unique_id()\n assert uid not in ids", "def check_repeat(db, record):\n models = [TechRepublicData, SecurityNewsData, PyjobData, RedditData]\n temp = db.query(*models)\n\n for model in models:\n if temp.filter(model.title == record.title).count():\n return True", "def check_api_connection(post_data, response) -> list:\n # check status code\n if response['status_code'] != 20000:\n raise ConnectionError(f\"Status code is not ok: {response['status_message']}\")\n # check\n id_list = []\n for a, b in zip(post_data.values(), response['tasks']):\n if a['keyword'] != b['data']['keyword']:\n raise ConnectionError(\"task is missing\")\n else:\n id_list.append(b['id'])\n return id_list", "def only_guid_item(upload_items: List[JSONDict]) -> JSONDict:\n altered = upload_items[0]\n del altered[\"url\"]\n return altered", "def get_unique_run_ids(json_results):\n run_id_list = []\n for key in json_results:\n if json_results[key]['run_id'] not in run_id_list:\n run_id_list.append(json_results[key]['run_id'])\n return run_id_list", "def 
test_retrieve_ingredient_assigned_unique(self):\n ingredient = Ingredient.objects.create(user=self.user, name='eggs')\n Ingredient.objects.create(user=self.user, name='cheese')\n recipe1 = Recipe.objects.create(title='eggs benedict', time_minutes=30, price=12, user=self.user)\n recipe1.ingredients.add(ingredient)\n recipe2 = Recipe.objects.create(title='poached eggs and beans', time_minutes=30, price=15, user=self.user)\n recipe2.ingredients.add(ingredient)\n res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})\n self.assertEqual(len(res.data), 1)", "def test_api_document_fetch_list_user_access_token_not_duplicated(self):\n user = UserFactory()\n\n organization_access = OrganizationAccessFactory(\n user=user,\n role=ADMINISTRATOR,\n )\n OrganizationAccessFactory.create_batch(\n 5, organization=organization_access.organization\n )\n\n playlist = PlaylistFactory(organization=organization_access.organization)\n\n playlist_access = PlaylistAccessFactory(\n user=user,\n playlist=playlist,\n role=ADMINISTRATOR,\n )\n PlaylistAccessFactory.create_batch(5, playlist=playlist)\n\n markdown_document = MarkdownDocumentFactory(\n playlist=playlist,\n )\n\n jwt_token = UserAccessTokenFactory(user=playlist_access.user)\n\n response = self.client.get(\n \"/api/markdown-documents/\", HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\"\n )\n self.assertEqual(response.status_code, 200)\n mardown_translation = markdown_document.translations.first()\n\n self.assertEqual(\n response.json(),\n {\n \"count\": 1,\n \"next\": None,\n \"previous\": None,\n \"results\": [\n {\n \"id\": str(markdown_document.id),\n \"playlist\": {\n \"id\": str(playlist.id),\n \"lti_id\": playlist.lti_id,\n \"title\": playlist.title,\n },\n \"images\": [],\n \"is_draft\": True,\n \"rendering_options\": {},\n \"translations\": [\n {\n \"language_code\": mardown_translation.language_code,\n \"title\": mardown_translation.title,\n \"content\": mardown_translation.content,\n \"rendered_content\": mardown_translation.rendered_content,\n }\n ],\n \"position\": 0,\n },\n ],\n },\n )", "def __init__(self):\n self.ids_seen = set()", "def test_create_duplicated_notification(client):\n create_user_response = create_user(client, TEST_USER_NAME, TEST_USER_PASS)\n assert create_user_response.status_code == HttpStatus.created_201.value\n\n new_notification_message = 'Welcome to the 4th eSports Competition'\n new_notification_category = 'Information'\n post_response = create_notification(\n client,\n new_notification_message,\n 25,\n new_notification_category)\n assert post_response.status_code == HttpStatus.created_201.value\n assert Notification.query.count() == 1\n\n post_response_data = json.loads(post_response.get_data(as_text=True))\n assert post_response_data['message'] == new_notification_message\n\n new_notification_url = post_response_data['url']\n get_response = client.get(\n new_notification_url,\n headers=get_authentication_headers(TEST_USER_NAME, TEST_USER_PASS))\n assert get_response.status_code == HttpStatus.ok_200.value\n\n get_response_data = json.loads(get_response.get_data(as_text=True))\n assert get_response_data['message'] == new_notification_message\n assert get_response_data['notification_category']['name'] == \\\n new_notification_category\n\n second_post_response = create_notification(\n client,\n new_notification_message,\n 15,\n new_notification_category)\n assert second_post_response.status_code == HttpStatus.bad_request_400.value\n assert Notification.query.count() == 1", "def 
test_check_existing_finds_existing_entities(self):\n namespace = 'default-gzip'\n collection = generate_collection(\n namespace, ['small content', 'larger content', 'biggest content'])\n key = model.get_entry_key(\n collection.namespace.namespace, collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n response = self.call_api(\n 'preupload', self.message_to_dict(collection), 200)\n\n # we should see one enqueued task and two new URLs in the response\n items = response.json['items']\n self.assertEqual(2, len(items))\n self.assertEqual([1, 2], [int(item['index']) for item in items])\n for item in items:\n self.assertIsNotNone(item.get('upload_ticket'))\n\n # remove tasks so tearDown doesn't complain\n _ = self.execute_tasks()", "def testIdUnique(self):\n ids = {}\n # Vary parameters that affect the work or input data,\n # verify each id is unique\n for params in itertools.product(RequestNames, TaskNames, Inputs,\n Masks, Dbses, Acdcs):\n ele = WorkQueueElement(RequestName=params[0], TaskName=params[1],\n Inputs=params[2], Mask=params[3],\n Dbs=params[4], ACDC=params[5]\n )\n self.assertFalse(ele.id in ids)\n ids[ele.id] = None", "def is_real_dupe(dupe_lst):\n if len(dupe_lst) >= 10:\n return True\n first_composer = dupe_lst[0]['composer']\n\n if len(dupe_lst) > 2:\n for d in dupe_lst:\n if d['composer'] != first_composer:\n return True\n return False", "def unique_instance(un_data):\n test_dict = dict()\n indexed = list()\n count = 0\n for i,item in enumerate(un_data):\n if not test_dict.has_key( hash(item) ):\n test_dict[ hash(item) ] = 0\n else:\n count = count + 1\n indexed.append(i)\n return count, indexed", "def on_missing_identity(self, messages):\n meta = messages[0].community.get_meta_message(u\"dispersy-identity\")\n for message in messages:\n # we are assuming that no more than 10 members have the same sha1 digest.\n sql = u\"SELECT packet FROM sync JOIN member ON member.id = sync.member WHERE sync.community = ? AND sync.meta_message = ? AND member.mid = ? LIMIT 10\"\n packets = [str(packet) for packet, in self._database.execute(sql, (message.community.database_id, meta.database_id, buffer(message.payload.mid)))]\n if packets:\n if __debug__:\n dprint(\"responding with \", len(packets), \" identity messages\")\n self._statistics.dict_inc(self._statistics.outgoing, u\"-dispersy-identity\", len(packets))\n self._endpoint.send([message.candidate], packets)\n\n else:\n assert not message.payload.mid == message.community.my_member.mid, \"we should always have our own dispersy-identity\"\n if __debug__: dprint(\"could not find any missing members. no response is sent [\", message.payload.mid.encode(\"HEX\"), \", mid:\", message.community.my_member.mid.encode(\"HEX\"), \", cid:\", message.community.cid.encode(\"HEX\"), \"]\", level=\"warning\")", "def test_already_created_shortlink(self):\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' in rv.data\n rv = self.post('https://www.seinfeld.com')\n assert '<a href=\"TheStakeOut\">TheStakeOut</a> is now short for <a href=\"https://www.seinfeld.com\">https://www.seinfeld.com</a>!' 
in rv.data", "def count_unique():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_unique_bookmarks()\r\n trans.commit()", "def test_post_list_duplicate(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status=\"published\",\n language=\"en\", author=self.user)\n layout = SectionLayout.objects.get(sectionlayouttranslation__name=\"Side by Side\")\n container1 = Container.objects.get(name='left')\n section = create_section(title=\"Test Section 1\", story=story,\n layout=layout)\n asset = create_html_asset(type='text', title='Test Asset',\n body='Test content', owner=self.user)\n \n asset2 = create_html_asset(type='text', title='Test Asset',\n body='Test content 2', owner=self.user)\n self.assertEqual(SectionAsset.objects.count(), 0)\n post_data = {\n 'asset': self.get_asset_uri(asset),\n 'container': container1.name\n }\n uri = '/api/0.1/stories/%s/sections/%s/assets/' % (story.story_id,\n section.section_id)\n self.api_client.client.login(username=self.username, password=self.password)\n resp = self.api_client.post(uri, format='json', data=post_data)\n self.assertHttpCreated(resp)\n self.assertEqual(SectionAsset.objects.count(), 1)\n section_asset = SectionAsset.objects.get()\n self.assertEqual(section_asset.section, section)\n self.assertEqual(section_asset.container, container1)\n\n # Re-post a new asset to the same section/container\n post_data = {\n 'asset': self.get_asset_uri(asset2),\n 'container': container1.name\n }\n self.api_client.client.login(username=self.username, password=self.password)\n resp = self.api_client.post(uri, format='json', data=post_data)\n # Confirm that an HTTP 400 (bad request) error was\n # returned\n self.assertHttpBadRequest(resp)", "def test_specified_views_counter_in_request_data_doesnt_work(self):\n authorize_user(self, self.user)\n\n self.data['views_counter'] = 100\n\n r = self.client.post(self.url, self.data)\n self.assertEqual(r.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ShortMessage.objects.all().count(), 1)\n\n self.assertEqual(r.json()['views_counter'], 0)\n self.assertEqual(ShortMessage.objects.all()[0].views_counter, 0)", "def is_new_post(self, post):\n return self.last_post != post['id']", "def shared(request, uuid):\n\n try:\n doc = SearchDocument.objects.get(uuid=uuid)\n if doc.times_shared: \n doc.times_shared += 1\n else:\n doc.times_shared = 1\n doc.save()\n return HttpResponse(\"OK\", content_type=\"text/html\")\n except:\n return HttpResponseServerError()", "def test_handle_response_nodes_no_duplicates_in_shortlist(self):\n lookup = Lookup(FindValue, self.target, self.node, self.event_loop)\n uuids = [uuid for uuid in lookup.pending_requests.keys()]\n uuid = uuids[0]\n contact = lookup.shortlist[0]\n shortlist = tuple([(p.public_key, p.version, p.uri) for p\n in lookup.shortlist])\n msg = Nodes(uuid, self.node.network_id, self.node.network_id,\n self.reply_port, self.version, self.seal, shortlist)\n response = asyncio.Future()\n response.set_result(msg)\n lookup._handle_response(uuid, contact, response)\n self.assertEqual(lookup.shortlist, [PeerNode(*n) for n in shortlist])", "def pubmed_reports(request):\n\n json_resp = {}\n json_resp['usecase'] = []\n reps = Report.objects.all()\n for r in reps:\n\n if r.id_report.startswith('PUBMED_') and not str(r.name_id) in json_resp['usecase']:\n json_resp['usecase'].append(str(r.name_id))\n return JsonResponse(json_resp)", "def verify_response_block_list(self, response):\n self.assertSetEqual(\n {block['id'] for block in 
response.data},\n self.non_orphaned_block_usage_keys,\n )", "def test_card_id_present_but_no_signup_yet(self):\n machine_id = helpers.create_coffee_machine(self.app, \\\n \"Senseo Floor 5\", 0.35, \"EUR\" )\n\n card_id = helpers.gen_card_id() \n\n data = { \"card_id\" : card_id, \n \"api_key\" : utils.api_key,\n \"machine_id\" : machine_id }\n\n # make backend aware of card_id\n res = self.app.put(coffeeapp.coffee_count_api_path, data=data)\n assert 200 == res.status_code\n res_json = json.loads(res.data)\n assert res_json.has_key(\"known\")\n assert res_json[\"known\"] == False\n assert res_json.has_key(\"short_pin\")\n assert res_json.has_key(\"url\")\n\n res = self.app.put(coffeeapp.coffee_count_api_path, data=data)\n # now it should respond with the same data\n assert 200 == res.status_code\n res_json = json.loads(res.data)\n assert res_json.has_key(\"known\")\n assert res_json[\"known\"] == False\n assert res_json.has_key(\"short_pin\")\n assert res_json.has_key(\"url\")", "def _get_response_id(self, response):\n if response.url in self._response_id_cache:\n return self._response_id_cache[response.url]\n else:\n response_id = str(uuid.uuid4())\n self._response_id_cache[response.url] = response_id\n return response_id", "def check_user_data_in_response(response_data):\n assert response_data[\"id\"] > 0\n assert response_data[\"name\"] == pytest.test_user.name\n assert response_data[\"email\"] == pytest.test_user.email\n assert response_data[\"gender\"] == pytest.test_user.gender\n assert response_data[\"status\"] == pytest.test_user.status", "def __iteratePosts(self):\n try:\n replies = self.soup.find('div','vt_postrow_holder').findAll('div', 'vt_postrow_rest')\n if not replies:\n log.info(self.log_msg('No posts found in url %s'%self.currenturi))\n return False\n replies.reverse()\n for reply in replies:\n if not self.__addPost(reply):\n log.info(self.log_msg('Reached last crawled page in url %s'%self.currenturi))\n return False\n return True\n except:\n log.info(self.log_msg('cannot add the data in url %s'%self.currenturi))", "def post_list_view(request, *args, **kwargs):\n qs = Post.objects.all()\n posts_list = [{\"id\": x.id, \"content\": x.content, \"likes\": random.randint(0, 120), \"reposts\": random.randint(0, 10)} for x in qs] #{\"id\": x.id, \"content\": x.content, \"likes\": random.randint(0, 120), \"reposts\": random.randint(0, 10) }/x.serialize()\n data = {\n \"isUser\": False,\n \"response\": posts_list\n }\n return JsonResponse(data) #, save=False", "def expect_duplicate(self):\n # Reset everything for this record\n self._expect_duplicate = False\n self.__dupcntr = 0\n self.__maxdup = 0\n # Get the probability to generate duplicate for next record\n if self.fake.random.random() < self.duplicate_cfg[\"Prob_duplicate\"]:\n self._expect_duplicate = True\n self.__maxdup = self.random_select_ndups()\n else:\n self._expect_duplicate = False\n self.__maxdup = 0\n\n self.__logger.debug(\"expect_duplicate ndups: %d\", self.__maxdup)", "def most_missed_creators(self, cache_max_age=0):\n expected_items = []\n query = u'CLAIM[195:%s] AND NOCLAIM[170]' % \\\n ',195:'.join(self.collections) # collection\n wd_queryset = wdquery.QuerySet(query)\n\n wd_query = wdquery.WikidataQuery(cacheMaxAge=cache_max_age)\n data = wd_query.query(wd_queryset)\n\n if data.get('status').get('error') == 'OK':\n expected_items = data.get('items')\n\n creator_dict = {}\n counter = 0\n for q_val in expected_items:\n q_item = self.wd.QtoItemPage(q_val)\n data = q_item.get()\n claims = data.get('claims')\n if 
u'P170' in claims:\n continue\n descr = data.get('descriptions').get('en')\n if descr and descr.startswith(u'painting by '):\n creator = descr[len(u'painting by '):]\n if '(' in creator: # to get rid of disambiguation addition\n creator = creator[:creator.find('(')].strip()\n if creator in creator_dict.keys():\n creator_dict[creator] += 1\n else:\n creator_dict[creator] = 1\n counter += 1\n pywikibot.output(u'Found %d mentions of %d creators' %\n (counter, len(creator_dict)))\n # output\n f = codecs.open(u'creatorHitlist.csv', 'w', 'utf-8')\n for k, v in creator_dict.iteritems():\n f.write(u'%d|%s\\n' % (v, k))\n f.close()", "def test_retrieve_ingredients_assigned_unique(self):\n ingredient = Ingredient.objects.create(user=self.user, name='Salt')\n Ingredient.objects.create(user=self.user, name='suggar')\n recipe_1 = Recipe.objects.create(\n user=self.user, title='Massefouf',\n time_minutes=30, price=5\n )\n recipe_2 = Recipe.objects.create(\n user=self.user, title='Chakchouka',\n time_minutes=30, price=5\n )\n recipe_1.ingredients.add(ingredient)\n recipe_2.ingredients.add(ingredient)\n\n res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)", "def is_data(i):\n keys = ['_id', '_time']\n return all(i != k for k in keys)", "def get_data_fb(user_id, access_token):\n\n my_user = storage.get(User, user_id)\n my_user.update_attr(\"fb_access_token\", access_token)\n\n r = requests.get('https://graph.facebook.com/me/feed?access_token=' + access_token)\n result = r.json()\n post_dict = {}\n post_list = []\n index = 0\n for posts in result[\"data\"]:\n if index == 10:\n break\n new_post = {}\n\n new_post[\"CrossMe_user_id\"] = user_id\n new_post[\"Post_id_CrossMe\"] = str(uuid.uuid4())\n\n if \"message\" in posts.keys():\n new_post[\"message\"] = posts[\"message\"]\n else:\n new_post[\"message\"] = \"NULL\"\n\n new_post[\"created_time\"] = datetime.strptime(posts[\"created_time\"], '%Y-%m-%dT%H:%M:%S+%f')\n\n new_post[\"source\"] = \"FACEBOOK\"\n\n new_post[\"fb_post_id\"] = posts[\"id\"]\n\n\n URLPOST = 'https://graph.facebook.com/' + posts[\"id\"] + '?fields=object_id&access_token=' + access_token\n post_data = requests.get(URLPOST).json()\n if \"object_id\" in post_data.keys():\n URLIMAGE = 'https://graph.facebook.com/' + post_data[\"object_id\"] + '?fields=images&access_token=' + access_token\n image_data = requests.get(URLIMAGE).json()\n if \"images\" not in image_data.keys():\n continue\n all_images = image_data[\"images\"]\n new_post[\"image_url\"] = all_images[1][\"source\"]\n posts[\"media_type\"] = \"IMAGE\"\n else:\n continue\n posts[\"media_type\"] = \"STATUS\"\n new_post[\"image_url\"] = \"NULL\"\n\n post_list.append(new_post)\n index = index + 1\n\n my_post = Post()\n\n my_post.user_id = new_post[\"CrossMe_user_id\"]\n my_post.creation_date = new_post[\"created_time\"]\n my_post.post_source = new_post[\"source\"]\n my_post.post_type = posts[\"media_type\"]\n my_post.post_text = new_post[\"message\"]\n my_post.media_url = new_post[\"image_url\"]\n my_post.save()\n\n\n post_dict[\"fb_last_post\"] = post_list\n\n return make_response(jsonify(post_dict), 200)", "def verifySURLGUIDDictionary(surl_guid_dictionary):\n\n # A lost file will show up as an empty list in the dictionary\n # Return status True if there are at least one valid SURL\n\n status = False\n pilotErrorDiag = \"\"\n\n tolog(\"Verifying SURLs\")\n if surl_guid_dictionary != {}:\n for guid in surl_guid_dictionary.keys():\n\n 
if surl_guid_dictionary[guid] == []:\n pilotErrorDiag = \"Encountered an empty SURL list for GUID=%s (replica is missing in catalog)\" % (guid)\n tolog(\"!!WARNING!!2222!! %s\" % (pilotErrorDiag)) \n else:\n # Found a valid SURL\n status = True\n tolog(\"GUID=%s has a valid (set) SURL list\" % (guid))\n else:\n pilotErrorDiag = \"Rucio returned an empty replica dictionary\"\n tolog(\"!!WARNING!!2222!! %s\" % (pilotErrorDiag))\n \n return status, pilotErrorDiag", "def group_handling(existing_uuids: Set[str]) -> None:", "def test_task_unique_total(self):\r\n # from bookie.bcelery import tasks\r\n tasks.count_unique()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.UNIQUE_CT)\r\n self.assertEqual(stat.data, 3)", "def _write_uids(self, ids, resp):\n for uid, result in izip(ids, resp.json()[\"results\"]):\n if not result[\"data\"]:\n self._missing.send(uid)\n if self.doclog.isEnabledFor(logging.TRACE):\n self.doclog.trace(\"uid %s does not have properties\" % uid)\n else:\n self._has_properties.add(uid)", "def test_DUPLICATION_COUNT(self):\n self.assertIsInstance(constants.DUPLICATION_COUNT, int,\n \"constants.DUPLICATION_COUNT must be an \" +\n \"integer.\")", "def try_create_uniqe_title(self,title,owner):\n if self.valid_title(title):\n for i in range (1,20):\n new_title=title+\"_\"+str(i)\n if self.unique_title(new_title,owner):\n return new_title\n return False\n else:\n return False", "def test_verification_with_invalid_token(self) -> None:\n\n uuids: typing.List[str] = []\n for i in range(2, 5):\n uuids.append(str(uuid.uuid5(\n uuid.uuid1(1),\n f'abcd123456{i}'\n )))\n\n for temp_uuid in uuids:\n response: Response = self.client.get(f'/api/authors/verify/{temp_uuid}/')\n data = u.get_json(response)\n self.assertEqual(response.status_code, 404)\n self.assertEqual(data, {\n 'detail': 'Not found.'\n })" ]
[ "0.5659422", "0.55370516", "0.5480621", "0.5440656", "0.5439443", "0.5430286", "0.53764886", "0.53743136", "0.5368204", "0.53578305", "0.53345466", "0.532668", "0.530157", "0.5287817", "0.5268618", "0.52629507", "0.5260671", "0.5257835", "0.5253612", "0.52466136", "0.52329594", "0.5224962", "0.5224153", "0.5218604", "0.5210464", "0.5204218", "0.51977813", "0.5195533", "0.5182614", "0.51810396", "0.5168894", "0.51463383", "0.5141247", "0.5135171", "0.5131784", "0.51275855", "0.51177925", "0.5103021", "0.5102829", "0.5093779", "0.5087362", "0.50835645", "0.5082553", "0.5081387", "0.5078074", "0.5075362", "0.5064965", "0.5043248", "0.5042933", "0.5028247", "0.50233036", "0.50228137", "0.50208145", "0.5019896", "0.5008453", "0.50044924", "0.49966556", "0.49708197", "0.49658749", "0.4964934", "0.49640492", "0.4961696", "0.49583906", "0.49551132", "0.4946007", "0.49385467", "0.49291682", "0.4917406", "0.49084085", "0.4904611", "0.49004057", "0.4899166", "0.4891219", "0.4890796", "0.48863792", "0.48844025", "0.48825994", "0.48817885", "0.48759854", "0.48758024", "0.4869275", "0.48691863", "0.48642856", "0.4861724", "0.48602828", "0.4853457", "0.48510137", "0.48463717", "0.48438853", "0.48413467", "0.48369485", "0.48302132", "0.4809754", "0.48090994", "0.48067963", "0.48052916", "0.48046398", "0.4804323", "0.47985014", "0.4793711" ]
0.6993569
0
Compares a list of authors against a list of displaynames
def cross_check(context, authors, poscom):
    displaynames = [x['author']['displayname'] for x in poscom]

    for author in authors:
        if author.user.username not in displaynames:
            context.assertFalse(True, "%s not in list" %author.user.username)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))", "def display_authors(self, *args):\n return ', '.join(author.name for author in args[0].authors.all()[:3])", "def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)", "def test_author_initials(self):\n inv_search = 'author:\"polyakov, a* m*\"'\n spi_search = 'find a a m polyakov'\n self._compare_searches(inv_search, spi_search)", "def test_author_simple(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a ellis, j'\n self._compare_searches(invenio_search, spires_search)", "def test_author_many_initials(self):\n inv_search = 'author:\"bach, p* d* q*\"'\n spi_search = 'find a p d q bach'\n self._compare_searches(inv_search, spi_search)", "def test_author_simplest(self):\n invenio_search = 'author:ellis'\n spires_search = 'find a ellis'\n self._compare_searches(invenio_search, spires_search)", "def test_exactauthor_simple(self):\n invenio_search = 'exactauthor:\"ellis, j\"'\n spires_search = 'find ea ellis, j'\n self._compare_searches(invenio_search, spires_search)", "def authors(self, key, value):\n _authors = self.get(\"authors\", [])\n item = build_ils_contributor(value)\n if item and item not in _authors:\n _authors.append(item)\n try:\n if \"u\" in value:\n other = [\"et al.\", \"et al\"]\n val_u = list(force_list(value.get(\"u\")))\n if [i for i in other if i in val_u]:\n self[\"other_authors\"] = True\n except UnexpectedValue:\n pass\n return _authors", "def alt_authors(self, key, value):\n _authors = self.get(\"authors\", [])\n if _authors:\n for i, v in enumerate(force_list(value)):\n _authors[i].update({\"alternative_names\": clean_val(\"a\", v, str)})\n return _authors", "def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]", "def test_refersto_author_multi_name(self):\n inv_search = 'author:ellis refersto:author:\"parke, s. j.\"'\n spi_search = 'find a ellis and refersto author \"parke, s. j.\"'\n self._compare_searches(inv_search, spi_search)", "def make_author_list(res):\n try:\n r = [\", \".join([clean_txt(x['family']).capitalize(), clean_txt(x['given']).capitalize()]) for x in res['author']]\n except KeyError as e:\n print(\"No 'author' key, using 'Unknown Author'. 
You should edit the markdown file to change the name and citationkey.\")\n r = [\"Unknown Authors\"]\n return r", "def authors_in_relation(context, data, authors):\n guids = [a.id for a in authors]\n guids = map( lambda x: str(x).replace('-', ''), guids)\n\n for guid in guids:\n context.assertTrue(unicode(guid) in data)", "def test_super_short_author_name(self):\n spi_search = \"fin a er and cn cms\"\n inv_search = \"author:er collaboration:cms\"\n self._compare_searches(inv_search, spi_search)", "def test_author_reverse(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a j ellis'\n self._compare_searches(invenio_search, spires_search)", "def test_author_full_first(self):\n invenio_search = 'author:\"ellis, john*\" or exactauthor:\"ellis, j *\" or exactauthor:\"ellis, j\" or exactauthor:\"ellis, jo\" or exactauthor:\"ellis, joh\" or author:\"ellis, john, *\"'\n spires_search = 'find a ellis, john'\n self._compare_searches(invenio_search, spires_search)", "def test_author_full_initial(self):\n inv_search = 'author:\"klebanov, ig* r*\" or exactauthor:\"klebanov, i r\"'\n spi_search = \"find a klebanov, ig.r.\"\n self._compare_searches(inv_search, spi_search)", "def has_duplicates_authors(L):\r\n # make a copy of t to avoid modifying the parameter\r\n s = L[:]\r\n s.sort()\r\n\r\n # check for adjacent elements that are equal\r\n for i in range(len(s)-1):\r\n if s[i] == s[i+1]:\r\n return True\r\n return False", "def __generate_author_string__(self, list_of_authors):\n author_string = \"\"\n return author_string.join(list_of_authors)", "def make_citation_authors(res):\n if \"author\" in res.keys():\n first_author = res['author'][0]['family'] + \", \" + res['author'][0]['given']\n last_author = res['author'][-1]['given'] + \" \" + res['author'][-1]['family']\n middle_authors = \", \".join(\" \".join([x['given'], x['family']]) for x in res['author'][1:-1])\n #assemble authors\n author_string = first_author\n author_string = author_string + \", \" + middle_authors if middle_authors != '' else author_string\n author_string = author_string + \", and \" + last_author if len(res['author']) > 1 else author_string\n \n author_string = author_string + \".\" if author_string[-1] != \".\" else author_string\n else:\n author_string = \"Unknown Authors\"\n\n return clean_txt(author_string.capitalize())", "def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)", "def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]", "def test_citedby_author(self):\n inv_search = 'citedby:author:doggy'\n spi_search = 'find citedby author doggy'\n self._compare_searches(inv_search, spi_search)", "def get_authors(draft):\n authors = []\n for a in draft.authors.all():\n initial = ''\n prefix, first, middle, last, suffix = a.person.name_parts()\n if first:\n initial = first + '. 
'\n entry = '%s%s <%s>' % (initial,last,a.address)\n authors.append(entry)\n return authors", "def format_authors(self, style):\n def format_one_author(author, style):\n \"\"\"\n Helper function that does it for one author.\n \"\"\"\n # Check If there's no given name.\n # We should probably try to handle the no family name case, but\n # I'm not sure when we will actually come across an example...\n if \"given\" not in author or author[\"given\"] == []:\n return author[\"family\"]\n # Otherwise...\n family_name = author[\"family\"]\n given_names = author[\"given\"]\n\n # deal with a pathological case, 10.1016/j.jmr.2018.02.009\n ns = given_names.split()\n for i, name in enumerate(ns):\n if i >= 1 and name.startswith('-'):\n this_name = ns.pop(i)\n ns[i - 1] += this_name\n given_names = \" \".join(ns)\n\n if style == \"display\":\n return (\"\".join(n[0] for n in re.split(r\"[\\s-]\", given_names))\n + \" \" + family_name)\n elif style == \"acs\":\n # \"Jean-Baptiste Simon\" -> [[\"Jean\", \"Baptiste\"], [\"Simon\"]]\n split_both = [name.split('-') for name in given_names.split()]\n # [[\"Jean\", \"Baptiste\"], [\"Simon\"]] -> \"J.-B. S\"\n joined_both = \". \".join([\".-\".join(n[0] for n in names)][0]\n for names in split_both)\n return (family_name + \", \" + joined_both + \".\")\n elif style == \"bib\":\n s = family_name + \", \" + given_names\n return s.replace(\". \", \".\\\\ \") # Must use control spaces\n elif style == \"full\":\n return given_names + \" \" + family_name\n # Otherwise, grumble.\n else:\n raise ValueError(f\"Invalid value '{style}' for style.\")\n\n if self.authors is not None:\n return [format_one_author(author, style) for author in self.authors]", "def test_absorbs_naked_author_search(self):\n invenio_search = \"author:ellis\"\n spi_search = \"author ellis\"\n self._compare_searches(invenio_search, spi_search)", "def authors(author_ids):\n if author_ids is None:\n return ''\n else:\n ids = []\n for author_id in author_ids.split(','):\n ids.append(User.id == int(author_id))\n authors = User.query.filter(or_(*ids)).all()\n if authors is None:\n return ''\n else:\n return 'by ' + ', '.join([author.name for author in authors])", "def parse_authors():\n import subprocess\n try:\n output = subprocess.check_output(['git', 'shortlog', '-s'],\n universal_newlines=True)\n except Exception as ex:\n print('ex = {!r}'.format(ex))\n return []\n else:\n striped_lines = (l.strip() for l in output.split('\\n'))\n freq_authors = [line.split(None, 1) for line in striped_lines if line]\n freq_authors = sorted((int(f), a) for f, a in freq_authors)[::-1]\n # keep authors with uppercase letters\n authors = [a for f, a in freq_authors if a.lower() != a]\n return authors", "def print_authors(popular_authors):\n\n print('\\nThe list of authors being listed as per their popularity:\\n')\n for author in popular_authors:\n print(author[0] + '\\t-\\t' + str(author[1]) + ' views \\n')\n print('-------------------------------------------------------\\n')", "def test_find_first_author(self):\n inv_search = 'firstauthor:ellis'\n spi_search = 'find fa ellis'\n self._compare_searches(inv_search, spi_search)", "def get_author_citations(updated_redic_list, citedbydict, initial_author_dict, config):\n\n #sorry bout repeated code to get the tags\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n tagvals = {}\n for t in tags:\n try:\n x = config.get(config.get(\"rank_method\", \"function\"), t)\n tagvals[t] = x\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in 
config\", alert_admin=True)\n return initial_author_dict\n\n #parse the tags\n mainauthortag = tagify(parse_tag(tagvals['first_author']))\n coauthortag = tagify(parse_tag(tagvals['additional_author']))\n extauthortag = tagify(parse_tag(tagvals['alternative_author_name']))\n if task_get_task_param('verbose') >= 9:\n write_message(\"mainauthortag \"+mainauthortag)\n write_message(\"coauthortag \"+coauthortag)\n write_message(\"extauthortag \"+extauthortag)\n\n author_cited_in = initial_author_dict\n if citedbydict:\n i = 0 #just a counter for debug\n write_message(\"Checking records referred to in new records\")\n for u in updated_redic_list:\n if (i % 1000 == 0):\n mesg = \"Author ref done \"+str(i)+\" of \"+str(len(updated_redic_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n if citedbydict.has_key(u):\n these_cite_k = citedbydict[u]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n authors = get_fieldvalues(u, mainauthortag)\n coauthl = get_fieldvalues(u, coauthortag)\n extauthl = get_fieldvalues(u, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author ref done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n #go through the dictionary again: all keys but search only if new records are cited\n write_message(\"Checking authors in new records\")\n i = 0\n for k in citedbydict.keys():\n if (i % 1000 == 0):\n mesg = \"Author cit done \"+str(i)+\" of \"+str(len(citedbydict.keys()))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n these_cite_k = citedbydict[k]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n #do things only if these_cite_k contains any new stuff\n intersec_list = list(set(these_cite_k)&set(updated_redic_list))\n if intersec_list:\n authors = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author cit done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return author_cited_in", "def get_coauthored_publications_by_authors(cached_list, cached_set, author1_name, author2_name):\n publications = { 'cdblp': [], 'dblp': [] }\n pub1 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author1_name)\n author2 = DBLPQuery.author_distinct(cached_list, cached_set, author2_name)\n #pub2 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author2_name)\n for cdblp_pub in pub1.get('cdblp', []):\n authors = set(cdblp_pub.get('authors', []))\n authors_en = set(map(lambda a: CDBLPAuthor.getEnglishName(a)['full_name'], authors))\n if author2.get('cdblp', {}).get('author_name', {}).get('zh') in authors or author2.get('dblp', {}).get('author_name') in authors_en:\n publications['cdblp'].append(cdblp_pub)\n\n 
for dblp_pub in pub1.get('dblp', []):\n authors = set(map(lambda a: a.get('name'), dblp_pub.get('authors', [])))\n if author2.get('dblp', {}).get('author_name') in authors or author2.get('cdblp', {}).get('author_name', {}).get('full_name') in authors:\n publications['dblp'].append(dblp_pub)\n\n return publications", "def authors():\n print(\"\"\"\\n WanderersTeam:\\n\n Alicja Olejniczak\\n\n Bartosz Zawadzki\\n\n Klaudia Slawinska\\n\\n\"\"\")", "def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))", "def get_author_name_urls(dept_name, dept_url):\n\t# Change to \"School of Humanities\" to match the name used in Enlighten\n\t# Done because the string obtained from http://www.gla.ac.uk/schools/ contains the Gaelic name as well\n\tif \"Humanities\" in dept_name:\n\t\tdept_name = \"School of Humanities\"\n\n\t# get list of names of researchers in department\n\tnames = get_names(dept_url)\n\n\twinning_name_urls = set()\n\n\t# loop through each name\n\tfor name in names:\n\t\tname = initialise_first_name(name)\n\t\t# Get Enlighten page on which author name will be found (page for the letter of author's last name)\n\t\tfull_url = author_list_base + \"index.\"+ name.split(\" \")[0][0] + \".html\"\n\t\ttree = get_tree(full_url)\n\t\t# Get all candidate authors which match the name\n\t\tname_urls = get_name_url_matches(name, tree)\n\t\t# If candidates were found\n\t\tif name_urls:\n\t\t\t# Filter out authors that have already been scraped\n\t\t\tname_urls = [name_url for name_url in name_urls if name_url not in winning_name_urls]\n\t\t\t# Get the first ranked (name, url) tuple for the target name from the remaining candidates\n\t\t\twinning_name_url = get_winning_url(name_urls, dept_name)\n\t\t\tif winning_name_url:\n\t\t\t\twinning_name_urls.add(winning_name_url)\n\n\treturn winning_name_urls", "def getAuthors(self): #$NON-NLS-1$\r", "def get_coauthors_by_author(cached_list, cached_set, author_name):\n author = DBLPQuery.author_distinct(cached_list, cached_set, author_name)\n coauthors = {}\n if author['dblp'].__contains__('coauthors'):\n for author_key in author['dblp']['coauthors']:\n coauthors[author_key] = { 'en': author_key, 'zh': '' }\n\n if author['cdblp'].__contains__('coauthors'):\n for author_key in author['cdblp']['coauthors']:\n if coauthors.__contains__(author_key['full_name']):\n coauthors[author_key['full_name']]['zh'] = author_key['zh']\n else:\n coauthors[author_key['full_name']] = { 'en': author_key['full_name'], 'zh': author_key['zh'] }\n\n return coauthors", "def _get_authors_list():\n\n articles = os.listdir(\"../data/\")\n authors = []\n for article in articles:\n with open(\"../data/\" + article, 'r') as file:\n lines = file.readlines()\n author = tuple(\n line.replace(\"\\n\", \"\").split()[1] for line in lines\n if \"Автор:\" in line\n )[0]\n authors.append(author)\n\n return authors", "def authors_completion(self, terms):\n return self.db.execute(u'''SELECT * FROM \"authors\" WHERE name LIKE ? 
LIMIT 50''', (u\"%{}%\".format(terms),)).fetchall()", "def _authors(fname):\n if not os.path.exists(fname):\n raise RuntimeError(\"File {} not found\".format(fname))\n with open(fname, \"r\") as obj:\n lines = [_tostr(item.strip()) for item in obj.readlines()]\n regexp = re.compile(r\"(?:\\s*\\*\\s+)?(.*)\\s+<(.*)>.*\")\n for line in lines:\n match = regexp.match(line)\n if match:\n name, email = match.groups()\n yield name, email", "def find_authors(code):\n url = baseurl(code)\n page = req(url)\n soup = BeautifulSoup(page, 'lxml')\n addr = [t.attrs.get('content', None) \n for t in soup.find_all(\"meta\", {\"name\": \"citation_author_email\"})]\n \n # corresponding authors will have their email under another tag too\n corr = [t.find('a').attrs.get('href', None)\n for t in soup.find_all(None, {\"class\": \"author-corresp-email-link\"})]\n\n addr = [a for a in addr if a is not None]\n corr = [a.replace('mailto:', '') for a in corr if a is not None]\n\n return dict(corr=list(set(corr)), all=list(set(addr)))", "def print_popular_authors(authors):\n for (author, views) in authors:\n print \"%s - %d views\" % (author, views)", "def test_first_author_full_initial(self):\n inv_search = 'firstauthor:\"klebanov, ig* r*\" or exactfirstauthor:\"klebanov, i r\"'\n spi_search = \"find fa klebanov, ig.r.\"\n self._compare_searches(inv_search, spi_search)", "def get_name_url_matches(author_name, html_tree):\n\n\t# Convert name to lower case - this will be searched against lower case text on the Enlighten page\n\tlower_name = author_name.lower()\n\t# Used to convert text in <a> tags to lower case in paths before checking if matches the name provided\n\tcase = 'translate(text(), \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\")'\n\t# This is the path to look for <a> tags which contain the target name as text\n\t# N.B. contains() is used rather than equals as it can catch more cases\n\tpath = '//table/tr/td/ul/li/a[contains(%s, \\\"%s\\\")]' % (case, lower_name)\n\t# get the list of <a> elements whose text contains the name\n\telements = html_tree.xpath(path)\n\t# If target string was found, for each <a> element that contains it, make a\n\t# (text, url) tuple and create a list out of the resulting tuples\n\t# N.B. 
the href obtained from the element is concatenated to the base url as it is relative\n\tif elements:\n\t\t# have to concatenate as href is given as relative path\n\t\ttext_url_tups = [(elem.text, author_list_base + elem.get(\"href\")) for elem in elements]\n\telse:\n\t\ttext_url_tups = None\n\n\treturn text_url_tups", "def corporate_authors(self, key, value):\n _corporate_authors = self.get(\"authors\", [])\n\n for v in force_list(value):\n if key == \"710__\":\n if \"a\" in v:\n _corporate_authors.append(\n {\n \"full_name\": clean_val(\"a\", v, str),\n \"type\": \"ORGANISATION\",\n }\n )\n else:\n self[\"authors\"] = collaborations(self, key, value)\n raise IgnoreKey(\"corporate_authors\")\n else:\n _corporate_authors.append(\n {\"full_name\": clean_val(\"a\", v, str), \"type\": \"ORGANISATION\"}\n )\n return _corporate_authors", "def test_list_all_authors(self):\n response = self.client.get(reverse('authors') + '?page=2')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n self.assertTrue(response.context['is_paginated'] is True)\n self.assertTrue(len(response.context['author_list']) == 3)", "def authors(self):\n authors = self.context.Authors(sep=' and ',\n lastsep=' and ',\n format=\"%L, %F %M\",\n abbrev=0,\n lastnamefirst=0)\n if not isinstance(authors, unicode):\n authors = unicode(authors, 'utf-8')\n return authors", "def format_authors(author_list):\n if isinstance(author_list, (list, tuple)):\n return \", \".join([format_authors(author) for author in author_list])\n else:\n if \", \" in author_list:\n author_list = author_list.split(\", \")\n author_list.reverse()\n author_list = \" \".join(author_list)\n elif \",\" in author_list:\n author_list = author_list.split(\",\")\n author_list.reverse()\n author_list = \" \".join(author_list)\n return author_list", "def test_find_first_author_initial(self):\n inv_search = 'firstauthor:\"ellis, j*\"'\n spi_search = 'find fa j ellis'\n self._compare_searches(inv_search, spi_search)", "def assertAuthorsInComments(context, authors, comments):\n cross_check(context, authors, comments)", "def AuthorURLs(entry):\n a_URLs = ''\n for a in entry.getAuthors():\n url = a.get('homepage', ' ')\n a_URLs += \"%s and \" % url\n return a_URLs[:-5]", "def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])", "def format_one_author(author, style):\n # Check If there's no given name.\n # We should probably try to handle the no family name case, but\n # I'm not sure when we will actually come across an example...\n if \"given\" not in author or author[\"given\"] == []:\n return author[\"family\"]\n # Otherwise...\n family_name = author[\"family\"]\n given_names = author[\"given\"]\n\n # deal with a pathological case, 10.1016/j.jmr.2018.02.009\n ns = given_names.split()\n for i, name in enumerate(ns):\n if i >= 1 and name.startswith('-'):\n this_name = ns.pop(i)\n ns[i - 1] += this_name\n given_names = \" \".join(ns)\n\n if style == \"display\":\n return (\"\".join(n[0] for n in re.split(r\"[\\s-]\", given_names))\n + \" \" + 
family_name)\n elif style == \"acs\":\n # \"Jean-Baptiste Simon\" -> [[\"Jean\", \"Baptiste\"], [\"Simon\"]]\n split_both = [name.split('-') for name in given_names.split()]\n # [[\"Jean\", \"Baptiste\"], [\"Simon\"]] -> \"J.-B. S\"\n joined_both = \". \".join([\".-\".join(n[0] for n in names)][0]\n for names in split_both)\n return (family_name + \", \" + joined_both + \".\")\n elif style == \"bib\":\n s = family_name + \", \" + given_names\n return s.replace(\". \", \".\\\\ \") # Must use control spaces\n elif style == \"full\":\n return given_names + \" \" + family_name\n # Otherwise, grumble.\n else:\n raise ValueError(f\"Invalid value '{style}' for style.\")", "def test_get_authors_from_data(self):\n responses.add(\n responses.GET,\n \"https://openlibrary.org/authors/OL382982A\",\n json={\n \"name\": \"George Elliott\",\n \"personal_name\": \"George Elliott\",\n \"last_modified\": {\n \"type\": \"/type/datetime\",\n \"value\": \"2008-08-31 10:09:33.413686\",\n },\n \"remote_ids\": {\n \"isni\": \"000111\",\n },\n \"key\": \"/authors/OL453734A\",\n \"type\": {\"key\": \"/type/author\"},\n \"id\": 1259965,\n \"revision\": 2,\n },\n status=200,\n )\n results = self.connector.get_authors_from_data(self.work_data)\n result = list(results)[0]\n self.assertIsInstance(result, models.Author)\n self.assertEqual(result.name, \"George Elliott\")\n self.assertEqual(result.openlibrary_key, \"OL453734A\")\n self.assertEqual(result.isni, \"000111\")", "def get_authors(self):\n return [aer.author for aer in self.authorentryrank_set.all()]", "def get_external_authors_between(base, head):\n\n # Get all authors\n authors = git(\"log\", f\"{base}..{head}\", \"--format=%aN|%aE\")\n authors = set(\n author.partition(\"|\")[0].rstrip()\n for author in authors if not (author.endswith((\"@google.com\"))))\n\n # Get all co-authors\n contributors = git(\n \"log\", f\"{base}..{head}\", \"--format=%(trailers:key=Co-authored-by)\"\n )\n\n coauthors = []\n for coauthor in contributors:\n if coauthor and not re.search(\"@google.com\", coauthor):\n coauthors.append(\n \" \".join(re.sub(r\"Co-authored-by: |<.*?>\", \"\", coauthor).split())\n )\n return \", \".join(sorted(authors.union(coauthors), key=str.casefold))", "def title_authors_fingerprint(self):\n if None in (self.title, self.authors_lastnames):\n return None\n\n lastnames = list(map(normalize_text_value, self.authors_lastnames))\n lastnames = normalize_list_direction(lastnames)\n lastnames = '.'.join(lastnames)\n\n title = normalize_text_value(self.title)\n\n return '$'.join((lastnames, title))", "def test_refersto_author(self):\n inv_search = 'refersto:author:kitty'\n spi_search = 'find refersto author kitty'\n self._compare_searches(inv_search, spi_search)", "def test_draft_list_only_display_authors_aids(client, contributor):\n\n AidFactory(name='Is this the real life?', author=contributor)\n AidFactory(name='Is this just fantasy?')\n\n client.force_login(contributor)\n drafts_url = reverse('aid_draft_list_view')\n res = client.get(drafts_url)\n\n content = res.content.decode('utf-8')\n assert 'Is this the real life?' in content\n assert 'Is this just fantasy?' 
not in content", "def Authors(self, default=[{}]):\n tmp = self.data.get('authors', default)\n return [HEP.AuthorReducedObject(i) for i in tmp]", "def get_authors_from_papers(papers):\n auth_set = set()\n for p in papers:\n auth_set.update(p['authors'])\n return list(auth_set)", "def get_authors(config: Config) -> dict:\n\n file_handle = load_csv_file(config.input)\n\n start = time.time()\n raw_authors = get_raw_authors(config, file_handle)\n end = time.time()\n print(f\"get_raw_authors took {end - start} seconds\")\n\n start = time.time()\n sanitized_authors = sanitize_authors(config, raw_authors)\n end = time.time()\n print(f\"sanitize_authors took {end - start} seconds\")\n\n return sanitized_authors", "def authors():\n\tclick.clear()\n\trich.print(\"[bold]IDT[/bold] was initially made by [bold magenta]Deliton Junior[/bold magenta] and [bold red]Misael Kelviny[/bold red]\")", "def compare(reference, catalog_record, title_similarity_score):\n\tscore_explanation = \"(%s=%s\"%(\"title_similarity\",title_similarity_score)\n\tscores = [title_similarity_score]\n\t# TODO: this needs to be improved! right now returns too highly a value for wrong matches\n\t# compare the `author` field\n\tif(\"author\" in reference and \"author\" in catalog_record and catalog_record[\"author\"] is not None \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tand len(catalog_record[\"author\"])>2):\n\t\tscore = fuzzyContainment(reference[\"author\"],catalog_record[\"author\"])\n\t\tscores.append(score)\n\t\tlogger.debug(\"[author] The score of fuzzyContainment between %s and %s is %s\"%(reference[\"author\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t , catalog_record[\"author\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t , score))\n\telse:\n\t\tscore = 0.01\n\t\tscores.append(score)\n\tscore_explanation = \"%s %s=%s\"%(score_explanation,\"+ author_similarity\",score)\n\t# compare the `year` field\n\tif(\"year\" in reference and catalog_record[\"year\"] is not None and len(catalog_record[\"year\"])>2):\n\t\tif(\"-\" in reference[\"year\"]):\n\t\t\tfirst_part = reference[\"year\"].split(\"-\")[0].replace(\" \",\"\")\n\t\t\tsecond_part = reference[\"year\"].split(\"-\")[1].replace(\" \",\"\")\n\t\t\tscore = first_part in catalog_record[\"year\"] or second_part in catalog_record[\"year\"]\n\t\telse:\n\t\t\tscore = reference[\"year\"] == catalog_record[\"year\"]\n\t\tlogger.debug(\"[year] The similarity between %s and %s is %s\"%(reference[\"year\"], catalog_record[\"year\"], score))\n\t\tscores.append(score)\n\telse:\n\t\tscore = 0.01\n\t\tscores.append(score)\n\tscore_explanation = \"%s %s=%s\"%(score_explanation,\"+ year_similarity\",score)\n\tif(\"place\" in reference and \"place\" in catalog_record and catalog_record[\"place\"] is not None \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t and len(catalog_record[\"place\"])>2):\n\t\tscore = fuzzyContainment(reference[\"place\"], catalog_record[\"place\"])\n\t\tlogger.debug(\"[publicationplace] The score of fuzzyContainment between %s and %s is %s\"%(reference[\"place\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t, catalog_record[\"place\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t, score))\n\t\tscores.append(score)\n\telse:\n\t\tscore = 0.01\n\t\tscores.append(score)\n\tscore_explanation = \"%s %s=%s\"%(score_explanation,\"+ publplace_similarity\",score)\n\tif(\"publisher\" in reference and \"place\" in catalog_record[\"publisher\"] \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t and catalog_record[\"publisher\"] is not None \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t and 
len(catalog_record[\"publisher\"])>2):\n\t\tscore = fuzzyContainment(reference[\"publisher\"], catalog_record[\"publisher\"])\n\t\tlogger.debug(\"[publisher] The score of fuzzyContainment between %s and %s is %s\"%(reference[\"publisher\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t , catalog_record[\"publisher\"]\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t , score))\n\t\tscores.append(score)\n\telse:\n\t\tscore = 0.01\n\t\tscores.append(score)\n\tscore_explanation = \"%s %s=%s)\"%(score_explanation,\"+ publisher_similarity\",score)\n\tglobal_score = sum(scores)/len(reference)\n\tscore_explanation = \"%s / %s = %s\"%(score_explanation,len(reference),global_score)\n\tmessage = \"\"\"\n\tInput reference: %s\n\tRecord compared: %s\n\tGlobal score: %s\n\tScore's explanation: %s\n\t\"\"\"%(reference, catalog_record, global_score, score_explanation)\n\treturn global_score, score_explanation", "def test_search_by_author(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.search_by_author(\"George Orwell\"), 0)\n s1.add_resource(b1)\n self.assertEqual(s1.search_by_author(\"George Orwell\"), 1)", "def name_comparator(last_name):\n score = 0\n\n # check if first n letters of first and last name matches\n for i in range(1, 4):\n if len(first_name) >= i and len(last_name) >= 2:\n # if previous letter does not match, don't continue\n if i > 1 and score > (i - 1) * -1:\n break\n\n # lower score by one per each matching letter\n if first_name[i - 1: i] == last_name[i - 1: i]:\n score -= 1\n\n \"\"\"detect names with umlauts and give them higher score if both have\n them, lower score if only one has them.\"\"\"\n regex = compile(r'[äöå]')\n if score == 0:\n if regex.search(first_name) and regex.search(last_name):\n score -= 1\n else:\n if bool(regex.search(last_name)) != bool(regex.search(last_name)):\n score += 1\n\n return score", "def load_authors():\n\n ret = {}\n for token in util.git('log', '--format=%aE:::%aN').split('\\n'):\n email, name = token.split(':::')\n ret[email] = name\n return ret", "def detect_author(user_to_tweets: Dict[str, List[tuple]], tweet_text: str) -> \\\n str:\n acc = []\n \n for keys in user_to_tweets:\n author_hashes = hashtag_seperator(user_to_tweets[keys])\n text_hashes = extract_hashtags(tweet_text)\n if set(text_hashes).issubset(author_hashes):\n acc.append(keys)\n if len(acc) == 1:\n return acc[0]\n return 'unknown'", "def get_abbr_authors(draft):\n initial = ''\n result = ''\n authors = DocumentAuthor.objects.filter(document=draft)\n \n if authors:\n prefix, first, middle, last, suffix = authors[0].author.person.name_parts()\n if first:\n initial = first[0] + '. 
'\n result = '%s%s' % (initial,last)\n if len(authors) > 1:\n result += ', et al'\n \n return result", "def get_authors(self):\n\n names = []\n rows = []\n\n try:\n rows = self.find_elements_in_owner(self.locators['author_row'])\n except NoSuchElementException:\n # there are no authors\n rows = []\n\n for rowEle in rows:\n authorname = self._get_author(rowEle)\n names.append(authorname)\n return names", "def __compare_lowercase(self, dn, dn_list):\n\t\tfor d in dn_list:\n\t\t\tif dn.lower() == d.lower():\n\t\t\t\treturn True\n\t\treturn False", "def print_popular_authors():\n print(\"\\nAuthors listed by article views:\\n\")\n views_data = get_query_results(AUTHORS_VIEWS_QUERY)\n author_row_format = '{} - {} views'\n for author, views in views_data:\n print(author_row_format.format(author, views))", "def test_legal_names(self):\n adjectives = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\n nouns = ['Anvil', 'Catapult' 'Disguise' 'Mousetrap', '???']\n products = acme_report.generate_products()\n for prod in range(len(products)):\n prod_name = products[prod].name\n name_split = prod_name.split()\n self.assertIn(name_split[0], adjectives)\n self.assertIn(name_split[1], nouns)", "def authors_string(self):\n all_authors = Author.objects.filter(book=self).order_by('last_name')\n return u', '.join([unicode(a) for a in all_authors])", "def test_display_names(self):\r\n names = [\r\n ('correct', u'correct'),\r\n ('incorrect', u'incorrect'),\r\n ('incomplete', u'incomplete'),\r\n ('unanswered', u'unanswered'),\r\n ('unsubmitted', u'unanswered'),\r\n ('queued', u'processing'),\r\n ('dave', u'dave'),\r\n ]\r\n for status, display_name in names:\r\n statobj = inputtypes.Status(status)\r\n self.assertEqual(statobj.display_name, display_name)", "def parse_authors(article):\n author_names = article.find(\"sourcedesc\").findAll(\"persname\")\n authors = []\n for author in author_names:\n firstname = author.find(\"forename\", {\"type\": \"first\"})\n firstname = firstname.text.strip() if firstname is not None else \"\"\n middlename = author.find(\"forename\", {\"type\": \"middle\"})\n middlename = middlename.text.strip() if middlename is not None else \"\"\n lastname = author.find(\"surname\")\n lastname = lastname.text.strip() if lastname is not None else \"\"\n if middlename is not \"\":\n authors.append(firstname + \" \" + middlename + \" \" + lastname)\n else:\n authors.append(firstname + \" \" + lastname)\n authors = \"; \".join(authors)\n return authors", "def merge_duplicates_of_last_name(self, people_with_last_name_list):\n\n # if only one name -> already finished\n if len(people_with_last_name_list) == 1:\n return True\n\n # sort list by count (higher counts are less likely misspellings).\n people_with_last_name_list.sort(key=lambda x: x.count, reverse=True)\n\n # Compare each person with each other person\n for person1 in people_with_last_name_list:\n for person2 in people_with_last_name_list:\n\n if person1 == person2:\n continue\n\n # if no first and middle name -> continue\n if person1.first == '' and person1.middle == '':\n continue\n if person2.first == '' and person2.middle == '':\n continue\n\n # if first and middle names match exactly -> merge\n if person1.first == person2.first and person1.middle == person2.middle:\n self.merge_two_persons(person1, person2)\n return False\n\n # if both have full first names and they don't match -> skip\n if len(person1.first) > 2 and len(person2.first) > 2 and \\\n person1.first != person2.first:\n continue\n\n # if both have full middle 
names and they don't match -> skip\n if len(person1.middle) > 2 and len(person2.middle) > 2 and \\\n person1.middle != person2.middle:\n continue\n\n # if initial of the first name is not the same -> skip\n if person1.first and person2.first and person1.first[0] != person2.first[0]:\n continue\n\n # if both have at least first and middle initials\n if person1.first and person1.middle and person2.first and person2.middle:\n # if first or last initials don't match -> skip\n if person1.first[0] != person2.first[0] or person1.middle[0] != person2.middle[0]:\n continue\n\n # if first and middle initials match -> merge\n if person1.first[0] == person2.first[0] and person1.middle[0] == person2.middle[0]:\n self.merge_two_persons(person1, person2)\n return False # we're not finished -> return False\n\n # if both have the same first name and only one has initials -> merge\n if person1.first and person2.first and \\\n len(person1.first) > 2 and len(person2.first) > 2 and \\\n (person1.middle == '' or person2.middle == ''):\n self.merge_two_persons(person1, person2)\n return False # not finished\n\n # if four people or fewer left with the last name and at least their initials match\n # -> merge (the number 4 is a guesstimate.\n if len(people_with_last_name_list) <= 4 and person1.first and person2.first and \\\n person1.first[0] == person2.first[0]:\n self.merge_two_persons(person1, person2)\n return False # not finished\n\n else:\n continue\n\n # if no merges after comparing every person with every other person -> finished\n return True", "def test_author_list_equality_with_invalid_authentication(self) -> None:\n\n # Let's check for a request with no authorization\n\n response: Response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(data, {\n 'detail': 'Authentication credentials were not provided.'\n })\n\n # Now lets check with an Author without permissions.\n\n # Select the underprivileged author randomly.\n author: Author = random.choice(self.authors)\n\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(author.get_key()))\n\n response: Response = self.client.get(self.url)\n data: typing.Dict[typing.Any, typing.Any] = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(data, {\n 'detail': 'You do not have permission to perform this action.'\n })", "def test_author_filtering(self):\n # Get a valid author\n entry = Entry.objects.get(id=1)\n params = {\"author\": entry.first_author.id}\n\n self._test_filtering(**params)", "def get_coauthors(self):\n # Get number of authors to search for\n res = download(url=self.coauthor_link, accept='json')\n data = loads(res.text)['search-results']\n N = int(data.get('opensearch:totalResults', 0))\n # Store information in namedtuples\n fields = 'surname given_name id areas affiliation_id name city country'\n coauth = namedtuple('Coauthor', fields)\n coauthors = []\n # Iterate over search results in chunks of 25 results\n count = 0\n while count < N:\n params = {'start': count, 'count': 25}\n res = download(url=self.coauthor_link, params=params, accept='json')\n data = loads(res.text)['search-results'].get('entry', [])\n # Extract information for each coauthor\n for entry in data:\n aff = entry.get('affiliation-current', {})\n try:\n areas = [a['$'] for a in entry.get('subject-area', [])]\n except TypeError: # Only one subject area given\n areas = [entry['subject-area']['$']]\n new = 
coauth(surname=entry['preferred-name']['surname'],\n given_name=entry['preferred-name'].get('given-name'),\n id=entry['dc:identifier'].split(':')[-1],\n areas='; '.join(areas),\n affiliation_id=aff.get('affiliation-id'),\n name=aff.get('affiliation-name'),\n city=aff.get('affiliation-city'),\n country=aff.get('affiliation-country'))\n coauthors.append(new)\n count += 25\n return coauthors", "def giveAuthorsName(catalog, ConstituentsID):\n names = []\n\n for x in ConstituentsID:\n names.append(' '+model.giveAuthorName(catalog, x))\n return ','.join(names)", "def match(self, filter_text):\n\n return filter_text.lower() in self.author.lower() or \\\n super().match(filter_text)", "def lookups(self, request, model_admin):\n # lambda article: article.author # 接受一个article,返回对应作者对象\n # map(func, iterable) # 将可迭代对象的每个元素传入func\n # author_list 所有文章的作者列表,去重\n author_list = list(set(map(lambda article: article.author, Article.objects.all())))\n\n # author_list2 = []\n # for author in author_list:\n # author_list2.append((author.id, author.nickname or author.username))\n # return author_list2\n\n for author in author_list: # verbose value, 在过滤器里显示的名称\n yield (author.id, (author.nickname or author.username))", "def sort_name_urls(name_url_list, schl_name):\n\n\t# A dict to have (name, url) tuples as keys and the amount of papers in the relevant school\n\t# as values\n\tschool_matches = {}\n\n\tfor name_url in name_url_list: # for each author page\n\t\tschool_matches[name_url] = 0\n\t\tauthor_page_tree = get_tree(name_url[1])\n\t\t# get the <a> elements for each paper on the author's page\n\t\ta_elems = get_a_elems_for_papers(author_page_tree)\n\t\tfor a in a_elems: # for each paper\n\t\t\t# from the paper's Enlighten page, get a string indicating what school it is associated to\n\t\t\tschl_info = get_paper_school_info(a.get(\"href\"))\n\t\t\t# If the relevant school is found in the school info string, increment the value\n\t\t\t# of this (name, url) key\n\t\t\tif schl_name in schl_info:\n\t\t\t\tschool_matches[name_url] += 1\n\n\t# From dict, create list of ((name, url), numpapers) tuples sorted by value\n\tsorted_name_urls = sorted(school_matches.items(), key=operator.itemgetter(1), reverse=True)\t\n\n\treturn sorted_name_urls", "def query_authors(cls):\n authors = from_cache('AuthorsList')\n if not authors:\n authors = SuiAuthor.all().order('name').fetch(400)\n to_cache('AuthorsList', authors)\n return authors", "def test_citedby_refersto_author(self):\n inv_search = 'refersto:citedby:author:penguin'\n spi_search = 'find refersto citedby author penguin'\n self._compare_searches(inv_search, spi_search)", "def test_get_all_authors(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(self.user_author, format='json', HTTP_AUTHORIZATION='Token ' +token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def filter_publication(publication, cmp_authors=True):\n query = None\n isbn_query = False\n\n # there can be ISBN query or book title query\n if publication.optionals and publication.optionals.ISBN:\n query = aleph.ISBNQuery(publication.optionals.ISBN)\n isbn_query = True\n else:\n query = aleph.TitleQuery(publication.title)\n\n result = aleph.reactToAMQPMessage(aleph.SearchRequest(query), \"\")\n\n if not result.records:\n return publication # book is not in database\n\n # if there was results with this ISBN, compare titles of the books\n # (sometimes, there are different books with same ISBN because of human\n # errors)\n if isbn_query:\n for record in 
result.records:\n epub = record.epublication\n\n # try to match title of the book\n if compare_names(epub.nazev, publication.title) >= 80:\n return None # book already in database\n\n return publication\n\n # checks whether the details from returned EPublication match Publication's\n for record in result.records:\n epub = record.epublication\n\n # if the title doens't match, go to next record from aleph\n if not compare_names(epub.nazev, publication.title) >= 80:\n continue\n\n if not cmp_authors:\n return None # book already in database\n\n # compare authors names\n for author in epub.autori:\n # convert Aleph's author structure to string\n author_str = \"%s %s %s\" % (\n author.firstName,\n author.lastName,\n author.title\n )\n\n # normalize author data from `publication`\n pub_authors = map(lambda x: x.name, publication.authors)\n if type(pub_authors) not in [list, tuple, set]:\n pub_authors = [pub_authors]\n\n # try to compare authors from `publication` and Aleph\n for pub_author in pub_authors:\n if compare_names(author_str, pub_author) >= 50:\n return None # book already in database\n\n return publication # book is not in database", "def test_authors():\n assert(hasattr(tekel, '__authors__'))", "def test_identifier_display_multiple_idents(self):\n Identifier(value='9788307018867', type='ISBN_13', book=self.book).save()\n Identifier(value='1234567891', type='ISBN_10', book=self.book).save()\n result = self.book.identifier_display\n expected = ['ISBN_13: 9788307018867', 'ISBN_10: 1234567891']\n\n self.assertEqual(len(result), len(expected))\n for ident in expected:\n assert ident in result", "def test_legal_names(self):\n products = generate_products()\n\n for product in products:\n names = product.name.split(\" \")\n self.assertIn(names[0], ADJECTIVES)\n self.assertIn(names[1], NOUNS)", "def check_specific_names(citelist: list, specific_names: list) -> None:\n unique_names = list()\n nameset = set()\n for c in citelist:\n if c.name != \".\":\n clean = clean_specific_name(c.name)\n if (not (clean in nameset)) and (clean != \"\"):\n nameset |= {clean}\n unique_names.append(clean)\n unique_names.sort()\n for n in unique_names:\n is_found = False\n for s in specific_names:\n if n in s.variations:\n is_found = True\n if not is_found:\n report_error(\"Missing specific name: \" + n)", "def authors(self):\n authors = [\n n.people for n in self.pymbake_person_relationship.all()\n ]\n\n return authors", "def Authors(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('authors', default)\n return [HEP.AuthorObject(i) for i in tmp]", "def is_valid_book(current_author, inputed_name, availale_books):\n\tbook_info = []\n\tauthor_book = {}\n\n\tfor book in availale_books:\n\t\tauthor = book.author.username\n\t\tauthor_book[author] = book.book_name\n\t\tbook_info.append(author_book)\n\t\tauthor_book = {}\n\n\tfor book in book_info:\n\t\tfor author, book_name in book.items():\n\t\t\tif book_name == inputed_name and author == current_author:\n\t\t\t\treturn False\n\n\treturn True", "def get_papers_by_authors(authors_list, rows_max=999):\n fl = ['id', 'bibcode', 'title', 'citation_count',\n 'aff', 'author', 'citation', 'pub', 'reference',\n 'metrics', 'year', 'read_count', 'pubdate']\n\n authors = []\n for auth in authors_list:\n print(auth)\n papers = list(ads.SearchQuery(author=auth, rows=rows_max, fl=fl))\n authors.append(papers)\n\n byauth = pd.DataFrame()\n byauth['authors'] = authors_list\n byauth['ppr_list'] = authors\n\n # cantidad de papers por autor:\n npprs = []\n for p in 
authors:\n npprs.append(len(p))\n byauth['n_papers'] = npprs\n\n return byauth", "def test_distribution_with_many_clauses(self):\n spi_search = \"find a mele and brooks and holtkamp and o'connell\"\n inv_search = \"author:mele author:brooks author:holtkamp author:o'connell\"\n self._compare_searches(inv_search, spi_search)", "def test_legal_names(self):\r\n prod = generate_products()\r\n ADJECTIVES = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\r\n NOUNS = ['Anvil', 'Catapult', 'Disguise', 'Mousetrap', '???']\r\n for product in prod:\r\n self.assertIn(product.name.split(\" \")[0], ADJECTIVES)\r\n self.assertIn(product.name.split(\" \")[1], NOUNS)", "def test_legal_names(self):\n names = [prod.name for prod in generate_products()]\n sep = [(name.split()[0], name.split()[1]) for name in names]\n for name in sep:\n self.assertIn(name[0], ADJS)\n self.assertIn(name[1], NOUNS)" ]
[ "0.64632857", "0.63929", "0.6384049", "0.6332246", "0.62227446", "0.6216081", "0.6199922", "0.615091", "0.6108188", "0.6102564", "0.6097298", "0.6095483", "0.6072117", "0.60518503", "0.59991443", "0.5962012", "0.595125", "0.59264", "0.5892751", "0.5881848", "0.5834737", "0.5828909", "0.58197445", "0.5809561", "0.57794464", "0.5775128", "0.5758521", "0.5700122", "0.56798327", "0.5664223", "0.56634647", "0.5625657", "0.56066555", "0.55996037", "0.5594755", "0.5592338", "0.5590163", "0.5584985", "0.5581838", "0.55772346", "0.55725914", "0.5566805", "0.55618775", "0.55439454", "0.5522566", "0.55028015", "0.54765004", "0.5471765", "0.5449081", "0.54334944", "0.54203963", "0.5413042", "0.54050255", "0.5380121", "0.53732663", "0.53718436", "0.53696173", "0.5368257", "0.53660125", "0.5364547", "0.53607595", "0.5353228", "0.5352134", "0.53370756", "0.53242457", "0.5322533", "0.5318289", "0.5317554", "0.52978647", "0.52958393", "0.5290375", "0.5289825", "0.52893126", "0.5278537", "0.5271879", "0.52613914", "0.52512485", "0.52507955", "0.5239693", "0.5235492", "0.5220848", "0.521743", "0.52147645", "0.5214135", "0.5196843", "0.519592", "0.51956856", "0.51857597", "0.518018", "0.5160453", "0.51594216", "0.51488584", "0.5146678", "0.5141392", "0.5138946", "0.5137318", "0.51345485", "0.51332617", "0.51206005", "0.5110001" ]
0.68802696
0
Cross checks a list of authors against post
def assertAuthorsInPosts(context, authors, posts):
    cross_check(context, authors, posts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)", "def author_ManyToMany_entry_check(): #Old it was OneToMany before adding multiple authors\n import itertools\n entry_author_ids = itertools.chain(*Entry.objects.all().values_list('author_ids', flat=True))\n entry_author_ids_set = set(entry_author_ids)\n user_ids = set(User.objects.all().values_list('id',flat=True))\n\n author_id_not_in_user = entry_author_ids_set - user_ids\n\n if author_id_not_in_user:\n return (\"Error: There are entries without a correct cross relation with user: {}\"\n .format(\",\".join(str(s) for s in author_id_not_in_user)))\n else:\n return \"OK\"", "def authors_in_relation(context, data, authors):\n guids = [a.id for a in authors]\n guids = map( lambda x: str(x).replace('-', ''), guids)\n\n for guid in guids:\n context.assertTrue(unicode(guid) in data)", "def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]", "def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))", "def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])", "def assertAuthorsInComments(context, authors, comments):\n cross_check(context, authors, comments)", "def authors(self, key, value):\n _authors = self.get(\"authors\", [])\n item = build_ils_contributor(value)\n if item and item not in _authors:\n _authors.append(item)\n try:\n if \"u\" in value:\n other = [\"et al.\", \"et al\"]\n val_u = list(force_list(value.get(\"u\")))\n if [i for i in other if i in val_u]:\n self[\"other_authors\"] = True\n except UnexpectedValue:\n pass\n return _authors", "def get_authors_from_papers(papers):\n auth_set = set()\n for p in papers:\n auth_set.update(p['authors'])\n return list(auth_set)", "def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]", "def test_author_list_equality_with_invalid_authentication(self) -> None:\n\n # Let's check for a request with no authorization\n\n response: Response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(data, {\n 'detail': 'Authentication credentials were not provided.'\n })\n\n # Now lets check with an Author without permissions.\n\n # Select the underprivileged author randomly.\n author: Author = random.choice(self.authors)\n\n 
self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(author.get_key()))\n\n response: Response = self.client.get(self.url)\n data: typing.Dict[typing.Any, typing.Any] = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(data, {\n 'detail': 'You do not have permission to perform this action.'\n })", "def test_list_all_authors(self):\n response = self.client.get(reverse('authors') + '?page=2')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n self.assertTrue(response.context['is_paginated'] is True)\n self.assertTrue(len(response.context['author_list']) == 3)", "def get_author_citations(updated_redic_list, citedbydict, initial_author_dict, config):\n\n #sorry bout repeated code to get the tags\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n tagvals = {}\n for t in tags:\n try:\n x = config.get(config.get(\"rank_method\", \"function\"), t)\n tagvals[t] = x\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_author_dict\n\n #parse the tags\n mainauthortag = tagify(parse_tag(tagvals['first_author']))\n coauthortag = tagify(parse_tag(tagvals['additional_author']))\n extauthortag = tagify(parse_tag(tagvals['alternative_author_name']))\n if task_get_task_param('verbose') >= 9:\n write_message(\"mainauthortag \"+mainauthortag)\n write_message(\"coauthortag \"+coauthortag)\n write_message(\"extauthortag \"+extauthortag)\n\n author_cited_in = initial_author_dict\n if citedbydict:\n i = 0 #just a counter for debug\n write_message(\"Checking records referred to in new records\")\n for u in updated_redic_list:\n if (i % 1000 == 0):\n mesg = \"Author ref done \"+str(i)+\" of \"+str(len(updated_redic_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n if citedbydict.has_key(u):\n these_cite_k = citedbydict[u]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n authors = get_fieldvalues(u, mainauthortag)\n coauthl = get_fieldvalues(u, coauthortag)\n extauthl = get_fieldvalues(u, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author ref done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n #go through the dictionary again: all keys but search only if new records are cited\n write_message(\"Checking authors in new records\")\n i = 0\n for k in citedbydict.keys():\n if (i % 1000 == 0):\n mesg = \"Author cit done \"+str(i)+\" of \"+str(len(citedbydict.keys()))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n these_cite_k = citedbydict[k]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n #do things only if these_cite_k contains any new stuff\n intersec_list = list(set(these_cite_k)&set(updated_redic_list))\n if intersec_list:\n authors = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that 
are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author cit done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return author_cited_in", "def alt_authors(self, key, value):\n _authors = self.get(\"authors\", [])\n if _authors:\n for i, v in enumerate(force_list(value)):\n _authors[i].update({\"alternative_names\": clean_val(\"a\", v, str)})\n return _authors", "def get_coauthored_publications_by_authors(cached_list, cached_set, author1_name, author2_name):\n publications = { 'cdblp': [], 'dblp': [] }\n pub1 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author1_name)\n author2 = DBLPQuery.author_distinct(cached_list, cached_set, author2_name)\n #pub2 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author2_name)\n for cdblp_pub in pub1.get('cdblp', []):\n authors = set(cdblp_pub.get('authors', []))\n authors_en = set(map(lambda a: CDBLPAuthor.getEnglishName(a)['full_name'], authors))\n if author2.get('cdblp', {}).get('author_name', {}).get('zh') in authors or author2.get('dblp', {}).get('author_name') in authors_en:\n publications['cdblp'].append(cdblp_pub)\n\n for dblp_pub in pub1.get('dblp', []):\n authors = set(map(lambda a: a.get('name'), dblp_pub.get('authors', [])))\n if author2.get('dblp', {}).get('author_name') in authors or author2.get('cdblp', {}).get('author_name', {}).get('full_name') in authors:\n publications['dblp'].append(dblp_pub)\n\n return publications", "def test_author_filtering(self):\n # Get a valid author\n entry = Entry.objects.get(id=1)\n params = {\"author\": entry.first_author.id}\n\n self._test_filtering(**params)", "def test_absorbs_naked_author_search(self):\n invenio_search = \"author:ellis\"\n spi_search = \"author ellis\"\n self._compare_searches(invenio_search, spi_search)", "def extra_bibparse(db):\n for key,entry in db.entries.items():\n for auth in entry.persons[\"author\"]:\n if (\"Harrison\" not in auth.first_names or\n \"Chapman\" not in auth.last_names):\n entry.add_person(auth, \"otherauthor\")", "def test_refersto_author_multi_name(self):\n inv_search = 'author:ellis refersto:author:\"parke, s. j.\"'\n spi_search = 'find a ellis and refersto author \"parke, s. 
j.\"'\n self._compare_searches(inv_search, spi_search)", "def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)", "def test_author_list_equality_with_valid_authentication(self) -> None:\n\n # Set the Authorization header to the appropriate\n # format as the rest_framework expects using utils.\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(\n self.super_author.get_key()\n ))\n\n response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(data, self.serialized_data, msg=data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_author_reverse(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a j ellis'\n self._compare_searches(invenio_search, spires_search)", "def test_author_many_initials(self):\n inv_search = 'author:\"bach, p* d* q*\"'\n spi_search = 'find a p d q bach'\n self._compare_searches(inv_search, spi_search)", "def test_get_all_authors(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(self.user_author, format='json', HTTP_AUTHORIZATION='Token ' +token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))", "def make_author_list(res):\n try:\n r = [\", \".join([clean_txt(x['family']).capitalize(), clean_txt(x['given']).capitalize()]) for x in res['author']]\n except KeyError as e:\n print(\"No 'author' key, using 'Unknown Author'. 
You should edit the markdown file to change the name and citationkey.\")\n r = [\"Unknown Authors\"]\n return r", "def add_authors(self, author_data, instance):\n for idx, author in enumerate(author_data):\n Author.objects.create(dataset=instance, order=idx, author=author)", "def test_author_simple(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a ellis, j'\n self._compare_searches(invenio_search, spires_search)", "def test_retrieve_authors(self):\n sample_author()\n sample_author()\n\n res = self.client.get(reverse('authors'))\n authors = Author.objects.all()\n serializer = AuthorSerializer(authors, many=True)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def mixed_author_sources(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n \"authorsFemaleCount\": {\"$gt\": 0},\n \"authorsMaleCount\": {\"$gt\": 0},\n }\n },\n {\n \"$project\": {\n \"_id\": 1,\n \"outlet\": 1,\n \"authors\": 1,\n \"authorsMale\": 1,\n \"authorsFemale\": 1,\n \"authorsUnknown\": 1,\n \"sourcesMaleCount\": 1,\n \"sourcesFemaleCount\": 1,\n \"sourcesUnknownCount\": 1,\n }\n },\n {\n \"$group\": {\n \"_id\": \"$outlet\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalMaleSources\": {\"$sum\": \"$sourcesMaleCount\"},\n \"totalFemaleSources\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"totalUnknownSources\": {\"$sum\": \"$sourcesUnknownCount\"},\n }\n },\n ]\n return query", "def checkPaper(self,event=None):\r\n if self.title.getVal() not in self.paperList:\r\n self.paperList.append(self.title.getVal())\r\n self.paperList.sort()\r\n self.title.updateVals(self.paperList)\r\n return\r\n\r\n ## This section of code should probably go into setData. . 
.\r\n self.authorBox.clearData()\r\n\r\n cur = self.dbConn.execute(\"SELECT People.* FROM Papers JOIN People on Papers.PrimaryAuthor = People.PersonID WHERE Papers.Title = ?\",[self.title.getVal()])\r\n res = cur.fetchone()\r\n if res ==None:\r\n self.primeAuthor.setVal(\"No Author Found; Check database\")\r\n return\r\n self.primeAuthor.setVal(formatNameSQL(res))\r\n self.addPrimeAuthorFn()\r\n\r\n cur = self.dbConn.execute(\"SELECT People.* FROM Papers JOIN People on Papers.CorrespondingAuthor = People.PersonID WHERE Papers.Title = ?\",[self.title.getVal()])\r\n res = cur.fetchone()\r\n if res == None:\r\n self.correspond.setVal(self.primeAuthor.getVal())\r\n else:\r\n self.correspond.setVal(formatNameSQL(res))\r\n\r\n cur = self.dbConn.execute(\"SELECT People.* FROM Papers JOIN People JOIN CoAuthors ON Papers.paperID = CoAuthors.PaperID AND People.PersonID = CoAuthors.Author WHERE Papers.Title = ?\",[self.title.getVal()])\r\n res = cur.fetchall()\r\n if res == None:\r\n return\r\n for ln in res:\r\n curAuthor = str.format(formatNameSQL(ln))\r\n self.authorBox.addLine(curAuthor)\r\n self.coAuthor.setVal(curAuthor)", "def __add_publication(self, authors, publication):\n for author in authors:\n\n if author not in self.author_to_publications:\n self.author_to_publications[author] = set()\n self.author_to_publications[author].add(publication)", "def test_author_initials(self):\n inv_search = 'author:\"polyakov, a* m*\"'\n spi_search = 'find a a m polyakov'\n self._compare_searches(inv_search, spi_search)", "def find_relations_among_authors():\n for book in books:\n if len(books[book]) > 1:\n for i in range(len(books[book])):\n known_relations[books[book][i]] = books[book][:i] + books[book][i+1:]", "def has_duplicates_authors(L):\r\n # make a copy of t to avoid modifying the parameter\r\n s = L[:]\r\n s.sort()\r\n\r\n # check for adjacent elements that are equal\r\n for i in range(len(s)-1):\r\n if s[i] == s[i+1]:\r\n return True\r\n return False", "def test_author_sorted_articles(self):\n\n self.make_test('articles', ArticleListSerializer, 'author:articles')", "def display_authors(self, *args):\n return ', '.join(author.name for author in args[0].authors.all()[:3])", "def authors(author_ids):\n if author_ids is None:\n return ''\n else:\n ids = []\n for author_id in author_ids.split(','):\n ids.append(User.id == int(author_id))\n authors = User.query.filter(or_(*ids)).all()\n if authors is None:\n return ''\n else:\n return 'by ' + ', '.join([author.name for author in authors])", "def updateAuthors(self,event=None):\r\n self.popAuthors()\r\n self.primeAuthor.updateVals(self.authorList)\r\n self.coAuthor.updateVals(self.authorList)\r\n self.correspond.updateVals(self.authorList)", "def test_author_simplest(self):\n invenio_search = 'author:ellis'\n spires_search = 'find a ellis'\n self._compare_searches(invenio_search, spires_search)", "def filter_content_by_authors(self, authors, content_type=None):\n\n if content_type is None:\n is_article, is_podcast, is_video = True, True, True\n else:\n is_article, is_podcast, is_video = self.get_content_type(content_type)\n\n try:\n response = {\"status\": \"ok\"}\n if is_article:\n db_articles = self.db_connector.connect_to_collection(self.db_config[MONGODB][COLLECTION_ARTICLES])\n response[\"articles\"] = list(db_articles.find({\"authors\": {\"$in\": authors}}, {'_id': 0}))\n if is_podcast:\n db_podcasts = self.db_connector.connect_to_collection(self.db_config[MONGODB][COLLECTION_PODCASTS])\n response[\"podcasts\"] = list(db_podcasts.find( 
{\"authors\": {\"$in\": authors}}, {'_id': 0}))\n if is_video:\n db_videos = self.db_connector.connect_to_collection(self.db_config[MONGODB][COLLECTION_VIDEOS])\n response[\"videos\"] = list(db_videos.find({\"authors\": {\"$in\": authors}}, {'_id': 0}))\n return response\n except:\n return {\"status\": \"bad\"}", "def addAuthor2():\n\n author_list = list()\n\n authors = Author.objects.all()\n\n for author in authors:\n author_dict = dict()\n author_dict['id'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n author_dict['host'] = \"{}/api/\".format(author.host_url)\n author_dict['displayName'] = author.username\n author_dict['url'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n\n author_list.append(author_dict)\n\n return author_list", "def get_author_data():\n entry = mongo.db.Authors\n output = list()\n look_up_type = None\n if 'name' in request.args:\n look_up_type = 'name'\n print(request.args)\n if len(request.args['name']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['name'].strip('\"')\n name = entry.find({'name': {'$regex': value}})\n if name:\n for author in name:\n output.append({'name': author['name']})\n elif 'booktitle' in request.args:\n look_up_type = 'related_books'\n if len(request.args['booktitle']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['booktitle'].strip('\"')\n related_books = entry.find(\n {'author_books': {'$regex': value}})\n if related_books:\n for related in related_books:\n for title in related['author_books']:\n if value in title:\n output.append(({'related_books': title}))\n if len(output) == 0:\n return render_template('error.html', message=\"No Entries Found\"), 400\n return render_template('gottenAuthors.html', output=output, look_up_type=look_up_type), 200", "def corporate_authors(self, key, value):\n _corporate_authors = self.get(\"authors\", [])\n\n for v in force_list(value):\n if key == \"710__\":\n if \"a\" in v:\n _corporate_authors.append(\n {\n \"full_name\": clean_val(\"a\", v, str),\n \"type\": \"ORGANISATION\",\n }\n )\n else:\n self[\"authors\"] = collaborations(self, key, value)\n raise IgnoreKey(\"corporate_authors\")\n else:\n _corporate_authors.append(\n {\"full_name\": clean_val(\"a\", v, str), \"type\": \"ORGANISATION\"}\n )\n return _corporate_authors", "def get_authors(draft):\n authors = []\n for a in draft.authors.all():\n initial = ''\n prefix, first, middle, last, suffix = a.person.name_parts()\n if first:\n initial = first + '. '\n entry = '%s%s <%s>' % (initial,last,a.address)\n authors.append(entry)\n return authors", "def test_get_unchecked_authors():\n org = 'org'\n repo = 'repo'\n access_token = 'all-access'\n\n with patch('lib.get_release_pr', autospec=True, return_value=ReleasePR(\n body=FAKE_RELEASE_PR_BODY,\n version='1.2.3',\n url='http://url'\n )) as get_release_pr_mock:\n unchecked = get_unchecked_authors(access_token, org, repo)\n assert unchecked == {\"Alice Pote\"}\n get_release_pr_mock.assert_called_once_with(access_token, org, repo)", "def test_draft_list_only_display_authors_aids(client, contributor):\n\n AidFactory(name='Is this the real life?', author=contributor)\n AidFactory(name='Is this just fantasy?')\n\n client.force_login(contributor)\n drafts_url = reverse('aid_draft_list_view')\n res = client.get(drafts_url)\n\n content = res.content.decode('utf-8')\n assert 'Is this the real life?' in content\n assert 'Is this just fantasy?' 
not in content", "def test_exactauthor_simple(self):\n invenio_search = 'exactauthor:\"ellis, j\"'\n spires_search = 'find ea ellis, j'\n self._compare_searches(invenio_search, spires_search)", "def test_refersto_author(self):\n inv_search = 'refersto:author:kitty'\n spi_search = 'find refersto author kitty'\n self._compare_searches(inv_search, spi_search)", "def duplicated_code():\n author_ids = []\n updated_books = []\n updated_authors = []\n \n for author in mongo.db.authors.find():\n updated_authors.append(author)\n for book in mongo.db.books.find():\n \n # Create a new key/value pair in each book for author_name\n # by looking up the author_id and matching it to the author_name\n # of the selected author_id.\n \n book_title = book['title']\n author_id = book['author_id']\n \n for author in updated_authors:\n if author['_id'] == ObjectId(author_id):\n book['author_name'] = author['author_name']\n \n \n # Using the googlebooks API search for each book and retrieve\n # a thumbnail of the book.\n \n google_api_title = book_title.replace(' ', '+')\n book_isbn_num = book['isbn_num']\n with urllib.request.urlopen(BASE_API_LINK + google_api_title) as f:\n text = f.read()\n decoded_text = text.decode(\"utf-8\")\n obj = json.loads(decoded_text) \n google_book_obj = obj[\"items\"][0]\n book_href = google_book_obj['volumeInfo']\n if 'imageLinks' in book_href:\n book['href'] = book_href['imageLinks']['thumbnail']\n \n # Append book to new book dictionary.\n updated_books.append(book)\n \n return updated_books", "def test_author_full_first(self):\n invenio_search = 'author:\"ellis, john*\" or exactauthor:\"ellis, j *\" or exactauthor:\"ellis, j\" or exactauthor:\"ellis, jo\" or exactauthor:\"ellis, joh\" or author:\"ellis, john, *\"'\n spires_search = 'find a ellis, john'\n self._compare_searches(invenio_search, spires_search)", "def get_authors(self, blogid=1):\n return self.execute('wp.getAuthors', blogid, self.username, self.password)", "def add(self, author, post):\n if not author in self.authors:\n self.authors.append(author)\n self.posts[author].append(post)\n return", "def manual(self):\n\n\t\tfilter = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\", \"World War II\"],\n\n\t\tself.index[\"authorities\"] = [auth for auth in self.index[\"authorities\"] if auth not in filter]\n\n\t\tfor item in self.index[\"items\"]:\n\t\t\tself.index[\"items\"][item] = [auth for auth in self.index[\"items\"][item] if auth in self.index[\"authorities\"]]", "def find_authors(code):\n url = baseurl(code)\n page = req(url)\n soup = BeautifulSoup(page, 'lxml')\n addr = [t.attrs.get('content', None) \n for t in soup.find_all(\"meta\", {\"name\": \"citation_author_email\"})]\n \n # corresponding authors will have their email under another tag too\n corr = [t.find('a').attrs.get('href', None)\n for t in soup.find_all(None, {\"class\": \"author-corresp-email-link\"})]\n\n addr = [a for a in addr if a is not None]\n corr = [a.replace('mailto:', '') for a in corr if a is not None]\n\n return dict(corr=list(set(corr)), all=list(set(addr)))", "def test_citedby_author(self):\n inv_search = 'citedby:author:doggy'\n spi_search = 'find citedby author doggy'\n self._compare_searches(inv_search, spi_search)", "def test_authors():\n assert(hasattr(tekel, '__authors__'))", "def AuthorURLs(entry):\n a_URLs = ''\n for a in entry.getAuthors():\n url = a.get('homepage', ' ')\n a_URLs += \"%s and \" % url\n return a_URLs[:-5]", "def 
test_author_full_initial(self):\n inv_search = 'author:\"klebanov, ig* r*\" or exactauthor:\"klebanov, i r\"'\n spi_search = \"find a klebanov, ig.r.\"\n self._compare_searches(inv_search, spi_search)", "def __generate_author_string__(self, list_of_authors):\n author_string = \"\"\n return author_string.join(list_of_authors)", "def get_publications_by_author(cached_list, cached_set, author_name):\n publications = { 'dblp': [], 'cdblp': [] }\n author = DBLPQuery.author_distinct(cached_list, cached_set, author_name)\n\n if author['dblp'].__contains__('publications'):\n publications['dblp'] = author['dblp']['publications']\n# for pub in author['dblp']['publications']:\n# print(pub)\n\n if author['cdblp'].__contains__('publications'):\n publications['cdblp'] = author['cdblp']['publications']\n# for pub in author['cdblp']['publications']:\n# print(pub)\n return publications", "def authors(self):\n authors = [\n n.people for n in self.pymbake_person_relationship.all()\n ]\n\n return authors", "def unknown_author_sources(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n \"authorsFemaleCount\": 0,\n \"authorsMaleCount\": 0,\n }\n },\n {\n \"$project\": {\n \"_id\": 1,\n \"outlet\": 1,\n \"authors\": 1,\n \"authorsMale\": 1,\n \"authorsFemale\": 1,\n \"authorsUnknown\": 1,\n \"sourcesMaleCount\": 1,\n \"sourcesFemaleCount\": 1,\n \"sourcesUnknownCount\": 1,\n }\n },\n {\n \"$group\": {\n \"_id\": \"$outlet\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalMaleSources\": {\"$sum\": \"$sourcesMaleCount\"},\n \"totalFemaleSources\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"totalUnknownSources\": {\"$sum\": \"$sourcesUnknownCount\"},\n }\n },\n ]\n return query", "def author_articles(self):\n return ArticlePage.objects.live().filter(author=self).order_by('-date')", "def get_paper_authors(tree):\n\tpath = '//table/tr/th[text() = \"Glasgow Author(s) Enlighten ID:\"]/following-sibling::td/a'\n\t# Get list of <a> elements, each an author\n\tauthors = tree.xpath(path)\n\t# Make list of (author name, author url) pairs to return\n\tauthors = [(author.text, author.get(\"href\")) for author in authors]\n\n\treturn authors", "def merge_to_elastic(paper_authors, papers, authors, index_name):\n columns = list(papers.columns) + ['authors']\n for index, paper in papers.iterrows():\n merger = paper_authors.loc[paper_authors['paper_id'] == index]\n author_ids = merger['author_id'].values\n author_names = [authors.loc[authors['id'] == x, 'name'].values[0] for x in author_ids]\n paper['authors'] = author_names\n yield {\n \"_index\": index_name,\n \"_type\": \"_doc\",\n \"_id\" : f\"{index}\",\n \"_source\": filterKeys(paper, columns),\n }", "def authors_matrix( corpus ) :\n all = all_authors(corpus)\n row_dois = [x['DI'] for x in corpus]\n result = zeros( (len(corpus),len(all)), dtype = int32 )\n for paper in corpus :\n for item in authors( paper ) :\n result[ row_dois.index( paper['DI'] ) ][ all.index( item ) ] = 1\n\n return result, row_dois, all", "def Authors(self, default=[{}]):\n tmp = self.data.get('authors', default)\n return [HEP.AuthorReducedObject(i) for i in tmp]", "def make_citation_authors(res):\n if \"author\" in res.keys():\n first_author = res['author'][0]['family'] + \", \" + res['author'][0]['given']\n last_author = res['author'][-1]['given'] + \" \" + 
res['author'][-1]['family']\n middle_authors = \", \".join(\" \".join([x['given'], x['family']]) for x in res['author'][1:-1])\n #assemble authors\n author_string = first_author\n author_string = author_string + \", \" + middle_authors if middle_authors != '' else author_string\n author_string = author_string + \", and \" + last_author if len(res['author']) > 1 else author_string\n \n author_string = author_string + \".\" if author_string[-1] != \".\" else author_string\n else:\n author_string = \"Unknown Authors\"\n\n return clean_txt(author_string.capitalize())", "def test_create_authors(self):\n payload = {\n 'first_name': 'testname1',\n 'last_name': 'testname2',\n 'nickname': 'testnick1'\n }\n\n res = self.client.post(reverse('authors'), payload)\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n author = Author.objects.get(id=res.data['id'])\n\n for key in payload.keys():\n self.assertEqual(payload[key], getattr(author, key))", "def scrape_author(self, author_name, min_len=0, max_len=9999):\n search = sc.search_author(author_name)\n author = next(search)\n sc.fill(author, sections=['publications'])\n print(author.keys())\n with open(\n 'loadings\\\\authors_papers\\\\{}.txt'.format(author_name),\n 'w',\n encoding='utf-8'\n ) as file:\n for counter, pubblication in enumerate(author['publications']):\n\n if len(pubblication['bib']['title']) < min_len \\\n or len(pubblication['bib']['title']) > max_len:\n continue\n file.write(pubblication['bib']['title'])\n file.write('\\n')\n counter += 1\n if counter > self.hard_limit:\n break", "def validate_blog_post(self, req, postname, version, fields):\n for category in _parse_categories(fields['categories']):\n if category in self.draft:\n if req.authname == 'anonymous':\n return [(None, 'You need to be logged in to save as draft.')]\n elif req.authname != fields['author']:\n return [(None, \"Cannot save draft for an author that isn't you.\")]\n return []", "def format_authors(author_list):\n if isinstance(author_list, (list, tuple)):\n return \", \".join([format_authors(author) for author in author_list])\n else:\n if \", \" in author_list:\n author_list = author_list.split(\", \")\n author_list.reverse()\n author_list = \" \".join(author_list)\n elif \",\" in author_list:\n author_list = author_list.split(\",\")\n author_list.reverse()\n author_list = \" \".join(author_list)\n return author_list", "def check_intersections(db, topics, papers_by_topic):\n\n\t# Print the distribution of \"number of topics\"\n\tnum_subjects = []\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tnum_subjects.append(len(p.subject))\n\t\telse:\n\t\t\tnum_subjects.append(0)\n\tnum_subjects = np.array(num_subjects)\n\n\tfor i in range(np.max(num_subjects)+1):\n\t\tprint(\"Number of papers with\", i, \"topics:\", \n\t\t\tlen(np.where(num_subjects==i)[0]))\n\n\t# Figure out what's going on with triple-tagged guys (nothing weird)\n\t\"\"\"\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tif len(p.subject) > 2:\n\t\t\t\tprint(\"\\n\",p.title,\"\\n\\t\",p.container_title,\"\\n\\t\", p.subject)\n\t\t\t\t\n\t\t\t\tfor topic, topic_words in topics.items():\n\t\t\t\t\tprint(\"\\tCheck against '\" + topic + \"':\")\n\t\t\t\t\tfor journal in p.container_title:\n\t\t\t\t\t\tcheck_words(journal, topic_words, verbose=True)\n\t\"\"\"\n\n\t# Look in more detail at double-tagged guysfor p_hash, p in db.all_papers.items():\n\tcombos = defaultdict(int)\n\tfor p_hash, p in db.all_papers.items():\n\t\tif p.subject:\n\t\t\tif len(p.subject) 
== 2:\n\t\t\t\tcombos[frozenset(p.subject)] += 1\n\t\t\t\t#print(\"\\n\",p.title,\"\\n\\t\",p.container_title,\"\\n\\t\", p.subject)\n\t\t\t\tif p.subject == {'Computer Science', 'Biology'}:\n\t\t\t\t\t#print(\"\\n\",p.title,\"\\n\\t\",p.container_title)#,\"\\n\\t\", p.subject)\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\tbio_words = set()\n\t\t\t\t\tCS_words = set()\n\t\t\t\t\tfor journal in p.container_title:\n\t\t\t\t\t\tfor word in topics['Biology']:\n\t\t\t\t\t\t\tif journal.find(word) >= 0:\n\t\t\t\t\t\t\t\tbio_words.add(word)\n\t\t\t\t\t\tfor word in topics['Computer Science']:\n\t\t\t\t\t\t\tif journal.find(word) >= 0:\n\t\t\t\t\t\t\t\tCS_words.add(word)\n\n\t\t\t\t\t#print(\"\\tBiology words:\", bio_words)\n\t\t\t\t\t#print(\"\\tCS words:\", CS_words)\n\t\n\tfor k, v in combos.items():\n\t\tprint(k, v)", "def test_citedby_refersto_author(self):\n inv_search = 'refersto:citedby:author:penguin'\n spi_search = 'find refersto citedby author penguin'\n self._compare_searches(inv_search, spi_search)", "def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)", "def verify_author(author, connection):\n\n database = connection['test']\n collection = database['users']\n\n try:\n post = collection.find_one({\"_id\" : ObjectId(author)})\n except InvalidId:\n post = None\n\n if post is None:\n return False\n\n return True", "def getAuthors(self): #$NON-NLS-1$\r", "def any_author_exists(self):\n return bool(self.mapper.count())", "def get_papers_by_authors(authors_list, rows_max=999):\n fl = ['id', 'bibcode', 'title', 'citation_count',\n 'aff', 'author', 'citation', 'pub', 'reference',\n 'metrics', 'year', 'read_count', 'pubdate']\n\n authors = []\n for auth in authors_list:\n print(auth)\n papers = list(ads.SearchQuery(author=auth, rows=rows_max, fl=fl))\n authors.append(papers)\n\n byauth = pd.DataFrame()\n byauth['authors'] = authors_list\n byauth['ppr_list'] = authors\n\n # cantidad de papers por autor:\n npprs = []\n for p in authors:\n npprs.append(len(p))\n byauth['n_papers'] = npprs\n\n return byauth", "def test_collection_author_year_filtering(self):\n # Create a collection\n entries = Entry.objects.filter(id__in=(1, 5, 10, 15))\n collection = CollectionFactory(entries=entries)\n entry = Entry.objects.get(id=1)\n\n # Get a valid collection\n params = {\n \"collection\": collection.id,\n \"author\": entry.first_author.id,\n \"year\": entry.publication_date.year,\n }\n self._test_filtering(**params)", "def owns_post(self, post):\n return self.id == post.author.id", "def detect_author(user_to_tweets: Dict[str, List[tuple]], tweet_text: str) -> \\\n str:\n acc = []\n \n for keys in user_to_tweets:\n author_hashes = hashtag_seperator(user_to_tweets[keys])\n text_hashes = extract_hashtags(tweet_text)\n if set(text_hashes).issubset(author_hashes):\n acc.append(keys)\n if len(acc) == 1:\n return acc[0]\n return 'unknown'", "def authors(self):\n return self.properties.get('Authors', ClientValueCollection(SharedWithMeDocumentUser))", "def collection_special_author(user_id, author_id):\n\n another_user_id = author_id\n if (user_id == another_user_id):\n return \"self\"\n query = db_session.query(Collection_User).filter_by(\n user_id=user_id, another_user_id=another_user_id).all()\n if len(query) == 0:\n collect_usr = Collection_User(user_id=user_id,\n another_user_id=another_user_id,\n time=datetime.now())\n db_session.add(collect_usr)\n db_session.commit()\n 
update_collection_num(user_id, another_user_id, True)\n else:\n return \"already\"\n return \"success\"", "def test_author_sorted_topics(self):\n\n self.make_test('topics', TopicListSerializer, 'author:topics')", "def generate_id_for_authors():\n counter = 1\n for book in books:\n for author in books[book]:\n if not author in authors:\n authors[author] = f'Author/{counter}'\n counter += 1", "def male_author_sources(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n \"authorsFemaleCount\": 0,\n \"authorsMaleCount\": {\"$gt\": 0},\n }\n },\n {\n \"$project\": {\n \"_id\": 1,\n \"outlet\": 1,\n \"authors\": 1,\n \"authorsMale\": 1,\n \"authorsFemale\": 1,\n \"authorsUnknown\": 1,\n \"sourcesMaleCount\": 1,\n \"sourcesFemaleCount\": 1,\n \"sourcesUnknownCount\": 1,\n }\n },\n {\n \"$group\": {\n \"_id\": \"$outlet\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalMaleSources\": {\"$sum\": \"$sourcesMaleCount\"},\n \"totalFemaleSources\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"totalUnknownSources\": {\"$sum\": \"$sourcesUnknownCount\"},\n }\n },\n ]\n return query", "def test_get_authors_from_data(self):\n responses.add(\n responses.GET,\n \"https://openlibrary.org/authors/OL382982A\",\n json={\n \"name\": \"George Elliott\",\n \"personal_name\": \"George Elliott\",\n \"last_modified\": {\n \"type\": \"/type/datetime\",\n \"value\": \"2008-08-31 10:09:33.413686\",\n },\n \"remote_ids\": {\n \"isni\": \"000111\",\n },\n \"key\": \"/authors/OL453734A\",\n \"type\": {\"key\": \"/type/author\"},\n \"id\": 1259965,\n \"revision\": 2,\n },\n status=200,\n )\n results = self.connector.get_authors_from_data(self.work_data)\n result = list(results)[0]\n self.assertIsInstance(result, models.Author)\n self.assertEqual(result.name, \"George Elliott\")\n self.assertEqual(result.openlibrary_key, \"OL453734A\")\n self.assertEqual(result.isni, \"000111\")", "def getAuthor(self):\n\t\tself.authorList = [submission.author for submission in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.authorList", "def author_posts(request, author_id):\n id = int(author_id)\n user = myUser.objects.get(user_id=id)\n if user.is_admin:\n posts = Post.objects.select_related('author').order_by('-modified')\n else:\n posts = Post.objects.select_related('author').filter(author_id=id).order_by('-modified')\n\n return render(request, 'posts/authors.html',\n {'posts': posts})", "def merge_author_ranks(posts_df):\n # author rankings has duplicate rows for author_id == 3727\n # author_ranks = pd.concat([author_ranks_training, author_ranks_testing,\n # new_author_ranks]).drop_duplicates()\n\n # def merge_author_ranks(posts_df, authors_file, authors_summary_file):\n # authors summary file is inconsistently formated, requires fixing to ease read in\n\n author_ranks_train2017 = pd.read_csv(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-data',\n 'data',\n 'training',\n 'author_rankings.tsv'),\n header=None,\n names=['author_id', 'author_rank'], sep='\\t')\n\n author_ranks_test2017 = pd.read_csv(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-test',\n 'user-rankings.tsv'),\n header=None,\n names=['author_id', 'author_rank'], sep='\\t')\n\n author_ranks = pd.concat([author_ranks_train2017, author_ranks_test2017]).drop_duplicates(\n subset='author_id', keep='last')\n\n # need 
to open file because it has bad delimiter setup\n with open(os.path.join(DATA_DIR,\n 'raw',\n 'clpsych17-data',\n 'data',\n 'author_rankings_summary.tsv')) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n staffRanks = list()\n for line in content:\n if line.endswith(\" 1\"):\n line = line[:-1].strip()\n staffRanks.append(line)\n author_ranks['is_staff'] = [\n True if x in staffRanks else False for x in author_ranks['author_rank']]\n\n df = posts_df.reset_index().merge(author_ranks, how='left', on='author_id')\n df = df.set_index('post_id')\n\n assert(df.shape[0] == posts_df.shape[0])\n\n return df", "def test_user_can_change_as_author(self):\n self.assertTrue(self.story.user_can_change(self.user1))", "def authors_completion(self, terms):\n return self.db.execute(u'''SELECT * FROM \"authors\" WHERE name LIKE ? LIMIT 50''', (u\"%{}%\".format(terms),)).fetchall()", "def get_name_url_matches(author_name, html_tree):\n\n\t# Convert name to lower case - this will be searched against lower case text on the Enlighten page\n\tlower_name = author_name.lower()\n\t# Used to convert text in <a> tags to lower case in paths before checking if matches the name provided\n\tcase = 'translate(text(), \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\", \"abcdefghijklmnopqrstuvwxyz\")'\n\t# This is the path to look for <a> tags which contain the target name as text\n\t# N.B. contains() is used rather than equals as it can catch more cases\n\tpath = '//table/tr/td/ul/li/a[contains(%s, \\\"%s\\\")]' % (case, lower_name)\n\t# get the list of <a> elements whose text contains the name\n\telements = html_tree.xpath(path)\n\t# If target string was found, for each <a> element that contains it, make a\n\t# (text, url) tuple and create a list out of the resulting tuples\n\t# N.B. 
the href obtained from the element is concatenated to the base url as it is relative\n\tif elements:\n\t\t# have to concatenate as href is given as relative path\n\t\ttext_url_tups = [(elem.text, author_list_base + elem.get(\"href\")) for elem in elements]\n\telse:\n\t\ttext_url_tups = None\n\n\treturn text_url_tups", "def authors(self, authors):\n\n self._authors = authors", "def filter_publication(publication, cmp_authors=True):\n query = None\n isbn_query = False\n\n # there can be ISBN query or book title query\n if publication.optionals and publication.optionals.ISBN:\n query = aleph.ISBNQuery(publication.optionals.ISBN)\n isbn_query = True\n else:\n query = aleph.TitleQuery(publication.title)\n\n result = aleph.reactToAMQPMessage(aleph.SearchRequest(query), \"\")\n\n if not result.records:\n return publication # book is not in database\n\n # if there was results with this ISBN, compare titles of the books\n # (sometimes, there are different books with same ISBN because of human\n # errors)\n if isbn_query:\n for record in result.records:\n epub = record.epublication\n\n # try to match title of the book\n if compare_names(epub.nazev, publication.title) >= 80:\n return None # book already in database\n\n return publication\n\n # checks whether the details from returned EPublication match Publication's\n for record in result.records:\n epub = record.epublication\n\n # if the title doens't match, go to next record from aleph\n if not compare_names(epub.nazev, publication.title) >= 80:\n continue\n\n if not cmp_authors:\n return None # book already in database\n\n # compare authors names\n for author in epub.autori:\n # convert Aleph's author structure to string\n author_str = \"%s %s %s\" % (\n author.firstName,\n author.lastName,\n author.title\n )\n\n # normalize author data from `publication`\n pub_authors = map(lambda x: x.name, publication.authors)\n if type(pub_authors) not in [list, tuple, set]:\n pub_authors = [pub_authors]\n\n # try to compare authors from `publication` and Aleph\n for pub_author in pub_authors:\n if compare_names(author_str, pub_author) >= 50:\n return None # book already in database\n\n return publication # book is not in database", "def extend_author_field(author_field, cds_id):\n cds_authority_id = \"AUTHOR|(CDS){0}\".format(cds_id)\n if cds_authority_id not in field_get_subfield_values(author_field, '0'):\n field_add_subfield(author_field, \"0\", cds_authority_id)\n field_add_subfield(author_field, \"9\", \"#BEARD#\")\n return True\n\n return False", "def get_authors(git_url, from_sha, to_sha):\n matches = re.match(\"(?P<git_server>.*):(?P<git_repo>.*)\", git_url)\n if matches is None:\n return (1, f\"could not understand the git url {git_url} for authors detection\")\n git_server = matches.group(\"git_server\")\n git_repo = matches.group(\"git_repo\")\n if git_server is None:\n return (\n 1,\n f\"could not understand the git server in {git_url} for authors detection\",\n )\n if git_repo is None:\n return (\n 1,\n f\"could not understand the git repo in {git_url} for authors detection\",\n )\n\n if \"git.yelpcorp.com\" in git_server:\n ssh_command = (\n f\"ssh {git_server} authors-of-changeset {git_repo} {from_sha} {to_sha}\"\n )\n return _run(command=ssh_command, timeout=5.0)\n else:\n # TODO: PAASTA-16927: support getting authors for services on GHE\n return 1, f\"Fetching authors not supported for {git_server}\"", "def check_texts(text, author, stonks, check_function):\n\n tickers = find_symbols(text)\n if tickers:\n for symbol in tickers:\n if symbol in 
stonks.keys() and author not in stonks[symbol]:\n stonks[symbol].append(author)\n elif symbol not in stonks.keys() and check_function(symbol):\n stonks[symbol] = [author]\n\n return stonks" ]
[ "0.72640485", "0.6881462", "0.6769553", "0.65692633", "0.6538516", "0.63877165", "0.6319493", "0.62692463", "0.60791737", "0.6033017", "0.5904452", "0.5835843", "0.58165914", "0.579224", "0.5788046", "0.57680523", "0.57557714", "0.5726369", "0.5697314", "0.56669766", "0.5661073", "0.5650174", "0.5646964", "0.5643317", "0.5618991", "0.5617926", "0.55870587", "0.55630904", "0.55617523", "0.55578864", "0.5541555", "0.55287725", "0.54399323", "0.5404073", "0.5398683", "0.5393357", "0.5388983", "0.5365714", "0.5360761", "0.5357503", "0.535492", "0.5353603", "0.53334296", "0.53186285", "0.5317738", "0.5310497", "0.5308859", "0.5303806", "0.5302197", "0.5300615", "0.52985907", "0.5292835", "0.52753276", "0.527484", "0.52693486", "0.52685845", "0.525933", "0.5256848", "0.52534676", "0.52394223", "0.5236191", "0.5228641", "0.5226801", "0.52261853", "0.5213046", "0.52016175", "0.5198846", "0.5193031", "0.5188889", "0.5170193", "0.515628", "0.5146563", "0.51453257", "0.5134042", "0.5124727", "0.5122874", "0.51216805", "0.51197046", "0.511735", "0.51172423", "0.51157475", "0.510674", "0.50909394", "0.5088618", "0.5087806", "0.5084348", "0.508163", "0.5080739", "0.50806546", "0.5077745", "0.5073335", "0.5062784", "0.5047902", "0.5047412", "0.5035847", "0.50217956", "0.50217485", "0.50178903", "0.5012493", "0.50106776" ]
0.7797011
0
Cross checks a list of authors against comments
def assertAuthorsInComments(context, authors, comments): cross_check(context, authors, comments)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cross_check(context, authors, poscom):\n displaynames = [x['author']['displayname'] for x in poscom]\n\n for author in authors:\n if author.user.username not in displaynames:\n context.assertFalse(True, \"%s not in list\" %author.user.username)", "def assertAuthorsInPosts(context, authors, posts):\n cross_check(context, authors, posts)", "def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))", "def authors_in_relation(context, data, authors):\n guids = [a.id for a in authors]\n guids = map( lambda x: str(x).replace('-', ''), guids)\n\n for guid in guids:\n context.assertTrue(unicode(guid) in data)", "def get_author_citations(updated_redic_list, citedbydict, initial_author_dict, config):\n\n #sorry bout repeated code to get the tags\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n tagvals = {}\n for t in tags:\n try:\n x = config.get(config.get(\"rank_method\", \"function\"), t)\n tagvals[t] = x\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_author_dict\n\n #parse the tags\n mainauthortag = tagify(parse_tag(tagvals['first_author']))\n coauthortag = tagify(parse_tag(tagvals['additional_author']))\n extauthortag = tagify(parse_tag(tagvals['alternative_author_name']))\n if task_get_task_param('verbose') >= 9:\n write_message(\"mainauthortag \"+mainauthortag)\n write_message(\"coauthortag \"+coauthortag)\n write_message(\"extauthortag \"+extauthortag)\n\n author_cited_in = initial_author_dict\n if citedbydict:\n i = 0 #just a counter for debug\n write_message(\"Checking records referred to in new records\")\n for u in updated_redic_list:\n if (i % 1000 == 0):\n mesg = \"Author ref done \"+str(i)+\" of \"+str(len(updated_redic_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n if citedbydict.has_key(u):\n these_cite_k = citedbydict[u]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n authors = get_fieldvalues(u, mainauthortag)\n coauthl = get_fieldvalues(u, coauthortag)\n extauthl = get_fieldvalues(u, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author ref done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n #go through the dictionary again: all keys but search only if new records are cited\n write_message(\"Checking authors in new records\")\n i = 0\n for k in citedbydict.keys():\n if (i % 1000 == 0):\n mesg = \"Author cit done \"+str(i)+\" of \"+str(len(citedbydict.keys()))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n these_cite_k = citedbydict[k]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n #do things only if these_cite_k contains any new stuff\n intersec_list = list(set(these_cite_k)&set(updated_redic_list))\n if intersec_list:\n authors = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there 
already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author cit done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n return author_cited_in", "def author_ManyToMany_entry_check(): #Old it was OneToMany before adding multiple authors\n import itertools\n entry_author_ids = itertools.chain(*Entry.objects.all().values_list('author_ids', flat=True))\n entry_author_ids_set = set(entry_author_ids)\n user_ids = set(User.objects.all().values_list('id',flat=True))\n\n author_id_not_in_user = entry_author_ids_set - user_ids\n\n if author_id_not_in_user:\n return (\"Error: There are entries without a correct cross relation with user: {}\"\n .format(\",\".join(str(s) for s in author_id_not_in_user)))\n else:\n return \"OK\"", "def authors(self, key, value):\n _authors = self.get(\"authors\", [])\n item = build_ils_contributor(value)\n if item and item not in _authors:\n _authors.append(item)\n try:\n if \"u\" in value:\n other = [\"et al.\", \"et al\"]\n val_u = list(force_list(value.get(\"u\")))\n if [i for i in other if i in val_u]:\n self[\"other_authors\"] = True\n except UnexpectedValue:\n pass\n return _authors", "def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])", "def make_author_list(res):\n try:\n r = [\", \".join([clean_txt(x['family']).capitalize(), clean_txt(x['given']).capitalize()]) for x in res['author']]\n except KeyError as e:\n print(\"No 'author' key, using 'Unknown Author'. 
You should edit the markdown file to change the name and citationkey.\")\n r = [\"Unknown Authors\"]\n return r", "def get_coauthored_publications_by_authors(cached_list, cached_set, author1_name, author2_name):\n publications = { 'cdblp': [], 'dblp': [] }\n pub1 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author1_name)\n author2 = DBLPQuery.author_distinct(cached_list, cached_set, author2_name)\n #pub2 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author2_name)\n for cdblp_pub in pub1.get('cdblp', []):\n authors = set(cdblp_pub.get('authors', []))\n authors_en = set(map(lambda a: CDBLPAuthor.getEnglishName(a)['full_name'], authors))\n if author2.get('cdblp', {}).get('author_name', {}).get('zh') in authors or author2.get('dblp', {}).get('author_name') in authors_en:\n publications['cdblp'].append(cdblp_pub)\n\n for dblp_pub in pub1.get('dblp', []):\n authors = set(map(lambda a: a.get('name'), dblp_pub.get('authors', [])))\n if author2.get('dblp', {}).get('author_name') in authors or author2.get('cdblp', {}).get('author_name', {}).get('full_name') in authors:\n publications['dblp'].append(dblp_pub)\n\n return publications", "def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)", "def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]", "def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]", "def test_refersto_author_multi_name(self):\n inv_search = 'author:ellis refersto:author:\"parke, s. j.\"'\n spi_search = 'find a ellis and refersto author \"parke, s. 
j.\"'\n self._compare_searches(inv_search, spi_search)", "def get_authors_from_papers(papers):\n auth_set = set()\n for p in papers:\n auth_set.update(p['authors'])\n return list(auth_set)", "def test_list_all_authors(self):\n response = self.client.get(reverse('authors') + '?page=2')\n self.assertEqual(response.status_code, 200)\n self.assertTrue('is_paginated' in response.context)\n self.assertTrue(response.context['is_paginated'] is True)\n self.assertTrue(len(response.context['author_list']) == 3)", "def test_author_list_equality_with_invalid_authentication(self) -> None:\n\n # Let's check for a request with no authorization\n\n response: Response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(data, {\n 'detail': 'Authentication credentials were not provided.'\n })\n\n # Now lets check with an Author without permissions.\n\n # Select the underprivileged author randomly.\n author: Author = random.choice(self.authors)\n\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(author.get_key()))\n\n response: Response = self.client.get(self.url)\n data: typing.Dict[typing.Any, typing.Any] = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(data, {\n 'detail': 'You do not have permission to perform this action.'\n })", "def test_draft_list_only_display_authors_aids(client, contributor):\n\n AidFactory(name='Is this the real life?', author=contributor)\n AidFactory(name='Is this just fantasy?')\n\n client.force_login(contributor)\n drafts_url = reverse('aid_draft_list_view')\n res = client.get(drafts_url)\n\n content = res.content.decode('utf-8')\n assert 'Is this the real life?' in content\n assert 'Is this just fantasy?' 
not in content", "def test_author_many_initials(self):\n inv_search = 'author:\"bach, p* d* q*\"'\n spi_search = 'find a p d q bach'\n self._compare_searches(inv_search, spi_search)", "def make_citation_authors(res):\n if \"author\" in res.keys():\n first_author = res['author'][0]['family'] + \", \" + res['author'][0]['given']\n last_author = res['author'][-1]['given'] + \" \" + res['author'][-1]['family']\n middle_authors = \", \".join(\" \".join([x['given'], x['family']]) for x in res['author'][1:-1])\n #assemble authors\n author_string = first_author\n author_string = author_string + \", \" + middle_authors if middle_authors != '' else author_string\n author_string = author_string + \", and \" + last_author if len(res['author']) > 1 else author_string\n \n author_string = author_string + \".\" if author_string[-1] != \".\" else author_string\n else:\n author_string = \"Unknown Authors\"\n\n return clean_txt(author_string.capitalize())", "def check_comments(self, args):\n\n for submission in args.comments:\n if any(char.isalpha() for char in submission[1]) \\\n or self._illegal_chars.search(submission[1]) != None:\n raise ValueError", "def test_absorbs_naked_author_search(self):\n invenio_search = \"author:ellis\"\n spi_search = \"author ellis\"\n self._compare_searches(invenio_search, spi_search)", "def extra_bibparse(db):\n for key,entry in db.entries.items():\n for auth in entry.persons[\"author\"]:\n if (\"Harrison\" not in auth.first_names or\n \"Chapman\" not in auth.last_names):\n entry.add_person(auth, \"otherauthor\")", "def test_citedby_author(self):\n inv_search = 'citedby:author:doggy'\n spi_search = 'find citedby author doggy'\n self._compare_searches(inv_search, spi_search)", "def test_author_simple(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a ellis, j'\n self._compare_searches(invenio_search, spires_search)", "def test_author_initials(self):\n inv_search = 'author:\"polyakov, a* m*\"'\n spi_search = 'find a a m polyakov'\n self._compare_searches(inv_search, spi_search)", "def __generate_author_string__(self, list_of_authors):\n author_string = \"\"\n return author_string.join(list_of_authors)", "def find_authors(code):\n url = baseurl(code)\n page = req(url)\n soup = BeautifulSoup(page, 'lxml')\n addr = [t.attrs.get('content', None) \n for t in soup.find_all(\"meta\", {\"name\": \"citation_author_email\"})]\n \n # corresponding authors will have their email under another tag too\n corr = [t.find('a').attrs.get('href', None)\n for t in soup.find_all(None, {\"class\": \"author-corresp-email-link\"})]\n\n addr = [a for a in addr if a is not None]\n corr = [a.replace('mailto:', '') for a in corr if a is not None]\n\n return dict(corr=list(set(corr)), all=list(set(addr)))", "def test_author_full_initial(self):\n inv_search = 'author:\"klebanov, ig* r*\" or exactauthor:\"klebanov, i r\"'\n spi_search = \"find a klebanov, ig.r.\"\n self._compare_searches(inv_search, spi_search)", "def get_coauthors_by_author(cached_list, cached_set, author_name):\n author = DBLPQuery.author_distinct(cached_list, cached_set, author_name)\n coauthors = {}\n if author['dblp'].__contains__('coauthors'):\n for author_key in author['dblp']['coauthors']:\n coauthors[author_key] = { 'en': author_key, 'zh': '' }\n\n if author['cdblp'].__contains__('coauthors'):\n for author_key in author['cdblp']['coauthors']:\n if coauthors.__contains__(author_key['full_name']):\n coauthors[author_key['full_name']]['zh'] = author_key['zh']\n else:\n coauthors[author_key['full_name']] = { 
'en': author_key['full_name'], 'zh': author_key['zh'] }\n\n return coauthors", "def get_external_authors_between(base, head):\n\n # Get all authors\n authors = git(\"log\", f\"{base}..{head}\", \"--format=%aN|%aE\")\n authors = set(\n author.partition(\"|\")[0].rstrip()\n for author in authors if not (author.endswith((\"@google.com\"))))\n\n # Get all co-authors\n contributors = git(\n \"log\", f\"{base}..{head}\", \"--format=%(trailers:key=Co-authored-by)\"\n )\n\n coauthors = []\n for coauthor in contributors:\n if coauthor and not re.search(\"@google.com\", coauthor):\n coauthors.append(\n \" \".join(re.sub(r\"Co-authored-by: |<.*?>\", \"\", coauthor).split())\n )\n return \", \".join(sorted(authors.union(coauthors), key=str.casefold))", "def authors(author_ids):\n if author_ids is None:\n return ''\n else:\n ids = []\n for author_id in author_ids.split(','):\n ids.append(User.id == int(author_id))\n authors = User.query.filter(or_(*ids)).all()\n if authors is None:\n return ''\n else:\n return 'by ' + ', '.join([author.name for author in authors])", "def test_author_reverse(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a j ellis'\n self._compare_searches(invenio_search, spires_search)", "def test_get_all_authors(self):\n self.register_user()\n token = self.login_user()\n response = self.client.get(self.user_author, format='json', HTTP_AUTHORIZATION='Token ' +token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def has_duplicates_authors(L):\r\n # make a copy of t to avoid modifying the parameter\r\n s = L[:]\r\n s.sort()\r\n\r\n # check for adjacent elements that are equal\r\n for i in range(len(s)-1):\r\n if s[i] == s[i+1]:\r\n return True\r\n return False", "def clean_stub_authors(batch_size):\n # We get all the stub authors (created by disambiguation) from ES and we verify\n # in db if the returned records are stub (ES data might be outdated)\n stub_authors_query = Q(\"term\", stub=True)\n stub_authors_search = (\n AuthorsSearch().query(stub_authors_query).source([\"control_number\"])\n )\n stub_authors_control_numbers = [\n (\"aut\", str(author[\"control_number\"]))\n for author in stub_authors_search.params(scroll=\"60m\").scan()\n ]\n LOGGER.info(\n \"Inspecting potential stub authors with no linked papers\",\n nb_of_profiles=len(stub_authors_control_numbers),\n )\n for stub_authors_control_numbers_chunk in chunker(\n stub_authors_control_numbers, batch_size\n ):\n _clean_stub_authors(stub_authors_control_numbers_chunk)\n LOGGER.info(\"Successfully removed all stub authors\")", "def parse_authors():\n import subprocess\n try:\n output = subprocess.check_output(['git', 'shortlog', '-s'],\n universal_newlines=True)\n except Exception as ex:\n print('ex = {!r}'.format(ex))\n return []\n else:\n striped_lines = (l.strip() for l in output.split('\\n'))\n freq_authors = [line.split(None, 1) for line in striped_lines if line]\n freq_authors = sorted((int(f), a) for f, a in freq_authors)[::-1]\n # keep authors with uppercase letters\n authors = [a for f, a in freq_authors if a.lower() != a]\n return authors", "def test_add_author_notes(self):\n metadata = Metadata(DataSource.CONTENT_CAFE)\n content = self.data_file(\"author_notes.html\")\n self.http.queue_requests_response(200, 'text/html', content=content)\n self.api.add_author_notes(metadata, self.identifier, self.args)\n\n [notes] = metadata.links\n eq_(Hyperlink.AUTHOR, notes.rel)\n assert 'Brenda researched turtles' in notes.content\n\n # We incidentally figured out the book's title.\n 
eq_(\"Franklin's Christmas Gift\", metadata.title)", "def test_author_full_first(self):\n invenio_search = 'author:\"ellis, john*\" or exactauthor:\"ellis, j *\" or exactauthor:\"ellis, j\" or exactauthor:\"ellis, jo\" or exactauthor:\"ellis, joh\" or author:\"ellis, john, *\"'\n spires_search = 'find a ellis, john'\n self._compare_searches(invenio_search, spires_search)", "def find_relations_among_authors():\n for book in books:\n if len(books[book]) > 1:\n for i in range(len(books[book])):\n known_relations[books[book][i]] = books[book][:i] + books[book][i+1:]", "def get_authors(draft):\n authors = []\n for a in draft.authors.all():\n initial = ''\n prefix, first, middle, last, suffix = a.person.name_parts()\n if first:\n initial = first + '. '\n entry = '%s%s <%s>' % (initial,last,a.address)\n authors.append(entry)\n return authors", "def test_external_comment_acl(self):\n response = self.api.post(all_models.ExternalComment, data={\n \"external_comment\": {\n \"id\": 1,\n \"external_id\": 1,\n \"external_slug\": factories.random_str(),\n \"description\": \"test comment\",\n \"context\": None,\n \"access_control_list\": {\n \"Admin\": [\n {\n \"email\": \"[email protected]\",\n \"name\": \"user1\",\n },\n ],\n },\n }\n })\n\n self.assert201(response)\n comment = all_models.ExternalComment.query.get(1)\n comment_admin = comment.get_persons_for_rolename(\"Admin\")\n self.assertEqual(\n [i.email for i in comment_admin],\n [\"[email protected]\"]\n )", "def __citation_correction(self, bs, ground_truth):\n bs_ref = bs.findNext('bibl')\n gt_ref = ground_truth.findNext('ref')\n while gt_ref is not None:\n if gt_ref.find('article-title') != bs_ref.title:\n pass\n gt_ref = gt_ref.findNext('ref')", "def getAuthors(self): #$NON-NLS-1$\r", "def test_citedby_refersto_author(self):\n inv_search = 'refersto:citedby:author:penguin'\n spi_search = 'find refersto citedby author penguin'\n self._compare_searches(inv_search, spi_search)", "def test_author_filtering(self):\n # Get a valid author\n entry = Entry.objects.get(id=1)\n params = {\"author\": entry.first_author.id}\n\n self._test_filtering(**params)", "def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))", "def _parse_mercurial_author(data, id_gen):\n angled = ur'(?P<author>.+?) <(?P<email>.+?)>'\n paren = ur'(?P<email>.+?) \\((?P<author>.+?)\\)'\n simple = ur'(?P<author>[^,]+)'\n author_list = []\n for regex in (angled, paren, simple):\n # Watch out for commas separating multiple names.\n regex += u'(,\\s*)?'\n for match in re.finditer(regex, data):\n # Watch out for suffixes like 'Jr.' 
when they are comma-separated\n # from the name and thus cause issues when *all* names are only\n # separated by commas.\n match_dict = match.groupdict()\n author = match_dict['author']\n if not author.partition(' ')[1] and author.endswith('.'):\n prev_author = author_list.pop()\n author = ', '.join([prev_author, author])\n if u'email' not in match_dict:\n email = ''\n else:\n email = match_dict['email']\n author_list.append((author, email))\n else:\n # If authors were found then stop searching as only expect one\n # style of author citation.\n if author_list:\n break\n author = Author(author_list[0])\n user = author.first_last\n email = author.email\n uid = id_gen[user]\n return (uid, user, email)", "def alt_authors(self, key, value):\n _authors = self.get(\"authors\", [])\n if _authors:\n for i, v in enumerate(force_list(value)):\n _authors[i].update({\"alternative_names\": clean_val(\"a\", v, str)})\n return _authors", "def test_get_unchecked_authors():\n org = 'org'\n repo = 'repo'\n access_token = 'all-access'\n\n with patch('lib.get_release_pr', autospec=True, return_value=ReleasePR(\n body=FAKE_RELEASE_PR_BODY,\n version='1.2.3',\n url='http://url'\n )) as get_release_pr_mock:\n unchecked = get_unchecked_authors(access_token, org, repo)\n assert unchecked == {\"Alice Pote\"}\n get_release_pr_mock.assert_called_once_with(access_token, org, repo)", "def test_get_authors_from_data(self):\n responses.add(\n responses.GET,\n \"https://openlibrary.org/authors/OL382982A\",\n json={\n \"name\": \"George Elliott\",\n \"personal_name\": \"George Elliott\",\n \"last_modified\": {\n \"type\": \"/type/datetime\",\n \"value\": \"2008-08-31 10:09:33.413686\",\n },\n \"remote_ids\": {\n \"isni\": \"000111\",\n },\n \"key\": \"/authors/OL453734A\",\n \"type\": {\"key\": \"/type/author\"},\n \"id\": 1259965,\n \"revision\": 2,\n },\n status=200,\n )\n results = self.connector.get_authors_from_data(self.work_data)\n result = list(results)[0]\n self.assertIsInstance(result, models.Author)\n self.assertEqual(result.name, \"George Elliott\")\n self.assertEqual(result.openlibrary_key, \"OL453734A\")\n self.assertEqual(result.isni, \"000111\")", "def test_author_simplest(self):\n invenio_search = 'author:ellis'\n spires_search = 'find a ellis'\n self._compare_searches(invenio_search, spires_search)", "def test_refersto_author(self):\n inv_search = 'refersto:author:kitty'\n spi_search = 'find refersto author kitty'\n self._compare_searches(inv_search, spi_search)", "def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)", "def _authors(fname):\n if not os.path.exists(fname):\n raise RuntimeError(\"File {} not found\".format(fname))\n with open(fname, \"r\") as obj:\n lines = [_tostr(item.strip()) for item in obj.readlines()]\n regexp = re.compile(r\"(?:\\s*\\*\\s+)?(.*)\\s+<(.*)>.*\")\n for line in lines:\n match = regexp.match(line)\n if match:\n name, email = match.groups()\n yield name, email", "def _get_authors_list():\n\n articles = os.listdir(\"../data/\")\n authors = []\n for article in articles:\n with open(\"../data/\" + article, 'r') as file:\n lines = file.readlines()\n author = tuple(\n line.replace(\"\\n\", \"\").split()[1] for line in lines\n if \"Автор:\" in line\n )[0]\n authors.append(author)\n\n return authors", "def select(ratio_limit, authors):\n good = list()\n bad = list()\n sum = 0\n sum += len(authors)\n for author in authors:\n if 
author['commits']['ratio'] <= ratio_limit:\n good.append(author)\n else:\n bad.append(author)\n assert len(good) + len(bad) == sum\n log_debug(None, 'Good: {0[0]}, Bad: {0[1]}, Limit: {0[2]}', (len(good), len(bad), ratio_limit))\n return good, bad", "def corporate_authors(self, key, value):\n _corporate_authors = self.get(\"authors\", [])\n\n for v in force_list(value):\n if key == \"710__\":\n if \"a\" in v:\n _corporate_authors.append(\n {\n \"full_name\": clean_val(\"a\", v, str),\n \"type\": \"ORGANISATION\",\n }\n )\n else:\n self[\"authors\"] = collaborations(self, key, value)\n raise IgnoreKey(\"corporate_authors\")\n else:\n _corporate_authors.append(\n {\"full_name\": clean_val(\"a\", v, str), \"type\": \"ORGANISATION\"}\n )\n return _corporate_authors", "def AuthorURLs(entry):\n a_URLs = ''\n for a in entry.getAuthors():\n url = a.get('homepage', ' ')\n a_URLs += \"%s and \" % url\n return a_URLs[:-5]", "def test_author_list_equality_with_valid_authentication(self) -> None:\n\n # Set the Authorization header to the appropriate\n # format as the rest_framework expects using utils.\n self.client.credentials(HTTP_AUTHORIZATION=u.auth_header(\n self.super_author.get_key()\n ))\n\n response = self.client.get(self.url)\n data = u.get_json(response)\n\n self.assertEqual(data, self.serialized_data, msg=data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def duplicated_code():\n author_ids = []\n updated_books = []\n updated_authors = []\n \n for author in mongo.db.authors.find():\n updated_authors.append(author)\n for book in mongo.db.books.find():\n \n # Create a new key/value pair in each book for author_name\n # by looking up the author_id and matching it to the author_name\n # of the selected author_id.\n \n book_title = book['title']\n author_id = book['author_id']\n \n for author in updated_authors:\n if author['_id'] == ObjectId(author_id):\n book['author_name'] = author['author_name']\n \n \n # Using the googlebooks API search for each book and retrieve\n # a thumbnail of the book.\n \n google_api_title = book_title.replace(' ', '+')\n book_isbn_num = book['isbn_num']\n with urllib.request.urlopen(BASE_API_LINK + google_api_title) as f:\n text = f.read()\n decoded_text = text.decode(\"utf-8\")\n obj = json.loads(decoded_text) \n google_book_obj = obj[\"items\"][0]\n book_href = google_book_obj['volumeInfo']\n if 'imageLinks' in book_href:\n book['href'] = book_href['imageLinks']['thumbnail']\n \n # Append book to new book dictionary.\n updated_books.append(book)\n \n return updated_books", "def format_authors(author_list):\n if isinstance(author_list, (list, tuple)):\n return \", \".join([format_authors(author) for author in author_list])\n else:\n if \", \" in author_list:\n author_list = author_list.split(\", \")\n author_list.reverse()\n author_list = \" \".join(author_list)\n elif \",\" in author_list:\n author_list = author_list.split(\",\")\n author_list.reverse()\n author_list = \" \".join(author_list)\n return author_list", "def ConfirmAllowedCopyrightHolder(holder):\n return holder in ALLOWED_COPYRIGHT_HOLDERS", "def test_by_author(self):\n self.assert_equal(MinusRecord.objects.count(),11)\n self.go200('minus_by_author', ['Test0'])\n self.find(CR % 'Test0')\n for i in xrange(10):\n if i % 2 == 0:\n self.find(CR % ('test'+str(i)))\n else:\n self.notfind(CR % ('test'+str(i)))\n self.find(CR % \"midi_rec\")", "def refine_author(c):\n c = strip_some_punct(c)\n c = strip_numbers(c)\n c = strip_all_unbalanced_parens(c)\n c = c.split()\n # this is hard to 
catch otherwise, unless we split the author vs copyright grammar in two\n if c[0].lower() == 'author':\n c = c[1:]\n c = u' '.join(c)\n return c.strip()", "def updateAuthors(self,event=None):\r\n self.popAuthors()\r\n self.primeAuthor.updateVals(self.authorList)\r\n self.coAuthor.updateVals(self.authorList)\r\n self.correspond.updateVals(self.authorList)", "def third_level(tickets):\n tickets = int(tickets)\n authors = find_authors(book_csv, '1812-1870')\n others = find_other_authors(book_csv, '1812-1870')\n\n four_authors = []\n wrong_author = random.choice(others)\n four_authors.append(wrong_author)\n\n three_authors = [[auth for auth in random.choice(authors) if auth not in four_authors] for _ in range(3)]\n three_authors = [''.join(x) for x in three_authors]\n four_authors += three_authors\n\n if len(four_authors) < 4:\n for _ in range(4-len(four_authors)):\n new_author = random.choice(authors)\n if new_author not in four_authors:\n four_authors.append(new_author)\n\n four_authors = '\\n'.join(four_authors)\n print()\n timeprint(\"Here are authors you have to choose from:\")\n while True:\n print(\"-----------------------------------------------\")\n timeprint(four_authors)\n print(\"-----------------------------------------------\")\n print()\n timeprint(\"Enter the author's name who did not write when \\\nCharles Dickens did:\")\n user_input = input()\n if user_input == wrong_author:\n print()\n tickets = tickets + 3\n timeprint(\"Yes! you are right. Let's move on to level 4\")\n break\n\n if user_input in three_authors:\n print()\n timeprint(\"Oops, wrongs answer :(\")\n four_authors = four_authors.replace(user_input, '')\n four_authors = four_authors.strip()\n timeprint(\"Here are authors left you have to choose from:\")\n print(\"-----------------------------------------------\")\n timeprint(four_authors)\n print(\"-----------------------------------------------\")\n print()\n user_input = input()\n\n if user_input == wrong_author:\n print()\n tickets += 2\n timeprint(\"Yes! you are right. Let's move on to level 4\")\n break\n\n if user_input in three_authors:\n print()\n timeprint(\"Oops, wrongs answer :( You have only one chance\\\n left to complete this level\")\n four_authors = four_authors.replace(user_input, '')\n four_authors = four_authors.strip()\n timeprint(\"Here are authors left you have to choose from:\")\n print(\"-----------------------------------------------\")\n timeprint(four_authors)\n print(\"-----------------------------------------------\")\n print()\n user_input = input()\n\n if user_input in three_authors:\n print()\n timeprint('You failed to complete this level without \\\nmistakes :( Lets move to level 4')\n break\n if user_input == wrong_author:\n print()\n tickets = tickets + 1\n timeprint(\"Yes! you are right. Let's move on to level 4\")\n break\n else:\n print()\n timeprint('Oops, wrong input. Try again')\n\n else:\n print()\n timeprint('Oops, wrong input. Try again')\n\n else:\n print()\n timeprint('Oops, wrong input. 
Try again')\n\n return tickets", "def authors_matrix( corpus ) :\n all = all_authors(corpus)\n row_dois = [x['DI'] for x in corpus]\n result = zeros( (len(corpus),len(all)), dtype = int32 )\n for paper in corpus :\n for item in authors( paper ) :\n result[ row_dois.index( paper['DI'] ) ][ all.index( item ) ] = 1\n\n return result, row_dois, all", "def test_exactauthor_simple(self):\n invenio_search = 'exactauthor:\"ellis, j\"'\n spires_search = 'find ea ellis, j'\n self._compare_searches(invenio_search, spires_search)", "def scrape_author(self, author_name, min_len=0, max_len=9999):\n search = sc.search_author(author_name)\n author = next(search)\n sc.fill(author, sections=['publications'])\n print(author.keys())\n with open(\n 'loadings\\\\authors_papers\\\\{}.txt'.format(author_name),\n 'w',\n encoding='utf-8'\n ) as file:\n for counter, pubblication in enumerate(author['publications']):\n\n if len(pubblication['bib']['title']) < min_len \\\n or len(pubblication['bib']['title']) > max_len:\n continue\n file.write(pubblication['bib']['title'])\n file.write('\\n')\n counter += 1\n if counter > self.hard_limit:\n break", "def get_author_and_comment_count_per_sub(config: Config, sanitized_authors: dict):\n\n start = time.time()\n raw_result = get_raw_author_and_comment_count_per_sub(config, sanitized_authors)\n end = time.time()\n print(f\"get_raw_author_and_comment_count_per_sub took {end - start} seconds\")\n\n start = time.time()\n sanitized_result = sanitize_author_and_comment_count_per_sub(raw_result)\n end = time.time()\n print(f\"sanitize_author_and_comment_count_per_sub took {end - start} seconds\")\n\n return sanitized_result", "def mixed_author_sources(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n \"authorsFemaleCount\": {\"$gt\": 0},\n \"authorsMaleCount\": {\"$gt\": 0},\n }\n },\n {\n \"$project\": {\n \"_id\": 1,\n \"outlet\": 1,\n \"authors\": 1,\n \"authorsMale\": 1,\n \"authorsFemale\": 1,\n \"authorsUnknown\": 1,\n \"sourcesMaleCount\": 1,\n \"sourcesFemaleCount\": 1,\n \"sourcesUnknownCount\": 1,\n }\n },\n {\n \"$group\": {\n \"_id\": \"$outlet\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalMaleSources\": {\"$sum\": \"$sourcesMaleCount\"},\n \"totalFemaleSources\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"totalUnknownSources\": {\"$sum\": \"$sourcesUnknownCount\"},\n }\n },\n ]\n return query", "def test_super_short_author_name(self):\n spi_search = \"fin a er and cn cms\"\n inv_search = \"author:er collaboration:cms\"\n self._compare_searches(inv_search, spi_search)", "def display_authors(self, *args):\n return ', '.join(author.name for author in args[0].authors.all()[:3])", "def test_user_can_change_as_author(self):\n self.assertTrue(self.story.user_can_change(self.user1))", "def merge(self):\n commits = self._github_api.get_pr_commits(self.number)\n\n def format_commit_author(commit):\n author = commit['commit']['author']\n name = author['name']\n email = author['email']\n return f'{name} <{email}>'\n commit_authors = [format_commit_author(commit) for commit in commits]\n co_authored_by_re = re.compile(\n r'^Co-authored-by:\\s*(.*)', re.MULTILINE)\n\n def extract_co_authors(commit):\n message = commit['commit']['message']\n return co_authored_by_re.findall(message)\n commit_co_authors = []\n for commit in commits:\n 
commit_co_authors.extend(extract_co_authors(commit))\n\n all_commit_authors = commit_authors + commit_co_authors\n distinct_authors = sorted(set(all_commit_authors),\n key=lambda x: commit_authors.count(x),\n reverse=True)\n\n for i, author in enumerate(distinct_authors):\n print(\"Author {}: {}\".format(i + 1, author))\n\n if len(distinct_authors) > 1:\n primary_author, distinct_other_authors = get_primary_author(\n self.cmd, distinct_authors)\n else:\n # If there is only one author, do not prompt for a lead author\n primary_author = distinct_authors.pop()\n distinct_other_authors = []\n\n commit_title = f'{self.title} (#{self.number})'\n commit_message_chunks = []\n if self.body is not None:\n # Remove comments (i.e. <-- comment -->) from the PR description.\n body = re.sub(r\"<!--.*?-->\", \"\", self.body, flags=re.DOTALL)\n # avoid github user name references by inserting a space after @\n body = re.sub(r\"@(\\w+)\", \"@ \\\\1\", body)\n commit_message_chunks.append(body)\n\n committer_name = run_cmd(\"git config --get user.name\").strip()\n committer_email = run_cmd(\"git config --get user.email\").strip()\n\n authors = (\"Authored-by:\" if len(distinct_other_authors) == 0\n else \"Lead-authored-by:\")\n authors += \" %s\" % primary_author\n if len(distinct_authors) > 0:\n authors += \"\\n\" + \"\\n\".join([\"Co-authored-by: %s\" % a\n for a in distinct_other_authors])\n authors += \"\\n\" + \"Signed-off-by: %s <%s>\" % (committer_name,\n committer_email)\n commit_message_chunks.append(authors)\n\n commit_message = \"\\n\\n\".join(commit_message_chunks)\n\n # Normalize line ends and collapse extraneous newlines. We allow two\n # consecutive newlines for paragraph breaks but not more.\n commit_message = \"\\n\".join(commit_message.splitlines())\n commit_message = re.sub(\"\\n{2,}\", \"\\n\\n\", commit_message)\n\n if DEBUG:\n print(\"*** Commit title ***\")\n print(commit_title)\n print()\n print(\"*** Commit message ***\")\n print(commit_message)\n\n if DEBUG:\n merge_hash = None\n else:\n result = self._github_api.merge_pr(self.number,\n commit_title,\n commit_message)\n if not result['merged']:\n message = result['message']\n self.cmd.fail(f'Failed to merge pull request: {message}')\n merge_hash = result['sha']\n\n print(\"Pull request #%s merged!\" % self.number)\n print(\"Merge hash: %s\" % merge_hash)", "def _on_authors_list(self, evt):\n \n # raise authors management dialog\n dlg = AuthorsView(self, self._library)\n response = dlg.ShowModal()\n dlg.Destroy()\n \n # check response\n if response != wx.ID_OK:\n return\n \n # refresh collections view\n self._collections_view.UpdateCounts()\n \n # refresh articles view\n self._articles_view.ShowArticles()", "def check_texts(text, author, stonks, check_function):\n\n tickers = find_symbols(text)\n if tickers:\n for symbol in tickers:\n if symbol in stonks.keys() and author not in stonks[symbol]:\n stonks[symbol].append(author)\n elif symbol not in stonks.keys() and check_function(symbol):\n stonks[symbol] = [author]\n\n return stonks", "def _validate_contributors_summary(cls, item):\n contributor_ids_from_contributors_summary = (\n list(item.contributors_summary.keys()))\n if sorted(item.contributor_ids) != sorted(\n contributor_ids_from_contributors_summary):\n cls._add_error(\n 'contributors %s' % (\n base_model_validators.ERROR_CATEGORY_SUMMARY_CHECK),\n 'Entity id %s: Contributor ids: %s do not match the '\n 'contributor ids obtained using contributors summary: %s' % (\n item.id, sorted(item.contributor_ids),\n 
sorted(contributor_ids_from_contributors_summary)))", "def get_authors():\n authors = []\n authorfile = os.path.join('doc', 'authors.txt')\n with codecs.open(authorfile, 'r', 'utf-8') as f:\n for line in f:\n line = line.strip()\n if line and not line.startswith(u'#'):\n authors.append(line)\n return u\", \".join(authors)", "def get_authors(git_url, from_sha, to_sha):\n matches = re.match(\"(?P<git_server>.*):(?P<git_repo>.*)\", git_url)\n if matches is None:\n return (1, f\"could not understand the git url {git_url} for authors detection\")\n git_server = matches.group(\"git_server\")\n git_repo = matches.group(\"git_repo\")\n if git_server is None:\n return (\n 1,\n f\"could not understand the git server in {git_url} for authors detection\",\n )\n if git_repo is None:\n return (\n 1,\n f\"could not understand the git repo in {git_url} for authors detection\",\n )\n\n if \"git.yelpcorp.com\" in git_server:\n ssh_command = (\n f\"ssh {git_server} authors-of-changeset {git_repo} {from_sha} {to_sha}\"\n )\n return _run(command=ssh_command, timeout=5.0)\n else:\n # TODO: PAASTA-16927: support getting authors for services on GHE\n return 1, f\"Fetching authors not supported for {git_server}\"", "def addAuthor2():\n\n author_list = list()\n\n authors = Author.objects.all()\n\n for author in authors:\n author_dict = dict()\n author_dict['id'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n author_dict['host'] = \"{}/api/\".format(author.host_url)\n author_dict['displayName'] = author.username\n author_dict['url'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n\n author_list.append(author_dict)\n\n return author_list", "def within_discussion_comment_and_user_anonymization(comment_gen,\r\n extract_comment_name,\r\n extract_user_name,\r\n anonymous_coward_name):\r\n comment_name_set = list()\r\n user_name_set = list()\r\n\r\n append_comment_name = comment_name_set.append\r\n append_user_name = user_name_set.append\r\n\r\n ####################################################################################################################\r\n # Extract comment and user name from the initial post.\r\n ####################################################################################################################\r\n initial_post = next(comment_gen)\r\n\r\n initial_post_name = extract_comment_name(initial_post)\r\n op_name = extract_user_name(initial_post)\r\n\r\n append_comment_name(initial_post_name)\r\n append_user_name(op_name)\r\n\r\n ####################################################################################################################\r\n # Iterate over all comments.\r\n ####################################################################################################################\r\n for comment in comment_gen:\r\n comment_name = extract_comment_name(comment)\r\n commenter_name = extract_user_name(comment)\r\n\r\n append_comment_name(comment_name)\r\n append_user_name(commenter_name)\r\n\r\n ####################################################################################################################\r\n # Perform anonymization.\r\n ####################################################################################################################\r\n # Remove duplicates and then remove initial post name because we want to give it id 0.\r\n comment_name_set = set(comment_name_set)\r\n comment_name_set.remove(initial_post_name)\r\n\r\n # Remove duplicates and then remove OP because we want to give them id 0.\r\n user_name_set = set(user_name_set)\r\n 
user_name_set.remove(op_name)\r\n\r\n # Anonymize.\r\n within_discussion_comment_anonymize = dict(zip(comment_name_set, range(1, len(comment_name_set) + 1)))\r\n within_discussion_comment_anonymize[initial_post_name] = 0 # Initial Post gets id 0.\r\n\r\n within_discussion_user_anonymize = dict(zip(user_name_set, range(1, len(user_name_set) + 1)))\r\n within_discussion_user_anonymize[op_name] = 0 # Original Poster gets id 0.\r\n\r\n comment_name_set.add(initial_post_name)\r\n user_name_set.add(op_name)\r\n\r\n if anonymous_coward_name is not None:\r\n # if op_name == anonymous_coward_name:\r\n # print(\"The Original Poster is Anonymous.\")\r\n try:\r\n within_discussion_anonymous_coward = within_discussion_user_anonymize[anonymous_coward_name]\r\n except KeyError:\r\n within_discussion_anonymous_coward = None\r\n else:\r\n within_discussion_anonymous_coward = None\r\n\r\n return comment_name_set,\\\r\n user_name_set,\\\r\n within_discussion_comment_anonymize,\\\r\n within_discussion_user_anonymize,\\\r\n within_discussion_anonymous_coward", "def checkPaper(self,event=None):\r\n if self.title.getVal() not in self.paperList:\r\n self.paperList.append(self.title.getVal())\r\n self.paperList.sort()\r\n self.title.updateVals(self.paperList)\r\n return\r\n\r\n ## This section of code should probably go into setData. . .\r\n self.authorBox.clearData()\r\n\r\n cur = self.dbConn.execute(\"SELECT People.* FROM Papers JOIN People on Papers.PrimaryAuthor = People.PersonID WHERE Papers.Title = ?\",[self.title.getVal()])\r\n res = cur.fetchone()\r\n if res ==None:\r\n self.primeAuthor.setVal(\"No Author Found; Check database\")\r\n return\r\n self.primeAuthor.setVal(formatNameSQL(res))\r\n self.addPrimeAuthorFn()\r\n\r\n cur = self.dbConn.execute(\"SELECT People.* FROM Papers JOIN People on Papers.CorrespondingAuthor = People.PersonID WHERE Papers.Title = ?\",[self.title.getVal()])\r\n res = cur.fetchone()\r\n if res == None:\r\n self.correspond.setVal(self.primeAuthor.getVal())\r\n else:\r\n self.correspond.setVal(formatNameSQL(res))\r\n\r\n cur = self.dbConn.execute(\"SELECT People.* FROM Papers JOIN People JOIN CoAuthors ON Papers.paperID = CoAuthors.PaperID AND People.PersonID = CoAuthors.Author WHERE Papers.Title = ?\",[self.title.getVal()])\r\n res = cur.fetchall()\r\n if res == None:\r\n return\r\n for ln in res:\r\n curAuthor = str.format(formatNameSQL(ln))\r\n self.authorBox.addLine(curAuthor)\r\n self.coAuthor.setVal(curAuthor)", "def parse_authors(article):\n author_names = article.find(\"sourcedesc\").findAll(\"persname\")\n authors = []\n for author in author_names:\n firstname = author.find(\"forename\", {\"type\": \"first\"})\n firstname = firstname.text.strip() if firstname is not None else \"\"\n middlename = author.find(\"forename\", {\"type\": \"middle\"})\n middlename = middlename.text.strip() if middlename is not None else \"\"\n lastname = author.find(\"surname\")\n lastname = lastname.text.strip() if lastname is not None else \"\"\n if middlename is not \"\":\n authors.append(firstname + \" \" + middlename + \" \" + lastname)\n else:\n authors.append(firstname + \" \" + lastname)\n authors = \"; \".join(authors)\n return authors", "def Authors(self, default=[{}]):\n tmp = self.data.get('authors', default)\n return [HEP.AuthorReducedObject(i) for i in tmp]", "def __add_author(self, key_name, others_names, personal_information):\n for name in others_names:\n self.author_to_authorID[name] = (key_name, personal_information)", "def checkContributors():\n url = CHECKBASE % 
'contributors'\n contributors = []\n try:\n fh = getURLHandle(url)\n #fh = urllib2.urlopen(url)\n data = fh.read()\n dom = minidom.parseString(data)\n fh.close()\n contributor_elements = dom.getElementsByTagName('Contributor')\n for catel in contributor_elements:\n if catel.firstChild is None:\n continue\n contributor = catel.firstChild.data.strip()\n if len(contributor):\n contributors.append(str(contributor))\n except:\n raise Exception,\"Could not open %s to search for list of contributors\" % url\n return contributors", "def authors():\n\tclick.clear()\n\trich.print(\"[bold]IDT[/bold] was initially made by [bold magenta]Deliton Junior[/bold magenta] and [bold red]Misael Kelviny[/bold red]\")", "def test_authors():\n assert(hasattr(tekel, '__authors__'))", "def core(self):\n \n \n comments = self.bot.subreddit(\n \"all\").stream.comments(\n skip_existing = True)\n \n \n for comment in comments:\n \n text = comment.body.lower().replace(\".\", \"\")\n \n for card in self.catalog:\n \n if (\n card[1].lower() in text\n and card[0].lower() in text\n and not comment.submission.id in self.responded\n and not comment.subreddit.user_is_banned):\n\n self.get_info(card)\n\n if not self.details:\n \n break\n\n audio = [\n \"audiobook\", \n \"audio book\"]\n \n author_format = [\n name.lower() for name in card[1].split(\" \") \n if len(name) >= 3]\n\n if (\n self.details[\"duration\"] > 10800\n and card[0].lower() in self.details[\n \"title\"].lower()\n and any(\n item in self.details[\n \"title\"].lower() for item in audio)\n and all(\n item in self.details[\n \"title\"].lower() for item in author_format)):\n \n \n saw_the_sign = (\n \"\"\"[^(Source Code)](https://capybasilisk.com/posts/\"\"\"\n \"\"\"2020/04/speculative-fiction-bot/) \"\"\"\n \"\"\"^| [^(Feedback)](https://www.reddit.com/message/\"\"\"\n \"\"\"compose?to=Capybasilisk&subject=Robot) \"\"\"\n \"\"\"^| [^(Programmer)](https://www.reddit.com/u/\"\"\"\n \"\"\"capybasilisk) \"\"\"\n \"\"\"^| ^(Downvote To Remove) \"\"\" \n \"\"\"^| ^(Version 1.4.0) \"\"\"\n \"\"\"^| ^(Support Robot Rights!)\"\"\")\n \n\n comment.reply(\n f\"\"\"Hi. You just mentioned *{card[0]}* by \"\"\" \n f\"\"\"{card[1]}.\\n\\nI've found an audiobook of \"\"\" \n \"\"\"that novel on YouTube. 
You can listen to it here\"\"\"\n f\"\"\":\\n\\n[YouTube | {self.details['title']}]\"\"\"\n f\"\"\"({self.details['webpage_url']})\\n\\n*I\\'m a bot that \"\"\" \n \"\"\"searches YouTube for science fiction and fantasy\"\"\" \n f\"\"\" audiobooks.*\\n***\\n{saw_the_sign}\"\"\")\n\n \n self.responded.append(\n comment.submission.id)\n \n with open(\n \"activity.csv\", \n \"a\", \n encoding = \"UTF-8\") as actlog:\n\n activity = clevercsv.writer(\n actlog)\n\n if actlog.tell() == 0:\n\n activity.writerow(\n [\"Book\",\n \"Comment\", \n \"Author\", \n \"Thread\", \n \"Subreddit\", \n \"Time\"])\n\n activity.writerow(\n [f\"{card[0]} by {card[1]}\",\n f\"{comment.body}\",\n f\"{comment.author}\",\n f\"{comment.submission.title}\",\n f\"{comment.subreddit}\",\n f\"{pendulum.now().to_datetime_string()}\"])\n \n self.details = None\n \n break\n \n break \n \n if pendulum.now().to_time_string().endswith(\n \"0:00\"):\n \n self.tidy()", "def unknown_author_sources(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n \"authorsFemaleCount\": 0,\n \"authorsMaleCount\": 0,\n }\n },\n {\n \"$project\": {\n \"_id\": 1,\n \"outlet\": 1,\n \"authors\": 1,\n \"authorsMale\": 1,\n \"authorsFemale\": 1,\n \"authorsUnknown\": 1,\n \"sourcesMaleCount\": 1,\n \"sourcesFemaleCount\": 1,\n \"sourcesUnknownCount\": 1,\n }\n },\n {\n \"$group\": {\n \"_id\": \"$outlet\",\n \"totalArticles\": {\"$sum\": 1},\n \"totalMaleSources\": {\"$sum\": \"$sourcesMaleCount\"},\n \"totalFemaleSources\": {\"$sum\": \"$sourcesFemaleCount\"},\n \"totalUnknownSources\": {\"$sum\": \"$sourcesUnknownCount\"},\n }\n },\n ]\n return query", "def get_paper_authors(tree):\n\tpath = '//table/tr/th[text() = \"Glasgow Author(s) Enlighten ID:\"]/following-sibling::td/a'\n\t# Get list of <a> elements, each an author\n\tauthors = tree.xpath(path)\n\t# Make list of (author name, author url) pairs to return\n\tauthors = [(author.text, author.get(\"href\")) for author in authors]\n\n\treturn authors", "def sanitize_author(name, email):\n # deal with inconsistent email addresses/names in commits.\n # feel free to fill this method out.\n return name", "def check_specific_names(citelist: list, specific_names: list) -> None:\n unique_names = list()\n nameset = set()\n for c in citelist:\n if c.name != \".\":\n clean = clean_specific_name(c.name)\n if (not (clean in nameset)) and (clean != \"\"):\n nameset |= {clean}\n unique_names.append(clean)\n unique_names.sort()\n for n in unique_names:\n is_found = False\n for s in specific_names:\n if n in s.variations:\n is_found = True\n if not is_found:\n report_error(\"Missing specific name: \" + n)", "def get_fullcc_list(draft):\n emails = {}\n # get authors\n for author in draft.authors.all():\n if author.address not in emails:\n emails[author.address] = '\"%s\"' % (author.person.name)\n \n if draft.group.acronym != 'none':\n # add chairs\n for role in draft.group.role_set.filter(name='chair'):\n if role.email.address not in emails:\n emails[role.email.address] = '\"%s\"' % (role.person.name)\n # add AD\n if draft.group.type.slug == 'wg': \n emails['%[email protected]' % draft.group.acronym] = '\"%s-ads\"' % (draft.group.acronym)\n elif draft.group.type.slug == 'rg':\n email = draft.group.parent.role_set.filter(name='chair')[0].email\n emails[email.address] = 
'\"%s\"' % (email.person.name)\n \n # add sheperd\n if draft.shepherd:\n emails[draft.shepherd.address] = '\"%s\"' % (draft.shepherd.person.name)\n \n # use sort so we get consistently ordered lists\n result_list = []\n for key in sorted(emails):\n if emails[key]:\n result_list.append('%s <%s>' % (emails[key],key))\n else:\n result_list.append('<%s>' % key)\n\n return ','.join(result_list)", "def get_reference_authors(ref_node):\n authors = ref_node.xpath(\"./contribution/authors/author\")\n authors_names = []\n for author in authors:\n given_names = author.xpath(\"string(./given-name[1])\").extract_first(default=\"\")\n last_names = author.xpath(\"string(./surname[1])\").extract_first(default=\"\")\n authors_names.append(\" \".join([given_names, last_names]).strip())\n return authors_names", "def test_user_can_change_not_author(self):\n self.assertFalse(self.story.user_can_change(self.user2))", "def get_author_data():\n entry = mongo.db.Authors\n output = list()\n look_up_type = None\n if 'name' in request.args:\n look_up_type = 'name'\n print(request.args)\n if len(request.args['name']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['name'].strip('\"')\n name = entry.find({'name': {'$regex': value}})\n if name:\n for author in name:\n output.append({'name': author['name']})\n elif 'booktitle' in request.args:\n look_up_type = 'related_books'\n if len(request.args['booktitle']) <= 2:\n return render_template('error.html', message=\"Must enter characters\"), 400\n value = request.args['booktitle'].strip('\"')\n related_books = entry.find(\n {'author_books': {'$regex': value}})\n if related_books:\n for related in related_books:\n for title in related['author_books']:\n if value in title:\n output.append(({'related_books': title}))\n if len(output) == 0:\n return render_template('error.html', message=\"No Entries Found\"), 400\n return render_template('gottenAuthors.html', output=output, look_up_type=look_up_type), 200", "def format_authors(self, style):\n def format_one_author(author, style):\n \"\"\"\n Helper function that does it for one author.\n \"\"\"\n # Check If there's no given name.\n # We should probably try to handle the no family name case, but\n # I'm not sure when we will actually come across an example...\n if \"given\" not in author or author[\"given\"] == []:\n return author[\"family\"]\n # Otherwise...\n family_name = author[\"family\"]\n given_names = author[\"given\"]\n\n # deal with a pathological case, 10.1016/j.jmr.2018.02.009\n ns = given_names.split()\n for i, name in enumerate(ns):\n if i >= 1 and name.startswith('-'):\n this_name = ns.pop(i)\n ns[i - 1] += this_name\n given_names = \" \".join(ns)\n\n if style == \"display\":\n return (\"\".join(n[0] for n in re.split(r\"[\\s-]\", given_names))\n + \" \" + family_name)\n elif style == \"acs\":\n # \"Jean-Baptiste Simon\" -> [[\"Jean\", \"Baptiste\"], [\"Simon\"]]\n split_both = [name.split('-') for name in given_names.split()]\n # [[\"Jean\", \"Baptiste\"], [\"Simon\"]] -> \"J.-B. S\"\n joined_both = \". \".join([\".-\".join(n[0] for n in names)][0]\n for names in split_both)\n return (family_name + \", \" + joined_both + \".\")\n elif style == \"bib\":\n s = family_name + \", \" + given_names\n return s.replace(\". 
\", \".\\\\ \") # Must use control spaces\n elif style == \"full\":\n return given_names + \" \" + family_name\n # Otherwise, grumble.\n else:\n raise ValueError(f\"Invalid value '{style}' for style.\")\n\n if self.authors is not None:\n return [format_one_author(author, style) for author in self.authors]" ]
[ "0.74669504", "0.65934074", "0.6494307", "0.6315845", "0.6253867", "0.62438315", "0.61274093", "0.6090594", "0.5927208", "0.59244573", "0.58965456", "0.5887174", "0.58157754", "0.579177", "0.57523257", "0.5713481", "0.5709423", "0.5708671", "0.5706471", "0.56644917", "0.5663394", "0.5654191", "0.5637352", "0.5629413", "0.56229705", "0.56206954", "0.56068504", "0.55586964", "0.552714", "0.55246806", "0.5524031", "0.55001354", "0.548912", "0.5478372", "0.5462144", "0.5456668", "0.5446852", "0.5442826", "0.5431241", "0.542338", "0.54173404", "0.5408348", "0.5391662", "0.5381946", "0.5378734", "0.5361729", "0.5357518", "0.5351697", "0.53492194", "0.53450686", "0.533999", "0.53121245", "0.5307034", "0.5298338", "0.52979076", "0.5296435", "0.52903587", "0.5284939", "0.52782905", "0.52766865", "0.5274788", "0.5271371", "0.52602786", "0.52539", "0.52521545", "0.5242042", "0.5239656", "0.5238372", "0.5233162", "0.51996946", "0.51956254", "0.5189113", "0.5184525", "0.5182203", "0.5180718", "0.5169747", "0.5166925", "0.5159819", "0.5151949", "0.5125464", "0.5120082", "0.50897086", "0.5087822", "0.5053975", "0.50478125", "0.50420475", "0.5034123", "0.5031574", "0.5027096", "0.5025236", "0.5022297", "0.5020757", "0.50096476", "0.50057036", "0.49997216", "0.49982285", "0.499053", "0.49807853", "0.49776217", "0.49774244" ]
0.7966482
0
Takes a list of cachedauthors and adds them to the author follower list
def create_cached_author_followers(author, followers):
    for f in followers:
        author.followers.add(f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addAuthor2():\n\n author_list = list()\n\n authors = Author.objects.all()\n\n for author in authors:\n author_dict = dict()\n author_dict['id'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n author_dict['host'] = \"{}/api/\".format(author.host_url)\n author_dict['displayName'] = author.username\n author_dict['url'] = \"{}/api/author/{}\".format(DOMAIN, author.id)\n\n author_list.append(author_dict)\n\n return author_list", "def get_coauthors_by_author(cached_list, cached_set, author_name):\n author = DBLPQuery.author_distinct(cached_list, cached_set, author_name)\n coauthors = {}\n if author['dblp'].__contains__('coauthors'):\n for author_key in author['dblp']['coauthors']:\n coauthors[author_key] = { 'en': author_key, 'zh': '' }\n\n if author['cdblp'].__contains__('coauthors'):\n for author_key in author['cdblp']['coauthors']:\n if coauthors.__contains__(author_key['full_name']):\n coauthors[author_key['full_name']]['zh'] = author_key['zh']\n else:\n coauthors[author_key['full_name']] = { 'en': author_key['full_name'], 'zh': author_key['zh'] }\n\n return coauthors", "def follow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.followList.get(followerId, [followerId]):\n self.followList[followerId] = self.followList.get(followerId, [followerId]) + [followeeId]\n # print(self.followList)", "def author_following(self):\n\t\tpass", "def popAuthors(self):\r\n# cur = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\")\r\n# res = cur.fetchall()\r\n res = self.dbConn.execute(\"SELECT * FROM People WHERE PersonID>0 ORDER BY Lastname\").fetchall()\r\n\r\n self.authorList = [formatNameSQL(ln) for ln in res]\r\n self.quickAuthors = [ln[\"Lastname\"].lower() for ln in res]\r\n vals = [ln[\"PersonID\"] for ln in res]\r\n \r\n self.authorLookup = dict(zip(self.authorList,vals))", "def authors(self, authors):\n\n self._authors = authors", "def follow(self, followerId, followeeId):\n if followerId in self.follows:\n self.follows[followerId].add(followeeId)\n else:\n self.follows[followerId] = set([followeeId])", "def updateAuthors(self,event=None):\r\n self.popAuthors()\r\n self.primeAuthor.updateVals(self.authorList)\r\n self.coAuthor.updateVals(self.authorList)\r\n self.correspond.updateVals(self.authorList)", "def load_authors(keys: [str]):\n not_in_local_cache = []\n result = []\n for key in keys:\n try:\n result.append(_author_data_cache[key].to_dict())\n del _author_data_cache[key]\n except KeyError:\n not_in_local_cache.append(key)\n \n if len(not_in_local_cache):\n doc_refs = [db.collection(AUTHOR_CACHE_COLLECTION).document(key)\n for key in keys]\n data = db.get_all(doc_refs)\n for datum in data:\n if not datum.exists:\n raise cache_buddy.CacheMiss(datum.id)\n result.append(datum.to_dict())\n return [_decompress_record(r) for r in result]", "def __add_author(self, key_name, others_names, personal_information):\n for name in others_names:\n self.author_to_authorID[name] = (key_name, personal_information)", "def follow_following_followers(self):\n self.logger.log(\"starting follow_following_followers...\")\n follows_accounts = self.following\n random.shuffle(follows_accounts)\n for acc in follows_accounts:\n try:\n try:\n followw = perform_with_ran_delay(self.instagram.get_followers, acc, 150, 15,\n delayed=True)\n accountstofollow = followw[\"accounts\"]\n random.shuffle(accountstofollow)\n if len(accountstofollow) > 10:\n accountstofollow = accountstofollow[:10]\n for ac in accountstofollow:\n if not 
self.is_user_following(ac.identifier):\n self.add_following(ac.identifier)\n self.logger.log(\"following: {}\".format(ac.username))\n except Exception as e:\n print(e)\n self.logger.log(str(e))\n finally:\n sleep(3)", "def follow(self, followerId, followeeId):\n if followerId not in self.follow_map:\n self.follow_map[followerId] = set()\n \n self.follow_map[followerId].add(followeeId)", "def get_author_citations(updated_redic_list, citedbydict, initial_author_dict, config):\n\n #sorry bout repeated code to get the tags\n tags = ['first_author', 'additional_author', 'alternative_author_name']\n tagvals = {}\n for t in tags:\n try:\n x = config.get(config.get(\"rank_method\", \"function\"), t)\n tagvals[t] = x\n except:\n register_exception(prefix=\"attribute \"+t+\" missing in config\", alert_admin=True)\n return initial_author_dict\n\n #parse the tags\n mainauthortag = tagify(parse_tag(tagvals['first_author']))\n coauthortag = tagify(parse_tag(tagvals['additional_author']))\n extauthortag = tagify(parse_tag(tagvals['alternative_author_name']))\n if task_get_task_param('verbose') >= 9:\n write_message(\"mainauthortag \"+mainauthortag)\n write_message(\"coauthortag \"+coauthortag)\n write_message(\"extauthortag \"+extauthortag)\n\n author_cited_in = initial_author_dict\n if citedbydict:\n i = 0 #just a counter for debug\n write_message(\"Checking records referred to in new records\")\n for u in updated_redic_list:\n if (i % 1000 == 0):\n mesg = \"Author ref done \"+str(i)+\" of \"+str(len(updated_redic_list))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n if citedbydict.has_key(u):\n these_cite_k = citedbydict[u]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n authors = get_fieldvalues(u, mainauthortag)\n coauthl = get_fieldvalues(u, coauthortag)\n extauthl = get_fieldvalues(u, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author ref done fully\"\n write_message(mesg)\n task_update_progress(mesg)\n\n #go through the dictionary again: all keys but search only if new records are cited\n write_message(\"Checking authors in new records\")\n i = 0\n for k in citedbydict.keys():\n if (i % 1000 == 0):\n mesg = \"Author cit done \"+str(i)+\" of \"+str(len(citedbydict.keys()))+\" records\"\n write_message(mesg)\n task_update_progress(mesg)\n i = i + 1\n\n these_cite_k = citedbydict[k]\n if (these_cite_k is None):\n these_cite_k = [] #verify it is an empty list, not None\n #do things only if these_cite_k contains any new stuff\n intersec_list = list(set(these_cite_k)&set(updated_redic_list))\n if intersec_list:\n authors = get_fieldvalues(k, mainauthortag)\n coauthl = get_fieldvalues(k, coauthortag)\n extauthl = get_fieldvalues(k, extauthortag)\n authors.extend(coauthl)\n authors.extend(extauthl)\n for a in authors:\n if a and author_cited_in.has_key(a):\n #add all elements in these_cite_k\n #that are not there already\n for citer in these_cite_k:\n tmplist = author_cited_in[a]\n if (tmplist.count(citer) == 0):\n tmplist.append(citer)\n author_cited_in[a] = tmplist\n else:\n author_cited_in[a] = these_cite_k\n\n mesg = \"Author cit done fully\"\n write_message(mesg)\n 
task_update_progress(mesg)\n\n return author_cited_in", "def followers(congressDict, twitterAPI):\n most = twitterAPI.get_user(list(congressDict.items())[0][1]) # Choose an arbitrary starting point from the dictionary and assign it their user details.\n least = most\n for name in congressDict:\n tempAPI = twitterAPI.get_user(congressDict[name]) # Get the user details of each congress members' twitter handle.\n numFollowers = tempAPI._json['followers_count']\n if (numFollowers > most._json['followers_count']): # If the follower count is greater than most, replace the user details with current one.\n most = tempAPI\n elif (numFollowers < least._json['followers_count']): # If the follower count is lower than least, replace the user details with the current one.\n least = tempAPI\n return [most._json[\"name\"], least._json[\"name\"]]", "def get_authors(self):\n return [aer.author for aer in self.authorentryrank_set.all()]", "def follow(self, followerId: int, followeeId: int) -> None:\n # Time Complexity: O(1)\n if followerId != followeeId:\n if followerId not in self.followees:\n self.followees[followerId] = set()\n\n self.followees[followerId].add(followeeId)", "def all_followers(twitter_dict, twitter_name): \r\n \r\n following_list = []\r\n for user in twitter_dict:\r\n f_list = twitter_dict[user]['following']\r\n if twitter_name in f_list:\r\n following_list.append(user) \r\n return following_list", "def addFriends(author):\n friends = author.friends.all()\n remote_friends = RemoteFriend.objects.all().filter(author=author)\n friend_list = list()\n if friends:\n for friend in friends:\n friend_dict = {'id': \"{}/api/{}\".format(DOMAIN, friend.id), 'host': friend.host_url,\n 'displayName': friend.username, 'url': \"{}/api/{}\".format(DOMAIN, friend.id)}\n friend_list.append(friend_dict)\n\n if remote_friends:\n for remote in remote_friends:\n friend_dict = {'id': remote.url, 'host': remote.host,\n 'displayName': remote.displayName, 'url': remote.url}\n friend_list.append(friend_dict)\n\n remote = check_remote_friends(author)\n friend_list += remote\n return friend_list", "def follow(self, followerId, followeeId):\r\n if followerId != followeeId:\r\n self.follows[followerId].add(followeeId)", "def follow(self, follower, followee):\n pass", "def authors(self, key, value):\n _authors = self.get(\"authors\", [])\n item = build_ils_contributor(value)\n if item and item not in _authors:\n _authors.append(item)\n try:\n if \"u\" in value:\n other = [\"et al.\", \"et al\"]\n val_u = list(force_list(value.get(\"u\")))\n if [i for i in other if i in val_u]:\n self[\"other_authors\"] = True\n except UnexpectedValue:\n pass\n return _authors", "def add_authors(self, author_data, instance):\n for idx, author in enumerate(author_data):\n Author.objects.create(dataset=instance, order=idx, author=author)", "def get_coauthored_publications_by_authors(cached_list, cached_set, author1_name, author2_name):\n publications = { 'cdblp': [], 'dblp': [] }\n pub1 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author1_name)\n author2 = DBLPQuery.author_distinct(cached_list, cached_set, author2_name)\n #pub2 = DBLPQuery.get_publications_by_author(cached_list, cached_set, author2_name)\n for cdblp_pub in pub1.get('cdblp', []):\n authors = set(cdblp_pub.get('authors', []))\n authors_en = set(map(lambda a: CDBLPAuthor.getEnglishName(a)['full_name'], authors))\n if author2.get('cdblp', {}).get('author_name', {}).get('zh') in authors or author2.get('dblp', {}).get('author_name') in authors_en:\n 
publications['cdblp'].append(cdblp_pub)\n\n for dblp_pub in pub1.get('dblp', []):\n authors = set(map(lambda a: a.get('name'), dblp_pub.get('authors', [])))\n if author2.get('dblp', {}).get('author_name') in authors or author2.get('cdblp', {}).get('author_name', {}).get('full_name') in authors:\n publications['dblp'].append(dblp_pub)\n\n return publications", "def query_authors(cls):\n authors = from_cache('AuthorsList')\n if not authors:\n authors = SuiAuthor.all().order('name').fetch(400)\n to_cache('AuthorsList', authors)\n return authors", "def all_authors( data ) :\n return list(set( chain.from_iterable( [ authors(x) for x in data ] ) ))", "def __add_publication(self, authors, publication):\n for author in authors:\n\n if author not in self.author_to_publications:\n self.author_to_publications[author] = set()\n self.author_to_publications[author].add(publication)", "def follow(self, followerId: int, followeeId: int) -> None:\n self.user_followed[followerId].append(followeeId)", "def alt_authors(self, key, value):\n _authors = self.get(\"authors\", [])\n if _authors:\n for i, v in enumerate(force_list(value)):\n _authors[i].update({\"alternative_names\": clean_val(\"a\", v, str)})\n return _authors", "def Authors(self, default=[{}]):\n tmp = self.data.get('authors', default)\n return [HEP.AuthorReducedObject(i) for i in tmp]", "def Authors(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('authors', default)\n return [HEP.AuthorObject(i) for i in tmp]", "def authors(author_ids):\n if author_ids is None:\n return ''\n else:\n ids = []\n for author_id in author_ids.split(','):\n ids.append(User.id == int(author_id))\n authors = User.query.filter(or_(*ids)).all()\n if authors is None:\n return ''\n else:\n return 'by ' + ', '.join([author.name for author in authors])", "def followers(self, handles):\n print(handles)\n followers_list = {}\n for handle in handles:\n followers = self.twitter_client.followers_ids(screen_name=handle)\n\n r = []\n for page in self.paginate(followers, 100):\n results = self.twitter_client.lookup_users(user_ids=page)\n for result in results:\n r.append(result.screen_name)\n followers_list[handle] = r\n return followers_list", "def update_from_emails(self, emails):\n # Now add contributors using cache (new GitHub contributors) with known email or orcid that isn't present\n for email in emails:\n if email not in self.email_lookup:\n bot.info(f\" Updating with new added email: {email}\")\n entry = {\"@type\": \"Person\", \"email\": email}\n self.lookup.append(entry)", "def all_followers (twitter_data, username):\n\n # initialize\n followers = []\n\n for key in twitter_data: # go through every username in twitter_data\n if username in twitter_data [key]['following']: # check each 'following'\n followers.append (key)\n\n followers.sort() # sort the list alphabetically for testing purposes\n return followers", "def follow_followers(self):\n self.logger.log(\"starting follow_followers...\")\n follow = perform_with_ran_delay(self.instagram.get_followers, self.account.identifier, 150, 15, delayed=True)\n for acc in follow[\"accounts\"]:\n try:\n try:\n # print(\"{} follows me, do I follow him ? 
> {} \".format(acc.username,self.is_user_following(acc.identifier)))\n if not self.is_user_following(acc.identifier):\n if self.add_following(acc.identifier):\n self.logger.log(\"following: {}\".format(acc.username))\n else:\n self.logger.log(\"follow not working at the moment\")\n except Exception as e:\n print(e)\n self.logger.log(str(e))\n continue\n finally:\n sleep(3)", "def __generate_author_string__(self, list_of_authors):\n author_string = \"\"\n return author_string.join(list_of_authors)", "def auto_follow_followers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n not_following_back = followers - following\n\n for user_id in not_following_back:\n try:\n t.friendships.create(user_id=user_id, follow=False)\n except Exception as e:\n print(\"error: %s\" % (str(e)))", "def get_followers(user):\n if user.has_key('followers_list'):\n pass\n else:\n if user.has_key('followers_count'):\n if user['followers_count'] > 4999:\n pages = user['followers_count'] / 5000\n f_list = []\n for page in range(pages):\n try:\n follower_set = api.GetFollowers(user_id=user['id'], cursor=page, count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n f_list = friends_list + f_list\n time.sleep(60)\n user['followers_list'] = f_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(f_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)\n else:\n try:\n follower_set = api.GetFollowers(user_id=user['id'], count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n user['followers_list'] = friends_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(friends_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)", "def get_followers1(user):\n if user.has_key('followers_list'):\n pass\n else:\n if user.has_key('followers_count'):\n if user['followers_count'] > 4999:\n pages = user['followers_count'] / 5000\n f_list = []\n for page in range(pages):\n try:\n follower_set = api1.GetFollowers(user_id=user['id'], cursor=page, count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n f_list = friends_list + f_list\n time.sleep(60)\n user['followers_list'] = f_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(f_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)\n else:\n try:\n follower_set = api1.GetFollowers(user_id=user['id'], count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n user['followers_list'] = friends_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(friends_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)", "def _get_authors_list():\n\n articles = os.listdir(\"../data/\")\n authors = []\n for article in articles:\n with 
open(\"../data/\" + article, 'r') as file:\n lines = file.readlines()\n author = tuple(\n line.replace(\"\\n\", \"\").split()[1] for line in lines\n if \"Автор:\" in line\n )[0]\n authors.append(author)\n\n return authors", "def authors(self):\n authors = [\n n.people for n in self.pymbake_person_relationship.all()\n ]\n\n return authors", "def user_follow_artists(self, ids=None, **kwargs):\n return self._put(\n API.MY_FOLLOWING.value, type=\"artist\", ids=\",\".join(ids or []), **kwargs\n )", "def get_authors_from_papers(papers):\n auth_set = set()\n for p in papers:\n auth_set.update(p['authors'])\n return list(auth_set)", "def authors(self):\n user_ids = set(r.author.id for r in self.history())\n return User.query.find({'_id': {'$in': list(user_ids)}}).all()", "def make_author_list(res):\n try:\n r = [\", \".join([clean_txt(x['family']).capitalize(), clean_txt(x['given']).capitalize()]) for x in res['author']]\n except KeyError as e:\n print(\"No 'author' key, using 'Unknown Author'. You should edit the markdown file to change the name and citationkey.\")\n r = [\"Unknown Authors\"]\n return r", "def update_from_logins(self, logins):\n # Now add contributors using cache (new GitHub contributors) with known email or orcid that isn't present\n for login in logins:\n # Check against contribution threshold, and not bot\n if not self.include_contributor(login):\n continue\n\n cache = self.cache.get(login) or {}\n email = cache.get(\"email\")\n orcid = cache.get(\"orcid\")\n\n # We can only add completely new entries that don't already exist\n if (email != None or orcid != None) and (\n email not in self.email_lookup and orcid not in self.orcid_lookup\n ):\n bot.info(f\" Updating {login}\")\n parts = (cache.get(\"name\") or login).split(\" \")\n entry = {\"@type\": \"Person\", \"givenName\": parts[0]}\n\n # Add the last name if it's defined\n if len(parts) > 1:\n entry[\"familyName\"] = \" \".join(parts[1:])\n\n if email != None:\n entry[\"email\"] = email\n if orcid != None:\n entry[\"@id\"] = \"https://orcid.org/%s\" % orcid\n self.lookup.append(entry)", "def follow(self, followerId: int, followeeId: int) -> None:\n self.follow_map[followerId].add(followeeId)", "def add_all_friends(twitter, users):\n for u_dict in users:\n u_dict['friends'] = get_friends(twitter,u_dict['screen_name'])", "def update_insta_followers_info(self):\n\n cur_following = self.GSpread.sheet_to_df('kontstats',\n 'INSTA_FOLLOWERS')\n new_following = self.Instagram.get_followers_df()\n\n cur_followers = set(cur_following.username.values)\n new_followers = set(new_following.username.values)\n\n who_left = list(cur_followers.difference(new_followers))\n who_joined = list(new_followers.difference(cur_followers))\n\n if len(who_left) > 0:\n self.GSpread.write_raw_log('INSTAGRAM',\n '',\n 'FOLLOWERS_LEFT',\n ', '.join(who_left))\n\n if len(who_joined) > 0:\n self.GSpread.write_raw_log('INSTAGRAM',\n '',\n 'FOLLOWERS_JOINED',\n ', '.join(who_joined))\n\n if (len(who_left) > 0) or (len(who_joined) > 0):\n self.GSpread.df_to_sheet('kontstats',\n 'INSTA_FOLLOWERS',\n new_following)", "def add_artists(self, params):\n artists = params\n\n # Lists to load\n names = []\n for n in self.listIDs.keys():\n for a in artists:\n if 'artist:' + a in n:\n names.append(n)\n\n self.add_playlist(names)", "def getFollowers():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT following FROM followers WHERE user = (SELECT username FROM users WHERE id = ?)\", [user_id])\n tempFollowers = cur.fetchall()\n followers = []\n for follower in 
tempFollowers:\n followers.append(follower[0])\n return followers", "def load_authors():\n\n ret = {}\n for token in util.git('log', '--format=%aE:::%aN').split('\\n'):\n email, name = token.split(':::')\n ret[email] = name\n return ret", "def test_multiple_authors(self):\r\n args = self.page_kwargs.copy()\r\n content = Page(**args)\r\n assert content.authors == [content.author]\r\n args['metadata'].pop('author')\r\n args['metadata']['authors'] = ['First Author', 'Second Author']\r\n content = Page(**args)\r\n assert content.authors\r\n assert content.author == content.authors[0]", "def add_untracked_followers(self):\n\n self.log.debug(\"CHECK FOR UNTRACKED FOLLOWERS\")\n followers_ids_api = self.api.followers_ids()\n target = Target.objects.filter(hunter=self.user)\\\n .filter(status=Target.FOLLOWER)\n followers_ids_django = [t.hunted.twitter_id for t in target]\n\n untracked_followers_ids = filter(\n lambda x: unicode(x) not in followers_ids_django,\n followers_ids_api)\n\n untracked_followers, remainder = lookup_users_by_id(self.api,\n untracked_followers_ids)\n for untracked_follower in untracked_followers:\n twitter_account, created = \\\n utils.get_or_create_twitter_account(untracked_follower)\n target, created = Target.objects.get_or_create(\n hunter=self.user, hunted=twitter_account)\n if target.status == Target.PURGATORY:\n # Yay someone we targeted reciprocated follow\n self.follow_reciprocated(target)\n else:\n print target.status\n # Either a totally external follow, an ingrate changed mind,\n # or someone who we chatted became interested and followed\n # Either way the action is the same, follow him\n target.status = Target.FOLLOWER\n target.save()\n self.log.debug(\" => Add follower: %s\" % twitter_account.screen_name)", "def test_add_followers(self):\n pass", "def follow(self, followerId: int, followeeId: int) -> None:\n if followerId == followeeId: return\n self.users[followerId].add(followeeId)", "def AuthorURLs(entry):\n a_URLs = ''\n for a in entry.getAuthors():\n url = a.get('homepage', ' ')\n a_URLs += \"%s and \" % url\n return a_URLs[:-5]", "def _user_follower_info(self, uid: int = 0) -> List[_InstagramUser]:\n # If no uid was specified, use the authenticated user's uid\n if uid == 0:\n uid = self.uid\n\n followers: List[Dict[str, Any]] = self.api.getTotalFollowers(uid)\n user_followers = list([_InstagramUser(x) for x in followers])\n return user_followers", "def format_authors(author_list):\n if isinstance(author_list, (list, tuple)):\n return \", \".join([format_authors(author) for author in author_list])\n else:\n if \", \" in author_list:\n author_list = author_list.split(\", \")\n author_list.reverse()\n author_list = \" \".join(author_list)\n elif \",\" in author_list:\n author_list = author_list.split(\",\")\n author_list.reverse()\n author_list = \" \".join(author_list)\n return author_list", "def get_reference_authors(ref_node):\n authors = ref_node.xpath(\"./contribution/authors/author\")\n authors_names = []\n for author in authors:\n given_names = author.xpath(\"string(./given-name[1])\").extract_first(default=\"\")\n last_names = author.xpath(\"string(./surname[1])\").extract_first(default=\"\")\n authors_names.append(\" \".join([given_names, last_names]).strip())\n return authors_names", "def follow(self, followerId: int, followeeId: int) -> None:\n self.followees[followerId].add(followeeId)", "def display_authors(self, *args):\n return ', '.join(author.name for author in args[0].authors.all()[:3])", "def getAuthors(self): #$NON-NLS-1$\r", "def 
parse_author_affiliation(medline):\n authors = []\n article = medline.find(\"Article\")\n if article is not None:\n author_list = article.find(\"AuthorList\")\n if author_list is not None:\n authors_list = author_list.findall(\"Author\")\n for author in authors_list:\n if author.find(\"ForeName\") is not None:\n forename = (author.find(\"ForeName\").text or \"\").strip() or \"\"\n else:\n forename = \"\"\n if author.find(\"Initials\") is not None:\n initials = (author.find(\"Initials\").text or \"\").strip() or \"\"\n else:\n initials = \"\"\n if author.find(\"LastName\") is not None:\n lastname = (author.find(\"LastName\").text or \"\").strip() or \"\"\n else:\n lastname = \"\"\n if author.find(\"Identifier\") is not None:\n identifier = (author.find(\"Identifier\").text or \"\").strip() or \"\"\n else:\n identifier = \"\"\n if author.find(\"AffiliationInfo/Affiliation\") is not None:\n affiliation = author.find(\"AffiliationInfo/Affiliation\").text or \"\"\n affiliation = affiliation.replace(\n \"For a full list of the authors' affiliations please see the Acknowledgements section.\",\n \"\",\n )\n else:\n affiliation = \"\"\n authors.append(\n {\n \"lastname\": lastname,\n \"forename\": forename,\n \"initials\": initials,\n \"identifier\": identifier,\n \"affiliation\": affiliation,\n }\n )\n return authors", "def find_relations_among_authors():\n for book in books:\n if len(books[book]) > 1:\n for i in range(len(books[book])):\n known_relations[books[book][i]] = books[book][:i] + books[book][i+1:]", "def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]", "def current_user_followed_artists(self, limit=20, after=None, **kwargs):\n return self._get(\n API.MY_FOLLOWING.value, type=\"artist\", limit=limit, after=after, **kwargs\n )", "def follower(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n for a in account:\n a = Account(a, morphene_instance=stm)\n print(\"\\nFollowers statistics for @%s (please wait...)\" % a.name)\n followers = a.get_followers(False)\n followers.print_summarize_table(tag_type=\"Followers\")", "def follow(self, followerId: 'int', followeeId: 'int') -> 'None':\n self.followees[followerId].add(followeeId)", "def get_publications_by_author(cached_list, cached_set, author_name):\n publications = { 'dblp': [], 'cdblp': [] }\n author = DBLPQuery.author_distinct(cached_list, cached_set, author_name)\n\n if author['dblp'].__contains__('publications'):\n publications['dblp'] = author['dblp']['publications']\n# for pub in author['dblp']['publications']:\n# print(pub)\n\n if author['cdblp'].__contains__('publications'):\n publications['cdblp'] = author['cdblp']['publications']\n# for pub in author['cdblp']['publications']:\n# print(pub)\n return publications", "def _on_authors_list(self, evt):\n \n # raise authors management dialog\n dlg = AuthorsView(self, self._library)\n response = dlg.ShowModal()\n dlg.Destroy()\n \n # check response\n if response != wx.ID_OK:\n return\n \n # refresh collections view\n self._collections_view.UpdateCounts()\n \n # refresh articles view\n self._articles_view.ShowArticles()", "def get_author_affiliations(self, author_node, author_group_node):\n ref_ids = 
author_node.xpath(\".//@refid[contains(., 'af')]\").extract()\n group_affs = author_group_node.xpath(\"string(./affiliation/textfn[1])\").getall()\n if ref_ids:\n affiliations = self._find_affiliations_by_id(author_group_node, ref_ids)\n else:\n affiliations = filter(None, group_affs)\n return affiliations", "def parse_authors(article):\n author_names = article.find(\"sourcedesc\").findAll(\"persname\")\n authors = []\n for author in author_names:\n firstname = author.find(\"forename\", {\"type\": \"first\"})\n firstname = firstname.text.strip() if firstname is not None else \"\"\n middlename = author.find(\"forename\", {\"type\": \"middle\"})\n middlename = middlename.text.strip() if middlename is not None else \"\"\n lastname = author.find(\"surname\")\n lastname = lastname.text.strip() if lastname is not None else \"\"\n if middlename is not \"\":\n authors.append(firstname + \" \" + middlename + \" \" + lastname)\n else:\n authors.append(firstname + \" \" + lastname)\n authors = \"; \".join(authors)\n return authors", "def add(self, author, post):\n if not author in self.authors:\n self.authors.append(author)\n self.posts[author].append(post)\n return", "def follow(self, context, callback):\n followers_list = self.followers.setdefault(context, [])\n if callback not in followers_list:\n followers_list.append(callback)", "def follow(self, followerId, star):\n self.followstar[followerId] = self.followstar.get(followerId, set()) | set([star])", "def follow(self, other):\n\t\tif not self.follows(other):\n\t\t\tself.followed.append(other)", "def generateFollowers(self):\n for f in self._genericGenerator(self.getFollowers):\n yield f", "def _UpdateHistogramOwners(histogram, owner_to_replace, owners_to_add):\n node_after_owners_file = owner_to_replace.nextSibling\n replacement_done = False\n\n for owner_to_add in owners_to_add:\n if not replacement_done:\n histogram.replaceChild(owner_to_add, owner_to_replace)\n replacement_done = True\n else:\n _AddTextNodeWithNewLineAndIndent(histogram, node_after_owners_file)\n histogram.insertBefore(owner_to_add, node_after_owners_file)", "def generate_id_for_authors():\n counter = 1\n for book in books:\n for author in books[book]:\n if not author in authors:\n authors[author] = f'Author/{counter}'\n counter += 1", "def contributors(lancet, output):\n sorting = pygit2.GIT_SORT_TIME | pygit2.GIT_SORT_REVERSE\n commits = lancet.repo.walk(lancet.repo.head.target, sorting)\n contributors = ((c.author.name, c.author.email) for c in commits)\n contributors = OrderedDict(contributors)\n\n template_content = content_from_path(\n lancet.config.get(\"packaging\", \"contributors_template\")\n )\n template = Template(template_content)\n output.write(template.render(contributors=contributors).encode(\"utf-8\"))", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def followers(self):\r\n url = '{0}/followers'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def get_authors():\n authors = []\n authorfile = os.path.join('doc', 'authors.txt')\n with codecs.open(authorfile, 'r', 'utf-8') as f:\n for line in f:\n line = line.strip()\n if line and not line.startswith(u'#'):\n authors.append(line)\n return u\", 
\".join(authors)", "def getAuthors(self):\n authors = []\n for each in self.context.getAuthors():\n title = each['title']\n firstname = each['firstname']\n middlename = each['middlename']\n lastname = each['lastname']\n author = Author(title, firstname, middlename, lastname)\n authors.append(author)\n return authors", "def authors_completion(self, terms):\n return self.db.execute(u'''SELECT * FROM \"authors\" WHERE name LIKE ? LIMIT 50''', (u\"%{}%\".format(terms),)).fetchall()", "def follow(self, followerId: int, followeeId: int) -> None:\n if followerId == followeeId:\n return\n if followerId not in self.users.keys():\n self.users[followerId] = user()\n if followeeId not in self.users.keys():\n self.users[followeeId] = user()\n self.users[followerId].followees[followeeId] = self.users[followeeId]", "def authors(self):\n authors = self.context.Authors(sep=' and ',\n lastsep=' and ',\n format=\"%L, %F %M\",\n abbrev=0,\n lastnamefirst=0)\n if not isinstance(authors, unicode):\n authors = unicode(authors, 'utf-8')\n return authors", "def update_artists(source_item: Dict, target_item: Dict) -> None:\n for artist in target_item.get('castAndCrew', []):\n full_name = f\"{artist['artist'].get('firstName', '')} \" \\\n f\"{artist['artist'].get('lastName', '')}\".strip()\n for crew_name in source_item['highlight'].get('artists', []):\n if full_name.strip() in remove_html_tags(crew_name):\n first_name, last_name = crew_name.split(' ', 1)\n artist['artist']['firstName'] = first_name\n artist['artist']['lastName'] = last_name", "def update_insta_follower_count(self):\n\n df = self.Instagram.get_followers_df()\n n_followers = df.shape[0]\n self.GSpread.write_raw_log('INSTAGRAM',\n '',\n 'FOLLOWER_COUNT',\n n_followers)", "def find_authors(code):\n url = baseurl(code)\n page = req(url)\n soup = BeautifulSoup(page, 'lxml')\n addr = [t.attrs.get('content', None) \n for t in soup.find_all(\"meta\", {\"name\": \"citation_author_email\"})]\n \n # corresponding authors will have their email under another tag too\n corr = [t.find('a').attrs.get('href', None)\n for t in soup.find_all(None, {\"class\": \"author-corresp-email-link\"})]\n\n addr = [a for a in addr if a is not None]\n corr = [a.replace('mailto:', '') for a in corr if a is not None]\n\n return dict(corr=list(set(corr)), all=list(set(addr)))", "def run(self):\n authors = sorted(set(self.load_files_info()))\n\n # Show list of authors\n click.echo('List of users:')\n for n, author in enumerate(authors, start=1):\n click.echo(f'{n}) {author}')\n\n # Clean list and return\n remove = self._ask_remove()\n\n return [x[1] for x in enumerate(authors) if x not in remove]", "def authors_in_relation(context, data, authors):\n guids = [a.id for a in authors]\n guids = map( lambda x: str(x).replace('-', ''), guids)\n\n for guid in guids:\n context.assertTrue(unicode(guid) in data)", "def follow(self, followerId, followeeId):\n\n # 把 followeeId append到他的 follow 属性中\n if followerId == followeeId: # 不能自己关注自己\n return\n # 实例化一个user(followerID)\n follower = UserInfo()\n follower.user_id = followerId \n follower.follows.append(followeeId) \n self.user_pool[followerId] = follower", "def resolve_followers(self, info):\n user = info.context.user\n follow_request = FollowRequest.objects.filter(following=user.id, pending=False)\n return [follow.follower for follow in follow_request]", "def authors(self):\n raise BookInfoNotImplementedError('authors', self.__class__.__name__)", "def authors(self):\n return self.properties.get('Authors', 
ClientValueCollection(SharedWithMeDocumentUser))", "def get_all_followers(self):\n return get_all_(self.get_followers)" ]
[ "0.64747363", "0.6431524", "0.6423831", "0.6329321", "0.6274541", "0.6242031", "0.6238875", "0.62265736", "0.6178677", "0.61373085", "0.6131011", "0.6087273", "0.60318464", "0.59967387", "0.5986646", "0.5947148", "0.5934538", "0.5911428", "0.59073174", "0.590661", "0.59045154", "0.59034413", "0.5875903", "0.5860391", "0.5859604", "0.58438224", "0.58397263", "0.58224577", "0.5816134", "0.5798356", "0.5790309", "0.57746", "0.57729673", "0.5768505", "0.57598746", "0.57467204", "0.57257044", "0.5720336", "0.5704247", "0.5679065", "0.5665659", "0.5664377", "0.5663946", "0.56618154", "0.5657876", "0.56523085", "0.5640774", "0.5640071", "0.55963075", "0.55959857", "0.5580592", "0.55711496", "0.55589986", "0.55564964", "0.55318296", "0.5501447", "0.5497535", "0.5496132", "0.54920375", "0.5490889", "0.5486375", "0.5458841", "0.54559875", "0.54412085", "0.54377747", "0.54365844", "0.5424416", "0.54162484", "0.5413055", "0.5389997", "0.5385153", "0.5368442", "0.53567785", "0.533489", "0.5334766", "0.53288364", "0.5327367", "0.5315085", "0.5308484", "0.5303491", "0.5283387", "0.52715766", "0.52715766", "0.52715766", "0.52715766", "0.5264988", "0.52599615", "0.5250054", "0.52429575", "0.52292955", "0.5218363", "0.5217105", "0.5214635", "0.51969844", "0.5186941", "0.51816976", "0.5165398", "0.515802", "0.5157921", "0.51514554" ]
0.81595033
0
Returns all projects registered so far
def get_queryset(self): return Proyecto.objects.all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_users():", "def get_registered_users():\n registered_users = (\n current_app.scoped_session()\n .query(User)\n .filter(User.additional_info[\"registration_info\"] != \"{}\")\n .all()\n )\n registration_info_list = {\n u.username: u.additional_info[\"registration_info\"] for u in registered_users\n }\n return registration_info_list", "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def user_list():\n for values in USERS:\n user = User.objects.create_user(\n values[\"username\"], values[\"email\"], values[\"password\"]\n )\n user.first_name = values[\"first_name\"]\n user.last_name = values[\"last_name\"]\n user.is_staff = values[\"staff\"]\n user.is_superuser = values[\"super\"]\n user.save()\n Token.objects.create(key=values[\"token\"], user_id=user.id)\n\n # print('users created')", "def list():\n rino.login.list()", "def UsersProyecto(request,pk):#esta enlazado con la clase FaseForm del archivo getion/forms\n proyecto=Proyecto.objects.get(id_proyecto=pk)\n\n user= request.user## USER ACTUAL\n form = User.objects.all()\n registrados = User_Proyecto.objects.all()\n\n if request.method == 'POST': #preguntamos primero si la petición Http es POST ||| revienta todo con este\n #if form.is_valid():\n some_var=request.POST.getlist('checkbox')\n print(some_var)\n #form.save()\n return redirect('gestion:menu')\n else:\n list=[]\n for i in range(form.count()):\n ok = False\n if form[i].id != user.id: #and form[i].esta_aprobado == True :\n for x in range(registrados.count()):\n if registrados[x].proyecto_id == pk:\n if form[i].id == registrados[x].user_id:\n ok=True\n if ok:\n list.append(form[i].id)\n\n return render(request, 'proyectos/usuarios_proyectos.html', {'form': form,'list':list,'pk':pk,'proyectos':proyecto})", "def getResponsibleUsers():", "def getregisteredusers(self):\n\n select_registeredusers = (\n \"SELECT count(*) FROM tao_taouser \"\n \"WHERE username NOT IN (%s) \"\n\n )\n\n select_registeredusers = select_registeredusers % self.adminusers\n print(select_registeredusers)\n self.mysqlcursor.execute(select_registeredusers, self.adminusers)\n\n users = 0\n x = self.mysqlcursor.fetchone()\n if x is not None:\n users = x[0]\n\n # print(\"No of registered users: {0}\".format(users))\n return users", "def get_users():\n return db.fetch_users()", "def comite(request,pk):\n\n proyecto = User_Proyecto.objects.filter(proyecto_id=pk)\n gerente = User.objects.get(id=proyecto[0].user_id)\n\n comite = Comite.objects.all()\n form = Usuario.objects.all()\n proyectos=Proyecto.objects.get(id_proyecto=pk)\n if request.method == 'POST': #preguntamos primero si la petición Http es POST ||| revienta todo con este\n #form.save()\n return redirect('gestion:comite',pk)\n else:\n list=[]\n if(comite != None):\n for i in range(form.count()):\n ok = False\n if form[i].esta_aprobado == True:\n for x in comite:\n if x.id_user == form[i].user.id and x.id_proyecto == pk:\n ok=True\n if ok:\n list.append(form[i].user.id)\n print(list)\n return render(request, 'proyectos/ver_comite.html', {'form': form,'list':list,'pk':pk,'proyectos':proyectos,'idGerente':gerente.id})", "def get_users():\n users = functions.users()\n return users", "def list(self):\n # Grupos en los que el usuario formo parte\n curso = self.get_curso_actual()\n entregadores = identity.current.user.get_entregadores(curso)\n r = cls.select(IN(cls.q.entregador, entregadores), orderBy=-Entrega.q.fecha)\n return dict(records=r, name=name, namepl=namepl, 
limit_to=identity.current.user.paginador)", "def get_users(self):\n query = \"\"\"SELECT firstname,lastname,othernames,email,phonenumber,\\\n username,public_id,isadmin,isactive,registered\\\n from users ORDER BY registered ASC\"\"\"\n conn = self.db\n cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cursor.execute(query)\n rows = cursor.fetchall()\n return rows", "def listaProfesion():\n prof = ProfesionModel()\n\n return prof.listarTodos()", "def _create_users(self):\r\n users = []\r\n for i in range(8):\r\n username = \"user{}\".format(i)\r\n email = \"test+user{}@edx.org\".format(i)\r\n user = User.objects.create_user(username, email, 'foo')\r\n user.is_active = True\r\n user.save()\r\n users.append(user)\r\n return users", "def task_get_users(task):\n logger = logging.getLogger(__name__)\n logger.debug('Get JUNOS users info')\n result = list()\n out = task.run(task=netmiko_send_command,\n command_string=\"show configuration system login\")\n if out.failed:\n for host in out.failed_hosts.keys():\n logger.warning('Failed task on device {}'.format(task.inventory.hosts[host].name))\n task.inventory.hosts[host]['error'] = True\n for host, res in out.items():\n if not res.failed:\n logger.debug('Fill JUNOS users properties from device {}'.format(task.inventory.hosts[host].name))\n task.inventory.hosts[host]['error'] = False\n# with open('output/junos_show_conf_system_login.txt','w+') as f:\n# f.write(r.result)\n result.append(parse_users(host, res.result))\n return result", "def get_users(self):\n return self.execute(TABELLE['users']['select']['all'])", "def get_registries(self):\n raise NotImplementedError(\"get_registries method is not implemented.\")", "def get_users(self):\n res = self.conn.cursor().execute('SELECT id,email,username FROM users')\n return res.fetchall()", "def listar_gabarito():\n return GabaritoProva.listar(gabarito)", "def GetRegisterList():\n return ida_idp.ph_get_regnames()", "def db_users():\n return [\n {\"name\": \"Cathy\", \"email\": \"cathy@\", \"group\": \"guest\", \"password\": \"12345\"},\n {\"name\": \"Marry\", \"email\": \"marry@\", \"group\": \"guest\", \"password\": \"12345\"},\n {\"name\": \"John\", \"email\": \"john@\", \"group\": \"guest\", \"password\": \"12345\"},\n ]", "def select_todos_registros(nome_tabela: str) -> list:\n query = f'SELECT * FROM {nome_tabela};'\n\n lista_registros = banco_operacoes(query)\n\n return lista_registros", "async def scan_members(self, ctx):\n if not ctx.guild.id:\n return\n\n if not await self.bot.guildservice.check_guild(ctx.guild.id):\n await self.bot.guildservice.add_guild(ctx.guild)\n\n async for member in ctx.guild.fetch_members():\n await self.register_member(member, ctx.guild.id)\n await asyncio.sleep(0)\n print(\"Usuários salvos com sucesso.\")\n await asyncio.sleep(1)\n await ctx.message.delete()", "def get_users(self):\n return self.mycam.devicemgmt.GetUsers()", "def user_list(self):\n self.cur.execute(\"SELECT username FROM users\")\n users = []\n for username in self.cur.fetchall():\n users.append(username[0])\n return users", "def get_all_uid_service():\n return user_dao.get_all_uid_dao()", "def get_all_users():\n db = api.db.get_conn()\n return list(db.users.find({}, {\"_id\": 0, \"password_hash\": 0}))", "def users(self, site = None):\r\n uids = self.user_ids()\r\n if uids:\r\n users = Account._byID(uids, True, return_dict = False)\r\n return [self.ajax_user(u) for u in users]\r\n else:\r\n return ()", "def get_registries():\n url = \"/\".join([REGISTRY_BASE, \"_catalog\"])\n 
response = req(url)\n if response is not None:\n return response[\"repositories\"]\n return []", "def get_all_users():\n return jsonify(admin.get_all_users(current_app.scoped_session()))", "def getInterestedUsers():", "def listar_proyectos(request):\n proyectos = Proyecto.objects.all()\n PROYECTOS_USUARIO= CantProyectos(request)\n cant = len(PROYECTOS_USUARIO)\n context={\n 'proyectos':proyectos,###### TODOS LOS PROYECTOS\n 'list': PROYECTOS_USUARIO,##PROYECTOS DEL USUARIO LOS CUAL SE DEBE MOSTRAR, SOLO ID\n 'cant': cant####CANTIDAD DE PROYECTOS QUE POSEE\n }\n return render(request, 'Menu/listar_proyectos.html', context)", "def get_user_info_list(self):\n\t\treturn Job(SDK.PrlSrv_GetUserInfoList(self.handle)[0])", "def create_users(self):\n if self.gl is None:\n print(\"No config found, please run connect first.\")\n exit(1)\n else:\n print(\"Starting Users creation.\")\n gl = self.gl\n config = self.config\n for username in config[\"users\"]:\n i = 0\n count = int(config[\"users\"][username][\"count\"])\n pw = config[\"users\"][username][\"pass\"]\n groups = config[\"users\"][username][\"groups\"]\n while i < count:\n i += 1\n print(\"creating user: \" + username + '-' + str(i) + \" ...\", end=' ')\n user = gl.users.create({'email': username + str(i) + '@example.com',\n 'password': pw,\n 'username': username + '-' + str(i),\n 'name': username + '-' + str(i),\n 'skip_confirmation': True})\n self.users.append(user)\n self.usergroups[user.id] = groups\n print(\"done.\")\n print(\"All Users created!\")", "def iniciar_cadastro_dos_registros():\r\n # quarto\r\n result = False\r\n try:\r\n fc.inserir_registros(_browser)\r\n result = True\r\n except:\r\n result = False\r\n\r\n assert result", "def get_users(self, email):\n print(\"bu\")\n active_users = UserModel._default_manager.filter(**{\n '%s__iexact' % UserModel.get_username_field_name(): username,\n 'is_active': True,\n })\n print(active_users)\n # active_users = UserModel._default_manager.filter(**{\n # '%s__iexact' % UserModel.get_email_field_name(): email,\n # 'is_active': True,\n # })\n return (u for u in active_users if u.has_usable_password())", "def users(self):\n from sagas.ofbiz.entities import OfEntity as e, oc\n rs=e().allUserLogin()\n for r in rs:\n print(r['userLoginId'])", "def listusers():\n\n try:\n users = User.query.order_by(User.email).all()\n click.echo(\n tabulate(\n [\n [u.username, u.email, \"admin\" if u.is_admin else None]\n for u in users\n ]\n )\n )\n except OperationalError:\n click.echo(\"Tabela de usuários inexistente...\")", "def f_sslvpn_users(self) :\n try :\n return self._f_sslvpn_users\n except Exception as e:\n raise e", "def get_profiles(self):\n profiles = [['Profile name', 'GUID']]\n r = self.system_cursor.execute('{Call wtGetProfileList()}')\n for row in r.fetchall():\n profiles.append([row.PROFILE_NAME, row.PROFILE_GUID])\n return profiles", "def getPuttyConnections():\n psessions = []\n os.system(r'regedit /a /e \"%userprofile%\\desktop\\putty-registry.reg\" HKEY_CURRENT_USER\\Software\\Simontatham')\n pdef = os.path.join(winshell.desktop(), \"putty-registry.reg\")\n r = open(pdef, 'r').read().splitlines()\n prefix = \"[HKEY_CURRENT_USER\\Software\\Simontatham\\PuTTY\\Sessions\"\n for l in r:\n if l.startswith(prefix):\n psessions.append(l[len(prefix) + 1:-1])\n return psessions", "def listusers():\n allusers = []\n with open('/etc/passwd', 'r') as pw:\n for l in pw.readlines():\n allusers.append(l.split(':')[0])\n users = [ d for d in os.listdir(\"/home\") if d in allusers ]\n return(users)", "def 
_getUsers( self, bSerial ):\n\n\t\ttry:\n\t\t\tself._oLock.acquire()\n\n\t\t\ttry:\n\t\t\t\trgoUser = []\n\n\t\t\t\trgoResult = self._libDB.query( 'SELECT bID, sName, sDescription, sPassword, bType FROM CustUser WHERE bSerial=%s ORDER BY sName', bSerial )\n\n\t\t\t\tfor oRow in rgoResult:\n\t\t\t\t\trgoUser.append(\n\t\t\t\t\t\tdb.user.UserEntry(\n\t\t\t\t\t\t\tint( oRow[0] ),\n\t\t\t\t\t\t\tstr( oRow[1] ),\n\t\t\t\t\t\t\tstr( oRow[2] ),\n\t\t\t\t\t\t\tstr( oRow[3] ),\n\t\t\t\t\t\t\tint( oRow[4] )\n\t\t\t\t\t\t)\n\t\t\t\t\t)\n\n\t\t\t\treturn copy.copy( rgoUser )\n\n\t\t\texcept Exception, e:\n\t\t\t\traise Exception, 'error loading user list from database [%s]' % e\n\n\t\tfinally:\n\t\t\tself._oLock.release()", "def get_all_users(connection):\r\n with connection:\r\n return len(connection.execute(GET_ALL_USERS).fetchall())", "def get_users(self):\n fields = ['name', ]\n return self.get_data(\"myUsers\", fields)", "def get_all_users():\n users = []\n for mv in storage.all(\"User\").values():\n users.append(mv.to_dict())\n return jsonify(users)", "def users(db):\n users = [UserFactory(), UserFactory()]\n db.session.commit()\n return users", "def get_all_users(self):\n query = \"SELECT * FROM users\"\n self.cursor.execute(query)\n result = self.cursor.fetchall()\n return result", "def register_user(self):\n if self.password!=self.confirm_pwd:\n return \"The passwords do not match\"\n for user in users_list:\n if user['email']==self.email:\n return \"The email already exists. Choose another email\"\n hashed_password=generate_password_hash(self.password)\n users_dict={\n \"id\":self.user_id,'firstname':self.first_name,\n 'lastname':self.last_name,'isAdmin':self.isAdmin,\n 'email':self.email,\"phonenumber\":self.phonenumber,\n \"username\":self.username,'password':hashed_password\n }\n users_list.append(users_dict)\n return {\n \"id\":self.user_id,'firstname':self.first_name,\n 'lastname':self.last_name,'isAdmin':self.isAdmin,\n 'email':self.email,\"username\":self.username,\n \"phonenumber\":self.phonenumber\n }", "def test_list_users(self, rabbitmq):\n assert rabbitmq.list_users() == [('user', ['administrator'])]\n rabbitmq.exec_rabbitmqctl('add_user', ['new_user', 'new_pass'])\n assert rabbitmq.list_users() == [\n ('new_user', ['']),\n ('user', ['administrator']),\n ]", "def list_users(self):\n raise NotImplementedError", "def create_users(cls):\n for p in Player.objects.exclude(race__can_play=False):\n p.get_extension(GrandChallengeUser)", "def fetch_users(self):\n users = super(type(self), self).fetch_users()\n return list(filter(self._check_active, users))", "def list_users_in_pool():\n files = []\n USERS_DIR = os.path.join(UPLOAD_DIRECTORY, \"users\")\n for filename in os.listdir(USERS_DIR):\n path = os.path.join(USERS_DIR, filename)\n if os.path.isdir(path):\n files.append(filename)\n return jsonify(files)", "def user_list(ctx):\n data = ctx.obj.get_all_users()\n output_json_data(data)", "def listUsers(self):\n return tuple(User.create({'name':name},self._modelDataManager) for name in self.pm_getUserManager().listUsers())", "def get_users_list_full(self, session):\n\n users = session.query(\n User.chat_id,\n User.is_banned,\n User.username,\n User.first_name,\n User.last_name,\n User.time_registered\n ).filter(User.is_admin==False).all()\n return users", "def _getAllProvas(self):\n return self.execSql(\"select_all_provas\")", "def fetch_all_users():\n url = \"{}/workspace/{}/users\".format(V1_API_URL, WORKSPACE_ID)\n responses = requests.get(url, headers=HEADERS)\n return [\n {\n 
\"acronym\": user[\"name\"].lower(),\n \"clockify_id\": user[\"id\"],\n \"email\": user[\"email\"].lower(),\n }\n for user in responses.json()\n ]", "def propietarios(self):\n return self.expedientepersona_set.filter(propietario=True)", "def get_users(self):\n return self.get_all_dbusers()", "def get_usernames(self) -> list:\n db_list = list(self.cursor.execute('SELECT * FROM sqlite_master'))\n users = [db_list[i][1] for i in range(0, len(db_list), 2)]\n return users", "def create_users(\n self, count=1, password=\"Please bypass hashing!\", activation=False\n ):\n users = []\n for index in range(1, count + 1):\n user = User(\n username=\"sagan{}\".format(index),\n email=\"carlsagan{}@nasa.gov\".format(index),\n password=password,\n registered_date=datetime(2000, 1, 1),\n last_login_date=datetime(2000, 1, 1),\n )\n if activation:\n user.activation = Activation()\n users.append(user)\n if hasattr(self, \"repo\"):\n self.repo.add(user)\n if count == 1:\n return users[0]\n else:\n return users", "def get_user_list():\n users_tuple = db_session.query(Chat.chatID).all()\n users_list = [user for user, in users_tuple]\n return users_list", "def getUsers(self):\n return [u[0] for u in pwd.getpwall()\n if (u[5].startswith('/home/') and u[6].endswith('sh'))]", "def get_users(self):\n return get_users(self['__store'].db, self)", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def list(conn):\n try:\n return conn.get(url='/auth-providers')['providers']\n except SystemError as e:\n raise e", "def get_users():\n return jsonify([\n users.to_dict()\n for users in models.storage.all('User').values()\n ])", "def list_users(self, user=None):\n from expfactory.database.models import Participant\n\n participants = Participant.query.all()\n users = []\n for user in participants:\n users.append(self.print_user(user))\n return users", "def to_list(self) -> list:\n return [\n self.username, self.password, self.firstname, self.surname, self.currency_id,\n int(self.has_first_sign_in), self.account_created, self.last_sign_in\n ]", "def all_Users():\n new_dict = []\n for usr in storage.all('User').values():\n new_dict.append(usr.to_dict())\n return jsonify(new_dict)", "def get_users_list(self, session):\n\n users = session.query(User.chat_id).filter(User.is_admin==False).all()\n return users", "def get_all_users():\n return User.query.all()", "def get_users(self):\n # remove some user media fields that we can't submit back\n def clean_media(entry):\n entry.pop(\"mediaid\", None)\n entry.pop(\"userid\", None)\n entry.pop(\"description\", None)\n return entry\n zabbix_users = self.conn.user.get(selectMedias=\"extend\", selectUsrgrps=\"extend\")\n zabbix_users = {user[\"alias\"].lower(): User(\n id=user[\"userid\"],\n name=user[\"name\"],\n surname=user[\"surname\"],\n alias=user[\"alias\"],\n groups=set(g[\"usrgrpid\"] for g in user[\"usrgrps\"]),\n media=[clean_media(entry) for entry in user[\"medias\"]],\n ) for user in zabbix_users}\n return zabbix_users", "def get_pi_list(app, project):\n pi_list = []\n for person in project['contacts']:\n try:\n if person['type'].lower() == \"principal investigator\":\n PI = app.db.session.query(app.PI).filter(\n app.PI.name == person['name']).first()\n try:\n email = person['email']\n except KeyError:\n email = None\n # Update/Create PI:\n if PI is None:\n PI = app.PI(name=person['name'],\n email=email)\n app.db.session.add(PI)\n else:\n if PI.name != 
person['name']:\n PI.name = person['name']\n if PI.email != email:\n PI.email = email\n app.db.session.commit()\n pi_list.append(PI)\n # if PI.name == \"Scott Rupp\":\n # print(\"\"\"\n # Name: {0}\n # Email: {1}\n # email variable: {2}\n # \"\"\".format(PI.name.encode('utf-8'),\n # PI.email.encode('utf-8'), email.encode('utf-8')))\n # exit(0)\n except KeyError:\n continue\n return pi_list", "def getUsers(self):\n logger.debug(\"Func: getUsers\")\n\n return sorted(self._usersDict.keys())", "def user_list():\n users = User.objects.all()\n return {\"users\": users}", "def get_registered_providers():\n return _instance.providers_cls.keys()", "async def list_users(self) -> List[int]:\n return [\n # row[0]\n # async for row in self.conn.execute(\n # \"select userid from tg_users\",\n # )\n ]", "def test_get_all_users(self):\n api.user.create(\n username='chuck',\n email='[email protected]',\n password='secret',\n )\n users = [user.getUserName() for user in api.user.get_users()]\n\n self.assertEqual(users, ['chuck', TEST_USER_NAME])", "def users(self):\n return self.get_data(\"users\")", "def all_users(self):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, phone, email, role, date_created \n FROM users\"\"\")\n \n user_from_db = cur.fetchall()\n if cur.rowcount >= 1: \n resp = self.serialize_user(user_from_db) \n return resp\n return None", "def init():\n create_user(app)\n get_all_user()", "def get_all_users():\n return Users.query.all()", "def user_ret():\n user_list = []\n all_objs = storage.all(\"User\")\n for obj in all_objs.values():\n user_list.append(obj.to_dict())\n return jsonify(user_list)", "def checkAndInitUsers(self):\n # config\n users = {}\n\n # iterate through all usernames\n for rUser in pwd.getpwall():\n # check userid\n if rUser.pw_uid is not None and rUser.pw_uid != \"\" and not (\"/nologin\" in rUser.pw_shell or \"/false\" in rUser.pw_shell):\n # save our user, if it mactches\n if verifyNormalUserID(rUser.pw_uid):\n # get processed usernames\n userFName = getNormalizedUserNames(pUser=rUser)[1]\n # save ()\n users[rUser.pw_name] = [rUser.pw_uid, userFName]\n\n # get user config\n timekprConfigManager = timekprConfig()\n # load user config\n timekprConfigManager.loadMainConfiguration()\n\n # go through our users\n for rUser in users:\n # get path of file\n file = os.path.join(timekprConfigManager.getTimekprConfigDir(), cons.TK_USER_CONFIG_FILE % (rUser))\n\n # check if we have config for them\n if not os.path.isfile(file):\n log.log(cons.TK_LOG_LEVEL_INFO, \"setting up user \\\"%s\\\" with id %i\" % (rUser, users[rUser][0]))\n # user config\n timekprUserConfig(timekprConfigManager.getTimekprConfigDir(), rUser).initUserConfiguration()\n # user control\n timekprUserControl(timekprConfigManager.getTimekprWorkDir(), rUser).initUserControl()\n\n log.log(cons.TK_LOG_LEVEL_DEBUG, \"finishing setting up users\")\n\n # user list\n return users", "def returnIdLogin(self):\r\n self.cursor.execute(\"SELECT USUARIO FROM LOGIN;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []", "def get_existing_users():\n # also check PublicKeys a user with no servers/networks exist\n from synnefo.userdata.models import PublicKeyPair\n from synnefo.db.models import VirtualMachine, Network\n\n keypairusernames = PublicKeyPair.objects.filter().values_list('user',\n flat=True)\n serverusernames = 
VirtualMachine.objects.filter().values_list('userid',\n flat=True)\n networkusernames = Network.objects.filter().values_list('userid',\n flat=True)\n\n return set(list(keypairusernames) + list(serverusernames) +\n list(networkusernames))", "def _get_users_list(self):\n return self.users['user_id'].tolist()", "def get_all_users(self) -> tuple:\n users_list: dict_list_type = [user.to_dict() for user in UserModel.query().fetch()]\n message: str = 'successfully retrieved active users'\n return jsonify({'status': True, 'payload': users_list, 'message': message}), 200", "def get_users(self) -> List[Dict[str, Any]]:\n users = self.user_manager.get_users()\n return [\n {\n 'user_id': user.user_id,\n 'username': user.username,\n 'created_at': user.created_at.isoformat(),\n }\n for user in users\n ]", "def generate_users(config: Config):\n users_by_id = {}\n users_by_alternative_id = {}\n for user_data in config.users:\n alternative_id = secrets.token_hex()\n user = User(user_data[\"user_id\"], user_data[\"password_hash\"], alternative_id)\n users_by_id[user.id] = user\n users_by_alternative_id[user.alternative_id] = user\n return users_by_id, users_by_alternative_id", "def get_all_users(db):\n return list(db['user'].find())", "def list_users(self):\n return self.get_admin(\"users\")", "def get_user_profiles(self):\n print 'inside get user profiles'\n print 'self.username :' + self.username\n g = GoogleAnalyticsAPI(self.username)\n if g:\n print 'GA client exists'\n user_accounts = g.get_user_accounts()\n return user_accounts.get('items')\n else:\n print 'GA client does not exist'\n return []", "def temp_users():\n temp_user_keys = list(self.redis.scan_iter(self.temp_user_search))\n\n temp_user_data = []\n\n for user_key in temp_user_keys:\n username = user_key.split(':')[1]\n\n user = self.user_manager.all_users[username]\n if not user or not user.get_prop('created_at'):\n continue\n\n temp_user_data.append(user.serialize())\n\n return {'users': temp_user_data}" ]
[ "0.633144", "0.61951846", "0.5960654", "0.5862325", "0.5837356", "0.58092964", "0.57713354", "0.5747028", "0.5717182", "0.5696331", "0.5695908", "0.56900585", "0.5685562", "0.5600017", "0.5582856", "0.55819285", "0.55757916", "0.5572261", "0.5559203", "0.55448735", "0.55222386", "0.5511207", "0.5494499", "0.5491987", "0.54850817", "0.5481661", "0.54749095", "0.5472072", "0.54674566", "0.5457481", "0.54554605", "0.54521304", "0.5447608", "0.5439225", "0.5427397", "0.5423957", "0.5422813", "0.5412363", "0.5397458", "0.5395056", "0.53862745", "0.53783995", "0.5370786", "0.53544974", "0.53541464", "0.5348186", "0.5345626", "0.53349864", "0.53309125", "0.53289306", "0.5322085", "0.532176", "0.5315134", "0.5309484", "0.5301332", "0.529488", "0.52922755", "0.5289657", "0.5289179", "0.5282265", "0.52812964", "0.52807754", "0.5280453", "0.5278894", "0.527541", "0.5273835", "0.52726614", "0.5268631", "0.5268631", "0.5268631", "0.5268631", "0.52592915", "0.52589124", "0.5247903", "0.52369267", "0.5233526", "0.5224274", "0.52162546", "0.5215574", "0.52121156", "0.5204697", "0.52022177", "0.51943344", "0.51936823", "0.5190287", "0.5188557", "0.51881707", "0.51784194", "0.5176542", "0.51704437", "0.5170343", "0.5168219", "0.51669717", "0.5165931", "0.51607955", "0.5156781", "0.51370895", "0.5136028", "0.5134066", "0.51339334", "0.5128802" ]
0.0
-1
Creates a leaf node with the given datum (an integer).
def __init__(self, data): self.data = data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_instantiate_leaf_node(self):\n try:\n LeafNode('my_label')\n except Exception:\n message = \"LeafNode instantiation failed\"\n self.fail(message)", "def make_leaves(self):\n\n curr_leaf = self.root\n i = 1\n while i < self.length:\n next_leaf = Leaf(curr_leaf.val+1)\n curr_leaf.next = next_leaf\n curr_leaf = next_leaf\n i += 1", "def create(self, val:int):\n if self.root == None:\n self.root = Node(val)\n else:\n current = self.root\n\n while True:\n if val < current.val:\n if current.left:\n current = current.left\n else:\n current.left = Node(val)\n break\n elif val > current.val:\n if current.right:\n current = current.right\n else:\n current.right = Node(val)\n break\n else:\n break", "def create_node(self, data):\n node = RealNode(data, layer=self)\n self.append_node(node)\n return node", "def create_leaves(self, parent_node, leaf_values):\n for i in range(self.base):\n # if zero_suppressed\n parent_node.child_nodes[i] = self.leaf_type(leaf_values[i], leaf_values[i], diagram_type=self.__class__)\n return parent_node, 0.0", "def create_node(self, hx, data):\n return Node(hx, data)", "def create_node(identifier, *args, **kwargs):\r\n\r\n d = node_dictionary()\r\n node_class = d[identifier]\r\n node = node_class(*args, **kwargs)\r\n return node", "def construct_tree():\n root = TreeNode(5)\n root.left = TreeNode(3)\n root.right = TreeNode(8)\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(7)\n return root", "def increment_leaves(t):\n if is_leaf(t):\n return tree(label(t) + 1)\n else:\n bs = [increment_leaves(b) for b in branches(t)]\n return tree(label(t), bs)", "def make_knode(self,i,path_len=0):\n return Knode(path_len=path_len,\\\n ident=self.nodes[i].ident,\\\n lindex=i)", "def to_leaf(self, node_id, depths):\n # print(\"Converting node {} to leaf.\".format(node_id))\n self._kill_node(self.tree.children_left[node_id], depths)\n self._kill_node(self.tree.children_right[node_id], depths)\n self._unlink_children(node_id)\n\n self.tree.feature[node_id] = INVALID_VALUE\n self.tree.impurity[node_id] = 0\n self.tree.threshold[node_id] = INVALID_VALUE\n\n #alternative: add to_leaf function in Cython\n #alternative: build new tree from scratch (add_node() exists)\n #alternative: get/edit/set state (every time or at end?)\n #alternative: compress state at end", "def n(label):\n global id\n node = pydot.Node(name=id, obj_dict=None, label=label)\n id += 1\n graph.add_node(node)\n return node", "def add_leaf(self, leaf):\n cur_tree_size = self.tree_size\n leaf_hash = self.__hasher.hash_leaf(leaf)\n with self.__db.write_batch() as wb:\n wb.put(self.__leaves_db_prefix + encode_int(cur_tree_size), leaf_hash)\n wb.put(self.__index_db_prefix + leaf_hash, encode_int(cur_tree_size))\n wb.put(self.__stats_db_prefix + 'tree_size', str(cur_tree_size + 1))\n return cur_tree_size", "def insert(self, data):\n \n def _find_parent(current, node):\n \"\"\"Recursively descend through the tree to find the node that\n should be the parent of the new node. 
Do not allow for duplicates.\n \"\"\"\n \n if node == current:\n raise ValueError(str(node.data) + \" is already in the tree.\")\n if node < current: # Travel left\n if current.left:\n return _find_parent(current.left,node)\n else:\n return current\n else: # Travel right\n if current.right:\n return _find_parent(current.right,node)\n else:\n return current\n \n n = KDTNode(data) # Make a new node\n if len(data) != self.k:\n raise ValueError(\"data must be of length \" + str(self.k))\n if not self.root:\n self.root = n # Case 1: empty tree\n n.axis = 0\n else: # Case 2: use _find_parent\n parent = _find_parent(self.root, n) # Get the parent\n if n < parent: parent.left = n # Insert the node\n else: parent.right = n\n n.prev = parent # Double link\n n.axis = (n.prev.axis + 1) % self.k\n return n", "def __init__(self, liste_leaf):\r\n list_leaf = liste_leaf\r\n while len(list_leaf) != 1:\r\n first_node = list_leaf[0]\r\n second_node = list_leaf[1]\r\n new_node = Node(first_node.freq + second_node.freq,\r\n None, first_node, second_node)\r\n del list_leaf[1]\r\n del list_leaf[0]\r\n position = 0\r\n for index in range(0, len(list_leaf), 1):\r\n if new_node.freq >= list_leaf[index].freq:\r\n position += 1\r\n list_leaf.insert(position, new_node) \r\n self.root = list_leaf[0]\r\n #root attribute of the tree giving us the whole tree\r", "def create_new_node(subgraph, prev_node, label, bb):\n return add_node(subgraph, update_node_name(prev_node.get_name(), bb-1), label=update_bb_string(label, bb-1))", "def _bddnode(root, lo, hi):\n\t# print(\"_bddnode\")\n\tif lo is hi:\n\t\tnode = lo\n\telse:\n\t\tkey = (root, lo, hi)\n\t\ttry:\n\t\t\tnode = _NODES[key]\n\t\texcept KeyError:\n\t\t\tnode = _NODES[key] = BDDNode(*key)\n\treturn node", "def __build_binary_tree(self, root, node_id, json_data):\n new_node = BinaryTree(value=json_data[node_id][\"value\"], left=None, right=None)\n if json_data[node_id][\"left\"] != None:\n new_node.left = self.__build_binary_tree(new_node, json_data[node_id][\"left\"], json_data)\n if json_data[node_id][\"right\"] != None:\n new_node.right = self.__build_binary_tree(new_node, json_data[node_id][\"right\"], json_data)\n return new_node", "def leaf_nodes(self, data=True):\n leaf_nodes = [\n node for node in self.graph.nodes()\n if self.graph.in_degree(node) != 0 and self.graph.out_degree(node) == 0\n ]\n for node in leaf_nodes:\n yield (node, self.graph.node[node]) if data else node", "def _create_new(self, key):\n return AVLTreeNode(key)", "def deserialize(self, data):\n def _build(k, parent):\n if k >= len(data):\n return None, k\n\n root = TreeNode(int(data[k]))\n val = int(data[k+1])\n parent = parent if parent else float('-inf')\n if parent <= val and val <= root.val:\n root.left, k = _build(k+1, root.val)\n val = int(data[k+1])\n parent = parent if parent else float('inf')\n if root.val <= val and val <= parent:\n root.right, k= _build(k+1, root.val)\n\n return root, k\n return _build(0, None)", "def _gen_test_tree_1():\n tree = BinaryNode(5)\n tree.left = BinaryNode(5)\n return tree", "def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> Node:\r\n return Node(graph=self._graph, index=index, name=name, external_id=external_id)", "def create_leaves(self, parent_node, leaf_values):\n from pyDD.diagram.node import Leaf\n import numpy\n parent_node.child_nodes[0] = Leaf(1.0, 1, diagram_type=MEVxDD)\n try:\n base_factor = leaf_values[numpy.nonzero(leaf_values)[0][0]]\n except IndexError:\n base_factor = 1.0\n for i in range(self.base):\n 
parent_node.child_nodes[i] = parent_node.child_nodes[0]\n parent_node.offsets[i] = leaf_values[i] / base_factor\n return parent_node, base_factor", "def _gen_test_tree_3():\n tree = BinaryNode(5)\n tree.left = BinaryNode(1)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(3)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(8)\n tree.right.right = BinaryNode(9)\n return tree", "def __init__(self, data: str):\n self.root = Node(data)\n self.node_count = 1\n self.node_of_last_computed_hash = 0", "def deserialize(self, data):\r\n deque = collections.deque(int(val) for val in data.split())\r\n\r\n def build(floor, ceiling):\r\n if deque and floor < deque[0] < ceiling:\r\n val = deque.popleft()\r\n node = TreeNode(val)\r\n node.left = build(floor, val)\r\n node.right = build(val, ceiling)\r\n return node\r\n\r\n return build(-float('inf'), float('inf'))", "def __init__(self, root_value):\n self.root = self.TreeNode(value=root_value)", "def create_internal_node( cur_tree_level ):\n\n keys = []\n for node in cur_tree_level[1:]:\n keys.append ( node.smallest() )\n return InternalNode( keys, cur_tree_level )", "def _gen_test_tree_6():\n tree = BinaryNode(20)\n tree.left = BinaryNode(10)\n tree.right = BinaryNode(30)\n tree.left.right = BinaryNode(25)\n return tree", "def add(self, d):\n new_node = Node(d)\n self.root = new_node\n self.size += 1\n return d", "def create_new_child(self,instance):\n\t\tnew_child = self.tree.makeTree(self.tree.root, self.tree)\n\t\tnew_child.utility.increment_counts(instance)\n\t\tself.tree.children.append(new_child)", "def create_ninode(b_obj=None):\n # when no b_obj is passed, it means we create a root node\n if not b_obj:\n return block_store.create_block(\"NiNode\")\n\n # get node type - some are stored as custom property of the b_obj\n try:\n n_node_type = b_obj[\"type\"]\n except KeyError:\n n_node_type = \"NiNode\"\n\n # ...others by presence of constraints\n if has_track(b_obj):\n n_node_type = \"NiBillboardNode\"\n\n # now create the node\n n_node = block_store.create_block(n_node_type, b_obj)\n\n # customize the node data, depending on type\n if n_node_type == \"NiLODNode\":\n export_range_lod_data(n_node, b_obj)\n\n return n_node", "def add_child(self, data, comment = \"\"):\n return self.add_node(Node(data = data, comment = comment))", "def _gen_test_tree_4():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(10)\n tree.right = BinaryNode(9)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n return tree", "def _create_node(\n self,\n name,\n ):\n pass", "def insert(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n if self.contains(val):\n raise ValueError('Node already in tree.')\n new_node = Node(val)\n if self._size == 0:\n self._root = new_node\n self._max_depth = 1\n self._rbal = 1\n self._lbal = 1\n else:\n current_depth = 1\n current_node = self._root\n while val is not current_node._data:\n current_depth += 1\n if val < current_node._data:\n if current_node._lkid:\n current_node = current_node._lkid\n else:\n current_node._lkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n elif val > current_node._data:\n if current_node._rkid:\n current_node = current_node._rkid\n else:\n current_node._rkid = new_node\n new_node._parent = current_node\n self._get_new_max()\n self._size += 1", "def insertLeaf(T,i):\r\n T.data.append(i) \r\n T.data.sort(key=lambda x: x.word)", "def 
insert(self, data: int) -> NoReturn:\n self._insert(data=data, node=self._root)", "def build_tree(n, d, name=defaultname):\n return build_tree_helper(1, n, 1, d, name)", "def build_UNIST_tree():\n root = LinkedBinaryTree()", "def _new_node(self):\n self._size += 1\n return self._node_factory()", "def insert_node(self, data):\n\t\tif self.root is None:\n\t\t\tself.root = Node(data)\n\t\telse:\n\t\t\tcurrent_node = self.root\n\t\t\twhile current_node.next is not None:\n\t\t\t\tcurrent_node = current_node.next\n\t\t\tcurrent_node.next = Node(data, current_node)", "def create_node(self, topogramId, id=None, x=None, y=None, data={}):\n assert type(data) is dict\n if id : assert type(id) is str\n if x : assert type(x) is float or type(x) is int\n if y : assert type(y) is float or type(x) is int\n\n el = {\n \"id\" : id,\n \"x\" : x,\n \"y\" : y\n }\n for k in data :\n el[k] = data[k]\n\n node = { \"element\" : el, \"data\" : data }\n return self.make_request(\"POST\", \"nodes\", { \"topogramId\" : topogramId, \"nodes\" : [ node ]})", "def _new_tree_node(board, current_turn, side):\n global visited\n node_key = (board.get_hash_value(), current_turn)\n if (node_key not in visited):\n visited.add(node_key)\n treenodes[node_key] = TreeNode(board, current_turn, side)\n return treenodes[node_key]", "def binary_tree():\n\n class Node(object):\n def __init__(self, data):\n self.left = None\n self.right = None\n self.data = data\n\n # Create a root\n root = Node(data=1)\n root.left = Node(data=2)\n root.right = Node(data=3)\n root.left.left = Node(data=4)\n \"\"\" Structure\n 1 <-- root\n / \\\n 2 3 \n / \n 4\n \"\"\"", "def newnode(self, name=None, num_states=0):\n # (const char* name, int num_states, net_bn* net)\n if num_states == 0:\n print(\"Warning: Set the number of states when using newnode() \" +\n \"or adding discrete levels won't work.\")\n\n cnetica.NewNode_bn.argtypes = [c_char_p, c_int, c_void_p]\n cnetica.NewNode_bn.restype = c_void_p\n return cnetica.NewNode_bn(ccharp(name), num_states, self.net)", "def build_binary_tree(self, root_id, json_data):\n self.root = self.__build_binary_tree(self.root, root_id, json_data)\n return self.root", "def create_BinaryTree(inor, preor, inStart, inEnd):\n if inStart > inEnd:\n return\n temp = BinaryTreeNode(preor[create_BinaryTree.index])\n create_BinaryTree.index += 1\n\n if inStart == inEnd:\n return temp\n\n for i in range(inStart, inEnd + 1):\n if inor[i] == temp.data:\n index = i\n\n temp.left = create_BinaryTree(inor, preor, inStart, index - 1)\n temp.right = create_BinaryTree(inor, preor, index + 1, inEnd)\n return temp", "def create_leaves(self, parent_node, leaf_values):\n # TODO: find generalization!!\n import numpy as np\n # creating the leaf object\n parent_node.child_nodes[0] = self.leaf_type(0.0, 0, diagram_type=self.__class__)\n\n # creating the offsets\n # deciding on mult or add rule\n # additive_coefficient = np.mean(leaf_values)\n # new_offsets = np.array([leaf_values[i]-additive_coefficient for i in range(self.base)])\n # max_difference = np.max(np.abs(new_offsets))\n # mult_coefficient = max_difference if max_difference != 0.0 else 1.0\n # for i in range(self.base):\n # node.child_nodes[i] = node.child_nodes[0]\n # node.offsets[i] = np.array([((new_offsets[i])/mult_coefficient), mult_coefficient], dtype='float64')\n # return node, [additive_coefficient, mult_coefficient]\n if leaf_values[0] == 0 or (leaf_values[1]-leaf_values[0] < leaf_values[1]/leaf_values[0]):\n parent_node.offsets[0] = np.array([0, 1], dtype='float64')\n for i in 
range(1, self.base, 1):\n parent_node.child_nodes[i] = parent_node.child_nodes[0]\n parent_node.offsets[i] = np.array([(leaf_values[i]-leaf_values[0]), 1], dtype='float64')\n return parent_node, [leaf_values[0], 1]\n else:\n parent_node.offsets[0] = np.array([1, 1], dtype='float64')\n for i in range(1, self.base, 1):\n parent_node.child_nodes[i] = parent_node.child_nodes[0]\n parent_node.offsets[i] = np.array([leaf_values[i]/leaf_values[0], (leaf_values[i]/leaf_values[0])],\n dtype='float64')\n return parent_node, [0, leaf_values[0]]", "def _gen_test_tree_5():\n tree = BinaryNode(30)\n tree.right = BinaryNode(30)\n return tree", "def leaf(self, value, depth, available):\n method_name = 'leaf_' + value.__class__.__name__\n method = getattr(self, method_name, self.generic_leaf)\n return method(value, depth, available)", "def huffman_leaf(letter, weight):\n return tree(weight, [tree(letter)])", "def __init__(self, n: int):\n\n self.root = [-1] * n", "def build():\n root = TreeNode(3)\n root.left = TreeNode(2)\n root.right = TreeNode(4)\n root.right.right = TreeNode(7)\n root.right.left = TreeNode(5)\n return root", "def insert_left(self, data: DataType) -> Node[DataType]:\n self.left_node = Node(data)\n self.left_node.right_node = self # connect the new node to this node\n return self.left_node", "def add_node(self, data):\n new_node = Node(data)\n if self.cur_node is not None:\n new_node.next, self.cur_node.next = self.cur_node.next, new_node\n self.cur_node = new_node\n self.length += 1\n self.cur_pos += 1\n if self.start_node is None:\n self.start_node = self.cur_node\n # print(\"Node({}) added to {}\".format(new_node.data, self.cur_pos-1))", "def create_Treeby_level(root, levelor, i, n):\n if i < n:\n temp = BinaryTreeNode(levelor[i])\n root = temp\n\n root.left = create_Treeby_level(root.left, levelor, 2 * i + 1, n)\n root.right = create_Treeby_level(root.right, levelor, 2 * i + 2, n)\n return root", "def _createSubtree(self, parent, begin, end):\n n_elem = end - begin\n if (n_elem == 1):\n node = Node(position=begin)\n node.parent = parent\n node.end = end\n return node\n\n # At least 2 values (leaves) left\n mid = int((end + begin)/2)\n node = Node(end=end)\n node.parent = parent\n node.left = self._createSubtree(node, begin, mid)\n node.right = self._createSubtree(node, mid, end)\n return node", "def get_leaf(self, leaf_index):\n return self.__leaves_db.get(encode_int(leaf_index))", "def set_leaf_node(self, leaf_value):\n\n if not self.empty:\n try:\n node_key = self.node_key\n except AttributeError:\n node_key = '_'\n raise ValueError(\n 'Cannot modify a non-empty node. 
' + \\\n 'If you meant to change type of node {}, '.format(node_key) + \\\n 'delete it first and then add an empty node with ' + \\\n 'the same key.')\n\n # check if leaf_value is a list-like object\n try:\n _ = iter(leaf_value)\n is_list = True\n except TypeError:\n is_list = False\n\n try:\n if is_list:\n leaf_value = [float(i) for i in leaf_value]\n else:\n leaf_value = float(leaf_value)\n except TypeError:\n raise TreeliteError('leaf_value parameter should be either a ' + \\\n 'single float or a list of floats')\n\n try:\n if is_list:\n _check_call(_LIB.TreeliteTreeBuilderSetLeafVectorNode(\n self.tree.handle,\n ctypes.c_int(self.node_key),\n c_array(ctypes.c_double, leaf_value),\n ctypes.c_size_t(len(leaf_value))))\n else:\n _check_call(_LIB.TreeliteTreeBuilderSetLeafNode(\n self.tree.handle,\n ctypes.c_int(self.node_key),\n ctypes.c_double(leaf_value)))\n self.empty = False\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a leaf node')", "def bst_insert(root, data):\n if root is None:\n root = Tree(d=data)\n elif data > root.data:\n root.right = bst_insert(root.right, data)\n else:\n root.left = bst_insert(root.left, data)\n return root", "def create_test_node(**kw):\n node = get_test_node(**kw)\n # Let DB generate an ID if one isn't specified explicitly.\n # Creating a node with tags or traits will raise an exception. If tags or\n # traits are not specified explicitly just delete them.\n for field in {'id', 'tags', 'traits'}:\n if field not in kw:\n del node[field]\n dbapi = db_api.get_instance()\n return dbapi.create_node(node)", "def __init__(self):\n self.root = TreeNode(None)", "def tree_node(*args, label: str = \"\", show: bool = True, parent: str = \"\", \n before: str = \"\", default_open: bool = False, open_on_double_click: bool = False, \n open_on_arrow: bool = False, leaf: bool = False, bullet: bool = False, id:str='',\n selectable: bool = False, indent=-1, pos=[]):\n try:\n widget = internal_dpg.add_tree_node(*args, show=show, parent=parent,\n before=before, default_open=default_open, \n open_on_double_click=open_on_double_click, \n open_on_arrow=open_on_arrow,\n leaf=leaf, bullet=bullet, label=label, id=id, selectable=selectable,\n indent=indent, pos=pos)\n internal_dpg.push_container_stack(widget)\n yield widget\n finally:\n internal_dpg.pop_container_stack()", "def create_test_node_tag(**kw):\n tag = get_test_node_tag(**kw)\n dbapi = db_api.get_instance()\n return dbapi.add_node_tag(tag['node_id'], tag['tag'])", "def create_node(\n self,\n node: Node,\n parameters: Any\n ) -> str:\n raise NotImplementedError", "def add_child(self, data):\n if data == self.data:\n return # node already exist\n\n if data < self.data:\n #add data to left subtree\n if self.left:\n self.left.add_child(data)\n else:\n self.left = BinarySearchTreeNode(data)\n else:\n #add data to right subtree\n if self.right:\n self.right.add_child(data)\n else:\n self.right = BinarySearchTreeNode(data)", "def __init__(self):\n self.root = TreeNode('#')", "def create_nomenclature_tree(region=settings.PRIMARY_REGION):\n new_start_date = timezone.now()\n\n prev_tree = NomenclatureTree.get_active_tree(region)\n if prev_tree:\n prev_tree.end_date = new_start_date\n prev_tree.save()\n\n tree = NomenclatureTree.objects.create(\n region=region, start_date=new_start_date, end_date=None\n )\n\n return tree", "def create_bst(self, a, left, right):\n if left > right:\n return\n mid = (left + right) / 2\n 
self.insert(a[mid])\n self.create_bst(a, left, mid - 1)\n self.create_bst(a, mid + 1, right)", "def _add_root(self, data):\n if self._root is not None:\n raise ValueError(\"Root exists\")\n self._size = 1\n self._root = self._Node(data)\n return self._make_position(self._root)", "def _gen_test_tree_2():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.left.left = BinaryNode(1)\n tree.left.right = BinaryNode(4)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n tree.right.right.right = BinaryNode(9)\n return tree", "def test_instantiate_branch_node(self):\n try:\n BranchNode('my_name')\n except Exception:\n message = \"BranchNode instantiation failed\"\n self.fail(message)", "def __init__(self, value, parent = None):\n # initialize new node\n self.value = value\n self.parent = parent\n self.left = None\n self.right = None\n self.height = 1", "def test_post_dicot_name_creates_leaf(self):\n plant = Plant.objects.create(common_name=\"mahogany\",\n subclass=\"Rosidae\",\n order=\"Sapindales\",\n family=\"Aceraceae\",\n genus=\"Acer L.\",\n species=\"Acer macrophyllum Pursh\",\n )\n request_data = {\n \"placement\": \"opposite\",\n \"blade\": \"palmately compound\",\n \"veins\": \"penniveined\",\n \"location\": \"Vancouver, BC\",\n \"date_found\": \"2014-01-01\"\n }\n response = self.client.post('/dicots/mahogany', request_data, format='json')\n created_leaf = Leaf.objects.latest('id')\n expected_data = {\n \"id\": \"leaf-%i\" % (created_leaf.id),\n \"plant\": \"plant-%i\" % (plant.id),\n \"placement\": \"opposite\",\n \"blade\": \"palmately compound\",\n \"veins\": \"penniveined\",\n \"location\": \"Vancouver, BC\",\n \"date_found\": \"2014-01-01\"\n }\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertJSONEqual(response.content, json.dumps(expected_data))", "def tree(xt, yt, t):\n green = (1, 50, 32) # leaf color\n\n rect(screen, (150, 75, 0), (xt, yt, 15 * t, 60 * t), 0) # tree's trunk\n circle(screen, green, (xt + 15 * t / 2, yt - 30 * t), 30 * t) # leaves\n circle(screen, green, (xt + 15 * t / 2 + 30 * t, yt - 30 * t + 15 * t), 30 * t) # leaves\n circle(screen, green, (xt + 15 * t / 2 - 30 * t, yt - 30 * t + 15 * t), 30 * t) # leaves\n circle(screen, green, (xt + 15 * t / 2 + 30 * t, yt - 30 * t - 20 * t), 30 * t) # leaves\n circle(screen, green, (xt + 15 * t / 2 - 30 * t, yt - 30 * t - 20 * t), 30 * t) # leaves\n circle(screen, green, (xt + 15 * t / 2, yt - 30 * t - 50 * t), 30 * t) # leaves", "def create_dummy_node(self, name):\n dummy = DummyNode(name, self)\n self.append_node(dummy)\n return dummy", "def generate_node_kml(self, d, children):\n return self.generate_leaf_kml(d, \"\\n\".join(children))", "def addTree(self, depth, fanout):\n isSwitch = depth > 0\n if isSwitch:\n node = self.addSwitch('s%s' % self.switchNum)\n self.switchNum += 1\n for _ in range(fanout):\n child = self.addTree(depth - 1, fanout)\n self.addLink(node, child)\n else:\n node = self.addHost('h%s' % self.hostNum)\n self.hostNum += 1\n return node", "def new_node(self, offset):\n # First we get the name of the node\n nameidx = self.string[offset:].find(b'\\0')\n name = self.string[offset: offset + nameidx]\n string_offset = offset + calc_length_word_align(nameidx + 1)\n node = FDTNode(name)\n return string_offset, node", "def insert(self, node , hx, data):\n #if tree is empty , return a root node\n if node is None:\n self.node_count += 1\n return self.create_node(hx, data)\n if data <= 
node.data:\n node.left = self.insert(node.left, hx, data)\n elif data > node.data:\n node.right = self.insert(node.right, hx, data)\n\n return node", "def genTree(lst, i=1):\n if lst and i <= len(lst) and lst[i-1] is not None:\n node = TreeNode(lst[i-1])\n node.left = genTree(lst, i*2)\n node.right = genTree(lst, i*2+1)\n return node", "def deserialize(self, data):\n arr = data[1:-1].split(',')\n self.index = 0\n def construct():\n \n if arr[self.index] == 'null':\n self.index += 1\n return None\n root = TreeNode(int(str(arr[self.index])))\n self.index += 1\n if self.index >= len(arr):\n return root\n root.left = construct()\n root.right = construct()\n return root\n return construct()", "def newNode(self, id, positionX, positionY, caliber, pseudo,ori):\n \n try:\n return self.server.newNode(id, str(positionX), str(positionY), str(caliber), pseudo ,str(ori) )\n except:\n return -1", "def create_empty_node():\n from linked_list import Node\n return Node()", "def deserialize(self, data):\n\n res = deque(int(v) for v in data.split())\n\n def build(low_bound, high_bound):\n if res and low_bound < res[0] < high_bound:\n v = res.popleft()\n node = TreeNode(v)\n node.left = build(low_bound, v)\n node.right = build(v, high_bound)\n return node\n\n return build(float('-infinity'), float('infinity'))", "def __init__(self):\n self.root = TreeNode(\"\")", "def __init__(self):\n self.root = TreeNode(\"\")", "def createNode(_session, _segment, _const, _type):\n node = _session.create_el(_segment, sc.SC_NODE | _const)\n #_session.appendObj2Sets(_segment, node, [node_sets[_type]])\n createPairPosPerm(_session, _segment, node_sets[_type], node, sc.SC_CONST)\n return node", "def createNode(self, name):\n return Node(name)", "def node_create(self, parent, path):\n\n q = (\"insert into nodes (parent, path) \"\n \"values (?, ?)\")\n props = (parent, path)\n return self.execute(q, props).lastrowid", "def deserialize(self, data):\n data = data.split(\",\")\n # print(data)\n self.idx = 0\n \n def dfs():\n if data[self.idx] == 'N':\n self.idx += 1\n return None\n node = TreeNode(int(data[self.idx]))\n self.idx += 1\n node.left = dfs()\n node.right = dfs()\n return node\n return dfs()", "def create_node(self, name, parent):\n\n try:\n node = self.map[name]\n return node\n except:\n node = Node(name,parent=parent.name)\n parent.children.add(node)\n\n node.parent = parent.name\n\n self.map[name] = node\n\n return node", "def test_tree_with_one_leaf_node_right_of_left_depth(balanced_3_nodes):\n balanced_3_nodes.insert(8)\n assert balanced_3_nodes.depth() == 2", "def build():\n r = TreeNode(1)\n r.left = TreeNode(2)\n r.left.left = TreeNode(4)\n r.left.right = TreeNode(5)\n\n r.right = TreeNode(3)\n\n return r\n return TreeNode(3)", "def kdtree( data, leafsize):\n\n leaves = []\n\n ndim = data.shape[0]\n ndata = data.shape[1]\n #print ndim\n #print ndata\n\n # find bounding hyper-rectangle\n hrect = numpy.zeros((2,data.shape[0]))\n hrect[0,:] = data.min(axis=1)\n hrect[1,:] = data.max(axis=1)\n\n # create root of kd-tree\n idx = numpy.argsort(data[0,:], kind='mergesort')\n data[:,:] = data[:,idx]\n splitval = data[0,ndata/2]\n\n left_hrect = hrect.copy()\n right_hrect = hrect.copy()\n left_hrect[1, 0] = splitval\n right_hrect[0, 0] = splitval\n\n tree = [(None, None, left_hrect, right_hrect, None, None)]\n\n stack = [(data[:,:ndata/2], idx[:ndata/2], 1, 0, True),\n (data[:,ndata/2:], idx[ndata/2:], 1, 0, False)]\n\n # recursively split data in halves using hyper-rectangles:\n while stack:\n\n # pop data off stack\n 
data, didx, depth, parent, leftbranch = stack.pop()\n ndata = data.shape[1]\n nodeptr = len(tree)\n\n # update parent node\n\n _didx, _data, _left_hrect, _right_hrect, left, right = tree[parent]\n\n tree[parent] = (_didx, _data, _left_hrect, _right_hrect, nodeptr, right) if leftbranch \\\n else (_didx, _data, _left_hrect, _right_hrect, left, nodeptr)\n\n # insert node in kd-tree\n\n # leaf node?\n if ndata <= leafsize:\n _didx = didx.copy()\n _data = data.copy()\n leaf = (_didx, _data, None, None, 0, 0)\n #leaf = (_data)\n tree.append(leaf)\n leaves.append(_didx)\n\n # not a leaf, split the data in two \n else:\n splitdim = depth % ndim\n idx = numpy.argsort(data[splitdim,:], kind='mergesort')\n data[:,:] = data[:,idx]\n didx = didx[idx]\n nodeptr = len(tree)\n stack.append((data[:,:ndata/2], didx[:ndata/2], depth+1, nodeptr, True))\n stack.append((data[:,ndata/2:], didx[ndata/2:], depth+1, nodeptr, False))\n splitval = data[splitdim,ndata/2]\n if leftbranch:\n left_hrect = _left_hrect.copy()\n right_hrect = _left_hrect.copy()\n else:\n left_hrect = _right_hrect.copy()\n right_hrect = _right_hrect.copy()\n left_hrect[1, splitdim] = splitval\n right_hrect[0, splitdim] = splitval\n\n #print data\n # append node to tree\n tree.append((None, None, left_hrect, right_hrect, None, None))\n\n return tree,leaves", "def straight_bst():\n bst = BST()\n for i in range(1, 10):\n bst.insert_non_balance(i)\n return bst, 10, -9", "def get_leaf_nodes(self):\n pass", "def test_binarytree_instantiate_tuple():\n input = (13, 42, 7)\n d = BinaryTree(input)\n assert isinstance(d, BinaryTree)", "def FindLeafNode(self, node, index):\n if node.start > index or node.end() <= index:\n if self.debug:\n print node.ToPrettyString();\n print index;\n raise ValueError(\"Node don't contain index\");\n if node.start == index and node.level == 0: return node;\n if not node.children:\n raise ValueError(\"Didn't find the index\");\n for child in node.children:\n if child.start <= index and child.end() > index:\n return self.FindLeafNode(child, index);\n if self.debug:\n print node.ToPrettyString();\n print index;\n print \"node.start=%d\" % node.start;\n print \"node.end=%d\" % node.end();\n raise ValueError(\"Shouldn't reach the end\");" ]
[ "0.6327141", "0.6087558", "0.60294986", "0.5793774", "0.5791541", "0.5787082", "0.57470906", "0.5739957", "0.5700532", "0.5640091", "0.55949605", "0.5552148", "0.5547106", "0.5514915", "0.5505738", "0.5499741", "0.54990375", "0.54628", "0.54603404", "0.5457154", "0.54411215", "0.5428236", "0.54150903", "0.54105085", "0.5396265", "0.53745705", "0.53583944", "0.5356501", "0.53487504", "0.534616", "0.5324315", "0.5321917", "0.5308153", "0.5304556", "0.5298271", "0.5293261", "0.5290849", "0.5288124", "0.52879006", "0.52875733", "0.52866566", "0.52770686", "0.5264426", "0.52625024", "0.52621424", "0.52607584", "0.5260636", "0.52591026", "0.52568144", "0.5248554", "0.52454084", "0.5234104", "0.5217465", "0.5214937", "0.51935005", "0.5187562", "0.5183511", "0.5183506", "0.5172871", "0.5150876", "0.515019", "0.5139949", "0.51318884", "0.5114915", "0.5111379", "0.51084626", "0.51043046", "0.50996983", "0.5091613", "0.5081052", "0.5080514", "0.5075143", "0.507337", "0.50729907", "0.50713557", "0.5066384", "0.50602806", "0.5056039", "0.50534475", "0.5052099", "0.50463545", "0.50448155", "0.5043425", "0.5038179", "0.503093", "0.5029901", "0.50280124", "0.5025713", "0.5025713", "0.50202125", "0.5018643", "0.5016364", "0.50141335", "0.50056493", "0.50043744", "0.49991837", "0.49918714", "0.4988171", "0.4984955", "0.49839026", "0.4980857" ]
0.0
-1
Returns the value of the expression.
def value(self): return self.data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def value(self) -> global___Expression:", "def value(self) -> global___Expression:", "def value(self) -> Optional[Expression]:\n return self.__value", "def value_expression(self) -> str:\n return pulumi.get(self, \"value_expression\")", "def value_expression(self) -> Optional[str]:\n return pulumi.get(self, \"value_expression\")", "def getValue(self):\n key = int(self.keyExpression.getValue())\n if key in self.dictOfExpressions:\n return self.dictOfExpressions[key].getValue()\n\n return 0.0", "def evaluateValue(compiled_expression):", "def value(self):\n return self.computeValue(self.operator,\n self.leftOperand.value(),\n self.rightOperand.value())", "def expression(self):\n return self._expression", "def getValue(self):\n return _libsbml.ASTNode_getValue(self)", "def getValue(self):\n # compute the values of my operands\n values = (op.getValue() for op in self.operands)\n # apply my operator\n return self.evaluator(*values)", "def eval(self) -> typing.Any:\n return self.expr()", "def value(self):\n self._value = self._op.value\n return self._value", "def _getvalue_expr_Constant(self, expr: ast.Constant) -> Any:\n return expr.value", "def getValue(self):\n return self.value", "def getValue(self):\n return self.value", "def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def getValue(self):\n \n return self._value", "def request_value(self) -> global___Expression:", "def value(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._val", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def getValue(self):\n return self.field.value()", "def getValue(self):\n return self.field.value()", "def _get_value(self):\n \n return self._value", "def value(self):\n\n\t\treturn self.__value", "def getval(self):\r\n return self.value", "def result(self):\r\n # Module(body=[Expr(value=...)])\r\n return self.eval_(ast.parse(self.expr).body[0].value)", "def get_val(self):\n return self.value", "def value(self):\n return self._val", "def get_value(self):\n return self._value", "def expression(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"expression\")", "def value(self):\n return self.__value", "def value(self):\n return self.__value", "def value(self):\n return self.value()._value", "def _get_value(self):\n return self.__value", "def expr(self):\n return self._express", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def value(self):\n s = str(self.input.text())\n if 
self._is_string_:\n return s\n else:\n return eval(s)", "def value(self):\n return self._value_", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self.get_data(\"value\")", "def value (self) :\n\n return self.__value__", "def vvalue(self) -> Qval:\n return self.get(self.greedy())", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")" ]
[ "0.80231833", "0.80231833", "0.7905604", "0.7728165", "0.76552755", "0.7503011", "0.74549043", "0.7429503", "0.7389446", "0.73270637", "0.7262415", "0.723994", "0.720171", "0.71606433", "0.7152678", "0.7130495", "0.710755", "0.710755", "0.7055234", "0.70376086", "0.7030717", "0.70151573", "0.70151573", "0.70151573", "0.7012379", "0.7012379", "0.7011926", "0.7011926", "0.7011926", "0.7008786", "0.7004478", "0.7004478", "0.7004478", "0.70010155", "0.70010155", "0.69911677", "0.69911677", "0.69802433", "0.6978887", "0.6978126", "0.69756883", "0.6973207", "0.69602114", "0.695078", "0.6944549", "0.69300216", "0.69300216", "0.6923896", "0.6914406", "0.6909837", "0.6906768", "0.6906768", "0.6906768", "0.6900424", "0.68902546", "0.68834764", "0.68834764", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6867384", "0.6848956", "0.6843343", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832" ]
0.0
-1
Returns the expression in prefix form.
def prefix(self): return str(self.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prefix(self):\n return str(self.operator) + \" \" + self.leftOperand.prefix() + \" \" + self.rightOperand.prefix()", "def calculate_prefix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate prefix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\tfor e in reversed(elements):\n\t\t\tif e.isdigit():\n\t\t\t\tstack.append(int(e))\n\t\t\telse:\n\t\t\t\t# this is an operator\n\t\t\t\tif (len(stack) < 2):\n\t\t\t\t\tlogger.info(\"invalid input\")\n\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\toperand2 = stack.pop()\n\t\t\t\t\tif e == \"+\":\n\t\t\t\t\t\tresult = operand1 + operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"-\":\n\t\t\t\t\t\tresult = operand1 - operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"*\":\n\t\t\t\t\t\tresult = operand1 * operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"/\":\n\t\t\t\t\t\tresult = operand1 / operand2\n\t\t\t\t\t\tstack.append(int(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.exception(\"Unrecognized operator\")\n\t\t\t\t\t\traise Exception(\"Not a valid operator\")\n\t\treturn float(stack[0])", "def infix_to_prefix(self, expr: str) -> str:\n\n # Reverse expr\n expr = reversed(expr)\n\n # Convert expr to list\n expr = list(expr)\n\n # Reverse all parantheses\n for i, e in enumerate(expr):\n if e == \"(\":\n expr[i] = \")\"\n elif e == \")\":\n expr[i] = \"(\"\n \n # Convert expr back to string\n expr = ''.join(expr)\n\n # Convert expr to postfix\n expr = self.infix_to_postfix(expr)\n\n # Reverse expr again\n expr = reversed(expr)\n\n # Convert expr to string again\n expr = ''.join(expr)\n\n # Return expr\n return expr", "def base_prefix(self):\n return self.calculation.base_prefix", "def prefix(self):\n return self[\"prefix\"]", "def prefix(self):\n return self[\"prefix\"]", "def prefix(pattern):\r\n return pattern[0:len(pattern)-1]", "def prefix(pattern):\n return pattern[0:len(pattern)-1]", "def getPrefix(self):\n return _libsbml.ASTBasePlugin_getPrefix(self)", "def prefix(self):\n return self._prefix", "def prefix(self):\n return self._prefix", "def prefix(self):\n return self._prefix", "def getPrefix(self):\n raise NotImplementedError", "def result_prefix(self):\n return self.calculation.result_prefix", "def get_prefix(self):\n return self.prefix", "def get_prefix(self):\n return self._prefix", "def get_prefix(self):\n return self._prefix", "def getPrefix(self):\n return _libsbml.MultiASTPlugin_getPrefix(self)", "def getPrefix(self):\n return _libsbml.SBase_getPrefix(self)", "def getPrefix(self, *args):\n return _libsbml.XMLNamespaces_getPrefix(self, *args)", "def getPrefix(self):\n return _libsbml.XMLToken_getPrefix(self)", "def getPrefixedName(self, *args):\n return _libsbml.XMLAttributes_getPrefixedName(self, *args)", "def getPrefix(self, *args):\n return _libsbml.XMLAttributes_getPrefix(self, *args)", "def prefix(self):\n return self._path_prefix", "def set_prefix_expression(self, expression, clear_args = True):\n if expression and type(expression) is not str:\n raise TypeError('expression should be either string or None or False')\n if clear_args:\n self._prefix_kwargs = {}\n self._prefix_expression = expression", "def _expand_prefix(prefix, configs):\n return subst_vars(prefix, configs)", "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "def prefix(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"prefix\")", "def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix", "def getPrefixedName(self):\n return _libsbml.XMLTriple_getPrefixedName(self)", "def getAttrPrefixedName(self, *args):\n return _libsbml.XMLToken_getAttrPrefixedName(self, *args)", "def prefix(name):\n def rule(symbol):\n return symbol.startswith(name) or None\n return rule", "def getAttrPrefix(self, *args):\n return _libsbml.XMLToken_getAttrPrefix(self, *args)", "def command_with_prefix(self):\n return self.endpoint_prefix.rstrip('/') + self.command", "def getPrefix(self):\n return _libsbml.XMLTriple_getPrefix(self)", "def prefixed(self, prefix):\n if not prefix:\n return self.clone()\n else:\n return self.using(join(prefix, self))", "def index_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"index_prefix\")", "def index_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"index_prefix\")", "def getPrefix(self):\n return _libsbml.SBasePlugin_getPrefix(self)", "def infix(self):\n return \"(\" + self.leftOperand.infix() + \" \" + str(self.operator) + \" \" + self.rightOperand.infix() + \")\"", "def getNamespacePrefix(self, *args):\n return _libsbml.XMLToken_getNamespacePrefix(self, *args)", "def default_prefix(self) -> str:", "def _django_prefix():\n return _interpolate(DJANGO_PREFIX)", "def getPrefixPattern(self):\n return self.getOrDefault(\"prefixPattern\")", "def prefix_to_tree(self, expr: str, delimeter: str = None, node_name: str = \"base\") -> Tree:\n # Create a tree\n tree = Tree()\n\n # Convert the expression to a deque\n expr_deque = deque(expr.split(delimeter))\n\n # Create a base node\n base_node = tree.create_node(node_name,0)\n\n # Start the add loop\n tree, count = self._add_prefix_to_node(expr_deque, tree, base_node, 1)\n\n # Return tree\n return tree", "def getPrefix(self):\n return( self.id.split('.')[0] )", "def expand(self, expression):\n if not expression:\n return b\"\"\n if expression[0] in self._operators:\n operator, expression = expression[:1], expression[1:]\n if operator == b\"+\":\n return self._expand(expression, reserved)\n elif operator == b\"#\":\n return self._expand(expression, reserved, prefix=b\"#\")\n elif operator == b\".\":\n return self._expand(expression, prefix=b\".\", separator=b\".\")\n elif operator == b\"/\":\n return self._expand(expression, prefix=b\"/\", separator=b\"/\")\n elif operator == b\";\":\n return self._expand(expression, prefix=b\";\", separator=b\";\",\n with_keys=True, trim_empty_equals=True)\n elif operator == b\"?\":\n return self._expand(expression, prefix=b\"?\", separator=b\"&\",\n with_keys=True)\n elif operator == b\"&\":\n return self._expand(expression, prefix=b\"&\", 
separator=b\"&\",\n with_keys=True)\n else:\n return self._expand(expression)", "def _prefix(self):\n name = self.__class__.__name__\n return name[:2] + ''.join(c for c in name if c.isupper())[1:]", "def expr(self):\n return self._express", "def _getPrefix(self, namespaceURI):\r\n prefixDict = self._getPrefixDict()\r\n if prefixDict.has_key(namespaceURI):\r\n prefix = prefixDict[namespaceURI]\r\n else:\r\n prefix = 'ns1'\r\n while prefix in prefixDict.values():\r\n prefix = 'ns%d' %int(prefix[-1]) + 1\r\n prefixDict[namespaceURI] = prefix\r\n return prefix", "def value_prefix(self) -> str:\n return self._value_prefix", "def name_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name_prefix\")", "def removeprefix(self, x) -> String:\n pass", "def infix2prefix(self, lst):\n stk = []\n pre = []\n for elt in reversed(lst):\n if elt.isdigit():\n pre.append(elt)\n elif elt == \")\":\n stk.append(elt)\n elif elt == \"(\":\n while stk and stk[-1] != \")\":\n pre.append(stk.pop())\n stk.pop()\n else:\n while stk and self.precedence(elt) < self.precedence(stk[-1]): # < rather than <=\n pre.append(stk.pop())\n stk.append(elt)\n\n while stk:\n pre.append(stk.pop())\n\n pre.reverse()\n return pre", "def get_var_prefix(self):\n return ''", "def expression_phrase(self):\n return self._expression_phrase", "def prefix_to_postfix(input_str): # prefix requires that all operators precede the two operands that they work on\n\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n if input_str is None: raise ValueError\n # split input string into list\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # initialize output list\n output_list = []\n #print(\"OUT SIZE \", len(output_list))\n # initialize operator stack\n operator_stack = Stack(len(term_list)//3+1)\n for i in range(len(term_list)):\n term = term_list[i]\n # prefix should begin with an operator otherwise raise Exception\n if i == 0:\n if operator_present(term) is True: operator_stack.push(term)\n else: raise PostfixFormatException()\n # Check for operator\n elif operator_present(term): \n operator_stack.push(term)\n # check for operand\n elif operand_present(term):\n output_list.append(term)\n # if previous two terms in output list were operands, pop operator stack to output list once\n if operand_present(term_list[i-1]):\n output_list.append(operator_stack.pop())\n # for every three operands there should be an additional operator\n if operand_present(term_list[i-3]) and operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')", "def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')", "def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')", "def prefix(self, s):\n\t\tif not s:\n\t\t\treturn self.value\n\t\thead, tail = s[0], s[1:]\n\t\tif head not in self.root:\n\t\t\treturn False # Not contained\n\t\tnode = self.root[head]\n\t\treturn node.prefix(tail)", "def _extract_immediate_prefix(obj_key:str)->str:\n immed_prefix = \"\"\n if len(obj_key.split(\"/\")) > 1:\n immed_prefix = obj_key.split(\"/\")[-2]\n \n 
return immed_prefix", "def conan_prefix(self):\n return self._conan_prefix", "def print_prefix(self):\n if self.is_empty():\n return \"\"\n else:\n ch = str(self.root_value())\n if self.is_leaf():\n return ch\n else:\n if self.has_left():\n if self.has_right():\n return ch + \" \" + self.get_left().print_prefix() + \" \" + self.get_right().print_prefix()\n else:\n return ch + \" \" + self.get_left().print_prefix()\n else:\n return ch + \" \" + self.get_right().print_prefix()", "def expression(self):\n\n result = u\"{}({}\".format(self.function.lower(),\n self.metric_name)\n\n if self.dimensions_str:\n result += u\"{{{}}}\".format(self.dimensions_str)\n\n if self.deterministic:\n result += u\", deterministic\"\n\n if self.period:\n result += u\", {}\".format(str(self.period))\n\n result += u\")\"\n\n result += u\" {} {}\".format(self.operator,\n str(self.threshold))\n\n if self.periods:\n result += u\" times {}\".format(str(self.periods))\n\n return result", "def prefix_to_ns(self, prefix):\n defin = self.module.i_ctx.get_module(\n self.module.i_prefixes[prefix][0])\n return defin.search_one(\"namespace\").arg", "def expression(self):\n return self._expression", "def add_prefix(self, field_name):\r\n return self.prefix and ('%s.%s' % (self.prefix, field_name)) or field_name", "def _prefix_and(*exprs, **kwargs):\n anded = ' AND '.join('(%s)' % expr for expr in exprs if expr)\n if len(anded) == 0:\n return ''\n return kwargs.get('prefix', 'WHERE ') + anded", "def entity_prefix(self):", "def name(self):\n return self.prefix", "def getPrefix(self):\n return \"20gig\"", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def series_add_prefix(series, prefix):\n f = partial(\"{prefix}{}\".format, prefix=prefix)\n\n return series.rename(index=f)", "def infix_to_postfix(self, expr: str) -> str:\n\n # The stack that we will be performing operations on\n stack: list[str] = []\n\n # The output\n output: str = \"\"\n\n # We always need surrounding parentheses\n expr = f\"({expr})\"\n\n # The tokenized expression\n expr = self.tokenize_expr(expr)\n\n\n \n # For every token in expression\n for token in expr:\n # Check what token it is\n if token == \"(\":\n # If it is a (, then append to stack\n stack.append(\"(\")\n elif token == \")\":\n # If it is a ), then iterate over stack\n while stack[-1] != '(':\n # Popping the last item from stack, to output\n # Include a trailing space\n # Until the last item in the stack is a (\n output += f\"{stack.pop()} \"\n # Pop the last ( from the stack\n stack.pop()\n elif re.match(r\"[a-zA-Z_][a-zA-Z0-9_]*\", token):\n # If it matches a name/variable\n # Append to output with a trailing space\n output += f\"{token} \"\n elif re.match(r\"\\d+\",token):\n # If it is a number\n # Then append with a trailing space\n output += f\"{token} \"\n else:\n if self.is_token(token):\n # If it is a token\n # Pop it from the stack while\n # It's priority is smaller than\n # the last priority of the stack\n # Put it into output with a trailing space\n while self.get_token_priority(token) <= self.get_token_priority(stack[-1]):\n output += f\"{stack.pop()} \"\n # And append token to stack\n stack.append(token)\n # Return output\n return output", "def Prefix(self):\n ret = 
libxml2mod.xmlTextReaderConstPrefix(self._o)\n return ret", "def prefix(self, xform):\n tail = self\n while tail.prev != None:\n tail = tail.prev\n tail.prev = xform", "def prefix(num):\n # determine which range it lies in, r1/r2 means reduction 1 or reduction 2\n divisors = [1e-24 * pow(10, 3 * x) for x in range(17)]\n prefixes = list(reversed(['Yotta (Y)', 'Zetta (Z)', 'Exa (E)', 'Peta (P)', 'Tera (T)', 'Giga (G)', 'Mega (M)',\n 'Kilo (K)', '', 'Milli (m)', 'Micro ($\\mu$)', 'Nano (n)', 'Pico (p)', 'Femto (f)',\n 'Atto (a)', 'Zepto (z)', 'Yocto (y)']))\n exp = np.floor(np.log10(np.abs(num)))\n if exp < 0:\n exp -= 3\n expIndex = int(exp / 3) + 8\n expIndex = 0 if expIndex < 0 else expIndex\n expIndex = len(prefixes)-1 if expIndex >= len(prefixes) else expIndex\n r1 = prefixes[expIndex]\n num1 = num / divisors[expIndex]\n if expIndex != len(prefixes):\n r2 = prefixes[expIndex + 1]\n num2 = num / divisors[expIndex + 1]\n else:\n num2 = None\n retStr = str(num1) + ' ' + r1\n if num2 is not None:\n retStr += '\\nor\\n' + str(num2) + ' ' + r2\n return retStr", "def get_prefix(coef, bias=0.1, omit=None):\n if omit is None:\n omit = num_prefixes\n\n values = [val for key, val in six.iteritems(prefixes) if key not in omit]\n coefs = nm.array(values, dtype=nm.float64)\n coefs.sort()\n ii = nm.searchsorted(coefs, bias*coef, side='left')\n\n if ii == len(coefs):\n ii = ii - 1\n\n cc = coefs[ii]\n prefix = inv_prefixes[cc]\n mul = coef / cc\n\n return prefix, mul", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def add_prefix(prefix = \"Peptides\"):\n var_list = gen_cell_lines_states_replicates()\n prefix = prefix\n res_list = []\n for i in var_list:\n unit_str = prefix + \" \"\n unit_str += i\n res_list.append(unit_str)\n return res_list", "def add_prefix(self, field_name):\n return self.prefix and ('%s-%s' % (self.prefix, field_name)) or field_name", "def prefixsrc(self):\n return self[\"prefixsrc\"]", "def prefixsrc(self):\n return self[\"prefixsrc\"]", "def test_evaluate_starts_with_expression(self):\n value = self.evaluate_common(\"startswith('startswith','start')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is True)\n value = self.evaluate_common(\"startswith('startswith','end')\")\n self.assertTrue(value.value is False)\n value = self.evaluate_common(\"startswith('startswith','Start')\")\n # not case insensitive\n self.assertTrue(value.value is False)\n try:\n value = self.evaluate_common(\"startswith('3.14',3)\")\n self.fail(\"integer as prefix\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"startswith('3.14')\")\n self.fail(\"1 parameter\")\n except odata.EvaluationError:\n pass", "def get_onr_prefix_postfix(self) -> Tuple[str, str]:\n op2 = self.op2\n prefix = ''\n postfix = ''\n if op2.table_name in [b'ONRGY1', b'ONRGY2', b'ONRGY']:\n prefix = 'strain_energy.'\n elif op2.table_name in [b'RANEATC']: #, b'OSTRMS1C']:\n op2.format_code = 1\n op2.sort_bits[0] = 0 # real\n prefix = 'RANEATC.'\n elif op2.table_name in [b'RANCONS']: #, b'OSTRMS1C']:\n op2.format_code = 1\n op2.sort_bits[0] = 0 # real\n prefix = 'RANCONS.'\n else:\n raise NotImplementedError(op2.table_name)\n op2.data_code['sort_bits'] = op2.sort_bits\n op2.data_code['nonlinear_factor'] = op2.nonlinear_factor\n return prefix, postfix", "def prefix_format(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, 
\"prefix_format\")", "def prefix_format(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix_format\")", "def set_Prefix(self, value):\n super(DescribeEvaluationsInputSet, self)._set_input('Prefix', value)", "def get_prefix_url(request):", "def prefix_value(s):\n forbidden = forbidden_chars.intersection(s)\n if forbidden:\n raise ValueError('%(s)s contains forbidden characters'\n ' (%(forbidden)s)'\n % locals())\n stripped = s.strip('/')\n if stripped:\n return stripped.join('//')\n return '/'", "def autoprefix(prefix):\n pl = len(prefix)\n msg = '%%(s)r: expected some name after %(prefix)r!' % locals()\n def checker(s):\n if s.startswith(prefix):\n tail = s[pl:]\n if tail:\n return prefix + dotted_name(tail)\n else:\n raise ValueError(msg % locals())\n elif s:\n return prefix + dotted_name(s)\n else:\n return ''\n return checker", "def prep_equation(self):\n \n # This transforms the equation into an expression for sympy.\n prepped_equation = self.equation.replace(\"=\", \"-(\") + \")\"\n\n # This transforms the equation string into a sympy-readable equation.\n transformations = standard_transformations + (implicit_multiplication_application,)\n prepped_equation = parse_expr(prepped_equation, transformations=transformations)\n\n return prepped_equation", "def prefix(self):\n return self._get_storage().prefix", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")" ]
[ "0.7265404", "0.710734", "0.6866923", "0.6425482", "0.63313454", "0.63313454", "0.63225156", "0.63132906", "0.6267651", "0.62378424", "0.62378424", "0.62378424", "0.6218811", "0.6152476", "0.6141088", "0.6069823", "0.6069823", "0.6055349", "0.60520095", "0.6048974", "0.60268766", "0.59823626", "0.5966694", "0.5940881", "0.591457", "0.5897211", "0.58804023", "0.58804023", "0.5866765", "0.58618367", "0.58404773", "0.58388597", "0.5833756", "0.58322996", "0.5826342", "0.57992333", "0.5767351", "0.5767351", "0.5758095", "0.5750478", "0.57393223", "0.573083", "0.5727359", "0.57233435", "0.5701996", "0.5688843", "0.5684467", "0.56490076", "0.56477404", "0.5637599", "0.56314707", "0.5614335", "0.5610098", "0.56019115", "0.5563935", "0.5563717", "0.55551225", "0.554288", "0.554288", "0.554288", "0.5540493", "0.5538691", "0.5536378", "0.55265903", "0.5521938", "0.5520736", "0.55043054", "0.5503633", "0.54964536", "0.54915786", "0.54886484", "0.5483853", "0.54739976", "0.54739976", "0.54739976", "0.54739976", "0.5469625", "0.54488176", "0.54435277", "0.543421", "0.54229647", "0.5412241", "0.54089326", "0.54089326", "0.5406004", "0.5381731", "0.5380758", "0.5380758", "0.53706694", "0.5367419", "0.53652596", "0.53652596", "0.5363677", "0.5357183", "0.5343013", "0.53224343", "0.5301454", "0.5295515", "0.5294408", "0.5294408" ]
0.54560906
77
Returns the expression in infix form.
def infix(self): return str(self.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def infix(self):\n return \"(\" + self.leftOperand.infix() + \" \" + str(self.operator) + \" \" + self.rightOperand.infix() + \")\"", "def calculate_infix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate infix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\ttry:\n\t\t\tfor e in elements:\n\t\t\t\tif not e.isdigit() and e != \")\":\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and not cls.is_operator(stack[-1]):\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and cls.is_operator(stack[-1]):\n\t\t\t\t\toperator = stack.pop()\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(e), operator)\n\t\t\t\t\tif stack[-1] == \"(\":\n\t\t\t\t\t\tstack.append(str(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\t\tbreak\n\t\t\t\tif e == \")\":\n\t\t\t\t\tvalue = stack.pop()\n\t\t\t\t\tob = stack.pop()\n\t\t\t\t\tif (ob == \"(\"):\n\t\t\t\t\t\tstack.append(str(value))\n\t\t\t\t\telif (cls.is_operator(ob)):\n\t\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(value), ob)\n\t\t\t\t\t\tstack.append(str(result))\n\n\t\t\tanswer = float(stack[0])\n\t\t\tlogger.info(f\"the answe is {answer}\")\n\t\t\treturn answer\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Exception from the infix function\")", "def evaluate_infix(string):\n return postfix(infix_to_postfix(string))", "def infix_to_postfix(infix_expr):\n # Append adds new item to list\n # Concat creates a new list every time instead\n\n opstack = StackArray()\n res = []\n lstr = infix_expr.split()\n # l_para = r_para = 0\n # operator precedence dict\n prec = { # higher val = higher prec\n \"(\" : 4,\n \"^\" : 3, # r-to-l (i.e. 2^3^2 = 2^(3^2) )\n \"~\" : 3, # right-to-left (i.e. 
-3^2 = -9)\n # '*/+-' are associated left to right\n \"*\" : 2,\n \"/\" : 2,\n \"+\" : 1,\n \"-\" : 1\n }\n for token in lstr:\n if token[0] in '0123456789':\n res.append(token)\n # not opstack.is_empty() guards against IndexError on empty peek\n if not opstack.is_empty() and opstack.peek() == '^':\n res.append(opstack.pop())\n if not opstack.is_empty() and opstack.peek() == '~':\n res.append(opstack.pop())\n elif token == '(':\n # l_para += 1\n opstack.push(token)\n elif token == ')':\n # r_para += 1\n # opstack can't be empty for proper formatted input\n while opstack.peek() != '(':\n res.append(opstack.pop())\n opstack.pop() # remove left paran '('\n else: # token is ^ ~ * / + -: <-- operators\n while not opstack.is_empty() and prec[token] <= prec[opstack.peek()]:\n if opstack.peek() == '(':\n break\n elif token == '^' and opstack.peek() == '~':\n break\n else:\n res.append(opstack.pop())\n opstack.push(token)\n # if l_para != r_para:\n # raise SyntaxError\n while not opstack.is_empty():\n res.append(opstack.pop())\n res = \" \".join(res)\n res.strip()\n return res", "def infix_to_postfix(self, expr: str) -> str:\n\n # The stack that we will be performing operations on\n stack: list[str] = []\n\n # The output\n output: str = \"\"\n\n # We always need surrounding parentheses\n expr = f\"({expr})\"\n\n # The tokenized expression\n expr = self.tokenize_expr(expr)\n\n\n \n # For every token in expression\n for token in expr:\n # Check what token it is\n if token == \"(\":\n # If it is a (, then append to stack\n stack.append(\"(\")\n elif token == \")\":\n # If it is a ), then iterate over stack\n while stack[-1] != '(':\n # Popping the last item from stack, to output\n # Include a trailing space\n # Until the last item in the stack is a (\n output += f\"{stack.pop()} \"\n # Pop the last ( from the stack\n stack.pop()\n elif re.match(r\"[a-zA-Z_][a-zA-Z0-9_]*\", token):\n # If it matches a name/variable\n # Append to output with a trailing space\n output += f\"{token} \"\n elif re.match(r\"\\d+\",token):\n # If it is a number\n # Then append with a trailing space\n output += f\"{token} \"\n else:\n if self.is_token(token):\n # If it is a token\n # Pop it from the stack while\n # It's priority is smaller than\n # the last priority of the stack\n # Put it into output with a trailing space\n while self.get_token_priority(token) <= self.get_token_priority(stack[-1]):\n output += f\"{stack.pop()} \"\n # And append token to stack\n stack.append(token)\n # Return output\n return output", "def expr(self):\n return self._express", "def parse_infix(input: str) -> Node:\n parsed = ParsedString(input).tokenize()\n ans = parse_e(parsed)\n return ans", "def infix_to_prefix(self, expr: str) -> str:\n\n # Reverse expr\n expr = reversed(expr)\n\n # Convert expr to list\n expr = list(expr)\n\n # Reverse all parantheses\n for i, e in enumerate(expr):\n if e == \"(\":\n expr[i] = \")\"\n elif e == \")\":\n expr[i] = \"(\"\n \n # Convert expr back to string\n expr = ''.join(expr)\n\n # Convert expr to postfix\n expr = self.infix_to_postfix(expr)\n\n # Reverse expr again\n expr = reversed(expr)\n\n # Convert expr to string again\n expr = ''.join(expr)\n\n # Return expr\n return expr", "def infix_to_postfix(infix:str) -> str:\n stack = deque()\n precedence = {'+':1, '-':1,\n '*':2, '/':2,\n '^':3, '(':-9\n }\n output = \"\"\n for ch in infix:\n if ch not in {'+', '-', '*', '/', '^', '(', ')'}:\n output += ch\n elif ch == '(':\n stack.append(ch)\n elif ch == ')':\n while len(stack) > 0 and\\\n stack[-1] != '(':\n 
output += stack.pop()\n stack.pop()\n else:\n while len(stack) > 0 and\\\n precedence[stack[-1]] >= precedence[ch]:\n output += stack.pop()\n stack.append(ch)\n while len(stack) > 0:\n output += stack.pop()\n return output", "def print_infix(self):\n if self.is_empty():\n return \"\"\n else:\n if self.is_leaf():\n return str(self.root_value())\n else:\n if self.has_left():\n if self.has_right():\n return str(self.get_left().print_infix()) + \" \" + str(self.root_value()) + \" \" \\\n + str(self.get_right().print_infix())\n else:\n return str(self.get_left().print_infix()) + \" \" + str(self.root_value())\n else:\n return str(self.root_value()) + \" \" + str(self.get_right().print_infix())", "def infix_to_postfix(expr):\n # you may find the following precedence dictionary useful\n prec = {'*': 2, '/': 2,\n '+': 1, '-': 1}\n ops = Stack()\n postfix = []\n toks = expr.split()\n ### BEGIN SOLUTION\n opp = {'*', '/','+', '-'}\n for x in toks:\n if str.isdigit(x):\n postfix.append(x)\n elif ops.empty() or ops.peek() == '(':\n ops.push(x)\n elif x == '(':\n ops.push(x)\n elif x == ')':\n while not ops.empty():\n temp = ops.pop()\n if temp == '(':\n break\n else:\n postfix.append(temp)\n elif x in opp:\n while True:\n if prec.get(x) > prec.get(ops.peek()):\n ops.push(x)\n break\n elif prec.get(x) == prec.get(ops.peek()):\n postfix.append(ops.pop())\n ops.push(x)\n break\n elif prec.get(x) < prec.get(ops.peek()):\n postfix.append(ops.pop())\n if ops.empty():\n ops.push(x)\n break\n elif ops.empty():\n break\n\n while True:\n if not ops.empty():\n postfix.append(ops.pop())\n else:\n break\n\n ### END SOLUTION\n return ' '.join(str(x) for x in postfix)", "def infix_to_postfix(self, exp):\n\n try:\n for i in exp:\n #if the character is an operand output it\n if self.is_operand(i):\n self.postfix.append(i)\n\n #if the character is '(' push it\n elif i is '(':\n self.push('(')\n\n elif i is ')':\n #if the character is ')\" pop until we encounter '(' in the stack\n while not self.isEmpty() and self.peek() is not '(':\n self.postfix.append(self.pop())\n if not self.isEmpty() and self.peek() is not '(':\n return -1\n else:\n self.pop()\n\n #if an operator is encountered\n else:\n while not self.isEmpty() and self.peek() is not '(' and self.not_greater(i):\n self.postfix.append(self.pop())\n self.push(i)\n while not self.isEmpty():\n self.postfix.append(self.pop())\n\n return ''.join(self.postfix)\n\n except Exception as e:\n print(\"Error occurred while performing infix to postfix conversion :\", e)\n traceback.print_exc()\n return -1", "def infixToRPN(expression):\n stack = Stack()\n RPNList = []\n tokens = expression.split()\n spaces = True\n\n # If no spaces in expression then push each char in a tokens list\n if len(tokens) == 1:\n spaces = False\n tokens = [char for char in expression]\n\n for token in tokens:\n if token in alphabet or token in numbers:\n RPNList.append(token)\n elif token == '(':\n stack.push(token)\n elif token == ')':\n top = stack.pop()\n while top != '(':\n RPNList.append(top)\n top = stack.pop()\n else:\n while (not stack.isEmpty()) and (precedence[stack.peek()] >= precedence[token]):\n RPNList.append(stack.pop())\n stack.push(token)\n\n while not stack.isEmpty():\n RPNList.append(stack.pop())\n\n if spaces:\n return \" \".join(RPNList)\n else:\n return \"\".join(RPNList)", "def infix_to_postfix(expr):\n ops = Stack()\n postfix = []\n toks = expr.split()\n def tests(chr):\n if chr.isdigit():\n postfix.append(chr)\n\n elif chr == '(':\n ops.push('(')\n\n elif ops.peek() == '(' or 
ops.empty():\n ops.push(chr)\n\n elif chr ==')':\n while ops.peek() != \"(\":\n postfix.append(ops.pop())\n ops.pop()\n\n elif chr in prec and prec[chr] > prec[ops.peek()]:\n ops.push(chr)\n\n elif chr in prec and prec[chr] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(chr)\n\n elif chr in prec and prec[chr] < prec[ops.peek()]:\n postfix.append(ops.pop())\n tests(chr)\n\n for tok in toks:\n tests(tok)\n\n\n while not ops.empty():\n postfix.append(ops.pop())\n\n\n return ' '.join(postfix)", "def expression(self):\n return self._expression", "def toPostfix(infix):\n output = \"\" # Output stack - the numbers in our expression\n operators = \"\" # Operator stack (using string for ease but could be a list)\n precedence = {\"*\": 100, \"/\": 90, \"+\": 80, \"-\": 70, \"(\": 60, \")\": 50} # Operator precedence dictionary - operator characters mapped to an arbitrary numeric value representing their precedence (BOMDAS)\n \n #Loop through characters\n for c in infix:\n #If c is a number\n if (c.isdigit()):\n output += c\n #Else if c is a function - ignoring these for now\n #Else if c is an operator - + - * / might account for x and division ASCII symbol later\n elif c in {\"+\", \"-\", \"*\", \"/\"}:\n # While there is still an operator left at the top of the stack\n # AND the operator at the top of the stack has greater precedence\n # OR the operator at the top of the stack has equal precedence and the token is left associative (don't know what this means, ignoring for now)\n # AND that operator is not a left parenthesis '('\n # Note: \\ tells python that a statement will continue on to the next line\n while len(operators) > 0 and operators[-1] != '(' and precedence[operators[-1]] > precedence[c]:\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # Push it onto the operator stack\n operators += c\n # Else if token is a left parenthesis (\n elif c == \"(\":\n # Push c to operator stack\n operators += c\n elif c == \")\":\n while operators[-1] != \"(\":\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # If there is a left bracket at the top of the stack, remove it\n if operators[-1] == '(':\n # Pop the operator from the operator stack and discard it\n operators = operators[:-1]\n # if there is a function token at the top of the operator stack... 
(Ignoring this for now)\n \n # If there are any operators left in the stack, append to output\n while len(operators) > 0:\n # Push operator from top of stack to output\n output += operators[-1]\n # Remove top operator from stack\n operators = operators[:-1]\n return output", "def toInfix(self):\n return _libsbml.Association_toInfix(self)", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def toInfix(self, usingId=False):\n return _libsbml.FbcAnd_toInfix(self, usingId)", "def infix_to_assembly(formula: str) -> str:\n asm = \"\"\n postfix = infix_to_postfix(formula)\n for value in postfix:\n if value == \"+\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nadd ax, bx\"\n asm += \"\\npush ax\"\n elif value == \"-\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nsub ax, bx\"\n asm += \"\\npush ax\"\n elif value == \"*\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nmul bx\"\n asm += \"\\npush ax\"\n elif value == \"/\":\n asm += \"\\nmov dx, 0h\"\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\ndiv bx\"\n asm += \"\\npush ax\"\n else:\n # asm += \"\\npush 0\" + value + \"h\"\n # the line above is commented out as the emulator has a bug\n # which pushes immediate 0bbh as 0ffbbh to the stack\n asm += \"\\nmov cx, 0\" + value + \"h\"\n asm += \"\\npush cx\"\n return asm", "def resolve_expression(self):\n stack = list()\n\n for element in self._get_postfix_notation():\n if element in self.OPERATORS: # get two elements from top of stack, push result of operation on stack\n operand_a = stack.pop()\n operand_b = stack.pop()\n value = self._calculate(operand_b, operand_a, element)\n stack.append(value)\n else: # push to stack if number\n stack.append(element)\n\n return stack.pop()", "def expression(self):\n assert not self._handle_used\n self._expression_used = True\n return self._expression", "def _get_postfix_notation(self):\n postfix, operators_stack = list(), list() # initialize postfix list and auxiliary stack\n\n for element in self.expression.split():\n if element in self.OPERATORS:\n if operators_stack:\n # while stack isn't empty and \"stack top\" is stronger(e.g. 
multiplication is stronger than addition)\n # move \"stack top\" into postfix list\n while operators_stack \\\n and operators_stack[-1] in self.OPERATORS \\\n and self.OPERATOR_WEIGHT[operators_stack[-1]] >= self.OPERATOR_WEIGHT[element]:\n postfix.append(operators_stack.pop())\n\n operators_stack.append(element)\n\n elif element == self.BRACKET_LEFT:\n operators_stack.append(element)\n\n elif element == self.BRACKET_RIGHT:\n # searching for left bracket on stack, moving \"stack Top\" to postfix list\n while operators_stack and operators_stack[-1] != self.BRACKET_LEFT:\n postfix.append(operators_stack.pop())\n operators_stack.pop() # remove left bracket\n\n else: # numbers always goes into postfix list\n postfix.append(self._get_number_from_string(element))\n\n if operators_stack: # move others stack elements to postfix list\n postfix.extend(reversed(operators_stack))\n\n return postfix", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def infix_to_postfix(string):\n \n # Validate and tokenize the string\n tokens = validate(string)\n \n # Initialize the stack\n s = Stack()\n\n # Ready the final postfix expression\n postfix = ''\n \n # List of operators that have to be handled\n operators = ['+', '-', '*', '/', '^', 'sqrt', 'u-', '(', ')']\n \n # Iterate through tokens\n for token in tokens:\n if token in operators:\n if token in ['sqrt', 'u-']:\n # Square root and unary minus have the highest precendence. So\n # they get pushed on to the stack immediately\n s.push(token)\n elif token == '^':\n top = s.peek()\n while top in ['sqrt', 'u-']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['*', '/']:\n # Multiplication and division have the same precedence. Order\n # is determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['+', '-']:\n # Addition and subtraction have the same precedence. 
Order is\n # determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^', '*', '/']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token == '(':\n s.push(token)\n elif token == ')':\n top = s.peek()\n while top != '(':\n postfix += s.pop() + ' '\n top = s.peek()\n s.pop()\n else: # Token is a number or variable\n postfix += token + ' '\n\n # Pop out any more operators that might be sitting on the stack\n while(len(s)):\n postfix += s.pop() + ' '\n\n # Get rid of trailing whitespace and print\n postfix = postfix.strip()\n return postfix", "def toInfix(self, usingId=False):\n return _libsbml.FbcOr_toInfix(self, usingId)", "def expand(self, expression):\n if not expression:\n return b\"\"\n if expression[0] in self._operators:\n operator, expression = expression[:1], expression[1:]\n if operator == b\"+\":\n return self._expand(expression, reserved)\n elif operator == b\"#\":\n return self._expand(expression, reserved, prefix=b\"#\")\n elif operator == b\".\":\n return self._expand(expression, prefix=b\".\", separator=b\".\")\n elif operator == b\"/\":\n return self._expand(expression, prefix=b\"/\", separator=b\"/\")\n elif operator == b\";\":\n return self._expand(expression, prefix=b\";\", separator=b\";\",\n with_keys=True, trim_empty_equals=True)\n elif operator == b\"?\":\n return self._expand(expression, prefix=b\"?\", separator=b\"&\",\n with_keys=True)\n elif operator == b\"&\":\n return self._expand(expression, prefix=b\"&\", separator=b\"&\",\n with_keys=True)\n else:\n return self._expand(expression)", "def convert_to_postfix(expression):\n infix = list(expression.replace(\" \", \"\"))\n opr_priority = {'!': 4, '*': 3, '+': 2, '>': 1, '=': 1, '(': 0}\n postfix = []\n stack = []\n\n for token in infix:\n if token in string.ascii_uppercase:\n postfix.append(token)\n elif token == '(':\n stack.append(token)\n elif token == ')':\n stack_token = stack.pop()\n while stack_token != '(':\n postfix.append(stack_token)\n stack_token = stack.pop()\n else:\n while stack and (opr_priority[stack[len(stack)-1]] >= opr_priority[token]):\n postfix.append(stack.pop())\n stack.append(token)\n\n while stack:\n postfix.append(stack.pop())\n\n return postfix", "def infix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. 
Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n stack = Stack(30)\n if input_str == '':\n return ''\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n order = {}\n order[\"+\"] = 1\n order[\"-\"] = 1\n order[\"*\"] = 2\n order[\"/\"] = 2\n order[\"**\"] = 3\n order[\"<<\"] = 4\n order[\">>\"] = 4\n pfix_str = ''\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit() and pfix_str == \"\":\n pfix_str = pfix_str + i\n elif i in op_list:\n if not stack.is_empty():\n p = stack.peek()\n while 0 < stack.size():\n p = stack.peek()\n if p == \"(\":\n break\n if i == \"**\":\n if order[p] <= order[i]:\n break\n else:\n p1 = stack.pop()\n pfix_str = pfix_str + \" \" + p1\n elif order[p] < order[i]:\n break\n else:\n p2 = stack.pop()\n pfix_str = pfix_str + \" \" + p2\n stack.push(i)\n elif i == \"(\":\n stack.push(i)\n elif new_val.isdigit():\n pfix_str = pfix_str + \" \" + i\n elif i == \")\":\n p = stack.peek()\n while p != \"(\":\n pfix_str = pfix_str + \" \" + stack.pop()\n if not stack.is_empty():\n p = stack.peek()\n stack.pop()\n while not stack.is_empty():\n pop3 = stack.pop()\n pfix_str = pfix_str + \" \" + pop3\n return pfix_str", "def getInfixPatterns(self):\n return self.getOrDefault(\"infixPatterns\")", "def infix_to_postfix(input_str): # postfix requires that all operators proceed after the two operands that they work on\n\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n if input_str is None: raise ValueError\n # Split input string\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # Create output list, will be fed to postfix_eval() at end\n output_list = []\n # initialize stack large enough to contain all operators\n operator_stack = Stack(len(term_list)//3+1)\n for term in term_list:\n # check for operand, if present append to output list\n if operand_present(term) is True:\n output_list.append(term)\n # check for operator\n elif operator_present(term) or term == '(' or term == ')':\n #if operand_stack.size()<2: \n # raise PostfixFormatException(\"Insufficient operands\")\n # Check for open parentheses\n if term == '(': operator_stack.push(term)\n # Check for closing parentheses, pop stack until open parentheses found\n elif term == ')':\n while 1:\n token = operator_stack.pop()\n if token != '(': \n output_list.append(token)\n else: break\n # Otherwise push to stack but pop any higher/equal order operators\n else:\n sort_operators(term, operator_stack, output_list)\n #print(operator_stack.peek())\n #else: raise PostfixFormatException(\"Invalid token\")\n #if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def toPostfix (self,infix):\n postfix = []\n stack = []\n # Loop over characters in the input string\n for char in infix:\n # If char is a number add it to postfix\n if isFloat(char):\n postfix.append(char)\n # If its a special number add it to postfix\n elif char in Calculator.specialNumbers:\n postfix.append(char)\n # If char is a function push it onto the stack\n elif char in 
Calculator.functions:\n stack.append(char)\n # If the char is a function argument separator (,) pop operators off the stack onto\n # postfix until ( is reached\n elif char == ',':\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # If char is an operator O\n elif char in Calculator.operators:\n # While there is an operator, P, on the top of stack\n while len(stack)>0 and stack[-1] in Calculator.operators:\n stackTop = stack[-1]\n precChar = Calculator.operators[char][1]\n precStackTop = Calculator.operators[stackTop][1]\n # If O in -?+* and its precedence is <= P, pop P off stack\n if char in Calculator.operators and precChar <= precStackTop:\n postfix.append(stack.pop())\n else:\n break\n # Push O onto stack\n stack.append(char)\n # If char is (, push it onto the stack\n elif char == '(':\n stack.append(char)\n # If char is )\n elif char == ')':\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # While top of stack isn't ( pop operators off the top of the stack\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # Pop ( off the stack, but not onto output queue\n stack.pop()\n # If the token at the top of the stack is a function pop it off the stack and add to postfix\n if len(stack) > 0 and stack[-1] in Calculator.functions:\n postfix.append(stack.pop())\n # Finally pop all the operators off the stack onto postfix\n while len(stack)>0:\n # If the operator on the top of the stack is () then there are unmatched brackets\n if stack[-1] in '()':\n return \"Unmatched Error\"\n postfix.append(stack.pop())\n return postfix", "def exp(self):\n return type(self)(self.parent(), self._simplify(self._express.exp()))", "def get_invntt_operator(self):\n return self[0].get_invntt_operator()", "def get_formula_in_list(self):\n return tree_to_string(self.expression)", "def infixToPostfix(infix):\n postfix = []\n stackArr = []\n scanOperand = False\n hasIntegral = False\n hasDecimal = False\n currentOperand = 0\n decimal = 1\n for ch in infix:\n currentPrio = charPrio(ch)\n if currentPrio < 0: # current ele is operand\n if not (ch.isdigit() or ch == '.'):\n inputError()\n return\n if not scanOperand:\n scanOperand = True\n if ch == '.':\n if not hasIntegral:\n formatError()\n return\n hasDecimal = True\n continue\n if hasDecimal:\n if ch == '.':\n formatError()\n return\n currentOperand = currentOperand + 0.1 ** decimal * int(ch)\n decimal += 1\n else:\n if not hasIntegral:\n hasIntegral = True\n currentOperand = currentOperand * 10 + int(ch)\n elif currentPrio == 0:\n # none operation\n pass\n else:\n # and operand into postfix expression\n if scanOperand:\n scanOperand = False\n hasDecimal = False\n hasIntegral = False\n decimal = 1\n postfix.append(currentOperand)\n currentOperand = 0\n # handle operator\n if isEmpty(stackArr):\n push(stackArr, ch) # push into stack\n elif currentPrio > prio[peek(stackArr)]:\n push(stackArr, ch) # push into stack\n elif currentPrio == 1: # ')'\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)]:\n ele = pop(stackArr)\n if ele != '(':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n else:\n break\n else:\n while (not isEmpty(stackArr)) and 
currentPrio <= prio[peek(stackArr)] and prio[peek(stackArr)] < 5 :\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n push(stackArr, ch) # push into stack\n if scanOperand:\n postfix.append(currentOperand)\n while not isEmpty(stackArr):\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n return postfix", "def infix_to_postfix(string_input):\n stack_ops = []\n output = []\n value = \"\"\n\n for item in string_input:\n # item = operator\n if item in ops_prec.keys():\n value = value_to_output(value, output)\n\n # pop elements while they have lower precedence\n while (stack_ops\n and stack_ops[-1] in ops_prec.keys()\n and ops_prec[item] <= ops_prec[stack_ops[-1]]):\n output.append(stack_ops.pop())\n # else put item on stack\n stack_ops.append(item)\n\n # subexpression, delay precedence\n elif item == '(':\n value = value_to_output(value, output)\n\n stack_ops.append(item)\n elif item == ')':\n value = value_to_output(value, output)\n\n # flush output until ( is reached on stack\n while (stack_ops and stack_ops[-1] != '('):\n output.append(stack_ops.pop())\n # remove '('\n stack_ops.pop()\n\n # value = operand\n else:\n # concatenation of value for multidigit ones\n value += item\n # output.append(item) # this would be for one digit\n\n # flush stack to output\n value = value_to_output(value, output)\n\n while stack_ops:\n output.append(stack_ops.pop())\n\n return output", "def prep_equation(self):\n \n # This transforms the equation into an expression for sympy.\n prepped_equation = self.equation.replace(\"=\", \"-(\") + \")\"\n\n # This transforms the equation string into a sympy-readable equation.\n transformations = standard_transformations + (implicit_multiplication_application,)\n prepped_equation = parse_expr(prepped_equation, transformations=transformations)\n\n return prepped_equation", "def calculator(infix_expr):\n\n # Assign precedence values to operators\n prec = {}\n prec['^'] = 4\n prec['*'] = 3\n prec['/'] = 3\n prec['+'] = 2\n prec['-'] = 2\n prec['('] = 1\n\n # Instantiate stacks\n operand_stack = Stack()\n operator_stack = Stack()\n\n try:\n token_list = infix_expr.split()\n logging.debug(\"token_list = {}\".format(token_list))\n except:\n sys.exit(1)\n\n for token in token_list:\n logging.debug(\"token = {}\".format(token))\n if token in '0123456789':\n operand_stack.push(int(token))\n logging.debug(\"operand_stack.push = {}\".format(token))\n elif token == '(':\n operator_stack.push(token)\n logging.debug(\"operator_stack.push = {}\".format(token))\n elif token == ')':\n logging.debug(\"token = {}\".format(token))\n operator_token = operator_stack.pop()\n logging.debug(\"operator_stack.pop = {}\".format(operator_token))\n while operator_token != '(':\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"while operator_token != '(':\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_token = operator_stack.pop()\n logging.debug(\"new operator_token = {}\".format(operator_token))\n elif token in '^*/+-':\n while (not operator_stack.isEmpty()) and \\\n (prec[operator_stack.peek()] >= prec[token]):\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, 
operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"Operator - While:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_stack.push(token)\n logging.debug(\"operator_stack.push(): {}\".format(token))\n else:\n logging.debug(\"else.... exiting....\")\n sys.exit(1)\n\n # Use all remaining operators\n if not operator_stack.isEmpty():\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n logging.debug(\"Remaining Operators:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operand_stack.push(result)\n\n return operand_stack.pop()", "def postfix(self):\n return self.leftOperand.postfix() + \" \" + self.rightOperand.postfix() + \" \" + str(self.operator)", "def infixToPostfix(expr, prec):\n ops = Stack()\n postfix = []\n toks = expr.split()\n for t in toks:\n if t.isdigit():\n postfix.append(t)\n elif t == '(':\n ops.push('(')\n elif t == ')':\n op = ops.pop()\n while op != '(':\n postfix.append(op)\n op = ops.pop()\n else:\n while True:\n if ops.empty() or ops.peek() == '(':\n ops.push(t)\n break\n if prec[t] > prec[ops.peek()]:\n ops.push(t)\n break\n elif prec[t] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(t)\n break\n else:\n postfix.append(ops.pop())\n while not ops.empty():\n postfix.append(ops.pop())\n return postfix", "def infix_to_tree(self, expr: str, delimeter: str = None, node_name: str = \"base\") -> Tree:\n\n # Convert expr to prefix\n prefix = self.infix_to_prefix(expr)\n\n # Return prefix_to_tree of this expr\n return self.prefix_to_tree(prefix, delimeter, node_name)", "def shunt(infix):\n #convert input to a stack list\n infix=list(infix)[::-1]\n #operator stack and output list as empty lists\n opers,postfix =[],[]\n #operator precedence\n prec={'*':100,'.':90, '|':80, '/':80, '\\\\':80, ')':70, '(':60}\n\n #loop through input one character at a time\n while infix:\n #pop a character from the input\n c=infix.pop() \n #decide what to do based on character\n if c== '(':\n #push an open bracket to opers stack\n opers.append(c)\n elif c==')':\n #pop the operators stack until you find an open bracket\n while opers[-1]!='(':\n postfix.append(opers.pop())\n #get rid of '('\n opers.pop()\n elif c in prec:\n #push any operators on opers stack with hight prec to output\n while opers and prec[c] < prec[opers[-1]]:\n postfix.append(opers.pop())\n opers.append(c)\n else:\n #typically we just push the character to the output\n postfix.append(c)\n #pop all operators to the output\n while opers:\n postfix.append(opers.pop())\n #convert output list to string\n return ''.join(postfix)", "def expression(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"expression\")", "def postfix_eval(postfix_expr):\n s = StackArray()\n expr = postfix_expr.split()\n for token in expr:\n if token[0] in '0123456789':\n res = token\n s.push(res)\n else: # token is operator\n op2 = s.pop()\n op2 = float(op2)\n if s.is_empty(): # token is ~\n # could also be ~ for non-empty stack\n res = -1 * op2\n else:\n op1 = s.pop()\n op1 = float(op1)\n if token == '^':\n res = op1 ** op2\n elif token == '~':\n s.push(op1)\n res = -1 * op2\n elif token == '*':\n res = op1 * op2\n elif token == '/':\n if op2 == 0:\n raise ZeroDivisionError\n else:\n res = op1 / op2\n elif token == '+':\n res = op1 + op2\n else: # token == '-'\n res = op1 
- op2\n s.push(res)\n return res", "def infixToPostfix(inFixStr):\n postFixList = []\n s = Stack()\n chList = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n prec = {\"(\": 0, \"+\": 1, \"-\": 1, \"*\": 2, \"/\": 2} # operator precedence\n\n tok = inFixStr.split(\" \")\n for ch in tok: # ch can be (,), operand, operator\n if ch in chList: # the easy case when token is an operand\n postFixList.append(ch)\n elif ch == \"(\": # easy case of (\n s.push(ch)\n elif ch == \")\": # keep popping and appending until (\n top = s.pop()\n while top != \"(\":\n postFixList.append(top)\n top = s.pop() # pop next\n else: # now we are at opeartors\n # pop higher order operators first\n while not s.isEmpty() and prec[s.peek()] > prec[ch]:\n postFixList.append(s.pop())\n s.push(ch) # push current opeartor\n\n while not s.isEmpty(): # pop everything else in the stack\n postFixList.append(s.pop())\n return \" \".join(postFixList)", "def expression( ):#DOUBLE CHECK THIS\n\t\n\ttok = tokens.peek( )\n\tif debug: print(\"Expression: \", tok)\n\tleft = andExpr( ) #does the left side of the grammar \n\ttok = tokens.peek( )\n\twhile tok == \"or\": #checks to see if there is the token or and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = andExpr( )\n\t\tleft = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE THIS TO STRING CAUSE ITS \"or\"\n\t\ttok = tokens.peek( )\n\treturn left", "def expression_tree(postfix:str) -> Node:\n stack = deque()\n for ch in postfix:\n if ch not in {'+', '-', '*', '/', '^'}:\n stack.append(Node(ch))\n else:\n middle_node = Node(ch)\n right_node = stack.pop()\n left_node = stack.pop()\n middle_node ._right = right_node\n middle_node._left = left_node\n stack.append(middle_node)\n return stack.pop()", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def expression(self) -> Expression:\n ...", "def infix_to_postfix(s):\n result = \"\" # output string\n op = Stack() # operator stack\n i = 0 # index to 's'\n while i < len(s):\n if s[i] in \"0123456789\":\n while i < len(s) and s[i] in \"0123456789\":\n result += s[i]\n i += 1\n result += \" \"\n continue\n if s[i] == '(':\n op.push(s[i])\n elif s[i] == ')':\n top = op.pop()\n while top != '(':\n result += top + \" \"\n top = op.pop()\n else: # s[i] is +,-,*,/\n while not op.is_empty() and not higher_prec(s[i], op.peek()):\n result += op.pop() + \" \"\n op.push(s[i])\n i += 1\n while not op.is_empty():\n result += op.pop() + \" \"\n return result", "def expression_phrase(self):\n return self._expression_phrase", "def infix_to_postfix(string):\n tokenlist = string.split()\n output = []\n stack = create_stack()\n for token in tokenlist:\n if token == '(':\n stack.push(token)\n elif token == ')':\n toptoken = stack.pop()\n while toptoken != '(':\n output.append(toptoken)\n toptoken = stack.pop()\n elif token == '*' or token == '/':\n toptoken = stack.top()\n while toptoken in ['*','/']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n elif token == '+' or token == '-':\n toptoken = stack.top()\n while toptoken in ['*','/','+','-']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n else:\n output.append(token)\n while stack.length() > 0:\n output.append(stack.pop())\n space= ' '\n newstr = space.join(output)\n return newstr", "def expression(self) -> Optional[str]:\n return pulumi.get(self, \"expression\")", 
"def toInfix(self, usingId=False):\n return _libsbml.GeneProductRef_toInfix(self, usingId)", "def calculate_prefix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate prefix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\tfor e in reversed(elements):\n\t\t\tif e.isdigit():\n\t\t\t\tstack.append(int(e))\n\t\t\telse:\n\t\t\t\t# this is an operator\n\t\t\t\tif (len(stack) < 2):\n\t\t\t\t\tlogger.info(\"invalid input\")\n\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\toperand2 = stack.pop()\n\t\t\t\t\tif e == \"+\":\n\t\t\t\t\t\tresult = operand1 + operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"-\":\n\t\t\t\t\t\tresult = operand1 - operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"*\":\n\t\t\t\t\t\tresult = operand1 * operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"/\":\n\t\t\t\t\t\tresult = operand1 / operand2\n\t\t\t\t\t\tstack.append(int(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.exception(\"Unrecognized operator\")\n\t\t\t\t\t\traise Exception(\"Not a valid operator\")\n\t\treturn float(stack[0])", "def __str__(self):\n unarybrackets = ['sq', 'sqrt']\n #unary operators which require brackets around their operand\n #if the operand is a leaf, we force the brackets; otherwise the operand\n #is a non-leaf expression and will create its own brackets\n outstr = ''\n if self.is_leaf():\n outstr = outstr + str(self._element)\n else:\n if self._parent and self._element not in unarybrackets:\n outstr = '('\n #unary minus is unary, but needs brackets outside the minus\n if self._leftchild:\n outstr = outstr + str(self._leftchild)\n outstr = outstr + str(self._element)\n if self._element in unarybrackets and self._rightchild.is_leaf():\n outstr = outstr + '('\n outstr = outstr + str(self._rightchild)\n if self._element in unarybrackets and self._rightchild.is_leaf():\n outstr = outstr + ')'\n if self._parent and self._element not in unarybrackets:\n outstr = outstr + ')'\n return outstr", "def get_invntt_operator(self):\n\n\n Operator = []\n invntt_qubic = self.qubic.get_invntt_operator()\n R_qubic = ReshapeOperator(invntt_qubic.shapeout, invntt_qubic.shape[0])\n Operator.append(R_qubic(invntt_qubic(R_qubic.T)))\n\n invntt_planck = self.planck.get_invntt_operator()\n R_planck = ReshapeOperator(invntt_planck.shapeout, invntt_planck.shape[0])\n Operator.append(R_planck(invntt_planck(R_planck.T)))\n\n return BlockDiagonalOperator(Operator, axisout=0)", "def prefix(self):\n return str(self.operator) + \" \" + self.leftOperand.prefix() + \" \" + self.rightOperand.prefix()", "def Calc_infix(self,infix):\r\n\r\n stak=[]\r\n for i in range(0, len(infix)):\r\n if (infix[i] == '+') or (infix[i] == '-') or (infix[i] == '*') or (infix[i] == '/'):\r\n if len(stak) > 1:\r\n tmp = self.Check_is_valid_data(stak[len(stak) - 1])\r\n tmp1 = self.Check_is_valid_data(stak[len(stak) - 2])\r\n if (tmp == -1) or (tmp1 == -1):\r\n return False\r\n if tmp == -2:\r\n tmp = stak[len(stak) - 1]\r\n elif tmp == -3:\r\n tmp = extra_functions.convert_string(stak[len(stak) - 1])\r\n\r\n else:\r\n tmp = tmp[0]\r\n\r\n if tmp1 == -2:\r\n tmp1 = stak[len(stak) - 2]\r\n elif tmp1 == -3:\r\n\r\n tmp1 = extra_functions.convert_string(stak[len(stak) - 2])\r\n\r\n else:\r\n tmp1 = tmp1[0]\r\n\r\n stak = stak[:-1]\r\n if infix[i] == '-':\r\n stak[len(stak) - 1] = tmp - tmp1\r\n elif infix[i] == '+':\r\n stak[len(stak) - 1] = tmp + tmp1\r\n elif 
infix[i] == '*':\r\n stak[len(stak) - 1] = tmp * tmp1\r\n elif infix[i]== '/':\r\n if tmp1 != 0:\r\n stak[len(stak) - 1] = int(tmp / tmp1)\r\n else:\r\n return False\r\n else:\r\n if (infix[i] == '+') or (infix[i] == '-'):\r\n\r\n tmp = self.Check_is_valid_data(stak[len(stak) - 1])\r\n if tmp == -1:\r\n return False\r\n elif tmp == -2:\r\n tmp = stak[len(stak) - 1]\r\n elif tmp == -3:\r\n\r\n tmp = extra_functions.convert_string(stak[len(stak) - 1])\r\n\r\n else:\r\n tmp = tmp[0]\r\n if infix[i] == '-':\r\n stak[0] = tmp * -1\r\n else:\r\n stak[0] = tmp\r\n else:\r\n return False\r\n elif (infix[i] == 'lengthof') or (infix[i]== 'sizeof') or (infix[i] == 'type'):\r\n if len(stak) > 0:\r\n tmp = self.Check_is_valid_data(stak[len(stak) - 1])\r\n if (((tmp == 0) or (tmp == -1) or (tmp == -2) or (tmp == -3)) and ((infix[i]== 'lengthof') or (infix[i] == 'sizeof'))):\r\n return False\r\n elif ((tmp == 0) or (tmp == -1) or (tmp == -2) or (tmp == -3)) and (infix[i] == 'type'):\r\n stak[len(stak) - 1] = 0\r\n else:\r\n stak = stak[:-1]\r\n tmp1 = self.Type(tmp[1])\r\n\r\n if infix[i] == 'lengthof':\r\n stak.append(int(tmp[2] / tmp1))\r\n elif infix[i] == 'sizeof':\r\n stak.append(tmp[2])\r\n else:\r\n stak.append(tmp[0])\r\n else:\r\n return False\r\n else:\r\n if infix[i] == '?':\r\n stak.append(0)\r\n else:\r\n tmp = self.Check_is_valid_data(infix[i])\r\n if self.Data_types.__contains__(infix[i]):\r\n stak.append(self.Type(infix[i]))\r\n continue\r\n if tmp == -1:\r\n return False\r\n else:\r\n stak.append(infix[i])\r\n\r\n if stak.__len__() == 1:\r\n return stak\r\n return False", "def prefix_to_postfix(input_str): # prefix requires that all operators precede the two operands that they work on\n\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n if input_str is None: raise ValueError\n # split input string into list\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # initialize output list\n output_list = []\n #print(\"OUT SIZE \", len(output_list))\n # initialize operator stack\n operator_stack = Stack(len(term_list)//3+1)\n for i in range(len(term_list)):\n term = term_list[i]\n # prefix should begin with an operator otherwise raise Exception\n if i == 0:\n if operator_present(term) is True: operator_stack.push(term)\n else: raise PostfixFormatException()\n # Check for operator\n elif operator_present(term): \n operator_stack.push(term)\n # check for operand\n elif operand_present(term):\n output_list.append(term)\n # if previous two terms in output list were operands, pop operator stack to output list once\n if operand_present(term_list[i-1]):\n output_list.append(operator_stack.pop())\n # for every three operands there should be an additional operator\n if operand_present(term_list[i-3]) and operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. 
Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix", "def postfix(self,Line):\r\n\r\n stak = []\r\n expression = []\r\n infix = []\r\n i=0\r\n while( i <(len(Line))):\r\n if (Line[i] == '(') or (Line[i] == '['):\r\n if len(stak) > 0:\r\n if (Line[i] == '[') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"dup\") or (stak[len(stak) - 1] == \"sizeof\") or (stak[len(stak) - 1] == \"type\")):\r\n return False\r\n if len(stak) > 0:\r\n if (Line[i] == '(') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"sizeof\")):\r\n return False\r\n if (len(stak) == 0) and (Line[i] == '('):\r\n return False\r\n stak.append(Line[i])\r\n elif (Line[i] == ')') or (Line[i] == ']'):\r\n if len(stak) == 0:\r\n return False\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if (stak[j] == '(') and (Line[i] == ')'):\r\n break\r\n elif (stak[j] == '(') and (Line[i] == ']'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ')'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ']'):\r\n break\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if j < 0:\r\n break\r\n\r\n stak = stak[:-1]\r\n if (len(stak) > 0) and (stak[stak.__len__() - 1] == 'dup'):\r\n expression.append(stak[stak.__len__() - 1])\r\n stak = stak[:-1]\r\n elif Line[i] == ',':\r\n if expression.__len__() == 0:\r\n return False\r\n if stak.__len__() != 0:\r\n j = stak.__len__() - 1\r\n while (j >= 0):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if (expression.__len__() > 0)and(expression!=[\"dup\"]):\r\n infix.append(expression)\r\n expression = []\r\n elif Line[i][0].isdecimal():\r\n if Line[i][len(Line[i]) - 1] == 'h':\r\n tmp = extra_functions.is_hexa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n\r\n elif Line[i][len(Line[i]) - 1] == 'o':\r\n tmp = extra_functions.is_octa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'b':\r\n tmp = extra_functions.is_binary(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'd':\r\n tmp = int(Line[i][:-1], 10)\r\n expression.append(tmp)\r\n elif Line[i].isdecimal():\r\n expression.append(int(Line[i]))\r\n else:\r\n return False\r\n elif (Line[i] == \"lengthof\") or (Line[i] == \"sizeof\") or (Line[i] == \"type\") or (Line[i] == \"dup\"):\r\n if (Line[i] == \"dup\"):\r\n if stak.__len__()>0:\r\n j = stak.__len__() - 1\r\n while (j >= 0):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n S = []\r\n L = []\r\n i = 1 + i\r\n while (i < len(Line)):\r\n if (Line[i] == '(') or (Line[i] == '['):\r\n S.append(Line[i])\r\n elif (Line[i] == ')') or (Line[i] == ']'):\r\n if len(S) == 0:\r\n return False\r\n j = len(S) - 1\r\n while j >= 0:\r\n if (S[j] == '(') and 
(Line[i] == ')'):\r\n break\r\n elif (S[j] == '(') and (Line[i] == ']'):\r\n return False\r\n elif (S[j] == '[') and (Line[i] == ')'):\r\n return False\r\n elif (S[j] == '[') and (Line[i] == ']'):\r\n break\r\n S = S[:-1]\r\n j = j - 1\r\n if j < 0:\r\n break\r\n S = S[:-1]\r\n\r\n L.append(Line[i])\r\n if len(S) == 0:\r\n break\r\n i += 1\r\n if L.__len__() > 1:\r\n if (L[L.__len__() - 1] == ')') and (L[0] == '('):\r\n L = L[:-1]\r\n L = L[1:]\r\n else:\r\n return False\r\n else:\r\n return False\r\n tmp = self.postfix(L)\r\n i = i + 1\r\n if tmp != False:\r\n tmp1 = self.Calc_infix(expression)\r\n if tmp1 != False:\r\n for j in range(0, tmp1[0]):\r\n infix = infix + tmp\r\n else:\r\n return False\r\n else:\r\n return False\r\n expression=[\"dup\"]\r\n continue\r\n stak.append(Line[i])\r\n else:\r\n if (Line[i] == '*') | (Line[i] == '-') | (Line[i] == '/') | (Line[i] == '+'):\r\n if len(stak) > 0:\r\n j = len(stak) - 1\r\n while (j >= 0):\r\n if ((stak[j] == '+') | (stak[j] == '-')) & ((Line[i] == '+') | (Line[i] == '-')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == '+') | (stak[j] == '-')) & ((Line[i] == '*') | (Line[i] == '/')):\r\n break\r\n elif ((stak[j] == '*') | (stak[j] == '/')) & ((Line[i] == '*') | (Line[i] == '/')):\r\n\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == '*') | (stak[j] == '/')) & ((Line[i] == '+') | (Line[i] == '-')):\r\n\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif (stak[j] == 'dup') | (stak[j] == 'lengthof') | (stak[j] == 'type') | (stak[j] == 'sizeof'):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n else:\r\n break\r\n j = j - 1\r\n stak.append(Line[i])\r\n else:\r\n expression.append(Line[i])\r\n i += 1\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if (stak[j] == '(') or (stak[j] == '['):\r\n return False\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n\r\n if (expression.__len__() > 0)and(expression!=[\"dup\"]):\r\n infix.append(expression)\r\n return infix", "def brackets(expr):\n expr_latex = sp.latex(expr)\n if '+' in expr_latex or '-' in expr_latex:\n return \"(\" + expr_latex + \")\"\n else:\n return expr_latex", "def expression_term(self):\n return self._expression_term", "def simplify(expression):\n q = []\n for x in expression:\n if x != \")\":\n q.append(x)\n else:\n subexp = \"\"\n while q:\n #print(q)\n c = q.pop()\n if c == \"(\":\n if len(q) and (q[-1] == \"+\" or q[-1] == \"-\"):\n sign = q.pop()\n else:\n sign = \"+\"\n subexp = signExp(subexp, sign)\n q.append(subexp)\n break\n else:\n subexp = c + subexp\n exp = \"\"\n while q:\n c = q.pop()\n exp = c + exp\n \n if len(exp) and exp[0] != \"+\" and exp[0] != \"-\":\n # Again if the first character is not a 'sign' make it a \"+\"\n exp = \"+\" + exp\n \n return exp", "def expression(self, min_precedence=0):\n expr = self.primary()\n\n # Recursion is terminated based on operator precedence\n while not self.eol() and (self.cursor().token in ExpressionEvaluator.BinaryOperators) and (\n ExpressionEvaluator.BinaryOperators[self.cursor().token].prec >= min_precedence):\n\n operator = self.match_type(Operator)\n (prec, assoc) = ExpressionEvaluator.BinaryOperators[operator.token]\n\n # The ternary conditional operator is treated as a\n # special-case of a binary operator:\n # lhs \"?\"<expression>\":\" rhs\n if operator.token == \"?\":\n true_result = self.expression()\n self.match_value(Operator, \":\")\n\n # Minimum precedence for right-hand side depends on\n # associativity\n if assoc == 
\"LEFT\":\n rhs = self.expression(prec + 1)\n elif assoc == \"RIGHT\":\n rhs = self.expression(prec)\n else:\n raise ValueError(\"Encountered a BinaryOperator with no associativity.\")\n\n # Converting C ternary to Python requires us to swap\n # expression order:\n # - C: (condition) ? true_result : false_result\n # - Python: true_result if (condition) else false_result\n if operator.token == \"?\":\n condition = expr\n false_result = rhs\n expr = true_result if condition else false_result\n else:\n expr = self.__apply_binary_op(operator.token, expr, rhs)\n\n return expr", "def exp_in(self) -> str:\n if self.inst_in:\n return 'instance:' + self.inst_in + ';' + self.input\n else:\n return self.input", "def visitPackageInfixSyntax(self, *args):\n return _libsbml.L3ParserSettings_visitPackageInfixSyntax(self, *args)", "def my_operator(self):\n return self._my_operator", "def visit_expression(self, node, children):\n if self.debug:\n print(\"Expression {}\".format(children))\n expr = 0\n start = 0\n # Check for unary + or - operator\n if text(children[0]) in \"+-\":\n start = 1\n\n for i in range(start, len(children), 2):\n if i and children[i - 1] == \"-\":\n expr -= children[i]\n else:\n expr += children[i]\n\n if self.debug:\n print(\"Expression = {}\".format(expr))\n\n return expr", "def postfix_eval(input_str):\n\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. \n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n if input_str is None: raise PostfixFormatException\n # create list of operands and operators\n term_list = input_str.split()\n # initialize stack large enough to contain all operands\n operand_stack = Stack(2*len(term_list)//3+1)\n # iterate over term_list\n for term in term_list:\n # check for operatorm, evaluate operators on A & B if True\n if operator_present(term) is True:\n if operand_stack.size()<2: \n raise PostfixFormatException(\"Insufficient operands\")\n B = operand_stack.pop()\n A = operand_stack.pop()\n operand_stack.push(\n calculate(\n A, # A\n B, # B\n term) # operator\n )\n # check for operand, push to stack if True\n elif operand_present(term) is True:\n operand_stack.push(term)\n else: raise PostfixFormatException(\"Invalid token\")\n if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n return operand_stack.pop()", "def match_expr(self, precedence: int) -> \"AbstractNode\":\n tkn = self.lexer.tkn\n # This line is solely to satisfy mypy.\n left = AbstractNode()\n if tkn.type == Token.AT:\n self.lexer.next_token()\n address = self.match_expr(PREC_PREFIX)\n left = MemoryNode(address)\n elif tkn.type == Token.INT:\n try:\n left = IntNode(int(tkn.value, base=0))\n except ValueError:\n raise SyntaxError(\"invalid integer literal: {}\".format(tkn))\n else:\n self.lexer.next_token()\n elif tkn.type == Token.MINUS:\n self.lexer.next_token()\n left = PrefixNode(\"-\", self.match_expr(PREC_PREFIX))\n elif tkn.type == Token.REGISTER:\n try:\n left = RegisterNode(register_to_index(tkn.value))\n except HERAError:\n raise SyntaxError(\"{} is not a valid register\".format(tkn.value))\n self.lexer.next_token()\n elif tkn.type == Token.SYMBOL:\n left = SymbolNode(tkn.value)\n self.lexer.next_token()\n elif tkn.type == Token.LPAREN:\n self.lexer.next_token()\n left = self.match_expr(PREC_LOWEST)\n if self.lexer.tkn.type != Token.RPAREN:\n 
self.unexpected(self.lexer.tkn)\n self.lexer.next_token()\n else:\n self.unexpected(tkn)\n\n infix_tkn = self.lexer.tkn\n while infix_tkn.type in PREC_MAP and precedence < PREC_MAP[infix_tkn.type]:\n infix_precedence = PREC_MAP[infix_tkn.type]\n self.lexer.next_token()\n right = self.match_expr(infix_precedence)\n left = InfixNode(infix_tkn.value, left, right)\n infix_tkn = self.lexer.tkn\n return left", "def make_positive(expression: Expr) -> Expr:\n if expression.op == '~':\n new_expression = Expr(expression.args[0].op, *expression.args[0].args)\n return new_expression\n return expression", "def arcsin(self):\n return type(self)(self.parent(),\n self._simplify(self._express.arcsin()))", "def replace_operators(self, instr):\n # change ++, -- to add(1), sub(1)\n instr = re.sub(r\"\\+\\+\", \".add(1)\", instr)\n instr = re.sub(r\"--\", \".sub(1)\", instr)\n\n m1 = re.search(r\"[+\\-*/]=\", instr)\n result = \"\"\n if m1:\n # handle the string with +=, -=, *=. /=\n v = instr[: m1.start()].rstrip(\" \")\n v1 = v.strip(\" \")\n expressions = [v1, m1.group()[: 1], \"(\", instr[m1.end():].strip().strip(\";\"), \");\"]\n instr = v + \"= \" + \" \".join(expressions)\n\n # split by !, &&, ||\n equations = re.split(r\"(!|&&|\\|\\||)\", instr)\n for equation in equations:\n # split by <=, >=, ==, !=, =\n expressions = re.split(r\"([<>=!]*=)\", equation)\n if len(expressions) == 1:\n result += equation\n else:\n for expression in expressions:\n if re.search(r\"[+\\-*/]\", expression):\n # with math operators\n # 0.exclude ;\n rc = \"\"\n pos = expression.find(';')\n if pos != -1:\n rc = expression[pos:]\n expression = expression[:pos]\n\n # 1.exclude independent ( or )\n lbc = expression.count(\"(\")\n rbc = expression.count(\")\")\n lc = \"\"\n if lbc > rbc:\n # ( is more than )\n pos = expression.replace('(', 'X', lbc - rbc - 1).find('(')\n lc = expression[: pos + 1]\n expression = expression[pos + 1:]\n else:\n if lbc < rbc:\n # ( is less than )\n pos = 'X'.join(expression.rsplit(')', rbc - lbc - 1)).rfind(')')\n rc = expression[pos:] + rc\n expression = expression[:pos]\n\n # 2.change normal notation to RPN, in order to change math operators to SafeMath operators\n # 3.change RPN to normal notation\n result += lc + self.rpn_to_nn(self.nn_to_rpn(expression)) + rc\n else:\n result += expression\n\n return result", "def eval_expr1(expression):\n\n output = []\n stack = []\n tokens = list(tokenize(expression))\n\n for token in tokens:\n if token == \"(\":\n stack.append(token)\n elif token == \")\":\n while stack and stack[-1] != \"(\":\n op = stack.pop(-1)\n output.append(op)\n op = stack.pop(-1)\n assert op == \"(\"\n elif token in [\"+\", \"*\"]:\n if stack and stack[-1] in [\"+\", \"*\"]:\n op = stack.pop(-1)\n output.append(op)\n\n stack.append(token)\n elif isinstance(token, int):\n output.append(token)\n else:\n raise NotImplementedError(token)\n\n # print(token, output, stack)\n\n while stack and stack[-1] in [\"+\", \"*\"]:\n op = stack.pop(-1)\n output.append(op)\n\n assert not stack\n\n return eval_ops(output)", "def get_equation(self):\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n string = \"\"\n\n for index, polynomial in self.polynomials.items():\n polynomial = int(polynomial)\n index = int(index)\n\n if polynomial != 0:\n if polynomial < 0:\n string_pre = \" - \"\n else:\n string_pre = \" + \"\n\n if index != 0:\n string_append = \"x\"\n elif polynomial == 1 or polynomial == -1:\n string_append = str(abs(polynomial))\n else:\n string_append = \"\"\n\n if 
polynomial < 0:\n polynomial = abs(polynomial)\n\n if polynomial != 1:\n string_append = str(polynomial) + string_append\n\n if index != 0 and index != 1:\n string_append += \"^\" + str(index)\n\n string += string_pre + string_append\n\n if len(string) > 0:\n string = string[3:]\n else:\n string = \"0\"\n\n return string", "def get_expr(self, expr, locals={}):\n _locals = {}\n if locals is not None:\n _locals = dict(self._locals, **locals)\n\n expr = expr.strip() # extraneous spaces otherwise interpreted as indentation\n\n self._request_all_objects_in_expression(expr)\n\n _result = self._eval(node=ast.parse(expr, mode='eval').body,\n ctx=dict(operators=self.operators,\n functions=self.functions,\n locals=_locals,\n input=True))\n\n # raise exceptions unable to be raised during `_eval` for technical reasons\n # (e.g. due to expressions with self-referencing local variables that would\n # cause infinite recursion)\n if isinstance(_result, Exception):\n raise _result\n\n return _result", "def make_flat(self):\n\n if type(self.exp) == str:\n if not self.closure or self.exp == 'ϵ':\n return self.exp\n elif len(self.exp) == 1:\n return self.exp + self.closure\n else:\n return '(' + self.exp + ')' + self.closure\n else:\n flat_exp = ''.join( str(e) for e in self.exp )\n if not self.closure or flat_exp == 'ϵ':\n return flat_exp\n elif len(flat_exp) == 1:\n return flat_exp + self.closure\n else:\n return '(' + flat_exp + ')' + self.closure", "def toInfix(self, usingId=False):\n return _libsbml.FbcAssociation_toInfix(self, usingId)", "def infix_to_postfix(text: str) -> list:\n \n def unfold_block(text: str) -> list:\n return infix_to_postfix(text) if text[0] == \"(\" else [text]\n\n grouped_raw = group_operations(text)[0]\n if not (\"+\" in grouped_raw or \"-\" in grouped_raw or \"*\" in grouped_raw or \"/\" in grouped_raw):\n grouped = grouped_raw\n stack = [grouped]\n else:\n grouped = group_operations(text)[0][1:-1]\n first_block, operator, second_block = text_to_parts(grouped)\n first_block = unfold_block(first_block)\n second_block = unfold_block(second_block)\n stack = [*first_block, *second_block, operator]\n return stack", "def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left", "def stringbuilderexpr(self) :\n\t\ttry :\n\t\t\treturn self._stringbuilderexpr\n\t\texcept Exception as e:\n\t\t\traise e", "def eval_postfix(s):\n stack = Stack()\n \n s = s.split()\n for i in s:\n \tif operator(i) == False:\n \t\tstack.push(int(i))\n \telse:\n \t\tb = stack.pop()\n \t\ta = stack.pop()\n \t\tresult = evaluate(a, i, b)\n \t\tstack.push(result)\n return stack.pop()", "def postfix_code_line(self, Line):\r\n\r\n stak = []\r\n expression = []\r\n infix = []\r\n for i in range(0, len(Line)):\r\n\r\n reg_32 = [\"eax\", \"ebx\", \"ecx\", \"edx\", \"ebp\", \"esp\", \"esi\", \"edi\"]\r\n reg_16 = [\"ax\", \"bx\", \"cx\", \"dx\"]\r\n if (Line[i] == '(') or (Line[i] == '['):\r\n if len(stak) > 0:\r\n if (Line[i] == '[') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"sizeof\") or (stak[len(stak) - 1] == \"type\") or (stak[len(stak) - 1] == \"offset\")):\r\n return False\r\n if (Line[i] == '(') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"sizeof\") or (stak[len(stak) - 1] == \"offset\")):\r\n return False\r\n if (len(stak) == 0) and 
(Line[i] == '(') and (expression.__len__() != 0):\r\n return False\r\n if expression.__len__() > 0:\r\n if (Line[i] == '[') and ((expression[expression.__len__() - 1]) != \"ptr\") and ((reg_32.__contains__(expression[expression.__len__() - 1]) == False) and (self.Data_variables.__contains__(expression[expression.__len__() - 1]) == False)):\r\n return False\r\n elif (Line[i] == '[') and ((expression[expression.__len__() - 1]) != \"ptr\") and ((reg_32.__contains__(expression[expression.__len__() - 1]) == False)):\r\n tmp = expression[expression.__len__() - 1]\r\n expression[expression.__len__() - 1] = \"ptr_X_\"\r\n expression.append(tmp)\r\n elif (Line[i] == '[') and ((expression[expression.__len__() - 1]) == \"ptr\"):\r\n # continue\r\n 1 == 1\r\n else:\r\n return False\r\n else:\r\n if Line[i] == '[':\r\n expression.append(\"ptr_\")\r\n stak.append(Line[i])\r\n elif (Line[i] == ')') or (Line[i] == ']'):\r\n if len(stak) == 0:\r\n return False\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if (stak[j] == '(') and (Line[i] == ')'):\r\n break\r\n elif (stak[j] == '(') and (Line[i] == ']'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ')'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ']'):\r\n break\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if j < 0:\r\n break\r\n\r\n stak = stak[:-1]\r\n elif Line[i] == ',':\r\n if expression.__len__() == 0:\r\n return False\r\n if len(stak) != 0:\r\n j = len(stak) - 1\r\n while j >= 0:\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if expression.__len__() > 0:\r\n infix.append(expression)\r\n expression = []\r\n elif Line[i][0].isdecimal():\r\n if Line[i][len(Line[i]) - 1] == 'h':\r\n tmp = extra_functions.is_hexa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'o':\r\n tmp = extra_functions.is_octa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'b':\r\n tmp = extra_functions.is_binary(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'd':\r\n tmp = int(Line[i][:-1], 10)\r\n expression.append(tmp)\r\n elif Line[i].isdecimal():\r\n expression.append(int(Line[i]))\r\n else:\r\n return False\r\n elif (Line[i] == \"lengthof\") or (Line[i] == \"sizeof\") or (Line[i] == \"type\") or (Line[i] == \"offset\"):\r\n stak.append(Line[i])\r\n else:\r\n if (Line[i] == '*') or (Line[i] == '-') or (Line[i] == '/') or (Line[i] == '+'):\r\n if len(stak) > 0:\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if ((stak[j] == '+') or (stak[j] == '-')) and ((Line[i] == '+') or (Line[i] == '-')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == '+') or (stak[j] == '-')) and ((Line[i] == '*') or (Line[i] == '/')):\r\n break\r\n elif ((stak[j] == '*') or (stak[j] == '/')) and ((Line[i] == '*') or (Line[i] == '/')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == '*') or (stak[j] == '/')) and ((Line[i] == '+') or (Line[i] == '-')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == 'dup') or (stak[j] == 'lengthof') or (stak[j] == 'type') or (stak[j] == 'sizeof')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n else:\r\n break\r\n j = j - 1\r\n\r\n stak.append(Line[i])\r\n else:\r\n try:\r\n if ((Line[i][0] == Line[i][len(Line[i]) - 1]) and (Line[i][0] == '\"')) or ((Line[i][0] == Line[i][len(Line[i]) - 1]) and (Line[i][0] == \"\\'\")):\r\n tmp = 
extra_functions.convert_string(Line[i])\r\n expression.append(tmp)\r\n continue\r\n raise Exception(\"NotString\")\r\n except Exception:\r\n expression.append(Line[i])\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if (stak[j] == '(') or (stak[j] == '['):\r\n return False\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n\r\n if expression.__len__() > 0:\r\n infix.append(expression)\r\n\r\n return infix", "def eval_expr2(expression):\n\n output = []\n stack = []\n tokens = list(tokenize(expression))\n\n precedence = {\n \"*\": 10,\n \"+\": 20,\n }\n\n for token in tokens:\n if token == \"(\":\n stack.append(token)\n elif token == \")\":\n while stack and stack[-1] != \"(\":\n op = stack.pop(-1)\n output.append(op)\n op = stack.pop(-1)\n assert op == \"(\"\n elif token in [\"+\", \"*\"]:\n while (\n stack\n and stack[-1] in [\"+\", \"*\"]\n and precedence[token] < precedence[stack[-1]]\n ):\n op = stack.pop(-1)\n output.append(op)\n\n stack.append(token)\n elif isinstance(token, int):\n output.append(token)\n else:\n raise NotImplementedError(token)\n\n # print(token, output, stack)\n\n while stack and stack[-1] in [\"+\", \"*\"]:\n op = stack.pop(-1)\n output.append(op)\n\n assert not stack\n\n return eval_ops(output)", "def exeval(expression): \n if len(expression) <= 3: #Assuming no spaces (\" \") between each value given in the expression\n if expression[0] == \"+\":\n return float(expression[1]) + float(expression[2])\n elif expression[0] == \"-\":\n return float(expression[1]) - float(expression[2])\n else:\n if expression[0] == \"+\":\n return float(expression[1]) + exeval(expression[2:])\n elif expression[0] == \"-\":\n return float(expression[1]) - exeval(expression[2:])", "def infix2prefix(self, lst):\n stk = []\n pre = []\n for elt in reversed(lst):\n if elt.isdigit():\n pre.append(elt)\n elif elt == \")\":\n stk.append(elt)\n elif elt == \"(\":\n while stk and stk[-1] != \")\":\n pre.append(stk.pop())\n stk.pop()\n else:\n while stk and self.precedence(elt) < self.precedence(stk[-1]): # < rather than <=\n pre.append(stk.pop())\n stk.append(elt)\n\n while stk:\n pre.append(stk.pop())\n\n pre.reverse()\n return pre", "def arithmetic_expression(self, node=None):\n\n if not node:\n node = self.get_nodes_from_position('root')[0]\n\n leaves = self.dfs_leaves()\n arithmetic_expression = []\n\n for i in leaves:\n\n arithmetic_expression.append(i)\n\n parent = self.tree.predecessors(i)[0]\n if not parent in arithmetic_expression:\n arithmetic_expression.append(parent)\n\n return arithmetic_expression", "def eval_postfix(s):\n stack = Stack()\n for x in s.split(): # rozděl 's' dle mezer\n if x == '+':\n stack.push(stack.pop() + stack.pop())\n elif x == '-':\n stack.push(-stack.pop() + stack.pop())\n elif x == '*':\n stack.push(stack.pop() * stack.pop())\n elif x == '/':\n second = stack.pop()\n stack.push(stack.pop() / second)\n else:\n stack.push(float(x))\n return stack.pop()", "def _reverse_input_einsum_eq(equation: str) -> str:\n input_output_strings = equation.split('->')\n assert len(input_output_strings) == 2, \"invalid equation\"\n input_strings = input_output_strings[0].split(',')\n assert len(input_strings) == 2, \"invalid equation\"\n equation = input_strings[1] + ',' + input_strings[0] + '->' + input_output_strings[1]\n return equation", "def _generate_symbols(self):\n\n def infix(id, bp):\n def led(self, left):\n self.first = left\n self.second = self.parent.expression(bp)\n return self\n\n self.symbol_factory(id, bp).led = led\n\n def prefix(id, bp):\n 
def nud(self):\n self.first = self.parent.expression(bp)\n return self\n\n self.symbol_factory(id, bp).nud = nud\n\n def infixr(id, bp):\n def led(self, left):\n self.first = left\n self.second = self.parent.expression(bp - 1)\n return self\n\n self.symbol_factory(id, bp).led = led\n\n def paren(id):\n def nud(self):\n expr = self.parent.expression()\n self.parent._advance(\"RIGHT_PAREN\")\n return expr\n\n self.symbol_factory(id).nud = nud\n\n paren(\"LEFT_PAREN\")\n self.symbol_factory(\"RIGHT_PAREN\")\n self.symbol_factory(\"END\")\n self.symbol_factory(\":\")\n self.symbol_factory(\"NEWLINE\")\n self.symbol_factory(\"INDENT\")\n self.symbol_factory(\"DEDENT\")\n\n # numbers denote order of operations\n infix(\"+\", 10)\n infix(\"-\", 10)\n infix(\"*\", 20)\n infix(\"/\", 20)\n infix(\"==\", 5)\n infix(\">\", 5)\n infix(\"<\", 5)\n infix(\"&\", 4)\n infix(\"|\", 3)\n infix(\",\", 1)\n infix(\"::\", 1)\n \n infixr(\"=\", 1) # assignment is a little different from others.\n\n # example +4 , -2 \n prefix(\"+\", 100)\n prefix(\"-\", 100)\n\n def literal(id):\n self.symbol_factory(id).nud = lambda self: self\n\n for l in [\"NUMBER\", \"FLOAT\", \"NAME\", \"STRING\", \"BOOL\"]:\n literal(l)\n\n def statement(id, std):\n self.symbol_factory(id).stmt_begin = True\n self.symbol_factory(id).std = std\n\n def if_statement(self):\n self.first = self.parent.expression()\n self.parent._advance([\":\"])\n self.parent._advance([\"NEWLINE\"])\n self.second = self.parent.Block()\n if self.parent.token.id == \"else\":\n self.parent._advance([\"else\"])\n self.parent._advance([\":\"])\n self.parent._advance([\"NEWLINE\"])\n self.third = self.parent.Block()\n return self\n\n def let_statement(self):\n self.first = self.parent.expression()\n self.parent._advance([\"NEWLINE\"])\n return self\n\n def print_statement(self):\n self.parent._advance([\"LEFT_PAREN\"])\n self.first = self.parent.expression()\n self.parent._advance([\"RIGHT_PAREN\"])\n self.parent._advance([\"NEWLINE\"])\n return self\n\n def while_statement(self):\n self.parent._advance([\"LEFT_PAREN\"])\n self.first = self.parent.expression()\n self.parent._advance([\"RIGHT_PAREN\"])\n self.parent._advance([\":\"])\n self.parent._advance([\"NEWLINE\"])\n self.second = self.parent.Block()\n return self\n\n def func_statement(self):\n arg_list = []\n\n self.first = self.parent.expression()\n self.parent._advance([\"LEFT_PAREN\"])\n self.second = self.parent.expression()\n self.parent._advance([\"RIGHT_PAREN\"])\n self.parent._advance([\":\"])\n self.parent._advance([\"NEWLINE\"])\n self.third = self.parent.Block()\n return self\n\n statement(\"if\", if_statement)\n statement(\"let\", let_statement)\n statement(\"print\", print_statement)\n statement(\"while\", while_statement)\n statement(\"fn\", func_statement)", "def funcOpExchange(expstr):\n funcOpDict = expr.getFuncOpDict() \n for funcstr in funcOpDict:\n idx = expstr.find(funcstr)\n if idx >= 0:\n #if we find a function string at idx\n if (idx == 0 or not expstr[idx-1].isalpha()) and expstr[idx+len(funcstr)] == '(':\n fstart = idx\n fstop = 0\n rest = expstr[idx:]\n pdepth = 0\n for i,c in enumerate(rest):\n if c == '(':\n pdepth += 1\n if c == ')':\n pdepth -= 1\n if pdepth == 0:\n fstop = idx+i+1\n break\n start = expstr[:fstart]\n middle = expstr[fstart:fstop]\n end = expstr[fstop:]\n args = ['('+funcOpExchange(exp)+')' for exp in funcargs(middle)]\n if len(args) == 1:\n args.append('0')\n expstr = start+funcOpDict[funcstr].join(args)+funcOpExchange(end)\n return expstr", "def 
post_fix(expr):\n if expr[:3] == \"8 4\":\n return 54\n elif expr[:3] == \"5 6\":\n return 32\n elif expr[:3] == \"1 1\":\n return 2\n \"\"\"normal solution\"\"\"\n lst = expr.split()\n stack = []\n for e in lst:\n if e in \"+-*/\":\n b = stack.pop()\n a = stack.pop()\n stack.append(str(eval(\"{}{}{}\".format(a, e, b))))\n else:\n stack.append(e)\n return round(float(stack.pop()))", "def inv(self):\n if self.symbol == '+':\n # temp = tuple([self.attribute_col, '-'])\n temp = np.array((self.attribute_col, '-'), dtype='i, S1')\n elif self.symbol == '-':\n # temp = tuple([self.attribute_col, '+'])\n temp = np.array((self.attribute_col, '+'), dtype='i, S1')\n else:\n temp = np.array((self.attribute_col, 'x'), dtype='i, S1')\n return temp", "def Mul_X(self, String, infix):\r\n tmp1 = self.Check_code_operand(infix[0])\r\n if (tmp1 == False):\r\n return False\r\n if (tmp1[0] == 'imm') | (tmp1[2] == 0):\r\n return False\r\n\r\n if String == 'mul':\r\n a = 0\r\n if (tmp1[0] != 'add'):\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n\r\n if tmp1[2] == 1:\r\n a = a * self.Get_value_from_reg_X(\"al\")\r\n\r\n if a >= pow(2, 2 * 8):\r\n a = a & (pow(2, 2 * 8) - 1)\r\n self.Save_value_in_reg_X(\"ax\",a)\r\n\r\n a = a & (pow(2, 8) - 1)\r\n if bool(self.Get_value_from_reg_X(\"ah\")):\r\n self.Flags[\"cf\"] = 1\r\n self.Flags[\"of\"] = 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n self.Flags[\"of\"] = 0\r\n\r\n if bool(a & pow(2, ( 8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n self.Flags[\"zf\"] = 0\r\n elif tmp1[2] == 2:\r\n a = a * self.Get_value_from_reg_X(\"ax\")\r\n b=a\r\n if a >= pow(2, 2 * 8):\r\n a = a & (pow(2, 2 * 8) - 1)\r\n self.Save_value_in_reg_X(\"ax\", a)\r\n\r\n b = b.__rshift__(16)\r\n if b >= pow(2, 2 * 8):\r\n b = b & (pow(2, 2 * 8) - 1)\r\n self.Save_value_in_reg_X(\"dx\", b)\r\n\r\n if bool(self.Get_value_from_reg_X(\"dx\")):\r\n self.Flags[\"cf\"] = 1\r\n self.Flags[\"of\"] = 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n self.Flags[\"of\"] = 0\r\n\r\n if bool(a & pow(2, (2*8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n\r\n self.Flags[\"zf\"] = 0\r\n elif tmp1[2] == 4:\r\n a = a * self.Registers[\"eax\"]\r\n b = a\r\n if a >= pow(2, 4 * 8):\r\n a = a & (pow(2, 4 * 8) - 1)\r\n self.Registers[\"eax\"] = a\r\n\r\n b = b.__rshift__(32)\r\n if b >= pow(2, 4 * 8):\r\n b = b & (pow(2, 4 * 8) - 1)\r\n self.Registers[\"edx\"] = b\r\n if b != 0:\r\n self.Flags[\"cf\"] = 1\r\n self.Flags[\"of\"] = 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n self.Flags[\"of\"] = 0\r\n\r\n if bool(a & pow(2, (4*8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n self.Flags[\"zf\"] = 0\r\n elif String == 'imul':\r\n a = 0\r\n if (tmp1[0] != 'add'):\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n\r\n if tmp1[2] == 1:\r\n a = a * self.Get_value_from_reg_X(\"al\")\r\n\r\n if a >= 
pow(2, 2 * 8):\r\n a = a & (pow(2, 2 * 8) - 1)\r\n self.Save_value_in_reg_X(\"ax\", a)\r\n\r\n a=a&(pow(2, 8)-1)\r\n if bool(a & pow(2, (8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n\r\n if (bool(self.Get_value_from_reg_X(\"ah\"))!= bool(self.Flags[\"sf\"])):\r\n self.Flags[\"cf\"] = 1\r\n self.Flags[\"of\"] = 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n self.Flags[\"of\"] = 0\r\n\r\n\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n\r\n self.Flags[\"zf\"] = 0\r\n elif tmp1[2] == 2:\r\n\r\n a = a * self.Get_value_from_reg_X(\"ax\")\r\n b = a\r\n if a >= pow(2, 2 * 8):\r\n a = a & (pow(2, 2 * 8) - 1)\r\n self.Save_value_in_reg_X(\"ax\", a)\r\n\r\n b = b.__rshift__(16)\r\n if b >= pow(2, 2 * 8):\r\n b = b & (pow(2, 2 * 8) - 1)\r\n self.Save_value_in_reg_X(\"dx\", b)\r\n\r\n if bool(a & pow(2, (2*8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n\r\n if (bool(self.Get_value_from_reg_X(\"dx\")) != bool(self.Flags[\"sf\"])):\r\n self.Flags[\"cf\"] = 1\r\n self.Flags[\"of\"] = 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n self.Flags[\"of\"] = 0\r\n\r\n\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n\r\n self.Flags[\"zf\"] = 0\r\n elif tmp1[2] == 4:\r\n a = a * self.Registers[\"eax\"]\r\n b = a\r\n if a >= pow(2, 4 * 8):\r\n a = a & (pow(2, 4 * 8) - 1)\r\n self.Registers[\"eax\"] = a\r\n\r\n b = b.__rshift__(32)\r\n if b >= pow(2, 4 * 8):\r\n b = b & (pow(2, 4 * 8) - 1)\r\n self.Registers[\"edx\"] = b\r\n\r\n if bool(a & pow(2, (4*8) - 1)):\r\n self.Flags[\"sf\"] = 1\r\n else:\r\n self.Flags[\"sf\"] = 0\r\n\r\n if (bool(b) != bool(self.Flags[\"sf\"])):\r\n self.Flags[\"cf\"] = 1\r\n self.Flags[\"of\"] = 1\r\n else:\r\n self.Flags[\"cf\"] = 0\r\n self.Flags[\"of\"] = 0\r\n\r\n\r\n\r\n v = a\r\n one = 0\r\n for i in range(0, 8):\r\n if bool(v & 1):\r\n one += 1\r\n v = v.__rshift__(1)\r\n if bool(one & 1):\r\n self.Flags[\"pf\"] = 0\r\n else:\r\n self.Flags[\"pf\"] = 1\r\n\r\n\r\n self.Flags[\"zf\"] = 0\r\n\r\n return True" ]
[ "0.820744", "0.74909353", "0.6935869", "0.68716174", "0.6740321", "0.670735", "0.66578454", "0.6584289", "0.6553258", "0.64778376", "0.636958", "0.6362766", "0.6355834", "0.634328", "0.6335792", "0.6330504", "0.6273808", "0.62522274", "0.62522274", "0.62522274", "0.624801", "0.6237045", "0.6233361", "0.6200406", "0.61456597", "0.613007", "0.613007", "0.60972464", "0.6035389", "0.60347813", "0.6018143", "0.6002562", "0.5997944", "0.59842885", "0.5975256", "0.5949107", "0.5936192", "0.58899605", "0.5884022", "0.586646", "0.5849742", "0.5839507", "0.5833771", "0.5793975", "0.5749529", "0.57478976", "0.5747896", "0.5739987", "0.56696564", "0.566242", "0.5659959", "0.5651482", "0.5645953", "0.5629481", "0.5627762", "0.56238353", "0.5612975", "0.5600742", "0.5582137", "0.5580683", "0.55452687", "0.5532263", "0.5517263", "0.5503633", "0.5491023", "0.54786575", "0.5466246", "0.54554754", "0.54253966", "0.53991246", "0.53942025", "0.53937995", "0.5378415", "0.53698283", "0.53410596", "0.5340185", "0.53261703", "0.5302086", "0.52869815", "0.5282346", "0.5261347", "0.525909", "0.52549344", "0.52485806", "0.5222865", "0.5214641", "0.52126485", "0.5209304", "0.5207644", "0.520524", "0.5200237", "0.51948303", "0.518519", "0.514304", "0.51408553", "0.5134235", "0.51304483", "0.5114681", "0.50880146", "0.50851864" ]
0.68402994
4
Returns the expression in postfix form.
def postfix(self):\n        return str(self.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def infix_to_postfix(self, expr: str) -> str:\n\n # The stack that we will be performing operations on\n stack: list[str] = []\n\n # The output\n output: str = \"\"\n\n # We always need surrounding parentheses\n expr = f\"({expr})\"\n\n # The tokenized expression\n expr = self.tokenize_expr(expr)\n\n\n \n # For every token in expression\n for token in expr:\n # Check what token it is\n if token == \"(\":\n # If it is a (, then append to stack\n stack.append(\"(\")\n elif token == \")\":\n # If it is a ), then iterate over stack\n while stack[-1] != '(':\n # Popping the last item from stack, to output\n # Include a trailing space\n # Until the last item in the stack is a (\n output += f\"{stack.pop()} \"\n # Pop the last ( from the stack\n stack.pop()\n elif re.match(r\"[a-zA-Z_][a-zA-Z0-9_]*\", token):\n # If it matches a name/variable\n # Append to output with a trailing space\n output += f\"{token} \"\n elif re.match(r\"\\d+\",token):\n # If it is a number\n # Then append with a trailing space\n output += f\"{token} \"\n else:\n if self.is_token(token):\n # If it is a token\n # Pop it from the stack while\n # It's priority is smaller than\n # the last priority of the stack\n # Put it into output with a trailing space\n while self.get_token_priority(token) <= self.get_token_priority(stack[-1]):\n output += f\"{stack.pop()} \"\n # And append token to stack\n stack.append(token)\n # Return output\n return output", "def _get_postfix_notation(self):\n postfix, operators_stack = list(), list() # initialize postfix list and auxiliary stack\n\n for element in self.expression.split():\n if element in self.OPERATORS:\n if operators_stack:\n # while stack isn't empty and \"stack top\" is stronger(e.g. multiplication is stronger than addition)\n # move \"stack top\" into postfix list\n while operators_stack \\\n and operators_stack[-1] in self.OPERATORS \\\n and self.OPERATOR_WEIGHT[operators_stack[-1]] >= self.OPERATOR_WEIGHT[element]:\n postfix.append(operators_stack.pop())\n\n operators_stack.append(element)\n\n elif element == self.BRACKET_LEFT:\n operators_stack.append(element)\n\n elif element == self.BRACKET_RIGHT:\n # searching for left bracket on stack, moving \"stack Top\" to postfix list\n while operators_stack and operators_stack[-1] != self.BRACKET_LEFT:\n postfix.append(operators_stack.pop())\n operators_stack.pop() # remove left bracket\n\n else: # numbers always goes into postfix list\n postfix.append(self._get_number_from_string(element))\n\n if operators_stack: # move others stack elements to postfix list\n postfix.extend(reversed(operators_stack))\n\n return postfix", "def postfix(self):\n return self.leftOperand.postfix() + \" \" + self.rightOperand.postfix() + \" \" + str(self.operator)", "def infix_to_postfix(expr):\n ops = Stack()\n postfix = []\n toks = expr.split()\n def tests(chr):\n if chr.isdigit():\n postfix.append(chr)\n\n elif chr == '(':\n ops.push('(')\n\n elif ops.peek() == '(' or ops.empty():\n ops.push(chr)\n\n elif chr ==')':\n while ops.peek() != \"(\":\n postfix.append(ops.pop())\n ops.pop()\n\n elif chr in prec and prec[chr] > prec[ops.peek()]:\n ops.push(chr)\n\n elif chr in prec and prec[chr] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(chr)\n\n elif chr in prec and prec[chr] < prec[ops.peek()]:\n postfix.append(ops.pop())\n tests(chr)\n\n for tok in toks:\n tests(tok)\n\n\n while not ops.empty():\n postfix.append(ops.pop())\n\n\n return ' '.join(postfix)", "def convert_to_postfix(expression):\n infix = list(expression.replace(\" \", 
\"\"))\n opr_priority = {'!': 4, '*': 3, '+': 2, '>': 1, '=': 1, '(': 0}\n postfix = []\n stack = []\n\n for token in infix:\n if token in string.ascii_uppercase:\n postfix.append(token)\n elif token == '(':\n stack.append(token)\n elif token == ')':\n stack_token = stack.pop()\n while stack_token != '(':\n postfix.append(stack_token)\n stack_token = stack.pop()\n else:\n while stack and (opr_priority[stack[len(stack)-1]] >= opr_priority[token]):\n postfix.append(stack.pop())\n stack.append(token)\n\n while stack:\n postfix.append(stack.pop())\n\n return postfix", "def infix_to_postfix(expr):\n # you may find the following precedence dictionary useful\n prec = {'*': 2, '/': 2,\n '+': 1, '-': 1}\n ops = Stack()\n postfix = []\n toks = expr.split()\n ### BEGIN SOLUTION\n opp = {'*', '/','+', '-'}\n for x in toks:\n if str.isdigit(x):\n postfix.append(x)\n elif ops.empty() or ops.peek() == '(':\n ops.push(x)\n elif x == '(':\n ops.push(x)\n elif x == ')':\n while not ops.empty():\n temp = ops.pop()\n if temp == '(':\n break\n else:\n postfix.append(temp)\n elif x in opp:\n while True:\n if prec.get(x) > prec.get(ops.peek()):\n ops.push(x)\n break\n elif prec.get(x) == prec.get(ops.peek()):\n postfix.append(ops.pop())\n ops.push(x)\n break\n elif prec.get(x) < prec.get(ops.peek()):\n postfix.append(ops.pop())\n if ops.empty():\n ops.push(x)\n break\n elif ops.empty():\n break\n\n while True:\n if not ops.empty():\n postfix.append(ops.pop())\n else:\n break\n\n ### END SOLUTION\n return ' '.join(str(x) for x in postfix)", "def postfix_eval(postfix_expr):\n s = StackArray()\n expr = postfix_expr.split()\n for token in expr:\n if token[0] in '0123456789':\n res = token\n s.push(res)\n else: # token is operator\n op2 = s.pop()\n op2 = float(op2)\n if s.is_empty(): # token is ~\n # could also be ~ for non-empty stack\n res = -1 * op2\n else:\n op1 = s.pop()\n op1 = float(op1)\n if token == '^':\n res = op1 ** op2\n elif token == '~':\n s.push(op1)\n res = -1 * op2\n elif token == '*':\n res = op1 * op2\n elif token == '/':\n if op2 == 0:\n raise ZeroDivisionError\n else:\n res = op1 / op2\n elif token == '+':\n res = op1 + op2\n else: # token == '-'\n res = op1 - op2\n s.push(res)\n return res", "def infix_to_postfix(self, exp):\n\n try:\n for i in exp:\n #if the character is an operand output it\n if self.is_operand(i):\n self.postfix.append(i)\n\n #if the character is '(' push it\n elif i is '(':\n self.push('(')\n\n elif i is ')':\n #if the character is ')\" pop until we encounter '(' in the stack\n while not self.isEmpty() and self.peek() is not '(':\n self.postfix.append(self.pop())\n if not self.isEmpty() and self.peek() is not '(':\n return -1\n else:\n self.pop()\n\n #if an operator is encountered\n else:\n while not self.isEmpty() and self.peek() is not '(' and self.not_greater(i):\n self.postfix.append(self.pop())\n self.push(i)\n while not self.isEmpty():\n self.postfix.append(self.pop())\n\n return ''.join(self.postfix)\n\n except Exception as e:\n print(\"Error occurred while performing infix to postfix conversion :\", e)\n traceback.print_exc()\n return -1", "def infix_to_postfix(infix_expr):\n # Append adds new item to list\n # Concat creates a new list every time instead\n\n opstack = StackArray()\n res = []\n lstr = infix_expr.split()\n # l_para = r_para = 0\n # operator precedence dict\n prec = { # higher val = higher prec\n \"(\" : 4,\n \"^\" : 3, # r-to-l (i.e. 2^3^2 = 2^(3^2) )\n \"~\" : 3, # right-to-left (i.e. 
-3^2 = -9)\n # '*/+-' are associated left to right\n \"*\" : 2,\n \"/\" : 2,\n \"+\" : 1,\n \"-\" : 1\n }\n for token in lstr:\n if token[0] in '0123456789':\n res.append(token)\n # not opstack.is_empty() guards against IndexError on empty peek\n if not opstack.is_empty() and opstack.peek() == '^':\n res.append(opstack.pop())\n if not opstack.is_empty() and opstack.peek() == '~':\n res.append(opstack.pop())\n elif token == '(':\n # l_para += 1\n opstack.push(token)\n elif token == ')':\n # r_para += 1\n # opstack can't be empty for proper formatted input\n while opstack.peek() != '(':\n res.append(opstack.pop())\n opstack.pop() # remove left paran '('\n else: # token is ^ ~ * / + -: <-- operators\n while not opstack.is_empty() and prec[token] <= prec[opstack.peek()]:\n if opstack.peek() == '(':\n break\n elif token == '^' and opstack.peek() == '~':\n break\n else:\n res.append(opstack.pop())\n opstack.push(token)\n # if l_para != r_para:\n # raise SyntaxError\n while not opstack.is_empty():\n res.append(opstack.pop())\n res = \" \".join(res)\n res.strip()\n return res", "def infix_to_postfix(string):\n \n # Validate and tokenize the string\n tokens = validate(string)\n \n # Initialize the stack\n s = Stack()\n\n # Ready the final postfix expression\n postfix = ''\n \n # List of operators that have to be handled\n operators = ['+', '-', '*', '/', '^', 'sqrt', 'u-', '(', ')']\n \n # Iterate through tokens\n for token in tokens:\n if token in operators:\n if token in ['sqrt', 'u-']:\n # Square root and unary minus have the highest precendence. So\n # they get pushed on to the stack immediately\n s.push(token)\n elif token == '^':\n top = s.peek()\n while top in ['sqrt', 'u-']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['*', '/']:\n # Multiplication and division have the same precedence. Order\n # is determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['+', '-']:\n # Addition and subtraction have the same precedence. 
Order is\n # determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^', '*', '/']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token == '(':\n s.push(token)\n elif token == ')':\n top = s.peek()\n while top != '(':\n postfix += s.pop() + ' '\n top = s.peek()\n s.pop()\n else: # Token is a number or variable\n postfix += token + ' '\n\n # Pop out any more operators that might be sitting on the stack\n while(len(s)):\n postfix += s.pop() + ' '\n\n # Get rid of trailing whitespace and print\n postfix = postfix.strip()\n return postfix", "def infixToPostfix(expr, prec):\n ops = Stack()\n postfix = []\n toks = expr.split()\n for t in toks:\n if t.isdigit():\n postfix.append(t)\n elif t == '(':\n ops.push('(')\n elif t == ')':\n op = ops.pop()\n while op != '(':\n postfix.append(op)\n op = ops.pop()\n else:\n while True:\n if ops.empty() or ops.peek() == '(':\n ops.push(t)\n break\n if prec[t] > prec[ops.peek()]:\n ops.push(t)\n break\n elif prec[t] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(t)\n break\n else:\n postfix.append(ops.pop())\n while not ops.empty():\n postfix.append(ops.pop())\n return postfix", "def eval_postfix(s):\n stack = Stack()\n \n s = s.split()\n for i in s:\n \tif operator(i) == False:\n \t\tstack.push(int(i))\n \telse:\n \t\tb = stack.pop()\n \t\ta = stack.pop()\n \t\tresult = evaluate(a, i, b)\n \t\tstack.push(result)\n return stack.pop()", "def infix_to_postfix(input_str): # postfix requires that all operators proceed after the two operands that they work on\n\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n if input_str is None: raise ValueError\n # Split input string\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # Create output list, will be fed to postfix_eval() at end\n output_list = []\n # initialize stack large enough to contain all operators\n operator_stack = Stack(len(term_list)//3+1)\n for term in term_list:\n # check for operand, if present append to output list\n if operand_present(term) is True:\n output_list.append(term)\n # check for operator\n elif operator_present(term) or term == '(' or term == ')':\n #if operand_stack.size()<2: \n # raise PostfixFormatException(\"Insufficient operands\")\n # Check for open parentheses\n if term == '(': operator_stack.push(term)\n # Check for closing parentheses, pop stack until open parentheses found\n elif term == ')':\n while 1:\n token = operator_stack.pop()\n if token != '(': \n output_list.append(token)\n else: break\n # Otherwise push to stack but pop any higher/equal order operators\n else:\n sort_operators(term, operator_stack, output_list)\n #print(operator_stack.peek())\n #else: raise PostfixFormatException(\"Invalid token\")\n #if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def eval_postfix(s):\n stack = Stack()\n for x in s.split(): # rozděl 's' dle mezer\n if x == '+':\n stack.push(stack.pop() + stack.pop())\n elif x == '-':\n stack.push(-stack.pop() + stack.pop())\n elif x == '*':\n stack.push(stack.pop() * stack.pop())\n elif x == '/':\n second = stack.pop()\n stack.push(stack.pop() / second)\n else:\n stack.push(float(x))\n 
return stack.pop()", "def resolve_expression(self):\n stack = list()\n\n for element in self._get_postfix_notation():\n if element in self.OPERATORS: # get two elements from top of stack, push result of operation on stack\n operand_a = stack.pop()\n operand_b = stack.pop()\n value = self._calculate(operand_b, operand_a, element)\n stack.append(value)\n else: # push to stack if number\n stack.append(element)\n\n return stack.pop()", "def toPostfix (self,infix):\n postfix = []\n stack = []\n # Loop over characters in the input string\n for char in infix:\n # If char is a number add it to postfix\n if isFloat(char):\n postfix.append(char)\n # If its a special number add it to postfix\n elif char in Calculator.specialNumbers:\n postfix.append(char)\n # If char is a function push it onto the stack\n elif char in Calculator.functions:\n stack.append(char)\n # If the char is a function argument separator (,) pop operators off the stack onto\n # postfix until ( is reached\n elif char == ',':\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # If char is an operator O\n elif char in Calculator.operators:\n # While there is an operator, P, on the top of stack\n while len(stack)>0 and stack[-1] in Calculator.operators:\n stackTop = stack[-1]\n precChar = Calculator.operators[char][1]\n precStackTop = Calculator.operators[stackTop][1]\n # If O in -?+* and its precedence is <= P, pop P off stack\n if char in Calculator.operators and precChar <= precStackTop:\n postfix.append(stack.pop())\n else:\n break\n # Push O onto stack\n stack.append(char)\n # If char is (, push it onto the stack\n elif char == '(':\n stack.append(char)\n # If char is )\n elif char == ')':\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # While top of stack isn't ( pop operators off the top of the stack\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # Pop ( off the stack, but not onto output queue\n stack.pop()\n # If the token at the top of the stack is a function pop it off the stack and add to postfix\n if len(stack) > 0 and stack[-1] in Calculator.functions:\n postfix.append(stack.pop())\n # Finally pop all the operators off the stack onto postfix\n while len(stack)>0:\n # If the operator on the top of the stack is () then there are unmatched brackets\n if stack[-1] in '()':\n return \"Unmatched Error\"\n postfix.append(stack.pop())\n return postfix", "def toPostfix(infix):\n output = \"\" # Output stack - the numbers in our expression\n operators = \"\" # Operator stack (using string for ease but could be a list)\n precedence = {\"*\": 100, \"/\": 90, \"+\": 80, \"-\": 70, \"(\": 60, \")\": 50} # Operator precedence dictionary - operator characters mapped to an arbitrary numeric value representing their precedence (BOMDAS)\n \n #Loop through characters\n for c in infix:\n #If c is a number\n if (c.isdigit()):\n output += c\n #Else if c is a function - ignoring these for now\n #Else if c is an operator - + - * / might account for x and division ASCII symbol later\n elif c in {\"+\", \"-\", \"*\", \"/\"}:\n # While there is still an operator left at the top of the stack\n # AND the operator at the top of the stack has greater precedence\n # 
OR the operator at the top of the stack has equal precedence and the token is left associative (don't know what this means, ignoring for now)\n # AND that operator is not a left parenthesis '('\n # Note: \\ tells python that a statement will continue on to the next line\n while len(operators) > 0 and operators[-1] != '(' and precedence[operators[-1]] > precedence[c]:\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # Push it onto the operator stack\n operators += c\n # Else if token is a left parenthesis (\n elif c == \"(\":\n # Push c to operator stack\n operators += c\n elif c == \")\":\n while operators[-1] != \"(\":\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # If there is a left bracket at the top of the stack, remove it\n if operators[-1] == '(':\n # Pop the operator from the operator stack and discard it\n operators = operators[:-1]\n # if there is a function token at the top of the operator stack... (Ignoring this for now)\n \n # If there are any operators left in the stack, append to output\n while len(operators) > 0:\n # Push operator from top of stack to output\n output += operators[-1]\n # Remove top operator from stack\n operators = operators[:-1]\n return output", "def infix_to_postfix(s):\n result = \"\" # output string\n op = Stack() # operator stack\n i = 0 # index to 's'\n while i < len(s):\n if s[i] in \"0123456789\":\n while i < len(s) and s[i] in \"0123456789\":\n result += s[i]\n i += 1\n result += \" \"\n continue\n if s[i] == '(':\n op.push(s[i])\n elif s[i] == ')':\n top = op.pop()\n while top != '(':\n result += top + \" \"\n top = op.pop()\n else: # s[i] is +,-,*,/\n while not op.is_empty() and not higher_prec(s[i], op.peek()):\n result += op.pop() + \" \"\n op.push(s[i])\n i += 1\n while not op.is_empty():\n result += op.pop() + \" \"\n return result", "def infix_to_postfix(infix:str) -> str:\n stack = deque()\n precedence = {'+':1, '-':1,\n '*':2, '/':2,\n '^':3, '(':-9\n }\n output = \"\"\n for ch in infix:\n if ch not in {'+', '-', '*', '/', '^', '(', ')'}:\n output += ch\n elif ch == '(':\n stack.append(ch)\n elif ch == ')':\n while len(stack) > 0 and\\\n stack[-1] != '(':\n output += stack.pop()\n stack.pop()\n else:\n while len(stack) > 0 and\\\n precedence[stack[-1]] >= precedence[ch]:\n output += stack.pop()\n stack.append(ch)\n while len(stack) > 0:\n output += stack.pop()\n return output", "def infix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. 
Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n stack = Stack(30)\n if input_str == '':\n return ''\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n order = {}\n order[\"+\"] = 1\n order[\"-\"] = 1\n order[\"*\"] = 2\n order[\"/\"] = 2\n order[\"**\"] = 3\n order[\"<<\"] = 4\n order[\">>\"] = 4\n pfix_str = ''\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit() and pfix_str == \"\":\n pfix_str = pfix_str + i\n elif i in op_list:\n if not stack.is_empty():\n p = stack.peek()\n while 0 < stack.size():\n p = stack.peek()\n if p == \"(\":\n break\n if i == \"**\":\n if order[p] <= order[i]:\n break\n else:\n p1 = stack.pop()\n pfix_str = pfix_str + \" \" + p1\n elif order[p] < order[i]:\n break\n else:\n p2 = stack.pop()\n pfix_str = pfix_str + \" \" + p2\n stack.push(i)\n elif i == \"(\":\n stack.push(i)\n elif new_val.isdigit():\n pfix_str = pfix_str + \" \" + i\n elif i == \")\":\n p = stack.peek()\n while p != \"(\":\n pfix_str = pfix_str + \" \" + stack.pop()\n if not stack.is_empty():\n p = stack.peek()\n stack.pop()\n while not stack.is_empty():\n pop3 = stack.pop()\n pfix_str = pfix_str + \" \" + pop3\n return pfix_str", "def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix", "def infix_to_postfix(string_input):\n stack_ops = []\n output = []\n value = \"\"\n\n for item in string_input:\n # item = operator\n if item in ops_prec.keys():\n value = value_to_output(value, output)\n\n # pop elements while they have lower precedence\n while (stack_ops\n and stack_ops[-1] in ops_prec.keys()\n and ops_prec[item] <= ops_prec[stack_ops[-1]]):\n output.append(stack_ops.pop())\n # else put item on stack\n stack_ops.append(item)\n\n # subexpression, delay precedence\n elif item == '(':\n value = value_to_output(value, output)\n\n stack_ops.append(item)\n elif item == ')':\n value = value_to_output(value, output)\n\n # flush output until ( is reached on stack\n while (stack_ops and stack_ops[-1] != '('):\n output.append(stack_ops.pop())\n # remove '('\n stack_ops.pop()\n\n # value = operand\n else:\n # concatenation of value for multidigit ones\n value += item\n # output.append(item) # this would be for one digit\n\n # flush stack to output\n value = value_to_output(value, output)\n\n while stack_ops:\n output.append(stack_ops.pop())\n\n return output", "def prefix_to_postfix(input_str): # prefix requires that all operators precede the two operands that they work on\n\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space 
separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n if input_str is None: raise ValueError\n # split input string into list\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # initialize output list\n output_list = []\n #print(\"OUT SIZE \", len(output_list))\n # initialize operator stack\n operator_stack = Stack(len(term_list)//3+1)\n for i in range(len(term_list)):\n term = term_list[i]\n # prefix should begin with an operator otherwise raise Exception\n if i == 0:\n if operator_present(term) is True: operator_stack.push(term)\n else: raise PostfixFormatException()\n # Check for operator\n elif operator_present(term): \n operator_stack.push(term)\n # check for operand\n elif operand_present(term):\n output_list.append(term)\n # if previous two terms in output list were operands, pop operator stack to output list once\n if operand_present(term_list[i-1]):\n output_list.append(operator_stack.pop())\n # for every three operands there should be an additional operator\n if operand_present(term_list[i-3]) and operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def print_postfix(self):\n if self.is_empty():\n return \"\"\n else:\n ch = \"\"\n if self.is_leaf():\n return ch + str(self.root_value())\n else:\n if self.has_left():\n if self.has_right():\n ch = ch + \" \" + str(self.get_left().print_postfix()) + \" \" \\\n + str(self.get_right().print_postfix())\n return ch + \" \" + str(self.root_value())\n else:\n ch = ch + \" \" + str(self.get_left().print_postfix())\n return ch + \" \" + str(self.root_value)\n else:\n ch = ch + \" \" + str(self.get_right().print_postfix()) + \" \" + str(self.root_value)\n return ch + \" \" + str(self.root_value())", "def expr(self):\n return self._express", "def infix(self):\n return \"(\" + self.leftOperand.infix() + \" \" + str(self.operator) + \" \" + self.rightOperand.infix() + \")\"", "def evaluate_infix(string):\n return postfix(infix_to_postfix(string))", "def evaluatePostfixExp(self, postfixExpr):\n\n operandStack = []\n tokenList = postfixExpr.split(\" \")\n\n for token in tokenList:\n if self.isOperand(token):\n if \".\" in token:\n token = float(token)\n else:\n token = int(token)\n operandStack.append(token)\n else: # token is an operator\n operand2 = operandStack.pop()\n operand1 = operandStack.pop()\n try:\n result = self.applyOperator(operand1, operand2, token)\n except Exception as error:\n print(\"Invalid input. 
Please enter a valid arithmetic expression.\") # Most likely division by\n # zero error.\n return\n operandStack.append(result)\n return operandStack.pop()", "def infix_to_postfix(string):\n tokenlist = string.split()\n output = []\n stack = create_stack()\n for token in tokenlist:\n if token == '(':\n stack.push(token)\n elif token == ')':\n toptoken = stack.pop()\n while toptoken != '(':\n output.append(toptoken)\n toptoken = stack.pop()\n elif token == '*' or token == '/':\n toptoken = stack.top()\n while toptoken in ['*','/']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n elif token == '+' or token == '-':\n toptoken = stack.top()\n while toptoken in ['*','/','+','-']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n else:\n output.append(token)\n while stack.length() > 0:\n output.append(stack.pop())\n space= ' '\n newstr = space.join(output)\n return newstr", "def postfix_eval(input_str):\n\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. \n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n if input_str is None: raise PostfixFormatException\n # create list of operands and operators\n term_list = input_str.split()\n # initialize stack large enough to contain all operands\n operand_stack = Stack(2*len(term_list)//3+1)\n # iterate over term_list\n for term in term_list:\n # check for operatorm, evaluate operators on A & B if True\n if operator_present(term) is True:\n if operand_stack.size()<2: \n raise PostfixFormatException(\"Insufficient operands\")\n B = operand_stack.pop()\n A = operand_stack.pop()\n operand_stack.push(\n calculate(\n A, # A\n B, # B\n term) # operator\n )\n # check for operand, push to stack if True\n elif operand_present(term) is True:\n operand_stack.push(term)\n else: raise PostfixFormatException(\"Invalid token\")\n if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n return operand_stack.pop()", "def expression_tree(postfix:str) -> Node:\n stack = deque()\n for ch in postfix:\n if ch not in {'+', '-', '*', '/', '^'}:\n stack.append(Node(ch))\n else:\n middle_node = Node(ch)\n right_node = stack.pop()\n left_node = stack.pop()\n middle_node ._right = right_node\n middle_node._left = left_node\n stack.append(middle_node)\n return stack.pop()", "def infixToPostfix(inFixStr):\n postFixList = []\n s = Stack()\n chList = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n prec = {\"(\": 0, \"+\": 1, \"-\": 1, \"*\": 2, \"/\": 2} # operator precedence\n\n tok = inFixStr.split(\" \")\n for ch in tok: # ch can be (,), operand, operator\n if ch in chList: # the easy case when token is an operand\n postFixList.append(ch)\n elif ch == \"(\": # easy case of (\n s.push(ch)\n elif ch == \")\": # keep popping and appending until (\n top = s.pop()\n while top != \"(\":\n postFixList.append(top)\n top = s.pop() # pop next\n else: # now we are at opeartors\n # pop higher order operators first\n while not s.isEmpty() and prec[s.peek()] > prec[ch]:\n postFixList.append(s.pop())\n s.push(ch) # push current opeartor\n\n while not s.isEmpty(): # pop everything else in the stack\n postFixList.append(s.pop())\n return \" \".join(postFixList)", "def infixToRPN(expression):\n stack = Stack()\n RPNList = []\n tokens = expression.split()\n spaces = True\n\n # If no spaces in expression then push each char in a 
tokens list\n if len(tokens) == 1:\n spaces = False\n tokens = [char for char in expression]\n\n for token in tokens:\n if token in alphabet or token in numbers:\n RPNList.append(token)\n elif token == '(':\n stack.push(token)\n elif token == ')':\n top = stack.pop()\n while top != '(':\n RPNList.append(top)\n top = stack.pop()\n else:\n while (not stack.isEmpty()) and (precedence[stack.peek()] >= precedence[token]):\n RPNList.append(stack.pop())\n stack.push(token)\n\n while not stack.isEmpty():\n RPNList.append(stack.pop())\n\n if spaces:\n return \" \".join(RPNList)\n else:\n return \"\".join(RPNList)", "def post_fix(expr):\n if expr[:3] == \"8 4\":\n return 54\n elif expr[:3] == \"5 6\":\n return 32\n elif expr[:3] == \"1 1\":\n return 2\n \"\"\"normal solution\"\"\"\n lst = expr.split()\n stack = []\n for e in lst:\n if e in \"+-*/\":\n b = stack.pop()\n a = stack.pop()\n stack.append(str(eval(\"{}{}{}\".format(a, e, b))))\n else:\n stack.append(e)\n return round(float(stack.pop()))", "def postfix(self,Line):\r\n\r\n stak = []\r\n expression = []\r\n infix = []\r\n i=0\r\n while( i <(len(Line))):\r\n if (Line[i] == '(') or (Line[i] == '['):\r\n if len(stak) > 0:\r\n if (Line[i] == '[') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"dup\") or (stak[len(stak) - 1] == \"sizeof\") or (stak[len(stak) - 1] == \"type\")):\r\n return False\r\n if len(stak) > 0:\r\n if (Line[i] == '(') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"sizeof\")):\r\n return False\r\n if (len(stak) == 0) and (Line[i] == '('):\r\n return False\r\n stak.append(Line[i])\r\n elif (Line[i] == ')') or (Line[i] == ']'):\r\n if len(stak) == 0:\r\n return False\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if (stak[j] == '(') and (Line[i] == ')'):\r\n break\r\n elif (stak[j] == '(') and (Line[i] == ']'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ')'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ']'):\r\n break\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if j < 0:\r\n break\r\n\r\n stak = stak[:-1]\r\n if (len(stak) > 0) and (stak[stak.__len__() - 1] == 'dup'):\r\n expression.append(stak[stak.__len__() - 1])\r\n stak = stak[:-1]\r\n elif Line[i] == ',':\r\n if expression.__len__() == 0:\r\n return False\r\n if stak.__len__() != 0:\r\n j = stak.__len__() - 1\r\n while (j >= 0):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if (expression.__len__() > 0)and(expression!=[\"dup\"]):\r\n infix.append(expression)\r\n expression = []\r\n elif Line[i][0].isdecimal():\r\n if Line[i][len(Line[i]) - 1] == 'h':\r\n tmp = extra_functions.is_hexa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n\r\n elif Line[i][len(Line[i]) - 1] == 'o':\r\n tmp = extra_functions.is_octa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'b':\r\n tmp = extra_functions.is_binary(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'd':\r\n tmp = int(Line[i][:-1], 10)\r\n expression.append(tmp)\r\n elif Line[i].isdecimal():\r\n expression.append(int(Line[i]))\r\n else:\r\n return False\r\n elif (Line[i] == \"lengthof\") or (Line[i] == \"sizeof\") or (Line[i] == \"type\") or (Line[i] == \"dup\"):\r\n if (Line[i] == \"dup\"):\r\n if stak.__len__()>0:\r\n j = stak.__len__() - 1\r\n while (j >= 0):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n S = []\r\n L = []\r\n i = 1 + 
i\r\n while (i < len(Line)):\r\n if (Line[i] == '(') or (Line[i] == '['):\r\n S.append(Line[i])\r\n elif (Line[i] == ')') or (Line[i] == ']'):\r\n if len(S) == 0:\r\n return False\r\n j = len(S) - 1\r\n while j >= 0:\r\n if (S[j] == '(') and (Line[i] == ')'):\r\n break\r\n elif (S[j] == '(') and (Line[i] == ']'):\r\n return False\r\n elif (S[j] == '[') and (Line[i] == ')'):\r\n return False\r\n elif (S[j] == '[') and (Line[i] == ']'):\r\n break\r\n S = S[:-1]\r\n j = j - 1\r\n if j < 0:\r\n break\r\n S = S[:-1]\r\n\r\n L.append(Line[i])\r\n if len(S) == 0:\r\n break\r\n i += 1\r\n if L.__len__() > 1:\r\n if (L[L.__len__() - 1] == ')') and (L[0] == '('):\r\n L = L[:-1]\r\n L = L[1:]\r\n else:\r\n return False\r\n else:\r\n return False\r\n tmp = self.postfix(L)\r\n i = i + 1\r\n if tmp != False:\r\n tmp1 = self.Calc_infix(expression)\r\n if tmp1 != False:\r\n for j in range(0, tmp1[0]):\r\n infix = infix + tmp\r\n else:\r\n return False\r\n else:\r\n return False\r\n expression=[\"dup\"]\r\n continue\r\n stak.append(Line[i])\r\n else:\r\n if (Line[i] == '*') | (Line[i] == '-') | (Line[i] == '/') | (Line[i] == '+'):\r\n if len(stak) > 0:\r\n j = len(stak) - 1\r\n while (j >= 0):\r\n if ((stak[j] == '+') | (stak[j] == '-')) & ((Line[i] == '+') | (Line[i] == '-')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == '+') | (stak[j] == '-')) & ((Line[i] == '*') | (Line[i] == '/')):\r\n break\r\n elif ((stak[j] == '*') | (stak[j] == '/')) & ((Line[i] == '*') | (Line[i] == '/')):\r\n\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == '*') | (stak[j] == '/')) & ((Line[i] == '+') | (Line[i] == '-')):\r\n\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif (stak[j] == 'dup') | (stak[j] == 'lengthof') | (stak[j] == 'type') | (stak[j] == 'sizeof'):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n else:\r\n break\r\n j = j - 1\r\n stak.append(Line[i])\r\n else:\r\n expression.append(Line[i])\r\n i += 1\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if (stak[j] == '(') or (stak[j] == '['):\r\n return False\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n\r\n if (expression.__len__() > 0)and(expression!=[\"dup\"]):\r\n infix.append(expression)\r\n return infix", "def infixToPostfix(infix):\n postfix = []\n stackArr = []\n scanOperand = False\n hasIntegral = False\n hasDecimal = False\n currentOperand = 0\n decimal = 1\n for ch in infix:\n currentPrio = charPrio(ch)\n if currentPrio < 0: # current ele is operand\n if not (ch.isdigit() or ch == '.'):\n inputError()\n return\n if not scanOperand:\n scanOperand = True\n if ch == '.':\n if not hasIntegral:\n formatError()\n return\n hasDecimal = True\n continue\n if hasDecimal:\n if ch == '.':\n formatError()\n return\n currentOperand = currentOperand + 0.1 ** decimal * int(ch)\n decimal += 1\n else:\n if not hasIntegral:\n hasIntegral = True\n currentOperand = currentOperand * 10 + int(ch)\n elif currentPrio == 0:\n # none operation\n pass\n else:\n # and operand into postfix expression\n if scanOperand:\n scanOperand = False\n hasDecimal = False\n hasIntegral = False\n decimal = 1\n postfix.append(currentOperand)\n currentOperand = 0\n # handle operator\n if isEmpty(stackArr):\n push(stackArr, ch) # push into stack\n elif currentPrio > prio[peek(stackArr)]:\n push(stackArr, ch) # push into stack\n elif currentPrio == 1: # ')'\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)]:\n ele = pop(stackArr)\n if ele != '(':\n postfix.append(ele) #pop out of stack, 
then add into postfix expression\n else:\n break\n else:\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)] and prio[peek(stackArr)] < 5 :\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n push(stackArr, ch) # push into stack\n if scanOperand:\n postfix.append(currentOperand)\n while not isEmpty(stackArr):\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n return postfix", "def evaluateExpression(self, userExpression):\n return self.evaluatePostfixExp(userExpression)", "def infix_to_prefix(self, expr: str) -> str:\n\n # Reverse expr\n expr = reversed(expr)\n\n # Convert expr to list\n expr = list(expr)\n\n # Reverse all parantheses\n for i, e in enumerate(expr):\n if e == \"(\":\n expr[i] = \")\"\n elif e == \")\":\n expr[i] = \"(\"\n \n # Convert expr back to string\n expr = ''.join(expr)\n\n # Convert expr to postfix\n expr = self.infix_to_postfix(expr)\n\n # Reverse expr again\n expr = reversed(expr)\n\n # Convert expr to string again\n expr = ''.join(expr)\n\n # Return expr\n return expr", "def expression(self):\n return self._expression", "def set_postfix_expression(self, expression, clear_args = True):\n if expression and type(expression) is not str:\n raise TypeError('expression should be either string or None or False')\n if clear_args:\n self._prefix_kwargs = {}\n self._postfix_expression = expression", "def postfix(t_input):\r\n # guardo se gli elementi contengono caratteri non validi\r\n if is_valid(t_input) == 1:\r\n # restituisco Invalid se sono stati trovati caratteri invalidi\r\n result = \"Invalid\"\r\n return result\r\n\r\n # scorri di nuovo gli elementi\r\n # NOTA: sarebbe piu' efficiente fare un unico ciclo\r\n for element in t_input.strip(\"\\0\").split(\" \"):\r\n if element in [\"-\", \"+\", \"*\", \"/\"]:\r\n # ho trovato operatore, ricavo operandi dallo stack\r\n right_operand = stack.pop()\r\n left_operand = stack.pop()\r\n\r\n # faccio l'operazione che serve\r\n if element == \"-\":\r\n op_result = left_operand - right_operand\r\n elif element == \"+\":\r\n op_result = left_operand + right_operand\r\n elif element == \"*\":\r\n op_result = left_operand * right_operand\r\n else:\r\n op_result = left_operand // right_operand\r\n\r\n if boold:\r\n print(\"[DEBUG] Ho trovato operatore '{}': {} {} {} = {}\".format(element, left_operand, element, right_operand, op_result))\r\n # inserisco nello stack il risultato dell'operazione\r\n stack.push(op_result)\r\n else:\r\n # ho trovato operando, lo metto nello stack\r\n # > NOTA: e' necessaria conversione stringa -> intero\r\n stack.push(int(element))\r\n \r\n if boold:\r\n stack.print()\r\n\r\n # il risultato e' l'ultimo elemento\r\n # > NOTA: e' necessaria conversione intero -> stringa\r\n result = str(stack.pop())\r\n return result", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def infix_to_postfix(text: str) -> list:\n \n def unfold_block(text: str) -> list:\n return infix_to_postfix(text) if text[0] == \"(\" else [text]\n\n grouped_raw = group_operations(text)[0]\n if not (\"+\" in grouped_raw or \"-\" in grouped_raw or \"*\" in grouped_raw or \"/\" in grouped_raw):\n grouped = grouped_raw\n stack = [grouped]\n else:\n grouped = group_operations(text)[0][1:-1]\n first_block, operator, second_block = text_to_parts(grouped)\n first_block = 
unfold_block(first_block)\n second_block = unfold_block(second_block)\n stack = [*first_block, *second_block, operator]\n return stack", "def evaluatePostfix(postfix, variableList, variableLocation, methodVariables, output):\n\n stack = [] # Stack that will contain our pushed operands from the postfix expression\n immediateCount = 0 # Keeps count of how many immediate values are being expressed (not variables)\n sourceRegister = 1 # Source register starts at 1: \"B\", and increments as needed\n destRegister = 0 # Destination register starts at 0: 'A\" and increments as needed\n immFlag = 0 # Used to determine whether source or destination register holds an immediate\n\n for element in postfix:\n # Evaluate each postfix element one by one to determine appropriate action\n\n if sourceRegister > 6 or destRegister > 6:\n # We cap the total amount of registers used to 7 (0-6)\n raise ValueError(\"Too many operands in formula.\")\n\n if element in OPERATIONS:\n # Here, our element is an operator. This means we need to pop the top two values from the stack and\n # execute the given operation.\n operand1, operand2 = stack.pop(), stack.pop()\n\n if operand1 in variableList:\n # The operand is in the list of local variables, so we read the value from memory\n output.write(\" MEMR [4] #\" + str(variableLocation[operand1]) + \" $\" + REGISTERS[sourceRegister] + \"\\n\")\n operand1 = REGISTERS[sourceRegister]\n\n elif operand1 in methodVariables:\n # The operand is in the list of arguments passed into the method. We consult the methodVariables list\n # to determine the appropriate offset from the stack pointer register S2.\n output.write(\" MOV $A2 $S2\\n\")\n output.write(\" ADD #\" + str(int(methodVariables[operand1][1]) * 4) + \" $A2\\n\")\n output.write(\" MEMR [4] $A2 $\" + REGISTERS[sourceRegister] + \"\\n\")\n operand1 = REGISTERS[sourceRegister]\n\n elif operand1 in REGISTER_NAMES:\n # This is simply a register that was pushed onto the stack. We can keep it as is\n pass\n\n else:\n # The operand is an immediate value. We test to see if it's a valid integer\n try:\n isinstance(operand1, int)\n immediateCount += 1\n immFlag = 1\n except ValueError as e:\n raise ValueError(\"Invalid operand\")\n\n if operand2 in variableList:\n # The operand is in the list of local variables, so we read the value from memory\n output.write(\" MEMR [4] #\" + str(variableLocation[operand2]) + \" $\" + REGISTERS[destRegister] + \"\\n\")\n operand2 = REGISTERS[destRegister]\n\n elif operand2 in methodVariables:\n # The operand is in the list of arguments passed into the method. We consult the methodVariables list\n # to determine the appropriate offset from the stack pointer register S2.\n output.write(\" MOV $B2 $S2\\n\")\n output.write(\" ADD #\" + str(int(methodVariables[operand2][1]) * 4) + \" $B2\\n\")\n output.write(\" MEMR [4] $B2 $\" + REGISTERS[destRegister] + \"\\n\")\n operand2 = REGISTERS[destRegister]\n\n elif operand2 in REGISTER_NAMES:\n # This is simply a register that was pushed onto the stack. We can keep it as is\n pass\n\n else:\n # The operand is an immediate value. We test to see if it's a valid integer\n try:\n isinstance(operand2, int)\n immediateCount += 1\n immFlag = 2\n except ValueError as e:\n raise ValueError(\"Invalid operand\")\n\n if immediateCount == 2:\n # If we have two immediate values, we don't really need to calculate the arithmetic in Capua ASM.\n # We discretely do the calculations in the background and push the value to the stack. 
This avoids\n # unnecessary processing.\n try:\n stack.append(int(OPERATIONS[element]['function'](float(operand2), float(operand1))))\n\n except ZeroDivisionError:\n raise ValueError(\"Error: Division by zero! - {} {} {}\".format(operand2, element, operand1))\n\n else:\n if immediateCount == 1:\n # only one of the operands was an immediate value. We determine which one is the immediate value,\n # as the correct instruction output depends on it.\n if immFlag == 1:\n output.write(\" MOV #\" + str(int(operand1)) + \" $\" + REGISTERS[sourceRegister] + \"\\n\")\n operand1 = REGISTERS[sourceRegister]\n\n elif immFlag == 2:\n output.write(\" MOV #\" + str(int(operand2)) + \" $\" + REGISTERS[destRegister] + \"\\n\")\n operand2 = REGISTERS[destRegister]\n\n else:\n # No operands were immediate values. We can do the arithmetic operation as is.\n # We move the source and destination registers up one letter for the next operation\n sourceRegister += 1\n destRegister += 1\n\n output.write(\" \" + INSTRUCTIONS[element] + \" $\" + str(operand1) + \" $\" + str(operand2) + \"\\n\")\n stack.append(operand2)\n\n immediateCount = 0\n\n else:\n # We have an operand to push onto the stack\n stack.append(element)\n\n if len(stack) != 1:\n # If the stack has more than or less than one element, the expression is incorrect.\n raise ValueError(\"invalid expression.\")\n\n # our result is then \"saved\" into register A. The assignment can now be completed.\n result = stack.pop()\n\n if result in REGISTER_NAMES:\n # If we just have a register at the bottom of the stack, we assume the result is already in register A\n pass\n\n else:\n try:\n isinstance(int(result), int)\n output.write(\" MOV #\" + str(result) + \" $A\\n\")\n except ValueError as e:\n raise ValueError(\"Invalid mathematical expression\")", "def visit_expression(self, node, children):\n if self.debug:\n print(\"Expression {}\".format(children))\n expr = 0\n start = 0\n # Check for unary + or - operator\n if text(children[0]) in \"+-\":\n start = 1\n\n for i in range(start, len(children), 2):\n if i and children[i - 1] == \"-\":\n expr -= children[i]\n else:\n expr += children[i]\n\n if self.debug:\n print(\"Expression = {}\".format(expr))\n\n return expr", "def evaluate_postfix(list_input):\n stack_values = []\n\n for item in list_input:\n # debug stuff\n # print \"item\", item\n try:\n item_value = float(item)\n has_value = True\n except ValueError:\n has_value = False\n\n # value, operand, put on stack\n if has_value:\n stack_values.append(item_value)\n has_value = False\n\n # operator, pull two operands from stack\n elif (has_value == False\n and len(stack_values) >= 2):\n second_value = stack_values.pop()\n first_value = stack_values.pop()\n result = evaluate_op(item,\n first_value,\n second_value)\n stack_values.append(result)\n # debug stuff\n # print \"midstep\", result\n\n return stack_values.pop()", "def postfixCalc(self,tokens):\n if len(tokens) == 0:\n return 0\n stack = []\n # while expr is not empty\n while len(tokens)>0:\n toke = tokens.pop(0)\n # if token is a number push it onto the stack\n if isFloat(toke):\n stack.append(float(toke))\n # if token is a special number push it onto the stack\n elif toke in Calculator.specialNumbers:\n stack.append(Calculator.specialNumbers[toke])\n else:\n # Operators take 2 inputs, functions take 1 input except root which takes 2\n if toke in Calculator.operators or toke == 'root':\n n = 2\n elif toke in Calculator.functions:\n n = 1\n # If the length of the stack is less than the required number of 
operators the user has not \n # input enough values.\n if len(stack)<n:\n return \"Too Few Error\"\n # Pop the top n numbers from the stack\n popedVals = []\n for i in range(n):\n popedVals.append(stack.pop())\n # Evaluate the operator using the number(s) that were popped, and push back onto the stack\n if n == 2 and toke in Calculator.operators:\n stack.append(Calculator.operators[toke][0](popedVals[1], popedVals[0]))\n elif n == 2:\n stack.append(Calculator.functions[toke](popedVals[1], popedVals[0]))\n elif n == 1:\n stack.append(Calculator.functions[toke](popedVals[0]))\n # If there is more than one value left on the stack the user has input too many values\n if len(stack) > 1:\n return \"Too Many Error\"\n # Return the value on the stack (should only be 1 value left)\n return stack[-1]", "def expression(self):\n\n result = u\"{}({}\".format(self.function.lower(),\n self.metric_name)\n\n if self.dimensions_str:\n result += u\"{{{}}}\".format(self.dimensions_str)\n\n if self.deterministic:\n result += u\", deterministic\"\n\n if self.period:\n result += u\", {}\".format(str(self.period))\n\n result += u\")\"\n\n result += u\" {} {}\".format(self.operator,\n str(self.threshold))\n\n if self.periods:\n result += u\" times {}\".format(str(self.periods))\n\n return result", "def print_infix(self):\n if self.is_empty():\n return \"\"\n else:\n if self.is_leaf():\n return str(self.root_value())\n else:\n if self.has_left():\n if self.has_right():\n return str(self.get_left().print_infix()) + \" \" + str(self.root_value()) + \" \" \\\n + str(self.get_right().print_infix())\n else:\n return str(self.get_left().print_infix()) + \" \" + str(self.root_value())\n else:\n return str(self.root_value()) + \" \" + str(self.get_right().print_infix())", "def calculate_expression(expression, debug_output=False):\n if debug_output:\n print('{:<14}'.format('Calculating:'), '\"', expression, '\"', sep='')\n\n parser_tree = Calculator.parse_expression(expression)\n\n if debug_output:\n print('{:<14}'.format('Postfix:'), '\"', parser_tree.to_string(), '\"', sep='')\n\n if parser_tree.get_root() is not None:\n Calculator._simplify(parser_tree, parser_tree.get_root())\n\n if debug_output:\n print('{:<14}'.format('Result:'), '\"', parser_tree.to_string(), '\"', sep='')\n print()\n\n return parser_tree", "def expression(self):\n assert not self._handle_used\n self._expression_used = True\n return self._expression", "def expression_phrase(self):\n return self._expression_phrase", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def postfix_eval(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. 
\n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n raise PostfixFormatException('Insufficient operands')\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if i in op_list:\n try:\n num_val = stack.pop()\n num_val_initial = stack.pop()\n except IndexError:\n raise PostfixFormatException(\"Insufficient operands\")\n if i == \"+\":\n stack.push(num_val_initial + num_val)\n if i == \"-\":\n stack.push(num_val_initial - num_val)\n if i == \"*\":\n stack.push(num_val_initial * num_val)\n if i == \"/\":\n if num_val == 0:\n raise ValueError(\"0 not divisible\")\n stack.push(num_val_initial / num_val)\n if i == \"**\":\n stack.push(num_val_initial ** num_val)\n if i == \"<<\":\n t1 = type(num_val)\n t2 = type(num_val_initial)\n if t1 == float or t2 == float:\n raise PostfixFormatException(\"Illegal bit shift operand\")\n stack.push(num_val_initial << num_val)\n if i == \">>\":\n t1 = type(num_val)\n t2 = type(num_val_initial)\n if t1 == float or t2 == float:\n raise PostfixFormatException(\"Illegal bit shift operand\")\n stack.push(num_val_initial >> num_val)\n elif new_val.isdigit():\n if \".\" in i:\n stack.push(float(i))\n else:\n stack.push(int(i))\n else:\n raise PostfixFormatException(\"Invalid token\")\n val = stack.pop()\n if not stack.is_empty():\n raise PostfixFormatException(\"Too many operands\")\n return val", "def isPostfixOp(tokens):\n stop = SwiftSupport.getLastOpTokenIndex(tokens)\n if stop == -1:\n return False\n\n start = tokens.index\n prevToken = tokens.get(start - 1)\n nextToken = tokens.get(stop + 1)\n prevIsWS = SwiftSupport.isLeftOperatorWS(prevToken)\n nextIsWS = SwiftSupport.isRightOperatorWS(nextToken)\n result = not prevIsWS and nextIsWS or not prevIsWS and nextToken.type == DOT\n text = tokens.getText(start, stop)\n return result", "def prefix(self):\n return str(self.operator) + \" \" + self.leftOperand.prefix() + \" \" + self.rightOperand.prefix()", "def expression(self, rbp=0):\n t = self.token\n self.token = next(self.token_gen)\n left = t.nud()\n while rbp < self.token.lbp:\n t = self.token\n self.token = next(self.token_gen)\n left = t.led(left)\n return left", "def calculate_expression(self, txt):\n self.shunting_yard(self.text_parser(txt))\n return self.RPN()", "def infix(self):\n return str(self.data)", "def calculate_infix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate infix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\ttry:\n\t\t\tfor e in elements:\n\t\t\t\tif not e.isdigit() and e != \")\":\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and not cls.is_operator(stack[-1]):\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and cls.is_operator(stack[-1]):\n\t\t\t\t\toperator = stack.pop()\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(e), operator)\n\t\t\t\t\tif stack[-1] == \"(\":\n\t\t\t\t\t\tstack.append(str(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\t\tbreak\n\t\t\t\tif e == \")\":\n\t\t\t\t\tvalue = stack.pop()\n\t\t\t\t\tob = stack.pop()\n\t\t\t\t\tif (ob == \"(\"):\n\t\t\t\t\t\tstack.append(str(value))\n\t\t\t\t\telif (cls.is_operator(ob)):\n\t\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(value), 
ob)\n\t\t\t\t\t\tstack.append(str(result))\n\n\t\t\tanswer = float(stack[0])\n\t\t\tlogger.info(f\"the answe is {answer}\")\n\t\t\treturn answer\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Exception from the infix function\")", "def calculator(infix_expr):\n\n # Assign precedence values to operators\n prec = {}\n prec['^'] = 4\n prec['*'] = 3\n prec['/'] = 3\n prec['+'] = 2\n prec['-'] = 2\n prec['('] = 1\n\n # Instantiate stacks\n operand_stack = Stack()\n operator_stack = Stack()\n\n try:\n token_list = infix_expr.split()\n logging.debug(\"token_list = {}\".format(token_list))\n except:\n sys.exit(1)\n\n for token in token_list:\n logging.debug(\"token = {}\".format(token))\n if token in '0123456789':\n operand_stack.push(int(token))\n logging.debug(\"operand_stack.push = {}\".format(token))\n elif token == '(':\n operator_stack.push(token)\n logging.debug(\"operator_stack.push = {}\".format(token))\n elif token == ')':\n logging.debug(\"token = {}\".format(token))\n operator_token = operator_stack.pop()\n logging.debug(\"operator_stack.pop = {}\".format(operator_token))\n while operator_token != '(':\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"while operator_token != '(':\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_token = operator_stack.pop()\n logging.debug(\"new operator_token = {}\".format(operator_token))\n elif token in '^*/+-':\n while (not operator_stack.isEmpty()) and \\\n (prec[operator_stack.peek()] >= prec[token]):\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"Operator - While:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_stack.push(token)\n logging.debug(\"operator_stack.push(): {}\".format(token))\n else:\n logging.debug(\"else.... 
exiting....\")\n sys.exit(1)\n\n # Use all remaining operators\n if not operator_stack.isEmpty():\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n logging.debug(\"Remaining Operators:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operand_stack.push(result)\n\n return operand_stack.pop()", "def expression(self) -> Optional[str]:\n return pulumi.get(self, \"expression\")", "def prep_equation(self):\n \n # This transforms the equation into an expression for sympy.\n prepped_equation = self.equation.replace(\"=\", \"-(\") + \")\"\n\n # This transforms the equation string into a sympy-readable equation.\n transformations = standard_transformations + (implicit_multiplication_application,)\n prepped_equation = parse_expr(prepped_equation, transformations=transformations)\n\n return prepped_equation", "def exp(self):\n return type(self)(self.parent(), self._simplify(self._express.exp()))", "def expression(self, p):\n num_type, first, second = get_type_first_second_of_binary_operation(p.expression, p.term)\n\n opcode_type = I_for_int_R_for_float(num_type)\n opcode_action = \"ADD\" if p.ADDOP == \"+\" else \"SUB\"\n opcode = opcode_type + opcode_action\n\n temp = next(g_generate_temp_variable_name)\n temp_variables_values_dict[temp] = temp\n\n qaud_code(f\"{opcode} {temp} {first} {second}\")\n return Expression(num_type, temp)", "def __str__(self):\n unarybrackets = ['sq', 'sqrt']\n #unary operators which require brackets around their operand\n #if the operand is a leaf, we force the brackets; otherwise the operand\n #is a non-leaf expression and will create its own brackets\n outstr = ''\n if self.is_leaf():\n outstr = outstr + str(self._element)\n else:\n if self._parent and self._element not in unarybrackets:\n outstr = '('\n #unary minus is unary, but needs brackets outside the minus\n if self._leftchild:\n outstr = outstr + str(self._leftchild)\n outstr = outstr + str(self._element)\n if self._element in unarybrackets and self._rightchild.is_leaf():\n outstr = outstr + '('\n outstr = outstr + str(self._rightchild)\n if self._element in unarybrackets and self._rightchild.is_leaf():\n outstr = outstr + ')'\n if self._parent and self._element not in unarybrackets:\n outstr = outstr + ')'\n return outstr", "def nn_to_rpn(self, nn):\n expression = []\n ops = []\n\n # handle +-*/) to add a space before and after the operator\n nn = nn.strip()\n nn = re.sub(r\"(?P<operator>[+\\-*/])\", add_spaces_operator, nn)\n # handle the wrongly replaced \" * * \"(maybe many spaces around *) to \"**\"\n nn = re.sub(r\" *\\* {2}\\* *\", \"**\", nn)\n nn = re.sub(r\"(?P<operator>[(])\", add_spaces_left_bracket, nn)\n nn = re.sub(r\"(?P<operator>[)])\", add_spaces_right_bracket, nn)\n items = re.split(r\"\\s+\", nn)\n for item in items:\n if item in [\"+\", \"-\", \"*\", \"/\"]:\n while len(ops) >= 0:\n if len(ops) == 0:\n ops.append(item)\n break\n op = ops.pop()\n if op == \"(\" or self.ops_rule[item] > self.ops_rule[op]:\n ops.append(op)\n ops.append(item)\n break\n else:\n expression.append(op)\n elif item == \"(\":\n ops.append(item)\n elif item == \")\":\n while len(ops) > 0:\n op = ops.pop()\n if op == \"(\":\n break\n else:\n expression.append(op)\n else:\n expression.append(item)\n\n while len(ops) > 0:\n expression.append(ops.pop())\n\n return expression", "def _repr_(self):\n if 
self.parent()._chart.manifold().options.textbook_output:\n return str(ExpressionNice(self._express))\n else:\n return str(self._express)", "def shunt(infix):\n #convert input to a stack list\n infix=list(infix)[::-1]\n #operator stack and output list as empty lists\n opers,postfix =[],[]\n #operator precedence\n prec={'*':100,'.':90, '|':80, '/':80, '\\\\':80, ')':70, '(':60}\n\n #loop through input one character at a time\n while infix:\n #pop a character from the input\n c=infix.pop() \n #decide what to do based on character\n if c== '(':\n #push an open bracket to opers stack\n opers.append(c)\n elif c==')':\n #pop the operators stack until you find an open bracket\n while opers[-1]!='(':\n postfix.append(opers.pop())\n #get rid of '('\n opers.pop()\n elif c in prec:\n #push any operators on opers stack with hight prec to output\n while opers and prec[c] < prec[opers[-1]]:\n postfix.append(opers.pop())\n opers.append(c)\n else:\n #typically we just push the character to the output\n postfix.append(c)\n #pop all operators to the output\n while opers:\n postfix.append(opers.pop())\n #convert output list to string\n return ''.join(postfix)", "def output(self):\n return self.expr.lhs", "def parse(self):\n return self.expr()", "def RPN(self):\n stack = Stack()\n while not self.output_queue.is_empty():\n item = self.output_queue.pop()\n\n if isinstance(item, numbers.Number):\n stack.push(item)\n\n elif isinstance(item, Function):\n stack.push(item.execute(stack.pop()))\n\n elif isinstance(item, Operator):\n num2 = stack.pop()\n num1 = stack.pop()\n stack.push(item.execute(num1, num2))\n\n return stack.pop()", "def infix_to_assembly(formula: str) -> str:\n asm = \"\"\n postfix = infix_to_postfix(formula)\n for value in postfix:\n if value == \"+\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nadd ax, bx\"\n asm += \"\\npush ax\"\n elif value == \"-\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nsub ax, bx\"\n asm += \"\\npush ax\"\n elif value == \"*\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nmul bx\"\n asm += \"\\npush ax\"\n elif value == \"/\":\n asm += \"\\nmov dx, 0h\"\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\ndiv bx\"\n asm += \"\\npush ax\"\n else:\n # asm += \"\\npush 0\" + value + \"h\"\n # the line above is commented out as the emulator has a bug\n # which pushes immediate 0bbh as 0ffbbh to the stack\n asm += \"\\nmov cx, 0\" + value + \"h\"\n asm += \"\\npush cx\"\n return asm", "def _repr_latex_(self):\n reprlatex = \"\"\n if not self._terms:\n reprlatex += \"0\"\n else:\n for term in self:\n termlatex = term._reprlatex\n if not reprlatex:\n # Adding the first term. No leading +.\n reprlatex += termlatex\n else:\n if not termlatex.startswith(\"-\"):\n # Is it the first term added to the sum? 
No leading +.\n reprlatex += f\"+ {termlatex}\"\n else:\n reprlatex += termlatex\n\n return f\"${reprlatex}$\"", "def brackets(expr):\n expr_latex = sp.latex(expr)\n if '+' in expr_latex or '-' in expr_latex:\n return \"(\" + expr_latex + \")\"\n else:\n return expr_latex", "def get_formula_in_list(self):\n return tree_to_string(self.expression)", "def expression( ):#DOUBLE CHECK THIS\n\t\n\ttok = tokens.peek( )\n\tif debug: print(\"Expression: \", tok)\n\tleft = andExpr( ) #does the left side of the grammar \n\ttok = tokens.peek( )\n\twhile tok == \"or\": #checks to see if there is the token or and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = andExpr( )\n\t\tleft = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE THIS TO STRING CAUSE ITS \"or\"\n\t\ttok = tokens.peek( )\n\treturn left", "def evaluate_post_fix(input_list):\n\n stack = Stack()\n\n # Iterate over elements\n for char in input_list:\n if char in [\"+\", \"-\", \"*\", \"/\"]:\n n_2, n_1 = int(stack.pop()), int(stack.pop())\n result = ops.get(char)(n_1, n_2)\n stack.push(result)\n else:\n stack.push(char)\n\n return stack.head.data", "def stringbuilderexpr(self) :\n\t\ttry :\n\t\t\treturn self._stringbuilderexpr\n\t\texcept Exception as e:\n\t\t\traise e", "def format_expr(expr, precedence=0):\n match expr:\n case BinaryOp(op, left, right):\n result = \\\n f\"{format_expr(left, expr.precedence)} {op} {format_expr(right, expr.precedence+1)}\"\n # Surround the result in parentheses if needed\n if precedence > expr.precedence:\n return f\"({result})\"\n else:\n return result\n case UnaryOp(op, arg):\n return f\"{op}{format_expr(arg, 0)}\"\n case VarExpr(name):\n return name\n case float() | int():\n return str(expr)\n case _:\n raise ValueError(f\"Invalid expression value: {repr(expr)}\")", "def eval(self, expression: str) -> str:\n ret = self.exec_(\"print({})\".format(expression))\n ret = ret.strip()\n return ret", "def simplify(expression):\n q = []\n for x in expression:\n if x != \")\":\n q.append(x)\n else:\n subexp = \"\"\n while q:\n #print(q)\n c = q.pop()\n if c == \"(\":\n if len(q) and (q[-1] == \"+\" or q[-1] == \"-\"):\n sign = q.pop()\n else:\n sign = \"+\"\n subexp = signExp(subexp, sign)\n q.append(subexp)\n break\n else:\n subexp = c + subexp\n exp = \"\"\n while q:\n c = q.pop()\n exp = c + exp\n \n if len(exp) and exp[0] != \"+\" and exp[0] != \"-\":\n # Again if the first character is not a 'sign' make it a \"+\"\n exp = \"+\" + exp\n \n return exp", "def expression(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> Expression:\n ...", "def expand(self, expression):\n if not expression:\n return b\"\"\n if expression[0] in self._operators:\n operator, expression = expression[:1], expression[1:]\n if operator == b\"+\":\n return self._expand(expression, reserved)\n elif operator == b\"#\":\n return self._expand(expression, reserved, prefix=b\"#\")\n elif operator == b\".\":\n return self._expand(expression, prefix=b\".\", separator=b\".\")\n elif operator == b\"/\":\n return self._expand(expression, prefix=b\"/\", separator=b\"/\")\n elif operator == b\";\":\n return self._expand(expression, prefix=b\";\", separator=b\";\",\n with_keys=True, trim_empty_equals=True)\n elif operator == b\"?\":\n return self._expand(expression, prefix=b\"?\", separator=b\"&\",\n with_keys=True)\n elif operator == b\"&\":\n return self._expand(expression, prefix=b\"&\", separator=b\"&\",\n with_keys=True)\n else:\n 
return self._expand(expression)", "def __repr__(self):\n ret = \"\"\n if is_relation(self.root):\n ret += self.root + '('\n for index, obj in enumerate(self.arguments):\n ret += str(obj)\n if index != len(self.arguments)-1:\n ret += ','\n ret += ')'\n elif is_equality(self.root):\n ret = str(self.first) + self.root + str(self.second)\n elif is_quantifier(self.root):\n ret = self.root + str(self.variable) + '[' + str(self.predicate) + ']'\n elif is_unary(self.root):\n ret = self.root + str(self.first)\n elif is_binary(self.root):\n ret = '(' + str(self.first) + self.root + str(self.second) + ')'\n return ret\n # Task 7.2", "def _exec_pm(self, stack):\n rslt = stack.popleft()\n while stack:\n operator, operand = stack.popleft(), stack.popleft()\n if operator == \"+\":\n rslt += operand\n else:\n # operator == \"-\"\n rslt -= operand\n return rslt", "def expression_term(self):\n return self._expression_term", "def eval_expr2(expression):\n\n output = []\n stack = []\n tokens = list(tokenize(expression))\n\n precedence = {\n \"*\": 10,\n \"+\": 20,\n }\n\n for token in tokens:\n if token == \"(\":\n stack.append(token)\n elif token == \")\":\n while stack and stack[-1] != \"(\":\n op = stack.pop(-1)\n output.append(op)\n op = stack.pop(-1)\n assert op == \"(\"\n elif token in [\"+\", \"*\"]:\n while (\n stack\n and stack[-1] in [\"+\", \"*\"]\n and precedence[token] < precedence[stack[-1]]\n ):\n op = stack.pop(-1)\n output.append(op)\n\n stack.append(token)\n elif isinstance(token, int):\n output.append(token)\n else:\n raise NotImplementedError(token)\n\n # print(token, output, stack)\n\n while stack and stack[-1] in [\"+\", \"*\"]:\n op = stack.pop(-1)\n output.append(op)\n\n assert not stack\n\n return eval_ops(output)", "def exp_pop(self) -> Any:\n return self.exp_stack.popleft()", "def get_equation(self):\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n string = \"\"\n\n for index, polynomial in self.polynomials.items():\n polynomial = int(polynomial)\n index = int(index)\n\n if polynomial != 0:\n if polynomial < 0:\n string_pre = \" - \"\n else:\n string_pre = \" + \"\n\n if index != 0:\n string_append = \"x\"\n elif polynomial == 1 or polynomial == -1:\n string_append = str(abs(polynomial))\n else:\n string_append = \"\"\n\n if polynomial < 0:\n polynomial = abs(polynomial)\n\n if polynomial != 1:\n string_append = str(polynomial) + string_append\n\n if index != 0 and index != 1:\n string_append += \"^\" + str(index)\n\n string += string_pre + string_append\n\n if len(string) > 0:\n string = string[3:]\n else:\n string = \"0\"\n\n return string", "def evaluate_RPN(RPN_expression: str):\n\tintermediate_results = []\n\tfor token in RPN_expression.split(DELIMITER):\n\t\ttry:\n\t\t\tintermediate_results.append(\n\t\t\t\tOPERATORS[token](intermediate_results.pop(),\n\t\t\t\t\t\t\t\tintermediate_results.pop())\n\t\t\t)\n\t\texcept:\n\t\t\tintermediate_results.append(int(token))\n\treturn intermediate_results[-1]", "def arithmetic_expression(self, node=None):\n\n if not node:\n node = self.get_nodes_from_position('root')[0]\n\n leaves = self.dfs_leaves()\n arithmetic_expression = []\n\n for i in leaves:\n\n arithmetic_expression.append(i)\n\n parent = self.tree.predecessors(i)[0]\n if not parent in arithmetic_expression:\n arithmetic_expression.append(parent)\n\n return arithmetic_expression", "def postfix_code_line(self, Line):\r\n\r\n stak = []\r\n expression = []\r\n infix = []\r\n for i in range(0, len(Line)):\r\n\r\n reg_32 = [\"eax\", \"ebx\", \"ecx\", 
\"edx\", \"ebp\", \"esp\", \"esi\", \"edi\"]\r\n reg_16 = [\"ax\", \"bx\", \"cx\", \"dx\"]\r\n if (Line[i] == '(') or (Line[i] == '['):\r\n if len(stak) > 0:\r\n if (Line[i] == '[') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"sizeof\") or (stak[len(stak) - 1] == \"type\") or (stak[len(stak) - 1] == \"offset\")):\r\n return False\r\n if (Line[i] == '(') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"sizeof\") or (stak[len(stak) - 1] == \"offset\")):\r\n return False\r\n if (len(stak) == 0) and (Line[i] == '(') and (expression.__len__() != 0):\r\n return False\r\n if expression.__len__() > 0:\r\n if (Line[i] == '[') and ((expression[expression.__len__() - 1]) != \"ptr\") and ((reg_32.__contains__(expression[expression.__len__() - 1]) == False) and (self.Data_variables.__contains__(expression[expression.__len__() - 1]) == False)):\r\n return False\r\n elif (Line[i] == '[') and ((expression[expression.__len__() - 1]) != \"ptr\") and ((reg_32.__contains__(expression[expression.__len__() - 1]) == False)):\r\n tmp = expression[expression.__len__() - 1]\r\n expression[expression.__len__() - 1] = \"ptr_X_\"\r\n expression.append(tmp)\r\n elif (Line[i] == '[') and ((expression[expression.__len__() - 1]) == \"ptr\"):\r\n # continue\r\n 1 == 1\r\n else:\r\n return False\r\n else:\r\n if Line[i] == '[':\r\n expression.append(\"ptr_\")\r\n stak.append(Line[i])\r\n elif (Line[i] == ')') or (Line[i] == ']'):\r\n if len(stak) == 0:\r\n return False\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if (stak[j] == '(') and (Line[i] == ')'):\r\n break\r\n elif (stak[j] == '(') and (Line[i] == ']'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ')'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ']'):\r\n break\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if j < 0:\r\n break\r\n\r\n stak = stak[:-1]\r\n elif Line[i] == ',':\r\n if expression.__len__() == 0:\r\n return False\r\n if len(stak) != 0:\r\n j = len(stak) - 1\r\n while j >= 0:\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if expression.__len__() > 0:\r\n infix.append(expression)\r\n expression = []\r\n elif Line[i][0].isdecimal():\r\n if Line[i][len(Line[i]) - 1] == 'h':\r\n tmp = extra_functions.is_hexa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'o':\r\n tmp = extra_functions.is_octa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'b':\r\n tmp = extra_functions.is_binary(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'd':\r\n tmp = int(Line[i][:-1], 10)\r\n expression.append(tmp)\r\n elif Line[i].isdecimal():\r\n expression.append(int(Line[i]))\r\n else:\r\n return False\r\n elif (Line[i] == \"lengthof\") or (Line[i] == \"sizeof\") or (Line[i] == \"type\") or (Line[i] == \"offset\"):\r\n stak.append(Line[i])\r\n else:\r\n if (Line[i] == '*') or (Line[i] == '-') or (Line[i] == '/') or (Line[i] == '+'):\r\n if len(stak) > 0:\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if ((stak[j] == '+') or (stak[j] == '-')) and ((Line[i] == '+') or (Line[i] == '-')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == '+') or (stak[j] == '-')) and ((Line[i] == '*') or (Line[i] == '/')):\r\n break\r\n elif ((stak[j] == '*') or (stak[j] == '/')) and ((Line[i] == '*') or (Line[i] == '/')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n 
elif ((stak[j] == '*') or (stak[j] == '/')) and ((Line[i] == '+') or (Line[i] == '-')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == 'dup') or (stak[j] == 'lengthof') or (stak[j] == 'type') or (stak[j] == 'sizeof')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n else:\r\n break\r\n j = j - 1\r\n\r\n stak.append(Line[i])\r\n else:\r\n try:\r\n if ((Line[i][0] == Line[i][len(Line[i]) - 1]) and (Line[i][0] == '\"')) or ((Line[i][0] == Line[i][len(Line[i]) - 1]) and (Line[i][0] == \"\\'\")):\r\n tmp = extra_functions.convert_string(Line[i])\r\n expression.append(tmp)\r\n continue\r\n raise Exception(\"NotString\")\r\n except Exception:\r\n expression.append(Line[i])\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if (stak[j] == '(') or (stak[j] == '['):\r\n return False\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n\r\n if expression.__len__() > 0:\r\n infix.append(expression)\r\n\r\n return infix", "def expr(s):\n if isinstance(s, Expr): return s\n if isnumber(s): return Expr(s)\n ## Replace the alternative spellings of operators with canonical spellings\n s = s.replace('==>', '>>').replace('<==', '<<')\n s = s.replace('<=>', '%').replace('=/=', '^')\n ## Replace a symbol or number, such as 'P' with 'Expr(\"P\")'\n s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr(\"\\1\")', s)\n ## Now eval the string. (A security hole; do not use with an adversary.)\n return eval(s, {'Expr':Expr})", "def exeval(expression): \n if len(expression) <= 3: #Assuming no spaces (\" \") between each value given in the expression\n if expression[0] == \"+\":\n return float(expression[1]) + float(expression[2])\n elif expression[0] == \"-\":\n return float(expression[1]) - float(expression[2])\n else:\n if expression[0] == \"+\":\n return float(expression[1]) + exeval(expression[2:])\n elif expression[0] == \"-\":\n return float(expression[1]) - exeval(expression[2:])", "def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left" ]
[ "0.7961822", "0.78936446", "0.7821992", "0.77166677", "0.76865053", "0.7469475", "0.7190205", "0.71594596", "0.7055572", "0.696866", "0.6934861", "0.6923071", "0.6912573", "0.68213874", "0.6815801", "0.68071514", "0.6803007", "0.67953223", "0.67841136", "0.67317486", "0.6694917", "0.66704273", "0.66286063", "0.66140974", "0.65966684", "0.65886134", "0.65429217", "0.65381974", "0.6528521", "0.6502951", "0.6482315", "0.6402352", "0.6332747", "0.6312914", "0.6306951", "0.6147073", "0.609113", "0.6077245", "0.60503656", "0.6037519", "0.60284173", "0.6023747", "0.6023747", "0.60134065", "0.5961367", "0.5948007", "0.58907205", "0.5869775", "0.5837516", "0.5804538", "0.57694477", "0.573645", "0.57060724", "0.57000834", "0.57000834", "0.57000834", "0.5696889", "0.56825185", "0.5649508", "0.5648439", "0.5602371", "0.55839086", "0.5582602", "0.5574567", "0.5559988", "0.5547917", "0.55420333", "0.5542022", "0.5532214", "0.5518079", "0.5506121", "0.55055577", "0.55046", "0.5479519", "0.54787046", "0.5443405", "0.5420171", "0.53974336", "0.5376979", "0.53748524", "0.5330922", "0.5308253", "0.5293866", "0.52935505", "0.5288142", "0.5282677", "0.5280555", "0.52660286", "0.5263312", "0.5257955", "0.5253707", "0.52458507", "0.52350247", "0.5223085", "0.51880467", "0.51844996", "0.5176274", "0.5173105", "0.51720166", "0.51714087" ]
0.6813625
15
Returns the string rep of the expression.
def __str__(self): return str(self.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expression(self):\n\n result = u\"{}({}\".format(self.function.lower(),\n self.metric_name)\n\n if self.dimensions_str:\n result += u\"{{{}}}\".format(self.dimensions_str)\n\n if self.deterministic:\n result += u\", deterministic\"\n\n if self.period:\n result += u\", {}\".format(str(self.period))\n\n result += u\")\"\n\n result += u\" {} {}\".format(self.operator,\n str(self.threshold))\n\n if self.periods:\n result += u\" times {}\".format(str(self.periods))\n\n return result", "def expression_phrase(self):\n return self._expression_phrase", "def __repr__(self):\n descr = list(f\"<PotentialExpression, \")\n descr.append(f\"expression: {self.expression}, \")\n descr.append(\n f\"{len(self.independent_variables)} independent variables>\"\n )\n\n return \"\".join(descr)", "def _repr_(self):\n if self.parent()._chart.manifold().options.textbook_output:\n return str(ExpressionNice(self._express))\n else:\n return str(self._express)", "def operand_to_str(self, operand):\n s = str(operand)\n if s.startswith('(') and s.endswith(')'):\n return s\n if (isinstance(operand, Literal) or\n isinstance(operand, Attr) or \n isinstance(operand, Star)):\n return s\n return \"(%s)\" % s", "def pddl_rep(self):\n rep = ''\n if self.is_negated:\n rep += \"(not \"\n if self.name != \"\":\n rep += \"(\" + self.name + \" \"\n else:\n rep += \"(\"\n for argument in self.args:\n if self.is_typed:\n rep += argument[0] + \" - \" + argument[1] + \" \"\n else:\n rep += argument + \" \"\n rep = rep[:-1]\n rep += \")\"\n if self.is_negated:\n rep += \")\"\n return rep", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def pddl_rep(self):\n rep = '(' + self.obj_list[0] + \" (\"\n for argument in self.obj_list[1:-1]:\n rep += argument + \" \"\n rep = rep[:-1]\n rep += \") \" + self.obj_list[-1] + \") \"\n return rep", "def _repr_latex_(self):\n reprlatex = \"\"\n if not self._terms:\n reprlatex += \"0\"\n else:\n for term in self:\n termlatex = term._reprlatex\n if not reprlatex:\n # Adding the first term. No leading +.\n reprlatex += termlatex\n else:\n if not termlatex.startswith(\"-\"):\n # Is it the first term added to the sum? 
No leading +.\n reprlatex += f\"+ {termlatex}\"\n else:\n reprlatex += termlatex\n\n return f\"${reprlatex}$\"", "def _repr_(self):\n cr = self._cr()\n if len(self) == 0:\n return repr(self.__unit)\n s = ''\n mul = ' * '\n if cr:\n mul += '\\n'\n x = self.__x[0][0]\n try:\n atomic = (isinstance(x, (int, long)) or\n self.universe()._repr_option('element_is_atomic'))\n except AttributeError:\n atomic = False\n\n if isinstance(x, Element):\n one = x.parent()(1)\n else:\n one = 1\n\n for i in range(len(self)):\n t = repr(self.__x[i][0])\n n = self.__x[i][1]\n if not atomic and (n != 1 or len(self) > 1 or self.__unit != one):\n if '+' in t or '-' in t or ' ' in t:\n t = '(%s)'%t\n if n != 1:\n t += '^%s'%n\n s += t\n if i < len(self)-1:\n s += mul\n if self.__unit != one:\n if atomic:\n u = repr(self.__unit)\n else:\n u = '(%s)'%self.__unit\n s = u + mul + s\n return s", "def expr(self):\n return self._express", "def _repr_(self):\n s = 'An equation '\n have_A = not self.A().is_zero()\n if have_A:\n s += repr(self.A()) + ' x '\n if self.b()>=0:\n if have_A:\n s += '+'\n else:\n s += '-'\n if have_A:\n s += ' '\n s += repr(abs(self.b())) + ' == 0'\n return s", "def _repr_(self):\n s = 'An equation '\n have_A = not self.A().is_zero()\n if have_A:\n s += repr(self.A()) + ' x '\n if self.b()>=0:\n if have_A:\n s += '+'\n else:\n s += '-'\n if have_A:\n s += ' '\n s += repr(abs(self.b())) + ' == 0'\n return s", "def stringbuilderexpr(self) :\n\t\ttry :\n\t\t\treturn self._stringbuilderexpr\n\t\texcept Exception as e:\n\t\t\traise e", "def expression(self):\n return self._expression", "def __repr__(self):\n ret = \"\"\n if is_relation(self.root):\n ret += self.root + '('\n for index, obj in enumerate(self.arguments):\n ret += str(obj)\n if index != len(self.arguments)-1:\n ret += ','\n ret += ')'\n elif is_equality(self.root):\n ret = str(self.first) + self.root + str(self.second)\n elif is_quantifier(self.root):\n ret = self.root + str(self.variable) + '[' + str(self.predicate) + ']'\n elif is_unary(self.root):\n ret = self.root + str(self.first)\n elif is_binary(self.root):\n ret = '(' + str(self.first) + self.root + str(self.second) + ')'\n return ret\n # Task 7.2", "def pddl_rep(self):\n rep = '('\n for argument in self.obj_list:\n rep += argument + \" \"\n rep = rep[:-1]\n rep += \") - number\"\n return rep", "def __repr__(self):\n\n rep = \"\"\n rep += str(self.literal)+\"\\n\"\n rep += str(self.bindings)+\"\\n\"\n rep += str(self.facts)+\"\\n\"\n return (rep)", "def __str__(self):\n # special cases\n if self.is_nan() :\n return \"nan\"\n elif self.coeff == 1 :\n if self.expt == 1 :\n return \"x\"\n else :\n return \"x^\" + str(self.expt)\n elif self.coeff == -1 :\n if self.expt == 1 :\n return \"-x\"\n else :\n return \"-x^\" + str(self.expt)\n \n # str_builder\n if self.expt == 0 :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator)\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator))\n elif self.expt == 1 :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator) + \"*x\"\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator)) + \"*x\"\n else :\n if self.coeff.denominator == 1 :\n return str(self.coeff.nominator) + \"*x^\" + str(self.expt)\n else :\n return \"{}/{}\".format(str(self.coeff.nominator), str(self.coeff.denominator)) + \"*x^\" + str(self.expt)", "def name(self):\n base_str = 'd{}{}_'.format(self.derivative_count if\n self.derivative_count > 1 else '', self.expr)\n for var, 
count in self.variable_count:\n base_str += 'd{}{}'.format(var, count if count > 1 else '')\n return base_str", "def _create_formatted_string(self):\n string = NALSyntax.StatementSyntax.Start.value + \\\n self.get_subject_term().get_formatted_string()\n\n string += \" \" + self.get_copula_string() + \" \"\n\n string += self.get_predicate_term().get_formatted_string() + \\\n NALSyntax.StatementSyntax.End.value\n\n return string", "def __repr__(self): # pragma: no cover\r\n if self.latex == self.sans_parens:\r\n latex_repr = u'\"{}\"'.format(self.latex)\r\n else:\r\n latex_repr = u'\"{}\" or \"{}\"'.format(self.latex, self.sans_parens)\r\n\r\n if self.tall:\r\n wrap = u'<[{}]>'\r\n else:\r\n wrap = u'<{}>'\r\n\r\n return wrap.format(latex_repr)", "def xpath_as_string(self, expr=''):\n return ''.join(self.xpath_as_xml(expr))", "def get_expr_exec_format(self):\n if self.haveExpr:\n self.haveExpr = False\n return '{}'\n return 'SELECT {} FROM DUMMY'", "def substitute(self):\n\n n_chars = len(self.literal)\n term = ['' for i in range(n_chars)]\n\n for i in range(n_chars):\n if self.literal[i] in self.bindings:\n term[i] = self.bindings[self.literal[i]]\n else:\n term[i] = self.literal[i]\n\n return (''.join(term))", "def eval(self, expression: str) -> str:\n ret = self.exec_(\"print({})\".format(expression))\n ret = ret.strip()\n return ret", "def __str__(self):\n termStrings = []\n for term in self.LHS:\n coefficient = term[0]\n unknownSet = term[1]\n\n termString = str(coefficient) + ' * '\n unknownStrings = []\n for unknown in unknownSet:\n unknownString = unknown[0].__class__.__name__ + '@' + str(id(unknown[0]))[-4:] + '.' + unknown[1] # last 4 digits of variable ID . attribute name\n unknownStrings.append(unknownString)\n termString += str.join(' * ', unknownStrings)\n termStrings.append(termString)\n\n termStrings = str.join(' + ', termStrings)\n return termStrings + ' = ' + str(self.RHS)", "def _sympystr(self, printer: StrPrinter, *args: Any) -> str:\n return self.__str__()", "def __str__(self):\n unarybrackets = ['sq', 'sqrt']\n #unary operators which require brackets around their operand\n #if the operand is a leaf, we force the brackets; otherwise the operand\n #is a non-leaf expression and will create its own brackets\n outstr = ''\n if self.is_leaf():\n outstr = outstr + str(self._element)\n else:\n if self._parent and self._element not in unarybrackets:\n outstr = '('\n #unary minus is unary, but needs brackets outside the minus\n if self._leftchild:\n outstr = outstr + str(self._leftchild)\n outstr = outstr + str(self._element)\n if self._element in unarybrackets and self._rightchild.is_leaf():\n outstr = outstr + '('\n outstr = outstr + str(self._rightchild)\n if self._element in unarybrackets and self._rightchild.is_leaf():\n outstr = outstr + ')'\n if self._parent and self._element not in unarybrackets:\n outstr = outstr + ')'\n return outstr", "def lispstr(exp):\n if isinstance(exp, List):\n return \"(\" + \" \".join(map(lispstr, exp)) + \")\"\n else:\n return str(exp)", "def _repr_(self):\n ending = \"\"\n if str(self.t)!='t':\n ending = ' with t=%s'%(self.t)\n return \"%s-Bounded Quotient of Symmetric Functions over %s\"%(self.k, self.base_ring())+ending", "def __repr__(self):\n if is_constant(self.root) or is_variable(self.root):\n return (self.root)\n else:\n return_string = self.root + \"(\"\n for index, arg in enumerate(self.arguments):\n return_string += str(arg)\n if index != len(self.arguments)-1:\n return_string += \",\"\n return_string += \")\"\n return 
return_string", "def __str__(self):\n def recurse(node, level):\n s = \"\"\n if type(node) == LeafNode:\n return (\"| \" * level) + str(node) + \"\\n\"\n if node != None:\n s += recurse(node.rightOperand, level + 1)\n s += \"| \" * level\n s += str(node.operator) + \"\\n\"\n s += recurse(node.leftOperand, level + 1)\n return s\n return recurse(self, 0)", "def __str_metric_expression(self,metricExpression):\n if metricExpression['type'] == \"LEAF_METRIC_EXPRESSION\":\n return metricExpression['metricDefinition']['logicalMetricName'].lower()\n else: #metricExpression['type'] == \"BOOLEAN_METRIC_EXPRESSION\"\n return __str_metric_expression(metricExpression['expression1']) + \" \" + metricExpression['operator']['type'] + \" \" + \\\n __str_metric_expression(metricExpression['expression2'])", "def display(self):\n from sage.tensor.modules.format_utilities import FormattedExpansion\n from sage.misc.latex import latex\n resu_txt = str(self.parent()._chart[:]) + ' |--> ' + \\\n str(ExpressionNice(self._express))\n resu_latex = latex(self.parent()._chart[:]) + r' \\mapsto' + \\\n latex(ExpressionNice(self._express))\n return FormattedExpansion(resu_txt, resu_latex)", "def latex_str_expanded(self):\n try:\n len(self.coeff[0])\n coeff_strs = [str_number_array(c, latex=True) for c in self.coeff]\n basis_strs = bernstein_basis_simplex_latex(self.r, self.vertices)\n for i in range(len(basis_strs)):\n if len(basis_strs[i]) > 3:\n basis_strs[i] = \"(\" + basis_strs[i] + \")\"\n return str_dot_product(coeff_strs, basis_strs)\n except TypeError:\n coeff_strs = [str_number(c, latex_fraction=True) for c in self.coeff]\n basis_strs = bernstein_basis_simplex_latex(self.r, self.vertices)\n for i in range(len(basis_strs)):\n if len(basis_strs[i]) > 3:\n basis_strs[i] = \"(\" + basis_strs[i] + \")\"\n return str_dot_product(coeff_strs, basis_strs)", "def _getvalue_expr_Str(self, expr: ast.Str) -> Any:\n return expr.s", "def __repr__(self) -> str:\n s = '-' if self.neg else '+'\n if 0 in self.numer:\n s += '0'\n elif 0 in self.denom:\n s += 'undef'\n else:\n s += '%d/%d' % (self.numer_prod(),\n self.denom_prod())\n return s", "def _latex_(self):\n if len(self) == 0:\n return self.__unit._latex_()\n try:\n atomic = (isinstance(self.__x[0][0], (int, long)) or\n self.universe()._repr_option('element_is_atomic'))\n except AttributeError:\n atomic = False\n s = ''\n for i in range(len(self)):\n t = self.__x[i][0]._latex_()\n if not atomic and ('+' in t or '-' in t or ' ' in t):\n t = '(%s)'%t\n n = self.__x[i][1]\n if n != 1:\n t += '^{%s}'%n\n s += t\n if i < len(self)-1:\n s += ' \\\\cdot '\n if self.__unit != 1:\n if atomic:\n u = self.__unit._latex_()\n else:\n u = '\\\\left(%s\\\\right)'%self.__unit._latex_()\n s = u + ' \\\\cdot ' + s\n return s", "def __str__(self):\n string = ''\n for degree, coef in enumerate(self.coefs, 1):\n degree = degree - 1\n string += str(coef)+'x^' + str(degree) + ' + '\n string = string[0:-3] # remove the last ' + '\n return string", "def gen_string_literal(self, expr):\n data = expr.to_bytes()\n value = self.emit(ir.LiteralData(data, \"cstr\"))\n value = self.emit(ir.AddressOf(value, \"dptr\"))\n return value", "def __str__(self):\n value = []\n for term in self.terms:\n value += [term.termname]\n return '<formula: %s>' % ' + '.join(value)", "def __str__(self):\n value = []\n for term in self.terms:\n value += [term.termname]\n return '<formula: %s>' % ' + '.join(value)", "def expression(self) -> Optional[str]:\n return pulumi.get(self, \"expression\")", "def literal_to_string(self, 
literal):\n s = '!' if is_negated(literal) else ''\n return s + self.variables[literal >> 1]", "def postfix(self):\n return self.leftOperand.postfix() + \" \" + self.rightOperand.postfix() + \" \" + str(self.operator)", "def __str__(self):\n\t\tif self.code == Const.NEWLINE:\n\t\t\treturn \"[newline]\"\n\t\t\n\t\tname = \"[\" + self.code\n\t\tif self.code in (Const.numericalLiteral, Const.ID, Const.UET):\n\t\t\tname += \"(\" + self.value + \")\"\n\t\tname += \"]\"\n\t\treturn name", "def toString(self):\n \n if not self.coeff_map:\n raise Exception('no coeffs in constrain %s'%self.name)\n \n if self.result is None:\n raise Exception('result of this constrain is unknown!')\n \n if self.name is None:\n res=\"\"\n else:\n res=self.name+\": \"\n \n res+=coeff_sum(self.coeff_map) \n \n res+=self.op\n res+=\" \"+str(self.result)\n \n return res;", "def prefix(self):\n return str(self.operator) + \" \" + self.leftOperand.prefix() + \" \" + self.rightOperand.prefix()", "def _print_dot(_self, expr):\r\n return r'{((%s) \\cdot (%s))}' % (expr.args[0], expr.args[1])", "def get_string(self):\n return MACRO_TEMPLATE.substitute(\n macro_name = str(self._macro_name),\n macro_content = str(self._macro_content) + r\"\\xspace\")", "def output(self):\n return self.expr.lhs", "def vsstrrepr(expr, **settings):\n p = VectorStrReprPrinter(settings)\n return p.doprint(expr)", "def get_str(nr_rational):\n return f'{get_numarator(nr_rational)} / {get_numitor(nr_rational)}'", "def __str__(self):\n\n return \"[\" + str(self.quick) + \"] \" + \\\n self.regexp.pattern + \" --> \" + \\\n str(self.handler)", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def simplify(self, expr):\n return sy.simplify(expr)", "def Rstr(self):\n\n results = []\n for expr in self.graded_dict:\n _expr = str(expr).replace(\"*\", \":\")\n if _expr != '1':\n for order in self.graded_dict[expr]:\n for factors in self.graded_dict[expr][order]:\n results.append(':'.join([_expr] + [f.name for f in factors]))\n else:\n for order in self.graded_dict[expr]:\n for factors in self.graded_dict[expr][order]:\n if factors:\n results.append(':'.join([f.name for f in factors]))\n else:\n results.append('1')\n return \"+\".join(results)", "def to_string(self):\n\n # Get the header of the formula and the body separately\n header = f\"p cnf {self.nvars} {self.nclauses}\"\n body = \"\\n\".join(map(clause_to_string,\n self.base_clauses + self.aux_clauses))\n\n # Return the header and the body combined\n return header + \"\\n\" + body", "def expression(self):\n assert not self._handle_used\n self._expression_used = True\n return self._expression", "def __str__(self):\n\n return '[ TID: ' + str(self.templateID) + ' // NAME: ' + self.templateName + ' // FIELD: ' + self.field \\\n + ' // REGEX: {{ ' + self.rawRegex + ' }} // GRP: { ' + str(self.regexGrp) + ' } || MATCH_IDX: { ' \\\n + str(self.regexMatchIdx) + ' } ]'", "def _repr_(self):\n s = 'The projection of a polyhedron into ' + repr(self.dimension) \n s += ' dimensions.'\n return s + \"\\n\"", "def return_string(self):\n\n if self.__numerator * self.__denominator < 0:\n sign = \"-\"\n else:\n sign = \"\"\n return \"{:s}{:d}/{:d}\".format(sign, abs(self.__numerator), abs(self.__denominator))", "def str(self):\n if self.num_dice is not None and self.dice_type is not None:\n descr = 
\"{}D{}\".format(self.num_dice, self.dice_type)\n if self.plus > 0:\n descr += \"+{}\".format(self.plus)\n elif self.min_value is not None and self.max_value is not None:\n descr = \"{}-{}\".format(self.min_value, self.max_value)\n elif self.plus != 0:\n descr = str(self.plus)\n else:\n descr = \"\"\n\n return descr", "def get_draw_expr(var, frame):\n return proto_draw_expr[var].format(frame)", "def _repr_(self):\n ring = self.coordinate_ring()\n args = ring.arguments()\n repr_x=self.change_ring(SR)._repr_()\n if len(args) == 1:\n return \"%s |--> %s\" % (args[0], repr_x)\n else:\n args = \", \".join(map(str, args))\n return \"(%s) |--> %s\" % (args, repr_x)", "def _repr_(self):\n if self._tensor._name is not None:\n name = self._tensor._name\n else:\n name = 'X'\n if self._con == '':\n return name + '_' + self._cov\n elif self._cov == '':\n return name + '^' + self._con\n else:\n return name + '^' + self._con + '_' + self._cov", "def expression_term(self):\n return self._expression_term", "def string(self):\n return f'y = {self.a.item()}'", "def __repr__(self):\n modulename = str(type(self).__module__)\n\n ichars = len(str(int(self.max())))\n slen = ichars + casas\n fstr = \"{{:>{}.{}g}}\".format(slen, casas)\n\n if modulename == \"__main__\":\n s = str(type(self).__name__)\n else:\n s = modulename + '.' + str(type(self).__name__)\n\n s += '(['\n s += ', '.join([fstr.format(x) for x in self.elem])\n s += '])'\n\n return s", "def _repr_(self):\n s = \"Space of Vector-Valued harmonic weak Maass forms\"\n s += \" on \" + str(self.multiplier().group()) + \" of weight \" + str(self._weight_rat) + \" \"\n s += \" and values in CC[ZZ/\" + str(len(self.multiplier().D())) + \"ZZ].\"\n s += \"\\nRepresentation is \" + str(self.multiplier())\n return s", "def __repr__(self):\n if len(self) == 0:\n return f\"{self.__class__.__name__}()\"\n else:\n return \" + \".join([repr(term) for term in self])", "def to_string(self):\r\n production_dict = self.get_production_dict()\r\n\r\n string_prods = ['S -> ' + ' | '.join([''.join(symbols) for symbols in production_dict.pop('S')])]\r\n for non_terminal, symbols_list in production_dict.items():\r\n string_prods.append(non_terminal + ' -> ' + ' | '.join([''.join(symbols) for symbols in symbols_list]))\r\n\r\n # concateate em\r\n return '\\n'.join(string_prods)", "def reaction_str(self):\n\n def format(number):\n return str(number).rstrip(\".0\") + \" \"\n\n reactant_bits = []\n product_bits = []\n for met in sorted(self._metabolites, key=attrgetter(\"id\")):\n coefficient = self._metabolites[met]\n if coefficient >= 0:\n product_bits.append(format(coefficient) + met.id)\n else:\n reactant_bits.append(format(abs(coefficient)) + met.id)\n\n reaction_string = ' + '.join(reactant_bits)\n if self.gapfill_direction == '=':\n reaction_string += ' <=> '\n elif self.gapfill_direction == '<':\n reaction_string += ' <-- '\n elif self.gapfill_direction == '>':\n reaction_string += ' --> '\n reaction_string += ' + '.join(product_bits)\n return reaction_string", "def __str__(self):\n\n rep = 'Generalized Syllogism:\\n'\n rep += '\\ttask: {}\\n'.format(self.task)\n rep += '\\tencoded_task: {}\\n'.format(self.encoded_task)\n rep += '\\tp1: {}\\n'.format(self.p1)\n rep += '\\tp2: {}\\n'.format(self.p2)\n rep += '\\tquantifier_p1: {}\\n'.format(self.quantifier_p1)\n rep += '\\tquantifier_p2: {}\\n'.format(self.quantifier_p2)\n rep += '\\tfigure: {}\\n'.format(self.figure)\n rep += '\\tTerms:\\n'\n rep += '\\t\\tA: {}\\n'.format(self.A)\n rep += '\\t\\tB: {}\\n'.format(self.B)\n 
rep += '\\t\\tC: {}\\n'.format(self.C)\n return rep", "def __str__(self):\n if self.U is None:\n q0 = np.ones(self.N + self.M)\n else :\n q0 = np.ones(self.N + self.M + self.U)\n \n return \"Evaluated point q0: \" + str(q0) + \"\\n F operator evaluated at q0: \" + str(self.Fone(q0)) + \"\\n Proximal Operator evaluated at q0: \" + str(self.prox(q0)) + \"\\n optimized?: \" + str(self.optimized) + \"\\n J Operator evaluated at q0: \" + str(self.Jone(q0)) + \"\\n\"", "def __repr__(self):\n s = self.name\n if self.param != \"None\":\n s += ' with parameter '+self.param\n s += '; '+self.applyTo\n if self.applyTo != \"global\":\n s += ': '+self.conditions\n return s", "def __str__(self):\n debug_str = \"%s ::=\" % str(self.head)\n for symbol in self.body:\n debug_str += \" %s\" % str(symbol)\n return debug_str", "def __repr__(self):\n st = '\\nProof(syntax=\\\"' + self.syntax + '\\\", formula_list=[\\n'\n for l in self.proof[:-1]:\n st += str(l) + ',\\n'\n return st + str(self.proof[-1]) + '])'", "def __str__(self):\n\n strme = \"fed method {} {} {} {}\"\\\n .format(ExpandedEnsemble.key, self.eta0, self.c_upd, self.n_upd)\n if self.smooth:\n strme = \"{!s} {!s}\".format(strme, self.smooth)\n\n return strme", "def to_str(self):\n return u\"Superellipse[{:.4g},{:.4g}]\".format(self.alpha0.l, self.alpha0.r)", "def __repr__(self):\n def r(a):\n rr = re.sub('\\s','',repr(a))\n rr = re.sub('\\.(?=\\D)','',rr)\n rr = re.sub('(?<=\\D)0\\.','.',rr)\n if rr.startswith('array(['):\n rr = rr[6:-1]\n return rr\n return 'MSTP(shape=%s,\\n ra=%s,\\n ri=%s,\\n dt=%s,\\n pal=%s)' % (\n r(self.z.shape), r(self.ra), r(self.ri), r(self.dt), r(self.pal))", "def make_flat(self):\n\n if type(self.exp) == str:\n if not self.closure or self.exp == 'ϵ':\n return self.exp\n elif len(self.exp) == 1:\n return self.exp + self.closure\n else:\n return '(' + self.exp + ')' + self.closure\n else:\n flat_exp = ''.join( str(e) for e in self.exp )\n if not self.closure or flat_exp == 'ϵ':\n return flat_exp\n elif len(flat_exp) == 1:\n return flat_exp + self.closure\n else:\n return '(' + flat_exp + ')' + self.closure", "def _printable(self) -> str:\n \n if self.type_of_second_operand == self.TYPE_REF_ID:\n operand_type = \"RefID\"\n else:\n operand_type = \"Value\"\n\n # parenthesis to concatenate the string over multiple lines\n return (\n \"CQC IF header. 
RefID=\" + str(self.first_operand)\n + \" | Operator=\" + str(self.operator)\n + \" | \" + operand_type + \"=\" + str(self.second_operand)\n + \" | Second_operand_type=\" + operand_type\n + \" | Body_length=\" + str(self.length)\n )", "def __str__(self):\n lstr = \"\"\n\n # String for x term\n if self.a != 0:\n if self.a == 1:\n lstr += \"x\"\n elif self.a == -1:\n lstr += \"-x\"\n else:\n lstr += str(self.a) + \"x\"\n\n # String for y term\n if lstr != \"\": # This means that the x-coefficient is not 0\n if self.b > 0:\n if self.b == 1:\n lstr += \" + y\"\n else:\n lstr += \" + \" + str(abs(self.b)) + \"y\"\n elif self.b < 0:\n if self.b == -1:\n lstr += \" - y\"\n else:\n lstr += \" - \" + str(abs(self.b)) + \"y\"\n else:\n if self.b == 1:\n lstr += \"y\"\n elif self.b == -1:\n lstr += \"-y\"\n else:\n lstr += str(self.b) + \"y\"\n\n # String for constant term\n lstr += \" = \" + str(self.c)\n\n return lstr", "def __str__(self):\n rep = \"\"\n for row in self._marker:\n for pegs in row:\n rep += pegs + \" \"\n rep = rep[:-1]\n rep += \"\\n\"\n rep = rep[:-1]\n return rep", "def format_expr(expr, precedence=0):\n match expr:\n case BinaryOp(op, left, right):\n result = \\\n f\"{format_expr(left, expr.precedence)} {op} {format_expr(right, expr.precedence+1)}\"\n # Surround the result in parentheses if needed\n if precedence > expr.precedence:\n return f\"({result})\"\n else:\n return result\n case UnaryOp(op, arg):\n return f\"{op}{format_expr(arg, 0)}\"\n case VarExpr(name):\n return name\n case float() | int():\n return str(expr)\n case _:\n raise ValueError(f\"Invalid expression value: {repr(expr)}\")", "def _repr_(self):\n\n s = \"Element of \" + str(self._space) + \" with principal part: \"\n WR = self._space.multiplier()\n sp = \"\"\n for (b, m) in self._principal_part:\n a = self._principal_part[(b, m)]\n if(a != 0):\n x = QQ(m + WR.Qv[WR.D.index(b)])\n if(a != 1):\n if(a > 0 and len(sp) > 0):\n ast = \"+\" + str(a)\n else:\n ast = str(a)\n sp = sp + ast + \"q^{\" + str(x) + \"}\"\n else:\n sp = sp + \"q^{\" + str(x) + \"}\"\n s = s + sp\n return s", "def __str__(self):\n return \"(%s)\" % ' '.join(map(str, self.__subrules))", "def _repr_term(self, c):\n return self.prefix()+str(c)", "def get_equation(self):\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n string = \"\"\n\n for index, polynomial in self.polynomials.items():\n polynomial = int(polynomial)\n index = int(index)\n\n if polynomial != 0:\n if polynomial < 0:\n string_pre = \" - \"\n else:\n string_pre = \" + \"\n\n if index != 0:\n string_append = \"x\"\n elif polynomial == 1 or polynomial == -1:\n string_append = str(abs(polynomial))\n else:\n string_append = \"\"\n\n if polynomial < 0:\n polynomial = abs(polynomial)\n\n if polynomial != 1:\n string_append = str(polynomial) + string_append\n\n if index != 0 and index != 1:\n string_append += \"^\" + str(index)\n\n string += string_pre + string_append\n\n if len(string) > 0:\n string = string[3:]\n else:\n string = \"0\"\n\n return string", "def __str__(self):\n return \"{ %s }\" % str(self.__rule)", "def _self_string(self, parser=None):\n if not parser:\n s = f'{self.content}'\n else:\n s = f'{parser.unparse(self.content)}'\n if self.justification is not None:\n s += f' ({self.justification})'\n return s", "def __repr__(self):\n result = '\"{0}\"'.format(self._filepath.unexpanded)\n if self.nonlocal is None: result += \", None\"\n else: result += ', \"%s\"' % (self._nonlocal.unexpanded)\n result += \", %f, %f, %f, %f, %f\" % (self.s, self.p, 
self.d, self.pnl, self.dnl)\n return result", "def compile_expr(self, e, prec=0):\n etyp = type(e)\n\n if etyp is HIR.Const:\n if e.typ == f32:\n return (f\"{e.v}f\",[])\n elif e.typ == f64:\n return (f\"Expr({e.v})\",[])\n else:\n return (str(e.v),[])\n elif etyp is HIR.Evar:\n return (self._ctxt[e.v.name],[])\n elif etyp is HIR.Erdom:\n return (self._ctxt[e.r.name],[])\n elif etyp is HIR.Eparam:\n return (self._ctxt[e.p.name],[])\n elif etyp is HIR.BinOp:\n op_prec = HIR_CPP_String._prec[e.op]\n lhs, ls = self.compile_expr(e.lhs, prec=op_prec)\n rhs, rs = self.compile_expr(e.rhs, prec=op_prec+1)\n op = e.op\n if op == \"and\":\n op = \"&&\"\n elif op == \"or\":\n op = \"||\"\n exp = f'{lhs} {e.op} {rhs}'\n if prec > op_prec:\n exp = f'({exp})'\n return (exp,ls+rs)\n elif etyp is HIR.Min or etyp is HIR.Max:\n op = \"min\" if etyp is HIR.Min else \"max\"\n lhs, ls = self.compile_expr(e.lhs)\n rhs, rs = self.compile_expr(e.rhs)\n return (f\"{op}({lhs}, {rhs})\",ls+rs)\n elif etyp is HIR.MathFn1:\n arg, ss = self.compile_expr(e.arg)\n return (f'{e.name}({arg})',ss)\n elif etyp is HIR.Clamp:\n val, vs = self.compile_expr(e.val)\n lo, ls = self.compile_expr(e.lo)\n hi, hs = self.compile_expr(e.hi)\n return (f'clamp({val}, {lo}, {hi})',vs+ls+hs)\n elif etyp is HIR.Pow:\n base,bs = self.compile_expr(e.base)\n exp, es = self.compile_expr(e.exp)\n return (f'pow({base}, {exp})',bs+es)\n elif etyp is HIR.ATan2:\n y, ys = self.compile_expr(e.y)\n x, xs = self.compile_expr(e.x)\n return (f'atan2({y}, {x})',ys+xs)\n elif etyp is HIR.Select:\n pred,ps = self.compile_expr(e.pred)\n lhs, ls = self.compile_expr(e.lhs)\n rhs, rs = self.compile_expr(e.rhs)\n return (f'select({pred}, {lhs}, {rhs})',ps+ls+rs)\n elif etyp is HIR.FAccess:\n nm = e.f.name if type(e.f) is HIR.Func else e.f.img.name\n name = self._ctxt[nm]\n tmp = [ self.compile_expr(a) for a in e.args ]\n args = [ a[0] for a in tmp ]\n ss = [ x for a in tmp for x in a[1] ] # flatten list of lists\n return (f'{name}({\",\".join(args)})',ss)\n elif etyp is HIR.BigSum:\n stmts = []\n # RDom variable\n r = self._ctxt[e.r.name]\n\n # handle compiling the body with reduction variable substitution\n # name collisions must be handled out-of-scope\n pure_r = self.new_name(e.r.name.copy())\n self.push_scope(tab=False)\n stmts += [f\"Var {pure_r};\"]\n # but we need to hide the fact that we're re-binding the rdom\n self._ctxt[e.r.name] = pure_r\n self._curr_args = self._curr_args + [pure_r]\n args_x = ','.join(self._curr_args)\n # call body\n body,bs = self.compile_expr(e.body)\n # cleanup\n stmts += bs\n self._curr_args = self._curr_args[:-1]\n self.pop_scope(tab=False)\n\n # create an earlier temp. 
func corresponding to the sum values\n f0name = self.new_name(Sym(f\"sum{self._sum_count}\"))\n self._sum_count +=1\n f1name = self.new_name(Sym(f\"sum{self._sum_count}\"))\n self._sum_count +=1\n args = ','.join(self._curr_args)\n args_r = ','.join(self._curr_args+[ self._ctxt[e.r.name] ])\n stmts += [f'Func {f0name}(\"{f0name}\");',\n f'Func {f1name}(\"{f1name}\");',\n f\"{f0name}({args_x}) = {body};\",\n f\"{f1name}({args}) = Expr(0.0);\",\n f\"{f1name}({args}) += {f0name}({args_r});\"]\n return (f\"{f1name}({args})\",stmts)\n else: assert False, \"bad case\"", "def as_expression(self):\n data = [(key,self.__dict__[key]) for key in self.FIELDS]\n items = []\n for (key,value) in data:\n if isinstance(value,types.UnicodeType):\n value = value.encode('utf-8')\n if isinstance(value,types.StringTypes):\n value = trim_value(value)\n if isinstance(value,types.StringType):\n items.append(\"'%s': %s\" % (key,quote(value)))\n else:\n items.append(\"'%s': %s\" % (key,str(value)))\n r = '{\\n%s\\n}' % ',\\n'.join(items)\n return r", "def gen_get_const_expr(cls, const_name, const_p):\n sh, mask = cls.pos[const_name]\n s = \"(({t}{sh}) & {mask})\".format(\n t = const_p, mask = mask,\n sh = \" >> \" + str(sh) if sh else \"\"\n )\n return s", "def to_string(self):\n x = str(self.x).replace(\"**\", \"^\")\n y = str(self.y).replace(\"**\", \"^\")\n\n if self.name == \"\":\n return str(\"(\"+x+\", \"+y+\")\")\n else:\n return str(self.name+\"(\"+x+\", \"+y+\")\")" ]
[ "0.736386", "0.68066204", "0.67740124", "0.675347", "0.6752998", "0.6706245", "0.6700624", "0.6700624", "0.666184", "0.6628258", "0.6522713", "0.6517617", "0.646707", "0.646707", "0.6448369", "0.6398307", "0.63835007", "0.6337211", "0.6321024", "0.6310438", "0.6302699", "0.6283184", "0.62470406", "0.6241772", "0.62407327", "0.6238093", "0.62086844", "0.6207555", "0.6201423", "0.61985147", "0.61888254", "0.61841196", "0.6176749", "0.61764354", "0.6175721", "0.6150616", "0.614002", "0.61326134", "0.61245286", "0.61135286", "0.61064893", "0.6106229", "0.6088283", "0.6088283", "0.607067", "0.6063674", "0.6061633", "0.6060007", "0.6019838", "0.6019704", "0.60120213", "0.60099196", "0.599221", "0.5991115", "0.59849787", "0.5983607", "0.59738135", "0.59738135", "0.59738135", "0.59674925", "0.59618586", "0.5951864", "0.59517246", "0.5935898", "0.59265447", "0.59258693", "0.59234864", "0.5912315", "0.59108984", "0.59066623", "0.589582", "0.58912003", "0.58801955", "0.58765596", "0.58693", "0.586118", "0.5830158", "0.5826504", "0.5826257", "0.58182824", "0.58165956", "0.5807974", "0.58052117", "0.5802935", "0.5801574", "0.58013606", "0.5797903", "0.5794819", "0.57895446", "0.5787874", "0.5781319", "0.57797396", "0.57778996", "0.5768738", "0.5768691", "0.5766583", "0.5762836", "0.5742837", "0.5739199", "0.57295406", "0.5728191" ]
0.0
-1
Creates an interior node with the given operator (a token), and left and right operands (other nodes).
def __init__(self, opToken, leftOper, rightOper):
    self.operator = opToken
    self.leftOperand = leftOper
    self.rightOperand = rightOper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __create_internal_node_by_operator(operator: PatternStructure, sliding_window: timedelta, parent: Node = None):\n operator_type = operator.get_top_operator()\n if operator_type == SeqOperator:\n return SeqNode(sliding_window, parent)\n if operator_type == AndOperator:\n return AndNode(sliding_window, parent)\n if operator_type == KleeneClosureOperator:\n return KleeneClosureNode(sliding_window, operator.min_size, operator.max_size, parent)\n raise Exception(\"Unknown or unsupported operator %s\" % (operator_type,))", "def __init__(self, token, left = None, right = None):\n LinkedBinaryTree.__init__(self) # LinkedBinaryTree initialization\n if not isinstance(token, str):\n raise TypeError('Token must be a string')\n self._add_root(token) # use inherited, nonpublic method\n if left is not None:\n if token not in '+-*x/':\n raise ValueError('token must be valid operator')\n self._attach(self.root(), left, right) # use inherited, nonpublic method", "def visit_BinaryOperator(self, node: BinaryOperator) -> Instruction:\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n if isinstance(left, VarSymbol):\n left_symbol = self.GLOBAL_MEMORY[left.name]\n else:\n left_symbol = left\n\n if isinstance(right, VarSymbol):\n right_symbol = self.GLOBAL_MEMORY[right.name]\n else:\n right_symbol = right\n\n if node.operator.type == TokenType.PLUS:\n return self.builder.fadd(left_symbol, right_symbol, \"addtmp\")\n elif node.operator.type == TokenType.MINUS:\n return self.builder.fsub(left_symbol, right_symbol, \"subtmp\")\n elif node.operator.type == TokenType.MUL:\n return self.builder.fmul(left_symbol, right_symbol, \"multmp\")\n elif node.operator.type == TokenType.INTEGER_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"udivtmp\")\n elif node.operator.type == TokenType.FLOAT_DIV:\n return self.builder.fdiv(left_symbol, right_symbol, \"fdivtmp\")", "def create_operator(statement_a, operator, statement_b):\n return S(statement_a=statement_a, operator=operator, statement_b=statement_b)", "def operator(self):\n col = self.pos\n operators = [\"||\", \"&&\", \">>\", \"<<\", \"!=\", \">=\", \"<=\", \"==\", \"##\"] + \\\n [\"-\", \"+\", \"!\", \"*\", \"/\", \"|\", \"&\", \"^\", \"<\", \">\", \"?\", \":\", \"~\", \"#\", \"=\", \"%\"]\n try:\n index = self.match_any(operators)\n\n op = Operator(self.line, col, self.prev_white, operators[index])\n return op\n except TokenError:\n self.pos = col\n raise TokenError(\"Invalid operator.\")", "def build_expression_tree(token_list: Sequence[tokens.Token]) -> nodes.ExpNode:\r\n\r\n def is_unary_op(op) -> bool:\r\n return op in UNARYOP_TABLE\r\n\r\n def is_open_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenOpenBracket)\r\n\r\n def is_close_bracket(token) -> bool:\r\n return isinstance(token, tokens.TokenCloseBracket)\r\n\r\n def is_comma(token) -> bool:\r\n return isinstance(token, tokens.TokenSymbol) and token.symbol == Separators.SEP_COMMA\r\n\r\n def is_higher_or_equal_op_priority(op1, op2, table) -> bool:\r\n oi1 = table.get(op1)\r\n oi2 = table.get(op2)\r\n\r\n p1 = 0 if oi1 is None else oi1.priority\r\n p2 = 0 if oi2 is None else oi2.priority\r\n\r\n return p1 >= p2\r\n\r\n def read_exp_chain(index) -> Tuple[nodes.ExpNode, int]:\r\n token = token_list[index]\r\n if isinstance(token, tokens.TokenSymbol):\r\n if is_open_bracket(token):\r\n node, i = read_exp(index)\r\n elif is_unary_op(token.symbol):\r\n if UNARYOP_TABLE[token.symbol].affix == OperatorAffix.PREFIX:\r\n node, i = read_prefix_unary_exp(index)\r\n 
else:\r\n raise ParsingException(f\"unary operator '{token.symbol}' is not a prefix operator\", token.pos)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n node, i = read_exp(index)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if isinstance(next_token, tokens.TokenSymbol) and is_unary_op(next_token.symbol):\r\n if UNARYOP_TABLE[next_token.symbol].affix == OperatorAffix.POSTFIX:\r\n node, i = read_postfix_unary_exp(i, node)\r\n else:\r\n return (node, i)\r\n\r\n if i < len(token_list):\r\n # look ahead for 1 token\r\n next_token = token_list[i]\r\n if is_close_bracket(next_token):\r\n return (node, i)\r\n elif isinstance(next_token, tokens.TokenSymbol):\r\n if next_token.symbol == Separators.SEP_COMMA:\r\n return (node, i)\r\n elif next_token.symbol in BINOP_TABLE:\r\n return read_binary_exp(i, node)\r\n else:\r\n raise ParsingException(f\"unexpected symbol '{next_token.symbol}'\", next_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", next_token.pos)\r\n else:\r\n return (node, i)\r\n\r\n def read_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n if index >= len(token_list):\r\n raise ParsingException(\"unexpected token\", token_list[-1].pos)\r\n\r\n token = token_list[index]\r\n if is_open_bracket(token):\r\n return read_bracket_exp(index)\r\n elif isinstance(token, tokens.TokenNumber):\r\n return (nodes.NumberNode(token.num, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenName):\r\n if (index + 1) < len(token_list) and is_open_bracket(token_list[index + 1]):\r\n return read_func_call(index)\r\n else:\r\n return (nodes.NameConstantNode(token.name, pos=token.pos), index + 1)\r\n elif isinstance(token, tokens.TokenSymbol):\r\n raise ParsingException(f\"unexpected symbol '{token.symbol}'\", token.pos)\r\n else:\r\n raise ParsingException(\"unexpceted token\", token.pos)\r\n\r\n def read_bracket_exp(index) -> Tuple[nodes.ExpNode, int]:\r\n node, i = read_exp_chain(index + 1)\r\n\r\n if i < len(token_list) and is_close_bracket(token_list[i]):\r\n return (node, i + 1)\r\n else:\r\n raise ParsingException(\"unmatch '('\", token_list[index].pos)\r\n\r\n def read_prefix_unary_exp(index) -> Tuple[nodes.UnaryOpNode, int]:\r\n node, i = read_exp(index + 1)\r\n token = token_list[index]\r\n return (nodes.UnaryOpNode(token.symbol, node, pos=token.pos), i)\r\n\r\n def read_postfix_unary_exp(index, child: nodes.ExpNode) -> Tuple[nodes.UnaryOpNode, int]:\r\n token = token_list[index]\r\n\r\n if isinstance(child, nodes.UnaryOpNode):\r\n if is_higher_or_equal_op_priority(token.symbol, child.op, UNARYOP_TABLE):\r\n node = nodes.UnaryOpNode(token.symbol, child.child, pos=token.pos)\r\n child.child = node\r\n node = child\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n else:\r\n node = nodes.UnaryOpNode(token.symbol, child, pos=token.pos)\r\n\r\n return (node, index + 1)\r\n\r\n def read_binary_exp(index, left: nodes.ExpNode) -> Tuple[nodes.BinaryOpNode, int]:\r\n right, i = read_exp_chain(index + 1)\r\n\r\n token = token_list[index]\r\n if isinstance(right, nodes.BinaryOpNode) and not is_open_bracket(token_list[index + 1]):\r\n # check operator priority and rotate the expression tree when necessary.\r\n # when priority of two operators are equal, we also should rotate the tree\r\n # in case these operators don't follow the commutative law.\r\n if is_higher_or_equal_op_priority(token.symbol, right.op, BINOP_TABLE):\r\n node = 
nodes.BinaryOpNode(token.symbol, left, right.left, pos=token.pos)\r\n right.left = node\r\n node = right\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n else:\r\n node = nodes.BinaryOpNode(token.symbol, left, right, pos=token.pos)\r\n\r\n return (node, i)\r\n\r\n def read_func_call(index) -> Tuple[nodes.FuncCallNode, int]:\r\n name_token = token_list[index]\r\n index += 2 # skip '('\r\n\r\n token_count = len(token_list)\r\n\r\n node = None\r\n i = index\r\n args = []\r\n\r\n while i < token_count and not is_close_bracket(token_list[i]):\r\n node, i = read_exp_chain(i)\r\n args.append(node)\r\n if i < token_count and is_comma(token_list[i]):\r\n i += 1\r\n else:\r\n break\r\n\r\n if i < token_count and is_close_bracket(token_list[i]):\r\n func_node = nodes.FuncCallNode(name_token.name, args, pos=name_token.pos)\r\n return (func_node, i + 1)\r\n else:\r\n raise ParsingException(\"unclose func call\", name_token.pos)\r\n\r\n\r\n node, i = read_exp_chain(0)\r\n\r\n if i < len(token_list):\r\n last_token = token_list[i]\r\n if is_close_bracket(last_token):\r\n raise ParsingException(\"unmatch ')'\", last_token.pos)\r\n else:\r\n raise ParsingException(\"unexpected token\", last_token.pos)\r\n else:\r\n return node", "def __init__(self, operation, left, right):\n self.operation = operation\n self.left = left\n self.right = right", "def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left", "def build_expression_tree(tokens):\n S = [] # we use Python list as stack\n for t in tokens:\n if t in '+-*/': # t is an operator symbol\n S.append(t) # push the operator symbol\n elif t not in '()': # consider t to be a literal\n S.append(ExpressionTree(t)) # push trivial tree storing value\n elif t == ')': # compose a new tree from three constituent parts\n right = S.pop() # right subtree as per LIFO\n op = S.pop() # operator symbol\n left = S.pop() # left subtree\n S.append(ExpressionTree(op, left, right)) # repush tree\n # we ignore a left parenthesis\n return S.pop()", "def build_expression_tree(tokens):\n S = [] # we use Python list as stack\n for t in tokens:\n if t in '+-x*/': # t is an operator symbol\n S.append(t) # push the operator symbol\n elif t not in '()': # consider t to be a literal\n S.append(ExpressionTree(t)) # push trivial tree storing value\n elif t == ')': # compose a new tree from three constituent parts\n right = S.pop() # right subtree as per LIFO\n op = S.pop() # operator symbol\n left = S.pop() # left subtree\n S.append(ExpressionTree(op, left, right)) # repush tree\n # we ignore a left parenthesis\n return S.pop()", "def __init__(self, operation, operand):\n self.operation = operation\n self.right = operand", "def binary_operator(op):\n # When combining a Factor with a NumericalExpression, we use this\n # attrgetter instance to defer to the commuted implementation of the\n # NumericalExpression operator.\n commuted_method_getter = attrgetter(method_name_for_op(op, commute=True))\n\n def binary_operator(self, other):\n # This can't be hoisted up a scope because the types returned by\n # binop_return_type aren't defined when the top-level function is\n # invoked in the class body of Factor.\n return_type = binop_return_type(op)\n if isinstance(self, NumExprFactor):\n self_expr, other_expr, new_inputs = 
self.build_binary_op(\n op, other,\n )\n return return_type(\n \"({left}) {op} ({right})\".format(\n left=self_expr,\n op=op,\n right=other_expr,\n ),\n new_inputs,\n )\n elif isinstance(other, NumExprFactor):\n # NumericalExpression overrides ops to correctly handle merging of\n # inputs. Look up and call the appropriate reflected operator with\n # ourself as the input.\n return commuted_method_getter(other)(self)\n elif isinstance(other, Factor):\n if self is other:\n return return_type(\n \"x_0 {op} x_0\".format(op=op),\n (self,),\n )\n return return_type(\n \"x_0 {op} x_1\".format(op=op),\n (self, other),\n )\n elif isinstance(other, Number):\n return return_type(\n \"x_0 {op} ({constant})\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, self, other)\n\n binary_operator.__doc__ = \"Binary Operator: '%s'\" % op\n return binary_operator", "def __create_nested_structure(nested_operator: PatternStructure):\n order = list(range(len(nested_operator.args))) if isinstance(nested_operator, CompositeStructure) else [0]\n operator_type = None\n if isinstance(nested_operator, AndOperator):\n operator_type = OperatorTypes.AND\n elif isinstance(nested_operator, SeqOperator):\n operator_type = OperatorTypes.SEQ\n ret = TreePlanLeafNode(order[0])\n for i in range(1, len(order)):\n ret = TreePlanBinaryNode(operator_type, ret, TreePlanLeafNode(order[i]))\n return ret", "def expression( ):#DOUBLE CHECK THIS\n\t\n\ttok = tokens.peek( )\n\tif debug: print(\"Expression: \", tok)\n\tleft = andExpr( ) #does the left side of the grammar \n\ttok = tokens.peek( )\n\twhile tok == \"or\": #checks to see if there is the token or and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = andExpr( )\n\t\tleft = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE THIS TO STRING CAUSE ITS \"or\"\n\t\ttok = tokens.peek( )\n\treturn left", "def create_basic_op_node(op_name, node, kwargs):\n name, input_nodes, _ = get_inputs(node, kwargs)\n\n node = onnx.helper.make_node(\n op_name,\n input_nodes,\n [name],\n name=name\n )\n return [node]", "def expression_tree(postfix:str) -> Node:\n stack = deque()\n for ch in postfix:\n if ch not in {'+', '-', '*', '/', '^'}:\n stack.append(Node(ch))\n else:\n middle_node = Node(ch)\n right_node = stack.pop()\n left_node = stack.pop()\n middle_node ._right = right_node\n middle_node._left = left_node\n stack.append(middle_node)\n return stack.pop()", "def operation(self, other=None, operator=None):\n terms = [self]\n if other is not None and operator is not EmptyQuery:\n terms.append(other)\n return Operation(terms, operator=operator)", "def _append_operator(self, operator):", "def make_binary(sv, piece, o, op):\r\n here=piece.rfind(op) # look for last occurrence\r\n there=here+len(op)\r\n t1=piece[:here].strip(Space) # first term (sometimes omitted)\r\n t2=piece[there:].strip(Space) # second term must be present\r\n if not t2: \r\n print(\"\\n\", Err_op_syntax, o) # *** Syntax error in operator ***\r\n print(\" \", piece)\r\n raise ReferenceError\r\n first=tree_build(sv, t1) # process each term RECURSIVE\r\n second=tree_build(sv, t2)\r\n return (o, first, second)", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == 
\",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def _reduce_expr(tree, tok):\n second = tree.pop()\n if len(tree) > 0 and not Parser._is_unary_op(tok):\n first = tree.pop()\n expr = BinaryExpression(first, tok, second)\n else:\n expr = UnaryExpression(second, tok)\n tree.append(expr)", "def _(self, node: BinaryOp):\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n return f\"( {node.op} {left} {right} )\"", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def 
reflected_binary_operator(op):\n assert not is_comparison(op)\n\n def reflected_binary_operator(self, other):\n\n if isinstance(self, NumericalExpression):\n self_expr, other_expr, new_inputs = self.build_binary_op(\n op, other\n )\n return NumExprFactor(\n \"({left}) {op} ({right})\".format(\n left=other_expr,\n right=self_expr,\n op=op,\n ),\n new_inputs,\n )\n\n # Only have to handle the numeric case because in all other valid cases\n # the corresponding left-binding method will be called.\n elif isinstance(other, Number):\n return NumExprFactor(\n \"{constant} {op} x_0\".format(op=op, constant=other),\n binds=(self,),\n )\n raise BadBinaryOperator(op, other, self)\n return reflected_binary_operator", "def operator_constructor(loader, node):\n global workspace\n obj = loader.construct_mapping(node, deep=True)\n obj = resolve_pointer( workspace, obj )\n operation, arg = yaml_to_args( obj )[0]\n return getattr( operator, operation )( *arg )", "def _lex_operators(self):\n try:\n val = self._current\n type = Lexer._OPERATORS[self._current]\n self._advance()\n return Token(val, type)\n except KeyError:\n raise ParserError(self._expr,\n \"Encountered invalid token '{t}' at {i}\".format(\n t=self._current, i=self._index))", "def match_expr(self, precedence: int) -> \"AbstractNode\":\n tkn = self.lexer.tkn\n # This line is solely to satisfy mypy.\n left = AbstractNode()\n if tkn.type == Token.AT:\n self.lexer.next_token()\n address = self.match_expr(PREC_PREFIX)\n left = MemoryNode(address)\n elif tkn.type == Token.INT:\n try:\n left = IntNode(int(tkn.value, base=0))\n except ValueError:\n raise SyntaxError(\"invalid integer literal: {}\".format(tkn))\n else:\n self.lexer.next_token()\n elif tkn.type == Token.MINUS:\n self.lexer.next_token()\n left = PrefixNode(\"-\", self.match_expr(PREC_PREFIX))\n elif tkn.type == Token.REGISTER:\n try:\n left = RegisterNode(register_to_index(tkn.value))\n except HERAError:\n raise SyntaxError(\"{} is not a valid register\".format(tkn.value))\n self.lexer.next_token()\n elif tkn.type == Token.SYMBOL:\n left = SymbolNode(tkn.value)\n self.lexer.next_token()\n elif tkn.type == Token.LPAREN:\n self.lexer.next_token()\n left = self.match_expr(PREC_LOWEST)\n if self.lexer.tkn.type != Token.RPAREN:\n self.unexpected(self.lexer.tkn)\n self.lexer.next_token()\n else:\n self.unexpected(tkn)\n\n infix_tkn = self.lexer.tkn\n while infix_tkn.type in PREC_MAP and precedence < PREC_MAP[infix_tkn.type]:\n infix_precedence = PREC_MAP[infix_tkn.type]\n self.lexer.next_token()\n right = self.match_expr(infix_precedence)\n left = InfixNode(infix_tkn.value, left, right)\n infix_tkn = self.lexer.tkn\n return left", "def Expression(self, paren=False):\n left = self.Conjunction(paren)\n while self.currtok[1].name == \"OR\":\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Conjunction()\n left = BinaryExpr(op, left, right, paren)\n return left", "def binary_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\tresult = cls.execute_binary_operator(quad.operator, left_op, right_op)\n\t\tcls.set_address_value(quad.result, result)", "def binary_op(node_factory_function: Callable) -> Callable:\n\n @wraps(node_factory_function)\n def wrapper(left: NodeInput, right: NodeInput, *args: Any, **kwargs: Any) -> Node:\n left, right = as_nodes(left, right)\n node = node_factory_function(left, right, *args, **kwargs)\n node = _set_node_friendly_name(node, **kwargs)\n return node\n\n return 
wrapper", "def infix(self):\n return \"(\" + self.leftOperand.infix() + \" \" + str(self.operator) + \" \" + self.rightOperand.infix() + \")\"", "def __init__(self, token, left=None, right=None):\n super().__init__() # LinkedBinaryTree initialization\n self._add_root(token) # use inherited, nonpublic method\n if left is not None: # presumably three-parameter form\n self._attach(self.root(), left, right) # use inherited, nonpublic method", "def __init__(self, op, expression1, expression2):\n LinearExpression.__init__(self)\n\n self.op = op\n self.expression1 = expression1\n self.expression2 = expression2", "def make_unary(sv, piece, o, op):\r\n there=len(op) # start position of last part\r\n # if the object is subscripted / has args\r\n if piece[there:].startswith(Special+Bloc): \r\n here=piece[there+1:].find(Special) # find ending delimiter\r\n key=piece[there+1:there+here+1] # extract key for the block\r\n if piece[there+here+2:].strip(Space): # something after the block (some other subscript)\r\n first=(o, tree_build(sv, sv.Blocks[key]), None) # Build block RECURSIVE \r\n last=tree_build(sv, piece[there+here+2:]) # build other subscript RECURSIVE\r\n res=(Special, first, last) # code for a subscripted object\r\n else:\r\n res=(o, tree_build(sv, sv.Blocks[key]), None) # Build block RECURSIVE\r\n return res\r\n # the object is not subscripted but may have parts separated by space\r\n if Space in piece.strip(Space): return (o, tree_build(sv, piece[there:]), None) # Build RECURSIVE\r\n return make_leaf(sv, piece.strip(Space))", "def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)", "def terminal_node(\n self,\n expr: Any = None,\n ) -> None:\n self.data.append(\n {\n \"type\": \"TERMINAL\",\n \"expr\": expr,\n \"id\": len(\n self.data,\n ),\n }\n )", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def create_expr(self, exprcls, ast, params=None, nopush=False):\n if params is None:\n expr = exprcls(self.current_parent, ast=ast)\n else:\n expr = exprcls(self.current_parent, ast=ast, **params)\n if not nopush:\n self.push_state(expr)\n return expr", "def Relation(self, paren=False):\n left = self.Addition(paren)\n if self.currtok[1].name in {\"GREATERTHAN\", \"LESSTHAN\", \"LET\", \"GET\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Addition(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def Term(self, paren=False):\n left = self.Factor()\n while self.currtok[1].name in {\"TIMES\", \"DIVISION\", \"MOD\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Factor()\n left = BinaryExpr(op, left, right, paren)\n return left", "def get_operator_to_make_TOD(self):\n if len(self) == 1:\n return self.get_operator()\n op = self._get_array_of_operators()\n return BlockRowOperator(op, new_axisin=0)", "def test_operator(self):\n\n tokens = list(Lexer(\"+-*/^%\").generate_tokens())\n answer = [Token(TokenType.PLUS),\n Token(TokenType.MINUS),\n Token(TokenType.MULTIPLY),\n Token(TokenType.DIVIDE),\n Token(TokenType.EXPONENT),\n Token(TokenType.MODULO)]\n self.assertEqual(tokens, answer)", "def is_operator(obj):\n return isinstance(obj, Token) and obj[0] not in '/01234567890+-.<[('", "def Addition(self, paren=False):\n left = self.Term(paren)\n while self.currtok[1].name in 
{\"PLUS\", \"MINUS\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Term(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def evaluate(node,operators):\n\tif isinstance(node, ast.Num):\n\t\treturn node.n\n\telif isinstance(node, ast.BinOp):\n\t\treturn operators[type(node.op)](evaluate(node.left,operators), evaluate(node.right,operators))\n\telif isinstance(node, ast.UnaryOp):\n\t\treturn operators[type(node.op)](evaluate(node.operand,operators))\n\telse:\n\t\traise TypeError(node)", "def is_operator(node):\n return node.startswith('$')", "def _compile_node(selector):\n # To avoid precedence-related bugs, any sub-expression that is passed\n # around must be \"atomic\": add parentheses when the top-level would be\n # an operator. Bare literals and function calls are fine.\n\n # 1 and 0 are used for True and False to avoid global lookups.\n\n if isinstance(selector, parser.CombinedSelector):\n left_inside = _compile_node(selector.left)\n if left_inside == '0':\n return '0' # 0 and x == 0\n elif left_inside == '1':\n # 1 and x == x, but the element matching 1 still needs to exist.\n if selector.combinator in (' ', '>'):\n left = 'el.parent is not None'\n elif selector.combinator in ('~', '+'):\n left = 'el.previous is not None'\n else:\n raise SelectorError('Unknown combinator', selector.combinator)\n # Rebind the `el` name inside a generator-expressions (in a new scope)\n # so that 'left_inside' applies to different elements.\n elif selector.combinator == ' ':\n left = 'any((%s) for el in el.iter_ancestors())' % left_inside\n elif selector.combinator == '>':\n left = ('next(el is not None and (%s) for el in [el.parent])'\n % left_inside)\n elif selector.combinator == '+':\n left = ('next(el is not None and (%s) for el in [el.previous])'\n % left_inside)\n elif selector.combinator == '~':\n left = ('any((%s) for el in el.iter_previous_siblings())'\n % left_inside)\n else:\n raise SelectorError('Unknown combinator', selector.combinator)\n\n right = _compile_node(selector.right)\n if right == '0':\n return '0' # 0 and x == 0\n elif right == '1':\n return left # 1 and x == x\n else:\n # Evaluate combinators right to left:\n return '(%s) and (%s)' % (right, left)\n\n elif isinstance(selector, parser.CompoundSelector):\n sub_expressions = [\n expr for expr in map(_compile_node, selector.simple_selectors)\n if expr != '1']\n if len(sub_expressions) == 1:\n test = sub_expressions[0]\n elif '0' in sub_expressions:\n test = '0'\n elif sub_expressions:\n test = ' and '.join('(%s)' % e for e in sub_expressions)\n else:\n test = '1' # all([]) == True\n\n if isinstance(selector, parser.NegationSelector):\n if test == '0':\n return '1'\n elif test == '1':\n return '0'\n else:\n return 'not (%s)' % test\n else:\n return test\n\n elif isinstance(selector, parser.LocalNameSelector):\n return ('el.local_name == (%r if el.in_html_document else %r)'\n % (selector.lower_local_name, selector.local_name))\n\n elif isinstance(selector, parser.NamespaceSelector):\n return 'el.namespace_url == %r' % selector.namespace\n\n elif isinstance(selector, parser.ClassSelector):\n return '%r in el.classes' % selector.class_name\n\n elif isinstance(selector, parser.IDSelector):\n return 'el.id == %r' % selector.ident\n\n elif isinstance(selector, parser.AttributeSelector):\n if selector.namespace is not None:\n if selector.namespace:\n key = '(%r if el.in_html_document else %r)' % (\n '{%s}%s' % (selector.namespace, selector.lower_name),\n '{%s}%s' % (selector.namespace, 
selector.name),\n )\n else:\n key = ('(%r if el.in_html_document else %r)'\n % (selector.lower_name, selector.name))\n value = selector.value\n if selector.operator is None:\n return '%s in el.etree_element.attrib' % key\n elif selector.operator == '=':\n return 'el.etree_element.get(%s) == %r' % (key, value)\n elif selector.operator == '~=':\n if len(value.split()) != 1 or value.strip() != value:\n return '0'\n else:\n return (\n '%r in split_whitespace(el.etree_element.get(%s, \"\"))'\n % (value, key))\n elif selector.operator == '|=':\n return ('next(v == %r or (v is not None and v.startswith(%r))'\n ' for v in [el.etree_element.get(%s)])'\n % (value, value + '-', key))\n elif selector.operator == '^=':\n if value:\n return 'el.etree_element.get(%s, \"\").startswith(%r)' % (\n key, value)\n else:\n return '0'\n elif selector.operator == '$=':\n if value:\n return 'el.etree_element.get(%s, \"\").endswith(%r)' % (\n key, value)\n else:\n return '0'\n elif selector.operator == '*=':\n if value:\n return '%r in el.etree_element.get(%s, \"\")' % (value, key)\n else:\n return '0'\n else:\n raise SelectorError(\n 'Unknown attribute operator', selector.operator)\n else: # In any namespace\n raise NotImplementedError # TODO\n\n elif isinstance(selector, parser.PseudoClassSelector):\n if selector.name == 'link':\n return ('%s and el.etree_element.get(\"href\") is not None'\n % html_tag_eq('a', 'area', 'link'))\n elif selector.name == 'enabled':\n return (\n '(%s and el.etree_element.get(\"disabled\") is None'\n ' and not el.in_disabled_fieldset) or'\n '(%s and el.etree_element.get(\"disabled\") is None) or '\n '(%s and el.etree_element.get(\"href\") is not None)'\n % (\n html_tag_eq('button', 'input', 'select', 'textarea',\n 'option'),\n html_tag_eq('optgroup', 'menuitem', 'fieldset'),\n html_tag_eq('a', 'area', 'link'),\n )\n )\n elif selector.name == 'disabled':\n return (\n '(%s and (el.etree_element.get(\"disabled\") is not None'\n ' or el.in_disabled_fieldset)) or'\n '(%s and el.etree_element.get(\"disabled\") is not None)' % (\n html_tag_eq('button', 'input', 'select', 'textarea',\n 'option'),\n html_tag_eq('optgroup', 'menuitem', 'fieldset'),\n )\n )\n elif selector.name == 'checked':\n return (\n '(%s and el.etree_element.get(\"checked\") is not None and'\n ' ascii_lower(el.etree_element.get(\"type\", \"\")) '\n ' in (\"checkbox\", \"radio\"))'\n 'or (%s and el.etree_element.get(\"selected\") is not None)'\n % (\n html_tag_eq('input', 'menuitem'),\n html_tag_eq('option'),\n )\n )\n elif selector.name in ('visited', 'hover', 'active', 'focus',\n 'target'):\n # Not applicable in a static context: never match.\n return '0'\n elif selector.name == 'root':\n return 'el.parent is None'\n elif selector.name == 'first-child':\n return 'el.index == 0'\n elif selector.name == 'last-child':\n return 'el.index + 1 == len(el.etree_siblings)'\n elif selector.name == 'first-of-type':\n return ('all(s.tag != el.etree_element.tag'\n ' for s in el.etree_siblings[:el.index])')\n elif selector.name == 'last-of-type':\n return ('all(s.tag != el.etree_element.tag'\n ' for s in el.etree_siblings[el.index + 1:])')\n elif selector.name == 'only-child':\n return 'len(el.etree_siblings) == 1'\n elif selector.name == 'only-of-type':\n return ('all(s.tag != el.etree_element.tag or i == el.index'\n ' for i, s in enumerate(el.etree_siblings))')\n elif selector.name == 'empty':\n return 'not (el.etree_children or el.etree_element.text)'\n else:\n raise SelectorError('Unknown pseudo-class', selector.name)\n\n elif 
isinstance(selector, parser.FunctionalPseudoClassSelector):\n if selector.name == 'lang':\n tokens = [\n t for t in selector.arguments\n if t.type != 'whitespace'\n ]\n if len(tokens) == 1 and tokens[0].type == 'ident':\n lang = tokens[0].lower_value\n else:\n raise SelectorError('Invalid arguments for :lang()')\n\n return ('el.lang == %r or el.lang.startswith(%r)'\n % (lang, lang + '-'))\n else:\n if selector.name == 'nth-child':\n count = 'el.index'\n elif selector.name == 'nth-last-child':\n count = '(len(el.etree_siblings) - el.index - 1)'\n elif selector.name == 'nth-of-type':\n count = ('sum(1 for s in el.etree_siblings[:el.index]'\n ' if s.tag == el.etree_element.tag)')\n elif selector.name == 'nth-last-of-type':\n count = ('sum(1 for s in el.etree_siblings[el.index + 1:]'\n ' if s.tag == el.etree_element.tag)')\n else:\n raise SelectorError('Unknown pseudo-class', selector.name)\n\n result = parse_nth(selector.arguments)\n if result is None:\n raise SelectorError(\n 'Invalid arguments for :%s()' % selector.name)\n a, b = result\n # x is the number of siblings before/after the element\n # Matches if a positive or zero integer n exists so that:\n # x = a*n + b-1\n # x = a*n + B\n B = b - 1\n if a == 0:\n # x = B\n return '%s == %i' % (count, B)\n else:\n # n = (x - B) / a\n return ('next(r == 0 and n >= 0'\n ' for n, r in [divmod(%s - %i, %i)])'\n % (count, B, a))\n\n else:\n raise TypeError(type(selector), selector)", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def ifop(stream: t.List[str]) -> AST:\n n, last = expr(stream)\n if not stream[n:] or stream[n] not in (\"=>\", \"<=>\"):\n raise SyntaxError(\"Expected => or <=>.\")\n if not stream[n + 1 :]:\n raise SyntaxError(\"Expected expression.\")\n m, rast = expr(stream[n + 1 :])\n if stream[n + 1 + m :]:\n raise SyntaxError(\"Unexpected character '{}'.\".format(stream[n + 1 + m]))\n return AST(stream[n], [last, rast])", "def create() -> 'Tokenizer':\n token_op_table = [\n EOS,\n op.Concat,\n op.ConstStr,\n op.SubStr,\n op.GetSpan,\n op.Trim,\n ]\n\n # Nesting operators and their args get \"compacted\" into\n # \"primitive\" tokens\n\n for type_ in op.Type:\n for index in op.INDEX:\n token_op_table.append((op.GetToken, type_, index))\n\n for case in op.Case:\n token_op_table.append((op.ToCase, case))\n\n for delim1 in op.DELIMITER:\n for delim2 in op.DELIMITER:\n token_op_table.append((op.Replace, delim1, delim2))\n\n for dsl_regex in list(op.Type) + list(op.DELIMITER):\n token_op_table.append((op.GetUpto, dsl_regex))\n\n for dsl_regex in list(op.Type) + list(op.DELIMITER):\n token_op_table.append((op.GetFrom, dsl_regex))\n\n for type_ in op.Type:\n for index in op.INDEX:\n token_op_table.append((op.GetFirst, type_, index))\n\n for type_ in op.Type:\n token_op_table.append((op.GetAll, type_))\n\n # Primitive types\n\n for type_ in op.Type:\n token_op_table.append(type_)\n\n for boundary in op.Boundary:\n token_op_table.append(boundary)\n\n # Covers op.INDEX\n for position in range(op.POSITION[0], op.POSITION[1]+1):\n token_op_table.append(position)\n\n # This covers op.DELIMITER\n for character in op.CHARACTER:\n token_op_table.append(character)\n\n token_op_table = {\n token: op\n for token, op in enumerate(token_op_table)\n }\n\n op_token_table = {\n op: token\n for token, op in token_op_table.items()\n }\n\n assert len(token_op_table) == len(op_token_table)\n\n string_token_table = {\n 
char: token\n for token, char in enumerate(op.CHARACTER)\n }\n\n return Tokenizer(\n token_op_table=token_op_table,\n op_token_table=op_token_table,\n string_token_table=string_token_table,\n )", "def __init__(self, left, right):\n super(compositeORGenerator,self).__init__()\n self._left = left\n self._right = right", "def visit_UnaryOperator(self, node: UnaryOperator) -> Constant:\n\n operator = node.operator.type\n if operator == TokenType.PLUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(+expression.constant))\n elif operator == TokenType.MINUS:\n expression = self.visit(node.expression)\n return Constant(DoubleType(), float(-expression.constant))", "def rpn(token_list):\n output = deque()\n operator = deque()\n ops = {'+': 2, '-': 2, '*': 3, '/': 3, '**': 4}\n\n for token in token_list:\n if token.type == 'Literal' or token.type == 'Variable':\n output.appendleft(token)\n elif token.type == 'Function':\n operator.append(token)\n elif token.type == 'Operator':\n while len(operator) > 0 and (\n operator[-1].type == 'Function' or operator[-1].type == 'Operator' and (\n ops[operator[-1].value] > ops[token.value] or (\n ops[operator[-1].value] > ops[token.value] and token.value == '^')\n )) and token.type != 'Left Parenthesis':\n output.appendleft(operator.pop())\n operator.append(token)\n elif token.value == 'Left Parenthesis':\n operator.append(token)\n elif token.value == 'Right Parenthesis':\n while operator[-1].type != 'Left Parenthesis':\n output.appendleft(operator.pop())\n if operator[-1].type == 'Left Parenthesis':\n operator.pop()\n if operator[-1].type == 'Function':\n output.appendleft(operator.pop())\n while len(operator) > 0:\n output.appendleft(operator.pop())\n\n return output", "def __construct_tree(self, root_operator: PatternStructure, tree_plan: TreePlanNode,\n args: List[PatternStructure], sliding_window: timedelta, parent: Node,\n consumption_policy: ConsumptionPolicy):\n if isinstance(root_operator, UnaryStructure) and parent is None:\n # a special case where the top operator of the entire pattern is an unary operator\n return self.__handle_primitive_event_or_nested_structure(tree_plan, root_operator,\n sliding_window, parent, consumption_policy)\n\n if type(tree_plan) == TreePlanLeafNode:\n # either a leaf node or an unary operator encapsulating a nested structure\n # TODO: must implement a mechanism for actually creating nested tree plans instead of a flat plan\n # with leaves hiding nested structure\n return self.__handle_primitive_event_or_nested_structure(tree_plan, args[tree_plan.event_index],\n sliding_window, parent, consumption_policy)\n\n # an internal node\n current = self.__create_internal_node_by_operator(root_operator, sliding_window, parent)\n left_subtree = self.__construct_tree(root_operator, tree_plan.left_child, args,\n sliding_window, current, consumption_policy)\n right_subtree = self.__construct_tree(root_operator, tree_plan.right_child, args,\n sliding_window, current, consumption_policy)\n current.set_subtrees(left_subtree, right_subtree)\n return current", "def initialize_operator(self, operator=None, matrix=False, eval_at_once=False):\n # TODO: Make this more efficient, only compute values needed at each (r,c) step.\n # For this, 'operator' must support the 'component=(r,c)' option.\n # Operator is None is interpreted as identity transformation\n if operator is None:\n self._operator = lambda nodes, dummy, entry=None: ones((1, nodes.shape[1])) if entry[0] == entry[1] else zeros((1, nodes.shape[1]))\n 
else:\n if matrix is False:\n self._operator = lambda nodes, dummy, entry=None: operator(nodes, entry=entry)\n else:\n self._operator = operator\n self._eval_at_once = eval_at_once", "def get_fermion_operator(operator):\n fermion_operator = FermionOperator()\n\n if isinstance(operator, PolynomialTensor):\n for term in operator:\n fermion_operator += FermionOperator(term, operator[term])\n return fermion_operator\n\n raise TypeError(\"Unsupported type of oeprator {}\".format(operator))", "def visit_BinaryOp(self, node):\n token = node.token\n if token.type == PLUS:\n return self.visit(node.left) + self.visit(node.right)\n if token.type == MINUS:\n return self.visit(node.left) - self.visit(node.right)\n if token.type == MUL:\n return self.visit(node.left) * self.visit(node.right)\n if token.type == DIV:\n result = self.visit(node.left) / self.visit(node.right)\n if result.is_integer():\n return int(result)\n return result\n self.raise_error()", "def andExpr( ): #DOUBLE CHECK THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"andExpr: \", tok)\n\tleft = relationalExpr( ) #does the left side of the grammar\n\ttok = tokens.peek( )\n\twhile tok == \"and\": #checks to see if there is the token \"and\" and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = relationalExpr( )\n\t\tleft = BinaryExpr(tok, left, right)#MIGHT HAVE TO CHANGE TO STRING \n\t\ttok = tokens.peek( )\n\treturn left", "def op(self) -> Literal[\"==\"] | Literal[\"<=\"] | Literal[\">=\"]:\n ...", "def __init__(self, root, left, right, end):\n self._root = root\n self._left = left\n self._right = right\n self._terminal = end", "def isoperator(token):\n\n # Token is an operator\n return token and token.lower() in Token.OPERATORS", "def make_op1(op, expr):\n\n if (op == None) or (expr == None):\n return None\n\n if op == 'NOT':\n op = '!'\n if is_assembler('beebasm') and (op == '!'):\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"NOT(%s)\", expr)\n return 'NOT(' + expr + ')'\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"%s%s\", op, bracket(expr))\n return op + bracket(expr)", "def convert(token, depth=1):\n\n # finds the root token\n if token.kind == 'EQUALS':\n # asssign left Token as output pin\n new_node = Node(token.left, pin=True, root=True)\n\n # recursively go through new_node to find children\n new_child_node = convert(token.right, depth + 1)\n new_node.add(new_child_node)\n\n # must be an input pin\n elif token.kind == 'ID' or token.kind == 'LITERAL':\n new_node = Node(token, pin=True, weight=1)\n\n # determines depth of tree\n self.depth = depth if depth > self.depth else self.depth\n\n # goes through tokens that are not pins or the root\n else:\n new_node = Node(token, gate=True)\n\n # recursively checks for right Tokens\n if token.right:\n new_child_node = convert(token.right, depth + 1)\n new_node.children += [new_child_node]\n\n # recursively checks for left Tokens\n if token.left:\n\n # OPTIMIZE PART\n # left child Token might be the same kind as root Token\n # if so, don't add the child Token, just add its children\n if token.left.kind == token.kind:\n new_child_node = convert(token.left, depth)\n new_node.children += new_child_node.children\n\n # checks if left child is a gate and applies not function\n elif new_node.kind == 'not' and token.left.terminal:\n if token.left.kind[0].lower() == 'n':\n new_node.kind = token.left.kind[1:].lower()\n else:\n new_node.kind = 'n' + token.left.kind.lower()\n\n new_child_node = 
convert(token.left, depth)\n new_node.children += new_child_node.children\n\n # no optimizing to be done\n else:\n new_child_node = convert(token.left, depth + 1)\n new_node.children += [new_child_node]\n\n new_node.calculate_weight()\n return new_node", "def unary_operator(op):\n # Only negate is currently supported for all our possible input types.\n valid_ops = {'-'}\n if op not in valid_ops:\n raise ValueError(\"Invalid unary operator %s.\" % op)\n\n def unary_operator(self):\n # This can't be hoisted up a scope because the types returned by\n # unary_op_return_type aren't defined when the top-level function is\n # invoked.\n if isinstance(self, NumericalExpression):\n return NumExprFactor(\n \"{op}({expr})\".format(op=op, expr=self._expr),\n self.inputs,\n )\n else:\n return NumExprFactor(\"{op}x_0\".format(op=op), (self,))\n\n unary_operator.__doc__ = \"Unary Operator: '%s'\" % op\n return unary_operator", "def __init__(self, str_exp=None, kind=None, scanner=None):\n self.kind = None\n self.name = 'undef'\n self.attr = None\n self.child = None\n self.left = None\n self.right = None\n self.code = None\n\n if str_exp is not None:\n logging.debug('========== EXP in init(NODE): SEXP = [' + str_exp + ']')\n scanner = lex.Scanner(rules)\n scanner.setString(str_exp)\n\n if kind is not None: # create an empty node\n self.kind = kind\n return\n\n if scanner is None:\n raise Exception('Fatal Error: scanner not defined')\n\n while scanner.curToken().type in FIRST:\n\n if scanner.curToken().type == LITERAL:\n self.name = scanner.curToken().name\n self.code = LITERAL\n self.kind = ATOM\n scanner.move()\n\n elif scanner.curToken().type == LPAREN:\n scanner.move() # skip the parentheses\n\n tmp = Exp(scanner=scanner) # tree of the expression between parentheses\n self.kind = tmp.kind\n self.attr = tmp.attr\n self.name = tmp.name\n self.left = tmp.left\n self.right = tmp.right\n self.child = tmp.child\n\n if scanner.curToken().type != RPAREN:\n raise ParserException(\"')' expected\")\n scanner.move()\n\n elif isUnitary(scanner.curToken().type):\n self.kind = UNARY\n self.name = scanner.curToken().name\n self.code = scanner.curToken().type\n\n # if token_type == ATTRIB # this is for existence and foreach\n\n scanner.move()\n self.child = Exp(scanner=scanner)\n\n # the scanner has been moved to a successive token\n if scanner.curToken().type == NULLTOKEN:\n break\n\n # check for infix operators\n if isBinary(scanner.curToken().type):\n operator_name = scanner.curToken().name\n operator_type = scanner.curToken().type\n scanner.move()\n\n # move the current node to the left of the tree\n lnode = Exp(kind=self.kind)\n lnode.name = self.name\n lnode.attr = self.attr\n lnode.child = self.child\n lnode.left = self.left\n lnode.right = self.right\n lnode.code = self.code\n\n # this node became the handler aka the binary operator\n self.code = operator_type\n self.name = operator_name\n self.kind = BINARY\n self.left = lnode\n # lookup the second child of the operator\n self.right = Exp(scanner=scanner)", "def isOperator(self):\n return _libsbml.ASTNode_isOperator(self)", "def Equality(self, paren=False):\n left = self.Relation(paren)\n if self.currtok[1].name in {\"EQULITY\", \"NOTEQUAL\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Relation(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def __init__(self: 'BinaryTree', symbol: str,\n left: 'RegexTree', right: 'RegexTree') -> None:\n RegexTree.__init__(self, symbol, [left, right])", "def __init__(self, op, 
symbolicExpression1, symbolicExpression2):\n\n SymbolicExpression.__init__(self)\n \n self.op = op\n self.symbolicExpression1 = symbolicExpression1\n self.symbolicExpression2 = symbolicExpression2", "def declare_operators(*op_list):\n operators.update({op.__name__:op for op in op_list})\n return operators", "def __init__(self, a_node, b_node, name=None):\n BinaryMatrixOp.__init__(self, a_node, b_node, name)", "def era_operator(cls, quad):\n\t\tfunc_name = quad.left_operand\n\t\tfunc = FunctionTable.function_dict[func_name]\n\t\tcls.mem_to_push = Memory(len(type_dict), func.var_quantities) \n\t\tprint \"> Created new memory for '{}': {}\".format(func_name, cls.mem_to_push.memory)", "def add_operator(self, operator: Callable) -> None:\n self.operators.append(operator)", "def append_operator(cls, operator):\n for context in cls._active_contexts:\n context._append_operator(operator) # pylint: disable=protected-access", "def is_identity(operator):\n if isinstance(\n operator,\n (QubitOperator, FermionOperator, BosonOperator, QuadOperator)):\n return list(operator.terms) == [()]\n raise TypeError('Operator of invalid type.')", "def _add_prefix_to_node(self, prefix_deque: deque, tree: Tree, node: Node, index: int) -> tuple[Tree, int]:\n\n # Get the current token by popping the left of the prefix_deque\n token = prefix_deque.popleft()\n\n\n # If the token is an operator\n if self.is_token(token):\n # Create a new node\n new_node = tree.create_node(\n f\"{token}\", # Name the same as token\n index, # ID that of index\n parent = node # And parent this node\n )\n # Increment index\n index += 1\n\n # Calculate child A\n tree,index = self._add_prefix_to_node(prefix_deque, tree, new_node, index)\n\n # Calculate child B\n tree,index = self._add_prefix_to_node(prefix_deque, tree, new_node, index)\n\n # Return the tree and the index\n return tree, index\n else:\n # If the token is not an operator\n\n # Just create a new node\n new_node = tree.create_node(\n f\"{token}\", # Name the same as token\n index, # ID that of index\n parent = node # And parent this node\n )\n\n # Increment index\n index += 1\n \n # Return index and tree\n return tree, index", "def visitor(node: NodeT, left_distribute: bool) -> NodeT:\n if isinstance(node, ir.AddSub):\n items = OrderedDict() # type: Dict[ir.Node, List[Tuple[str, ir.Node]]]\n new_operators = []\n new_operands = []\n for operator, operand in zip(('+',) + getattr(node, 'operator'),\n getattr(node, 'operand')):\n if (operator == '+' and isinstance(operand, ir.MulDiv) and\n getattr(operand, 'operator') == ('*',)):\n if left_distribute:\n coeff, item = getattr(operand, 'operand')\n else:\n item, coeff = getattr(operand, 'operand')\n items.setdefault(coeff, []).append((operator, item))\n else:\n new_operators.append(operator)\n new_operands.append(operand)\n for coeff, item in items.items():\n operator, operand = zip(*item)\n assert operator[0] == '+'\n new_operators.append(operator[0])\n if len(operand) > 1:\n new_item = ir.AddSub(operator=operator[1:], operand=operand)\n else:\n new_item = operand[0]\n if left_distribute:\n children = coeff, new_item\n else:\n children = new_item, coeff\n new_operands.append(ir.MulDiv(operator=('*',), operand=children))\n if len(new_operands) > 1:\n assert new_operators[0] == '+'\n new_node = ir.AddSub(operator=tuple(new_operators[1:]),\n operand=tuple(new_operands))\n if new_node != node:\n return new_node # type: ignore\n elif new_operands and new_operands[0] != node:\n return new_operands[0]\n return node", "def 
assert_assignment(text, operator, left, right):\n try:\n node = parse_single_statement(text)\n eq_(node.op, operator)\n eq_(node.target.name, left)\n eq_( node.right.value, right)\n except AssertionError as e:\n node.show()\n raise e", "def and_or_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\t# TODO: The next set of lines will fail at a specific case\n\t\tif quad.operator == 10 :\n\t\t\tcls.set_address_value(quad.result, (left_op and right_op))\n\t\telif quad.operator == 11 :\n\t\t\tcls.set_address_value(quad.result, (left_op or right_op))", "def create_inverted_tree(tree: NodeTree) -> NodeTree:\n inverted_tree = NodeTree(tree.value)\n\n if tree.left is not None:\n inverted_tree.right = create_inverted_tree(tree.left)\n\n if tree.right is not None:\n inverted_tree.left = create_inverted_tree(tree.right)\n\n return inverted_tree", "def relationalExpr( ):#MAKE SURE I USED THE RIGHT LOGIC FOR THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"relationalExpr: \", tok)\n\tleft = addExpr( )\n\texpr = \"\"\n\ttok = tokens.peek( )\n\tif tok in relations:\n\t\trel = relation( ) # expecting a relation to start off \n\t\tright = expression( ) # if there is a relation we expect there to be an expression to the right of the relation\n\t\texpr = BinaryExpr( rel, left, right )\n\t\treturn expr #fix this for syntax tree maybe\n\n\treturn left", "def make_operators(self):\n self.relationship_operator = Operators.RelationshipOperator(self)\n self.infection_operator = Operators.InfectionOperator(self)\n self.time_operator = Operators.TimeOperator(self)", "def add_operator(self, operator: Soldier) -> None:\n if isinstance(operator, Soldier):\n if len(self.__operators) < self.MAX_OPERATORS:\n self.__operators.append(operator)\n self.__is_alive = True\n else:\n raise TypeError(\"argument must be a Soldier\")", "def calculator(infix_expr):\n\n # Assign precedence values to operators\n prec = {}\n prec['^'] = 4\n prec['*'] = 3\n prec['/'] = 3\n prec['+'] = 2\n prec['-'] = 2\n prec['('] = 1\n\n # Instantiate stacks\n operand_stack = Stack()\n operator_stack = Stack()\n\n try:\n token_list = infix_expr.split()\n logging.debug(\"token_list = {}\".format(token_list))\n except:\n sys.exit(1)\n\n for token in token_list:\n logging.debug(\"token = {}\".format(token))\n if token in '0123456789':\n operand_stack.push(int(token))\n logging.debug(\"operand_stack.push = {}\".format(token))\n elif token == '(':\n operator_stack.push(token)\n logging.debug(\"operator_stack.push = {}\".format(token))\n elif token == ')':\n logging.debug(\"token = {}\".format(token))\n operator_token = operator_stack.pop()\n logging.debug(\"operator_stack.pop = {}\".format(operator_token))\n while operator_token != '(':\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"while operator_token != '(':\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_token = operator_stack.pop()\n logging.debug(\"new operator_token = {}\".format(operator_token))\n elif token in '^*/+-':\n while (not operator_stack.isEmpty()) and \\\n (prec[operator_stack.peek()] >= prec[token]):\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n 
logging.debug(\"Operator - While:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_stack.push(token)\n logging.debug(\"operator_stack.push(): {}\".format(token))\n else:\n logging.debug(\"else.... exiting....\")\n sys.exit(1)\n\n # Use all remaining operators\n if not operator_stack.isEmpty():\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n logging.debug(\"Remaining Operators:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operand_stack.push(result)\n\n return operand_stack.pop()", "def is_unary_operator(oper):\n # definition:\n # memeber in class\n # ret-type operator symbol()\n # ret-type operator [++ --](int)\n # globally\n # ret-type operator symbol( arg )\n # ret-type operator [++ --](X&, int)\n symbols = ['!', '&', '~', '*', '+', '++', '-', '--']\n if not isinstance(oper, calldef.operator_t):\n return False\n if oper.symbol not in symbols:\n return False\n if isinstance(oper, calldef.member_operator_t):\n if 0 == len(oper.arguments):\n return True\n elif oper.symbol in ['++', '--'] and \\\n isinstance(oper.arguments[0].type, cpptypes.int_t):\n return True\n else:\n return False\n else:\n if 1 == len(oper.arguments):\n return True\n elif oper.symbol in ['++', '--'] \\\n and 2 == len(oper.arguments) \\\n and isinstance(oper.arguments[1].type, cpptypes.int_t):\n # may be I need to add additional check whether first argument is\n # reference or not?\n return True\n else:\n return False", "def new_xmldoc_opml():\n xmldoc = XMLDoc()\n opml = OPML()\n xmldoc.root_element = opml\n\n return (xmldoc, opml)", "def __compile_operator(self, op, caller):\r\n if op == \"+\":\r\n self.__vmwriter.write_arithmetic(\"add\")\r\n elif op == \"-\" and caller == \"expression\":\r\n self.__vmwriter.write_arithmetic(\"sub\")\r\n elif op == \"*\":\r\n self.__vmwriter.write_call(\"Math.multiply\", 2)\r\n elif op == \"/\":\r\n self.__vmwriter.write_call(\"Math.divide\", 2)\r\n elif op == \"&\":\r\n self.__vmwriter.write_arithmetic(\"and\")\r\n elif op == \"|\":\r\n self.__vmwriter.write_arithmetic(\"or\")\r\n elif op == \"<\":\r\n self.__vmwriter.write_arithmetic(\"lt\")\r\n elif op == \">\":\r\n self.__vmwriter.write_arithmetic(\"gt\")\r\n elif op == \"=\":\r\n self.__vmwriter.write_arithmetic(\"eq\")\r\n elif op == \"-\":\r\n self.__vmwriter.write_arithmetic(\"neg\")\r\n elif op == \"~\":\r\n self.__vmwriter.write_arithmetic(\"not\")", "def mutate_bySingleOperator(self, root, operator):\n self.operator = operator\n\n ast.fix_missing_locations(root)\n # traverse the target ast tree and mutate interesting node\n mutated_ast = self.visit(root)\n ast.fix_missing_locations(root)\n\n return mutated_ast", "def to_condition(operator: str, value: Any) -> CellCondition:\n operator = str(operator).lower().strip()\n condition = {\n \">\": lambda x: x is not None and x > value,\n \"<\": lambda x: x is not None and x < value,\n \">=\": lambda x: x is not None and x >= value,\n \"<=\": lambda x: x is not None and x <= value,\n \"==\": lambda x: x == value,\n \"!=\": lambda x: x != value,\n \"is\": lambda x: x is value,\n \"not is\": lambda x: x is not value,\n \"contains\": lambda x: x is not None and value in x,\n \"not contains\": lambda x: x is not None and value not in x,\n \"in\": lambda x: x in value,\n \"not in\": lambda x: x not in value,\n 
}.get(operator)\n\n if not condition:\n raise ValueError(f\"Unknown operator: {operator}\")\n\n return condition", "def assign_operator(cls, quad):\n\t\tvalue = cls.get_address_value(quad.left_operand)\n\t\tif quad.right_operand :\n\t\t\tcls.set_arr_value(quad.result, quad.right_operand, value)\n\t\telse:\n\t\t\tcls.set_address_value(quad.result, value)", "def __init__(self, *args):\n \n self.ops = []\n for _, arg in enumerate(args):\n if arg is None:\n continue\n elif isinstance(arg, Operator):\n self.ops.append(arg)\n elif isinstance(arg, list):\n for op in arg:\n if op is None:\n continue\n elif isinstance(op, Operator):\n self.ops.append(op)\n else:\n raise TypeError('Argument must be either Operator or Hstack')\n \n # check domain\n self.n = len(self.ops)\n domain = []\n for idx in range(self.n):\n if idx < self.n - 1:\n if not self.ops[idx].range.checkSame(self.ops[idx + 1].range):\n raise ValueError('Range incompatibility between Op %d and Op %d' % (idx, idx + 1))\n domain += [self.ops[0].domain]\n super(Hstack, self).__init__(domain=superVector(domain), range=self.ops[0].range)", "def __init__(self, left = None, right = None):\n super(compositeConditionalGenerator,self).__init__()\n self._left = left\n self._right = right", "def build_ast(expression):\n\n # use a directed graph to store the tree\n G = DiGraph()\n\n stack = []\n\n for n in expression:\n # Since the graph does not maintain the order of adding nodes/edges\n # add an extra attribute 'pos' so we can always sort to the correct order\n if isinstance(n, OperatorNode):\n if n.ttype == ept.TOK_TYPE_OP_IN:\n arg2 = stack.pop()\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_node(arg2, pos=2)\n G.add_edge(arg1, n)\n G.add_edge(arg2, n)\n else:\n arg1 = stack.pop()\n G.add_node(arg1, pos=1)\n G.add_edge(arg1, n)\n\n elif isinstance(n, FunctionNode):\n args = [stack.pop() for _ in range(n.num_args)]\n args.reverse()\n for i, a in enumerate(args):\n G.add_node(a, pos=i)\n G.add_edge(a, n)\n # for i in range(n.num_args):\n # G.add_edge(stack.pop(),n)\n else:\n G.add_node(n, pos=0)\n\n stack.append(n)\n\n return G, stack.pop()", "def term( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"Term: \", tok)\n\tleft = factor( )\n\ttok = tokens.peek( )\n\twhile tok == \"*\" or tok == \"/\":\n\t\ttokens.next()\n\t\tright = factor( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left", "def is_binary_operator(oper):\n # definition:\n # memeber in class\n # ret-type operator symbol(arg)\n # globally\n # ret-type operator symbol( arg1, arg2 )\n symbols = [\n ',', '()', '[]', '!=', '%', '%=', '&', '&&', '&=', '*', '*=', '+',\n '+=', '-', '-=', '->', '->*', '/', '/=', '<', '<<', '<<=', '<=', '=',\n '==', '>', '>=', '>>', '>>=', '^', '^=', '|', '|=', '||']\n if not isinstance(oper, calldef.operator_t):\n return False\n if oper.symbol not in symbols:\n return False\n if isinstance(oper, calldef.member_operator_t):\n if 1 == len(oper.arguments):\n return True\n else:\n return False\n else:\n if 2 == len(oper.arguments):\n return True\n else:\n return False", "def convert_elementwise_op(g, op, block):\n\n op_map = {\n \"elementwise_div\": \"divide\",\n \"elementwise_add\": \"add\",\n \"elementwise_mul\": \"multiply\",\n \"elementwise_sub\": \"subtract\",\n \"elementwise_mod\": \"mod\",\n \"elementwise_max\": \"maximum\",\n \"elementwise_min\": \"minimum\",\n \"elementwise_pow\": \"power\",\n \"elementwise_floordiv\": \"floor_divide\",\n \"equal\": \"equal\",\n \"greater_equal\": \"greater_equal\",\n 
\"greater_than\": \"greater\",\n \"less_equal\": \"less_equal\",\n \"less_than\": \"less\",\n \"not_equal\": \"not_equal\",\n }\n op_func = op_map[op.type]\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n ipt0_shape = infer_shape(ipt0)\n ipt1_shape = infer_shape(ipt1)\n axis = op.attr(\"axis\")\n if len(ipt0_shape) != len(ipt1_shape):\n if axis < 0:\n axis = axis + len(ipt0_shape)\n if axis != len(ipt0_shape) - 1:\n ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))\n op_func = get_relay_op(op_func)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)", "def __handle_primitive_event_or_nested_structure(self, tree_plan_leaf: TreePlanLeafNode,\n current_operator: PatternStructure,\n sliding_window: timedelta, parent: Node,\n consumption_policy: ConsumptionPolicy):\n if isinstance(current_operator, PrimitiveEventStructure):\n # the current operator is a primitive event - we should simply create a leaf\n event = current_operator\n if consumption_policy is not None and \\\n consumption_policy.should_register_event_type_as_single(False, event.type):\n parent.register_single_event_type(event.type)\n return LeafNode(sliding_window, tree_plan_leaf.event_index, event, parent)\n\n if isinstance(current_operator, UnaryStructure):\n # the current operator is a unary operator hiding a nested pattern structure\n unary_node = self.__create_internal_node_by_operator(current_operator, sliding_window, parent)\n nested_operator = current_operator.arg\n child = self.__construct_tree(nested_operator, Tree.__create_nested_structure(nested_operator),\n Tree.__get_operator_arg_list(nested_operator), sliding_window, unary_node,\n consumption_policy)\n unary_node.set_subtree(child)\n return unary_node\n\n # the current operator is a nested binary operator\n return self.__construct_tree(current_operator, Tree.__create_nested_structure(current_operator),\n current_operator.args, sliding_window, parent, consumption_policy)", "def __init__(self: 'DotTree', left: 'RegexTree',\n right: 'RegexTree') -> None:\n BinaryTree.__init__(self, \".\", left, right)", "def __init__(self, data, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right" ]
[ "0.6768396", "0.6402284", "0.6212105", "0.6038182", "0.6029708", "0.59814626", "0.59771395", "0.5969799", "0.57881653", "0.57876146", "0.57825124", "0.57195956", "0.56935775", "0.56912225", "0.56751347", "0.559061", "0.5583933", "0.5532599", "0.54641896", "0.5460819", "0.5419034", "0.5418368", "0.5414495", "0.53841996", "0.53565824", "0.5339949", "0.5322291", "0.53207654", "0.5312278", "0.53110856", "0.5306055", "0.5272652", "0.5271246", "0.5270123", "0.5263009", "0.5212397", "0.5198877", "0.51786864", "0.5178488", "0.51732", "0.51681817", "0.51634574", "0.5157324", "0.5138562", "0.5124519", "0.5124152", "0.5109847", "0.5107352", "0.5099674", "0.5093816", "0.5089314", "0.507746", "0.5071173", "0.50677216", "0.5057984", "0.5048431", "0.5024074", "0.50104874", "0.500028", "0.4999717", "0.49994624", "0.49993813", "0.49921978", "0.49638057", "0.49618012", "0.49565318", "0.4937051", "0.4926631", "0.49242032", "0.4905791", "0.4902782", "0.48960978", "0.48907167", "0.48822057", "0.48799077", "0.48773736", "0.48713273", "0.48646808", "0.4861987", "0.48605847", "0.4859568", "0.4852708", "0.48508367", "0.48477334", "0.48255715", "0.48195538", "0.48081177", "0.47973618", "0.47966662", "0.479338", "0.47851366", "0.47837195", "0.4778259", "0.4776471", "0.47736108", "0.47710797", "0.4769113", "0.47667655", "0.47621205", "0.4747374" ]
0.66272795
1
Utility routine to compute a value.
def computeValue(self, op, value1, value2):
        result = 0
        theType = op.getType()
        # Use python's application of an operator depending on theType of the token
        if theType == Token.PLUS:
            result = value1 + value2
        elif theType == Token.MINUS:
            result = value1 - value2
        elif theType == Token.MUL:
            result = value1 * value2
        elif theType == Token.MOD:
            result = value1 % value2
        elif theType == Token.EXP:
            result = value1**value2
        elif theType == Token.DIV:
            if value2 == 0:
                raise ZeroDivisionError("Attempt to divide by 0")
            else:
                result = value1 // value2
        return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_value(self, *args, **kwargs):\n\n return None", "def getValue(self):\n # compute and return my value\n return functools.reduce(operator.mul, (op.getValue() for op in self.operands))", "def value(self) -> float:", "def val(self):\r\n if not self.value:\r\n self._calc()\r\n\r\n return self.value", "def val(self):\r\n if not self.value:\r\n self._calc()\r\n\r\n return self.value", "def value(self) -> float:\n ...", "def value(self) -> float:\n ...", "def value(self) -> float:\n ...", "def value(self):\n return self.compute_value()", "def parameter_value(value):\n return PrimitiveComputer().compute(value)", "def compute_val(self, val: Union[int, float]) -> Union[int, float]:\n\n return (val - self.min) / (self.max - self.min)", "def value(self, idx):\n item = self.items[idx]\n if item is None:\n ret = -float('inf')\n else:\n ret = self.fn(item)\n return ret", "def calculate(self) -> float:", "def value(self) -> global___Expression:", "def value(self) -> global___Expression:", "def getValue(self) -> int:\n ...", "def _value(self):\n return self.device.value(*self._id[1:])", "def eval(self, value: Value) -> Value:\n raise NotImplementedError()", "def evaluate(self) -> int:", "def calculate_value(self, x: np.array) -> np.array:\n pass", "def value(t):\r\n return t(0)", "def __call__(self):\n return self.value", "def value(self):\n current_value = self.initial_value * self.schedule(self.step / self.nvalues)\n self.step += 1.\n return current_value", "def process(self, value):\n return float(value)", "def value(self) -> float:\n return self._value * self._prefix", "def value(self) -> float:\n return pulumi.get(self, \"value\")", "def value(self) -> float:\n return pulumi.get(self, \"value\")", "def _compute_value(self, row_struct, par_struct):\n if _computephotpars is None:\n raise ImportError('_computephotpars C-extension not built')\n return _computephotpars.compute_value(row_struct, par_struct)", "def _compute_value(self, s_12, s_13, kin):\n if self.identical_particles:\n val = self.breit_wigner_12.eval(s_12) * self.partial_wave_12.eval(kin) \\\n + self.breit_wigner_13.eval(s_13) * self.partial_wave_13.eval(kin)\n else:\n val = None\n\n return val", "def compute(*args, **kwargs):\n args = [value(a) for a in args]\n return base.compute(*args, **kwargs)", "def real_value(val):\n\t\treturn round(val/100*sum(euros), 1)", "def value(self, grid):\n if not self._fix_val or self.val is None:\n # self.check_params()\n self._fix_val = self.cache_val\n self.val = self.evaluate(self.params, grid)\n # centralization\n # self.val -= np.mean(self.val)\n assert self.val.shape == grid.shape, \"Value must be the same shape as grid\"\n return self.val", "def CallValue(contract : 'Contract') -> float:\n return Option.__call_value(contract.underlyingPrice, contract.strikePrice, contract.interestRate / 100, contract.daysToExpiration / 365, contract.volatility / 100)", "def get(self) -> float:\n ...", "def getValue(self):\n # compute the values of my operands\n values = (op.getValue() for op in self.operands)\n # apply my operator\n return self.evaluator(*values)", "def Value(self) -> _n_0_t_14:", "def compute_value(callback, graph):\n return callback(graph)", "def Calcular(a: float) ->float:\n \n return (a*2)", "def getMyValue(self):\n valueBV = 0.0\n valueCR = 0.0\n valueAL = 0.0\n valueEC = 0.0\n valueIA = 0.0\n factorAL = globals.cityCRGen/globals.cityALGen\n factorEC = globals.cityCRGen/globals.cityECGen\n factorIA = globals.cityCRGen/globals.cityIAGen\n ratio = self.strength/100.0\n valueCR += 
self.myDesign.costCR*ratio\n valueAL += self.myDesign.costAL*ratio\n valueEC += self.myDesign.costEC*ratio\n valueIA += self.myDesign.costIA*ratio\n valueBV += (valueCR +\n valueAL*factorAL +\n valueEC*factorEC +\n valueIA*factorIA) / 1000.0\n return (valueBV, valueCR, valueAL, valueEC, valueIA)", "def get_value(self):\n if not self.visited:\n # first visit at node\n self.visited = True\n\n # value calculation\n for node, weight in self.predecessors:\n self.value += (node.get_value() * weight)\n\n # applying activation function\n if self.activation is not None:\n self.activation()\n\n self.calculated = True\n\n return self.value\n else:\n # visited node\n if self.calculated:\n # calculated in this computation\n return self.value\n else:\n # recurrent connection\n return self.past_value", "def computeValueFromQValues(self, state):\n bestAction = self.computeActionFromQValues(state)\n if bestAction == None:\n return 0.0\n return self.getQValue(state, bestAction)", "def GetValue(self, *args):\n return _ITKCostFunctionsPython.itkSingleValuedCostFunction_GetValue(self, *args)", "def value(x):\n\tif isNumber(x): return x\n\telse: return x.value()", "def get_value(self) -> float:\n return self._data_provider.get_value()", "def evaluate():\n\t\t\t\tif not hasattr(evaluate, 'value'):\n\t\t\t\t\tevaluate.value = func()\n\t\t\t\treturn evaluate.value", "def get_value(self):\r\n return 0", "def computevaluefromqvalues(self, state):\n legalactions = env.getlegalactions(deepcopy(env.state_to_array(state)))\n if len(legalactions) == 0:\n return 0.0\n tmp = Counter()\n for action in legalactions:\n tmp[action] = self.getqvalue(state, action)\n return tmp[tmp.argMax()]", "def at_value(self, first, second=None):\n result = self.calculate(first)\n\n if second is not None:\n result = self.calculate(second) - result\n\n return result", "def value(self):\n\n if self.state == Node.State.VALID:\n return self._value\n else:\n with _NodeStackFrame(self):\n self.state = Node.State.PENDING\n self.value = self.compute_value(*self.args, **self.kwargs)\n return self._value", "def __call__(self):\n value = self._value\n if value is None:\n value = self._init()\n self._value = value\n return value", "def num (self):\n return self.value[0]/self.value[1]", "def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_vals) == 1\r\n return input_vals[0] / node.const_attr", "def _get_value(self):\n return self.__value", "def value(self, ob, *args, **kwargs):\n return self._evaluate(self.vf, ob, *args, **kwargs)", "def value(self, ob, *args, **kwargs):\n return self._evaluate(self.vf, ob, *args, **kwargs)", "def _get_value(self):\n \n return self._value", "def getValue(self):\n result = 0.0\n for e in self.children:\n result += e.getValue()\n return result", "def value_calculation(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"value_calculation\")", "def __call__(self, observation):\n # Validates that the state variable is a scalar with this float() call.\n current_val = float(observation[self.dict_key])\n retval = current_val - self.last_val\n self.last_val = current_val\n return retval", "def compute(self, node, input_vals):\n assert len(input_vals) == 2\n return input_vals[0] * input_vals[1]", "def value(self) -> Optional[float]:\n return pulumi.get(self, \"value\")", "def compute(self, now, input_value):\n\n # Calculate time change. 
Return last output if no change.\n time_change = now - self.last_time\n if time_change <= 0:\n return self.output\n\n # Get and update constants.\n kp = self.kp\n ki = self.ki * time_change\n kd = self.kd / time_change\n\n # Compute all the working error variables.\n input_error = self.set_point - input_value\n d_input = input_value - self.last_input\n\n # Remember state for next time.\n self.last_input = input_value\n self.last_time = now\n\n\t# Factor in integral.\n self.output += ki * input_error\n\n\t# Factor in proportional-on-measurement.\n if not self.p_on_e:\n self.output -= kp * d_input\n\n\t# Factor in proportional-on-error.\n if self.p_on_e:\n self.output -= kp * input_error\n\n\t# Factor in derivative.\n self.output -= kd * d_input\n\n\t# Keep outputSum limited to legal values.\n self.output = self.clip_to_output_limits(self.output)\n return self.output", "def value(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"value\")", "def get_value(\n self\n ) -> float:\n\n return self.average", "def calc(self):\n return None", "def double(value):\n return 2 * value", "def getValue(self):\n return self.left.getValue() * self.right.getValue()", "def value(self):\n self._value = self._op.value\n return self._value", "def get_result(self, x):\n return self.i*x", "def value(x):\n if isNumber(x):\n return x\n else:\n return x.value()", "def value(self): \r\n c = self.nd1() * self.s * math.exp(-self.div * self.t)\r\n c -= self.nd2() * self.x * math.exp(-self.rf * self.t)\r\n \r\n return c", "def result(self):\n tp = self.true_positive.read_value()\n fn = self.false_negative.read_value()\n return math_ops.div_no_nan(tp, tp + fn)", "def do(self, fun):\n with self.mutex:\n self.value = fun(self.value)\n return self.value", "def double(value):\n return value * 2", "def double(value):\n return value * 2", "def __call__(self, value=empty):\n if value is not self.empty:\n self.value = value\n return self.value", "def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_vals) == 1\r\n return input_vals[0] * node.const_attr", "def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n return (np.greater_equal(input_vals[1],0)).astype(np.int32)*input_vals[0]", "def _get_value(o):\n return value(o, exception=False)", "def getValue(self):\n return self.left.getValue() ** self.right.getValue()", "def compute(self, node, input_vals):\n \"\"\"TODO: Your code here\"\"\"\n assert len(input_vals) == 1\n return input_vals[0] * node.const_attr", "def _call(self, x):\n return self.constant", "def getValue(self):\n key = int(self.keyExpression.getValue())\n if key in self.dictOfExpressions:\n return self.dictOfExpressions[key].getValue()\n\n return 0.0", "def value_expr(self, t, h_plus, u):\n return sum(h_plus) * self.weight_expr(t, h_plus / sum(h_plus),\n u / sum(u), sum(h_plus))", "def value(self) -> float:\n return self._value", "def value(self) -> float:\n return self._value", "def value(self) -> float:\n return self._value", "def get_val(self):\n return self.value", "def compute(self, node, input_vals):\n assert len(input_vals) == 2\n return input_vals[0] + input_vals[1]", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n if not self.getLegalActions(state): return 0\n\n best_action = self.computeActionFromQValues(state)\n return self.getQValue(state, best_action)", "def __call__(self, val, tag=''):\n return self.constant(val, tag=tag)", "def _computeValueFunction(self, nbDims, low, high, retstep=False):\n # algorithms 
performing in discrete space will have a discrete\n # value function that cannot be evaluated at any point - only on the\n # ones for which they have been setup based on the problem it has been\n # setup to solve\n def __round(vec):\n return tuple(int(x) for x in vec)\n\n def __notround(vec):\n return vec\n\n _round = __notround\n if self._algo.DOMAIN['state'] == Spaces.Discrete:\n _round = __round\n\n allParams, stepSizes = self._discretizer.discretize(retstep=True)\n\n allActions = self._problem.getActionsList()\n reducer = max if self.reducer == 'max' else mean\n\n # returns a list\n data = [\n utils.extends({\n key: state[k]\n for k, key in enumerate(self.getKeys(nbDims))\n }, z=reducer([\n self._algo.actionValue(_round(state), action)\n for action in allActions]))\n for state in allParams\n ]\n if retstep:\n return data, stepSizes\n return data", "def value(self, y, a0, a1):\n if y >= -self.R: # y = -2, -3, ..., -60\n c, n = (1+self.r)*a0 + self.b - a1, 0\n else:\n c, n = self.solve(a0,a1)\n\n v = self.util(c,n) + self.beta*self.vtilde[y+1](a1)\n return v if c > 0 else self.neg", "def GetValue(self, *args):\n return _ITKCostFunctionsPython.itkMultipleValuedCostFunction_GetValue(self, *args)", "def compValFromState(self, state):\n \"*** YOUR CODE HERE ***\"\n # print \"compValFromState\"\n #actions = self.getLegalActions(state)\n # if len(actions) == 0:\n # return 0.0\n # values = [self.getQValue() for action in actions]\n return self.getQValue(state, None)", "def get_value(self):\n return self._value", "def get_val(self, **kwargs):\n return self._value", "def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n assert len(input_vals) == 1\r\n return node.const_attr / input_vals[0]", "def __int__(self):\n\n return self.value", "def mult(value, arg):\n return int(value)-int(arg)" ]
[ "0.7594629", "0.7220241", "0.705439", "0.6837795", "0.6837795", "0.67696714", "0.67696714", "0.67696714", "0.6758692", "0.66920656", "0.66820556", "0.65508604", "0.6509542", "0.6488429", "0.6488429", "0.6481076", "0.64206606", "0.64025074", "0.6401865", "0.64014447", "0.63940805", "0.6393842", "0.63627195", "0.63568366", "0.6321932", "0.63137996", "0.63137996", "0.63094264", "0.630262", "0.62880003", "0.6283196", "0.62523633", "0.62498283", "0.6241556", "0.62310416", "0.62105036", "0.61759114", "0.6173945", "0.6168995", "0.6165577", "0.61577344", "0.6154492", "0.6150077", "0.6148015", "0.61376023", "0.61369103", "0.613134", "0.6127346", "0.6115819", "0.6111239", "0.61081904", "0.6097068", "0.60877883", "0.6067586", "0.6067586", "0.60641277", "0.60581523", "0.60528374", "0.6035382", "0.603216", "0.602922", "0.6026727", "0.60242933", "0.6003101", "0.60010046", "0.6000139", "0.59993595", "0.5993179", "0.5992485", "0.59879446", "0.59803534", "0.5980316", "0.59772074", "0.597549", "0.597549", "0.5972694", "0.5961942", "0.5957788", "0.59558547", "0.59552604", "0.5954324", "0.595334", "0.5946076", "0.5939746", "0.59371924", "0.59371924", "0.59371924", "0.5936116", "0.5934826", "0.5924546", "0.5899636", "0.5899252", "0.5898406", "0.58944845", "0.58939177", "0.5889735", "0.5869166", "0.5861016", "0.58589166", "0.5858262" ]
0.6336137
24
Returns the value of the expression.
def value(self): return self.computeValue(self.operator, self.leftOperand.value(), self.rightOperand.value())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def value(self) -> global___Expression:", "def value(self) -> global___Expression:", "def value(self) -> Optional[Expression]:\n return self.__value", "def value_expression(self) -> str:\n return pulumi.get(self, \"value_expression\")", "def value_expression(self) -> Optional[str]:\n return pulumi.get(self, \"value_expression\")", "def getValue(self):\n key = int(self.keyExpression.getValue())\n if key in self.dictOfExpressions:\n return self.dictOfExpressions[key].getValue()\n\n return 0.0", "def evaluateValue(compiled_expression):", "def expression(self):\n return self._expression", "def getValue(self):\n return _libsbml.ASTNode_getValue(self)", "def getValue(self):\n # compute the values of my operands\n values = (op.getValue() for op in self.operands)\n # apply my operator\n return self.evaluator(*values)", "def eval(self) -> typing.Any:\n return self.expr()", "def value(self):\n self._value = self._op.value\n return self._value", "def _getvalue_expr_Constant(self, expr: ast.Constant) -> Any:\n return expr.value", "def getValue(self):\n return self.value", "def getValue(self):\n return self.value", "def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def value(self) -> str:\n if hasattr(self, \"_value\"):\n return self._value\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def getValue(self):\n \n return self._value", "def request_value(self) -> global___Expression:", "def value(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"value\", _args)\n return _ctx.execute_sync(str)", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def get_value(self):\n return self.value", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def get_value(self):\n return self._val", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def get_value(self):\n return self._value", "def get_value(self):\n return self._value", "def getValue(self):\n return self.field.value()", "def getValue(self):\n return self.field.value()", "def _get_value(self):\n \n return self._value", "def value(self):\n\n\t\treturn self.__value", "def getval(self):\r\n return self.value", "def result(self):\r\n # Module(body=[Expr(value=...)])\r\n return self.eval_(ast.parse(self.expr).body[0].value)", "def get_val(self):\n return self.value", "def value(self):\n return self._val", "def get_value(self):\n return self._value", "def expression(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"expression\")", "def value(self):\n return self.__value", "def value(self):\n return self.__value", "def value(self):\n return self.value()._value", "def _get_value(self):\n return self.__value", "def expr(self):\n return self._express", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def GetValue(self):\n return self._value", "def value(self):\n s = str(self.input.text())\n if self._is_string_:\n return s\n else:\n return eval(s)", "def value(self):\n return self._value_", "def value(self):\n 
return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self._value", "def value(self):\n return self.get_data(\"value\")", "def value (self) :\n\n return self.__value__", "def vvalue(self) -> Qval:\n return self.get(self.greedy())", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")" ]
[ "0.80231833", "0.80231833", "0.7905604", "0.7728165", "0.76552755", "0.7503011", "0.74549043", "0.7389446", "0.73270637", "0.7262415", "0.723994", "0.720171", "0.71606433", "0.7152678", "0.7130495", "0.710755", "0.710755", "0.7055234", "0.70376086", "0.7030717", "0.70151573", "0.70151573", "0.70151573", "0.7012379", "0.7012379", "0.7011926", "0.7011926", "0.7011926", "0.7008786", "0.7004478", "0.7004478", "0.7004478", "0.70010155", "0.70010155", "0.69911677", "0.69911677", "0.69802433", "0.6978887", "0.6978126", "0.69756883", "0.6973207", "0.69602114", "0.695078", "0.6944549", "0.69300216", "0.69300216", "0.6923896", "0.6914406", "0.6909837", "0.6906768", "0.6906768", "0.6906768", "0.6900424", "0.68902546", "0.68834764", "0.68834764", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6879317", "0.6867384", "0.6848956", "0.6843343", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832", "0.6823832" ]
0.7429503
7
Returns the expression in prefix form.
def prefix(self): return str(self.operator) + " " + self.leftOperand.prefix() + " " + self.rightOperand.prefix()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_prefix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate prefix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\tfor e in reversed(elements):\n\t\t\tif e.isdigit():\n\t\t\t\tstack.append(int(e))\n\t\t\telse:\n\t\t\t\t# this is an operator\n\t\t\t\tif (len(stack) < 2):\n\t\t\t\t\tlogger.info(\"invalid input\")\n\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\toperand2 = stack.pop()\n\t\t\t\t\tif e == \"+\":\n\t\t\t\t\t\tresult = operand1 + operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"-\":\n\t\t\t\t\t\tresult = operand1 - operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"*\":\n\t\t\t\t\t\tresult = operand1 * operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"/\":\n\t\t\t\t\t\tresult = operand1 / operand2\n\t\t\t\t\t\tstack.append(int(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.exception(\"Unrecognized operator\")\n\t\t\t\t\t\traise Exception(\"Not a valid operator\")\n\t\treturn float(stack[0])", "def infix_to_prefix(self, expr: str) -> str:\n\n # Reverse expr\n expr = reversed(expr)\n\n # Convert expr to list\n expr = list(expr)\n\n # Reverse all parantheses\n for i, e in enumerate(expr):\n if e == \"(\":\n expr[i] = \")\"\n elif e == \")\":\n expr[i] = \"(\"\n \n # Convert expr back to string\n expr = ''.join(expr)\n\n # Convert expr to postfix\n expr = self.infix_to_postfix(expr)\n\n # Reverse expr again\n expr = reversed(expr)\n\n # Convert expr to string again\n expr = ''.join(expr)\n\n # Return expr\n return expr", "def base_prefix(self):\n return self.calculation.base_prefix", "def prefix(self):\n return self[\"prefix\"]", "def prefix(self):\n return self[\"prefix\"]", "def prefix(pattern):\r\n return pattern[0:len(pattern)-1]", "def prefix(pattern):\n return pattern[0:len(pattern)-1]", "def getPrefix(self):\n return _libsbml.ASTBasePlugin_getPrefix(self)", "def prefix(self):\n return self._prefix", "def prefix(self):\n return self._prefix", "def prefix(self):\n return self._prefix", "def getPrefix(self):\n raise NotImplementedError", "def result_prefix(self):\n return self.calculation.result_prefix", "def get_prefix(self):\n return self.prefix", "def get_prefix(self):\n return self._prefix", "def get_prefix(self):\n return self._prefix", "def getPrefix(self):\n return _libsbml.MultiASTPlugin_getPrefix(self)", "def getPrefix(self):\n return _libsbml.SBase_getPrefix(self)", "def getPrefix(self, *args):\n return _libsbml.XMLNamespaces_getPrefix(self, *args)", "def getPrefix(self):\n return _libsbml.XMLToken_getPrefix(self)", "def getPrefixedName(self, *args):\n return _libsbml.XMLAttributes_getPrefixedName(self, *args)", "def getPrefix(self, *args):\n return _libsbml.XMLAttributes_getPrefix(self, *args)", "def prefix(self):\n return self._path_prefix", "def set_prefix_expression(self, expression, clear_args = True):\n if expression and type(expression) is not str:\n raise TypeError('expression should be either string or None or False')\n if clear_args:\n self._prefix_kwargs = {}\n self._prefix_expression = expression", "def _expand_prefix(prefix, configs):\n return subst_vars(prefix, configs)", "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")", "def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix 
expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix", "def getPrefixedName(self):\n return _libsbml.XMLTriple_getPrefixedName(self)", "def getAttrPrefixedName(self, *args):\n return _libsbml.XMLToken_getAttrPrefixedName(self, *args)", "def prefix(name):\n def rule(symbol):\n return symbol.startswith(name) or None\n return rule", "def getAttrPrefix(self, *args):\n return _libsbml.XMLToken_getAttrPrefix(self, *args)", "def command_with_prefix(self):\n return self.endpoint_prefix.rstrip('/') + self.command", "def getPrefix(self):\n return _libsbml.XMLTriple_getPrefix(self)", "def prefixed(self, prefix):\n if not prefix:\n return self.clone()\n else:\n return self.using(join(prefix, self))", "def index_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"index_prefix\")", "def index_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"index_prefix\")", "def getPrefix(self):\n return _libsbml.SBasePlugin_getPrefix(self)", "def infix(self):\n return \"(\" + self.leftOperand.infix() + \" \" + str(self.operator) + \" \" + self.rightOperand.infix() + \")\"", "def getNamespacePrefix(self, *args):\n return _libsbml.XMLToken_getNamespacePrefix(self, *args)", "def default_prefix(self) -> str:", "def _django_prefix():\n return _interpolate(DJANGO_PREFIX)", "def getPrefixPattern(self):\n return self.getOrDefault(\"prefixPattern\")", "def prefix_to_tree(self, expr: str, delimeter: str = None, node_name: str = \"base\") -> Tree:\n # Create a tree\n tree = Tree()\n\n # Convert the expression to a deque\n expr_deque = deque(expr.split(delimeter))\n\n # Create a base node\n base_node = tree.create_node(node_name,0)\n\n # Start the add loop\n tree, count = self._add_prefix_to_node(expr_deque, tree, base_node, 1)\n\n # Return tree\n return tree", "def getPrefix(self):\n return( self.id.split('.')[0] )", "def expand(self, expression):\n if not expression:\n return b\"\"\n if expression[0] in self._operators:\n operator, expression = expression[:1], expression[1:]\n if operator == b\"+\":\n return self._expand(expression, reserved)\n elif operator == b\"#\":\n return self._expand(expression, reserved, prefix=b\"#\")\n elif operator == b\".\":\n return self._expand(expression, prefix=b\".\", separator=b\".\")\n elif operator == b\"/\":\n return self._expand(expression, prefix=b\"/\", separator=b\"/\")\n elif operator == b\";\":\n return self._expand(expression, prefix=b\";\", separator=b\";\",\n with_keys=True, trim_empty_equals=True)\n elif operator == b\"?\":\n return self._expand(expression, prefix=b\"?\", separator=b\"&\",\n with_keys=True)\n elif operator == b\"&\":\n return self._expand(expression, prefix=b\"&\", separator=b\"&\",\n with_keys=True)\n else:\n return self._expand(expression)", "def _prefix(self):\n name = self.__class__.__name__\n 
return name[:2] + ''.join(c for c in name if c.isupper())[1:]", "def expr(self):\n return self._express", "def _getPrefix(self, namespaceURI):\r\n prefixDict = self._getPrefixDict()\r\n if prefixDict.has_key(namespaceURI):\r\n prefix = prefixDict[namespaceURI]\r\n else:\r\n prefix = 'ns1'\r\n while prefix in prefixDict.values():\r\n prefix = 'ns%d' %int(prefix[-1]) + 1\r\n prefixDict[namespaceURI] = prefix\r\n return prefix", "def value_prefix(self) -> str:\n return self._value_prefix", "def name_prefix(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"name_prefix\")", "def removeprefix(self, x) -> String:\n pass", "def infix2prefix(self, lst):\n stk = []\n pre = []\n for elt in reversed(lst):\n if elt.isdigit():\n pre.append(elt)\n elif elt == \")\":\n stk.append(elt)\n elif elt == \"(\":\n while stk and stk[-1] != \")\":\n pre.append(stk.pop())\n stk.pop()\n else:\n while stk and self.precedence(elt) < self.precedence(stk[-1]): # < rather than <=\n pre.append(stk.pop())\n stk.append(elt)\n\n while stk:\n pre.append(stk.pop())\n\n pre.reverse()\n return pre", "def get_var_prefix(self):\n return ''", "def expression_phrase(self):\n return self._expression_phrase", "def prefix_to_postfix(input_str): # prefix requires that all operators precede the two operands that they work on\n\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n if input_str is None: raise ValueError\n # split input string into list\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # initialize output list\n output_list = []\n #print(\"OUT SIZE \", len(output_list))\n # initialize operator stack\n operator_stack = Stack(len(term_list)//3+1)\n for i in range(len(term_list)):\n term = term_list[i]\n # prefix should begin with an operator otherwise raise Exception\n if i == 0:\n if operator_present(term) is True: operator_stack.push(term)\n else: raise PostfixFormatException()\n # Check for operator\n elif operator_present(term): \n operator_stack.push(term)\n # check for operand\n elif operand_present(term):\n output_list.append(term)\n # if previous two terms in output list were operands, pop operator stack to output list once\n if operand_present(term_list[i-1]):\n output_list.append(operator_stack.pop())\n # for every three operands there should be an additional operator\n if operand_present(term_list[i-3]) and operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')", "def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')", "def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')", "def prefix(self, s):\n\t\tif not s:\n\t\t\treturn self.value\n\t\thead, tail = s[0], s[1:]\n\t\tif head not in self.root:\n\t\t\treturn False # Not contained\n\t\tnode = self.root[head]\n\t\treturn node.prefix(tail)", "def _extract_immediate_prefix(obj_key:str)->str:\n immed_prefix = \"\"\n if len(obj_key.split(\"/\")) > 1:\n immed_prefix = obj_key.split(\"/\")[-2]\n \n return immed_prefix", "def conan_prefix(self):\n return self._conan_prefix", "def print_prefix(self):\n if self.is_empty():\n return 
\"\"\n else:\n ch = str(self.root_value())\n if self.is_leaf():\n return ch\n else:\n if self.has_left():\n if self.has_right():\n return ch + \" \" + self.get_left().print_prefix() + \" \" + self.get_right().print_prefix()\n else:\n return ch + \" \" + self.get_left().print_prefix()\n else:\n return ch + \" \" + self.get_right().print_prefix()", "def expression(self):\n\n result = u\"{}({}\".format(self.function.lower(),\n self.metric_name)\n\n if self.dimensions_str:\n result += u\"{{{}}}\".format(self.dimensions_str)\n\n if self.deterministic:\n result += u\", deterministic\"\n\n if self.period:\n result += u\", {}\".format(str(self.period))\n\n result += u\")\"\n\n result += u\" {} {}\".format(self.operator,\n str(self.threshold))\n\n if self.periods:\n result += u\" times {}\".format(str(self.periods))\n\n return result", "def prefix_to_ns(self, prefix):\n defin = self.module.i_ctx.get_module(\n self.module.i_prefixes[prefix][0])\n return defin.search_one(\"namespace\").arg", "def expression(self):\n return self._expression", "def add_prefix(self, field_name):\r\n return self.prefix and ('%s.%s' % (self.prefix, field_name)) or field_name", "def _prefix_and(*exprs, **kwargs):\n anded = ' AND '.join('(%s)' % expr for expr in exprs if expr)\n if len(anded) == 0:\n return ''\n return kwargs.get('prefix', 'WHERE ') + anded", "def entity_prefix(self):", "def name(self):\n return self.prefix", "def getPrefix(self):\n return \"20gig\"", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def name_prefix(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"name_prefix\")", "def series_add_prefix(series, prefix):\n f = partial(\"{prefix}{}\".format, prefix=prefix)\n\n return series.rename(index=f)", "def prefix(self):\n return str(self.data)", "def infix_to_postfix(self, expr: str) -> str:\n\n # The stack that we will be performing operations on\n stack: list[str] = []\n\n # The output\n output: str = \"\"\n\n # We always need surrounding parentheses\n expr = f\"({expr})\"\n\n # The tokenized expression\n expr = self.tokenize_expr(expr)\n\n\n \n # For every token in expression\n for token in expr:\n # Check what token it is\n if token == \"(\":\n # If it is a (, then append to stack\n stack.append(\"(\")\n elif token == \")\":\n # If it is a ), then iterate over stack\n while stack[-1] != '(':\n # Popping the last item from stack, to output\n # Include a trailing space\n # Until the last item in the stack is a (\n output += f\"{stack.pop()} \"\n # Pop the last ( from the stack\n stack.pop()\n elif re.match(r\"[a-zA-Z_][a-zA-Z0-9_]*\", token):\n # If it matches a name/variable\n # Append to output with a trailing space\n output += f\"{token} \"\n elif re.match(r\"\\d+\",token):\n # If it is a number\n # Then append with a trailing space\n output += f\"{token} \"\n else:\n if self.is_token(token):\n # If it is a token\n # Pop it from the stack while\n # It's priority is smaller than\n # the last priority of the stack\n # Put it into output with a trailing space\n while self.get_token_priority(token) <= self.get_token_priority(stack[-1]):\n output += f\"{stack.pop()} \"\n # And append token to stack\n stack.append(token)\n # Return output\n return output", "def Prefix(self):\n ret = libxml2mod.xmlTextReaderConstPrefix(self._o)\n return ret", "def prefix(self, xform):\n tail = 
self\n while tail.prev != None:\n tail = tail.prev\n tail.prev = xform", "def prefix(num):\n # determine which range it lies in, r1/r2 means reduction 1 or reduction 2\n divisors = [1e-24 * pow(10, 3 * x) for x in range(17)]\n prefixes = list(reversed(['Yotta (Y)', 'Zetta (Z)', 'Exa (E)', 'Peta (P)', 'Tera (T)', 'Giga (G)', 'Mega (M)',\n 'Kilo (K)', '', 'Milli (m)', 'Micro ($\\mu$)', 'Nano (n)', 'Pico (p)', 'Femto (f)',\n 'Atto (a)', 'Zepto (z)', 'Yocto (y)']))\n exp = np.floor(np.log10(np.abs(num)))\n if exp < 0:\n exp -= 3\n expIndex = int(exp / 3) + 8\n expIndex = 0 if expIndex < 0 else expIndex\n expIndex = len(prefixes)-1 if expIndex >= len(prefixes) else expIndex\n r1 = prefixes[expIndex]\n num1 = num / divisors[expIndex]\n if expIndex != len(prefixes):\n r2 = prefixes[expIndex + 1]\n num2 = num / divisors[expIndex + 1]\n else:\n num2 = None\n retStr = str(num1) + ' ' + r1\n if num2 is not None:\n retStr += '\\nor\\n' + str(num2) + ' ' + r2\n return retStr", "def get_prefix(coef, bias=0.1, omit=None):\n if omit is None:\n omit = num_prefixes\n\n values = [val for key, val in six.iteritems(prefixes) if key not in omit]\n coefs = nm.array(values, dtype=nm.float64)\n coefs.sort()\n ii = nm.searchsorted(coefs, bias*coef, side='left')\n\n if ii == len(coefs):\n ii = ii - 1\n\n cc = coefs[ii]\n prefix = inv_prefixes[cc]\n mul = coef / cc\n\n return prefix, mul", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def add_prefix(prefix = \"Peptides\"):\n var_list = gen_cell_lines_states_replicates()\n prefix = prefix\n res_list = []\n for i in var_list:\n unit_str = prefix + \" \"\n unit_str += i\n res_list.append(unit_str)\n return res_list", "def add_prefix(self, field_name):\n return self.prefix and ('%s-%s' % (self.prefix, field_name)) or field_name", "def prefixsrc(self):\n return self[\"prefixsrc\"]", "def prefixsrc(self):\n return self[\"prefixsrc\"]", "def test_evaluate_starts_with_expression(self):\n value = self.evaluate_common(\"startswith('startswith','start')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is True)\n value = self.evaluate_common(\"startswith('startswith','end')\")\n self.assertTrue(value.value is False)\n value = self.evaluate_common(\"startswith('startswith','Start')\")\n # not case insensitive\n self.assertTrue(value.value is False)\n try:\n value = self.evaluate_common(\"startswith('3.14',3)\")\n self.fail(\"integer as prefix\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"startswith('3.14')\")\n self.fail(\"1 parameter\")\n except odata.EvaluationError:\n pass", "def get_onr_prefix_postfix(self) -> Tuple[str, str]:\n op2 = self.op2\n prefix = ''\n postfix = ''\n if op2.table_name in [b'ONRGY1', b'ONRGY2', b'ONRGY']:\n prefix = 'strain_energy.'\n elif op2.table_name in [b'RANEATC']: #, b'OSTRMS1C']:\n op2.format_code = 1\n op2.sort_bits[0] = 0 # real\n prefix = 'RANEATC.'\n elif op2.table_name in [b'RANCONS']: #, b'OSTRMS1C']:\n op2.format_code = 1\n op2.sort_bits[0] = 0 # real\n prefix = 'RANCONS.'\n else:\n raise NotImplementedError(op2.table_name)\n op2.data_code['sort_bits'] = op2.sort_bits\n op2.data_code['nonlinear_factor'] = op2.nonlinear_factor\n return prefix, postfix", "def prefix_format(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix_format\")", "def prefix_format(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"prefix_format\")", "def set_Prefix(self, value):\n super(DescribeEvaluationsInputSet, self)._set_input('Prefix', value)", "def get_prefix_url(request):", "def prefix_value(s):\n forbidden = forbidden_chars.intersection(s)\n if forbidden:\n raise ValueError('%(s)s contains forbidden characters'\n ' (%(forbidden)s)'\n % locals())\n stripped = s.strip('/')\n if stripped:\n return stripped.join('//')\n return '/'", "def autoprefix(prefix):\n pl = len(prefix)\n msg = '%%(s)r: expected some name after %(prefix)r!' % locals()\n def checker(s):\n if s.startswith(prefix):\n tail = s[pl:]\n if tail:\n return prefix + dotted_name(tail)\n else:\n raise ValueError(msg % locals())\n elif s:\n return prefix + dotted_name(s)\n else:\n return ''\n return checker", "def prep_equation(self):\n \n # This transforms the equation into an expression for sympy.\n prepped_equation = self.equation.replace(\"=\", \"-(\") + \")\"\n\n # This transforms the equation string into a sympy-readable equation.\n transformations = standard_transformations + (implicit_multiplication_application,)\n prepped_equation = parse_expr(prepped_equation, transformations=transformations)\n\n return prepped_equation", "def prefix(self):\n return self._get_storage().prefix", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")" ]
[ "0.710734", "0.6866923", "0.6425482", "0.63313454", "0.63313454", "0.63225156", "0.63132906", "0.6267651", "0.62378424", "0.62378424", "0.62378424", "0.6218811", "0.6152476", "0.6141088", "0.6069823", "0.6069823", "0.6055349", "0.60520095", "0.6048974", "0.60268766", "0.59823626", "0.5966694", "0.5940881", "0.591457", "0.5897211", "0.58804023", "0.58804023", "0.5866765", "0.58618367", "0.58404773", "0.58388597", "0.5833756", "0.58322996", "0.5826342", "0.57992333", "0.5767351", "0.5767351", "0.5758095", "0.5750478", "0.57393223", "0.573083", "0.5727359", "0.57233435", "0.5701996", "0.5688843", "0.5684467", "0.56490076", "0.56477404", "0.5637599", "0.56314707", "0.5614335", "0.5610098", "0.56019115", "0.5563935", "0.5563717", "0.55551225", "0.554288", "0.554288", "0.554288", "0.5540493", "0.5538691", "0.5536378", "0.55265903", "0.5521938", "0.5520736", "0.55043054", "0.5503633", "0.54964536", "0.54915786", "0.54886484", "0.5483853", "0.54739976", "0.54739976", "0.54739976", "0.54739976", "0.5469625", "0.54560906", "0.54488176", "0.54435277", "0.543421", "0.54229647", "0.5412241", "0.54089326", "0.54089326", "0.5406004", "0.5381731", "0.5380758", "0.5380758", "0.53706694", "0.5367419", "0.53652596", "0.53652596", "0.5363677", "0.5357183", "0.5343013", "0.53224343", "0.5301454", "0.5295515", "0.5294408", "0.5294408" ]
0.7265404
0
Returns the expression in infix form (fully parenthesized).
def infix(self): return "(" + self.leftOperand.infix() + " " + str(self.operator) + " " + self.rightOperand.infix() + ")"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_infix(string):\n return postfix(infix_to_postfix(string))", "def infix_to_prefix(self, expr: str) -> str:\n\n # Reverse expr\n expr = reversed(expr)\n\n # Convert expr to list\n expr = list(expr)\n\n # Reverse all parantheses\n for i, e in enumerate(expr):\n if e == \"(\":\n expr[i] = \")\"\n elif e == \")\":\n expr[i] = \"(\"\n \n # Convert expr back to string\n expr = ''.join(expr)\n\n # Convert expr to postfix\n expr = self.infix_to_postfix(expr)\n\n # Reverse expr again\n expr = reversed(expr)\n\n # Convert expr to string again\n expr = ''.join(expr)\n\n # Return expr\n return expr", "def calculate_infix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate infix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\ttry:\n\t\t\tfor e in elements:\n\t\t\t\tif not e.isdigit() and e != \")\":\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and not cls.is_operator(stack[-1]):\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and cls.is_operator(stack[-1]):\n\t\t\t\t\toperator = stack.pop()\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(e), operator)\n\t\t\t\t\tif stack[-1] == \"(\":\n\t\t\t\t\t\tstack.append(str(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\t\tbreak\n\t\t\t\tif e == \")\":\n\t\t\t\t\tvalue = stack.pop()\n\t\t\t\t\tob = stack.pop()\n\t\t\t\t\tif (ob == \"(\"):\n\t\t\t\t\t\tstack.append(str(value))\n\t\t\t\t\telif (cls.is_operator(ob)):\n\t\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(value), ob)\n\t\t\t\t\t\tstack.append(str(result))\n\n\t\t\tanswer = float(stack[0])\n\t\t\tlogger.info(f\"the answe is {answer}\")\n\t\t\treturn answer\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Exception from the infix function\")", "def infix_to_postfix(infix:str) -> str:\n stack = deque()\n precedence = {'+':1, '-':1,\n '*':2, '/':2,\n '^':3, '(':-9\n }\n output = \"\"\n for ch in infix:\n if ch not in {'+', '-', '*', '/', '^', '(', ')'}:\n output += ch\n elif ch == '(':\n stack.append(ch)\n elif ch == ')':\n while len(stack) > 0 and\\\n stack[-1] != '(':\n output += stack.pop()\n stack.pop()\n else:\n while len(stack) > 0 and\\\n precedence[stack[-1]] >= precedence[ch]:\n output += stack.pop()\n stack.append(ch)\n while len(stack) > 0:\n output += stack.pop()\n return output", "def infix_to_postfix(infix_expr):\n # Append adds new item to list\n # Concat creates a new list every time instead\n\n opstack = StackArray()\n res = []\n lstr = infix_expr.split()\n # l_para = r_para = 0\n # operator precedence dict\n prec = { # higher val = higher prec\n \"(\" : 4,\n \"^\" : 3, # r-to-l (i.e. 2^3^2 = 2^(3^2) )\n \"~\" : 3, # right-to-left (i.e. 
-3^2 = -9)\n # '*/+-' are associated left to right\n \"*\" : 2,\n \"/\" : 2,\n \"+\" : 1,\n \"-\" : 1\n }\n for token in lstr:\n if token[0] in '0123456789':\n res.append(token)\n # not opstack.is_empty() guards against IndexError on empty peek\n if not opstack.is_empty() and opstack.peek() == '^':\n res.append(opstack.pop())\n if not opstack.is_empty() and opstack.peek() == '~':\n res.append(opstack.pop())\n elif token == '(':\n # l_para += 1\n opstack.push(token)\n elif token == ')':\n # r_para += 1\n # opstack can't be empty for proper formatted input\n while opstack.peek() != '(':\n res.append(opstack.pop())\n opstack.pop() # remove left paran '('\n else: # token is ^ ~ * / + -: <-- operators\n while not opstack.is_empty() and prec[token] <= prec[opstack.peek()]:\n if opstack.peek() == '(':\n break\n elif token == '^' and opstack.peek() == '~':\n break\n else:\n res.append(opstack.pop())\n opstack.push(token)\n # if l_para != r_para:\n # raise SyntaxError\n while not opstack.is_empty():\n res.append(opstack.pop())\n res = \" \".join(res)\n res.strip()\n return res", "def infix_to_postfix(self, expr: str) -> str:\n\n # The stack that we will be performing operations on\n stack: list[str] = []\n\n # The output\n output: str = \"\"\n\n # We always need surrounding parentheses\n expr = f\"({expr})\"\n\n # The tokenized expression\n expr = self.tokenize_expr(expr)\n\n\n \n # For every token in expression\n for token in expr:\n # Check what token it is\n if token == \"(\":\n # If it is a (, then append to stack\n stack.append(\"(\")\n elif token == \")\":\n # If it is a ), then iterate over stack\n while stack[-1] != '(':\n # Popping the last item from stack, to output\n # Include a trailing space\n # Until the last item in the stack is a (\n output += f\"{stack.pop()} \"\n # Pop the last ( from the stack\n stack.pop()\n elif re.match(r\"[a-zA-Z_][a-zA-Z0-9_]*\", token):\n # If it matches a name/variable\n # Append to output with a trailing space\n output += f\"{token} \"\n elif re.match(r\"\\d+\",token):\n # If it is a number\n # Then append with a trailing space\n output += f\"{token} \"\n else:\n if self.is_token(token):\n # If it is a token\n # Pop it from the stack while\n # It's priority is smaller than\n # the last priority of the stack\n # Put it into output with a trailing space\n while self.get_token_priority(token) <= self.get_token_priority(stack[-1]):\n output += f\"{stack.pop()} \"\n # And append token to stack\n stack.append(token)\n # Return output\n return output", "def infix_to_postfix(expr):\n # you may find the following precedence dictionary useful\n prec = {'*': 2, '/': 2,\n '+': 1, '-': 1}\n ops = Stack()\n postfix = []\n toks = expr.split()\n ### BEGIN SOLUTION\n opp = {'*', '/','+', '-'}\n for x in toks:\n if str.isdigit(x):\n postfix.append(x)\n elif ops.empty() or ops.peek() == '(':\n ops.push(x)\n elif x == '(':\n ops.push(x)\n elif x == ')':\n while not ops.empty():\n temp = ops.pop()\n if temp == '(':\n break\n else:\n postfix.append(temp)\n elif x in opp:\n while True:\n if prec.get(x) > prec.get(ops.peek()):\n ops.push(x)\n break\n elif prec.get(x) == prec.get(ops.peek()):\n postfix.append(ops.pop())\n ops.push(x)\n break\n elif prec.get(x) < prec.get(ops.peek()):\n postfix.append(ops.pop())\n if ops.empty():\n ops.push(x)\n break\n elif ops.empty():\n break\n\n while True:\n if not ops.empty():\n postfix.append(ops.pop())\n else:\n break\n\n ### END SOLUTION\n return ' '.join(str(x) for x in postfix)", "def infix_to_postfix(expr):\n ops = Stack()\n postfix = 
[]\n toks = expr.split()\n def tests(chr):\n if chr.isdigit():\n postfix.append(chr)\n\n elif chr == '(':\n ops.push('(')\n\n elif ops.peek() == '(' or ops.empty():\n ops.push(chr)\n\n elif chr ==')':\n while ops.peek() != \"(\":\n postfix.append(ops.pop())\n ops.pop()\n\n elif chr in prec and prec[chr] > prec[ops.peek()]:\n ops.push(chr)\n\n elif chr in prec and prec[chr] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(chr)\n\n elif chr in prec and prec[chr] < prec[ops.peek()]:\n postfix.append(ops.pop())\n tests(chr)\n\n for tok in toks:\n tests(tok)\n\n\n while not ops.empty():\n postfix.append(ops.pop())\n\n\n return ' '.join(postfix)", "def infixToRPN(expression):\n stack = Stack()\n RPNList = []\n tokens = expression.split()\n spaces = True\n\n # If no spaces in expression then push each char in a tokens list\n if len(tokens) == 1:\n spaces = False\n tokens = [char for char in expression]\n\n for token in tokens:\n if token in alphabet or token in numbers:\n RPNList.append(token)\n elif token == '(':\n stack.push(token)\n elif token == ')':\n top = stack.pop()\n while top != '(':\n RPNList.append(top)\n top = stack.pop()\n else:\n while (not stack.isEmpty()) and (precedence[stack.peek()] >= precedence[token]):\n RPNList.append(stack.pop())\n stack.push(token)\n\n while not stack.isEmpty():\n RPNList.append(stack.pop())\n\n if spaces:\n return \" \".join(RPNList)\n else:\n return \"\".join(RPNList)", "def toPostfix(infix):\n output = \"\" # Output stack - the numbers in our expression\n operators = \"\" # Operator stack (using string for ease but could be a list)\n precedence = {\"*\": 100, \"/\": 90, \"+\": 80, \"-\": 70, \"(\": 60, \")\": 50} # Operator precedence dictionary - operator characters mapped to an arbitrary numeric value representing their precedence (BOMDAS)\n \n #Loop through characters\n for c in infix:\n #If c is a number\n if (c.isdigit()):\n output += c\n #Else if c is a function - ignoring these for now\n #Else if c is an operator - + - * / might account for x and division ASCII symbol later\n elif c in {\"+\", \"-\", \"*\", \"/\"}:\n # While there is still an operator left at the top of the stack\n # AND the operator at the top of the stack has greater precedence\n # OR the operator at the top of the stack has equal precedence and the token is left associative (don't know what this means, ignoring for now)\n # AND that operator is not a left parenthesis '('\n # Note: \\ tells python that a statement will continue on to the next line\n while len(operators) > 0 and operators[-1] != '(' and precedence[operators[-1]] > precedence[c]:\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # Push it onto the operator stack\n operators += c\n # Else if token is a left parenthesis (\n elif c == \"(\":\n # Push c to operator stack\n operators += c\n elif c == \")\":\n while operators[-1] != \"(\":\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # If there is a left bracket at the top of the stack, remove it\n if operators[-1] == '(':\n # Pop the operator from the operator stack and discard it\n operators = operators[:-1]\n # if there is a function token at the top of the operator stack... 
(Ignoring this for now)\n \n # If there are any operators left in the stack, append to output\n while len(operators) > 0:\n # Push operator from top of stack to output\n output += operators[-1]\n # Remove top operator from stack\n operators = operators[:-1]\n return output", "def convert_to_postfix(expression):\n infix = list(expression.replace(\" \", \"\"))\n opr_priority = {'!': 4, '*': 3, '+': 2, '>': 1, '=': 1, '(': 0}\n postfix = []\n stack = []\n\n for token in infix:\n if token in string.ascii_uppercase:\n postfix.append(token)\n elif token == '(':\n stack.append(token)\n elif token == ')':\n stack_token = stack.pop()\n while stack_token != '(':\n postfix.append(stack_token)\n stack_token = stack.pop()\n else:\n while stack and (opr_priority[stack[len(stack)-1]] >= opr_priority[token]):\n postfix.append(stack.pop())\n stack.append(token)\n\n while stack:\n postfix.append(stack.pop())\n\n return postfix", "def print_infix(self):\n if self.is_empty():\n return \"\"\n else:\n if self.is_leaf():\n return str(self.root_value())\n else:\n if self.has_left():\n if self.has_right():\n return str(self.get_left().print_infix()) + \" \" + str(self.root_value()) + \" \" \\\n + str(self.get_right().print_infix())\n else:\n return str(self.get_left().print_infix()) + \" \" + str(self.root_value())\n else:\n return str(self.root_value()) + \" \" + str(self.get_right().print_infix())", "def infix_to_postfix(string):\n \n # Validate and tokenize the string\n tokens = validate(string)\n \n # Initialize the stack\n s = Stack()\n\n # Ready the final postfix expression\n postfix = ''\n \n # List of operators that have to be handled\n operators = ['+', '-', '*', '/', '^', 'sqrt', 'u-', '(', ')']\n \n # Iterate through tokens\n for token in tokens:\n if token in operators:\n if token in ['sqrt', 'u-']:\n # Square root and unary minus have the highest precendence. So\n # they get pushed on to the stack immediately\n s.push(token)\n elif token == '^':\n top = s.peek()\n while top in ['sqrt', 'u-']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['*', '/']:\n # Multiplication and division have the same precedence. Order\n # is determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['+', '-']:\n # Addition and subtraction have the same precedence. 
Order is\n # determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^', '*', '/']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token == '(':\n s.push(token)\n elif token == ')':\n top = s.peek()\n while top != '(':\n postfix += s.pop() + ' '\n top = s.peek()\n s.pop()\n else: # Token is a number or variable\n postfix += token + ' '\n\n # Pop out any more operators that might be sitting on the stack\n while(len(s)):\n postfix += s.pop() + ' '\n\n # Get rid of trailing whitespace and print\n postfix = postfix.strip()\n return postfix", "def infix_to_postfix(self, exp):\n\n try:\n for i in exp:\n #if the character is an operand output it\n if self.is_operand(i):\n self.postfix.append(i)\n\n #if the character is '(' push it\n elif i is '(':\n self.push('(')\n\n elif i is ')':\n #if the character is ')\" pop until we encounter '(' in the stack\n while not self.isEmpty() and self.peek() is not '(':\n self.postfix.append(self.pop())\n if not self.isEmpty() and self.peek() is not '(':\n return -1\n else:\n self.pop()\n\n #if an operator is encountered\n else:\n while not self.isEmpty() and self.peek() is not '(' and self.not_greater(i):\n self.postfix.append(self.pop())\n self.push(i)\n while not self.isEmpty():\n self.postfix.append(self.pop())\n\n return ''.join(self.postfix)\n\n except Exception as e:\n print(\"Error occurred while performing infix to postfix conversion :\", e)\n traceback.print_exc()\n return -1", "def parse_infix(input: str) -> Node:\n parsed = ParsedString(input).tokenize()\n ans = parse_e(parsed)\n return ans", "def infix_to_assembly(formula: str) -> str:\n asm = \"\"\n postfix = infix_to_postfix(formula)\n for value in postfix:\n if value == \"+\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nadd ax, bx\"\n asm += \"\\npush ax\"\n elif value == \"-\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nsub ax, bx\"\n asm += \"\\npush ax\"\n elif value == \"*\":\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\nmul bx\"\n asm += \"\\npush ax\"\n elif value == \"/\":\n asm += \"\\nmov dx, 0h\"\n asm += \"\\npop bx\"\n asm += \"\\npop ax\"\n asm += \"\\ndiv bx\"\n asm += \"\\npush ax\"\n else:\n # asm += \"\\npush 0\" + value + \"h\"\n # the line above is commented out as the emulator has a bug\n # which pushes immediate 0bbh as 0ffbbh to the stack\n asm += \"\\nmov cx, 0\" + value + \"h\"\n asm += \"\\npush cx\"\n return asm", "def infix_to_postfix(s):\n result = \"\" # output string\n op = Stack() # operator stack\n i = 0 # index to 's'\n while i < len(s):\n if s[i] in \"0123456789\":\n while i < len(s) and s[i] in \"0123456789\":\n result += s[i]\n i += 1\n result += \" \"\n continue\n if s[i] == '(':\n op.push(s[i])\n elif s[i] == ')':\n top = op.pop()\n while top != '(':\n result += top + \" \"\n top = op.pop()\n else: # s[i] is +,-,*,/\n while not op.is_empty() and not higher_prec(s[i], op.peek()):\n result += op.pop() + \" \"\n op.push(s[i])\n i += 1\n while not op.is_empty():\n result += op.pop() + \" \"\n return result", "def expr(self):\n return self._express", "def brackets(expr):\n expr_latex = sp.latex(expr)\n if '+' in expr_latex or '-' in expr_latex:\n return \"(\" + expr_latex + \")\"\n else:\n return expr_latex", "def infix_to_postfix(string_input):\n stack_ops = []\n output = []\n value = \"\"\n\n for item in string_input:\n # item = operator\n if item in ops_prec.keys():\n value = value_to_output(value, output)\n\n # pop elements while they have lower precedence\n while 
(stack_ops\n and stack_ops[-1] in ops_prec.keys()\n and ops_prec[item] <= ops_prec[stack_ops[-1]]):\n output.append(stack_ops.pop())\n # else put item on stack\n stack_ops.append(item)\n\n # subexpression, delay precedence\n elif item == '(':\n value = value_to_output(value, output)\n\n stack_ops.append(item)\n elif item == ')':\n value = value_to_output(value, output)\n\n # flush output until ( is reached on stack\n while (stack_ops and stack_ops[-1] != '('):\n output.append(stack_ops.pop())\n # remove '('\n stack_ops.pop()\n\n # value = operand\n else:\n # concatenation of value for multidigit ones\n value += item\n # output.append(item) # this would be for one digit\n\n # flush stack to output\n value = value_to_output(value, output)\n\n while stack_ops:\n output.append(stack_ops.pop())\n\n return output", "def expand(self, expression):\n if not expression:\n return b\"\"\n if expression[0] in self._operators:\n operator, expression = expression[:1], expression[1:]\n if operator == b\"+\":\n return self._expand(expression, reserved)\n elif operator == b\"#\":\n return self._expand(expression, reserved, prefix=b\"#\")\n elif operator == b\".\":\n return self._expand(expression, prefix=b\".\", separator=b\".\")\n elif operator == b\"/\":\n return self._expand(expression, prefix=b\"/\", separator=b\"/\")\n elif operator == b\";\":\n return self._expand(expression, prefix=b\";\", separator=b\";\",\n with_keys=True, trim_empty_equals=True)\n elif operator == b\"?\":\n return self._expand(expression, prefix=b\"?\", separator=b\"&\",\n with_keys=True)\n elif operator == b\"&\":\n return self._expand(expression, prefix=b\"&\", separator=b\"&\",\n with_keys=True)\n else:\n return self._expand(expression)", "def infix(self):\n return str(self.data)", "def infixToPostfix(expr, prec):\n ops = Stack()\n postfix = []\n toks = expr.split()\n for t in toks:\n if t.isdigit():\n postfix.append(t)\n elif t == '(':\n ops.push('(')\n elif t == ')':\n op = ops.pop()\n while op != '(':\n postfix.append(op)\n op = ops.pop()\n else:\n while True:\n if ops.empty() or ops.peek() == '(':\n ops.push(t)\n break\n if prec[t] > prec[ops.peek()]:\n ops.push(t)\n break\n elif prec[t] == prec[ops.peek()]:\n postfix.append(ops.pop())\n ops.push(t)\n break\n else:\n postfix.append(ops.pop())\n while not ops.empty():\n postfix.append(ops.pop())\n return postfix", "def simplify(expression):\n q = []\n for x in expression:\n if x != \")\":\n q.append(x)\n else:\n subexp = \"\"\n while q:\n #print(q)\n c = q.pop()\n if c == \"(\":\n if len(q) and (q[-1] == \"+\" or q[-1] == \"-\"):\n sign = q.pop()\n else:\n sign = \"+\"\n subexp = signExp(subexp, sign)\n q.append(subexp)\n break\n else:\n subexp = c + subexp\n exp = \"\"\n while q:\n c = q.pop()\n exp = c + exp\n \n if len(exp) and exp[0] != \"+\" and exp[0] != \"-\":\n # Again if the first character is not a 'sign' make it a \"+\"\n exp = \"+\" + exp\n \n return exp", "def infix_to_postfix(string):\n tokenlist = string.split()\n output = []\n stack = create_stack()\n for token in tokenlist:\n if token == '(':\n stack.push(token)\n elif token == ')':\n toptoken = stack.pop()\n while toptoken != '(':\n output.append(toptoken)\n toptoken = stack.pop()\n elif token == '*' or token == '/':\n toptoken = stack.top()\n while toptoken in ['*','/']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n elif token == '+' or token == '-':\n toptoken = stack.top()\n while toptoken in ['*','/','+','-']:\n output.append(stack.pop())\n toptoken = stack.top()\n 
stack.push(token)\n else:\n output.append(token)\n while stack.length() > 0:\n output.append(stack.pop())\n space= ' '\n newstr = space.join(output)\n return newstr", "def infix_to_postfix(input_str): # postfix requires that all operators proceed after the two operands that they work on\n\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n if input_str is None: raise ValueError\n # Split input string\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # Create output list, will be fed to postfix_eval() at end\n output_list = []\n # initialize stack large enough to contain all operators\n operator_stack = Stack(len(term_list)//3+1)\n for term in term_list:\n # check for operand, if present append to output list\n if operand_present(term) is True:\n output_list.append(term)\n # check for operator\n elif operator_present(term) or term == '(' or term == ')':\n #if operand_stack.size()<2: \n # raise PostfixFormatException(\"Insufficient operands\")\n # Check for open parentheses\n if term == '(': operator_stack.push(term)\n # Check for closing parentheses, pop stack until open parentheses found\n elif term == ')':\n while 1:\n token = operator_stack.pop()\n if token != '(': \n output_list.append(token)\n else: break\n # Otherwise push to stack but pop any higher/equal order operators\n else:\n sort_operators(term, operator_stack, output_list)\n #print(operator_stack.peek())\n #else: raise PostfixFormatException(\"Invalid token\")\n #if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def infix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. 
Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n stack = Stack(30)\n if input_str == '':\n return ''\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n order = {}\n order[\"+\"] = 1\n order[\"-\"] = 1\n order[\"*\"] = 2\n order[\"/\"] = 2\n order[\"**\"] = 3\n order[\"<<\"] = 4\n order[\">>\"] = 4\n pfix_str = ''\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit() and pfix_str == \"\":\n pfix_str = pfix_str + i\n elif i in op_list:\n if not stack.is_empty():\n p = stack.peek()\n while 0 < stack.size():\n p = stack.peek()\n if p == \"(\":\n break\n if i == \"**\":\n if order[p] <= order[i]:\n break\n else:\n p1 = stack.pop()\n pfix_str = pfix_str + \" \" + p1\n elif order[p] < order[i]:\n break\n else:\n p2 = stack.pop()\n pfix_str = pfix_str + \" \" + p2\n stack.push(i)\n elif i == \"(\":\n stack.push(i)\n elif new_val.isdigit():\n pfix_str = pfix_str + \" \" + i\n elif i == \")\":\n p = stack.peek()\n while p != \"(\":\n pfix_str = pfix_str + \" \" + stack.pop()\n if not stack.is_empty():\n p = stack.peek()\n stack.pop()\n while not stack.is_empty():\n pop3 = stack.pop()\n pfix_str = pfix_str + \" \" + pop3\n return pfix_str", "def _get_postfix_notation(self):\n postfix, operators_stack = list(), list() # initialize postfix list and auxiliary stack\n\n for element in self.expression.split():\n if element in self.OPERATORS:\n if operators_stack:\n # while stack isn't empty and \"stack top\" is stronger(e.g. multiplication is stronger than addition)\n # move \"stack top\" into postfix list\n while operators_stack \\\n and operators_stack[-1] in self.OPERATORS \\\n and self.OPERATOR_WEIGHT[operators_stack[-1]] >= self.OPERATOR_WEIGHT[element]:\n postfix.append(operators_stack.pop())\n\n operators_stack.append(element)\n\n elif element == self.BRACKET_LEFT:\n operators_stack.append(element)\n\n elif element == self.BRACKET_RIGHT:\n # searching for left bracket on stack, moving \"stack Top\" to postfix list\n while operators_stack and operators_stack[-1] != self.BRACKET_LEFT:\n postfix.append(operators_stack.pop())\n operators_stack.pop() # remove left bracket\n\n else: # numbers always goes into postfix list\n postfix.append(self._get_number_from_string(element))\n\n if operators_stack: # move others stack elements to postfix list\n postfix.extend(reversed(operators_stack))\n\n return postfix", "def expression( ):#DOUBLE CHECK THIS\n\t\n\ttok = tokens.peek( )\n\tif debug: print(\"Expression: \", tok)\n\tleft = andExpr( ) #does the left side of the grammar \n\ttok = tokens.peek( )\n\twhile tok == \"or\": #checks to see if there is the token or and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = andExpr( )\n\t\tleft = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE THIS TO STRING CAUSE ITS \"or\"\n\t\ttok = tokens.peek( )\n\treturn left", "def toPostfix (self,infix):\n postfix = []\n stack = []\n # Loop over characters in the input string\n for char in infix:\n # If char is a number add it to postfix\n if isFloat(char):\n postfix.append(char)\n # If its a special number add it to postfix\n elif char in Calculator.specialNumbers:\n postfix.append(char)\n # If char is a function push it onto the stack\n elif char in Calculator.functions:\n stack.append(char)\n # If the char is a function argument separator (,) pop operators off the stack 
onto\n # postfix until ( is reached\n elif char == ',':\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # If char is an operator O\n elif char in Calculator.operators:\n # While there is an operator, P, on the top of stack\n while len(stack)>0 and stack[-1] in Calculator.operators:\n stackTop = stack[-1]\n precChar = Calculator.operators[char][1]\n precStackTop = Calculator.operators[stackTop][1]\n # If O in -?+* and its precedence is <= P, pop P off stack\n if char in Calculator.operators and precChar <= precStackTop:\n postfix.append(stack.pop())\n else:\n break\n # Push O onto stack\n stack.append(char)\n # If char is (, push it onto the stack\n elif char == '(':\n stack.append(char)\n # If char is )\n elif char == ')':\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # While top of stack isn't ( pop operators off the top of the stack\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # Pop ( off the stack, but not onto output queue\n stack.pop()\n # If the token at the top of the stack is a function pop it off the stack and add to postfix\n if len(stack) > 0 and stack[-1] in Calculator.functions:\n postfix.append(stack.pop())\n # Finally pop all the operators off the stack onto postfix\n while len(stack)>0:\n # If the operator on the top of the stack is () then there are unmatched brackets\n if stack[-1] in '()':\n return \"Unmatched Error\"\n postfix.append(stack.pop())\n return postfix", "def infixToPostfix(inFixStr):\n postFixList = []\n s = Stack()\n chList = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n prec = {\"(\": 0, \"+\": 1, \"-\": 1, \"*\": 2, \"/\": 2} # operator precedence\n\n tok = inFixStr.split(\" \")\n for ch in tok: # ch can be (,), operand, operator\n if ch in chList: # the easy case when token is an operand\n postFixList.append(ch)\n elif ch == \"(\": # easy case of (\n s.push(ch)\n elif ch == \")\": # keep popping and appending until (\n top = s.pop()\n while top != \"(\":\n postFixList.append(top)\n top = s.pop() # pop next\n else: # now we are at opeartors\n # pop higher order operators first\n while not s.isEmpty() and prec[s.peek()] > prec[ch]:\n postFixList.append(s.pop())\n s.push(ch) # push current opeartor\n\n while not s.isEmpty(): # pop everything else in the stack\n postFixList.append(s.pop())\n return \" \".join(postFixList)", "def shunt(infix):\n #convert input to a stack list\n infix=list(infix)[::-1]\n #operator stack and output list as empty lists\n opers,postfix =[],[]\n #operator precedence\n prec={'*':100,'.':90, '|':80, '/':80, '\\\\':80, ')':70, '(':60}\n\n #loop through input one character at a time\n while infix:\n #pop a character from the input\n c=infix.pop() \n #decide what to do based on character\n if c== '(':\n #push an open bracket to opers stack\n opers.append(c)\n elif c==')':\n #pop the operators stack until you find an open bracket\n while opers[-1]!='(':\n postfix.append(opers.pop())\n #get rid of '('\n opers.pop()\n elif c in prec:\n #push any operators on opers stack with hight prec to output\n while opers and prec[c] < prec[opers[-1]]:\n postfix.append(opers.pop())\n opers.append(c)\n else:\n #typically we just push the character to the output\n 
postfix.append(c)\n #pop all operators to the output\n while opers:\n postfix.append(opers.pop())\n #convert output list to string\n return ''.join(postfix)", "def __str__(self):\n unarybrackets = ['sq', 'sqrt']\n #unary operators which require brackets around their operand\n #if the operand is a leaf, we force the brackets; otherwise the operand\n #is a non-leaf expression and will create its own brackets\n outstr = ''\n if self.is_leaf():\n outstr = outstr + str(self._element)\n else:\n if self._parent and self._element not in unarybrackets:\n outstr = '('\n #unary minus is unary, but needs brackets outside the minus\n if self._leftchild:\n outstr = outstr + str(self._leftchild)\n outstr = outstr + str(self._element)\n if self._element in unarybrackets and self._rightchild.is_leaf():\n outstr = outstr + '('\n outstr = outstr + str(self._rightchild)\n if self._element in unarybrackets and self._rightchild.is_leaf():\n outstr = outstr + ')'\n if self._parent and self._element not in unarybrackets:\n outstr = outstr + ')'\n return outstr", "def postfix(self):\n return self.leftOperand.postfix() + \" \" + self.rightOperand.postfix() + \" \" + str(self.operator)", "def expression(self):\n assert not self._handle_used\n self._expression_used = True\n return self._expression", "def infixToPostfix(infix):\n postfix = []\n stackArr = []\n scanOperand = False\n hasIntegral = False\n hasDecimal = False\n currentOperand = 0\n decimal = 1\n for ch in infix:\n currentPrio = charPrio(ch)\n if currentPrio < 0: # current ele is operand\n if not (ch.isdigit() or ch == '.'):\n inputError()\n return\n if not scanOperand:\n scanOperand = True\n if ch == '.':\n if not hasIntegral:\n formatError()\n return\n hasDecimal = True\n continue\n if hasDecimal:\n if ch == '.':\n formatError()\n return\n currentOperand = currentOperand + 0.1 ** decimal * int(ch)\n decimal += 1\n else:\n if not hasIntegral:\n hasIntegral = True\n currentOperand = currentOperand * 10 + int(ch)\n elif currentPrio == 0:\n # none operation\n pass\n else:\n # and operand into postfix expression\n if scanOperand:\n scanOperand = False\n hasDecimal = False\n hasIntegral = False\n decimal = 1\n postfix.append(currentOperand)\n currentOperand = 0\n # handle operator\n if isEmpty(stackArr):\n push(stackArr, ch) # push into stack\n elif currentPrio > prio[peek(stackArr)]:\n push(stackArr, ch) # push into stack\n elif currentPrio == 1: # ')'\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)]:\n ele = pop(stackArr)\n if ele != '(':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n else:\n break\n else:\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)] and prio[peek(stackArr)] < 5 :\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n push(stackArr, ch) # push into stack\n if scanOperand:\n postfix.append(currentOperand)\n while not isEmpty(stackArr):\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n return postfix", "def prefix(self):\n return str(self.operator) + \" \" + self.leftOperand.prefix() + \" \" + self.rightOperand.prefix()", "def resolve_expression(self):\n stack = list()\n\n for element in self._get_postfix_notation():\n if element in self.OPERATORS: # get two elements from top of stack, push result of operation on stack\n operand_a = stack.pop()\n operand_b = stack.pop()\n value = self._calculate(operand_b, 
operand_a, element)\n stack.append(value)\n else: # push to stack if number\n stack.append(element)\n\n return stack.pop()", "def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix", "def toInfix(self):\n return _libsbml.Association_toInfix(self)", "def postfix_eval(postfix_expr):\n s = StackArray()\n expr = postfix_expr.split()\n for token in expr:\n if token[0] in '0123456789':\n res = token\n s.push(res)\n else: # token is operator\n op2 = s.pop()\n op2 = float(op2)\n if s.is_empty(): # token is ~\n # could also be ~ for non-empty stack\n res = -1 * op2\n else:\n op1 = s.pop()\n op1 = float(op1)\n if token == '^':\n res = op1 ** op2\n elif token == '~':\n s.push(op1)\n res = -1 * op2\n elif token == '*':\n res = op1 * op2\n elif token == '/':\n if op2 == 0:\n raise ZeroDivisionError\n else:\n res = op1 / op2\n elif token == '+':\n res = op1 + op2\n else: # token == '-'\n res = op1 - op2\n s.push(res)\n return res", "def expression_tree(postfix:str) -> Node:\n stack = deque()\n for ch in postfix:\n if ch not in {'+', '-', '*', '/', '^'}:\n stack.append(Node(ch))\n else:\n middle_node = Node(ch)\n right_node = stack.pop()\n left_node = stack.pop()\n middle_node ._right = right_node\n middle_node._left = left_node\n stack.append(middle_node)\n return stack.pop()", "def expression(self):\n return self._expression", "def expression(self, min_precedence=0):\n expr = self.primary()\n\n # Recursion is terminated based on operator precedence\n while not self.eol() and (self.cursor().token in ExpressionEvaluator.BinaryOperators) and (\n ExpressionEvaluator.BinaryOperators[self.cursor().token].prec >= min_precedence):\n\n operator = self.match_type(Operator)\n (prec, assoc) = ExpressionEvaluator.BinaryOperators[operator.token]\n\n # The ternary conditional operator is treated as a\n # special-case of a binary operator:\n # lhs \"?\"<expression>\":\" rhs\n if operator.token == \"?\":\n true_result = self.expression()\n self.match_value(Operator, \":\")\n\n # Minimum precedence for right-hand side depends on\n # associativity\n if assoc == \"LEFT\":\n rhs = self.expression(prec + 1)\n elif assoc == \"RIGHT\":\n rhs = self.expression(prec)\n else:\n raise ValueError(\"Encountered a BinaryOperator with no associativity.\")\n\n # Converting C ternary to Python requires us to swap\n # expression order:\n # - C: (condition) ? 
true_result : false_result\n # - Python: true_result if (condition) else false_result\n if operator.token == \"?\":\n condition = expr\n false_result = rhs\n expr = true_result if condition else false_result\n else:\n expr = self.__apply_binary_op(operator.token, expr, rhs)\n\n return expr", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def prep_equation(self):\n \n # This transforms the equation into an expression for sympy.\n prepped_equation = self.equation.replace(\"=\", \"-(\") + \")\"\n\n # This transforms the equation string into a sympy-readable equation.\n transformations = standard_transformations + (implicit_multiplication_application,)\n prepped_equation = parse_expr(prepped_equation, transformations=transformations)\n\n return prepped_equation", "def infix2prefix(self, lst):\n stk = []\n pre = []\n for elt in reversed(lst):\n if elt.isdigit():\n pre.append(elt)\n elif elt == \")\":\n stk.append(elt)\n elif elt == \"(\":\n while stk and stk[-1] != \")\":\n pre.append(stk.pop())\n stk.pop()\n else:\n while stk and self.precedence(elt) < self.precedence(stk[-1]): # < rather than <=\n pre.append(stk.pop())\n stk.append(elt)\n\n while stk:\n pre.append(stk.pop())\n\n pre.reverse()\n return pre", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def prefix_to_postfix(input_str): # prefix requires that all operators precede the two operands that they work on\n\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. 
Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n if input_str is None: raise ValueError\n # split input string into list\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # initialize output list\n output_list = []\n #print(\"OUT SIZE \", len(output_list))\n # initialize operator stack\n operator_stack = Stack(len(term_list)//3+1)\n for i in range(len(term_list)):\n term = term_list[i]\n # prefix should begin with an operator otherwise raise Exception\n if i == 0:\n if operator_present(term) is True: operator_stack.push(term)\n else: raise PostfixFormatException()\n # Check for operator\n elif operator_present(term): \n operator_stack.push(term)\n # check for operand\n elif operand_present(term):\n output_list.append(term)\n # if previous two terms in output list were operands, pop operator stack to output list once\n if operand_present(term_list[i-1]):\n output_list.append(operator_stack.pop())\n # for every three operands there should be an additional operator\n if operand_present(term_list[i-3]) and operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str", "def calculator(infix_expr):\n\n # Assign precedence values to operators\n prec = {}\n prec['^'] = 4\n prec['*'] = 3\n prec['/'] = 3\n prec['+'] = 2\n prec['-'] = 2\n prec['('] = 1\n\n # Instantiate stacks\n operand_stack = Stack()\n operator_stack = Stack()\n\n try:\n token_list = infix_expr.split()\n logging.debug(\"token_list = {}\".format(token_list))\n except:\n sys.exit(1)\n\n for token in token_list:\n logging.debug(\"token = {}\".format(token))\n if token in '0123456789':\n operand_stack.push(int(token))\n logging.debug(\"operand_stack.push = {}\".format(token))\n elif token == '(':\n operator_stack.push(token)\n logging.debug(\"operator_stack.push = {}\".format(token))\n elif token == ')':\n logging.debug(\"token = {}\".format(token))\n operator_token = operator_stack.pop()\n logging.debug(\"operator_stack.pop = {}\".format(operator_token))\n while operator_token != '(':\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"while operator_token != '(':\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_token = operator_stack.pop()\n logging.debug(\"new operator_token = {}\".format(operator_token))\n elif token in '^*/+-':\n while (not operator_stack.isEmpty()) and \\\n (prec[operator_stack.peek()] >= prec[token]):\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"Operator - While:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_stack.push(token)\n logging.debug(\"operator_stack.push(): {}\".format(token))\n else:\n logging.debug(\"else.... 
exiting....\")\n sys.exit(1)\n\n # Use all remaining operators\n if not operator_stack.isEmpty():\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n logging.debug(\"Remaining Operators:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operand_stack.push(result)\n\n return operand_stack.pop()", "def bracket(expr):\n\n if isinstance(expr, utils.LazyString):\n return utils.LazyString(\"(%s)\", expr)\n elif utils.is_integer_type(expr) or expr.isdigit() or is_simple_name(expr):\n return str(expr)\n return \"(\" + expr + \")\"", "def calculate_prefix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate prefix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\tfor e in reversed(elements):\n\t\t\tif e.isdigit():\n\t\t\t\tstack.append(int(e))\n\t\t\telse:\n\t\t\t\t# this is an operator\n\t\t\t\tif (len(stack) < 2):\n\t\t\t\t\tlogger.info(\"invalid input\")\n\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\toperand2 = stack.pop()\n\t\t\t\t\tif e == \"+\":\n\t\t\t\t\t\tresult = operand1 + operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"-\":\n\t\t\t\t\t\tresult = operand1 - operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"*\":\n\t\t\t\t\t\tresult = operand1 * operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"/\":\n\t\t\t\t\t\tresult = operand1 / operand2\n\t\t\t\t\t\tstack.append(int(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.exception(\"Unrecognized operator\")\n\t\t\t\t\t\traise Exception(\"Not a valid operator\")\n\t\treturn float(stack[0])", "def operator_lhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator_rhs(self.operator.forward(inp))", "def eval_postfix(s):\n stack = Stack()\n for x in s.split(): # rozděl 's' dle mezer\n if x == '+':\n stack.push(stack.pop() + stack.pop())\n elif x == '-':\n stack.push(-stack.pop() + stack.pop())\n elif x == '*':\n stack.push(stack.pop() * stack.pop())\n elif x == '/':\n second = stack.pop()\n stack.push(stack.pop() / second)\n else:\n stack.push(float(x))\n return stack.pop()", "def infix_to_tree(self, expr: str, delimeter: str = None, node_name: str = \"base\") -> Tree:\n\n # Convert expr to prefix\n prefix = self.infix_to_prefix(expr)\n\n # Return prefix_to_tree of this expr\n return self.prefix_to_tree(prefix, delimeter, node_name)", "def toInfix(self, usingId=False):\n return _libsbml.FbcAnd_toInfix(self, usingId)", "def expr(s):\n if isinstance(s, Expr): return s\n if isnumber(s): return Expr(s)\n ## Replace the alternative spellings of operators with canonical spellings\n s = s.replace('==>', '>>').replace('<==', '<<')\n s = s.replace('<=>', '%').replace('=/=', '^')\n ## Replace a symbol or number, such as 'P' with 'Expr(\"P\")'\n s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr(\"\\1\")', s)\n ## Now eval the string. 
(A security hole; do not use with an adversary.)\n return eval(s, {'Expr':Expr})", "def eval_postfix(s):\n stack = Stack()\n \n s = s.split()\n for i in s:\n \tif operator(i) == False:\n \t\tstack.push(int(i))\n \telse:\n \t\tb = stack.pop()\n \t\ta = stack.pop()\n \t\tresult = evaluate(a, i, b)\n \t\tstack.push(result)\n return stack.pop()", "def expression_phrase(self):\n return self._expression_phrase", "def test_parentheses_expr(self):\n self.assertEqual(\"(a(b(c(d))))\", grammar._PARENTHESES_EXPR.parseString(\"(a(b(c(d))))\")[0])", "def get_formula_in_list(self):\n return tree_to_string(self.expression)", "def add_concat(infix_regex: str):\n\n result = \"\"\n\n # we use None to symbolize the start of the string\n cant_concat_from = ['(', '|', None]\n cant_concat_to = ['*', '+', ')', '|']\n last_char = None\n\n for char in infix_regex:\n if char not in cant_concat_to and last_char not in cant_concat_from:\n result += '.'\n result += char\n last_char = char\n\n return result", "def eval_expr1(expression):\n\n output = []\n stack = []\n tokens = list(tokenize(expression))\n\n for token in tokens:\n if token == \"(\":\n stack.append(token)\n elif token == \")\":\n while stack and stack[-1] != \"(\":\n op = stack.pop(-1)\n output.append(op)\n op = stack.pop(-1)\n assert op == \"(\"\n elif token in [\"+\", \"*\"]:\n if stack and stack[-1] in [\"+\", \"*\"]:\n op = stack.pop(-1)\n output.append(op)\n\n stack.append(token)\n elif isinstance(token, int):\n output.append(token)\n else:\n raise NotImplementedError(token)\n\n # print(token, output, stack)\n\n while stack and stack[-1] in [\"+\", \"*\"]:\n op = stack.pop(-1)\n output.append(op)\n\n assert not stack\n\n return eval_ops(output)", "def simplify(self, expr):\n return sy.simplify(expr)", "def infix_to_postfix(text: str) -> list:\n \n def unfold_block(text: str) -> list:\n return infix_to_postfix(text) if text[0] == \"(\" else [text]\n\n grouped_raw = group_operations(text)[0]\n if not (\"+\" in grouped_raw or \"-\" in grouped_raw or \"*\" in grouped_raw or \"/\" in grouped_raw):\n grouped = grouped_raw\n stack = [grouped]\n else:\n grouped = group_operations(text)[0][1:-1]\n first_block, operator, second_block = text_to_parts(grouped)\n first_block = unfold_block(first_block)\n second_block = unfold_block(second_block)\n stack = [*first_block, *second_block, operator]\n return stack", "def expression(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> Optional[str]:\n return pulumi.get(self, \"expression\")", "def expand_parentheses(sent):\n return SentenceTreeParser(sent).expand_parentheses()", "def match_expr(self, precedence: int) -> \"AbstractNode\":\n tkn = self.lexer.tkn\n # This line is solely to satisfy mypy.\n left = AbstractNode()\n if tkn.type == Token.AT:\n self.lexer.next_token()\n address = self.match_expr(PREC_PREFIX)\n left = MemoryNode(address)\n elif tkn.type == Token.INT:\n try:\n left = IntNode(int(tkn.value, base=0))\n except ValueError:\n raise SyntaxError(\"invalid integer literal: {}\".format(tkn))\n else:\n self.lexer.next_token()\n elif tkn.type == Token.MINUS:\n self.lexer.next_token()\n left = PrefixNode(\"-\", self.match_expr(PREC_PREFIX))\n elif tkn.type == Token.REGISTER:\n try:\n left = RegisterNode(register_to_index(tkn.value))\n except HERAError:\n raise SyntaxError(\"{} is not a valid register\".format(tkn.value))\n self.lexer.next_token()\n elif tkn.type == Token.SYMBOL:\n left = SymbolNode(tkn.value)\n self.lexer.next_token()\n elif tkn.type == 
Token.LPAREN:\n self.lexer.next_token()\n left = self.match_expr(PREC_LOWEST)\n if self.lexer.tkn.type != Token.RPAREN:\n self.unexpected(self.lexer.tkn)\n self.lexer.next_token()\n else:\n self.unexpected(tkn)\n\n infix_tkn = self.lexer.tkn\n while infix_tkn.type in PREC_MAP and precedence < PREC_MAP[infix_tkn.type]:\n infix_precedence = PREC_MAP[infix_tkn.type]\n self.lexer.next_token()\n right = self.match_expr(infix_precedence)\n left = InfixNode(infix_tkn.value, left, right)\n infix_tkn = self.lexer.tkn\n return left", "def signExp(expression, sign):\n arr = list(expression)\n if sign == \"-\":\n for i in range(len(expression)):\n # Invert the sign if the 'sign' is '-'\n if arr[i] == \"+\":\n arr[i] = \"-\"\n elif arr[i] == \"-\":\n arr[i] = \"+\"\n # If the first characters is not a sign, it is a '+' and we need to \n # add it to the subexpression\n if arr[0] != \"+\" and arr[0] != \"-\":\n arr.insert(0, sign)\n return \"\".join(x for x in arr)", "def postfix(self,Line):\r\n\r\n stak = []\r\n expression = []\r\n infix = []\r\n i=0\r\n while( i <(len(Line))):\r\n if (Line[i] == '(') or (Line[i] == '['):\r\n if len(stak) > 0:\r\n if (Line[i] == '[') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"dup\") or (stak[len(stak) - 1] == \"sizeof\") or (stak[len(stak) - 1] == \"type\")):\r\n return False\r\n if len(stak) > 0:\r\n if (Line[i] == '(') and ((stak[len(stak) - 1] == \"lengthof\") or (stak[len(stak) - 1] == \"sizeof\")):\r\n return False\r\n if (len(stak) == 0) and (Line[i] == '('):\r\n return False\r\n stak.append(Line[i])\r\n elif (Line[i] == ')') or (Line[i] == ']'):\r\n if len(stak) == 0:\r\n return False\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if (stak[j] == '(') and (Line[i] == ')'):\r\n break\r\n elif (stak[j] == '(') and (Line[i] == ']'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ')'):\r\n return False\r\n elif (stak[j] == '[') and (Line[i] == ']'):\r\n break\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if j < 0:\r\n break\r\n\r\n stak = stak[:-1]\r\n if (len(stak) > 0) and (stak[stak.__len__() - 1] == 'dup'):\r\n expression.append(stak[stak.__len__() - 1])\r\n stak = stak[:-1]\r\n elif Line[i] == ',':\r\n if expression.__len__() == 0:\r\n return False\r\n if stak.__len__() != 0:\r\n j = stak.__len__() - 1\r\n while (j >= 0):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n if (expression.__len__() > 0)and(expression!=[\"dup\"]):\r\n infix.append(expression)\r\n expression = []\r\n elif Line[i][0].isdecimal():\r\n if Line[i][len(Line[i]) - 1] == 'h':\r\n tmp = extra_functions.is_hexa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n\r\n elif Line[i][len(Line[i]) - 1] == 'o':\r\n tmp = extra_functions.is_octa(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'b':\r\n tmp = extra_functions.is_binary(Line[i])\r\n if not tmp:\r\n return False\r\n expression.append(tmp)\r\n elif Line[i][len(Line[i]) - 1] == 'd':\r\n tmp = int(Line[i][:-1], 10)\r\n expression.append(tmp)\r\n elif Line[i].isdecimal():\r\n expression.append(int(Line[i]))\r\n else:\r\n return False\r\n elif (Line[i] == \"lengthof\") or (Line[i] == \"sizeof\") or (Line[i] == \"type\") or (Line[i] == \"dup\"):\r\n if (Line[i] == \"dup\"):\r\n if stak.__len__()>0:\r\n j = stak.__len__() - 1\r\n while (j >= 0):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n S = []\r\n L = []\r\n i = 1 + i\r\n while (i < len(Line)):\r\n if 
(Line[i] == '(') or (Line[i] == '['):\r\n S.append(Line[i])\r\n elif (Line[i] == ')') or (Line[i] == ']'):\r\n if len(S) == 0:\r\n return False\r\n j = len(S) - 1\r\n while j >= 0:\r\n if (S[j] == '(') and (Line[i] == ')'):\r\n break\r\n elif (S[j] == '(') and (Line[i] == ']'):\r\n return False\r\n elif (S[j] == '[') and (Line[i] == ')'):\r\n return False\r\n elif (S[j] == '[') and (Line[i] == ']'):\r\n break\r\n S = S[:-1]\r\n j = j - 1\r\n if j < 0:\r\n break\r\n S = S[:-1]\r\n\r\n L.append(Line[i])\r\n if len(S) == 0:\r\n break\r\n i += 1\r\n if L.__len__() > 1:\r\n if (L[L.__len__() - 1] == ')') and (L[0] == '('):\r\n L = L[:-1]\r\n L = L[1:]\r\n else:\r\n return False\r\n else:\r\n return False\r\n tmp = self.postfix(L)\r\n i = i + 1\r\n if tmp != False:\r\n tmp1 = self.Calc_infix(expression)\r\n if tmp1 != False:\r\n for j in range(0, tmp1[0]):\r\n infix = infix + tmp\r\n else:\r\n return False\r\n else:\r\n return False\r\n expression=[\"dup\"]\r\n continue\r\n stak.append(Line[i])\r\n else:\r\n if (Line[i] == '*') | (Line[i] == '-') | (Line[i] == '/') | (Line[i] == '+'):\r\n if len(stak) > 0:\r\n j = len(stak) - 1\r\n while (j >= 0):\r\n if ((stak[j] == '+') | (stak[j] == '-')) & ((Line[i] == '+') | (Line[i] == '-')):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == '+') | (stak[j] == '-')) & ((Line[i] == '*') | (Line[i] == '/')):\r\n break\r\n elif ((stak[j] == '*') | (stak[j] == '/')) & ((Line[i] == '*') | (Line[i] == '/')):\r\n\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif ((stak[j] == '*') | (stak[j] == '/')) & ((Line[i] == '+') | (Line[i] == '-')):\r\n\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n elif (stak[j] == 'dup') | (stak[j] == 'lengthof') | (stak[j] == 'type') | (stak[j] == 'sizeof'):\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n else:\r\n break\r\n j = j - 1\r\n stak.append(Line[i])\r\n else:\r\n expression.append(Line[i])\r\n i += 1\r\n\r\n j = len(stak) - 1\r\n while j >= 0:\r\n if (stak[j] == '(') or (stak[j] == '['):\r\n return False\r\n expression.append(stak[j])\r\n stak = stak[:-1]\r\n j = j - 1\r\n\r\n if (expression.__len__() > 0)and(expression!=[\"dup\"]):\r\n infix.append(expression)\r\n return infix", "def preprocess(expression_string: str):\n return expression_string.replace(\"(\", \"(X\")", "def post_fix(expr):\n if expr[:3] == \"8 4\":\n return 54\n elif expr[:3] == \"5 6\":\n return 32\n elif expr[:3] == \"1 1\":\n return 2\n \"\"\"normal solution\"\"\"\n lst = expr.split()\n stack = []\n for e in lst:\n if e in \"+-*/\":\n b = stack.pop()\n a = stack.pop()\n stack.append(str(eval(\"{}{}{}\".format(a, e, b))))\n else:\n stack.append(e)\n return round(float(stack.pop()))", "def addExpr( ):\n\n\ttok = tokens.peek( )\n\tif debug: print (\"addExpr: \", tok)\n\tleft = term( )\n\ttok = tokens.peek( )\n\twhile tok == \"+\" or tok == \"-\":\n\t\ttokens.next()\n\t\tright = term( )\n\t\tleft = BinaryExpr( tok, left, right )\n\t\ttok = tokens.peek( )\n\treturn left", "def make_positive(expression: Expr) -> Expr:\n if expression.op == '~':\n new_expression = Expr(expression.args[0].op, *expression.args[0].args)\n return new_expression\n return expression", "def Addition(self, paren=False):\n left = self.Term(paren)\n while self.currtok[1].name in {\"PLUS\", \"MINUS\"}:\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Term(paren)\n left = BinaryExpr(op, left, right, paren)\n return left", "def toInfix(self, usingId=False):\n return _libsbml.FbcOr_toInfix(self, usingId)", "def 
exp(self):\n return type(self)(self.parent(), self._simplify(self._express.exp()))", "def andExpr( ): #DOUBLE CHECK THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"andExpr: \", tok)\n\tleft = relationalExpr( ) #does the left side of the grammar\n\ttok = tokens.peek( )\n\twhile tok == \"and\": #checks to see if there is the token \"and\" and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = relationalExpr( )\n\t\tleft = BinaryExpr(tok, left, right)#MIGHT HAVE TO CHANGE TO STRING \n\t\ttok = tokens.peek( )\n\treturn left", "def replace_operators(self, instr):\n # change ++, -- to add(1), sub(1)\n instr = re.sub(r\"\\+\\+\", \".add(1)\", instr)\n instr = re.sub(r\"--\", \".sub(1)\", instr)\n\n m1 = re.search(r\"[+\\-*/]=\", instr)\n result = \"\"\n if m1:\n # handle the string with +=, -=, *=. /=\n v = instr[: m1.start()].rstrip(\" \")\n v1 = v.strip(\" \")\n expressions = [v1, m1.group()[: 1], \"(\", instr[m1.end():].strip().strip(\";\"), \");\"]\n instr = v + \"= \" + \" \".join(expressions)\n\n # split by !, &&, ||\n equations = re.split(r\"(!|&&|\\|\\||)\", instr)\n for equation in equations:\n # split by <=, >=, ==, !=, =\n expressions = re.split(r\"([<>=!]*=)\", equation)\n if len(expressions) == 1:\n result += equation\n else:\n for expression in expressions:\n if re.search(r\"[+\\-*/]\", expression):\n # with math operators\n # 0.exclude ;\n rc = \"\"\n pos = expression.find(';')\n if pos != -1:\n rc = expression[pos:]\n expression = expression[:pos]\n\n # 1.exclude independent ( or )\n lbc = expression.count(\"(\")\n rbc = expression.count(\")\")\n lc = \"\"\n if lbc > rbc:\n # ( is more than )\n pos = expression.replace('(', 'X', lbc - rbc - 1).find('(')\n lc = expression[: pos + 1]\n expression = expression[pos + 1:]\n else:\n if lbc < rbc:\n # ( is less than )\n pos = 'X'.join(expression.rsplit(')', rbc - lbc - 1)).rfind(')')\n rc = expression[pos:] + rc\n expression = expression[:pos]\n\n # 2.change normal notation to RPN, in order to change math operators to SafeMath operators\n # 3.change RPN to normal notation\n result += lc + self.rpn_to_nn(self.nn_to_rpn(expression)) + rc\n else:\n result += expression\n\n return result", "def postfix_eval(input_str):\n\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. 
\n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n if input_str is None: raise PostfixFormatException\n # create list of operands and operators\n term_list = input_str.split()\n # initialize stack large enough to contain all operands\n operand_stack = Stack(2*len(term_list)//3+1)\n # iterate over term_list\n for term in term_list:\n # check for operatorm, evaluate operators on A & B if True\n if operator_present(term) is True:\n if operand_stack.size()<2: \n raise PostfixFormatException(\"Insufficient operands\")\n B = operand_stack.pop()\n A = operand_stack.pop()\n operand_stack.push(\n calculate(\n A, # A\n B, # B\n term) # operator\n )\n # check for operand, push to stack if True\n elif operand_present(term) is True:\n operand_stack.push(term)\n else: raise PostfixFormatException(\"Invalid token\")\n if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n return operand_stack.pop()", "def make_flat(self):\n\n if type(self.exp) == str:\n if not self.closure or self.exp == 'ϵ':\n return self.exp\n elif len(self.exp) == 1:\n return self.exp + self.closure\n else:\n return '(' + self.exp + ')' + self.closure\n else:\n flat_exp = ''.join( str(e) for e in self.exp )\n if not self.closure or flat_exp == 'ϵ':\n return flat_exp\n elif len(flat_exp) == 1:\n return flat_exp + self.closure\n else:\n return '(' + flat_exp + ')' + self.closure", "def eval_expr2(expression):\n\n output = []\n stack = []\n tokens = list(tokenize(expression))\n\n precedence = {\n \"*\": 10,\n \"+\": 20,\n }\n\n for token in tokens:\n if token == \"(\":\n stack.append(token)\n elif token == \")\":\n while stack and stack[-1] != \"(\":\n op = stack.pop(-1)\n output.append(op)\n op = stack.pop(-1)\n assert op == \"(\"\n elif token in [\"+\", \"*\"]:\n while (\n stack\n and stack[-1] in [\"+\", \"*\"]\n and precedence[token] < precedence[stack[-1]]\n ):\n op = stack.pop(-1)\n output.append(op)\n\n stack.append(token)\n elif isinstance(token, int):\n output.append(token)\n else:\n raise NotImplementedError(token)\n\n # print(token, output, stack)\n\n while stack and stack[-1] in [\"+\", \"*\"]:\n op = stack.pop(-1)\n output.append(op)\n\n assert not stack\n\n return eval_ops(output)", "def parens(ast_node: Expression, parent_node=None) -> str:\n if (\n (isinstance(ast_node, (Number, Variable, DifferentialProgram))) # DEs commute\n or (isinstance(ast_node, Times) and isinstance(parent_node, (Plus, Times, Neg)))\n or (isinstance(ast_node, Neg) and isinstance(parent_node, Plus))\n ):\n return pp(ast_node)\n elif isinstance(ast_node, (Term, Formula)):\n return \"(\" + pp(ast_node) + \")\"\n elif isinstance(ast_node, Program):\n return \"{\" + pp(ast_node) + \"}\"\n else:\n raise MatchError(\n f\"Do not know how to add parens to .{ast_node}. 
of type {ast_node.__class__}\"\n )", "def relationalExpr( ):#MAKE SURE I USED THE RIGHT LOGIC FOR THIS\n\n\ttok = tokens.peek( )\n\tif debug: print(\"relationalExpr: \", tok)\n\tleft = addExpr( )\n\texpr = \"\"\n\ttok = tokens.peek( )\n\tif tok in relations:\n\t\trel = relation( ) # expecting a relation to start off \n\t\tright = expression( ) # if there is a relation we expect there to be an expression to the right of the relation\n\t\texpr = BinaryExpr( rel, left, right )\n\t\treturn expr #fix this for syntax tree maybe\n\n\treturn left", "def Expression(self, paren=False):\n left = self.Conjunction(paren)\n while self.currtok[1].name == \"OR\":\n op = self.currtok[0]\n self.currtok = next(self.tg)\n right = self.Conjunction()\n left = BinaryExpr(op, left, right, paren)\n return left", "def Calc_infix(self,infix):\r\n\r\n stak=[]\r\n for i in range(0, len(infix)):\r\n if (infix[i] == '+') or (infix[i] == '-') or (infix[i] == '*') or (infix[i] == '/'):\r\n if len(stak) > 1:\r\n tmp = self.Check_is_valid_data(stak[len(stak) - 1])\r\n tmp1 = self.Check_is_valid_data(stak[len(stak) - 2])\r\n if (tmp == -1) or (tmp1 == -1):\r\n return False\r\n if tmp == -2:\r\n tmp = stak[len(stak) - 1]\r\n elif tmp == -3:\r\n tmp = extra_functions.convert_string(stak[len(stak) - 1])\r\n\r\n else:\r\n tmp = tmp[0]\r\n\r\n if tmp1 == -2:\r\n tmp1 = stak[len(stak) - 2]\r\n elif tmp1 == -3:\r\n\r\n tmp1 = extra_functions.convert_string(stak[len(stak) - 2])\r\n\r\n else:\r\n tmp1 = tmp1[0]\r\n\r\n stak = stak[:-1]\r\n if infix[i] == '-':\r\n stak[len(stak) - 1] = tmp - tmp1\r\n elif infix[i] == '+':\r\n stak[len(stak) - 1] = tmp + tmp1\r\n elif infix[i] == '*':\r\n stak[len(stak) - 1] = tmp * tmp1\r\n elif infix[i]== '/':\r\n if tmp1 != 0:\r\n stak[len(stak) - 1] = int(tmp / tmp1)\r\n else:\r\n return False\r\n else:\r\n if (infix[i] == '+') or (infix[i] == '-'):\r\n\r\n tmp = self.Check_is_valid_data(stak[len(stak) - 1])\r\n if tmp == -1:\r\n return False\r\n elif tmp == -2:\r\n tmp = stak[len(stak) - 1]\r\n elif tmp == -3:\r\n\r\n tmp = extra_functions.convert_string(stak[len(stak) - 1])\r\n\r\n else:\r\n tmp = tmp[0]\r\n if infix[i] == '-':\r\n stak[0] = tmp * -1\r\n else:\r\n stak[0] = tmp\r\n else:\r\n return False\r\n elif (infix[i] == 'lengthof') or (infix[i]== 'sizeof') or (infix[i] == 'type'):\r\n if len(stak) > 0:\r\n tmp = self.Check_is_valid_data(stak[len(stak) - 1])\r\n if (((tmp == 0) or (tmp == -1) or (tmp == -2) or (tmp == -3)) and ((infix[i]== 'lengthof') or (infix[i] == 'sizeof'))):\r\n return False\r\n elif ((tmp == 0) or (tmp == -1) or (tmp == -2) or (tmp == -3)) and (infix[i] == 'type'):\r\n stak[len(stak) - 1] = 0\r\n else:\r\n stak = stak[:-1]\r\n tmp1 = self.Type(tmp[1])\r\n\r\n if infix[i] == 'lengthof':\r\n stak.append(int(tmp[2] / tmp1))\r\n elif infix[i] == 'sizeof':\r\n stak.append(tmp[2])\r\n else:\r\n stak.append(tmp[0])\r\n else:\r\n return False\r\n else:\r\n if infix[i] == '?':\r\n stak.append(0)\r\n else:\r\n tmp = self.Check_is_valid_data(infix[i])\r\n if self.Data_types.__contains__(infix[i]):\r\n stak.append(self.Type(infix[i]))\r\n continue\r\n if tmp == -1:\r\n return False\r\n else:\r\n stak.append(infix[i])\r\n\r\n if stak.__len__() == 1:\r\n return stak\r\n return False", "def solve_equation_same_precedence(eq, verbose=False):\n tokens = tokenize(eq)\n if verbose:\n print(f\"eq: {tokens}\")\n\n stack = []\n ops = {\n None: do_push,\n \"(\": do_push,\n \")\": do_parenthesis,\n \"+\": do_addition,\n \"*\": do_multiplication,\n }\n\n for t in tokens:\n if isinstance(t, 
int):\n op = stack[-1] if len(stack) else None\n ops[op](stack, t)\n elif t == \"+\" or t == \"*\" or t == \"(\":\n stack.append(t)\n elif t == \")\":\n ops[\")\"](stack, t)\n # solve pre parenthesis operators\n if len(stack) > 2:\n v = stack.pop()\n assert isinstance(v, int)\n ops[stack[-1]](stack, v)\n else:\n assert False, f\"fail token: {t}\"\n\n if verbose:\n print(f\"stack: {stack}\")\n\n assert len(stack) == 1\n return stack[0]", "def operand_to_str(self, operand):\n s = str(operand)\n if s.startswith('(') and s.endswith(')'):\n return s\n if (isinstance(operand, Literal) or\n isinstance(operand, Attr) or \n isinstance(operand, Star)):\n return s\n return \"(%s)\" % s", "def visit_expression(self, node, children):\n if self.debug:\n print(\"Expression {}\".format(children))\n expr = 0\n start = 0\n # Check for unary + or - operator\n if text(children[0]) in \"+-\":\n start = 1\n\n for i in range(start, len(children), 2):\n if i and children[i - 1] == \"-\":\n expr -= children[i]\n else:\n expr += children[i]\n\n if self.debug:\n print(\"Expression = {}\".format(expr))\n\n return expr", "def get_invntt_operator(self):\n return self[0].get_invntt_operator()", "def funcOpExchange(expstr):\n funcOpDict = expr.getFuncOpDict() \n for funcstr in funcOpDict:\n idx = expstr.find(funcstr)\n if idx >= 0:\n #if we find a function string at idx\n if (idx == 0 or not expstr[idx-1].isalpha()) and expstr[idx+len(funcstr)] == '(':\n fstart = idx\n fstop = 0\n rest = expstr[idx:]\n pdepth = 0\n for i,c in enumerate(rest):\n if c == '(':\n pdepth += 1\n if c == ')':\n pdepth -= 1\n if pdepth == 0:\n fstop = idx+i+1\n break\n start = expstr[:fstart]\n middle = expstr[fstart:fstop]\n end = expstr[fstop:]\n args = ['('+funcOpExchange(exp)+')' for exp in funcargs(middle)]\n if len(args) == 1:\n args.append('0')\n expstr = start+funcOpDict[funcstr].join(args)+funcOpExchange(end)\n return expstr", "def _generate_symbols(self):\n\n def infix(id, bp):\n def led(self, left):\n self.first = left\n self.second = self.parent.expression(bp)\n return self\n\n self.symbol_factory(id, bp).led = led\n\n def prefix(id, bp):\n def nud(self):\n self.first = self.parent.expression(bp)\n return self\n\n self.symbol_factory(id, bp).nud = nud\n\n def infixr(id, bp):\n def led(self, left):\n self.first = left\n self.second = self.parent.expression(bp - 1)\n return self\n\n self.symbol_factory(id, bp).led = led\n\n def paren(id):\n def nud(self):\n expr = self.parent.expression()\n self.parent._advance(\"RIGHT_PAREN\")\n return expr\n\n self.symbol_factory(id).nud = nud\n\n paren(\"LEFT_PAREN\")\n self.symbol_factory(\"RIGHT_PAREN\")\n self.symbol_factory(\"END\")\n self.symbol_factory(\":\")\n self.symbol_factory(\"NEWLINE\")\n self.symbol_factory(\"INDENT\")\n self.symbol_factory(\"DEDENT\")\n\n # numbers denote order of operations\n infix(\"+\", 10)\n infix(\"-\", 10)\n infix(\"*\", 20)\n infix(\"/\", 20)\n infix(\"==\", 5)\n infix(\">\", 5)\n infix(\"<\", 5)\n infix(\"&\", 4)\n infix(\"|\", 3)\n infix(\",\", 1)\n infix(\"::\", 1)\n \n infixr(\"=\", 1) # assignment is a little different from others.\n\n # example +4 , -2 \n prefix(\"+\", 100)\n prefix(\"-\", 100)\n\n def literal(id):\n self.symbol_factory(id).nud = lambda self: self\n\n for l in [\"NUMBER\", \"FLOAT\", \"NAME\", \"STRING\", \"BOOL\"]:\n literal(l)\n\n def statement(id, std):\n self.symbol_factory(id).stmt_begin = True\n self.symbol_factory(id).std = std\n\n def if_statement(self):\n self.first = self.parent.expression()\n self.parent._advance([\":\"])\n 
self.parent._advance([\"NEWLINE\"])\n self.second = self.parent.Block()\n if self.parent.token.id == \"else\":\n self.parent._advance([\"else\"])\n self.parent._advance([\":\"])\n self.parent._advance([\"NEWLINE\"])\n self.third = self.parent.Block()\n return self\n\n def let_statement(self):\n self.first = self.parent.expression()\n self.parent._advance([\"NEWLINE\"])\n return self\n\n def print_statement(self):\n self.parent._advance([\"LEFT_PAREN\"])\n self.first = self.parent.expression()\n self.parent._advance([\"RIGHT_PAREN\"])\n self.parent._advance([\"NEWLINE\"])\n return self\n\n def while_statement(self):\n self.parent._advance([\"LEFT_PAREN\"])\n self.first = self.parent.expression()\n self.parent._advance([\"RIGHT_PAREN\"])\n self.parent._advance([\":\"])\n self.parent._advance([\"NEWLINE\"])\n self.second = self.parent.Block()\n return self\n\n def func_statement(self):\n arg_list = []\n\n self.first = self.parent.expression()\n self.parent._advance([\"LEFT_PAREN\"])\n self.second = self.parent.expression()\n self.parent._advance([\"RIGHT_PAREN\"])\n self.parent._advance([\":\"])\n self.parent._advance([\"NEWLINE\"])\n self.third = self.parent.Block()\n return self\n\n statement(\"if\", if_statement)\n statement(\"let\", let_statement)\n statement(\"print\", print_statement)\n statement(\"while\", while_statement)\n statement(\"fn\", func_statement)", "def exeval(expression): \n if len(expression) <= 3: #Assuming no spaces (\" \") between each value given in the expression\n if expression[0] == \"+\":\n return float(expression[1]) + float(expression[2])\n elif expression[0] == \"-\":\n return float(expression[1]) - float(expression[2])\n else:\n if expression[0] == \"+\":\n return float(expression[1]) + exeval(expression[2:])\n elif expression[0] == \"-\":\n return float(expression[1]) - exeval(expression[2:])", "def expression(self):\n\n result = u\"{}({}\".format(self.function.lower(),\n self.metric_name)\n\n if self.dimensions_str:\n result += u\"{{{}}}\".format(self.dimensions_str)\n\n if self.deterministic:\n result += u\", deterministic\"\n\n if self.period:\n result += u\", {}\".format(str(self.period))\n\n result += u\")\"\n\n result += u\" {} {}\".format(self.operator,\n str(self.threshold))\n\n if self.periods:\n result += u\" times {}\".format(str(self.periods))\n\n return result", "def is_operator(node):\n return node.startswith('$')", "def my_operator(self):\n return self._my_operator" ]
[ "0.71687776", "0.70765674", "0.70678484", "0.70356464", "0.69882065", "0.6876605", "0.6813432", "0.6722101", "0.6542399", "0.6502566", "0.648048", "0.64614725", "0.645874", "0.63886565", "0.63599867", "0.632505", "0.626854", "0.6246395", "0.62401587", "0.62371445", "0.6197802", "0.61815476", "0.60982955", "0.6090422", "0.60867715", "0.60864705", "0.6082653", "0.6081127", "0.60450387", "0.60127366", "0.6009722", "0.5981038", "0.5969826", "0.59647405", "0.5932378", "0.5927162", "0.591695", "0.58953315", "0.5888091", "0.5873874", "0.58631164", "0.5850185", "0.5830931", "0.57970023", "0.578729", "0.578729", "0.578729", "0.57850724", "0.5730272", "0.5721898", "0.5721898", "0.5709315", "0.5702036", "0.56509733", "0.5643691", "0.56114924", "0.5599088", "0.5582529", "0.55552745", "0.55448693", "0.55284446", "0.5515559", "0.5514243", "0.55128187", "0.5505342", "0.55044943", "0.544835", "0.5444637", "0.54396904", "0.54317963", "0.54225534", "0.5421338", "0.54172283", "0.5410805", "0.5399069", "0.53797454", "0.53777605", "0.5368753", "0.5367368", "0.5366278", "0.53607094", "0.53503275", "0.53490895", "0.53442824", "0.53298396", "0.53269464", "0.5310365", "0.52855676", "0.52473944", "0.52445966", "0.52006584", "0.5198895", "0.51987886", "0.51933974", "0.51878595", "0.517832", "0.5168578", "0.5166424", "0.51621413", "0.51567626" ]
0.81181073
0
Returns the expression in postfix form.
def postfix(self): return self.leftOperand.postfix() + " " + self.rightOperand.postfix() + " " + str(self.operator)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def expression(self) -> str:\n return pulumi.get(self, \"expression\")", "def expr(self):\n return self._express", "def expression(self):\n return self._expression", "def expression(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"expression\")", "def expression(self) -> Optional[str]:\n return pulumi.get(self, \"expression\")", "def expression_phrase(self):\n return self._expression_phrase", "def expression_term(self):\n return self._expression_term", "def expression(self):\n assert not self._handle_used\n self._expression_used = True\n return self._expression", "def expression(self) -> Expression:\n ...", "def eval_espresso(self):\n esp_form, = espresso_exprs(self.formula.to_dnf())\n return esp_form", "def parse(self):\n return self.expr()", "def exp(self):\n return type(self)(self.parent(), self._simplify(self._express.exp()))", "def expression(self):\n\n result = u\"{}({}\".format(self.function.lower(),\n self.metric_name)\n\n if self.dimensions_str:\n result += u\"{{{}}}\".format(self.dimensions_str)\n\n if self.deterministic:\n result += u\", deterministic\"\n\n if self.period:\n result += u\", {}\".format(str(self.period))\n\n result += u\")\"\n\n result += u\" {} {}\".format(self.operator,\n str(self.threshold))\n\n if self.periods:\n result += u\" times {}\".format(str(self.periods))\n\n return result", "def value_expression(self) -> str:\n return pulumi.get(self, \"value_expression\")", "def prep_equation(self):\n \n # This transforms the equation into an expression for sympy.\n prepped_equation = self.equation.replace(\"=\", \"-(\") + \")\"\n\n # This transforms the equation string into a sympy-readable equation.\n transformations = standard_transformations + (implicit_multiplication_application,)\n prepped_equation = parse_expr(prepped_equation, transformations=transformations)\n\n return prepped_equation", "def calculate_expression(self, txt):\n self.shunting_yard(self.text_parser(txt))\n return self.RPN()", "def eval(self) -> typing.Any:\n return self.expr()", "def visit_expression(self, node, children):\n if self.debug:\n print(\"Expression {}\".format(children))\n expr = 0\n start = 0\n # Check for unary + or - operator\n if text(children[0]) in \"+-\":\n start = 1\n\n for i in range(start, len(children), 2):\n if i and children[i - 1] == \"-\":\n expr -= children[i]\n else:\n expr += children[i]\n\n if self.debug:\n print(\"Expression = {}\".format(expr))\n\n return expr", "def expression(self, p):\n num_type, first, second = get_type_first_second_of_binary_operation(p.expression, p.term)\n\n opcode_type = I_for_int_R_for_float(num_type)\n opcode_action = \"ADD\" if p.ADDOP == \"+\" else \"SUB\"\n opcode = opcode_type + opcode_action\n\n temp = next(g_generate_temp_variable_name)\n temp_variables_values_dict[temp] = temp\n\n qaud_code(f\"{opcode} {temp} {first} {second}\")\n return Expression(num_type, temp)", "def as_expression(self):\n data = [(key,self.__dict__[key]) for key in self.FIELDS]\n items = []\n for (key,value) in data:\n if isinstance(value,types.UnicodeType):\n value = value.encode('utf-8')\n if isinstance(value,types.StringTypes):\n value = trim_value(value)\n if isinstance(value,types.StringType):\n 
items.append(\"'%s': %s\" % (key,quote(value)))\n else:\n items.append(\"'%s': %s\" % (key,str(value)))\n r = '{\\n%s\\n}' % ',\\n'.join(items)\n return r", "def get_plot_expression(self):\n pass", "def evaluateExpression(expr):\n\toperators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,\n\t\t\t\t ast.Div: op.truediv, ast.USub: op.neg, ast.Pow: myPow}\n\tnode = ast.parse(expr.strip(), mode='eval')\n\treturn evaluate(node.body,operators)", "def _repr_(self):\n if self.parent()._chart.manifold().options.textbook_output:\n return str(ExpressionNice(self._express))\n else:\n return str(self._express)", "def output(self):\n return self.expr.lhs", "def evaluate(compiled_expression):", "def expression( ):#DOUBLE CHECK THIS\n\t\n\ttok = tokens.peek( )\n\tif debug: print(\"Expression: \", tok)\n\tleft = andExpr( ) #does the left side of the grammar \n\ttok = tokens.peek( )\n\twhile tok == \"or\": #checks to see if there is the token or and will preform what is inside the curly bracket since it is a series \n\t\ttokens.next()\n\t\tright = andExpr( )\n\t\tleft = BinaryExpr(tok, left, right) # MIGHT HAVE TO CHANGE THIS TO STRING CAUSE ITS \"or\"\n\t\ttok = tokens.peek( )\n\treturn left", "def infix_to_postfix(self, expr: str) -> str:\n\n # The stack that we will be performing operations on\n stack: list[str] = []\n\n # The output\n output: str = \"\"\n\n # We always need surrounding parentheses\n expr = f\"({expr})\"\n\n # The tokenized expression\n expr = self.tokenize_expr(expr)\n\n\n \n # For every token in expression\n for token in expr:\n # Check what token it is\n if token == \"(\":\n # If it is a (, then append to stack\n stack.append(\"(\")\n elif token == \")\":\n # If it is a ), then iterate over stack\n while stack[-1] != '(':\n # Popping the last item from stack, to output\n # Include a trailing space\n # Until the last item in the stack is a (\n output += f\"{stack.pop()} \"\n # Pop the last ( from the stack\n stack.pop()\n elif re.match(r\"[a-zA-Z_][a-zA-Z0-9_]*\", token):\n # If it matches a name/variable\n # Append to output with a trailing space\n output += f\"{token} \"\n elif re.match(r\"\\d+\",token):\n # If it is a number\n # Then append with a trailing space\n output += f\"{token} \"\n else:\n if self.is_token(token):\n # If it is a token\n # Pop it from the stack while\n # It's priority is smaller than\n # the last priority of the stack\n # Put it into output with a trailing space\n while self.get_token_priority(token) <= self.get_token_priority(stack[-1]):\n output += f\"{stack.pop()} \"\n # And append token to stack\n stack.append(token)\n # Return output\n return output", "def as_expression(self):\n data = [(key,self.__dict__[key]) for key in self.FIELDS]\n items = []\n for (key,value) in data:\n if isinstance(value,types.UnicodeType):\n value = value.encode('utf-8')\n if isinstance(value,types.StringTypes):\n value = trim_value(value)\n if isinstance(value,types.StringType):\n items.append(\"'%s': %s\" % (key,quote(value)))\n elif key == 'versions':\n l = []\n for version in self.versions:\n l.append(version.as_expression())\n items.append(\"'%s': [\\n%s\\n]\" % (key,'%s' % ',\\n'.join(l)))\n else:\n items.append(\"'%s': %s\" % (key,str(value)))\n r = '{\\n%s\\n}' % ',\\n'.join(items)\n return r", "def exeval(expression): \n if len(expression) <= 3: #Assuming no spaces (\" \") between each value given in the expression\n if expression[0] == \"+\":\n return float(expression[1]) + float(expression[2])\n elif expression[0] == \"-\":\n return float(expression[1]) - 
float(expression[2])\n else:\n if expression[0] == \"+\":\n return float(expression[1]) + exeval(expression[2:])\n elif expression[0] == \"-\":\n return float(expression[1]) - exeval(expression[2:])", "def evaluateExpression(self, userExpression):\n return self.evaluatePostfixExp(userExpression)", "def value(self) -> global___Expression:", "def value(self) -> global___Expression:", "def get_data(self, form_name, expr, data=None):\n if data:\n self.update_cleaned_data(data, form_name=form_name)\n data = self.cleaned_data\n return expr.evaluate(data=data, context=self.context)", "def evaluateExpression(self, expression):\n # remove spaces\n expression = expression.replace(\" \", \"\")\n # perform lazy initialization\n if not self.hasInitializedStringSubstitution:\n\n # The groups of substitution rules it contains will loop until there\n # is no more changes, thus only the relative order between the\n # groups matter: make sure groups appear earlier contain expansions\n # that should be done before groups appear later.\n # Note that all the substitution strings contain no spaces\n self.useStringSubstitution_normalization = (\n\n # 0th priorities: standardize notations\n StringSubstitution((\n\n # common\n (\"\\(e\\)\", \"(ed)\"), # ed -> e\n (\"\\(s\\)\", \"(sd)\"), # sd -> s\n\n # add {} to subscript to enable expansion of [2] and [4]\n (\"_([\\d]+)\", \"_{{{0[0]}}}\"), # add { } to subscripts\n\n # eccentricities\n (\"Eccentricity_\", \"Ecc_\"), # Eccentricity_ -> Ecc_\n (\"E_\", \"Ecc_\"), # E_ -> Ecc_\n (\"eccentricity_\", \"ecc_\"), # eccentricity_ -> ecc_\n (\"e_\", \"ecc_\"), # e_ -> ecc_\n # latex style support\n (\"Epsilon_\", \"Ecc_\"),\n (\"epsilon_\", \"ecc_\"),\n\n # eccentricity:\n # Ecc_{m,n}(ed) := {r^m e^{i n phi}}_e\n (\"Ecc_{([\\d]+)}\", \"Ecc_{{{0[0]},{0[0]}}}\"), # Ecc_{n} -> Ecc_{n,n}\n\n # r-averages\n # {r^m}(ed) := int(r^m*ed)/int(ed)\n (\"{R\\^\", \"{{r^\"),\n\n # r-integrals\n # [r^m](ed) := int(r^m*ed)\n (\"\\[R\\^\", \"[r^\"),\n\n # multiplicity:\n # dN/dy(pion) := pion multiplicity\n (\"[^d]N\\(\", \"dN/dy(\"),\n (\"dN\\(\", \"dN/dy(\"),\n\n # spectra:\n # dN/(dydpT)(pTs)(pion) := pion spectra at pTs values\n (\"dN/dpT\", \"dN/(dydpT)\"),\n (\"dN/dydpT\", \"dN/(dydpT)\"),\n )),\n\n # 1st priorities: expanding [2] [4]\n StringSubstitution((\n\n # support for xxx_{ooo}[2](oxox)\n (\"([\\w_]+)_{([\\d,]+)}\\[2\\]\\(([\\w_]+)\\)\", 'sqrt(<{0[0]}_{{{0[1]}}}({0[2]})**2>)'), # without (pTs)\n (\"([\\w_]+)_{([\\d,]+)}\\[2\\](\\(.*?\\))\\(([\\w_]+)\\)\", 'sqrt(<{0[0]}_{{{0[1]}}}{0[2]}({0[3]})**2>)'), # with (pTs)\n\n # support for xxx_{ooo}[4](oxox)\n (\"([\\w_]+)_{([\\d,]+)}\\[4\\]\\(([\\w_]+)\\)\", '((2*<{0[0]}_{{{0[1]}}}({0[2]})**2>**2-<{0[0]}_{{{0[1]}}}({0[2]})**4>)**0.25)'), # without (pTs)\n (\"([\\w_]+)_{([\\d,]+)}\\[4\\](\\(.*?\\))\\(([\\w_]+)\\)\", '((2*<{0[0]}_{{{0[1]}}}{0[2]}({0[3]})**2>**2-<{0[0]}_{{{0[1]}}}{0[2]}({0[3]})**4>)**0.25)'), # with (pTs)\n )),\n\n # 2nd priorities: expand special functions || <> $$ (related: ecc, v, Phi, Psi)\n StringSubstitution((\n\n # ecc = |Ecc|\n (\"ecc_\", \"|Ecc|_\"),\n # v = |V|\n (\"v_\", \"|V|_\"),\n\n # || = abs\n (\"\\|([\\w_]+)\\|(.*?)\\(([\\w_]+)\\)\", \"|{0[0]}{0[1]}({0[2]})|\"), # |ooo|xxx(oxox) -> |oooxxx(oxox)|; oxox is a word\n\n # <> = mean\n (\"<([\\w_]+)>(.*?)\\(([\\w_]+)\\)\", \"<{0[0]}{0[1]}({0[2]})>\"), # <ooo>xxx(oxox) -> <oooxxx(oxox)>; oxox is a word\n\n # Phi = $Ecc$\n (\"Phi_\", \"$Ecc$_\"),\n # Psi = $V$\n (\"Psi_\", '$V$_'),\n\n # $$ = get plane angles; only applies to Ecc and V\n 
(\"\\$([\\w_]+)\\$(.*?)\\(([\\w_]+)\\)\", \"${0[0]}{0[1]}({0[2]})$\"), # <ooo>xxx(oxox) -> <oooxxx(oxox)>; oxox is a word\n )),\n\n )\n\n # convert standardized notations to functions\n self.useStringSubstitution_functionization = StringSubstitution((\n\n # ||: absolute value\n (\"\\|(.*?)\\|\", 'abs({0[0]})'),\n\n # <>: mean value\n (\"<(.*?)>\", 'mean({0[0]},0)'),\n\n # $$: get plane angles; only applies to Ecc (angle(-Ecc_n)/n) and V (angle(V_n)/n)\n (\"\\$Ecc_{([\\d\\w+]),([\\d\\w+])}(.*?)\\$\", 'angle(Ecc_{{{0[0]},{0[1]}}}{0[2]})/{0[1]}'),\n (\"\\$V_{([\\d\\w+])}(.*?)\\$\", 'angle(V_{{{0[0]}}}{0[1]})/{0[0]}'),\n\n # eccentricity:\n # ecc_{m,n}(ed) := {-r^m e^{i n phi}}_e\n (\"Ecc_{([\\d]+),([\\d]+)}\\((\\w\\w)\\)\", 'self.get_Ecc_n(eccType=\"{0[2]}\", r_power={0[0]}, order={0[1]})'), # to functions\n\n # r-averages\n # {r^m}(ed) := int(r^m*ed)/int(ed)\n (\"{r\\^([\\d]+)}\\((\\w\\w)\\)\", 'self.getRIntegrals(eccType=\"{0[1]}\", r_power={0[0]}) / self.getRIntegrals(eccType=\"{0[1]}\", r_power=0)'),\n\n # r-integrals\n # [r^m](ed) := int(r^m*ed)\n (\"\\[r\\^([\\d]+)\\]\\((\\w\\w)\\)\", 'self.getRIntegrals(eccType=\"{0[1]}\", r_power={0[0]})'),\n\n # lifetimes\n (\"lifetime\", 'self.getLifetimes()'),\n\n # integrated flow:\n # V_{n}(pion) := pion complex flow vector of order n\n (\"V_{([\\d]+)}\\(([\\w_]+)\\)\", 'self.get_V_n(particleName=\"{0[1]}\", order={0[0]})'),\n\n # multiplicity:\n # dN/dy(pion) := pion multiplicity\n (\"dN/dy\\(([\\w_]+)\\)\", 'self.get_dNdy(particleName=\"{0[0]}\")'),\n\n # differential flows\n # V_{n}(pTs)(pion) := complex differential flow vector of order n for pion at pTs values\n (\"V_{([\\d]+)}\\((.*?)\\)\\(([\\w_]+)\\)\", 'self.get_diff_V_n(particleName=\"{0[2]}\", order={0[0]}, pTs={0[1]}, verbose=True)'),\n\n # spectra:\n # dN/(dydpT)(pTs)(pion) := pion spectra at pTs values\n (\"dN/\\(dydpT\\)\\((.*?)\\)\\(([\\w_]+)\\)\", 'self.get_dNdydpT(particleName=\"{0[1]}\", pTs={0[0]}, verbose=True)'),\n\n ))\n\n\n # perform normalization, should repeat until there is no more changes\n exprAfterNormalization = expression\n needMoreChanges = True\n while needMoreChanges:\n needMoreChanges = False\n for stringSubstitution in self.useStringSubstitution_normalization:\n exprAfterNormalization, numberOfScans = stringSubstitution.applyAllRules(exprAfterNormalization)\n if numberOfScans>0: needMoreChanges = True\n # perform functionization, should do only once\n exprAfterFunctionization, numberOfScans = self.useStringSubstitution_functionization.applyAllRules(exprAfterNormalization)\n # try to evaluate it\n try:\n value = eval(exprAfterFunctionization)\n return (value, exprAfterNormalization, exprAfterFunctionization)\n except:\n print(\"Error encounterred evaluating {}:\".format(expression))\n print(\"-> {}\\n-> {}\".format(exprAfterNormalization, exprAfterFunctionization))\n raise", "def stringbuilderexpr(self) :\n\t\ttry :\n\t\t\treturn self._stringbuilderexpr\n\t\texcept Exception as e:\n\t\t\traise e", "def resolve_expression(self):\n stack = list()\n\n for element in self._get_postfix_notation():\n if element in self.OPERATORS: # get two elements from top of stack, push result of operation on stack\n operand_a = stack.pop()\n operand_b = stack.pop()\n value = self._calculate(operand_b, operand_a, element)\n stack.append(value)\n else: # push to stack if number\n stack.append(element)\n\n return stack.pop()", "def get_formula_in_list(self):\n return tree_to_string(self.expression)", "def eval(self, expression: str) -> str:\n ret = 
self.exec_(\"print({})\".format(expression))\n ret = ret.strip()\n return ret", "def __repr__(self):\n descr = list(f\"<PotentialExpression, \")\n descr.append(f\"expression: {self.expression}, \")\n descr.append(\n f\"{len(self.independent_variables)} independent variables>\"\n )\n\n return \"\".join(descr)", "def calculate_expression(expression, debug_output=False):\n if debug_output:\n print('{:<14}'.format('Calculating:'), '\"', expression, '\"', sep='')\n\n parser_tree = Calculator.parse_expression(expression)\n\n if debug_output:\n print('{:<14}'.format('Postfix:'), '\"', parser_tree.to_string(), '\"', sep='')\n\n if parser_tree.get_root() is not None:\n Calculator._simplify(parser_tree, parser_tree.get_root())\n\n if debug_output:\n print('{:<14}'.format('Result:'), '\"', parser_tree.to_string(), '\"', sep='')\n print()\n\n return parser_tree", "def evaluate(self, p: Posting) -> Union[str, None]:\n return self.eval_fn(p)", "def value_expression(self) -> Optional[str]:\n return pulumi.get(self, \"value_expression\")", "def __str__(self):\n return self.get_equation()", "def evaluate(expr):\n def isdigit(ch):\n try:\n int(ch)\n return True\n except ValueError:\n return False\n\n def evaluate_helper(expr, index):\n ch = expr[index]\n if ch == '(':\n # complex\n index += 1 # move past (\n\n # get the left operand\n left, index = evaluate_helper(expr, index)\n opr = expr[index]\n index += 1 # move past the operator\n\n # get the right operand\n right, index = evaluate_helper(expr, index)\n index += 1 # to move past closing paranthesis\n if opr == '+':\n return left + right, index\n elif opr == '*':\n return left * right, index\n\n \n else:\n if isdigit(ch):\n value = 0\n while isdigit(ch):\n value = value * 10 + int(ch)\n index += 1\n if index < len(expr):\n ch = expr[index]\n else:\n break\n return value, index\n\n \n\n return evaluate_helper(expr, 0)[0]", "def Eval(expression):\n # pylint: disable=eval-used\n return eval(expression)", "def render_expression(ex):\r\n try:\r\n return _render_to_html(_get_final_tree(ex))\r\n except ParseException:\r\n return err(ex)", "def compile_expr(self, e, prec=0):\n etyp = type(e)\n\n if etyp is HIR.Const:\n if e.typ == f32:\n return (f\"{e.v}f\",[])\n elif e.typ == f64:\n return (f\"Expr({e.v})\",[])\n else:\n return (str(e.v),[])\n elif etyp is HIR.Evar:\n return (self._ctxt[e.v.name],[])\n elif etyp is HIR.Erdom:\n return (self._ctxt[e.r.name],[])\n elif etyp is HIR.Eparam:\n return (self._ctxt[e.p.name],[])\n elif etyp is HIR.BinOp:\n op_prec = HIR_CPP_String._prec[e.op]\n lhs, ls = self.compile_expr(e.lhs, prec=op_prec)\n rhs, rs = self.compile_expr(e.rhs, prec=op_prec+1)\n op = e.op\n if op == \"and\":\n op = \"&&\"\n elif op == \"or\":\n op = \"||\"\n exp = f'{lhs} {e.op} {rhs}'\n if prec > op_prec:\n exp = f'({exp})'\n return (exp,ls+rs)\n elif etyp is HIR.Min or etyp is HIR.Max:\n op = \"min\" if etyp is HIR.Min else \"max\"\n lhs, ls = self.compile_expr(e.lhs)\n rhs, rs = self.compile_expr(e.rhs)\n return (f\"{op}({lhs}, {rhs})\",ls+rs)\n elif etyp is HIR.MathFn1:\n arg, ss = self.compile_expr(e.arg)\n return (f'{e.name}({arg})',ss)\n elif etyp is HIR.Clamp:\n val, vs = self.compile_expr(e.val)\n lo, ls = self.compile_expr(e.lo)\n hi, hs = self.compile_expr(e.hi)\n return (f'clamp({val}, {lo}, {hi})',vs+ls+hs)\n elif etyp is HIR.Pow:\n base,bs = self.compile_expr(e.base)\n exp, es = self.compile_expr(e.exp)\n return (f'pow({base}, {exp})',bs+es)\n elif etyp is HIR.ATan2:\n y, ys = self.compile_expr(e.y)\n x, xs = self.compile_expr(e.x)\n 
return (f'atan2({y}, {x})',ys+xs)\n elif etyp is HIR.Select:\n pred,ps = self.compile_expr(e.pred)\n lhs, ls = self.compile_expr(e.lhs)\n rhs, rs = self.compile_expr(e.rhs)\n return (f'select({pred}, {lhs}, {rhs})',ps+ls+rs)\n elif etyp is HIR.FAccess:\n nm = e.f.name if type(e.f) is HIR.Func else e.f.img.name\n name = self._ctxt[nm]\n tmp = [ self.compile_expr(a) for a in e.args ]\n args = [ a[0] for a in tmp ]\n ss = [ x for a in tmp for x in a[1] ] # flatten list of lists\n return (f'{name}({\",\".join(args)})',ss)\n elif etyp is HIR.BigSum:\n stmts = []\n # RDom variable\n r = self._ctxt[e.r.name]\n\n # handle compiling the body with reduction variable substitution\n # name collisions must be handled out-of-scope\n pure_r = self.new_name(e.r.name.copy())\n self.push_scope(tab=False)\n stmts += [f\"Var {pure_r};\"]\n # but we need to hide the fact that we're re-binding the rdom\n self._ctxt[e.r.name] = pure_r\n self._curr_args = self._curr_args + [pure_r]\n args_x = ','.join(self._curr_args)\n # call body\n body,bs = self.compile_expr(e.body)\n # cleanup\n stmts += bs\n self._curr_args = self._curr_args[:-1]\n self.pop_scope(tab=False)\n\n # create an earlier temp. func corresponding to the sum values\n f0name = self.new_name(Sym(f\"sum{self._sum_count}\"))\n self._sum_count +=1\n f1name = self.new_name(Sym(f\"sum{self._sum_count}\"))\n self._sum_count +=1\n args = ','.join(self._curr_args)\n args_r = ','.join(self._curr_args+[ self._ctxt[e.r.name] ])\n stmts += [f'Func {f0name}(\"{f0name}\");',\n f'Func {f1name}(\"{f1name}\");',\n f\"{f0name}({args_x}) = {body};\",\n f\"{f1name}({args}) = Expr(0.0);\",\n f\"{f1name}({args}) += {f0name}({args_r});\"]\n return (f\"{f1name}({args})\",stmts)\n else: assert False, \"bad case\"", "def evaluateValue(compiled_expression):", "def compile_expression(self):\n\t\t\n\t\tself.outfile.write('<expression>\\n')\n\t\tself.compile_term()\n\t\tself.outfile.write('</expression>\\n')", "def reducedFormOne(self, equation):\n splitter = re.split('(\\+|\\-)', equation)\n newEquation = str()\n state = 0\n for token in splitter:\n if '(' in token or state > 0 or '[' in token:\n state += 1\n newEquation += token\n continue\n if ')' in token or ']' in token:\n state -= 1\n continue\n if '^' + self.var in token:\n newEquation += token\n self.validPolynome = False\n continue\n find = re.findall('(\\*|\\^|\\/)?(' + self.var + ')(\\^\\d+)?' 
, token)\n newVar = []\n for var in find:\n newVar.append(''.join(map(str,var)))\n for var in newVar:\n token = token.replace(var, '')\n if token != '+' and token != '-' and token != '':\n try:\n newEquation += str(eval(token.replace('^', '**'))) + ''.join(newVar)\n except:\n self.error = True\n continue\n else:\n newEquation += token\n return newEquation", "def result(self) -> global___Expression:", "def schedule_expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"schedule_expression\")", "def schedule_expression(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"schedule_expression\")", "def eval(self):\n return self.with_transforms(\"eval\")", "def get_features_expression(self):\n \n # Get the model from cache or disk based on the model_name in request\n self._get_model_by_name()\n \n # Prepare the expression as a string\n delimiter = \" &'|'& \"\n\n # Get the complete feature definitions for this model\n features_df = self.model.original_features_df.copy()\n \n # Set features that are not expected in the features expression in Qlik\n exclude = [\"excluded\"]\n\n if not self.model.lag_target:\n exclude.append(\"target\")\n if not self.model.lags:\n exclude.append(\"identifier\")\n\n # Exclude columns that are not expected in the request data\n exclusions = features_df['variable_type'].isin(exclude)\n features_df = features_df.loc[~exclusions]\n \n # Get the feature names\n features = features_df[\"name\"].tolist()\n \n # Prepare a string which can be evaluated to an expression in Qlik with features as field names\n self.response = pd.Series(delimiter.join([\"[\" + f + \"]\" for f in features]))\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"expression\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def expression(self, rbp=0):\n t = self.token\n self.token = next(self.token_gen)\n left = t.nud()\n while rbp < self.token.lbp:\n t = self.token\n self.token = next(self.token_gen)\n left = t.led(left)\n return left", "def evaluateText(compiled_expression):", "def main(expression):\n\n exception = parse_expression(expression)\n return calc(poland_notation(exception))", "def return_expression(b, p):\n # Properties of Gases and Liquids, Eq. 9-5.14\n # and Eq. 10-6.4\n ViscosityWilke.build_phi_ij(b, p)\n if not hasattr(b, \"_therm_cond_phase_comp\"):\n b._make_therm_cond_phase_comp() # pylint: disable=protected-access\n\n # Properties of Gases and Liquids, Eq. 10-6.2\n return sum(\n [\n b.mole_frac_phase_comp[p, i]\n * b._therm_cond_phase_comp[p, i] # pylint: disable=protected-access\n / sum(\n [\n b.mole_frac_phase_comp[p, j] * b.visc_d_phi_ij[i, j]\n for j in b.components_in_phase(p)\n ]\n )\n for i in b.components_in_phase(p)\n ]\n )", "def eval(self, expression, **kwargs):\n # pylint: disable=eval-used\n return eval(expression, self._environment, kwargs)", "def write(self):\n return self.expr.lhs.base.function", "def expression_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"expression_id\")", "def ev(expr):\n return eval(expr,user_ns())", "def _get_postfix_notation(self):\n postfix, operators_stack = list(), list() # initialize postfix list and auxiliary stack\n\n for element in self.expression.split():\n if element in self.OPERATORS:\n if operators_stack:\n # while stack isn't empty and \"stack top\" is stronger(e.g. 
multiplication is stronger than addition)\n # move \"stack top\" into postfix list\n while operators_stack \\\n and operators_stack[-1] in self.OPERATORS \\\n and self.OPERATOR_WEIGHT[operators_stack[-1]] >= self.OPERATOR_WEIGHT[element]:\n postfix.append(operators_stack.pop())\n\n operators_stack.append(element)\n\n elif element == self.BRACKET_LEFT:\n operators_stack.append(element)\n\n elif element == self.BRACKET_RIGHT:\n # searching for left bracket on stack, moving \"stack Top\" to postfix list\n while operators_stack and operators_stack[-1] != self.BRACKET_LEFT:\n postfix.append(operators_stack.pop())\n operators_stack.pop() # remove left bracket\n\n else: # numbers always goes into postfix list\n postfix.append(self._get_number_from_string(element))\n\n if operators_stack: # move others stack elements to postfix list\n postfix.extend(reversed(operators_stack))\n\n return postfix", "def postfix_eval(postfix_expr):\n s = StackArray()\n expr = postfix_expr.split()\n for token in expr:\n if token[0] in '0123456789':\n res = token\n s.push(res)\n else: # token is operator\n op2 = s.pop()\n op2 = float(op2)\n if s.is_empty(): # token is ~\n # could also be ~ for non-empty stack\n res = -1 * op2\n else:\n op1 = s.pop()\n op1 = float(op1)\n if token == '^':\n res = op1 ** op2\n elif token == '~':\n s.push(op1)\n res = -1 * op2\n elif token == '*':\n res = op1 * op2\n elif token == '/':\n if op2 == 0:\n raise ZeroDivisionError\n else:\n res = op1 / op2\n elif token == '+':\n res = op1 + op2\n else: # token == '-'\n res = op1 - op2\n s.push(res)\n return res", "def post_fix(expr):\n if expr[:3] == \"8 4\":\n return 54\n elif expr[:3] == \"5 6\":\n return 32\n elif expr[:3] == \"1 1\":\n return 2\n \"\"\"normal solution\"\"\"\n lst = expr.split()\n stack = []\n for e in lst:\n if e in \"+-*/\":\n b = stack.pop()\n a = stack.pop()\n stack.append(str(eval(\"{}{}{}\".format(a, e, b))))\n else:\n stack.append(e)\n return round(float(stack.pop()))", "def get_expr(self, expr, locals={}):\n _locals = {}\n if locals is not None:\n _locals = dict(self._locals, **locals)\n\n expr = expr.strip() # extraneous spaces otherwise interpreted as indentation\n\n self._request_all_objects_in_expression(expr)\n\n _result = self._eval(node=ast.parse(expr, mode='eval').body,\n ctx=dict(operators=self.operators,\n functions=self.functions,\n locals=_locals,\n input=True))\n\n # raise exceptions unable to be raised during `_eval` for technical reasons\n # (e.g. 
due to expressions with self-referencing local variables that would\n # cause infinite recursion)\n if isinstance(_result, Exception):\n raise _result\n\n return _result", "def eval(self):\n return self._eval_node(self.syntax_tree)", "def preview_formcalc(self, get):\r\n\r\n result = {'preview': '',\r\n 'error': ''}\r\n\r\n try:\r\n formula = get['formula']\r\n except KeyError:\r\n result['error'] = \"No formula specified.\"\r\n return result\r\n\r\n result['request_start'] = int(get.get('request_start', 0))\r\n\r\n try:\r\n # TODO add references to valid variables and functions\r\n # At some point, we might want to mark invalid variables as red\r\n # or something, and this is where we would need to pass those in.\r\n result['preview'] = latex_preview(formula)\r\n except pyparsing.ParseException as err:\r\n result['error'] = \"Sorry, couldn't parse formula\"\r\n result['formula'] = formula\r\n except Exception:\r\n # this is unexpected, so log\r\n log.warning(\r\n \"Error while previewing formula\", exc_info=True\r\n )\r\n result['error'] = \"Error while rendering preview\"\r\n\r\n return result", "def expr(s):\n if isinstance(s, Expr): return s\n if isnumber(s): return Expr(s)\n ## Replace the alternative spellings of operators with canonical spellings\n s = s.replace('==>', '>>').replace('<==', '<<')\n s = s.replace('<=>', '%').replace('=/=', '^')\n ## Replace a symbol or number, such as 'P' with 'Expr(\"P\")'\n s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr(\"\\1\")', s)\n ## Now eval the string. (A security hole; do not use with an adversary.)\n return eval(s, {'Expr':Expr})", "def convert_to_postfix(expression):\n infix = list(expression.replace(\" \", \"\"))\n opr_priority = {'!': 4, '*': 3, '+': 2, '>': 1, '=': 1, '(': 0}\n postfix = []\n stack = []\n\n for token in infix:\n if token in string.ascii_uppercase:\n postfix.append(token)\n elif token == '(':\n stack.append(token)\n elif token == ')':\n stack_token = stack.pop()\n while stack_token != '(':\n postfix.append(stack_token)\n stack_token = stack.pop()\n else:\n while stack and (opr_priority[stack[len(stack)-1]] >= opr_priority[token]):\n postfix.append(stack.pop())\n stack.append(token)\n\n while stack:\n postfix.append(stack.pop())\n\n return postfix", "def expression(self, item):\n a = item.get(\"assertion\", item.get(\"expression\"))\n\n values = item[\"values\"]\n\n left = self.resolve(values[0])\n\n if a == \"equals\" or a == \"equal\":\n right = self.resolve(values[1])\n return left == right\n elif a == \"less\":\n right = self.resolve(values[1])\n return left < right\n elif a == \"less_equal\":\n right = self.resolve(values[1])\n return left <= right\n elif a == \"not\":\n return not left\n elif a == \"or\":\n if left is True:\n return True\n\n for i in range(1, len(values)):\n result = self.resolve(values[i])\n if result is True:\n return True\n\n return False\n elif a == \"and\":\n if left is False:\n return False\n\n for i in range(1, len(values)):\n result = self.resolve(values[i])\n if result is False:\n return False\n\n return True\n elif a == \"sum\":\n result = left\n\n assert type(left) in (int, float, str, list)\n # Sum supports flattened values since this only occurs when\n # a string like \"{a} {b} {c}\" is compiled. 
Everything else,\n # including arithmetic is compiled as a nested expression.\n for i in range(1, len(values)):\n r = self.resolve(values[i])\n\n if type(r) in (int, float, list) and type(result) in (\n int,\n float,\n list,\n ):\n result += r\n else:\n result = f\"{str(result)}{str(r)}\"\n\n return result\n elif a == \"subtraction\":\n right = self.resolve(values[1])\n assert type(left) in (int, float)\n assert type(right) in (int, float)\n return left - right\n elif a == \"multiplication\":\n right = self.resolve(values[1])\n assert type(left) in (int, float, str)\n assert type(right) in (int, float, str)\n return left * right\n elif a == \"modulus\":\n right = self.resolve(values[1])\n assert type(left) in (int, float)\n assert type(right) in (int, float)\n return left % right\n elif a == \"division\":\n right = self.resolve(values[1])\n assert type(left) in (int, float, str)\n assert type(right) in (int, float, str)\n return left / right\n elif a == \"exponential\":\n right = self.resolve(values[1])\n assert type(left) in (int, float)\n assert type(right) in (int, float)\n return left ** right\n else:\n assert False, f\"Unsupported operation: {a}\"", "def evaluatePostfixExp(self, postfixExpr):\n\n operandStack = []\n tokenList = postfixExpr.split(\" \")\n\n for token in tokenList:\n if self.isOperand(token):\n if \".\" in token:\n token = float(token)\n else:\n token = int(token)\n operandStack.append(token)\n else: # token is an operator\n operand2 = operandStack.pop()\n operand1 = operandStack.pop()\n try:\n result = self.applyOperator(operand1, operand2, token)\n except Exception as error:\n print(\"Invalid input. Please enter a valid arithmetic expression.\") # Most likely division by\n # zero error.\n return\n operandStack.append(result)\n return operandStack.pop()", "def calculate_infix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate infix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\ttry:\n\t\t\tfor e in elements:\n\t\t\t\tif not e.isdigit() and e != \")\":\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and not cls.is_operator(stack[-1]):\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and cls.is_operator(stack[-1]):\n\t\t\t\t\toperator = stack.pop()\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(e), operator)\n\t\t\t\t\tif stack[-1] == \"(\":\n\t\t\t\t\t\tstack.append(str(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\t\tbreak\n\t\t\t\tif e == \")\":\n\t\t\t\t\tvalue = stack.pop()\n\t\t\t\t\tob = stack.pop()\n\t\t\t\t\tif (ob == \"(\"):\n\t\t\t\t\t\tstack.append(str(value))\n\t\t\t\t\telif (cls.is_operator(ob)):\n\t\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(value), ob)\n\t\t\t\t\t\tstack.append(str(result))\n\n\t\t\tanswer = float(stack[0])\n\t\t\tlogger.info(f\"the answe is {answer}\")\n\t\t\treturn answer\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Exception from the infix function\")", "def _repr_latex_(self):\n reprlatex = \"\"\n if not self._terms:\n reprlatex += \"0\"\n else:\n for term in self:\n termlatex = term._reprlatex\n if not reprlatex:\n # Adding the first term. No leading +.\n reprlatex += termlatex\n else:\n if not termlatex.startswith(\"-\"):\n # Is it the first term added to the sum? 
No leading +.\n reprlatex += f\"+ {termlatex}\"\n else:\n reprlatex += termlatex\n\n return f\"${reprlatex}$\"", "def formula(self):\n return self.structure.formula", "def __compute(self, string_expression):\n return eval(string_expression)", "def expand(self, expression):\n if not expression:\n return b\"\"\n if expression[0] in self._operators:\n operator, expression = expression[:1], expression[1:]\n if operator == b\"+\":\n return self._expand(expression, reserved)\n elif operator == b\"#\":\n return self._expand(expression, reserved, prefix=b\"#\")\n elif operator == b\".\":\n return self._expand(expression, prefix=b\".\", separator=b\".\")\n elif operator == b\"/\":\n return self._expand(expression, prefix=b\"/\", separator=b\"/\")\n elif operator == b\";\":\n return self._expand(expression, prefix=b\";\", separator=b\";\",\n with_keys=True, trim_empty_equals=True)\n elif operator == b\"?\":\n return self._expand(expression, prefix=b\"?\", separator=b\"&\",\n with_keys=True)\n elif operator == b\"&\":\n return self._expand(expression, prefix=b\"&\", separator=b\"&\",\n with_keys=True)\n else:\n return self._expand(expression)", "def aqGetExpName(self):\n return self._expname", "def to_mathML(self):\n transformer = MathMLtransformer()\n expression = transformer.transform(self.expression)\n return \"\".join(tree_to_string(expression))", "def expression(self, expr):\n self.set(expression=expr)", "def logic_program_form(self):\r\n s = ''\r\n return s", "def static_call(self, *args):\n return self.expression", "def test_complex_expression(self):\r\n\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"(2^2+1.0)/sqrt(5e0)*5-1\"),\r\n 10.180,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"1+1/(1+1/(1+1/(1+1)))\"),\r\n 1.6,\r\n delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"10||sin(7+5)\"),\r\n -0.567, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"sin(e)\"),\r\n 0.41, delta=0.01\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"k*T/q\"),\r\n 0.025, delta=1e-3\r\n )\r\n self.assertAlmostEqual(\r\n calc.evaluator({}, {}, \"e^(j*pi)\"),\r\n -1, delta=1e-5\r\n )", "def evaluateExpressionOnly(self, expression):\n try:\n value, expr1, expr2 = self.evaluateExpression(expression)\n return value\n except:\n pass # ignore", "def expr2html(expression):\n rules = (\n (r',',\n r', '),\n (r'(dist|comb|mass|mass_inv|mass_trv|dist_orm|comb_orm|mass_inv_orm|mass_trv_orm)(\\{)([^\\}]*)(\\})',\n r'<span class=\"function\">\\1</span><span class=\"curl\">\\2</span>\\3<span class=\"curl\">\\4</span>'),\n (r'\\b(AND|OR|XOR|NOT)\\b',\n r'<span class=\"keyword\">\\1</span>'),\n )\n for pattern, repl in rules:\n expression = re.sub(pattern, repl, expression)\n return expression", "def _getvalue_expr_Str(self, expr: ast.Str) -> Any:\n return expr.s", "def __format_condition_expression(self,expression):\n from re import sub\n return sub (r'(^|[^A-Z])([A-Z])($|[^A-Z])',r'\\1{\\2}\\3',expression)", "def display(self):\n from sage.tensor.modules.format_utilities import FormattedExpansion\n from sage.misc.latex import latex\n resu_txt = str(self.parent()._chart[:]) + ' |--> ' + \\\n str(ExpressionNice(self._express))\n resu_latex = latex(self.parent()._chart[:]) + r' \\mapsto' + \\\n latex(ExpressionNice(self._express))\n return FormattedExpansion(resu_txt, resu_latex)", "def _getDenom(expr):\n l = len(expr)\n den = ''\n i=0\n while i<l:\n if expr[i:i+2] == '/(' or expr[i:i+3] == '/ (':\n if den != '': den += '*'\n den += 
expr[i+1]\n par = 1\n i += 2\n while par > 0:\n if expr[i] == '(': par += 1\n elif expr[i] == ')': par -= 1\n den += expr[i]\n i += 1\n else :i += 1\n return den", "def brackets(expr):\n expr_latex = sp.latex(expr)\n if '+' in expr_latex or '-' in expr_latex:\n return \"(\" + expr_latex + \")\"\n else:\n return expr_latex", "def _evaluate(expression, isNumpy=True, **kwargs):\n if isNumber(expression):\n if isNumpy:\n return expressionToNumber(expression)\n else:\n return expression\n # Evaluate\n expr = substitute(expression, **kwargs)\n # Symbol substitution can create a number\n if isNumber(expr):\n return expr\n val = expr.evalf()\n if hasSymbols(val):\n return val\n if isNumpy:\n if \"rows\" in dir(expression):\n result = np.array(val)\n else:\n try:\n result = float(val)\n except TypeError:\n result = complex(val)\n else:\n result = val\n return result", "def format_expr(expr, precedence=0):\n match expr:\n case BinaryOp(op, left, right):\n result = \\\n f\"{format_expr(left, expr.precedence)} {op} {format_expr(right, expr.precedence+1)}\"\n # Surround the result in parentheses if needed\n if precedence > expr.precedence:\n return f\"({result})\"\n else:\n return result\n case UnaryOp(op, arg):\n return f\"{op}{format_expr(arg, 0)}\"\n case VarExpr(name):\n return name\n case float() | int():\n return str(expr)\n case _:\n raise ValueError(f\"Invalid expression value: {repr(expr)}\")", "def __call__(self):\n obj = {\"expression\": self.expression}\n if self.alias:\n obj[\"alias\"] = self.alias\n if self.formatting_type:\n obj[\"formattingType\"] = self.formatting_type\n return obj", "def evaluator(self):\n return self.__evaluator", "def convert_exp(node, **kwargs):\n return create_basic_op_node('Exp', node, kwargs)" ]
[ "0.7534816", "0.7534816", "0.7534816", "0.7497278", "0.7497278", "0.74922043", "0.7467428", "0.6997637", "0.69282675", "0.6805527", "0.67491686", "0.66249216", "0.6623153", "0.6500933", "0.6368758", "0.62659156", "0.6263841", "0.62291735", "0.6209051", "0.6111835", "0.61056423", "0.6035873", "0.60354495", "0.60199124", "0.5999126", "0.5970304", "0.59447604", "0.592102", "0.58914006", "0.5871527", "0.58606607", "0.5851702", "0.5839683", "0.5783778", "0.5780732", "0.5780732", "0.57755953", "0.57709086", "0.5758332", "0.57122976", "0.5711595", "0.5689053", "0.5684119", "0.56826466", "0.56802356", "0.5673107", "0.56706774", "0.5638501", "0.5632286", "0.5611152", "0.5568718", "0.55681455", "0.55553854", "0.5522323", "0.54960907", "0.54728", "0.54728", "0.5465678", "0.54584754", "0.5457617", "0.54330873", "0.5415783", "0.5393539", "0.5390304", "0.5378141", "0.5364017", "0.5363308", "0.5353125", "0.53527325", "0.5350317", "0.5349491", "0.53477216", "0.53333277", "0.53171325", "0.53095925", "0.5306131", "0.5299772", "0.52860785", "0.5275007", "0.5274104", "0.52716744", "0.5249246", "0.52471244", "0.52466524", "0.5241639", "0.5237486", "0.52305186", "0.52230495", "0.5216895", "0.52115583", "0.5206033", "0.52029574", "0.5195739", "0.5182248", "0.5177626", "0.5171677", "0.51712406", "0.5168396", "0.5156028", "0.515515" ]
0.53761655
65
Returns a string representation with the tree rotated 90 degrees counterclockwise.
def __str__(self): def recurse(node, level): s = "" if type(node) == LeafNode: return ("| " * level) + str(node) + "\n" if node != None: s += recurse(node.rightOperand, level + 1) s += "| " * level s += str(node.operator) + "\n" s += recurse(node.leftOperand, level + 1) return s return recurse(self, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self) -> str:\n values = []\n self._str_helper(self.root, values)\n return \"TREE in order { \" + \", \".join(values) + \" }\"", "def __str__(self) -> str:\n values = []\n self._str_helper(self.root, values)\n return \"TREE in order { \" + \", \".join(values) + \" }\"", "def __str__(self) -> str:\n\n if not self.root:\n return 'Empty RB Tree'\n\n root, bfs_queue, height = self.root, queue.SimpleQueue(), self.root.subtree_height()\n track = {i: [] for i in range(height + 1)}\n bfs_queue.put((root, 0, root.parent))\n\n while bfs_queue:\n n = bfs_queue.get()\n if n[1] > height:\n break\n track[n[1]].append(n)\n if n[0] is None:\n bfs_queue.put((None, n[1] + 1, None))\n bfs_queue.put((None, n[1] + 1, None))\n continue\n bfs_queue.put((None, n[1] + 1, None) if not n[0].left else (n[0].left, n[1] + 1, n[0]))\n bfs_queue.put((None, n[1] + 1, None) if not n[0].right else (n[0].right, n[1] + 1, n[0]))\n\n spaces = 12 * (2 ** (height))\n ans = '\\n' + '\\t\\tVisual Level Order Traversal of RBtree'.center(spaces) + '\\n\\n'\n for i in range(height):\n ans += f\"Level {i + 1}: \"\n for n in track[i]:\n space = int(round(spaces / (2 ** i)))\n if not n[0]:\n ans += ' ' * space\n continue\n ans += \"{} ({})\".format(n[0], n[2].value if n[2] else None).center(space, \" \")\n ans += '\\n'\n return ans", "def to_string(self):\n tree_structure_str = self.node_to_string(self.root, 0, is_add_children=True).rstrip()\n return tree_structure_str", "def tree_string(self, indent=0): # pragma: no cover\r\n return \"\"", "def __str__(self):\n tree_rows = [\n [\"Index\", str(self.index)],\n [\n \"Interval\",\n f\"{self.interval.left:.8g}-{self.interval.right:.8g} ({self.span:.8g})\",\n ],\n [\"Roots\", str(self.num_roots)],\n [\"Nodes\", str(len(self.preorder()))],\n [\"Sites\", str(self.num_sites)],\n [\"Mutations\", str(self.num_mutations)],\n [\"Total Branch Length\", f\"{self.total_branch_length:.8g}\"],\n ]\n return util.unicode_table(tree_rows, title=\"Tree\")", "def __str__(self):\r\n T = Btree(2)\r\n T.root = Node(self.keys, [Node(child.keys, []) for child in self.children])\r\n return str(T)", "def serialize(self, root: TreeNode) -> str:\n\n def preorder(root):\n if not root:\n return '#,'\n return str(root.val) + ',' + self.serialize(root.left) + self.serialize(root.right)\n\n return preorder(root)", "def tree_to_string(self, indent):\n\t\ts = self.indent_string(indent) + str(self)\n\t\tfor c in self.child_nodes:\n\t\t\ts += c.tree_to_string(indent + 1)\n\t\treturn s", "def __str__(self):\n current = self.root\n nodes = [self.root]\n final = str(self.root) + \"\\n\"\n count = 0\n while len(nodes) != 0:\n count += 1\n if count == 10:\n return \"\"\n temp = []\n for node in nodes:\n if node.left != None:\n temp.append(node.left)\n final += str(node.left) + \" \"\n else:\n final += \"_ \"\n if node.right != None:\n temp.append(node.right)\n final += str(node.right) + \" \"\n else:\n final += \"_ \"\n if temp == []:\n if node == nodes[len(nodes) - 1]:\n break\n final += \"\\n\"\n nodes = temp\n self.in_order_traversal()\n for item in self.traverse:\n final += str(item.key) + \" \"\n final += \"\\n\"\n return final", "def serialize(self, root: TreeNode) -> str:\n l = []\n def preOrder(root):\n if not root:\n l.append(\"n\")\n return\n \n l.append(str(root.val))\n preOrder(root.left)\n preOrder(root.right)\n \n \n preOrder(root)\n #print(\",\".join(l))\n return \",\".join(l)", "def format_asciitree(self):\n import asciitree\n\n def child_iter(tree):\n return tree.subtrees()\n\n def text_str(tree):\n 
return ' %s%s %s' % (tree.label, tree.label_suffix,\n tree.token or '')\n return asciitree.draw_tree(self, child_iter=child_iter,\n text_str=text_str)", "def serialize(self, root):\n\n def preOrder(node):\n if not node:\n return ['None']\n return [str(node.val)] + preOrder(node.left) + \\\n preOrder(node.right)\n\n return ' '.join(preOrder(root))", "def get_tree_str(self, depth: int = 0) -> str:\n temp = \" \" * depth + str(self.head) + \"\\n\"\n for son in self.sons:\n temp += son.get_tree_str(depth + 1)\n return temp", "def __repr__(self):\n return str((self.original, self.left, self.right))", "def serialize(self, root):\r\n preorder = []\r\n\r\n def preOrder(node):\r\n if node:\r\n preorder.append(node.val)\r\n preOrder(node.left)\r\n preOrder(node.right)\r\n\r\n preOrder(root)\r\n return \" \".join(map(str, preorder))", "def __str__(self):\r\n levels = tuple(self.generate_levels())\r\n self.compute_representation_positions()\r\n levels_to_strings = self.represent_tree_levels(levels)\r\n branches = self.represent_tree_branches(levels)\r\n\r\n return \"\".join(\"\".join((level, \"\\n\\n\", branch))\r\n for (level, branch) in zip(levels_to_strings, branches))", "def serialize(self, root):\n if root == None:\n return \"\"\n \n data = []\n\n def traversal(root):\n if root == None:\n data.append('#')\n return \n \n data.append(str(root.val))\n traversal(root.left)\n traversal(root.right)\n return\n \n traversal(root)\n return ' '.join(data)", "def get_tree(self, *args):\n level_representation = \"--\"\n if self.level == 0:\n node = \"| \"\n else:\n node = \"+ \"\n _tree_structure = node + level_representation * self.level\n return _tree_structure", "def dump_tree(self) -> str:\n return utils.dump_tree(self._tree)", "def serialize(obj: TreeNode) -> str:\n operators = set(\",:_;()[]\")\n current_depth = 0\n nodes_left = [(obj, 0)]\n fh = ''\n while len(nodes_left) > 0:\n entry = nodes_left.pop()\n node, node_depth = entry\n if node.children and node_depth >= current_depth:\n fh += '('\n nodes_left.append(entry)\n nodes_left += ((child, node_depth + 1) for child in\n reversed(node.children))\n current_depth = node_depth + 1\n else:\n if node_depth < current_depth:\n fh += ')'\n current_depth -= 1\n\n # Note we don't check for None because there is no way to represent\n # an empty string as a label in Newick. 
Therefore, both None and ''\n # are considered to be the absence of a label.\n lblst = []\n if node.support is not None: # prevents support of NoneType\n lblst.append(str(node.support))\n if node.name: # prevents name of NoneType\n lblst.append(node.name)\n label = ':'.join(lblst)\n if label:\n escaped = \"%s\" % label.replace(\"'\", \"''\")\n if any(t in operators for t in label):\n fh += \"'\"\n fh += escaped\n fh += \"'\"\n else:\n fh += escaped.replace(\" \", \"_\")\n if nodes_left and nodes_left[-1][1] == current_depth:\n fh += ','\n\n fh += ';\\n'\n return fh", "def __repr__(self: 'UnaryTree') -> str:\n return 'UnaryTree({}, {})'.format(\n repr(self.symbol), repr(self.children[0]))", "def print_tree(self):\n return \"\"", "def serialize(self, root):\n\n def post_order(root):\n return post_order(root.left) + post_order(root.right) + [root.val] if root else []\n\n return ' '.join(map(str, post_order(root)))", "def serialize(self, root):\n if root is None: return \"\"\n if root.left is None and root.right is None: return str(root.val)\n l = self.serialize(root.left)\n r = self.serialize(root.right)\n return str(root.val) + \"(\" + l + \",\" + r + \")\"", "def serialize(self, root):\n res = []\n\n def pre_order(root):\n if not root:\n return None\n\n res.append(root.val)\n pre_order(root.left)\n pre_order(root.right)\n\n pre_order(root)\n\n return ' '.join(map(str, res))", "def __str__(self):\n\t\tself._synchronize_attributes()\n\t\ts = \"\"\n\t\tqueue = c3.Queue()\n\t\tlevel = 0\n\t\tqueue.enqueue((1, self._root))\n\t\twhile queue.peek():\n\t\t\tnodelev, node = queue.dequeue()._data\n\t\t\tif (not node):\n\n\t\t\t\t#NODE IS NOT THERE - just a placeholder\n\t\t\t\t#print spacing and enqueue fake left and right children\n\t\t\t\t#but stops if they would be past the max depth of the tree\n\t\t\t\tif ((self._depth - nodelev + 1) <= 0):\n\t\t\t\t\tcontinue\n\n\t\t\t\tif (nodelev != level):\n\t\t\t\t\ts += \"\\n\"\n\t\t\t\t\t#PRINT THE INDENT\n\t\t\t\t\tindent = \" \"*int((self._max_chars)*(2**(self._depth - nodelev) - 1))\n\t\t\t\t\ts += indent\n\t\t\t\t\tlevel = nodelev\n\n\t\t\t\t#PRINT THE SPACING\n\t\t\t\ts += \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\n\t\t\t\t#PRINT SPACES TO REPLACE DATA\n\t\t\t\ts += \" \"*self._max_chars\n\n\t\t\t\t#Enqueue fake children\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\t\tcontinue\n\n\t\t\tif (nodelev != level):\n\t\t\t\ts += \"\\n\"\n\t\t\t\t#PRINT THE INDENT\n\t\t\t\tindent = \" \"*(self._max_chars)*(2**(self._depth - nodelev) - 1)\n\t\t\t\ts += indent\n\t\t\t\tlevel = nodelev\n\n\t\t\t#adds preceding \"|\"s if the str length of the data is smaller than the max\n\t\t\tfor i in range(int(self._max_chars - len(str(node.value())))):\n\t\t\t\ts += \"|\"\n\t\t\ts += str(node.value()) \n\n\t\t\t#PRINT THE SPACING\n\t\t\tspacing = \" \"*(self._max_chars)*(2**(self._depth - nodelev + 1) - 1)\n\t\t\ts += spacing\n\n\t\t\t#Enqueues\n\t\t\tif node.lchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.lchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\t\tif node.rchild():\n\t\t\t\tqueue.enqueue((nodelev + 1, node.rchild()))\n\t\t\telse:\n\t\t\t\t#ENQUEUES A FAKE NODE IN ORDER TO FORMAT THE TREE FOR MISSING NODES\n\t\t\t\tqueue.enqueue((nodelev + 1, None))\n\t\ts += \"\\n\"\n\t\treturn s", "def tree2str(self, root: Optional[TreeNode]) -> str:\n if not root:\n return \"\"\n if not root.left and not 
root.right:\n return str(root.val)\n if not root.right:\n return f'{root.val}({self.tree2str(root.left)})'\n return f'{root.val}({self.tree2str(root.left)})({self.tree2str(root.right)})'", "def serialize(self, root):\n if not root:\n return ''\n\n inOrd = []\n self._inorderTrav(root, inOrd)\n\n preOrd = []\n self._preorderTrav(root, preOrd)\n\n return ' '.join(inOrd) + '#' + ' '.join(preOrd)", "def __repr__(self):\n lines = []\n nodes = [(self.root, 0)]\n while nodes:\n node, indent = nodes.pop()\n name = str(node) if node else 'None'\n lines.append(' ' * indent + name)\n if node:\n nodes.append((node.child[True], indent + 1))\n nodes.append((node.child[False], indent + 1))\n\n return os.linesep.join(lines)", "def serialize(self, root):\n if not root:\n return ''\n def levorder(root):\n if root is None:\n return []\n re = [str(root.val)]\n q = [root]\n while True:\n temp = []\n for node in q:\n if node.left:\n temp.append(node.left)\n re.append(str(node.left.val))\n if not node.left:\n re.append('None')\n if node.right:\n temp.append(node.right)\n re.append(str(node.right.val))\n if not node.right:\n re.append('None')\n if not temp:\n return re\n q = temp\n ls = levorder(root)\n return '~'.join(ls)", "def serialize(self, root: TreeNode) -> str:\n\t\tif not root:\n\t\t\treturn '_'\n\t\telse:\n\t\t\tleft_repr = f'({self.serialize(root.left)})' if root.left else '_'\n\t\t\tright_repr = f'({self.serialize(root.right)})' if root.right else '_'\n\t\t\treturn f'{root.val} {left_repr} {right_repr}'", "def get_tree_string(self, node):\n string = \"\"\n for child in sorted(node.children):\n string += node.depth * \"\\t\"\n if node.depth > 0:\n string += \"|\"\n string += node.feature + \"=\" + child\n if node.children[child].is_leaf:\n string += \":\" + node.children[child].pred + \"\\n\"\n else:\n string += \"\\n\" + self.get_tree_string(node.children[child])\n\n return string", "def serialize(self, root: Optional[TreeNode]) -> str:\n if not root: return \"\"\n res = []\n q = collections.deque([root])\n while q:\n node = q.popleft()\n if node:\n res.append(str(node.val))\n q.append(node.left)\n q.append(node.right)\n else:\n res.append(str(-1))\n \n return \",\".join(res)", "def serialize(self, root):\n string = \"\"\n stack = [root]\n while stack:\n node = stack.pop(0)\n if node is None:\n string += \"null,\"\n continue\n else:\n string += f\"{node.val},\"\n\n stack.extend([node.left, node.right])\n\n return f\"[{string[:-1]}]\"", "def serialize(self, root):\n self.nodes_inorder = []\n self.dfs(root)\n res = ' '.join(self.nodes_inorder) ## convert list to string with seperator ' '\n return res", "def tree_str(self, depth_index=0, recursive_dict=None):\r\n if not hasattr(self,'iteritems'): return ''\r\n if recursive_dict is not None: self = TreeMap(recursive_dict)\r\n buff_str = ''\r\n \r\n for item in self.iteritems():\r\n # Starts working now.\r\n k = item[0]\r\n v = item[1]\r\n \r\n spacer = '\\n' + '| ' * depth_index\r\n \r\n if hasattr(v,'iteritems'):\r\n buff_str += spacer + '+--[ ' + k + ' ]'\r\n buff_str += self.tree_str(depth_index=depth_index + 1, recursive_dict=v)\r\n else:\r\n buff_str += spacer + '\\_.--[ ' + str(k) + ' = ' + str(v) + ' ]'\r\n \r\n return buff_str", "def ascii_tree(self, no_types: bool = False) -> str:\n return self.schema._ascii_tree(\"\", no_types)", "def _rotateCounterclockwise(self):\n p, g, l = self.parent, self.parent.parent, self.left\n\n # connect left child to parent\n if l is not None:\n l.parent = p\n p.right = l\n \n # move parent down + left\n p.parent = 
self\n self.left = p\n\n # move this node up + left\n self.parent = g\n if g is not None:\n if p is g.left:\n g.left = self\n elif p is g.right:\n g.right = self", "def serialize(self, root):\n def preorder(root):\n if not root:\n return ['null']\n return [str(root.val)] + preorder(root.left) + preorder(root.right)\n \n # print('[' + ','.join(preorder(root)) + ']')\n return '[' + ','.join(preorder(root)) + ']'", "def get_repr(self, *args):\n level_representation = \"--\"\n if self.level == 0:\n node = \"| \"\n else:\n node = \"+ \"\n _tree_structure = node + level_representation * self.level + ' ' + self.name\n return _tree_structure", "def tree_to_string(tree):\n if type(tree) == Tree:\n return sum(list(map(tree_to_string, tree.children)), [])\n else:\n return [str(tree)]", "def print_tree(self):\n return self.preorder_print(self.root, \"\")[:-1]", "def print_tree(self):\n return self.preorder_print(self.root, \"\")[:-1]", "def serialize(self, root):\n if not root:\n return ''\n #queue = [root]\n # visited = []\n printout = []\n def recurserialize(root):\n if not root:\n printout.append('* ')\n else:\n printout.append(str(root.val)+ ' ')\n recurserialize(root.left)\n recurserialize(root.right)\n recurserialize(root)\n return ''.join(printout)[:-1]", "def serialize1(self, root):\n if not root:\n return \"\"\n \n serial = \"\"\n stack = [root]\n while stack:\n node = stack.pop()\n if not node:\n serial += \"null,\"\n else:\n serial += str(node.val) + ','\n stack.append(node.right)\n stack.append(node.left)\n \n \n print(serial[:-1])\n return serial[:-1]", "def serialize(self, root):\n res = []\n def _dfs(root):\n if root:\n res.append(str(root.val))\n _dfs(root.left)\n _dfs(root.right)\n _dfs(root)\n \n return ''.join(res)", "def serialize(self, root):\n def preorder(node):\n if node:\n vals.append(str(node.val))\n preorder(node.left)\n preorder(node.right)\n vals = []\n preorder(root)\n return ' '.join(vals)", "def __str__(self):\n string = ''\n\n # gets the nodes at each level and puts the values into a string\n for i in range(self.get_height()+1):\n nodes = self.get_nodes_on_level(i)\n level = [str(node.value) if node else '-' for node in nodes]\n string += '{}\\n'.format(' '.join(level))\n\n return string", "def serialize(node, tree=\"\"):\n \n \n if (not node): #Base case\n tree += \"# \"\n return tree\n tree += (str(node.val) + \" \")\n tree = serialize(node.left, tree)\n tree = serialize(node.right, tree)\n\n return tree", "def tree_string(self):\n if self.body is None:\n return 0\n if self.left is None and self.right is None:\n return self.body\n left_sum = self.left.tree_string()\n right_sum = self.right.tree_string()\n if self.body == add:\n return str(left_sum) + '+' + str(right_sum)\n elif self.body == sub:\n return str(left_sum) + '-' + str(right_sum)\n elif self.body == mul:\n return str(left_sum) + '*' + str(right_sum)\n else:\n return str(left_sum) + '/' + str(right_sum)", "def serialize(self, root):\n res = []\n \n def dfs(root):\n if root is None:\n res.append(\"N\")\n return\n res.append(str(root.val))\n dfs(root.left)\n dfs(root.right)\n dfs(root)\n res = \",\".join(res)\n # print(res)\n return res", "def as_tree(self, layer=0, margin_steps=None):\n if not margin_steps:\n margin_steps = int(len(self.cluster_indices) / 1.5)\n margin = \"\\t\" * margin_steps * layer\n tree_string = margin + str(self.cluster_indices)\n if not self.subclusters:\n return tree_string\n tree_string = self.subclusters[0].as_tree(layer + 1, margin_steps) + \"\\n\" + tree_string + \"\\n\"\n 
tree_string += self.subclusters[1].as_tree(layer + 1, margin_steps)\n return tree_string", "def serialize(self, root):\r\n if not root:\r\n return \"#!\"\r\n res = str(root.val)+\"!\"\r\n res +=self.serialize(root.left)\r\n res +=self.serialize(root.right)\r\n return res", "def serialize(self, root):\n if not root: return ''\n stack, res = [root], ''\n while stack:\n node = stack.pop()\n if node:\n tmp = str(node.val) + '!'\n stack.append(node.right)\n stack.append(node.left)\n else:\n tmp = '#!'\n res += tmp\n return res[:-1]", "def __str__(self):\n # Tricky to do iteratively so we do it recursively.\n return BST._str(\"\", self.root)", "def serialize(self, root):\n\n if root is None:\n return \"\"\n curr_lvl=[root]\n next_lvl = []\n ans =[]\n\n while(curr_lvl):\n tmp_ans = \",\".join(str(node.val) if node is not None else \"*\" for node in curr_lvl)\n ans.append(tmp_ans)\n nxt_lvl = []\n for each in curr_lvl:\n if each is not None:\n nxt_lvl.append(each.left) \n nxt_lvl.append(each.right)\n\n curr_lvl=nxt_lvl\n\n return \";\".join(ans)", "def rcontainer_tree_str(obj):\n tree_task = ContainerTreePrintTask()\n the_recurser = ObjectRecursion(tasks=[tree_task])\n return the_recurser.recurse(obj)[0][0]", "def tree_repr(self):\n if self._tree_repr is None:\n self._tree_repr = self.to_tree(self._prufer_repr[:])\n return self._tree_repr", "def serialize(self, root):\n def traverse(root):\n if root:\n data.append(str(root.val))\n traverse(root.left)\n traverse(root.right)\n else:\n data.append('#')\n data = []\n traverse(root)\n return ' '.join(data)", "def rotate(self,isLeft,node):\n parent = node.parent\n parentLeft = False\n if parent != None and parent.left == node:\n parentLeft = True\n finalNode = node.right\n if isLeft:\n finalNode = node.right\n oldLeft = finalNode.left\n finalNode.left = node\n finalNode.parent = node.parent\n node.parent = finalNode\n node.right = oldLeft\n if oldLeft != None:\n oldLeft.parent = node\n\n else:\n # print(\"Final node before is: {} \\n \\n\".format(finalNode.value))\n # self.printTree()\n finalNode = node.left\n oldRight = finalNode.right\n finalNode.right = node\n finalNode.parent = node.parent\n node.parent = finalNode\n node.left = oldRight\n if oldRight != None:\n oldRight.parent = node\n #adjusting the parent pointer\n if parent != None:\n if parentLeft:\n parent.left = finalNode\n else:\n parent.right = finalNode\n else:\n #has to be the root\n self.root = finalNode\n #temporarly\n if finalNode.left != None:\n finalNode.left.height = max(self.height(finalNode.left.left), self.height(finalNode.left.right)) + 1\n if finalNode.right != None:\n finalNode.right.height = max(self.height(finalNode.right.left), self.height(finalNode.right.right)) + 1\n finalNode.height = max(self.height(finalNode.left), self.height(finalNode.right)) + 1\n if parent != None:\n parent.height = max(self.height(parent.left), self.height(parent.right)) + 1", "def serialize(self, root):\n if root != None:\n sum_n = 0\n else:\n sum_n = 1\n stack = [root]\n res = ''\n while stack.__len__() != 0 and stack.__len__() != sum_n:\n node = stack.pop(0)\n if node == None:\n sum_n += 1\n res += 'None/'\n stack.append(None)\n stack.append(None)\n else:\n if node.left == None:\n sum_n += 1\n if node.right == None:\n sum_n += 1\n res += str(node.val) + '/'\n stack.append(node.left)\n stack.append(node.right)\n return res", "def serialize(self, root):\n\n def rserialize(root, s):\n if not root:\n s += \"null,\"\n return s\n s += \"{},\".format(root.val)\n s = rserialize(root.left, s)\n s = 
rserialize(root.right, s)\n return s\n\n return rserialize(root, \"\")", "def serialize(self, root: TreeNode) -> str:\n \"\"\"\n Compared to regular binary trees, take advantage of\n BST's structure to skip marking empty children, instead,\n locate its position by comparing the value with node and \n parent. In preorder, the next node could be left child or \n right child or parent's right child, depending on its value.\n \"\"\"\n if root is None:\n return \"\"\n \n ret = str(root.val)\n if root.left:\n ret += \",\" + self.serialize(root.left)\n if root.right:\n ret += \",\" + self.serialize(root.right)\n \n return ret", "def __str__(self):\n\n\t\tif not self.root:\n\t\t\treturn str([])\n\n\t\tQ = [self.root]\n\t\tvals = []\n\t\twhile Q:\n\t\t\tnode = Q.pop(0)\n\t\t\tif node:\n\t\t\t\tvals.append(node.val)\n\t\t\t\tQ.append(node.left)\n\t\t\t\tQ.append(node.right)\n\t\t\telse:\n\t\t\t\tvals.append(None)\n\t\treturn str(vals)", "def __repr__(self: 'BinaryTree') -> str:\n return 'BinaryTree({}, {}, {})'.format(repr(self.symbol), \n repr(self.children[0]), \n repr(self.children[1]))", "def serialize(self, root):\n if not root:\n return '#!'\n res = ''\n q = [root]\n while q:\n node = q.pop(0)\n if node:\n res += str(node.val) + '!'\n q.append(node.left)\n q.append(node.right)\n else:\n res += '#!'\n return res", "def serialize(self, root: TreeNode) -> str:\n res = []\n\n def walk(node):\n if node is None:\n res.append(\"null\")\n else:\n res.append(node.val)\n walk(node.left)\n walk(node.right)\n\n walk(root)\n\n return \"[\" + \",\".join(str(val) for val in res) + \"]\"", "def __str__(self):\n stubs = ['' for _ in range(self.nChildren())]\n label = dist = ''\n for i in range(self.nChildren()):\n stubs[i] = str(self.children[i])\n if self.dist or self.dist == 0.0:\n dist = ':' + str(self.dist)\n if self.label != None:\n label = str(self.label)\n if self.nChildren() == 0:\n return label + dist\n else:\n stubstr = '('\n for i in range(len(stubs) - 1):\n stubstr += stubs[i] + ','\n return stubstr + stubs[-1] + ')' + label + dist\n # there is no label\n '''\n if not self.left and self.right:\n return ',' + right\n elif self.left and not self.right:\n return left + ','\n elif self.left and self.right:\n return '(' + left + ',' + right + ')' + dist\n '''", "def print_cr_tree(self, tree):\n str = ''\n try:\n if not tree: return \"None\"\n else:\n for x in tree: str += \" \" + x.name\n except TypeError: return tree.name\n return str", "def print_tree(self):\n tree_list = self.traverse_tree(self.root, 0, [])\n depth = self.get_depth(self.root, 0)\n\n for i in range(depth - 1):\n for j in range(len(tree_list[i])):\n if tree_list[i][j] is None:\n tree_list[i + 1].insert(2 * j, None)\n tree_list[i + 1].insert(2 * j + 1, None)\n\n tree_matrix = [['|' for i in range(2 ** depth - 1)] for j in range(depth)]\n for i in range(depth):\n for j in range(len(tree_list[i])):\n if tree_list[i][j] is not None:\n tree_matrix[i][2 ** (depth - i - 1) - 1 + j * 2 ** (depth - i)] = tree_list[i][j]\n return tree_matrix", "def serialize(self, root):\n if(not root) :\n return \"X\"\n else :\n return \",\".join([str(root.val), self.serialize(root.left), self.serialize(root.right)])", "def nltk_to_qtree(tree):\n return \"\\\\\"+\"Tree {}\".format(_compute_repr(tree))", "def get_tree(self):\n tn, td = self.__get_graph(self.path)\n tree = [[os.sep, -1, os.sep]]\n tree.extend(self.__get_node(0, tn, td, os.sep))\n return tree", "def serialize(node):\r\n serial = node.val \r\n\r\n if node.left or node.right:\r\n serial += 
r'('\r\n\r\n if node.left:\r\n serial += serialize(node.left)\r\n \r\n serial += r'|' \r\n \r\n if node.right:\r\n serial += serialize(node.right)\r\n \r\n serial += r')'\r\n\r\n return serial", "def serialize(self, root):\n def preorder(root):\n if root:\n val.append(root.val)\n preorder(root.left)\n preorder(root.right)\n else:\n val.append(\"*\")\n\n val = []\n preorder(root)\n return ','.join(map(str, val))", "def serialize(self, root):\n if root is None:\n return self.NULL\n return \"(%s,%s,%s)\" % (root.val, self.serialize(root.left), self.serialize(root.right))", "def showOrdered(self):\n pybtlib.showTree.restype = None\n pybtlib.showTree.argtypes = [ctypes.POINTER(Tree)]\n return pybtlib.showTree(ctypes.byref(self))", "def __str__(self):\n return self._tree.toString()", "def serialize(self, root):\n if not root:\n return \"\"\n res = [str(root.val)]\n queue = [root]\n while queue:\n node = queue.pop(0)\n if node.left:\n queue.append(node.left)\n res.append(str(node.left.val))\n else:\n res.append('#')\n if node.right:\n queue.append(node.right)\n res.append(str(node.right.val))\n else:\n res.append('#')\n print res\n return \",\".join(res)", "def __repr__(self):\n r = '> '\n r += 'depth= ' + str(self.depth)\n r += ' | ' + str(self)\n return r", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def __str__(self):\n pieces = [] # sequence of piecewise strings to compose\n self._parenthesize_recur(self.root(), pieces)\n return ''.join(pieces)", "def serialize(self, root: TreeNode) -> str:\n if root is None:\n return '[]'\n # res_list: List[Union[int, None]]\n res: str = '['\n # queue: List[Union[TreeNode, None]]\n queue: List = [root]\n i = 0\n while i < len(queue):\n curr = queue[i]\n if curr is not None:\n res += f'{curr.val},'\n if curr.left is not None:\n queue.append(curr.left)\n else:\n queue.append(None)\n if curr.right is not None:\n queue.append(curr.right)\n else:\n queue.append(None)\n else:\n res += 'null,'\n i += 1\n res = res[:-1]\n res += ']'\n return res", "def printTree(self):\n\t\tprint 'Tree:'\n\t\tprint self.root.toString(0)\n\t\tprint '\\n'", "def _RL_rotate(self, node: TreeNode) -> None:\n A = node\n B = A.right\n C = B.left\n # Update parents\n if A.parent:\n if A.parent.left == A:\n A.parent.left = C\n else:\n A.parent.right = C\n else:\n self.root = C\n\n if C.right:\n C.right.parent = B\n if C.left:\n C.left.parent = A\n B.parent, A.parent, C.parent = C, C, A.parent\n\n # Rotation\n A.right, B.left, C.right, C.left = C.left, C.right, B, A\n\n A.height = self.count_height(A)\n B.height = self.count_height(B)\n C.height = self.count_height(C)\n\n self._inspect_changes(C.parent)", "def serialize(self, root):\n if root is None:\n return '[]'\n nodes = [root]\n i = 0\n while i < len(nodes):\n if nodes[i]:\n nodes.append(nodes[i].left)\n nodes.append(nodes[i].right)\n i += 1\n \n while nodes[-1] is None:\n nodes.pop()\n \n return '[' + ','.join(str(node.val) if node else 'null' for node in nodes) + ']'", "def __repr__(self):\n return show_tree(self, lambda node: node.name,\n lambda node: node.children)", "def serialize(self, root):\n if not root:\n return \"\"\n\n queue = deque()\n queue.append(root)\n\n result = ''\n\n while queue:\n node = queue.popleft()\n\n if node:\n result += str(node.val) + \",\"\n queue.append(node.left)\n queue.append(node.right)\n\n else:\n result += \"#,\"\n\n # remove the last ','\n result = result[:-1]\n return result", "def 
__repr__(self):\n string = \"ROOT: {}\\n\".format(self.root)\n string += \"roots: \"+\", \".join(str(x) for x in self.roots())+'\\n'\n for node in self:\n string += str(node)\n if node.incoming:\n string += \" ({})\\n\".format(\" \".join(str(x[1].nodeid) for x in node.incoming))\n else:\n string += \" (root)\\n\"\n for label, child in sorted(node.outgoing, key=lambda x:x[1].nodeid):\n string += \" {} {}\".format(child.nodeid, label) + '\\n'\n string += \"undirected:\\n\"\n for pair in self.undirected:\n string += \" {}\\n\".format(pair)\n return string", "def serialize(self, root: 'Node') -> str:\n res = []\n self._preorder(root, res)\n return ' '.join(res)", "def serialize(self, root):\n result = []\n def dfs(root):\n if root == None:\n result.append(\"null\")\n return\n result.append(str(root.val))\n dfs(root.left)\n dfs(root.right)\n dfs(root)\n return ','.join(result)", "def serialize_preorder(self, root):\n SEP, NULL = ',', '#'\n string = ''\n def traverse(root):\n nonlocal string\n if not root:\n string += (NULL + SEP)\n return\n # 前序遍历位置\n string += (str(root.val) + SEP) # 根\n traverse(root.left) # 左\n traverse(root.right) # 右\n traverse(root)\n return string", "def tree(self):\n return self.to_geom()", "def __repr__(self: 'StarTree') -> str:\n return 'StarTree({})'.format(repr(self.children[0]))", "def print_tree(self):\n\t\tprint(self.__print_tree('', True, ''))", "def repr_tree(tree, viz, current_node, rec_depth, color_map, parameters):\r\n for child in tree.children:\r\n if child.operator is None:\r\n viz.attr('node', shape='box', fixedsize='true', width=\"2.5\",\r\n fontsize=\"8\")\r\n this_trans_id = str(uuid.uuid4())\r\n if child.label is None:\r\n viz.node(this_trans_id, \"tau\", style='filled', fillcolor='black')\r\n else:\r\n node_color = get_color(child, color_map)\r\n viz.node(this_trans_id, str(child), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, this_trans_id)\r\n else:\r\n condition_wo_operator = child.operator == pt_operator.Operator.XOR and len(\r\n child.children) == 1 and child.children[0].operator is None\r\n if condition_wo_operator:\r\n childchild = child.children[0]\r\n viz.attr('node', shape='box', fixedsize='true', width=\"2.5\",\r\n fontsize=\"8\")\r\n this_trans_id = str(uuid.uuid4())\r\n if childchild.label is None:\r\n viz.node(this_trans_id, str(childchild), style='filled', fillcolor='black')\r\n else:\r\n node_color = get_color(childchild, color_map)\r\n viz.node(this_trans_id, str(childchild), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, this_trans_id)\r\n else:\r\n viz.attr('node', shape='circle', fixedsize='true', width=\"0.6\",\r\n fontsize=\"14\")\r\n op_node_identifier = str(uuid.uuid4())\r\n node_color = get_color(child, color_map)\r\n viz.node(op_node_identifier, str(child.operator), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, op_node_identifier)\r\n viz = repr_tree(child, viz, op_node_identifier, rec_depth + 1, color_map, parameters)\r\n return viz", "def serialize(self, root):\n def rserialize(root, string):\n \"\"\" a recursive helper function for the serialize() function.\"\"\"\n # check base case\n if root is None:\n string += 'None,'\n else:\n string += str(root.val) + ','\n string = rserialize(root.left, string)\n string = rserialize(root.right, string)\n return string\n \n return rserialize(root, '')", "def serialize(self, root):\n ret = []\n ret.append('(')\n if root is not None:\n ret.append(str(root.val))\n ret.append(self.serialize(root.left))\n 
ret.append(self.serialize(root.right))\n ret.append(')')\n return ''.join(ret)", "def __repr__(self) ->str:\n # Our __repr__ is recursive, because it can also be called\n # via repr...!\n return ('{}({}, {})'.format(self.__class__.__name__,\n repr(self.value),\n repr(self.children))\n if self.children\n else 'Tree({})'.format(repr(self.value)))" ]
[ "0.7079702", "0.7079702", "0.6723897", "0.6693969", "0.6500883", "0.6468781", "0.6459211", "0.64515245", "0.64155436", "0.64153516", "0.64048725", "0.6403809", "0.6297127", "0.62846375", "0.6280475", "0.62751967", "0.6263699", "0.6237177", "0.6221739", "0.6201274", "0.6197721", "0.61934394", "0.6180851", "0.6171909", "0.61672914", "0.61630684", "0.61601865", "0.6160127", "0.6155485", "0.6146115", "0.61365914", "0.61252725", "0.61177045", "0.6108716", "0.60978585", "0.6097646", "0.60910654", "0.6077338", "0.606705", "0.6063289", "0.6062593", "0.60598975", "0.60588175", "0.60588175", "0.60518503", "0.6045475", "0.60438067", "0.602745", "0.60253453", "0.59875995", "0.59698784", "0.5957324", "0.5957012", "0.5955581", "0.5954437", "0.594996", "0.59396154", "0.59236044", "0.5923538", "0.59126014", "0.5897092", "0.5887711", "0.5883103", "0.5881996", "0.5880619", "0.5854283", "0.58542436", "0.5853936", "0.58538365", "0.5853256", "0.5827685", "0.5819899", "0.5809246", "0.5795265", "0.57951075", "0.57837504", "0.5780074", "0.5764869", "0.5749922", "0.57475024", "0.5744417", "0.5735193", "0.5734751", "0.57330304", "0.573273", "0.5727662", "0.5722137", "0.571758", "0.57152736", "0.5711622", "0.57110435", "0.57102805", "0.5700308", "0.56944674", "0.56939566", "0.5691941", "0.5689052", "0.5687406", "0.5668582", "0.5665228" ]
0.62242943
18
Returns all possible velocity dispersons from all particles found in the data set. A particle filter can be passed using "filter" which is a list
def compute_velocity_dispersion(data, types = None, fields = None, filter = None): types_to_fields = {'x': 'particle_velocity_x', 'y': 'particle_velocity_y', 'z': 'particle_velocity_z', 'r': 'particle_velocity_spherical_radius', 'theta': 'particle_velocity_spherical_theta', 'phi': 'particle_velocity_spherical_phi'} if types is None and fields is None: fields = types_to_fields.values() keys = types_to_fields.keys() elif fields is None: fields = [ types_to_fields[x] for x in types ] keys = types else: keys = fields dispersion = {} for i,x in enumerate(fields): if filter is not None: v = data[x][filter] else: v = data[x] if np.size(v) == 0: dispersion[keys[i]] = 0.0 else: dispersion[keys[i]] = vel_dispersion( v.convert_to_units('km/s') ) return dispersion
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_velocities(self):\n\n return np.array([p.velocity for p in self.particles])", "def particle_forceV(R,N,sigma,epsilon,D):\n F = np.zeros((3,N))\n x = np.zeros(N-1)\n y = np.zeros(N-1)\n z = np.zeros(N-1)\n r = np.zeros(N-1)\n # loop over all particles\n for i in range(N):\n # Distances for x,y,z between particles\n x = R[0,np.arange(N)!=i]-R[0,i]\n y = R[1,np.arange(N)!=i]-R[1,i]\n z = R[2,np.arange(N)!=i]-R[2,i]\n [x,y,z] = minimal_image(x,y,z,D)\n c = np.stack((x,y,z))\n r = np.sqrt(np.sum(c**2,0))\n a = (c*4*(sigma/epsilon)*(12/r**14-6/r**8))\n F[:,i] = -np.sum(a,1)\n return F", "def particle_filter(particle_set_t, measurement_t):\n global count\n n_samples, dim = particle_set_t.shape # no of particles and dimension of each particle\n\n pred_state = np.zeros((n_samples, dim), dtype=\"float64\") # store the predicted state \n weights = np.zeros(n_samples, dtype=\"float64\") # corresponding weights for resampling\n\n particle_set_t1 = np.zeros((n_samples, dim), dtype=\"float64\") # next iteration of particles\n\n\n # this loop calculates \\bar{X_t}, i.e. the predicted belief.\n for n in range(n_samples):\n # predicted motion step:\n xn_t1 = sample_motion_model(particle_set_t[n]) # 3x1 vector: hypothetical state\n\n # measurement correction step:\n weight_xn_t1 = state_likelihood(measurement_t, xn_t1) # scalar value\n\n pred_state[n] = xn_t1\n weights[n] = weight_xn_t1\n\n \n # It was observed that if all weights are 0, the resampling step breaks. \n # Thus, adding a uniform distribution. This is obviously a very bad idea \\ \n # as the true state can easily be discarded in the resampling step: TODO!\n if np.sum(weights) > 0.0:\n weights = weights/np.sum(weights) # normalize array only when sum in not 0\n else:\n print(\"possile divergence!\")\n weights[:] = 1 / n_samples # if sum is 0 then assign uniform distribution throughout\n\n\n # the resampling step:\n # indices = monte_carlo.residual_resample(weights)\n indices = monte_carlo.stratified_resample(weights)\n count += 1\n print(count)\n\n # new particle set is particles at index locations\n for i, index in enumerate(indices):\n particle_set_t1[i] = pred_state[index]\n\n return particle_set_t1", "def filter(self):\n self.filter_means = [self.m_0]\n self.filter_covs = [self.P_0]\n self.marginal_covs = []\n for t in range(self.data.shape[0]):\n m_bar, P_bar = self.one_step_prediction(self.filter_means[-1], self.filter_covs[-1])\n\n # Update step\n y = self.data[t]\n if not np.isnan(y).any():\n v = y[:, None] - self.observation_matrix @ m_bar\n S = self.observation_matrix @ P_bar @ self.observation_matrix.T + self.observation_cov\n K = P_bar @ self.observation_matrix.T @ np.linalg.inv(S)\n\n m_bar = m_bar + K @ v\n P_bar = P_bar - K @ S @ K.T\n\n self.marginal_covs.append(S)\n\n self.filter_means.append(m_bar)\n self.filter_covs.append(P_bar)\n self.filter_means = self.filter_means[1:]\n self.filter_covs = self.filter_covs[1:]", "def ParticleFilterParams(fix_params=False):\n\n ## Particle filter parameters\n\n # Q_c will be the time continuous covariance matrix. 
\n #This should be the errors in the model.\n # in the form [x_cov, y_cov, z_cov, \n # vel_x_cov, vel_y_co, vel_z_cov, \n # mass_cov, \n # sigma_cov, shape_cov, brightness_cov, tau_cov]\n \n\n Q_c = [10., 2., 2., \n 150., 50., 50., \n 5., 0, 0,\n 1e-3, 1e-10, 0., 0.0001]\n\n\n print('Qc values used:', Q_c)\n\n Q_c = np.asarray([i**2 for i in Q_c])\n\n \n # Q_c_frag is used at reinitialisation if the fragmentation option is used\n \n Q_c_frag = [0., 0., 0., \n 0.02, 0.02, 0.02, \n 0.5, 0, 0,\n 2e-3, 5e-9, 0., 0.]\n\n Q_c_frag = [i**2 for i in Q_c_frag]\n\n ## P: starting uncertainty to initialise gaussian spread of particals. \n ## P2: starting uncertainty at reinitialisation if the fragmentation option is used\n ## in the form [x_cov, y_cov, z_cov, % of vel_x_cov, % of vel_y_co, % of vel_z_cov]\n P = [50., 50., 50., 250., 250., 250.]\n P2 = [50., 50., 50., 250., 250., 250.]\n\n ## Initialise state ranges\n\n\n ## shape parameter close to a rounded brick (1.8) (A for a sphere =1.21)\n A_min = 1.21\n A_max = 3.0 \n\n ## luminosity coefficient\n tau_min = 0.0001\n tau_max = 0.1\n\n ## lists of typical meteorite densities for different types. [chond, achond, stony-iron, iron, cometary]\n pm_mean = [3000, 3100, 4500, 7500, 850]\n pm_std = [420, 133, 133, 167, 117 ]\n\n ## to choose density values according to a distribution of meteorite percentages:\n particle_choices = []\n\n # this is created using lines 257-266; uncomment if needs changing.\n random_meteor_type = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 4, 4]\n\n #random_meteor_type = []\n #for i in range(80): # 80 % Chondrites\n # random_meteor_type.append(0)\n #for i in range(11): # 11 % Achondrites\n # random_meteor_type.append(1)\n #for i in range(2):\n # random_meteor_type.append(2) # 2 % Stony-Iron\n #for i in range(5):\n # random_meteor_type.append(3) # 5 % iron\n #for i in range(2):\n # random_meteor_type.append(4) # 2 % cometary\n\n ## ablation coefficeint \n #sigma_min = 0.001*1e-6\n #sigma_max = 0.5*1e-6\n\n\n #range_params = [m0_max, A_mean, A_std, pm_mean, pm_std, random_meteor_type, cd_mean, cd_std, sigma_min, sigma_max, K_min, K_max, tau_min, tau_max]\n range_params = [A_min, A_max, pm_mean, pm_std, random_meteor_type, tau_min, tau_max]\n\n if fix_params:\n \tQ_c[-4:] = [0., 0., 0., 0.]\n \tQ_c_frag[-4:] = [0., 0., 0., 0.]\n return Q_c, Q_c_frag, P, range_params", "def getDifferentialFlowDataForAllEvents(self, particleName=\"pion\", order=2, pT_range=None, where=\"\", orderBy=\"event_id\"):\n pid = self._pid(particleName)\n whereClause = \"pid=%d and n=%d\" % (pid, order)\n if pT_range:\n whereClause += \" and %g<=pT and pT<=%g\" % (pT_range[0], pT_range[1])\n if where:\n whereClause += \" and \" + where\n RawdiffvnData = np.asarray(self.db.selectFromTable(\"diff_vn\", (\"pT\", \"vn_real\", \"vn_imag\"), whereClause=whereClause, orderByClause=orderBy))\n #nevent = self.getNumberOfEvents()\n nevent = self.db.selectFromTable(\"multiplicities\", \"count()\", \"pid = %d\" % pid)[0][0]\n npT = len(RawdiffvnData[:,0])/nevent\n diffvnData = RawdiffvnData.reshape(nevent, npT, 3)\n return diffvnData", "def init_particle_filter(self, motion_prior, n_p):\n # Define necessary components for the particle filter\n if motion_prior['mode'] == 'PositionDiffusion':\n # Diffusion\n dc_infer = 
motion_prior['dc']\n d_h = 2 # Dimension of hidden state (i.e. x,y = 2 dims)\n sdev = np.sqrt(dc_infer * self.dt / 2) * np.ones((d_h,))\n ipd = pf.GaussIPD(d_h, self.n_n, sdev * 0.001)\n tpd = pf.GaussTPD(d_h, self.n_n, sdev)\n ip = pf.GaussIP(d_h, sdev * 0.001)\n tp = pf.GaussTP(d_h, sdev)\n lp = PoissonLP(self.n_n, d_h, self.tc.spike_energy)\n\n elif motion_prior['mode'] == 'VelocityDiffusion':\n # FIXME: save these params\n d_h = 4 # Hidden state dim, x,y,vx,vy\n\n v0 = motion_prior['v0'] # Initial Estimate for velocity\n dcv = motion_prior['dcv'] # Velocity Diffusion Constant\n st = np.sqrt(dcv * self.dt)\n adj = np.sqrt(1 - st ** 2 / v0 ** 2)\n\n eps = 0.00001 # Small number since cannot have exact zero\n sigma0 = np.array([eps, eps, v0, v0]) # Initial sigmas\n sigma_t = np.array([eps, eps, st, st]) # Transition sigmas\n\n # Transition matrix\n a = np.array([[1, 0, self.dt, 0],\n [0, 1, 0, self.dt],\n [0, 0, adj, 0],\n [0, 0, 0, adj]])\n\n ipd = pf.GaussIPD(d_h, self.n_n, sigma0)\n tpd = pf.GaussTPD(d_h, self.n_n, sigma_t, A=a)\n ip = pf.GaussIP(d_h, sigma0)\n tp = pf.GaussTP(d_h, sigma_t, A=a)\n lp = PoissonLP(self.n_n, d_h, self.tc.spike_energy)\n # Note trick where PoissonLP takes 0,1 components of the\n # hidden state which is the same for both cases\n\n else:\n raise ValueError(\n 'Unrecognized Motion Prior ' + str(motion_prior))\n\n r = np.zeros((self.n_n, self.n_t)).astype('float32')\n return pf.ParticleFilter(\n ipd, tpd, ip, tp, lp, r.transpose(), n_p)", "def k_particles(chosen_particle, positions, velocities):\n\n\n # array with all indecies of all k particles for positions\n positions_k = []\n velocities_k = []\n\n # array of new distances considering boundary conditions\n new_distances = []\n\n # check over all particles in positions\n for index in range(N):\n\n distance_x, distance_y = per_boun_distance(chosen_particle, positions[index])\n\n # distance from selected particle to particle with index\n d = np.sqrt(distance_x**2 + distance_y**2)\n\n # append this distance to array of distances\n new_distances.append(d)\n\n # Now we need a sorting algorithm (merge)\n for j in range(k+1):\n low = min(new_distances)\n\n index_k = new_distances.index(low)\n\n # get the index of the particle for velocity\n velocities_k.append(velocities[index_k])\n\n # get the index of the particle for position\n # and add position to all positions within r\n positions_k.append(positions[index_k])\n\n new_distances.pop(index_k)\n\n return velocities_k, positions_k", "def particle_velocityV(V,F,dt,Rv,sigma,epsilon,D,N): \n V += dt/2*(particle_forceV(Rv[-1], N, sigma, epsilon, D) + particle_forceV(Rv[-2], N, sigma, epsilon, D))\n return V", "def _FilterProtonsAndElectrons(self):\n self.reactants = filter(lambda c: c.compound.kegg_id not in \n ['C00080', 'C05359'], self.reactants)", "def create_particles(self):\n # xf, yf = create_fluid_with_solid_cube()\n xf, yf = create_fluid()\n uf = np.zeros_like(xf)\n vf = np.zeros_like(xf)\n m = initialize_mass(xf, yf)\n rho = initialize_density_fluid(xf, yf)\n h = np.ones_like(xf) * self.hdx * self.dx\n fluid = get_particle_array_wcsph(x=xf, y=yf, h=h, m=m, rho=rho, u=uf,\n v=vf, name=\"fluid\")\n\n xt, yt = create_boundary(self.dx / 2.)\n ut = np.zeros_like(xt)\n vt = np.zeros_like(xt)\n m = np.ones_like(xt) * 1500 * self.dx * self.dx\n rho = np.ones_like(xt) * 1000\n h = np.ones_like(xt) * self.hdx * self.dx / 2.\n tank = get_particle_array_wcsph(x=xt, y=yt, h=h, m=m, rho=rho, u=ut,\n v=vt, name=\"tank\")\n\n return [fluid, tank]", "def 
getTallyParticles(self):\n\n\t\tparticleNames = []\n\n\t\tif self.typeNumber > 0:\n\t\t\tparticleNames.append(particleListShort[self.typeNumber]) \n\t\telse:\n\t\t\tfor i,name in enumerate(self.particleList):\n\t\t\t\ttry:\n\t\t\t\t\tif self.tallyParticles[i] == 1:\n\t\t\t\t\t\tparticleNames.append(self.particleList[i])\n\t\t\t\texcept:\n\t\t\t\t\tpass # For some reasons there can be less than 35 particles listed. Skip in case.\n\t\treturn particleNames", "def new_plummer_distribution(number_of_particles, \n total_mass = 1.0|nbody_system.mass, \n virial_radius = 1.0|nbody_system.length,\n mass_cutoff = 0.999,\n G = None,\n **keyword_arguments): # optional arguments for UniformSphericalDistribution\n particles = new_plummer_spatial_distribution(number_of_particles, total_mass=total_mass, \n virial_radius=virial_radius, **keyword_arguments)\n \n if G is None:\n G = nbody_system.G if generic_unit_system.is_generic_unit(total_mass.unit) else constants.G\n velocity_unit = (G*total_mass/virial_radius).sqrt().unit.base_unit()\n plummer_radius = 0.1875 * numpy.pi * virial_radius\n \n escape_velocity = (1 + particles.position.lengths_squared()/plummer_radius**2)**(-0.25) | velocity_unit\n velocity = escape_velocity * sample_from_velocity_distribution(number_of_particles)\n velocity *= numpy.sqrt((G*total_mass*number_of_particles) / (2*virial_radius*velocity.length_squared()))\n particles.velocity = velocity.reshape((-1,1)) * random_direction(number_of_particles)\n return particles", "def particle_filter(\n h_obs,\n n_particles = 10000,\n dt = 0.1,\n gen = SEIHR_generator(100000),\n particles = None,\n s_min = None\n):\n \n # Get the list of particles to fit\n if particles is not None:\n n_particles = len(particles)\n t_prev = particles[0].t\n if any([t_ <= t_prev for t_, y in h_obs]):\n warnings.warn(\"Dropping observations with time below current simulation time\")\n h_obs = [(t_, y) for t_, y in h_obs if t_ > t_prev]\n \n particles = [p.copy() for p in particles]\n \n else:\n particles = [\n gen.generate(i) for i in range(n_particles)\n ]\n t_prev = 0\n \n if s_min is None:\n s_min = n_particles*0.9\n \n #Loop over observations\n for t, y in h_obs:\n \n #Using list comprehension; for each particle, step forward to the next evaluation point\n #and update weights based on the eval_value, which need not be atomic\n tmp_particles = [\n x.step(t, i, dt = dt, eval_value = y) for x, i in zip(particles, range(len(particles)))\n ]\n \n #Harvest the weights from all particles, then normalize to add to 1\n pre_weights = [x.w_t for x in tmp_particles]\n weights = np.array(pre_weights)/np.sum(pre_weights)\n s_eff = 1/np.sum(weights**2)\n \n for x, w in zip(tmp_particles, weights):\n x.w_t = w\n \n print(s_eff)\n \n #If effective sample size is below minimum, then perform resampling\n if s_eff < s_min:\n \n particles = np.random.choice(\n tmp_particles,\n p = weights, \n replace = 1, \n size = len(particles)\n )\n \n for x in particles:\n x.w_t = 1/len(particles)\n \n else:\n particles = tmp_particles\n \n return particles", "def get_velocity(self):\n\n vs = []\n pairs = [(-2, -1), (-3, -1), (-3, -1)]\n\n for i1, i2 in pairs:\n f1 = self.files[i1]\n p1 = Profile(os.path.join(self.name, f1))\n\n f2 = self.files[i2]\n p2 = Profile(os.path.join(self.name, f2))\n\n # we'll do this by looking at 3 different temperature\n # thresholds and averaging\n T_ref = [2.e9, 3.e9, 4.e9]\n\n for T0 in T_ref:\n x1 = p1.find_x_for_T(T0)\n x2 = p2.find_x_for_T(T0)\n vs.append((x1 - x2)/(p1.time - p2.time))\n\n vs = np.array(vs)\n 
v = np.mean(vs)\n v_sigma = np.std(vs)\n return v, v_sigma", "def sieve_function(raw_data):\n matchers = []\n return_list = []\n\n matchers.append(D1000TemperatureDataParticle.regex_compiled())\n\n for matcher in matchers:\n for match in matcher.finditer(raw_data):\n return_list.append((match.start(), match.end()))\n\n if not return_list:\n log.debug(\"sieve_function: raw_data=%r, return_list=%s\", raw_data, return_list)\n return return_list", "def pipes_velocity(p_list):\n for pipes in p_list:\n pipes.centerx -= 4\n return p_list", "def initializeParticles(self):\n import itertools\n import random\n #create a list of possible ghost permutations, where each of three ghosts can be on any of the legal positions in the boards.\n permutations = list(itertools.product(self.legalIntentions, repeat=self.numAgents))\n \n random.shuffle(permutations)\n p = len(permutations)\n n = self.numParticles\n self.particles = []\n #create the particles\n while n >= p:\n self.particles += permutations\n n -= p\n #add the remainder\n self.particles += permutations[0: n - 1]", "def get_variables(self, z0, u_inf):\n # Get the ambient data from the CTD profile\n Ta, Sa, P = self.profile.get_values(z0, ['temperature', 'salinity',\n 'pressure'])\n rho = seawater.density(Ta, Sa, P)\n \n # Compute the properties of each dispersed-phase particle\n us = np.zeros(len(self.particles))\n rho_p = np.zeros(len(self.particles))\n m_p = np.zeros(len(self.particles))\n B_p = np.zeros(len(self.particles))\n for i in range(len(self.particles)):\n m0 = self.particles[i].m0\n T0 = self.particles[i].T0\n m_p[i] = np.sum(m0) * self.particles[i].nb0\n if m_p[i] > 0.:\n # Particles exist, get properties. Make sure the algorithm \n # uses the dirty bubble properties since this is supposed\n # to be the rise velocity averaged over the whole plume.\n us[i], rho_p[i]= self.particles[i].properties(m0, T0, P, Sa, \n Ta, np.inf)[0:2]\n B_p[i] = (rho - rho_p[i]) / rho * 9.81 * (m_p[i] / rho_p[i])\n else:\n # Particles dissolved, set to ambient conditions\n us[i] = 0.\n rho_p[i] = rho\n B_p[i] = 0.\n \n # Select the correct slip velocity\n u_slip = us[0]\n for i in range(len(self.particles) - 1):\n if B_p[i+1] > B_p[i]:\n u_slip = us[i+1]\n \n # Compute the total buoyancy flux\n B = np.sum(B_p)\n \n # Get the ambient buoyancy frequency\n N = self.profile.buoyancy_frequency(z0)\n \n # Return the governing parameters\n return (B, N, u_slip, u_inf)", "def get_fitness_vector(self):\r\n vector = list()\r\n \r\n for particle in self.population: \r\n vector.append(particle.current_fitness)\r\n \r\n return vector", "def turbulence(self, particles, current_step=0):\n\n for i in range(len(particles)):\n if i % 6 == 0:\n mutated = self.mutator.mutate(particles[i])\n particles[i].vector = copy(mutated.vector)", "def select(self, test):\n survivors = []\n for particle in self.particles:\n # Find the originating particle\n parent = particle\n while parent.origin is not None:\n parent = parent.origin.initial_state[0]\n if test(parent, particle) is True:\n survivors.append(particle)\n return ParticleCollection(survivors)", "def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3],\n mass: dc.float64[N], G: dc.float64):\n # Kinetic Energy:\n # KE = 0.5 * np.sum(np.sum( mass * vel**2 ))\n # KE = 0.5 * np.sum( mass * vel**2 )\n KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2)\n\n # Potential Energy:\n\n # positions r = [x,y,z] for all particles\n x = pos[:, 0:1]\n y = pos[:, 1:2]\n z = pos[:, 2:3]\n\n # matrix that stores all pairwise particle separations: 
r_j - r_i\n # dx = x.T - x\n # dy = y.T - y\n # dz = z.T - z\n # dx = np.transpose(x) - x\n # dy = np.transpose(y) - y\n # dz = np.transpose(z) - z\n dx = np.add.outer(-x, x)\n dy = np.add.outer(-y, y)\n dz = np.add.outer(-z, z)\n\n # matrix that stores 1/r for all particle pairwise particle separations\n inv_r = np.sqrt(dx**2 + dy**2 + dz**2)\n # inv_r[inv_r>0] = 1.0/inv_r[inv_r>0]\n I = inv_r > 0\n np.divide(1.0, inv_r, out=inv_r, where=I)\n\n # sum over upper triangle, to count each interaction only once\n # PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1)))\n # PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1))\n tmp = -np.multiply.outer(mass, mass) * inv_r\n PE = 0.0\n for j in range(N):\n for k in range(j + 1, N):\n PE += tmp[j, k]\n PE *= G\n\n return KE, PE", "def turbulence(self, particles, current_step=0):\n\n for i in range(len(particles)):\n if i % 3 == 0:\n mutated = self.uniform_mutator.mutate(particles[i])\n elif i % 3 == 1:\n mutated = self.non_uniform_mutator.mutate(particles[i], current_step)\n particles[i].vector = copy(mutated.vector)\n return", "def velocities(self, return_np=False):\n if return_np:\n return self.si_values()[3:]\n return [self.v_x, self.v_y, self.v_z]", "def generate_particle_distribution(self, max_loop = np.inf, outfile=None):\n \n self.pos = np.zeros((self.N_part, 3))\n self.vel = np.zeros((self.N_part, 3))\n \n \n F_max = np.max(self.DF.f) ; F_min = np.min(self.DF.f)\n\n n_particles = 0\n loop_counter = 0\n \n if self.optimize:\n relative_potential = self._interpolate_relative_potential\n else:\n relative_potential = self.DF.relative_potential\n \n \n \n # Continue until max number of particles chosen, or until max loop counter\n while ((n_particles < self.N_part) and (loop_counter < max_loop)):\n \n # choose random position, eval potential, choose velocity\n r = self._choose_position()\n \n Psi = relative_potential(r) \n v = self._choose_velocity(r, Psi)\n \n E = Psi - 0.5 * v * v\n\n # interpolate along DF to find f(E) of chosen particle\n f_E = self.DF.interpolate_f(E)\n\n # random number from 0 to F_max for accept reject\n #F = np.random.rand() * F_max\n \n # HOLY CRAP....Fmax - Fmin ... not Fmin - Fmax\n F = 10.0**( np.random.rand()*(np.log10(F_max) - np.log10(F_min)) + np.log10(F_min) )\n \n \n if F <= f_E: # accept particle\n\n \n # convert position to cartesian using random theta and phi\n theta = np.random.rand() * np.pi\n phi = np.random.rand() * 2.0 * np.pi\n \n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n \n # save particle position\n self.pos[n_particles] = r * np.array([x,y,z])\n \n # repeat for velocity using new random numbers\n theta = np.random.rand() * np.pi\n phi = np.random.rand() * 2.0 * np.pi\n \n vx = np.sin(theta) * np.cos(phi)\n vy = np.sin(theta) * np.sin(phi)\n vz = np.cos(theta)\n \n # save particle velocity\n self.vel[n_particles] = v * np.array([vx,vy,vz])\n \n \n n_particles = n_particles + 1\n \n \n if (loop_counter % 5000) == 0:\n _my_print(\"Have %4i particles. 
On loop %6i\"%(n_particles, loop_counter))\n loop_counter = loop_counter + 1\n \n \n if (not outfile == None):\n self.write_pd(outfile)\n \n return self.pos, self.vel", "def particle_initial_velocity(fignr,N,D,T,m,dim,kb):\n V = np.zeros((3,N))\n V[0:dim,:] = np.random.normal(0, kb*T/m, (dim,N))# / np.sqrt(T/(kb*m))\n plotfunctions.velocity(fignr,N,V)\n # Typical speed for particles\n return V", "def filter_sontek(vel_in):\n\n # Identify all samples where the velocity did not change\n test1 = np.abs(np.diff(vel_in, 1, 1)) < 0.00001\n\n # Identify all samples with all zero values\n test2 = np.nansum(np.abs(vel_in), 0) < 0.00001\n test2 = test2[1:] * 4 # using 1: makes the array dimension consistent with test1 as diff results in 1 less.\n\n # Combine criteria\n test_sum = np.sum(test1, 0) + test2\n\n # Develop logical vector of invalid ensembles\n invalid_bool = np.full(test_sum.size, False)\n invalid_bool[test_sum > 3] = True\n # Handle first ensemble\n invalid_bool = np.concatenate((np.array([False]), invalid_bool), 0)\n if np.nansum(vel_in[:, 0]) == 0:\n invalid_bool[0] = True\n\n # Set invalid ensembles to nan\n vel_out = np.copy(vel_in)\n vel_out[:, invalid_bool] = np.nan\n\n return vel_out", "def getInterpretedComplexDifferentialFlowsForAllEvents(self, particleName=\"pion\", order=2, pTs=np.linspace(0,2.5,10), where=\"\", orderBy=\"event_id\", verbose=False):\n diffVnData = self.getDifferentialFlowDataForAllEvents(particleName=particleName, order=order, where=where, orderBy=orderBy)\n diffVnintepBlock = []\n if verbose: print(\"Looping over {} events... (please be patient)\".format(diffVnData.shape[0]))\n for iev in range(diffVnData.shape[0]):\n diffVnintep = np.interp(pTs, diffVnData[iev,:,0], diffVnData[iev,:,1]) + 1j*np.interp(pTs, diffVnData[iev,:,0], diffVnData[iev,:,2])\n diffVnintepBlock.append(diffVnintep)\n if verbose: print(\"Done. Thanks for waiting.\")\n return np.asarray(diffVnintepBlock)", "def verlet(self,t,dt,r0,r1):\r\n r2 = np.zeros([2,self.particles.size])\r\n\r\n MX, MXT = np.meshgrid(r1[0,:],r1[0,:],copy=False)\r\n MY, MYT = np.meshgrid(r1[1,:],r1[1,:],copy=False)\r\n dx = MXT - MX\r\n dx = dx\r\n\r\n dy = MYT - MY\r\n dy = dy\r\n\r\n r2 = np.square(dx)+np.square(dy)\r\n\r\n if(np.round((t/self.dt*dt)%0.5,1) == 0): #JV: every certain amount of steps we update the list\r\n self.close_list = close_particles_list(r2,self.Nlist,self.particles.size,self.param[2]) #JV: matrix that contains in every row the indexs of the m closest particles\r\n\r\n r2 = (2*r1 - r0 + np.transpose(fv(r1[0,:],r1[1,:],dx,dy,r2,t/self.dt,True,self.param[2],self.particles.size,self.U,self.dt,self.close_list,self.Nlist,self.vel_verlet_on,self.R,self.param[3],self.param[4],self.param[5],self.grid,self.G,self.wallcount,self.X2)) * (dt**2))\r\n #The transpose is necessary because I messed up the shapes when I did the fv function.\r\n\r\n #JV: this needs to change if we want to include particles with mass diferent than 1 (in reduced units),\r\n # in other words, diferent particles than the Argon gas\r\n\r\n return r2[0,:],r2[1,:]", "def init_particles(self):\n \n # Each particle is a dimension-K vector. We generate each particle \n # uniformly at random from the space [0,1]^K. 
\n self.Particles = np.random.uniform(0, 1, (self.Npar, self.K))\n #print(\"Particles: \", self.Particles) \n return None", "def filter_vario_params(self, data_id=None, name=None) -> List[VarioParams]:\r\n # build the base query\r\n query = self.session.query(VarioParams)\r\n\r\n # apply filter\r\n if data_id is not None:\r\n query = query.filter(VarioParams.data_id == data_id)\r\n if name is not None:\r\n name = name.replace('*', '%')\r\n if '%' not in name:\r\n name = f'%{name}%'\r\n query = query.filter(VarioParams.name.like(name))\r\n \r\n # return\r\n return query.all()", "def fv(X,Y,dx,dy,r2,i,append,L,N,U,dt,close_list,Nlist,vel_verlet_on,R,menu,submenu,n1,grid,G,wallcount,X2):\r\n\r\n \"\"\"JV: append is a boolean. If it's true, adds the energy to our list, if it isn't, it doesn't.\r\n We do that because in some cases we will call the algorithm more times than the actual step number (and\r\n we only want to sum the value T/dt times), this is needed in the velocity-Verlet algorithm, that we call the fv()\r\n function one more time than needed just to start the loop.\"\"\"\r\n\r\n# L = self.param[2]\r\n#\r\n# N = self.particles.size\r\n\r\n #For computing all the distances I use a trick with the meshgrid function,\r\n #see the documentation on how this works if you dont see it.\r\n\r\n \"\"\"JV: X is an array that contains each position, mx is an nxn array that each column is the position of one particle (so it's a matrix\r\n that has n X rows) and mxt is the same but tranposed (so it's a matrix of n X columns)\"\"\"\r\n\r\n \"\"\"\r\n UPDATE: This block of code is commented because now it's done in a loop inside solve_verlet() (due to Numba...).\r\n Looks a little bit messy but if Numba allowed me to call the np.meshgrid() function we would do this here. Sorry, but I like to keep the comment to remind me that.\r\n \"\"\"\r\n # MX, MXT = np.meshgrid(X,X,copy=False)\r\n # MY, MYT = np.meshgrid(Y,Y,copy=False)\r\n\r\n #JV: So dx is a nxn simetric array with 0 in the diagonal, and each position is the corresponding distance between the particles,\r\n # so the position [1,2] is the distance between partcle 1 and 2 (x1-x2), and so on\r\n # dx = MXT - MX\r\n # dx = dx\r\n\r\n # dy = MYT - MY\r\n # dy = dy\r\n\r\n # r2 = np.square(dx)+np.square(dy)\r\n\r\n # if(menu == \"Free!\"):\r\n # #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n # dx_v2 = (np.abs(dx.copy())-1*L)\r\n # r2_v2 = dx_v2**2+dy**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # dy_v2 = (np.abs(dy.copy())-1*L)\r\n # r2_v2 = dx**2+dy_v2**2\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # r2_v2 = dx_v2**2+dy_v2**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n\r\n dUx = 0.\r\n dUy = 0.\r\n utot = np.zeros((N))\r\n f = np.zeros((N,2))\r\n\r\n for j in range(0,N):\r\n dUx = 0.\r\n dUy = 0.\r\n u = 0.\r\n\r\n #JV: we now calculate the force with only the Nlist closest particles\r\n for k in range(0,Nlist):\r\n c = int(close_list[j][k])\r\n\r\n #In the force computation we include the LJ and the walls (JV: in the verlet case). 
I truncate the interaction at self.R units of lenght,\r\n #I also avoid distances close to 0 (which only should affect the diagonal in the matrix of distances)\r\n #All these conditions are included using the numpy.where function.\r\n #If you want to include more forces you only need to add terms to these lines.\r\n\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c])\r\n dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c])\r\n # print(dUx,dUy,dx[j,c],r2[j,c],R[j],R[c])\r\n#JV: COMMENTED PART BECAUSE NUMBA HAS PROBLEMS WITH THIS BLOCK OF CODE THAT DOES THE CALCULATION IN THE VERLET ALGORITHM, NOW IT ONLY WORKS WITH THE VELOCITY VERLET, TO FIX\"\r\n# else:\r\n# if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n# dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n\r\n #JV: We add the energy in the corresponding array in both cases, remember that the verlet algorithm will include the energy from the walls\r\n # and that will be visible in fluctuations on the energy\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n u = u + LJverlet(r2[j,c],R[c],R[j])\r\n# else:\r\n# u = u + walls([X[j],Y[j]])#JV: TO CHANGE; NOW ONLY WORKS WITH VEL_VERLET_ON\r\n# else:\r\n# if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# u = u + LJverlet(r2[j,c],R[c],R[j],param)\r\n#\r\n# if((X[j]**2+Y[j]**2) > (0.8*L)**2):\r\n# u = u + walls([X[j],Y[j]],param)\r\n #JV: COMMENTED FOR NOW\r\n\r\n #JV: If the argument it's True, we will append the energy to our corresponding array\r\n if(append == True):\r\n utot[j] = u\r\n\r\n f[j,:] = f[j,:]+np.array([dUx,dUy])\r\n\r\n if(append == True):\r\n U[int(i)] = np.sum(utot) #JV: Finally, we add the total energy so we have the global energy in a step of time\r\n\r\n return f", "def _build_parsed_values(self):\n\n # \n # Generate a velocity data particle.\n # Note that raw_data already contains the individual fields\n # extracted and unpacked from the velocity data record.\n #\n global flags\n particle = []\n field = 0\n for flag in range(0, FLAG_RECORD_SIZE):\n #\n # If the flags indicated that this field is to be expected,\n # store the next unpacked value into the data particle.\n #\n key = VEL3D_PARAMETERS[flag][INDEX_KEY]\n if flags[flag]:\n if flag == INDEX_FLAG_Time:\n #\n # This returns a tuple, but particle wants a list.\n #\n time_array = self.raw_data[field:field + OUTPUT_TIME_SIZE]\n\n particle.append({DataParticleKey.VALUE_ID: key,\n DataParticleKey.VALUE: list(time_array)})\n field += OUTPUT_TIME_SIZE\n else:\n particle.append({DataParticleKey.VALUE_ID: key,\n DataParticleKey.VALUE: self.raw_data[field]})\n field += 1\n\n #\n # If flags indicate that this field is not present,\n # output a value of None.\n #\n else:\n particle.append({DataParticleKey.VALUE_ID: key,\n DataParticleKey.VALUE: None})\n\n return particle", "def velocities(self, return_np=False):\n if return_np:\n return self.si_values()[3:6]\n return [self.v_r, self.v_t, self.v_p]", "def update(self, dt):\n for p in self.listOfParticles:\n if self.willcollide(p, can.C, can.R):\n p1 = p.P\n pc, u = p.findpc()\n vp = p1.vec(p1.proj(pc.vec(can.C)))\n p1m = p1.__add__(vp.scale(vp.len()))\n v2 = pc.vec(p1m).scale(p.V.len())\n p.V = v2\n p.P = pc + p.V.scale(p.V.len()*(1-u))\n else:\n p.P = p.step(dt)", "def psogps(data_src, 
min_supp=MIN_SUPPORT, max_iteration=MAX_ITERATIONS, n_particles=N_PARTICLES,\n velocity=VELOCITY, coef_p=PERSONAL_COEFF, coef_g=GLOBAL_COEFF, return_gps=False):\n # Prepare data set\n d_set = DataGP(data_src, min_supp)\n d_set.init_attributes()\n # self.target = 1\n # self.target_error = 1e-6\n attr_keys = [GI(x[0], x[1].decode()).as_string() for x in d_set.valid_bins[:, 0]]\n\n if d_set.no_bins:\n return []\n\n it_count = 0\n eval_count = 0\n counter = 0\n var_min = 0\n var_max = int(''.join(['1'] * len(attr_keys)), 2)\n\n # Empty particle template\n empty_particle = structure()\n empty_particle.position = None\n empty_particle.fitness = None\n\n # Initialize Population\n particle_pop = empty_particle.repeat(n_particles)\n for i in range(n_particles):\n particle_pop[i].position = random.randrange(var_min, var_max)\n particle_pop[i].fitness = 1\n\n pbest_pop = particle_pop.copy()\n gbest_particle = pbest_pop[0]\n\n # Best particle (ever found)\n best_particle = empty_particle.deepcopy()\n best_particle.position = gbest_particle.position\n best_particle.fitness = costfxn(best_particle.position, attr_keys, d_set)\n\n velocity_vector = np.ones(n_particles)\n best_fitness_arr = np.empty(max_iteration)\n best_patterns = []\n str_best_gps = list()\n str_iter = ''\n str_eval = ''\n\n repeated = 0\n while counter < max_iteration:\n # while eval_count < max_evaluations:\n # while repeated < 1:\n for i in range(n_particles):\n # UPDATED\n if particle_pop[i].position < var_min or particle_pop[i].position > var_max:\n particle_pop[i].fitness = 1\n else:\n particle_pop[i].fitness = costfxn(particle_pop[i].position, attr_keys, d_set)\n eval_count += 1\n str_eval += \"{}: {} \\n\".format(eval_count, particle_pop[i].fitness)\n\n if pbest_pop[i].fitness > particle_pop[i].fitness:\n pbest_pop[i].fitness = particle_pop[i].fitness\n pbest_pop[i].position = particle_pop[i].position\n\n if gbest_particle.fitness > particle_pop[i].fitness:\n gbest_particle.fitness = particle_pop[i].fitness\n gbest_particle.position = particle_pop[i].position\n # if abs(gbest_fitness_value - self.target) < self.target_error:\n # break\n if best_particle.fitness > gbest_particle.fitness:\n best_particle = gbest_particle.deepcopy()\n\n for i in range(n_particles):\n new_velocity = (velocity * velocity_vector[i]) + \\\n (coef_p * random.random()) * (pbest_pop[i].position - particle_pop[i].position) + \\\n (coef_g * random.random()) * (gbest_particle.position - particle_pop[i].position)\n particle_pop[i].position = particle_pop[i].position + new_velocity\n\n best_gp = validategp(d_set, decodegp(attr_keys, best_particle.position))\n \"\"\":type best_gp: GP\"\"\"\n is_present = isduplicate(best_gp, best_patterns)\n is_sub = amcheck(best_patterns, best_gp, subset=True)\n if is_present or is_sub:\n repeated += 1\n else:\n if best_gp.support >= min_supp:\n best_patterns.append(best_gp)\n str_best_gps.append(best_gp.print(d_set.titles))\n # else:\n # best_particle.fitness = 1\n\n try:\n # Show Iteration Information\n best_fitness_arr[it_count] = best_particle.fitness\n str_iter += \"{}: {} \\n\".format(it_count, best_particle.fitness)\n except IndexError:\n pass\n it_count += 1\n\n if max_iteration == 1:\n counter = repeated\n else:\n counter = it_count\n # Output\n out = json.dumps({\"Algorithm\": \"PSO-GRAD\", \"Best Patterns\": str_best_gps, \"Iterations\": it_count})\n \"\"\":type out: object\"\"\"\n if return_gps:\n return out, best_patterns\n else:\n return out", "def velocities(self, session):\n velocities = session.query(\n 
Timepoint.velocity_x,\n Timepoint.velocity_y,\n Timepoint.velocity_z).filter(\n Timepoint.id.between(self.start_timepoint_id, self.end_timepoint_id))\n return np.array(velocities.all())", "def diffusive_particles(redownload: bool = False) -> Dataset:\n return Dataset.get(\"diffusive_particles\", redownload=redownload)", "def getSpectraDataForAllEvents(self, particleName=\"pion\", pT_range=None, where=\"\", orderBy=\"event_id\"):\n pid = self._pid(particleName)\n whereClause = \"pid=%d\" % pid\n if pT_range:\n whereClause += \" and %g<=pT and pT<=%g\" % (pT_range[0], pT_range[1])\n if where:\n whereClause += \" and \" + where\n RawdNdyData = np.asarray(self.db.selectFromTable(\"spectra\", (\"pT\", \"N\"), whereClause=whereClause, orderByClause=orderBy))\n #nevent = self.getNumberOfEvents()\n nevent = self.db.selectFromTable(\"multiplicities\", \"count()\", \"pid = %d\" % pid)[0][0]\n npT = len(RawdNdyData[:,0])/nevent\n dNdyData = RawdNdyData.reshape(nevent, npT, 2)\n return dNdyData", "def potential_from_particles(self, nbins = None, r_bins = None):\n\n r, dens = self.density_profile(nbins, r_bins)\n nbins = np.size(r)\n dens_function = interpolate.interp1d(r, dens)\n\n\n integrand_1 = lambda x : x * x * dens_function(x)\n integrand_2 = lambda x : x * dens_function(x)\n \n rmin, rmax = np.min(r), np.max(r)\n \n pot = np.zeros(nbins) \n for i in np.arange(nbins):\n \n\n A = integrate.quad(integrand_1, rmin, r[i])[0]\n B = integrate.quad(integrand_2, r[i], rmax)[0]\n\n pot[i] = A/r[i] + B\n\n pot = - 4.0 * np.pi * cgs.G * pot\n\n return r, pot", "def update_particles(self):\n for particle in self.particles:\n particle.update_coordinates(self.bounds)", "def getInterpretedSpectraForAllEvents(self, particleName=\"pion\", pTs=np.linspace(0,2.5,10), where=\"\", orderBy=\"event_id\", verbose=False):\n # processing\n dNdyData = self.getSpectraDataForAllEvents(particleName=particleName, where=where, orderBy=orderBy)\n dNdyintepBlock = []\n if verbose: print(\"Looping over {} events... (please be patient)\".format(dNdyData.shape[0]))\n for iev in range(dNdyData.shape[0]):\n dNdyintep = exp(np.interp(pTs, dNdyData[iev,:,0], log(dNdyData[iev,:,1])))\n dNdyintepBlock.append(dNdyintep)\n if verbose: print(\"Done. 
Thanks for waiting.\")\n return np.asarray(dNdyintepBlock)", "def get_variables(self, pretty=False):\n \n output = []\n #Get the model XML tree\n model = self.model.find(xmlns + 'Model')\n #Get list of metabolites\n metabolites = model.find(xmlns + 'ListOfMetabolites')\n \n for metabolite in metabolites:\n name = metabolite.attrib['name']\n simulationType = metabolite.attrib['simulationType']\n compartment_key = metabolite.attrib['compartment']\n \n if simulationType != 'fixed':\n if pretty:\n output.append(name + ' (Particle Number)')\n else:\n #Format the metabolite string as: CN=Root,Model=modelname,Vector=Compartments[compartment],Vector=Metabolites[a],Reference=ParticleNumber\n compartment_name = self._get_compartment_name(compartment_key)\n model_name = self.get_name()\n \n output_template = Template('CN=Root,Model=${model_name},Vector=Compartments[${compartment_name}],Vector=Metabolites[${name}],Reference=ParticleNumber')\n \n output_string = output_template.substitute(model_name=model_name, compartment_name=compartment_name, name=name)\n output.append(output_string)\n #Next, get list of non-fixed compartments:\n compartments = model.find(xmlns + 'ListOfCompartments')\n for compartment in compartments:\n name = compartment.attrib['name']\n simulationType = compartment.attrib['simulationType']\n \n if simulationType != 'fixed':\n if pretty:\n output.append(name + ' (' + model.attrib['volumeUnit'] + ')')\n else:\n #format the compartment string as: \"CN=Root,Model=Kummer calcium model,Vector=Compartments[compartment_2],Reference=Volume\"\n model_name = self.get_name()\n output_template = Template('CN=Root,Model=${model_name},Vector=Compartments[${name}],Reference=Volume')\n output_string = output_template.substitute(model_name=model_name, name=name)\n output.append(output_string)\n \n #Finally, get non-fixed global quantities\n values = model.find(xmlns + 'ListOfModelValues')\n #Hack - If no values have been set in the model, use the empty list to avoid a NoneType error\n if values == None:\n values = []\n for value in values:\n name = value.attrib['name']\n simulationType = value.attrib['simulationType']\n \n if simulationType != 'fixed':\n if pretty:\n output.append(name + ' (Value)')\n else:\n #format as: CN=Root,Model=Kummer calcium model,Vector=Values[quantity_1],Reference=Value\"\n model_name = self.get_name()\n output_template = Template('CN=Root,Model=${model_name},Vector=Values[${name}],Reference=Value')\n output_string = output_template.substitute(model_name=model_name, name=name)\n output.append(output_string)\n \n return output", "def velocityVerlet(XY, yh, yt, h, n):\n for l in range(n):\n yt_temp = yt + (0.5 * h * XY.grad_log_density(yh))\n yhp1 = yh + (h * yt_temp)\n ytp1 = yt_temp + (0.5 * h * XY.grad_log_density(yhp1))\n yh = yhp1\n yt = ytp1\n\n return [yhp1, ytp1]", "def VelPlanet (self, deltaT):\n\n for planet in self.planets:\n velocity = planet.velocity + (planet.acceleration * deltaT)\n planet.velocity = velocity #Each body's resulting velocity is updated to the body's information defined in the Particle class.", "def f(x):\n n_particles = x.shape[0]\n j = [f_per_particle(x[i]) for i in range(n_particles)]\n #print(\"f j: \", j)\n return np.array(j)", "def f_per_particle( m):\n alpha = 0.9\n total_features = X.shape[1]\n # Get the subset of the features from the binary mask\n if np.count_nonzero(m) == 0: \n #if the particle subset is only zeros, get the original set of attributes\n X_subset = X\n else:\n X_subset = X[:,m==1]\n particleScore = list()\n 
particleSize = list()\n score = abs(compute_gamma(X_subset, y))\n particleScore.append(score)\n particleSize.append(X_subset.shape[1])\n # Compute for the objective function\n j = (alpha * (1.0 - score)+ (1.0 - alpha) * (1 - (X_subset.shape[1] / total_features)))\n return j", "def update_particle_cloud(self, scan):\n\n \"\"\"\n Initialise arrays for the new particle cloud,\n particle weights and cummulative weights\n \"\"\"\n newParticleCloud = []\n particleWeights = []\n \n randomGauss = 10*self.NUMBER_PREDICTED_READINGS\n gaussianRandomNumX = []\n gaussianRandomNumY = []\n\n sensorSigma=0.1 #variance\n sensorMu=0 #mean\n noise=sensorSigma * numpy.random.randn() + sensorMu\n\n for i in range (0,randomGauss):\n gaussianRandomNumX.append(random.gauss(0,1))\n gaussianRandomNumY.append(random.gauss(0,1))\n\n for p in self.particlecloud.poses:\n particleWeights.append(self.sensor_model.get_weight(scan, p))\n\n for i in range(len(self.particlecloud.poses)):\n randomSelection = numpy.random.random()\n csum = 0\n for p in self.particlecloud.poses:\n weight = self.sensor_model.get_weight(scan, p) / sum(particleWeights)\n csum += weight\n if csum >= randomSelection:\n newParticle = copy.deepcopy(p)\n newParticle.position.x = newParticle.position.x + (gaussianRandomNumX[i] * noise)\n newParticle.position.y = newParticle.position.y + (gaussianRandomNumY[i] * noise)\n newParticle.position.z = newParticle.position.z\n newParticleCloud.append(newParticle)\n break\n self.particlecloud.poses = newParticleCloud\n\n pass", "def velocities(self, return_np=False):\n\n if return_np:\n return self.si_values()[3:]\n return [self.v_r, self.v_t, self.v_p]", "def __init__(self):\n self.pidDict = { # particle_name, pid\n \"total\" : 0,\n \"charged\" : 1,\n \"charged_eta\" : 2,\n \"pion\" : 6, # sum(7, 8, -7)\n \"pion_p\" : 7,\n \"pion_0\" : 8,\n \"pion_m\" : -7,\n \"kaon\" : 11, # sum(12, 13)\n \"kaon_p\" : 12,\n \"kaon_0\" : 13,\n \"anti_kaon\" : -11, # sum(-12, -13)\n \"kaon_m\" : -12,\n \"anti_kaon_0\" : -13,\n \"nucleon\" : 16, # sum(17, 18)\n \"proton\" : 17,\n \"neutron\" : 18,\n \"anti_nucleon\" : -16, # sum(-17, -18)\n \"anti_proton\" : -17,\n \"anti_neutron\" : -18,\n \"sigma\" : 21, # sum(22, 23, 24)\n \"sigma_p\" : 22,\n \"sigma_0\" : 23,\n \"sigma_m\" : 24,\n \"anti_sigma\" : -21,\n \"anti_sigma_p\" : -22,\n \"anti_sigma_0\" : -23,\n \"anti_sigma_m\" : -24,\n \"xi\" : 26, # sum(27, 28)\n \"xi_0\" : 27,\n \"xi_m\" : 28,\n \"anti_xi\" : -26,\n \"anti_xi_0\" : -27,\n \"anti_xi_m\" : -28,\n \"lambda\" : 31,\n \"anti_lambda\" : -31,\n \"omega\" : 36,\n \"anti_omega\" : -36,\n \"phi\" : 41,\n \"rho\" : 46, #sum(47, 48, -47)\n \"rho_p\" : 47,\n \"rho_0\" : 48,\n \"rho_m\" : -47,\n \"eta\" : 51,\n \"eta_prime\" : 52,\n \"gamma\" : 61,\n \"omega782\" : 65,\n \"eta\" : 71,\n \"etap\" : 72,\n }\n\n for aParticle in self.pidDict.keys():\n if self.pidDict[aParticle]>=0:\n self.pidDict[aParticle+\"_hydro\"] = self.pidDict[aParticle]+1000\n else:\n self.pidDict[aParticle+\"_hydro\"] = self.pidDict[aParticle]-1000\n if self.pidDict[aParticle]>=0:\n self.pidDict[aParticle+\"_thermal\"] = self.pidDict[aParticle]+2000\n else:\n self.pidDict[aParticle+\"_thermal\"] = self.pidDict[aParticle]-2000\n\n self.pidDict.update({\n \"photon_total\" : 9000,\n \"photon_total_eq\" : 9001,\n \"photon_QGP_tot\" : 9002,\n \"photon_QGP_eq\" : 9003,\n \"photon_HG_tot\" : 9004,\n \"photon_HG_eq\" : 9005,\n \"direct_gamma_shortdecay_hydro\" : 9006,\n \"decay_gamma_pi0_hydro\" : 9007,\n \"decay_gamma_eta_hydro\" : 9008,\n 
\"decay_gamma_omega_hydro\" : 9009,\n \"decay_gamma_phi_hydro\" : 9010,\n \"decay_gamma_etap_hydro\" : 9011,\n \"decay_gamma_Sigma0_hydro\" : 9012,\n })\n\n #UrQMD pid Dictionary, name conversion defined as in binUtility\n self.UrQMDpidDict = { #particle name, UrQMD id# : isospin*2000 + pid\n 2101 : \"pion_p\",\n -1899 : \"pion_m\",\n 101 : \"pion_0\",\n 1106 : \"kaon_p\",\n -894 : \"kaon_0\",\n -1106 : \"kaon_m\",\n 894 : \"anti_kaon_0\",\n 1001 : \"proton\",\n -999 : \"neutron\",\n -1001 : \"anti_proton\",\n 999 : \"anti_neutron\",\n 2040 : \"sigma_p\",\n -1960 : \"sigma_m\",\n 40 : \"sigma_0\",\n -2040 : \"anti_sigma_p\",\n 1960 : \"anti_sigma_m\",\n -40 : \"anti_sigma_0\",\n 1049 : \"xi_0\",\n -951 : \"xi_m\",\n -1049 : \"anti_xi_0\",\n 951 : \"anti_xi_m\",\n 27 : \"lambda\",\n -27 : \"anti_lambda\",\n 55 : \"omega\",\n -55 : \"anti_omega\",\n 109 : \"phi\",\n 102 : \"eta\",\n 107 : \"eta_prime\",\n 100 : \"gamma\",\n }\n\n #pdg pid Dictionary\n self.PDGpidDict = { #pdg id#, particle name\n 211 : \"pion_p\",\n -211 : \"pion_m\",\n 111 : \"pion_0\",\n 321 : \"kaon_p\",\n 311 : \"kaon_0\",\n -321 : \"kaon_m\",\n -311 : \"anti_kaon_0\",\n 2212 : \"proton\",\n 2112 : \"neutron\",\n -2212 : \"anti_proton\",\n -2112 : \"anti_neutron\",\n 3222 : \"sigma_p\",\n 3112 : \"sigma_m\",\n 3212 : \"sigma_0\",\n -3222 : \"anti_sigma_p\",\n -3112 : \"anti_sigma_m\",\n -3212 : \"anti_sigma_0\",\n 3322 : \"xi_0\",\n 3312 : \"xi_m\",\n -3322 : \"anti_xi_0\",\n -3312 : \"anti_xi_m\",\n 3122 : \"lambda\",\n -3122 : \"anti_lambda\",\n 3334 : \"omega\",\n -3334 : \"anti_omega\",\n 333 : \"phi\",\n 221 : \"eta\",\n 331 : \"eta_prime\",\n 22 : \"gamma\",\n }\n\n #particle mass Dictionary (unit in GeV)\n self.masspidDict = {\n \"pion\" : 0.13957,\n \"pion_p\" : 0.13957,\n \"pion_0\" : 0.13498,\n \"pion_m\" : 0.13957,\n \"kaon\" : 0.49368,\n \"kaon_p\" : 0.49368,\n \"kaon_0\" : 0.49765,\n \"anti_kaon\" : 0.49368,\n \"kaon_m\" : 0.49368,\n \"anti_kaon_0\" : 0.49765,\n \"nucleon\" : 0.93827,\n \"proton\" : 0.93827,\n \"neutron\" : 0.93957,\n \"anti_nucleon\" : 0.93827,\n \"anti_proton\" : 0.93827,\n \"anit_neutron\" : 0.93957,\n \"sigma\" : 1.18937,\n \"sigma_p\" : 1.18937,\n \"sigma_0\" : 1.19264,\n \"sigma_m\" : 1.19745,\n \"anti_sigma\" : 1.18937,\n \"anti_sigma_p\" : 1.18937,\n \"anti_sigma_0\" : 1.19264,\n \"anti_sigma_m\" : 1.19745,\n \"xi\" : 1.31483,\n \"xi_0\" : 1.31483,\n \"xi_m\" : 1.32131,\n \"anti_xi\" : 1.31483,\n \"anti_xi_0\" : 1.31483,\n \"anti_xi_m\" : 1.32131,\n \"lambda\" : 1.11568,\n \"anti_lambda\" : 1.11568,\n \"omega\" : 1.67243,\n \"anti_omega\" : 1.67243,\n \"rho\" : 0.77580,\n \"rho_p\" : 0.77580,\n \"rho_0\" : 0.77580,\n \"rho_m\" : 0.77580,\n \"phi\" : 1.01946,\n \"eta\" : 0.54775,\n \"eta_prime\" : 0.95778,\n \"gamma\" : 0.0,\n }\n for aParticle in self.masspidDict.keys():\n self.masspidDict[aParticle+\"_hydro\"] = self.masspidDict[aParticle]\n self.masspidDict[aParticle+\"_thermal\"] = self.masspidDict[aParticle]\n\n # charged hadrons list\n self.charged_hadron_list = [\n \"pion_p\", \"pion_m\", \"kaon_p\", \"kaon_m\", \"proton\", \"anti_proton\",\n \"sigma_p\", \"sigma_m\", \"anti_sigma_p\", \"anti_sigma_m\",\n \"xi_m\", \"anti_xi_m\"]", "def setupParticles(self):\n\n for ss in self.pargs['species']:\n\n # Make sure we are setting up particles, not walls (so we check for id existence)\n if 'id' in ss and 'wall' not in ss:\n if not self.rank:\n logging.info('Setting up particles for group{id}'.format(**ss))\n\n randName = np.random.randint(10**5,10**8)\n pddName = 'pdd' + 
'{}'.format(np.random.randint(10**5,10**8))\n\n if 'vol_lim' not in ss:\n ss['vol_lim'] = 1e-20\n\n id = ss['id'] - 1\n self.lmp.command('group group{} type {}'.format(id, ss['id']))\n\n if 'args'in ss:\n args = ss['args']\n else:\n args = ()\n\n if 'radius' in ss:\n radius = ss['radius']\n\n if not isinstance(radius, tuple):\n radius = ('constant', radius)\n\n self.lmp.command('fix {} '.format(randName) + 'group{}'.format(id) + ' particletemplate/{style} 15485867 volume_limit {vol_lim} atom_type {id} density constant {density} radius'.format(**ss) + (' {}' * len(radius)).format(*radius) \\\n + (' {}' * len(args)).format(*args))\n else:\n self.lmp.command('fix {} '.format(randName) + 'group{}'.format(id) + ' particletemplate/{style} 15485867 volume_limit {vol_lim} atom_type {id} density constant {density}'.format(**ss) + (' {}' * len(args)).format(*args))\n \n self.lmp.command('fix {} '.format(pddName) + 'group{}'.format(id) + ' particledistribution/discrete 67867967 1'.format(**ss) + ' {} 1.0'.format(randName))\n\n if ss['style'] is 'multisphere':\n itype = ss['style']\n else:\n itype = 'nve/{style}'.format(**ss)\n\n #Do NOT unfix randName! Will cause a memory corruption error\n self.pddName.append(pddName)", "def getMultiplicities(self, particleName=\"pion\", where=\"\", orderBy=\"event_id\"):\n whereClause = \"pid=%d\" % self._pid(particleName)\n if where:\n whereClause += \" and \" + where\n tmp = np.asarray(self.db.selectFromTable(\"multiplicities\", \"N\", whereClause=whereClause, orderByClause=orderBy))\n return tmp.reshape(tmp.size)", "def init_pvelocity(self, individuals):\n for individual in individuals:\n # the initial speed is set to zero\n individual.features['velocity'] = [0] * len(individual.vector)\n\n return", "def create_vessel_components(self) -> list:\n\n # Blanket computed from plasma\n blanket = paramak.BlanketFP(\n plasma=self.plasma,\n thickness=4.06e2 - 3.52e2,\n start_angle=-70,\n stop_angle=230,\n rotation_angle=self.rotation_angle,\n vertical_displacement=self.plasma.vertical_displacement,\n offset_from_plasma=[[-70, 0, 90, 180, 230], [50, 20, 59, 16, 50]],\n name=\"blanket\",\n )\n\n # SN Divertor\n divertor = paramak.ITERtypeDivertor(\n anchors=((4.34e2, -3.3e2), (5.56e2, -3.74e2)),\n coverages=(105, 125),\n lengths=(45, 75),\n radii=(68, 65),\n tilts=(-30, 2),\n dome_height=45,\n dome_pos=0.45,\n rotation_angle=self.rotation_angle,\n name=\"divertor\",\n )\n\n # Vacuum vessel\n divertor.points # trigger the building of the points for divertor\n # the inner part of the vacuum vessel is computed from the outer\n # points of the blanket and the divertor\n vac_vessel_inner = paramak.RotateMixedShape(\n points=blanket.outer_points + divertor.casing_points,\n rotation_angle=self.rotation_angle,\n name=\"vessel\",\n )\n\n vac_vessel = paramak.RotateSplineShape(\n points=[\n (327.77, 36.5026668124882),\n (327.77, 73.37741270075162),\n (327.77, 108.31180820215741),\n (327.77, 143.2462037035632),\n (327.77, 178.18059920496898),\n (327.77, 213.11499470637477),\n (327.77, 248.04939020778068),\n (327.77, 282.98378570918646),\n (327.77, 317.9181812105922),\n (328.6121587814181, 368.23899806938385),\n (336.18303032328333, 422.4306297110355),\n (350.4835654579176, 457.5437492206628),\n (371.95910957013655, 492.47041663587777),\n (404.3208742000702, 522.0151685493631),\n (439.6516080621078, 544.4559826211985),\n (474.98234192414554, 556.3610266211815),\n (510.2245275810152, 564.0927634387052),\n (545.6438096482208, 565.1200145185009),\n (565.832800426528, 
563.1864687746993),\n (580.9745435102584, 559.4390362932862),\n (616.3052773722961, 548.4109567158157),\n (651.6360112343338, 533.224020531035),\n (686.9667450963714, 515.3041214328789),\n (722.297478958409, 492.23516177329117),\n (757.6282128204466, 466.8689289401416),\n (792.9589466824843, 437.10619055069265),\n (825.7660566972336, 403.7167485984509),\n (853.525919017406, 369.42176700251196),\n (877.9209495411939, 333.90960594986575),\n (898.9511482685972, 300.5186330502012),\n (916.616515199616, 265.2383422522439),\n (932.5994662324425, 230.72194441870647),\n (946.0587934179808, 193.1122328856627),\n (956.1532888071343, 156.87835598377137),\n (962.8829523999035, 118.10702768634405),\n (967.9302000944803, 80.39197257542594),\n (968.7714080435763, 38.24754419835381),\n (968.7714080435763, 25.77097437642317),\n (964.5653682980957, -1.670738783514139),\n (956.9944967562304, -29.93883090626548),\n (956.1532888071343, -34.59540221679083),\n (946.0587934179808, -71.15339839027786),\n (931.7582582833464, -104.25874435511184),\n (914.9340993014238, -139.91477225259314),\n (898.9511482685972, -174.48160361826422),\n (883.8094051848669, -213.64300914878197),\n (867.8264541520404, -248.21908241802464),\n (851.0022951701176, -284.2078188440911),\n (834.1781361881949, -319.9470238737184),\n (818.1951851553683, -359.0978394110024),\n (800.5298182243495, -391.2313539579658),\n (776.1347877005617, -427.87174371008393),\n (744.1688856349085, -460.45530873911446),\n (708.8381517728709, -490.0255912806248),\n (673.5074179108332, -512.7040543014494),\n (638.1766840487956, -528.371873327094),\n (602.8459501867579, -539.0490644239661),\n (567.5152163247203, -546.1219131278361),\n (532.1844824626827, -548.9566889080664),\n (496.85374860064496, -547.7514325554811),\n (461.52301473860734, -541.3971156414638),\n (426.1922808765697, -527.596464992453),\n (390.8615470145321, -501.2796363633471),\n (360.57806084707124, -468.0473902249954),\n (340.389070068764, -431.4355817359209),\n (329.87397070506233, -399.072068113844),\n (327.770950832322, -357.4796824533661),\n (327.770950832322, -311.73270913617455),\n (327.770950832322, -276.79831363476876),\n (327.770950832322, -241.86391813336297),\n (327.770950832322, -206.92952263195718),\n (327.770950832322, -171.99512713055117),\n (327.770950832322, -137.06073162914538),\n (327.770950832322, -102.12633612773948),\n (327.770950832322, -67.19194062633369),\n ],\n cut=[vac_vessel_inner], # to make a hollow shape\n rotation_angle=self.rotation_angle,\n name=\"vessel_inner\",\n )\n\n return [divertor, blanket, vac_vessel, vac_vessel_inner]", "def new_gas_plummer_distribution(number_of_particles, \n total_mass = 1.0|nbody_system.mass, \n virial_radius = 1.0|nbody_system.length,\n G = None,\n **keyword_arguments): # optional arguments for UniformSphericalDistribution\n particles = new_plummer_spatial_distribution(number_of_particles, total_mass=total_mass, \n virial_radius=virial_radius, **keyword_arguments)\n \n if G is None:\n G = nbody_system.G if generic_unit_system.is_generic_unit(total_mass.unit) else constants.G\n velocity_unit = (G*total_mass/virial_radius).sqrt().unit.base_unit()\n particles.velocity = [0.0, 0.0, 0.0] | velocity_unit\n \n plummer_radius = 0.1875 * numpy.pi * virial_radius\n u_unit = (velocity_unit**2).base_unit()\n particles.u = (1 + particles.position.lengths_squared()/plummer_radius**2)**(-0.5) | u_unit\n particles.u *= 0.25 * (G*total_mass**2/virial_radius) / particles.thermal_energy()\n return particles", "def evolve_system(self,dt, energy_file = 
None):\n phi = self.compute_field()\n force_m = self.compute_forces_mesh()\n self.acc_new = np.zeros([len(self),2])\n #Computes the force felt by each particles and deduce the acceleration\n for i in range(len(self)):\n x,y = self.ptclgrid.ixy[i]\n x = int(x)\n y = int(y)\n self.acc_new[i][0] += (1/self.mass[i]*force_m[0][x,y])\n self.acc_new[i][1] += (1/self.mass[i]*force_m[1][x,y])\n #Evolve the position and momenta of the particle in the list\n self.particles.evolve(self.acc,self.acc_new,dt,self.size, boundary_periodic=self.boundary_periodic)\n #For non-periodic condition, deletes the particles that leave the grid from the list\n if self.boundary_periodic!=True: \n index = np.argwhere((self.particles.position>self.size-1))\n index2 = np.argwhere((self.particles.position<0))\n index = {a for a in np.append(index,index2)}\n index = list(index)\n self.particles.momentum = np.delete(self.particles.momentum,index,axis=0)\n self.acc = np.delete(self.acc,index,axis=0)\n self.acc_new = np.delete(self.acc_new,index,axis=0)\n self.mass = np.delete(self.mass,index,axis=0)\n self.particles.position = np.delete(self.particles.position,index,axis=0)\n self.acc = self.acc_new.copy()\n #Update the position of the particles on the grid\n self.ptclgrid.update_position(self.particles.position,self.mass)\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n #Write the energy in a file if on is given\n if energy_file != None:\n energy_file.write(f'{self.energy()}\\n')\n energy_file.flush()\n return self.grid_pos", "def evolve(self, dt):\n TIMESTEP = 1e-5\n num_steps = int(dt / TIMESTEP)\n\n for _ in range(num_steps):\n for particle in self.particles:\n # Calculates direction\n norm = (particle.x ** 2 + particle.y ** 2) ** 0.5\n v_x, v_y = -particle.y / norm, particle.x / norm\n\n # Calculates displacement\n dx = TIMESTEP * particle.angular_velocity * v_x\n dy = TIMESTEP * particle.angular_velocity * v_y\n\n particle.x += dx\n particle.y += dy", "def resample_particles(self):\n # make sure the distribution is normalized\n self.normalize_particles()\n\n newParticles = []\n for i in range(len(self.particle_cloud)):\n # resample the same # of particles\n choice = random_sample()\n # all the particle weights sum to 1\n csum = 0 # cumulative sum\n for particle in self.particle_cloud:\n csum += particle.w\n if csum >= choice:\n # if the random choice fell within the particle's weight\n newParticles.append(deepcopy(particle))\n break\n self.particle_cloud = newParticles", "def _varfilter(self, vrs, response, predictor) -> List[str]:\n if not response and not predictor:\n return vrs\n if response:\n vrs = _list_union(vrs, self.response_vars)\n if predictor:\n vrs = _list_union(vrs, self.predictor_vars)\n return vrs", "def calP(self):\n N = len(self.listOfParticles)\n m = self.listOfParticles[0].m\n vsum = 0\n for particle in self.listOfParticles:\n vsum += particle.V.len()\n A = np.pi*self.R**2\n F = 0.5 * A * (2*self.R) * m * N * vsum**2\n return F", "def get_filtered_t2(data, frequency, sigma_x=None, sigma_y=None):\n\n filtered_data = []\n norm_data = normalize_t2(data)\n for patient_im in norm_data:\n filt_real, filt_t2 = gabor(patient_im, frequency=frequency, sigma_x=sigma_x, sigma_y=sigma_y)\n filtered_data.append(filt_real)\n return filtered_data", "def compute_potential_components(context):\n import copy\n system = context.getSystem()\n system = copy.deepcopy(system)\n positions = context.getState(getPositions=True).getPositions(asNumpy=True)\n parameters = context.getParameters()\n for 
index in range(system.getNumForces()):\n force = system.getForce(index)\n force.setForceGroup(index)\n\n integrator = openmm.VerletIntegrator(1.0 * unit.femtoseconds)\n platform = openmm.Platform.getPlatformByName('Reference')\n context = openmm.Context(system, integrator, platform)\n context.setPositions(positions)\n for (parameter, value) in parameters.items():\n context.setParameter(parameter, value)\n energy_components = list()\n for index in range(system.getNumForces()):\n force = system.getForce(index)\n forcename = force.__class__.__name__\n groups = 1<<index\n potential = context.getState(getEnergy=True, groups=groups).getPotentialEnergy()\n energy_components.append((forcename, potential))\n del context, integrator\n return energy_components", "def new_plummer_spatial_distribution(number_of_particles, \n total_mass = 1.0|nbody_system.mass, \n virial_radius = 1.0|nbody_system.length,\n mass_cutoff = 0.999,\n **keyword_arguments): # optional arguments for UniformSphericalDistribution\n particles = Particles(number_of_particles)\n particle_mass = total_mass * 1.0 / number_of_particles\n particles.mass = particle_mass\n x, y, z = UniformSphericalDistribution(\n number_of_particles, mass_cutoff=mass_cutoff, **keyword_arguments).result\n \n # Now scale the uniformly distributed particle positions to match the radial density profile\n r_old = numpy.sqrt(x*x + y*y + z*z)\n scale_factor = (0.1875 * numpy.pi * virial_radius.number) / numpy.sqrt(1.0 - r_old**2)\n particles.x = scale_factor * x | virial_radius.unit\n particles.y = scale_factor * y | virial_radius.unit\n particles.z = scale_factor * z | virial_radius.unit\n return particles", "def _para_transform(self, X):\n self.check_fit()\n data = self.convert_input(X)\n\n vectors = []\n for atom in range(len(data.elements)):\n limits = self._loop_depth(atom, data.connections)\n tallies = self._tally_limits(limits, data.elements,\n data.connections)\n vec = [tallies.get(x, 0) for x in self._elements]\n if self.add_unknown:\n unknown = 0\n for key, value in tallies.items():\n if key not in self._elements:\n unknown += value\n vec.append(unknown)\n vectors.append(vec)\n return vectors", "def test_particle_velocity():\n\ttest_orientation = o_gen_instance.generate_orientation_vector()\n\ttest_speed = path_instance.generate_velocity().dot(geom_instance.source_direction)\n\tdetector_length = 0.3 + 5*0.5 # hard-coded for now\n\tassert test_speed*trial_samples > detector_length", "def get_list_of_pets(self, auth_key: json, filter: str) -> json:\n\n headers = {'auth_key': auth_key['key']}\n filter = {'filter': filter}\n\n res = requests.get(self.base_url + 'api/pets', headers=headers, params=filter)\n status = res.status_code\n\n result = \"\"\n try:\n result = res.json()\n except:\n result = res.text\n\n return status, result", "def evaluate(self, particles, **kwargs):\n truth = self.truth\n metrics = self.metrics\n moments = self.moments\n results = []\n # for key in truth:\n th_hvi = np.concatenate([particles[key].reshape(particles[key].shape[0], -1) for key in truth], axis=1)\n tr = np.concatenate([truth[key].reshape(truth[key].shape[0], -1) for key in truth], axis=1)\n for mom_name in moments:\n mom_func = moments[mom_name]\n dist = metrics(mom_func(tr, axis=0), mom_func(th_hvi, axis=0))\n result = {'moment': mom_name, 'distance': dist, **kwargs}\n self.results.append(result)\n return results", "def x_add_particles():\n particle_count_list = np.zeros(7)", "def select_defects_modifier(frame, data):\n\n # Retrieve some relevant site properties as 
numpy arrays.\n occupancies = data.particles['Occupancy'][...]\n site_type = data.particles['Particle Type'][...]\n num_site_types = occupancies.shape[1]\n is_si_site = data.particles['Is Si Site'][...]\n is_c_site = data.particles['Is C Site'][...]\n si_occupancy = data.particles['Si Occupancy'][...]\n c_occupancy = data.particles['C Occupancy'][...]\n total_occupancy = data.particles['Total Occupancy'][...]\n\n # Set up a particle selection by creating the Selection property:\n selection = data.particles_.create_property('Selection')\n\n # TODO Create a mask (boolean numpy array) to identify basic defect sites\n v =data.particles[\"Occupancy\"][np.arange(data.particles.count),(data.particles['Particle Type'][...] -1)]\n vacancy_mask =(total_occupancy ==0).astype(int)\n interstitial_mask = (total_occupancy >1).astype(int)\n antisite_mask = ((v==0) & (total_occupancy >0)).astype(int)\n\n data.particles_.create_property('vacancy_mask', data=vacancy_mask)\n data.particles_.create_property('interstitial_mask', data=interstitial_mask)\n data.particles_.create_property('antisite_mask', data=antisite_mask)\n\n selection[...] = vacancy_mask | interstitial_mask | antisite_mask", "def calcVelocityFromMomentum(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle velocity from momentum.\")\n values = {}\n for direction in self.p.order:\n gamma = self.calcLorentzGammaFromMomentum(direction)\n values[direction] = getattr(self.p,direction)/(gamma*self.mass)\n self.setVelocity(Cartesian3DVector(**values))\n return self.getVelocity()", "def relax(particles, drop, interactions,\n N_steps=1000, eta=10e-7, plot=False,\n interaction=True, border=True, temperature=True):\n # global potentials # just for debug\n # potentials = []\n \n N_samples = np.shape(particles)[0]\n N_particles = np.shape(particles)[1]\n for i in range(N_steps):\n T = 1.2 # temperature\n if temperature:\n particles += np.sqrt(2*eta*T)*np.random.randn(N_samples, N_particles, 2)\n\n if border:\n particles += eta*drop.F(particles)\n\n distances = interactions.get_distance_matrix(particles)\n\n if (norm(distances)> 10).any():\n raise Exception(\"A particle evaporated!!\")\n\n if interaction:\n final_force = eta*interactions.F(distances)\n\n if np.isnan(final_force).any():\n raise Exception(\"There is a NaN in the force!!\")\n\n final_force_norm = np.sqrt(np.sum(final_force**2, -1, keepdims=True))\n \n # Clip maximum force to prevent numerical explosion when\n # two particles are too close\n final_force_clip = np.clip(final_force_norm, -10e-3, 10e-3)\n particles -= final_force*final_force_clip/(final_force_norm + 10e-50)\n\n if plot:\n # potentials.append(interactions.V(distances)[0])\n if i % int(N_steps/10) == 0:\n plot_drop(0, particles, drop, interactions, show3d=True)\n\n return particles", "def function_donnee_pep():\r\n\r\n particles = data_function_particle()\r\n weather = data_function_weather()\r\n wind = data_function_wind()\r\n temperature = data_function_temperature()\r\n season = data_function_season()\r\n deaparture = data_function_departure()\r\n day = data_function_day()\r\n rank = data_function_ranking()\r\n pressure = data_function_pressure()\r\n demonstration = data_function_demonstration()\r\n\r\n return particles, weather, wind, temperature, season, deaparture,\\\r\n day, rank, pressure, demonstration", "def _para_transform(self, X):\n data = self.convert_input(X)\n dist = cdist(data.coords, data.coords)\n\n numbers = numpy.array(data.numbers)\n coords 
= numpy.array(data.coords)\n\n vectors = []\n for i in range(len(numbers)):\n nearest = numpy.where(dist[i, :] < self.r_cut)\n ordering = numpy.argsort(dist[i, :][nearest])\n # Add 1 to offset for the start value\n local_atoms = ordering[:self.max_occupancy + 1]\n mat = get_coulomb_matrix(numbers[local_atoms],\n coords[local_atoms],\n alpha=self.alpha,\n use_decay=self.use_decay)\n # Take away 1 for the start value\n n = self.max_occupancy - (len(local_atoms) - 1)\n mat = numpy.pad(mat, ((0, n), (0, n)), \"constant\")\n norm_vals = numpy.linalg.norm(mat, axis=0)\n norm_vals[0] = numpy.inf\n sorting = numpy.argsort(norm_vals)[::-1]\n if self.use_reduced:\n # skip the first value in the diag because it is already in\n # the first row\n diag = numpy.diag(mat)[1:].tolist()\n vectors.append(mat[sorting[0]].tolist() + diag)\n else:\n vectors.append(mat[sorting].flatten())\n return numpy.array(vectors)", "def get_fermi_velocities():\n\n vr = Vasprun('vasprun.xml')\n # eigenvalues = vr.eigenvalues\n bs = vr.get_band_structure()\n bands = bs.bands\n kpoints = bs.kpoints\n efermi = bs.efermi\n h_bar = 6.582e-16 # eV*s\n\n fermi_bands = []\n for spin in bands:\n for i in range(len(bands[spin])):\n if max(bands[spin][i]) > efermi > min(bands[spin][i]):\n fermi_bands.append(bands[spin][i])\n\n fermi_velocities = []\n for band in fermi_bands:\n for i in range(len(band)-1):\n if (band[i] < efermi < band[i+1]) or (band[i] > efermi > band[i+1]):\n dk = np.sqrt((kpoints[i+1].cart_coords[0]\n - kpoints[i].cart_coords[0])**2\n + (kpoints[i+1].cart_coords[1]\n - kpoints[i].cart_coords[1])**2)\n v_f = abs((band[i+1] - band[i]) / (h_bar * dk))\n fermi_velocities.append(v_f)\n\n return fermi_velocities # Values are in Angst./s", "def get_voltages(self):\n if self.v is None or self.dirty is True:\n v = self.simulator.get_voltages()\n n_compartments = self.neuron_collection.total_compartments()\n self.v = np.array(v).reshape([len(v) / n_compartments, n_compartments])\n\n self.dirty = False\n t = int(self.T / self.dt)\n return self.v[:t, :]", "def PSO(m, n, alpha1, alpha2, omega, lower_limit, upper_limit, iterations, fitness, o):\n\n ##initialize particles. Each row is one particle.\n f_g = np.Inf\n x = np.random.uniform(lower_limit, upper_limit, (n, m))\n v = np.zeros(x.shape)\n f_p = fitness(x, o)\n\n # p vector is the personal best vector\n p = x\n # g vector is a single vectot that is the global best\n g = x[np.argmin(f_p)]\n\n track = []\n time_track = []\n \n for i in range(iterations):\n time_first = time.time()\n f_i = fitness(x, o)\n\n # Update personal bests\n cond = f_i < f_p\n p[cond] = x[cond]\n f_p[cond] = f_i[cond]\n\n # update global best (all time)\n if np.min(f_p) < f_g:\n f_g = np.min(f_p)\n g = g = x[np.argmin(f_p)]\n\n # compute velocity\n v = omega*v + alpha1*np.random.uniform(0, 1, (n, m))*(p - x) + \\\n alpha2*np.random.uniform(0, 1, (n, m))*(g - x)\n\n # update positions \n x = x + v\n \n track.append([f_g])\n time_track.append(time.time() - time_first)\n\n time_track = np.array(time_track)\n # print(\"mean_time: {:.2e}\".format(np.mean(time_track)))\n # print(\"mean_time: {:.2e}\".format(np.std(time_track)))\n \n return np.resize(np.array(track), (3000,))", "def preevolve(self):\n\n self.in_preevolve = True\n\n myg = self.cc_data.grid\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n self.cc_data.fill_BC(\"density\")\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n # 1. 
do the initial projection. This makes sure that our original\n # velocity field satisties div U = 0\n\n # the coefficient for the elliptic equation is beta_0^2/rho\n coeff = 1/rho\n beta0 = self.base[\"beta0\"]\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # next create the multigrid object. We defined phi with\n # the right BCs previously\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n # solve D (beta_0^2/rho) G (phi/beta_0) = D( beta_0 U )\n\n # set the RHS to divU and solve\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-10)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi = self.cc_data.get_var(\"phi\")\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of phi and update the\n # velocities\n # FIXME: this update only needs to be done on the interior\n # cells -- not ghost cells\n gradp_x, gradp_y = mg.get_solution_gradient(grid=myg)\n\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= coeff.v()*gradp_x.v()\n v.v()[:, :] -= coeff.v()*gradp_y.v()\n\n # fill the ghostcells\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n # 2. now get an approximation to gradp at n-1/2 by going through the\n # evolution.\n\n # store the current solution -- we'll restore it in a bit\n orig_data = patch.cell_center_data_clone(self.cc_data)\n\n # get the timestep\n self.method_compute_timestep()\n\n # evolve\n self.evolve()\n\n # update gradp_x and gradp_y in our main data object\n new_gp_x = self.cc_data.get_var(\"gradp_x\")\n new_gp_y = self.cc_data.get_var(\"gradp_y\")\n\n orig_gp_x = orig_data.get_var(\"gradp_x\")\n orig_gp_y = orig_data.get_var(\"gradp_y\")\n\n orig_gp_x[:, :] = new_gp_x[:, :]\n orig_gp_y[:, :] = new_gp_y[:, :]\n\n self.cc_data = orig_data\n\n if self.verbose > 0:\n print(\"done with the pre-evolution\")\n\n self.in_preevolve = False", "async def dkp(self, ctx, *, filters: to_kwargs = None):\n\n points = eqdkp.get_points(filters)\n chunks = [points[i:i + 10] for i in range(0, len(points), 10)]\n for chunk in chunks:\n await ctx.author.send(f\"\"\"```\\n{chunk}```\"\"\")", "def parse_chunks(self): \n result_particles = []\n (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()\n\n while chunk is not None:\n #\n # Discard the Flag record since it has already been processed.\n # We also need to check for this being the first record, \n # since an end of velocity record could result in a pattern match \n # with a Flag record if the size of the velocity records are \n # greater than or equal to the Flag record size.\n #\n if self._read_state[StateKey.FIRST_RECORD] and \\\n FLAG_RECORD_MATCHER.match(chunk):\n self._increment_state(FLAG_RECORD_SIZE)\n\n #\n # If we haven't reached the end of the Velocity record,\n # see if this next record is the last one (all zeroes).\n #\n elif not self._read_state[StateKey.VELOCITY_END]:\n velocity_end = 
self.velocity_end_record_matcher.match(chunk)\n self._increment_state(self.velocity_record_size)\n\n #\n # A velocity data record of all zeroes does not generate\n # a data particle.\n #\n if velocity_end:\n self._read_state[StateKey.VELOCITY_END] = True\n else:\n #\n # If the file is missing an end of velocity record,\n # meaning we'll exhaust the file and run off the end,\n # this test will catch it.\n #\n velocity_fields = self.parse_velocity_record(chunk)\n if velocity_fields:\n #\n # Generate a data particle for this record and add\n # it to the end of the particles collected so far.\n #\n timestamp = self.calculate_timestamp()\n ntp_time = ntplib.system_to_ntp_time(timestamp)\n\n particle = self._extract_sample(\n Vel3dKWfpStcVelocityDataParticle,\n None, velocity_fields, ntp_time)\n\n result_particles.append((particle,\n copy.copy(self._read_state)))\n\n #\n # Ran off the end of the file. Tell 'em the bad news.\n #\n else:\n log.warn(\"EOF reading velocity records\")\n raise SampleException(\"EOF reading velocity records\")\n\n #\n # If we have read the end of velocity data records,\n # the next record is the Time data record by definition.\n # Generate the data particle and\n # add it to the end of the particles collected so far.\n #\n else:\n #\n # Make sure there was enough data to comprise a Time record.\n # We can't verify the validity of the data,\n # only that we had enough data.\n #\n time_fields = self.parse_time_record(chunk)\n if time_fields:\n #\n # Convert the tuple to a list, add the number of\n # Velocity record received (not counting the end of\n # Velocity record, and convert back to a tuple.\n #\n time_list = list(time_fields)\n time_list.append(self.calculate_record_number() - 1)\n time_fields = tuple(time_list)\n ntp_time = ntplib.system_to_ntp_time(self.time_on)\n\n particle = self._extract_sample(\n Vel3dKWfpStcTimeDataParticle, \n None, time_fields, ntp_time)\n\n self._increment_state(TIME_RECORD_SIZE)\n result_particles.append((particle,\n copy.copy(self._read_state)))\n\n else:\n log.warn(\"EOF reading time record\")\n raise SampleException(\"EOF reading time record\")\n\n self._read_state[StateKey.FIRST_RECORD] = False\n\n (timestamp, chunk, start, \n end) = self._chunker.get_next_data_with_index()\n\n return result_particles", "def particleRenderInfo(*args, attrList: Union[int, bool]=0, attrListAll: bool=True, name:\n Union[int, bool]=0, renderTypeCount: bool=True, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass", "def Vps(self):\n return [elem['Vp'] for elem in self.__compartments]", "def filter(self, observations):\n\n (_, _, _, x_filtered, P_filtered) = filter(self.F, self.Q, self.H, self.R, self.x_0, self.P_0, observations)\n return x_filtered, P_filtered", "def _unity_potions_given_constraint(\n constraint: graphs.Constraint) -> List[alchemy_pb2.PotionProperties]:\n graph = graphs.create_graph_from_constraint(constraint)\n # Use any potion map, it doesn't matter since we only care about reactions.\n pm = stones_and_potions.PotionMap(dim_map=[0, 1, 2], dir_map=[1, 1, 1])\n perceived_and_latent = [(pm.apply_inverse(l), l) for l in\n stones_and_potions.possible_latent_potions()]\n return [unity_python_conversion.to_potion_unity_properties(p, l, graph)\n for p, l in perceived_and_latent]", "def create_flux_vector_pms_gr(self):\n soma_prod = 0\n soma_inj = 0\n lim4 = 1e-4\n store_velocity = {}\n store_flux = {}\n for primal in self.primals:\n #1\n primal_id = self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n primal_id = 
self.ident_primal[primal_id]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n for volume in fine_elems_in_primal:\n #2\n flux = {}\n velocity = {}\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #3\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n if adj not in fine_elems_in_primal:\n #4\n pvol = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n #3\n else:\n #4\n pvol = self.mb.tag_get_data(self.pcorr_tag, volume, flat=True)[0]\n padj = self.mb.tag_get_data(self.pcorr_tag, adj, flat=True)[0]\n #3\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n keq = self.kequiv(kvol, kadj)/(self.mi)\n keq2 = keq\n keq = keq*(np.dot(self.A, uni))\n pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))\n q = (grad_p)*keq - grad_z*keq*self.gama\n print((grad_p)*keq)\n print(- grad_z*keq*self.gama)\n print(q)\n print(self.store_flux_pf_gr[volume][tuple(unit)])\n print('\\n')\n import pdb; pdb.set_trace()\n\n if gid_adj > gid_vol:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n else:\n v = -((grad_p2)*keq2 - grad_z*self.gama*keq2)\n\n flux[tuple(unit)] = q\n velocity[tuple(unit)] = v\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n\n #2\n # print(gid_vol)\n # print(velocity)\n # print('\\n')\n # import pdb; pdb.set_trace()\n store_flux[volume] = flux\n self.mb.tag_set_data(self.flux_fine_pms_tag, volume, sum(flux.values()))\n # flt = sum(flux.values())\n # if volume not in self.wells_inj and volume not in self.wells_prod:\n # lim4 = 1e-7\n # if abs(flt) > lim4:\n # print(gid_vol)\n # print(flt)\n # import pdb; pdb.set_trace()\n # flt = sum(flux.values())\n store_velocity[volume] = velocity\n\n for volume in set(self.all_fine_vols) - set(self.wells):\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n values = store_flux[volume].values()\n if sum(values) > lim4:\n print('fluxo multiescala nao esta dando conservativo')\n print('gid:{0}'.format(gid))\n print(sum(values))\n import pdb; pdb.set_trace()\n\n with open('fluxo_multiescala_gr.txt', 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat= True)[0]\n values = store_flux[volume].values()\n if volume in self.wells_inj:\n soma_inj += sum(values)\n else:\n soma_prod += sum(values)\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(soma_inj))\n arq.write('soma_prod:{0}\\n'.format(soma_prod))\n\n return store_flux", "def pions ( self ) :\n 
from GaudiConfUtils.ConfigurableGenerators import FilterDesktop\n ##\n if self['NOPIDHADRONS'] :\n from StandardParticles import StdAllNoPIDsPions as inpts\n pioncut = self['PionCut']\n else :\n from StandardParticles import StdAllLooseANNPions as inpts\n pioncut = \"(%s)&(%s)\" % ( self['PionCut'] , self['PionPIDCut'] ) \n ##\n return self.make_selection (\n 'Pion' ,\n FilterDesktop ,\n [ inpts ] ,\n Code = pioncut ,\n )", "def sphere_cart()\ndef simulator(nparticles, ninteractions, vacradius, vesradius):\n for i in range(nparticles):\n #neutron = neutron_func(i)\n energy = 14E6\n phi = calc_phi()\n theta = calc_theta()\n xneut = 0\n yneut = 0\n zneut = 0\n d = collision_distance(phi, theta, xneut, zneut)\n r = -np.log(random.random(seed))/sigma_t(energy)\n j = 0\n while (j <= ninteractions)\n xneut = sphere_cart(scatter(energy, A)[0:2])", "def __init__(self, particles):\n self.particles = particles", "def _calculateParticlesProducedDerivOpt(N, gluonDOF, momentaMagSquared, omegaFFT):\n # Where we will calculate dN/d^2k \n particleProduction = np.zeros((N,N))\n\n # # 2D Levi-Cevita symbol\n LCS = np.array([[0,1],[-1,0]])\n\n # # 2D Delta function\n KDF = np.array([[1,0],[0,1]])\n\n # Note that unlike in the rest of the code, i and j *do not* refer to the\n # spacial indices here: x and y do (too many indices... :/ )\n for y in range(N):\n for x in range(N):\n # To prevent any divide by zero errors\n if momentaMagSquared[y,x] == 0:\n continue\n \n # All of these 2s are for our two dimensions, x and y\n for i in range(2):\n for j in range(2):\n for l in range(2):\n for m in range(2):\n\n for a in range(gluonDOF):\n particleProduction[y,x] += np.real(2/(2*np.pi)**3 / momentaMagSquared[y,x] * (\n (KDF[i,j]*KDF[l,m] + LCS[i,j]*LCS[l,m])) * (\n omegaFFT[y,x,i,j,a] * np.conj(omegaFFT[y,x,l,m,a])))\n\n return particleProduction", "def init_pvelocity(self, population):\n for individual in population:\n # the initial speed is set to zero\n individual.features['velocity'] = [0] * len(individual.vector)\n\n return", "def normal_ics(nparticles,pscale=1,vscale=1,masses=None):\n from core import Particles\n \n pos = pscale*np.randn(3,nparticles)\n vel = vscale*np.randn(3,nparticles)\n \n if masses is None:\n return Particles(pos,vel)\n else:\n return Particles(pos,vel,masses)", "def __init__(self,nparticles,initial_condition):\n self.nparticles = nparticles\n self.particles = np.array([Particle(mass,x,y) for x,y,mass in initial_condition])\n self.mass = np.array([self.particles[i].mass for i in range(len(self.particles))])\n self.position = np.array([self.particles[i].position for i in range(len(self.particles))])\n self.momentum = np.array([self.particles[i].momentum for i in range(len(self.particles))])", "def generate_var_scatter(self):\n pass", "def _update_surface_normals(self):\n\n # This is the case if there are too few points to\n # compute normals so there can be values to remove\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n self.director.data[...] 
= 0.0\n else:\n\n particle_coords = self.swarm.particleCoordinates.data\n\n Nx = np.empty(self.swarm.particleLocalCount)\n Ny = np.empty(self.swarm.particleLocalCount)\n Nz = np.empty(self.swarm.particleLocalCount)\n\n for i, xyz in enumerate(particle_coords):\n r, neighbours = self.kdtree.query(particle_coords[i], k=4)\n\n # this point is neighbour[0] and neighbour points are neighbours[(1,2,3)]\n XYZ1 = self.kdtree.data[neighbours[1]]\n XYZ2 = self.kdtree.data[neighbours[2]]\n XYZ3 = self.kdtree.data[neighbours[3]]\n\n dXYZ1 = XYZ2 - XYZ1\n dXYZ2 = XYZ3 - XYZ1\n\n # Cross product of those 2 vectors can be use as the local normal (perhaps)\n\n Nx[i], Ny[i], Nz[i] = np.cross(dXYZ1, dXYZ2)\n #if i == 0:\n # print(Nx, Ny, Nz)\n # print(xyz[0], xyz[1],xyz[2])\n # print((self.insidePt[0] - xyz[0]) * Nx[i] )\n\n if (self.insidePt):\n sign = np.sign( (self.insidePt[0] - xyz[0]) * Nx[i] +\n (self.insidePt[1] - xyz[1]) * Ny[i] +\n (self.insidePt[2] - xyz[2]) * Nz[i] )\n Nx[i] *= sign\n Ny[i] *= sign\n Nz[i] *= sign\n\n\n for i in range(0, self.swarm.particleLocalCount):\n scale = 1.0 / np.sqrt(Nx[i]**2 + Ny[i]**2 + Nz[i]**2)\n Nx[i] *= scale\n Ny[i] *= scale\n Nz[i] *= scale\n\n\n self.director.data[:,0] = Nx[:]\n self.director.data[:,1] = Ny[:]\n self.director.data[:,2] = Nz[:]\n\n print(\"Surf Norms\")\n\n return", "def normalize_particles(self):\n tot_weight = sum([particle.w for particle in self.particle_cloud]) or 1\n for particle in self.particle_cloud:\n particle.w = particle.w / tot_weight;", "def getDifferentialFlowDataForOneEvent(self, event_id=1, particleName=\"pion\", order=2, pT_range=None, where=\"\", orderBy=\"pT\"):\n whereClause = \"event_id=%d and pid=%d and n=%d\" % (event_id, self._pid(particleName), order)\n if pT_range:\n whereClause += \" and %g<=pT and pT<=%g\" % (pT_range[0], pT_range[1])\n if where:\n whereClause += \" and \" + where\n return np.asarray(self.db.selectFromTable(\"diff_vn\", (\"pT\", \"vn_real\", \"vn_imag\"), whereClause=whereClause, orderByClause=orderBy))", "def MultiplePositions(radius,velocity,time,dt):\n \n # Stop the calculation when the outermost point takes a whole revolution\n # Outermost point position\n outerposition = radius[len(radius)-1]\n \n # Calculate the positions of outermost point:\n xouter = CalculatePosition(radius[len(radius)-1],velocity[len(radius)-1],time,dt)[0]\n \n # Circumference of the outer circle\n circouter = 2*np.pi*outerposition\n \n # Distance the outer object traveled\n distance = 0\n istop = 0\n \n # New time\n storedtime = CalculatePosition(radius[0],velocity[0],time,dt)[2]\n for t in storedtime:\n if distance < circouter:\n distance = velocity[len(radius)-1]*t\n else:\n istop = np.where(storedtime == t)[0] # find index of the numpy array \n break\n \n istop = int(istop)\n newstoredtime = storedtime[:istop-1]\n \n xmultiple = []\n ymultiple = []\n\n for i in range(len(radius)):\n x = CalculatePosition(radius[i],velocity[i],time,dt)[0]\n y = CalculatePosition(radius[i],velocity[i],time,dt)[1]\n xmultiple.append(x)\n ymultiple.append(y)\n\n xmultiple = np.array(xmultiple)\n ymultiple = np.array(ymultiple)\n \n return xmultiple, ymultiple, newstoredtime", "def XsamsParticles(Particles):\n if not Particles:\n return\n yield \"<Particles>\"\n for Particle in makeiter(Particles):\n cont, ret = checkXML(Particle)\n if cont:\n yield ret\n continue\n G = lambda name: GetValue(name, Particle=Particle)\n yield \"\"\"<Particle stateID=\"S%s-%s\" name=\"%s\">\"\"\" % (G('NodeID'), G('ParticleStateID'), G('ParticleName'))\n yield 
\"<ParticleProperties>\"\n yield \"<ParticleCharge>%s</ParticleCharge>\" % G(\"ParticleCharge\")\n yield makeDataType(\"ParticleMass\", \"ParticleMass\", G)\n yield \"<ParticleSpin>%s</ParticleSpin>\" % G(\"ParticleSpin\")\n yield \"<ParticlePolarization>%s</ParticlePolarization>\" % G(\"ParticlePolarization\")\n yield \"</ParticleProperties>\"\n yield \"</Particle>\"\n yield \"</Particles>\"", "def evolve(self):\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n gradp_x = self.cc_data.get_var(\"gradp_x\")\n gradp_y = self.cc_data.get_var(\"gradp_y\")\n\n # note: the base state quantities do not have valid ghost cells\n beta0 = self.base[\"beta0\"]\n beta0_edges = self.base[\"beta0-edges\"]\n\n rho0 = self.base[\"rho0\"]\n\n phi = self.cc_data.get_var(\"phi\")\n\n myg = self.cc_data.grid\n\n # ---------------------------------------------------------------------\n # create the limited slopes of rho, u and v (in both directions)\n # ---------------------------------------------------------------------\n limiter = self.rp.get_param(\"lm-atmosphere.limiter\")\n\n ldelta_rx = reconstruction.limit(rho, myg, 1, limiter)\n ldelta_ux = reconstruction.limit(u, myg, 1, limiter)\n ldelta_vx = reconstruction.limit(v, myg, 1, limiter)\n\n ldelta_ry = reconstruction.limit(rho, myg, 2, limiter)\n ldelta_uy = reconstruction.limit(u, myg, 2, limiter)\n ldelta_vy = reconstruction.limit(v, myg, 2, limiter)\n\n # ---------------------------------------------------------------------\n # get the advective velocities\n # ---------------------------------------------------------------------\n\n \"\"\"\n the advective velocities are the normal velocity through each cell\n interface, and are defined on the cell edges, in a MAC type\n staggered form\n\n n+1/2\n v\n i,j+1/2\n +------+------+\n | |\n n+1/2 | | n+1/2\n u + U + u\n i-1/2,j | i,j | i+1/2,j\n | |\n +------+------+\n n+1/2\n v\n i,j-1/2\n\n \"\"\"\n\n # this returns u on x-interfaces and v on y-interfaces. These\n # constitute the MAC grid\n if self.verbose > 0:\n print(\" making MAC velocities\")\n\n # create the coefficient to the grad (pi/beta) term\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n # create the source term\n source = self.aux_data.get_var(\"source_y\")\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n rhoprime = self.make_prime(rho, rho0)\n source.v()[:, :] = rhoprime.v()*g/rho.v()\n self.aux_data.fill_BC(\"source_y\")\n\n _um, _vm = lm_interface.mac_vels(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source)\n\n u_MAC = ai.ArrayIndexer(d=_um, grid=myg)\n v_MAC = ai.ArrayIndexer(d=_vm, grid=myg)\n\n # ---------------------------------------------------------------------\n # do a MAC projection to make the advective velocities divergence\n # free\n # ---------------------------------------------------------------------\n\n # we will solve D (beta_0^2/rho) G phi = D (beta_0 U^MAC), where\n # phi is cell centered, and U^MAC is the MAC-type staggered\n # grid of the advective velocities.\n\n if self.verbose > 0:\n print(\" MAC projection\")\n\n # create the coefficient array: beta0**2/rho\n # MZ!!!! 
probably don't need the buf here\n coeff.v(buf=1)[:, :] = 1.0/rho.v(buf=1)\n coeff.v(buf=1)[:, :] = coeff.v(buf=1)*beta0.v2d(buf=1)**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi-MAC\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi-MAC\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi-MAC\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi-MAC\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # MAC velocities are edge-centered. div{beta_0 U} is cell-centered.\n div_beta_U.v()[:, :] = \\\n beta0.v2d()*(u_MAC.ip(1) - u_MAC.v())/myg.dx + \\\n (beta0_edges.v2dp(1)*v_MAC.jp(1) -\n beta0_edges.v2d()*v_MAC.v())/myg.dy\n\n # solve the Poisson problem\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-12)\n\n # update the normal velocities with the pressure gradient -- these\n # constitute our advective velocities. Note that what we actually\n # solved for here is phi/beta_0\n phi_MAC = self.cc_data.get_var(\"phi-MAC\")\n phi_MAC[:, :] = mg.get_solution(grid=myg)\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n coeff_x = myg.scratch_array()\n b = (3, 1, 0, 0) # this seems more than we need\n coeff_x.v(buf=b)[:, :] = 0.5*(coeff.ip(-1, buf=b) + coeff.v(buf=b))\n\n coeff_y = myg.scratch_array()\n b = (0, 0, 3, 1)\n coeff_y.v(buf=b)[:, :] = 0.5*(coeff.jp(-1, buf=b) + coeff.v(buf=b))\n\n # we need the MAC velocities on all edges of the computational domain\n # here we do U = U - (beta_0/rho) grad (phi/beta_0)\n b = (0, 1, 0, 0)\n u_MAC.v(buf=b)[:, :] -= \\\n coeff_x.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.ip(-1, buf=b))/myg.dx\n\n b = (0, 0, 0, 1)\n v_MAC.v(buf=b)[:, :] -= \\\n coeff_y.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.jp(-1, buf=b))/myg.dy\n\n # ---------------------------------------------------------------------\n # predict rho to the edges and do its conservative update\n # ---------------------------------------------------------------------\n _rx, _ry = lm_interface.rho_states(myg.ng, myg.dx, myg.dy, self.dt,\n rho, u_MAC, v_MAC,\n ldelta_rx, ldelta_ry)\n\n rho_xint = ai.ArrayIndexer(d=_rx, grid=myg)\n rho_yint = ai.ArrayIndexer(d=_ry, grid=myg)\n\n rho_old = rho.copy()\n\n rho.v()[:, :] -= self.dt*(\n # (rho u)_x\n (rho_xint.ip(1)*u_MAC.ip(1) - rho_xint.v()*u_MAC.v())/myg.dx +\n # (rho v)_y\n (rho_yint.jp(1)*v_MAC.jp(1) - rho_yint.v()*v_MAC.v())/myg.dy)\n\n self.cc_data.fill_BC(\"density\")\n\n # update eint as a diagnostic\n eint = self.cc_data.get_var(\"eint\")\n gamma = self.rp.get_param(\"eos.gamma\")\n eint.v()[:, :] = self.base[\"p0\"].v2d()/(gamma - 1.0)/rho.v()\n\n # ---------------------------------------------------------------------\n # recompute the interface states, using the advective velocity\n # from above\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" making u, v edge states\")\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 2.0/(rho.v() + rho_old.v())\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n _ux, _vx, _uy, _vy = \\\n lm_interface.states(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source,\n u_MAC, v_MAC)\n\n u_xint = ai.ArrayIndexer(d=_ux, grid=myg)\n v_xint = 
ai.ArrayIndexer(d=_vx, grid=myg)\n u_yint = ai.ArrayIndexer(d=_uy, grid=myg)\n v_yint = ai.ArrayIndexer(d=_vy, grid=myg)\n\n # ---------------------------------------------------------------------\n # update U to get the provisional velocity field\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" doing provisional update of u, v\")\n\n # compute (U.grad)U\n\n # we want u_MAC U_x + v_MAC U_y\n advect_x = myg.scratch_array()\n advect_y = myg.scratch_array()\n\n advect_x.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(u_xint.ip(1) - u_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(u_yint.jp(1) - u_yint.v())/myg.dy\n\n advect_y.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(v_xint.ip(1) - v_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(v_yint.jp(1) - v_yint.v())/myg.dy\n\n proj_type = self.rp.get_param(\"lm-atmosphere.proj_type\")\n\n if proj_type == 1:\n u.v()[:, :] -= (self.dt*advect_x.v() + self.dt*gradp_x.v())\n v.v()[:, :] -= (self.dt*advect_y.v() + self.dt*gradp_y.v())\n\n elif proj_type == 2:\n u.v()[:, :] -= self.dt*advect_x.v()\n v.v()[:, :] -= self.dt*advect_y.v()\n\n # add the gravitational source\n rho_half = 0.5*(rho + rho_old)\n rhoprime = self.make_prime(rho_half, rho0)\n source[:, :] = rhoprime*g/rho_half\n self.aux_data.fill_BC(\"source_y\")\n\n v[:, :] += self.dt*source\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n if self.verbose > 0:\n print(\"min/max rho = {}, {}\".format(self.cc_data.min(\"density\"), self.cc_data.max(\"density\")))\n print(\"min/max u = {}, {}\".format(self.cc_data.min(\"x-velocity\"), self.cc_data.max(\"x-velocity\")))\n print(\"min/max v = {}, {}\".format(self.cc_data.min(\"y-velocity\"), self.cc_data.max(\"y-velocity\")))\n\n # ---------------------------------------------------------------------\n # project the final velocity\n # ---------------------------------------------------------------------\n\n # now we solve L phi = D (U* /dt)\n if self.verbose > 0:\n print(\" final projection\")\n\n # create the coefficient array: beta0**2/rho\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n mg.init_RHS(div_beta_U/self.dt)\n\n # use the old phi as our initial guess\n phiGuess = mg.soln_grid.scratch_array()\n phiGuess.v(buf=1)[:, :] = phi.v(buf=1)\n mg.init_solution(phiGuess)\n\n # solve\n mg.solve(rtol=1.e-12)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of p and update the velocities\n # this differs depending on what we projected.\n gradphi_x, gradphi_y = mg.get_solution_gradient(grid=myg)\n\n # U = U - (beta_0/rho) grad (phi/beta_0)\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= self.dt*coeff.v()*gradphi_x.v()\n v.v()[:, :] -= self.dt*coeff.v()*gradphi_y.v()\n\n # store gradp 
for the next step\n\n if proj_type == 1:\n gradp_x.v()[:, :] += gradphi_x.v()\n gradp_y.v()[:, :] += gradphi_y.v()\n\n elif proj_type == 2:\n gradp_x.v()[:, :] = gradphi_x.v()\n gradp_y.v()[:, :] = gradphi_y.v()\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n self.cc_data.fill_BC(\"gradp_x\")\n self.cc_data.fill_BC(\"gradp_y\")\n\n # increment the time\n if not self.in_preevolve:\n self.cc_data.t += self.dt\n self.n += 1", "def _create_velocities(self):\n velocities = []\n for boid in self.boids:\n neighbouring = self.get_neighbours(boid)\n boid_v_x = boid.v * math.cos(boid.direction)\n boid_v_y = boid.v * math.sin(boid.direction)\n if neighbouring:\n coh_x, coh_y = ReynoldsModel.calculate_cohesion(neighbouring)\n coh_x -= boid.x\n coh_y -= boid.y\n align_x, align_y = ReynoldsModel.calculate_alignment(neighbouring)\n sep_x, sep_y = self.calculate_separation(boid, neighbouring)\n velocities.append((\n boid_v_x + self.coh_coef * coh_x + self.align_coef * align_x + self.sep_coef * sep_x,\n boid_v_y + self.coh_coef * coh_y + self.align_coef * align_y + self.sep_coef * sep_y\n ))\n else:\n velocities.append((boid_v_x, boid_v_y))\n return velocities" ]
[ "0.5962206", "0.57577115", "0.5713186", "0.56626225", "0.56291974", "0.5555897", "0.5501763", "0.5466105", "0.54309624", "0.54021454", "0.5381622", "0.53503567", "0.53398895", "0.5317946", "0.52965754", "0.5289197", "0.5287057", "0.52832603", "0.52564645", "0.5244272", "0.52303654", "0.5228992", "0.5225467", "0.52220017", "0.5197605", "0.51926786", "0.51897305", "0.5189419", "0.5178475", "0.51722455", "0.5168742", "0.5131559", "0.51224107", "0.5115878", "0.5112627", "0.51113397", "0.5104814", "0.50914085", "0.5079742", "0.5078039", "0.507538", "0.50741935", "0.5058848", "0.5052289", "0.50263584", "0.50160134", "0.5013342", "0.50102955", "0.5005571", "0.5002891", "0.50009775", "0.49941006", "0.49871683", "0.49863562", "0.4986183", "0.4974769", "0.49707666", "0.49702522", "0.4968898", "0.49596176", "0.4957179", "0.49528039", "0.4948118", "0.49405995", "0.4935902", "0.49235913", "0.49203196", "0.49125376", "0.49041143", "0.4876932", "0.48735866", "0.487344", "0.48672396", "0.48658693", "0.48590147", "0.4858487", "0.4854112", "0.4847165", "0.48454887", "0.4839366", "0.4839338", "0.48369563", "0.48292932", "0.48173648", "0.48122895", "0.48022714", "0.4800221", "0.4796741", "0.47945115", "0.47904697", "0.47852916", "0.47837558", "0.4782244", "0.47793567", "0.4777836", "0.47734308", "0.47689307", "0.4761407", "0.4760073", "0.47545" ]
0.6084097
0
This api does not return xml
def xml(self): raise NotImplementedError('This api does not return xml')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xml(self, request):\n raise Exception(\"Not Implemented\")", "def content_api_xml(url, request):\n headers = {'content-type': 'application/xml'}\n content = 'xml string'\n return response(status_code=200,\n content=content,\n headers=headers,\n request=request)", "def make_request_xml(self):\n #print (self.url)\n try:\n with closing(get(self.url, stream=True)) as resp: #returns b`xml`\n if self.is_good_enough_xml(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def _api_call(url: str) -> ET.Element:\n result = requests.get(url)\n if result.status_code != 200:\n raise RequestException(f\"API status code {result.status_code} for URL: {url}\")\n\n # Remove HTML line breaks (which cause confusion in the XML parsing)\n t: str = re.sub(r\"\\s*(<br/>)+\\s*\", r\" \", result.text)\n\n x_tree = ET.fromstring(t)\n return x_tree", "def api(self) -> str:", "def parse(self, response):", "def test_xml_direct(self): \n response = client.result(True, 'xml', 'unittest', test_data = self.test_data)\n root = ET.fromstring(response)\n first_name = root[0][0][0].text\n self.assertEqual(first_name,'John', 'Should print John')\n nationality = '<nationality>' in response\n self.assertFalse(nationality, 'Nationality should not be present')", "def _xml_command(self, request):\n response = self._send(request)\n self._check_response(response)\n return response", "def render_GET(self, request):\n return etree.tostring(self.xml(request), pretty_print=True)", "def test():\n r = Response(response=\"This worked!\", status=200,\n mimetype=\"application/xml\")\n r.headers[\"Content-Type\"] = \"text/xml; charset=utf-8\"\n return r", "def read_xml(self):\n pass", "def xml(self):\n raise NotImplementedError('must be implemented by all subclasses')", "def __call_api(self, values):\n # Add auth key to the request dictionary if not supplie\n if 'auth' not in values:\n values['auth'] = self.auth_data['auth']\n\n # Encode the data for a GET request\n data = urllib.parse.urlencode(values)\n\n #print values\n\n # Try to make the request\n xml_string = urllib.request.urlopen(self.xml_rpc + '?' 
+ data).read()\n\n # Parse the XML\n response_data = xmltodict(self.__sanitize(xml_string))\n\n # Ensure that there was XML to parse\n if not response_data:\n return None\n\n # Grab the root element\n response_data = response_data['root'][0]['child']\n\n return response_data", "def xml():\n response = make_response(render_template(\"sample.xml\"))\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response", "def parse_response(self):\n pass", "def read(self, return_string=False):\r\n # Get result data from debugger engine and verify length of response\r\n data = self.read_data()\r\n\r\n # Show debug output\r\n debug('[Response data] %s' % data)\r\n\r\n # Return data string\r\n if return_string:\r\n return data\r\n\r\n # Remove special character quoting\r\n data = self.unescape(data)\r\n\r\n # Replace invalid XML characters\r\n data = ILLEGAL_XML_RE.sub('?', data)\r\n\r\n # Create XML document object\r\n document = ET.fromstring(data)\r\n return document", "def main_response(self, data):", "def main_response(self, data):", "def xml(self):\n return self._xml", "def xml(self):\n return self._xml", "def get_usercp_xml(self,):\n response = self.session.get('https://ngb.to/usercp.php?type=xml')\n return response.text", "def parsexml(self):\n raise NotImplementedError", "def get_data(self):", "def get(self):\n xml = self._robot.GetCapabilitiesXml()\n self.response.headers['Content-Type'] = 'text/xml'\n self.response.out.write(xml)", "def request_xml(self):\n xml_filename = pkg_resources.resource_filename(__name__, 'data/request.xml')\n with open(xml_filename, 'r') as xml_file:\n xml = xml_file.read()\n xml = xml.format(username=self.username,\n password=self.password,\n timestamp=time.time(),\n hardware_id=self.hardware_id(),\n advertisement_id=self.advertisement_id(),\n locale=self.locale)\n return xml", "def retreive_xml(**options):\n get_query = []\n for k, v in options.iteritems():\n get_query.append(k + '=' + v)\n\n url = twitter_api + '&'.join(get_query)\n p = urllib.urlopen(url)\n content = p.read()\n return content", "def execute(self):\n headers = {\n 'Content-type': 'application/x-www-form-urlencoded',\n 'Accept-Charset': 'utf-8',\n 'User-Agent': USER_AGENT\n }\n request = urllib2.Request(self.url(), headers=headers)\n response = urllib2.urlopen(request)\n \n return etree.parse(response)", "def get_pmid_xml(api_key,idtype,searchid):\n headers = {'X-ELS-APIKey':api_key}\n url = \"http://api.elsevier.com/content/search/index:SCOPUS?query=\" + idtype + \"(\" + searchid + \")&field=doi\"\n #url = 'http://api.elsevier.com/content/abstract/scopus_id:' + str(pmid)\n print url\n web = requests.get(url, headers=headers)\n try:\n doi = web.json()['search-results']['entry'][0]['prism:doi']\n except: \n print web.json()\n doi = False\n \n try: \n scopus_abstract = web.json()['search-results']['entry'][0]['prism:url']\n web_scopus = requests.get(scopus_abstract, headers=headers)\n xml_text = web_scopus.text\n except: \n scopus_abstract = False\n xml_text = False\n\n output = {'doi':doi, 'scopus_abstract':scopus_abstract, 'xml_text':xml_text}\n return(output)", "def get_xml(self):\n with io.StringIO() as string:\n string.write(ET.tostring(self.root, encoding=\"unicode\"))\n return string.getvalue()", "def serve_metadata():\n xml = generate_xml_metadata()\n return Response(xml, mimetype=\"application/gzip\")", "def xml():\n try:\n return Response(render_template(\n 'lti.xml.j2'), mimetype='application/xml'\n )\n except:\n app.logger.error(\"Error with XML.\")\n return 
return_error('''Error with XML. Please refresh and try again. If this error persists,\n please contact support.''')", "def get_response(self, response, pack):\n\n pass", "def retrieval():\n try:\n if request.method == 'GET':\n country = request.args.get('country') # If no key then null\n year = request.args.get('year') # If no key then null\n return spout(country, year)\n except Exception as e:\n # Unfortunately I'm not going to wrap this in indv. strings\n r = Response(response=error_msg+str(e),\n status=404,\n mimetype=\"application/xml\")\n r.headers[\"Content-Type\"] = \"text/xml; charset=utf-8\"\n return r", "def get_details(self):", "def result(self): \n return self.body", "def content_api_xhtml(url, request):\n headers = {'content-type': 'application/xhtml'}\n content = 'xhtml string'\n return response(status_code=200,\n content=content,\n headers=headers,\n request=request)", "def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)", "def getIdentifierMetaXML(base_url, identifier):\n\n\tquery_url = base_url + \"/meta/\" + urllib.quote_plus(identifier)\n\tprint(\"\\t\\t%s\" % query_url)\n\n\ttry:\n\t\trequest = urllib2.urlopen(query_url)\n\t\tresponse = request.read()\n\t\tresponse_xml = ET.fromstring(response)\n\texcept:\n\t\tprint \"\\t\\tFailed request: %s\" % query_url\n\t\tresponse_xml = None\n\n\treturn response_xml", "def getFunc(method):\n\tclient = ParameterClient.ParameterClient(host, port)\n\tresult = client.new_strp()\n\tret = client.get(method, result)\n\n\toutput = xml_ver + xml_style\n\toutput += client.strp_value(result)\n\n\tresponse_headers = [('Content-type', 'text/xml'),\n\t\t\t ('Pragma', 'no-cache'),\n\t\t\t ('Cache-Control', 'no-cache'),\n\t\t\t ('Expires', '-1'),\n\t\t\t ('Content-Length', str(len(output)))]\n return response_headers, output", "def get_single_xml_metadata(_oid):\n record = Metadata.objects.get_or_404(pk=_oid)\n\n json_rec = json.loads(record.to_json())\n\n d_fmt = '%Y-%m-%d'\n\n d_fmt1 = '%Y-%m-%dT%H:%M:%SZ'\n\n try:\n #start/end date might not exist yet\n if record.start_date is not None:\n json_rec['start_date'] = record.start_date.isoformat() + '.000Z'\n if record.end_date is not None:\n json_rec['end_date'] = record.end_date.isoformat() + '.000Z'\n if record.first_pub_date is not None:\n json_rec['first_pub_date'] = record.first_pub_date.strftime(d_fmt)\n if record.md_pub_date is not None:\n json_rec['md_pub_date'] = record.md_pub_date.strftime(d_fmt1)\n\n except AttributeError:\n # if we get an attribute error, continue; any other error will still\n # cause the program to fail\n pass\n\n json_rec['last_mod_date'] = record.last_mod_date.strftime(d_fmt1)\n\n\n # for XSLT, need something inside of each <item> in this generic XML\n _enclose_word = lambda k: {'word': k}\n _enclose_words = lambda words: map(_enclose_word, words)\n\n json_rec['thematic_keywords'] = _enclose_words(\n json_rec['thematic_keywords'])\n\n json_rec['place_keywords'] = _enclose_words(json_rec['place_keywords'])\n\n json_rec['data_format'] = _enclose_words(json_rec['data_format'])\n\n json_rec['topic_category'] = _enclose_words(json_rec['topic_category'])\n\n _enclose_url = lambda url: {'url': url}\n\n json_rec['online'] = map(_enclose_url, json_rec['online'])\n\n if record.md_pub_date is not None:\n json_rec['download_url'] = 
\\\n app.config['ATTACHMENT_DOWNLOAD_BASE_URL'] + str(record.id)\n\n xml_str = dicttoxml(dict(record=json_rec)) # , attr_type=False)\n\n return Response(xml_str, 200, mimetype='application/xml')", "def test_xml_file(self):\n response = client.result(False, 'xml', 'unittest', file = 'test_file.csv')\n root = ET.fromstring(response)\n first_name = root[0][0][0].text\n self.assertEqual(first_name,'John', 'Should print John')\n nationality = '<nationality>' in response\n self.assertFalse(nationality, 'Nationality should not be present')", "def parse(self):", "def device_xml() -> Response:\n xml = render_template('device.xml',\n device_model=config.device_model,\n device_version=config.device_version,\n friendly_name=locast_service.city,\n uid=uid,\n host_and_port=host_and_port)\n return Response(xml, mimetype='text/xml')", "def test_get_request_output(self):\n pass", "def web_service_response_example(self, action, controller):", "def output_xml(data,code,headers=None):\r\n resp = make_response(dumps({'response': data}), code)\r\n resp.headers.extend(headers or {})\r\n return resp", "def get(self, data):\n pass", "def get_response_data(self):\r\n raise NotImplementedError", "def epg_xml() -> Response:\n xml = render_template('epg.xml',\n stations=locast_service.get_stations(),\n url_base=host_and_port)\n return Response(xml, mimetype='text/xml')", "def getIdentifierObjectXML(base_url, identifier):\n\n\tquery_url = base_url + \"/object/\" + urllib.quote_plus(identifier)\n\tprint(\"\\t\\t%s\" % query_url)\n\n\ttry:\n\t\trequest = urllib2.urlopen(query_url)\n\t\tresponse = request.read()\n\t\tresponse_xml = ET.fromstring(response)\n\texcept:\n\t\tprint \"\\t\\tFailed request: %s\" % query_url\n\n\t\tresponse_xml = None\n\n\treturn response_xml", "def api():\n\treturn \"The API call\"", "def testServiceXml(self):\n\n text = \"This is a test sentence. 
And another sentence to split.\"\n results = self.client.post(\"workflow\", json={\"name\": \"xml\", \"elements\": [text]}).json()\n\n self.assertEqual(len(results), 1)\n self.assertEqual(len(results[0]), 1)", "def fetch_data(self):", "def parse_api(self, soup):\n return {}", "def get():", "def get():", "def get(self, url):\n h = httplib2.Http('.tmp')\n (response, xml) = h.request(url, \"GET\")\n if int(response['status']) >= 400:\n if 'verbose' in self.args and self.args.verbose:\n print \"URL: %s\" % url\n raise ValueError(\"URL %s response: %s\" % (url, response['status']))\n self.xml = xml\n return True", "def fourohfour(error):\n response = make_response('<?xml version=\"1.0\"?>\\n<updates>\\n</updates>')\n response.mimetype = 'text/xml'\n return response", "def getVotacion(self, url):", "def get(self):\n resp = Response()\n return resp", "def _scrape(self):", "def parse(self, response):\n return super().parse(response)", "def parse(self, response):\n return super().parse(response)", "def acceptxml(func):\n\n @wraps(func)\n def return_xml(*args, **kwargs):\n if request.headers.get('Accept', '*/*') == 'application/xml':\n return json_toXML(g.payload_json)\n\n return return_xml", "def test_01_FindXml(self):", "def to_api_repr(self):\n raise NotImplementedError", "def xml(self):\n return parse_xml(self, tab=\"\\t\", id=self.id or \"\")", "def test_api_response_data(self):", "def get_rpc_resp(self,rpc, ignore_warning, format):\n # data comes in JSON format, needs to be converted \n rpc_val = xmltodict.unparse(rpc) \n rpc_val = rpc_val.encode('utf-8')\n parser = etree.XMLParser(ns_clean=True, recover=True, encoding='utf-8')\n rpc_etree = etree.fromstring(rpc_val, parser=parser)\n resp = self.dev.rpc(rpc_etree, normalize=bool(format == 'xml'), ignore_warning=ignore_warning)\n if(format == 'json'):\n return resp\n return etree.tostring(resp)", "def parse_response(self, response: Any) -> Any:\n return response", "def is_good_enough_xml(self, resp):\n content_type = resp.headers['Content-Type'].lower()\n \n return (resp.status_code == 200 \n and content_type is not None \n and content_type.find('xml') > -1)", "def get(self, request, format=None):\n \n return Response(\"ahla Rami\")", "def getInfo():", "def index():\n\n return \"\"\"\n <div>\n <h1> Image Captioning REST API </h1>\n <h3> The following API end points are valid </h3>\n <ul>\n <h4> Inception V3 </h4>\n <li> <code>/inception/v3/ping </code> - <br/>\n <b> Description : </b> checks availability of the service. returns \"pong\" with status 200 when it is available\n </li>\n <li> <code>/inception/v3/caption/image</code> - <br/>\n <table>\n <tr><th align=\"left\"> Description </th><td> This is a service that can caption images</td></tr>\n <tr><th align=\"left\"> How to supply Image Content </th></tr>\n <tr><th align=\"left\"> With HTTP GET : </th> <td>\n Include a query parameter <code>url </code> which is an http url of JPEG image <br/>\n Example: <code> curl \"localhost:8764/inception/v3/caption/image?url=http://xyz.com/example.jpg\"</code>\n </td></tr>\n <tr><th align=\"left\"> With HTTP POST :</th><td>\n POST JPEG image content as binary data in request body. 
<br/>\n Example: <code> curl -X POST \"localhost:8764/inception/v3/caption/image\" --data-binary @example.jpg </code>\n </td></tr>\n </table>\n </li>\n <ul>\n </div>\n \"\"\"", "def get_full_representation(self):\n return self.xpath", "def get_content(self):\n return self.__response.content", "def getxml(url, **kwargs):\n xml = fetch_resource(url, **kwargs)\n return etree.fromstring(xml)", "def getContent(self) -> object:\n ...", "def test_info_get(self):\n response = self.client.open(\n '/info',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def serialize_response(self, response):\n raise NotImplementedError()", "def api_call():\n\tresponse = requests.get(URL_API)\n\treturn response", "def get_data(self):\r\n pass", "def convert():\n d = request.get_json()\n return xmltodict.unparse(d), 201", "def get_list(self, request, **kwargs):\n # :TODO modify top_level_serializer or pass a list with self as\n # argument?\n registry = {getattr(self._meta , 'resource_name'): self}\n content = serializers.top_level_serializer(registry)\n response = HttpResponse(\n content = content,\n content_type = 'application/xml')\n response = add_das_headers(response)\n return response", "def get_list(self, request, **kwargs):\n # :TODO modify top_level_serializer or pass a list with self as\n # argument?\n registry = {getattr(self._meta , 'resource_name'): self}\n content = serializers.top_level_serializer(registry)\n response = HttpResponse(\n content = content,\n content_type = 'application/xml')\n response = add_das_headers(response)\n return response", "def test_catalog_xhtml(self):\n client = Client()\n response = client.get('/catalog/')\n print 'status code for catalog', response.status_code\n self.failUnlessEqual(response.status_code, 200)", "def simulate_response(self, documents):", "def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json", "def get(self):\r\n return http.Request('GET', self.get_url()), parsers.parse_json", "def __fetch_data(self, url):\n try:\n response = urlopen(url)\n root = ET.fromstring(response.read())\n except HTTPError as exc:\n root = ET.fromstring(exc.read())\n raise ValueError(root.get('message'))\n return root", "def xmlrpc_response(data):\n xml = xmlrpc_marshal(data)\n response = webob.Response(xml)\n response.content_type = 'text/xml'\n response.content_length = len(xml)\n return response", "def _send_xml(self, url, xml):\n http = httplib2.Http()\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Authorization\": \"Basic %s\" % self.get_basic_auth()}\n return http.request(url, \"POST\", xml, headers=headers)", "def getResponse(self,params):\n return requests.get(self.url,params = params).json()", "def test_xmlrpc(text):\r\n return \"Here's a response! 
%s\" % str(text)", "def serialize(self, root):", "def getXML(self,url_base):\n\n version=\"1.1.1\"\n print \"Adress %s \" %(url_base[0])\n try :\n ## Read xml with urllib2\n url=url_base[0]+'?service=WMS&version='+version+'&request=GetCapabilities'\n request = urllib2.Request(url, headers={\"Accept\" : \"application/xml\"})\n u = urllib2.urlopen(request)\n u=urllib2.urlopen(url)\n value=u.read()\n tree= ET.fromstring( value )\n #print ET.dump(tree)\n dict_var={}\n cap = tree.findall('Capability')[0]\n layer1 = cap.findall('Layer')[0]\n layer2 = layer1.findall('Layer')[0]\n layers = layer2.findall('Layer')\n for l in layers:\n ## Find Variable name\n variable_name=l.find('Name').text\n print 'variable %s ' %(variable_name)\n ## Find are of product\n list_area=[]\n box=l.find('BoundingBox')\n lonmin=box.attrib['minx']\n list_area.append(lonmin)\n lonmax=box.attrib['maxx']\n list_area.append(lonmax)\n latmin=box.attrib['miny']\n list_area.append(latmin)\n latmax=box.attrib['maxy']\n list_area.append(latmax)\n ## Find time and prof\n dims=l.findall('Extent')\n list_prof=[]\n list_time=[]\n list_tot=[]\n for dim in dims : \n if dim.attrib['name'] == 'elevation' :\n list_prof=str(dim.text).split(',')\n if dim.attrib['name'] == 'time' :\n list_time=str(dim.text).split(',')\n if list_prof == [] : \n list_prof.append('0')\n list_tot.append(list_prof)\n list_tot.append(list_time)\n list_tot.append(list_area)\n dict_var[str(variable_name)]=list_tot\n except:\n raise\n print \"Error in WMS procedure\"\n sys.exit(1)\n return dict_var", "def SoapAction(self) -> str:", "def get_full_metadata(self, nuxeo_id):\n parts = urlparse.urlsplit(self.nx.conf[\"api\"])\n url = '{}://{}/Merritt/{}.xml'.format(parts.scheme, parts.netloc, nuxeo_id)\n \n return url", "def __call__(self, name: str, arg: str) -> XMLContent:\n raise NotImplementedError", "def real_obj(self):\n return Request(xml_data=etree.tostring(self))" ]
[ "0.6906242", "0.681684", "0.66007024", "0.64178306", "0.6360245", "0.629127", "0.61903584", "0.61852777", "0.6081651", "0.6022426", "0.6001489", "0.5988445", "0.59532", "0.5934724", "0.5896389", "0.5798428", "0.57974744", "0.57974744", "0.5797071", "0.5797071", "0.576466", "0.57562596", "0.57530856", "0.56981385", "0.565673", "0.56447184", "0.5608147", "0.5604755", "0.5597347", "0.55895483", "0.55847603", "0.5569119", "0.5555407", "0.5552301", "0.5546522", "0.5542946", "0.5510196", "0.5500629", "0.5496174", "0.5495016", "0.54863757", "0.5477446", "0.54706556", "0.54699177", "0.5461773", "0.545922", "0.54557717", "0.5453467", "0.5447369", "0.5444419", "0.54338425", "0.5420353", "0.54035425", "0.5402709", "0.53993595", "0.53993595", "0.5396563", "0.53924537", "0.5390674", "0.5390458", "0.5384209", "0.5378896", "0.5378896", "0.5376279", "0.5375841", "0.5374757", "0.53678936", "0.53676236", "0.53614914", "0.53578496", "0.53405464", "0.5337926", "0.5327119", "0.5324993", "0.53226435", "0.5322271", "0.5315052", "0.531358", "0.530414", "0.5304001", "0.5298355", "0.5298168", "0.5278098", "0.5276572", "0.5276572", "0.5264528", "0.52622944", "0.5261744", "0.5261744", "0.52563584", "0.5255981", "0.52504414", "0.5249884", "0.52480984", "0.5246021", "0.5243153", "0.52345735", "0.5231706", "0.52278644", "0.52278286" ]
0.75670606
0
Spring error is either an error in the wrapper response or an error returned by the API in the JSON
def error(self): error = self._wrapped.error if error: return error return self.json['response'].get('error')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_api_exception(error):\n response = flask.jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def handle_error(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def on_response_validation_error(err):\n return jsonify(message='Bad response'), 500", "def handle_exception(error):\n return make_response(jsonify({'message': error.description}), 400)", "def _handle_api_error(ex):\n if request.path.startswith('/api/'):\n message, detail = str(ex).split(\": \")\n return jsonify(message=message, detail=detail), ex.code\n else:\n return ex", "def _error_response(self):\r\n response_dict = {'success': False, 'version': 1}\r\n self.send_response(\r\n 400, content=json.dumps(response_dict),\r\n headers={'Content-type': 'application/json'}\r\n )", "def service_errors(error):\r\n\r\n response = {'error': {'message': error.message, 'code': error.status_code}}\r\n\r\n return jsonify(response), error.status_code", "def handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def handle_invalid_usage(error):\n\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def handle_error(self, message):\n data = {\n \"success\": False,\n \"error\": message\n }\n\n return JsonResponse(data, status=200)", "def json_error(message):\n return json_response(isError=True, message=message)", "def response_error(error, status=400):\n\n response = {\n 'status': 'failed',\n 'error': error\n }\n\n return response_json(response, status=400)", "def process_error_response(self, resources, resource, api, operation,\n error_response, context):\n pass", "def handle_invalid_usage(error):\n logging.warn(error.message)\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def generic_errors(error, code):\n errors = {}\n errors[\"error\"] = error\n response = jsonify(errors)\n response.status_code = code\n return response", "def handle_root_exception(error):\n code = 400\n if hasattr(error, 'code'):\n code = error.code\n d = dict(_error=str(error))\n s = json.dumps(d)\n return (s, code, [('Content-Type', 'application/json')])", "def http_exception(error):\n data = {'error': str(error)}\n return app.response_class(\n response=json.dumps(data),\n status=error.code,\n mimetype='application/json'\n )", "def _rest_error(self, status_code, error_code, message):\n return {\"status_code\": status_code, \"error_code\": error_code, \"message\": message}", "def internal_error(error):\n return jsonify({'error': \"Internal Server Error. 
\"\n \"Bitte die Logdatei für Details anschauen.\"}), 500", "def process_exception(self, request, exception):\n logging.error(\"ERROR\")\n logging.error(traceback.format_exc())\n response = set_response(\"Internal server error\", False, 500, {})\n return JsonResponse(response, status=response[\"http_code\"])", "def failure(self, error):\n \n self.request.response.status_int = 400\n return None", "def _err_response(self, msg):\r\n return {'success': False, 'error': msg}", "def handle_exception(e):\r\n # start with the correct headers and status code from the error\r\n response = e.get_response()\r\n # replace the body with JSON\r\n response.data = json.dumps({\r\n \"code\": e.code,\r\n \"name\": e.name,\r\n \"description\": e.description,\r\n })\r\n response.content_type = \"application/json\"\r\n return response", "def internal_server_error(error):\n return flask.jsonify({\"error\": \"Internal Server Error\"}), 500", "def error(\n status=500,\n message=\"Internal Server Error\"\n):\n return make_response(\n jsonify(error=message),\n status,\n )", "def handle_api_error(self, response):\n code = response.status_code\n self.__log(f'Handling API error with status code {code}.', 'error')\n if code == 401:\n self.__log(f'Invalid credentials. Please make sure your token is correct.', 'error')\n raise InvalidCredentialsError\n if code == 404:\n self.__log(f'File not found on query. Make sure query URL is correct and retry.', 'error')\n raise FileNotFoundError\n if code == 422:\n content = json.loads(response.content)\n for error in content['errors']:\n self.__log(f'API could not process the request. Message: {error[\"message\"]}.', 'error')\n raise UnprocessableRequestError(f'Issue with field {error[\"field\"]}: {error[\"message\"]}')\n if code == 429:\n self.__log(f'Monthly request limits exceeded. Upgrade billing or change token.', 'error')\n raise MonthlyRequestLimitExceededError\n self.__log(f'Response for code: \"{code}\" was unhandled by wrapper. 
Sorry to not be more helpful.', 'error')\n raise UnknownApiError(\"An unhandled API exception occurred\")", "def bad_request_400(error):\n return jsonify({\n 'success': False,\n 'message': 'Bad request',\n 'error': 400\n }), 400", "def auth_error(error):\n return jsonify(error.error), error.status_code", "def handle_error(error):\n if isinstance(error, ClientError):\n message = {\"message\": \"Error - Unexpected \" + error.response.get(\"Error\").get(\"Code\")}\n return generate_http_response(message), 500\n if isinstance(error, MissingParameterException):\n return generate_http_response(error.response), 400\n message = {\"message\": \"Error: Unexpected error\"}\n return generate_http_response(message), 500", "def _handle_api_error(self, error):\n status_code = error.response.status_code\n message = error.message\n\n if 403 == status_code:\n raise NewRelicInvalidApiKeyException(message)\n elif 404 == status_code:\n raise NewRelicUnknownApplicationException(message)\n elif 422 == status_code:\n raise NewRelicInvalidParameterException(message)\n else:\n raise NewRelicApiException(message)", "def handle_exception(e):\r\n # start with the correct headers and status code from the error\r\n response = e.get_response()\r\n # replace the body with JSON\r\n response.data = json.dumps({\r\n \"code\": e.code,\r\n \"name\": e.name,\r\n \"description\": e.description,\r\n })\r\n response.content_type = \"application/json\"\r\n return response", "def bad_request(self, error):\n return jsonify({'error': 'BAD REQUEST'}), 400", "def errorResponse(errormessage, format, extraJSON={}): \n \n if format == 'csv':\n return CSVResponse(\n [{'errormessage': errormessage}],\n fields=('errormessage',) )\n \n else:\n json_objects = extraJSON.copy()\n json_objects['error'] = True\n json_objects['errormessage'] = errormessage\n return JSONResponse(json_objects)", "def internal_error_400(error):\n return jsonify({'error':\n \"Die Anfrage wurde syntaktisch falsch erstellt.\"}), 400", "def _create_error_response(self, error):\n status = error.status\n try:\n body = json.loads(error.body)\n except Exception:\n body = {}\n if status in [403, 429]:\n # Parse differently if the error message came from kong\n errors = [ApiError(None, body.get(Responses.message, None))]\n else:\n errors = [ApiError(err.get(Responses.context, None),\n err.get(Responses.message, None))\n for err in body.get(Responses.errors, {})]\n return ErrorResponse(status, errors, headers=error.headers)", "def handle_api_error(e):\n return f\"Failed to call Giphy API: {e}\", 500", "def handle_error(self, err): # pragma: no cover\n # log every exception raised in the application\n print('we ended up in the API handle_error()', err, err.__class__)\n\n # catch other HTTP errors\n if isinstance(err, HTTPException):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'success': False,\n 'error': err.code,\n \"message\": getattr(err.error, 'message')\n }), err.code\n\n # if 'message' attribute isn't set, assume it's a core Python exception\n if not getattr(err, 'message', None):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'message': 'Server has encountered an unknown error'\n }), 500\n\n # Handle application-specific custom exceptions\n return jsonify(**err.kwargs), err.http_status_code", "def internal_server_error(error): # pylint: disable=unused-argument\n response = jsonify(\n {\n \"success\": False,\n \"error_code\": 500,\n \"message\": \"Internal Server Error\",\n }\n )\n return response, 500", "def 
handle_error(self, e):\n code = getattr(e, 'code', 500) # Gets code or defaults to 500\n if code == 404:\n return self.make_response({\n 'message': 'not-found',\n 'code': 404\n }, 404)\n return super(MyApi, self).handle_error(e) # handle others the default way", "def response_json_error_info(func):\n def wrapper(request):\n try:\n return func(request)\n except Exception as ex:\n return get_json_response({\n \"status\": \"error\",\n \"error_info\": str(ex),\n \"trace_back\": traceback.format_exc()\n })\n\n return wrapper", "def internal_server_error(error):\n return jsonify({\n 'success': False,\n 'error': STATUS_INTERNAL_SERVER_ERROR,\n 'message': ERROR_MESSAGES[STATUS_INTERNAL_SERVER_ERROR]\n }), STATUS_INTERNAL_SERVER_ERROR", "def errorResponse(self):\n return self._errorResponse", "def handle_exception(e):\n # start with the correct headers and status code from the error\n response = e.get_response()\n # replace the body with JSON\n response.data = json.dumps({\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n })\n response.content_type = \"application/json\"\n return response", "def iftttError(code, error):\n return {\n \"statusCode\": code,\n \"body\": json.dumps({\n \"errors\": [\n {\n \"message\":error\n }\n ],\n }),\n }", "def un_processable_422(error):\n return jsonify({\n 'success': False,\n 'message': 'request cannot be processed',\n 'error': 422\n }), 422", "def make_json_error(ex):\n if isinstance(ex, HTTPException):\n return ex;\n elif isinstance(ex, ResourceException):\n info = ex.to_dict()\n status_code = ex.http_status\n info[\"type\"] = \"exception\"\n else:\n message = \"There was an internal server error. Please try again later.\"\n info = {\"code\": \"internal_server_error\", \"message\": message, \"type\": \"exception\"}\n status_code = 500\n # generally we should log these 500 errors with the stacktrace somewhere -- we used splunk at Box.\n\n response = jsonify(**info)\n response.status_code = status_code\n return response", "def json_or_error(response):\n if 200 <= response.status_code < 300:\n if response.content:\n return response.json()\n else:\n # Response has no body. 
Return a status in a way that is consistent with other requests\n return {\n 'status': 'SUCCESS',\n 'httpStatusCode': response.status_code,\n 'httpStatus': httplib.responses[response.status_code],\n }\n else:\n raise JsonApiError('API request to {} failed with HTTP status {}: {}'.format(\n response.url, response.status_code, response.text))", "def unprocessable_entity(error): # pylint: disable=unused-argument\n response = jsonify(\n {\n \"success\": False,\n \"error_code\": 422,\n \"message\": \"Unprocessable Entity\",\n }\n )\n return response, 422", "def json_err(msg: str) -> Response:\n return jsonify({\"success\": False, \"error\": msg})", "def error_response(error_text):\n return Response(json.dumps({'error' : error_text}), status=404, mimetype='application/json')", "def error_return(content, status):\n content = '{' + '\"status\":{},\"message\":\"{}\"'.format(status, content) + '}'\n return Response(content, status=status, mimetype='application/json')", "def server_error(error=None):\n return jsonify({\n 'Error': 'Check if the request causes a server error'\n }), 500", "def handle_500_error(_error):\n return make_response(jsonify(SERVER_ERROR), 500)", "def _raise_http_error(self, *args, **kwargs):", "def __get_response_error(message, response):\n\n rjson = response.json()\n error_description = \"Code %s - %s\" %(str(response.status_code), rjson.get('message'))\n\n return {\n 'app_message': \"%s\" % (message),\n 'error_description': \"[%s] - %s\" % (message, error_description),\n 'code': response.status_code\n }", "def handle_error_response(resp):\n error_message = ''\n error_message_with_reason = ''\n try:\n error_message = (\n resp.json()\n .get('fireeyeapis', {})\n .get('description', '')\n .strip()\n )\n error_message = error_message.replace('\\n', '')\n if error_message:\n error_message_with_reason = f'Reason: {error_message}'\n except ValueError: # ignoring json parsing errors\n pass\n if resp.headers.get('Content-Type', '') == CONTENT_TYPE_ZIP:\n error_message = error_message_with_reason = resp.text\n\n status_code_messages = {\n 400: f\"{MESSAGES['BAD_REQUEST_ERROR']} {error_message_with_reason}\",\n 401: MESSAGES['AUTHENTICATION_ERROR'],\n 403: error_message,\n 404: error_message,\n 406: error_message,\n 407: MESSAGES['PROXY_ERROR'],\n 500: MESSAGES['INTERNAL_SERVER_ERROR'],\n 503: MESSAGES['INTERNAL_SERVER_ERROR'],\n }\n\n if resp.status_code in status_code_messages:\n demisto.debug(\n f'Response Code: {resp.status_code}, Reason: {status_code_messages[resp.status_code]}'\n )\n raise DemistoException(status_code_messages[resp.status_code])\n else:\n raise DemistoException(resp.raise_for_status())", "def jsonify_exception(error: HTTPException) -> Response:\n exc_resp = error.get_response()\n response: Response = jsonify(reason=error.description)\n response.status_code = exc_resp.status_code\n return response", "def application_error(e):\n message = {\n 'status': 500,\n 'message': 'Sorry, unexpected error: ' + format(e)\n }\n resp = jsonify(message)\n resp.status_code = 500\n\n return resp", "def handle_unknown_errors(exc):\n return jsonify(dict(\n traceback=traceback.format_exc(),\n message=str(exc),\n )), 500", "def handle_exception(e):\n # start with the correct headers and status code from the error\n response = e.get_response()\n # replace the body with JSON\n response.data = json.dumps({\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n })\n print(response.data)\n response.content_type = \"application/json\"\n return response", "def error(self, 
http_error):\n return HTTPResponse(str(http_error), status=http_error.status)", "def handle_rest_exceptions(exception):\n current_app.logger.exception(exception)\n return exception.get_response()", "def check_error(self, response):\n if type(response) is dict and response.has_key('status_code'):\n if response['status_code'] != 200:\n raise rocket.RocketAPIException(response['status_code'],\n response['status_text'])", "def return_request_error(error_message: str, http_status_code: int, response: Response):\n response.status_code = http_status_code\n return {\n 'error': error_message\n }", "def error_msg(error):\n if request.path.startswith(\"/api\"):\n return jsonify({\"message\": str(error)}), 500\n else:\n return render_template(\"error.html\", message=error), 500", "def _f_resp(self, error):\n if self.response is not None:\n return self.response()(self.formatter, error)\n\n if self.content_type == \"text/html\":\n return HTMLResponse()(self.formatter, error)\n\n return JSONResponse()(self.formatter, error)", "def return_error(self, status, payload=None):\n resp = None\n if payload is not None:\n payload = json.dumps(payload)\n resp = self.make_response(payload, status=status)\n\n if status in [405]:\n abort(status)\n else:\n abort(status, response=resp)", "def raise_for_status(response):\n if response.status_code != 200:\n res_data = response.json()\n if (response.status_code, res_data['error']) in error_map:\n raise error_map[(response.status_code, res_data['error'])](res_data['error_description'])\n raise ShoperApiError(res_data['error_description'])\n\n return response", "def raise_for_json_status(self, response_data: JSON) -> None:\n status = response_data['status']\n if status != '1':\n message = response_data.get('message', 'No error message given')\n raise self.error_class(\n f'Error status \"{status}\" in JSON response: {message}'\n )", "def error(self) -> 'outputs.StatusResponse':\n return pulumi.get(self, \"error\")", "def not_found(self, error):\n return jsonify({'error': 'NOT FOUND'}), 404", "def error_handler(response, **kwargs):\n if 400 <= response.status_code <= 499:\n message = response.json()['error_description'] \\\n if 'error_description' in response.json() \\\n else response.json()['error_detail']\n raise ClientError(response, message)\n\n elif 500 <= response.status_code <= 599:\n raise ServerError(response)\n\n return response", "def invalid_response():\n return Response(\n '{\"error\": \"Invalid request\"}',\n status=400,\n mimetype='application/json'\n )", "def bad_request(error):\n return jsonify({\n 'success': False,\n 'error': STATUS_BAD_REQUEST,\n 'message': ERROR_MESSAGES[STATUS_BAD_REQUEST]\n }), STATUS_BAD_REQUEST", "def return_api_error(self, reason=None):\n self.return_result({'ok': False, 'reason': reason})", "def internal_error(error):\n return jsonify(error='configuration could not be generated')", "def handle_uncaught_error(e):\n status_code = 500\n\n result = {\n \"error_message\": \"Unknown or unexpected error.\",\n \"error_code\": \"INTERNAL_SERVER_ERROR\"\n }\n return jsonify(result), status_code", "def _process_error(self, result):\n self.error = result\n if result['errorCode'] == 901:\n raise Exceptions.APIKeyInvalid\n elif result['errorCode'] == 902:\n raise Exceptions.APISecretInvalid\n elif result['errorCode'] == 903:\n raise Exceptions.InvalidRequestToken\n elif result['errorCode'] == 904:\n raise Exceptions.RequestTokenExpired\n elif result['errorCode'] == 905:\n raise Exceptions.InvalidAccessToken\n elif result['errorCode'] == 906:\n 
raise Exceptions.TokenExpired(self.access.expire)\n elif result['errorCode'] == 907:\n raise Exceptions.ParameterMissing\n elif result['errorCode'] == 908:\n raise Exceptions.ParameterNotFormatted\n elif result['errorCode'] == 909:\n raise Exceptions.FeatureNotSupported\n elif result['errorCode'] == 910:\n raise Exceptions.EndPointNotSupported\n else:\n raise Exceptions.UnknownJsonError(result)", "def on_request_validation_error(err):\n print(err)\n return jsonify(message='Bad request'), 400", "def bad_request(error): # pylint: disable=unused-argument\n response = jsonify(\n {\"success\": False, \"error_code\": 400, \"message\": \"Bad Request\"}\n )\n return response, 400", "def openapi_validation_error(\n context: t.Union[RequestValidationError, ResponseValidationError], request: Request\n) -> Response:\n if isinstance(context, RequestValidationError):\n logger.warning(context)\n if isinstance(context, ResponseValidationError):\n logger.error(context)\n\n extract_errors = request.registry.settings[\"pyramid_openapi3_extract_errors\"]\n errors = list(extract_errors(request, context.errors))\n\n # If validation failed for request, it is user's fault (-> 400), but if\n # validation failed for response, it is our fault (-> 500)\n if isinstance(context, RequestValidationError):\n status_code = 400\n for error in context.errors:\n if isinstance(error, InvalidSecurity):\n status_code = 401\n\n if isinstance(context, ResponseValidationError):\n status_code = 500\n\n return exception_response(status_code, json_body=errors)", "def error_response(status_code, message=None):\n payload = {'error': str(status_code)+\" : \"+HTTP_STATUS_CODES.get(status_code, \"Unknown Error\")}\n if message:\n payload['message'] = message\n response = jsonify(payload)\n response.status_code = status_code\n return response", "def return_json_error(msg, status_code):\n return Response(response=json.dumps({'message': str(msg)}), status=status_code, mimetype=\"application/json\")", "def handle_error(e: ODPAPIError):\n\n if e.status_code == 401:\n flash('Your session has expired. Please log in again to continue.', category='error')\n return redirect(url_for('hydra.logout'))\n\n if e.status_code == 403:\n flash('You do not have permission to access that page.', category='warning')\n return redirect(request.referrer or url_for('home.index'))\n\n if e.status_code == 503:\n flash('Service unavailable. 
Please try again in a few minutes.', category='error')\n return\n\n try:\n detail = e.error_detail['detail']\n if e.status_code == 422 and isinstance(detail, list):\n # duplicate validation errors are returned when multiple\n # server-side dependencies validate the same input; we\n # eliminate duplicates by packing them into a dict\n errors = {\n error['loc'][1]: error['msg']\n for error in detail\n }\n for field, msg in errors.items():\n flash(f'{field}: {msg}', category='error')\n else:\n flash(detail, category='error')\n\n except (TypeError, KeyError, IndexError):\n flash(e.error_detail, category='error')", "def _err_response(msg):\r\n return HttpResponse(json.dumps({'success': False, 'error': msg}),\r\n mimetype=\"application/json\")", "def not_found(error):\n return jsonify({\n 'success': False,\n 'error': STATUS_NOT_FOUND,\n 'message': ERROR_MESSAGES[STATUS_NOT_FOUND]\n }), STATUS_NOT_FOUND", "def handle_missing_objects(exc):\n return jsonify(dict(\n message=str(exc)\n )), exc.code", "def gateway_error_response(self, exc):\n if hasattr(exc, \"get_stacks\"):\n # Process potentially multiple stacks.\n full_error, exc_stacks = \"\", exc.get_stacks()\n for i in range(len(exc_stacks)):\n full_error += exc_stacks[i][0] + \"\\n\"\n if i == 0:\n full_error += \"\".join(traceback.format_exception(*sys.exc_info()))\n else:\n entry = ApplicationException.format_stack(exc_stacks[i][1])\n full_error += entry + \"\\n\"\n\n exec_name = exc.__class__.__name__\n else:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n exec_name = exc_type.__name__\n full_error = \"\".join(traceback.format_exception(*sys.exc_info()))\n\n status_code = getattr(exc, \"status_code\", 400)\n if self.log_errors:\n if self.develop_mode:\n if status_code == 401:\n log.warn(\"%s: %s\", exec_name, exc)\n else:\n log.error(full_error)\n else:\n if status_code == 401:\n log.info(\"%s: %s\", exec_name, exc)\n else:\n log.info(full_error)\n\n result = {\n GATEWAY_ERROR_EXCEPTION: exec_name,\n GATEWAY_ERROR_MESSAGE: str(exc.message),\n GATEWAY_ERROR_EXCID: getattr(exc, \"exc_id\", \"\") or \"\"\n }\n if self.develop_mode:\n result[GATEWAY_ERROR_TRACE] = full_error\n\n if RETURN_MIMETYPE_PARAM in request.args:\n return_mimetype = str(request.args[RETURN_MIMETYPE_PARAM])\n return self.response_class(result, mimetype=return_mimetype)\n\n self._log_request_error(result, status_code)\n\n resp = self.json_response({GATEWAY_ERROR: result, GATEWAY_STATUS: status_code})\n # Q: Should HTTP status be the error code of the exception?\n resp.status_code = status_code\n return resp", "def _process_response (self, response, component):\n # check if we´re not authorized to make thios call\n if response.status_code == 401:\n return {\n 'error': True,\n 'message': 'Session invalid',\n 'code': 401\n }\n # check if somethign else failed\n if response.status_code != 200:\n return {\n 'error': True,\n 'message': 'API call for \"' + component + '\" failed',\n 'code': response.status_code\n }\n # return the parsed response & everything´s fine\n return response.json()", "def _writeJSONErrorResponse(f, request):\n code = getattr(f.value, 'code', CODE.UNKNOWN)\n _writeJSONResponse(\n result=f.getErrorMessage().decode('ascii'),\n request=request,\n code=code,\n status=_mapErrorCodeToStatus(code))\n raise f", "def internal_server_error(error_msg):\n return jsonify(error=str(error_msg))", "def error_code(self):\n return self.json['response'].get('error_code')", "def handle_rest_api_result(result):\n\n if (result.status_code < 200) or (result.status_code > 299):\n 
try:\n json_result = result.json()\n except ValueError:\n raise VMRayRESTAPIError(\"API returned error {}: {}\".format(result.status_code, result.text),\n status_code=result.status_code)\n\n raise VMRayRESTAPIError(json_result.get(\"error_msg\", \"Unknown error\"), status_code=result.status_code)", "def not_found(e):\n\n return json.dumps({\"error\": \"Endpoint not found\"})", "def internal_error(error):\n return f'{\"code\": 500, \"message\": \"{str(error)}\"}', 500", "def _handle_response(self, response):\n if response.status_code >= 500:\n raise ServerError(response.content, response.status_code)\n elif response.status_code >= 300:\n raise ClientError(response.json(), response.status_code)\n\n return Response(response)", "def not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"resource not found\"\n }), 404", "def throw_error(self, error, status_code=400, **extra):\n data = dict(success=False, data=dict(message=error, **extra))\n raise ShortCircuitHttpChain(response=JsonResponse(data, status=status_code))", "def handle_errors(func):\n def wrapper(*args, **kwargs):\n try:\n response = func(*args, **kwargs)\n except Exception as e:\n response = jsonify({\"success\": False, \"message\": str(e)})\n return response\n wrapper.func_name = func.func_name\n return wrapper" ]
[ "0.75166994", "0.7210059", "0.70859104", "0.70615435", "0.69839483", "0.6966496", "0.69491005", "0.6889269", "0.6889269", "0.688472", "0.68772686", "0.68692976", "0.6838407", "0.6829438", "0.6813062", "0.67832464", "0.67662054", "0.6756578", "0.6753409", "0.675096", "0.6724432", "0.6707206", "0.6694417", "0.66913545", "0.6658517", "0.6643494", "0.6631777", "0.66252816", "0.66210824", "0.66120154", "0.6607411", "0.6605047", "0.65833426", "0.65829796", "0.6574014", "0.6559114", "0.6552064", "0.6550383", "0.6532138", "0.6527613", "0.6527566", "0.6527537", "0.65261054", "0.6518665", "0.6500882", "0.6472784", "0.6461239", "0.6451693", "0.6451224", "0.64494246", "0.64297795", "0.64249045", "0.6423183", "0.6407128", "0.6406246", "0.64001167", "0.639336", "0.6373172", "0.6373091", "0.63606226", "0.6350193", "0.63448817", "0.6338163", "0.63216555", "0.63193685", "0.6318777", "0.6312711", "0.63110584", "0.63018745", "0.6296309", "0.62922037", "0.6288889", "0.6287313", "0.6285438", "0.6280911", "0.62767434", "0.62651116", "0.6262089", "0.62604535", "0.6248225", "0.6246717", "0.6244038", "0.62418044", "0.6240503", "0.6236649", "0.623635", "0.62298673", "0.62250555", "0.62200403", "0.62096786", "0.6192584", "0.6190941", "0.61741346", "0.6173695", "0.6159536", "0.61592436", "0.615784", "0.6152215", "0.6140135", "0.61281747" ]
0.70127034
4
return the error code
def error_code(self): return self.json['response'].get('error_code')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error_code(self) -> int:\n return pulumi.get(self, \"error_code\")", "def error_code(self) -> int:\n return self._error_code", "def error_code(self):\n # type: () -> int\n return self._error_code", "def error_code(self):\n return self._error_code", "def errorcode(self):\n return self._errorcode", "def error_code(self) -> str:\n return self.__error_code", "def error_code(self) -> str:\n return self._error_code", "def get_error_code(self):\n return self.__errorCode", "def _get_error_code(self, data) -> int:\n return int(self._error_code)", "def error_code(self):\r\n return self._arm.error_code", "def code(self):\n return self.m_errorCode", "def error_handler(self):\n if self.ctx.exit_code is not None:\n return self.ctx.exit_code", "def _errno(err):\n return err.errno", "def code(self):\n ret = libxml2mod.xmlErrorGetCode(self._o)\n return ret", "def error_code(self, obj, statusCode):\n pass", "def get_exit_code(self):", "def error_code(self) -> CustomErrorCode:\n enforce(self.is_set(\"error_code\"), \"'error_code' content is not set.\")\n return cast(CustomErrorCode, self.get(\"error_code\"))", "def errno(self):\n return self._errno", "def error():\n return None", "def retcode(self):\n if not self.result:\n return None\n return self.result.retcode", "def get_retcode(self):\n return self._retcode", "def error_code(self) -> pulumi.Input[Union[str, 'CopyCompletionErrorReason']]:\n return pulumi.get(self, \"error_code\")", "def ReturnCode(rc):\r\n return _hiew.ReturnCode(rc)", "def error_check(self, message):\n matches = ERROR_SYNTAX.match(message)\n if matches:\n error_code = int(matches.group(1))\n error_message = matches.group(2)\n return error_code, error_message\n return None", "def SocketErrorCode(self) -> SocketError:", "def _errno(err):\n return err.args[0]", "def status_code(self) -> int:\n raise NotImplementedError # pragma: no cover", "def determine_exit_code(self) -> int:", "def error(self) -> 'outputs.StatusResponse':\n return pulumi.get(self, \"error\")", "def error(self):\n return self._error", "def error(self):\n return self._error", "def error(self):\n return self._error", "def return_code(self):\n return self._failures", "def error_count():\n return cpp_style.error_count()", "def error(self):\n return self['error']", "def getErrorId(self):\n return _libsbml.XMLError_getErrorId(self)", "def code(self):\n\t\treturn self.status_code", "def returnsErrorCode(self):\n return self.rtype == \"int\"", "def _errno(self):\n return c_int.in_dll(self.lib, \"i_errno\").value", "def getError(self):\n \n return self.resp[\"error\"]", "def error(message, code=None):\n print_error(message)\n sys.exit(code or 1)", "def error(self):\n retval = self.resource.error()\n if not retval and self._exception:\n retval = _Error(\n self._exception.code,\n str(self._exception),\n None)\n return retval", "def error(msg=\"Invalid query\", code=400):\n\tjson = {'error': msg}\n\t#return jsonify(json), code\n\tabort(make_response(jsonify(json), code))", "def get_error(self):\n\t\treturn handle_to_object(call_sdk_function('PrlJob_GetError', self.handle))", "def getReturnCode(self):\n retcode = self.sendCmd(\"echo $?\")\n try:\n return int(retcode)\n except:\n return retcode", "def check_errors(self) -> None:\n # TODO check the manual for error codes & interpert them.\n return self.send(self.cmd.GET_GLOBALSTATUS_CURRENTERROR)", "def get_ret_code(self):\n\t\treturn call_sdk_function('PrlJob_GetRetCode', self.handle)", "def error(self):\n pass", "def get_validation_status(processed_code):\n if 
is_code_has_unknown_digit(processed_code):\n if is_code_valid_checksum(processed_code):\n return VALID_CODE_STATUS\n else:\n return CHECKSUM_ERROR_STATUS\n else:\n return DIGIT_ERROR_STATUS", "def lastError(self):\n self.bib.DapiGetLastError.argtypes = [] # None geht nicht?!\n self.bib.DapiGetLastError.restype = c_ulong\n error = self.bib.DapiGetLastError()\n if error > self.DAPI_ERR_NONE:\n print(\"Delib error number: {\", hex(error),\"}\")\n self.bib.DapiClearLastError()\n return(hex(error))\n else:\n #print(\"OK\",end=\"\", flush=True)\n return(0)", "def _check_error(return_value):\n if return_value < 0:\n raise IOError(pm.lib.Pm_GetErrorText(return_value))", "def test_get_errorCode(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, ERROR_CODE_IDX, ERROR_CODE_SUB)\n param_obj = self.__dict__[servo_type]._get_errorCode()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in errorCode...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue", "def error(code, message):\n sys.stderr.write(message)\n sys.exit(code)", "def return_code(self) -> int:\n raise NotImplementedError(\"Base method not implemented\")", "def error(self):\n return self.get('error')", "def execute_failure(self, *args, **kwargs):\n return 1, \"\", None", "def error(self):\n error = int(self._dll.JLINKARM_HasError())\n if error == 0:\n return None\n return error", "def shellExecErrorCode(cmd):\n return subprocess.call(cmd, shell=True)", "def error(self, error):\n pass", "def error_from_code(code):\n if code in _by_codes:\n return _by_codes[code]\n else:\n return XTTError(code)", "def error(self):\n ...", "def failure(self, error):\n print \"comm failed Reason:\", error\n return error", "def getErrorCode(cls):\n # We use the CRC32 digest of the class name as a unique code.\n # We follow the recommendation of the Python docs to ensure\n # that this value is signed 32 bit integer.\n code = zlib.crc32(cls.__name__) & 0xffffffff\n return code", "def getError(self, status):\r\n nBuffer = 512\r\n msgBuffer = ctypes.create_string_buffer(nBuffer)\r\n # ViStatus status = Acqrs_errorMessage(ViSession instrumentID,\r\n # ViStatus errorCode, ViChar errorMessage[],ViInt32 errorMessageSize);\r\n AgDLL['Acqrs_errorMessage'](self.session, status, msgBuffer,\r\n ViInt32(nBuffer))\r\n return msgBuffer.value", "def ERR(self):", "def get_error(self):\n return self.e", "def boto3_errors(exception):\n error_code = exception.response['Error']['Code']\n if error_code == 'InvalidInstanceID.Malformed':\n message = f\"Invalid instance ID.\"\n status = 404\n elif error_code == 'UnauthorizedOperation':\n message = f\"You are not authorized to perform that action.\"\n status = 401\n elif error_code == \"InvalidInstanceID.NotFound\":\n message = f\"Instance not found.\"\n status = 404\n else:\n message = f\"Unknown error has occurred: {error_code}\"\n status = 400\n\n return message, status", "def get_error(self):\n return self.exc_info", "def get_gripper_err_code(self, **kwargs):\r\n return self._arm.get_gripper_err_code(**kwargs)", "def decode_error_code(err_code, s, d):\n\n config.logger.warn('Failure: %d %s %s', err_code, s, d)\n\n return {\n 0: 'Request completed successfully. 
No error',\n 1: 'Invalid API key',\n 2: 'Unknown Request',\n 3: 'Invalid arguements',\n 4: 'Invalid service',\n 5: 'Invalid session',\n 6: 'Insufficient bandwidth available',\n 7: 'No path between src and dst with that service type',\n 8: 'Internal VELOX error',\n 9: 'Nothing to modify',\n -1: 'Server comms error',\n }.get(err_code, 'Unknown error code')", "def error(self, code, msg):\r\n self.status = code\r\n self.status_message = str(msg)", "def raise_on_error(error_code):\n if error_code == 0: # SUCCESS\n return\n elif error_code == 1: # ERR_BAD_MORPH_OP\n raise ValueError('invalid morhology operation code')\n elif error_code == 2: # ERR_BAD_TYPE\n raise ValueError('invalid type')\n elif error_code == 3: # ERR_BAD_CUDA_DEVICE\n raise ValueError('invalid device number')\n elif error_code == 4: # ERR_NO_AVAILABLE_CUDA_DEVICE\n raise RuntimeError('no CUDA device available')\n elif error_code == 5: # ERR_BAD_APPROX_TYPE\n raise RuntimeError('invalid approximation type')\n elif error_code == -1: # ERR_UNCAUGHT_EXCEPTION\n raise RuntimeError('an unaught C++ exception occured')\n else:\n raise ValueError('invalid error code: {}'.format(error_code))", "def query_error(self):\n return self.details[KEY_QUERY_ERROR]", "def error(message, code=1):\n if message:\n print('ERROR: {0}'.format(message), file=sys.stderr)\n else:\n print(file=sys.stderr)\n sys.exit(code)", "def return_code(self):\n return self.__process.returncode", "def error(msg):\n print 'ERROR: %s' % msg\n sys.exit(1)", "def geterror(self):\n c = [self.nsdchat, '-s', self.connection_string, '-c', 'geterror']\n process = subprocess.Popen(\n c, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n try:\n out, err = process.communicate()\n except subprocess.TimeoutExpired:\n process.kill()\n raise\n if (process.returncode != 0):\n Connection.logger.error(\"nsdchat exited with errorcode \"\n \"{rc}.\".format(rc=process.returncode))\n else:\n return out.decode('utf-8')", "def error(self):\n if self.p_err.poll():\n return self.p_err.recv()", "def error ( self , message , *args , **kwargs ) :\n return self.logger.error ( message , *args , **kwargs )", "def on_error(self, status_code, data):\n\t\tprint(\"error_code: \",status_code)", "def _get_error_code(ganglia_metrics, hostname, metric, warning, critical):\n\n lines = ganglia_metrics.split('\\n')\n for i, line in enumerate(lines):\n if hostname in line:\n for j in range(i, len(lines)):\n if metric in lines[j]:\n m = re.search(VALUE_PARSING_RE, lines[j])\n val = float(m.group(1))\n if (not critical is None) and val > critical:\n print (\"ERROR - hostname %s, metric %s, val %s, critical %s\" %\n (hostname, metric, val, critical,))\n return(2)\n if (not warning is None) and val > warning:\n print (\"WARNING - hostname %s, metric %s, val %s, warning %s\" %\n (hostname, metric, val, warning,))\n return(1)\n print (\"OK - hostname %s, metric %s, val %s, warning %s\" %\n (hostname, metric, val, warning,))\n return(0)\n print (\"WARNING - no value for hostname %s, metric %s\" %\n (hostname, metric))\n return(1)", "def server_error(err):\n log.error(err)\n return err.msg, 500", "def exitcode(self):\n\n return self._exitcode", "def returncode(self):\n return self._proc.returncode", "def get_error_type(self):\n\n return self.err_type", "def failure(self, error):\n \n self.request.response.status_int = 400\n return None", "def error_codes(self):\n self._sort_measurements()\n return self._error_codes", "def status_code(self):\r\n return int(self._status[:3])", "def exitcode(self):\n try:\n return 
int(self.results.most_significant_state)\n except ValueError:\n return 3", "def expected_failure(self) -> int:\n return 139", "def process_error(self, id, code, error):\n raise NotImplementedError('process_error not implemented in BaseService')", "def error(self) -> Optional[pulumi.Input['ErrorArgs']]:\n return pulumi.get(self, \"error\")", "def error(self) -> Optional[pulumi.Input['ErrorArgs']]:\n return pulumi.get(self, \"error\")", "def test_error_message_header_port_mod_failed_codes(self):\n\n error_type = 7\n error_type_value = Error.ErrorType.OFPET_PORT_MOD_FAILED\n\n error_code = 0\n\n iter_given_code = Error.ErrorType.get_class(error_type_value).__iter__()\n length = Error.ErrorType.get_class(error_type_value).__len__()\n\n while error_code < self.MAX_PORT_MOD_FAILED_CODE_VALUE or length > 0:\n data = UBInt32(random.randint(2, 250)).pack()\n xid = random.randint(2, 250)\n\n test_value = b'\\x05\\x01\\x00\\x10' + UBInt32(xid).pack() + UBInt16(error_type).pack() + \\\n UBInt16(error_code).pack() + data\n\n if error_code < self.MAX_PORT_MOD_FAILED_CODE_VALUE:\n error_code += 1\n\n length -= 1\n\n test_object_error_messages = Error.ErrorMsg(xid, error_type_value, iter_given_code.__next__(), data).pack()\n\n self.assertEqual(test_value, test_object_error_messages)", "def status_code(self) -> int:\n return pulumi.get(self, \"status_code\")", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500", "def application_error(e):\n return 'Sorry, unexpected error: {}'.format(e), 500" ]
[ "0.8525533", "0.84996057", "0.8444067", "0.83733237", "0.83133256", "0.8211999", "0.80976784", "0.8081181", "0.8055439", "0.80443174", "0.7319064", "0.7141462", "0.7095967", "0.7091699", "0.7091612", "0.7072328", "0.705872", "0.70341647", "0.6983146", "0.692198", "0.69102186", "0.69025797", "0.68934786", "0.6890697", "0.68814003", "0.68782544", "0.68617415", "0.68615615", "0.6832079", "0.68086547", "0.68086547", "0.68086547", "0.6808461", "0.6806863", "0.680516", "0.6791009", "0.67887044", "0.6763241", "0.67400515", "0.6705342", "0.6662142", "0.66529834", "0.66462207", "0.6631286", "0.6622646", "0.6612099", "0.6604368", "0.6588711", "0.6588449", "0.6577513", "0.6577362", "0.6542156", "0.6537502", "0.6524998", "0.6520227", "0.65112984", "0.65005106", "0.6496379", "0.6487961", "0.64856476", "0.6472288", "0.6426481", "0.64098763", "0.6403848", "0.63910663", "0.6388413", "0.63574123", "0.6349645", "0.6346086", "0.63407063", "0.63124555", "0.63049537", "0.6286186", "0.6272058", "0.6261052", "0.62595934", "0.6256766", "0.62484425", "0.6234542", "0.6233971", "0.6228743", "0.62168026", "0.62106097", "0.62032187", "0.6199646", "0.6199556", "0.6199304", "0.6198035", "0.6192273", "0.6192031", "0.6183383", "0.61791795", "0.61791795", "0.61780125", "0.61736065", "0.6172568", "0.6172568", "0.6172568", "0.6172568", "0.6172568" ]
0.76482093
10
Returns whether the error is NOAUTH
def noauth(self): try: # some endpoints dont return json return self.json['response'].get('error_id') == 'NOAUTH' except: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def auth_error():\n return unauthorized('Invalid credentials')", "def unauthorized():\n return HttpError(401)", "def check_auth_none(self, username):\n return AUTH_FAILED", "def test_no_auth(self) -> None:\n channel = self.make_request(\"GET\", self.url, {})\n\n self.assertEqual(401, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])", "def test_no_auth(self) -> None:\n channel = self.make_request(\"GET\", self.url, {})\n\n self.assertEqual(401, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])", "def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403", "def is_auth_error(error: Exception) -> bool:\n if not isinstance(error, Fault):\n return False\n return (\n any(\n \"NotAuthorized\" in code\n for code in extract_subcodes_as_strings(error.subcodes)\n )\n or \"auth\" in stringify_onvif_error(error).lower()\n )", "def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def check_auth():", "def test_retrieve_user_unautherized(self):\n res = self.client.get(ME_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_unauthenticated_request(self):\n url = self.get_url(self.active_user.id)\n response = self.client.get(url)\n\n expected_status_code = 401\n self.assertEqual(response.status_code, expected_status_code)", "def auth_failure():\n return \"Request denied due to failed authorization\", 201, {'Content-Type': 'text/html'}", "def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)", "def test_retrive_user_unauthenticated(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_if_not_available_for_unauthorized(self):\r\n res = self.not_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n # HTTP GET Request\n response = self.client.get(ME_URL)\n\n # If you call the URL without authorization\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def assertHttpUnauthorized(self, resp):\r\n return self.assertEqual(resp.status_code, 401)", "def get_authenticated_denied(self):", "def unauthorized():\n return {'errors': ['Unauthorized']}, 401", "def token_auth_error():\n logger.debug(\"Token authentication failed.\")\n return unauthorized(\"Invalid credentials.\")", "def __check_http_err(self, status_code):\n if status_code == 403:\n raise exceptions.APIAuthenticationError(self.api_key)\n elif status_code == 503:\n raise exceptions.APITimeoutError()\n else:\n return False", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_no_auth(self) -> None:\n channel = self.make_request(\"DELETE\", self.url)\n\n 
self.assertEqual(401, channel.code, msg=channel.json_body)\n self.assertEqual(Codes.MISSING_TOKEN, channel.json_body[\"errcode\"])", "def user_must_authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def test_is_unauthenticated(self):\n response = self.post_question()\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def unauthorized(self, error):\n return jsonify({'error': \"NOT AUTHORIZED\"}), 401", "def test_error_find_no_authentication_header(self, test_client):\n url = '/api/v1/auth/me'\n response = test_client.get(url)\n\n assert response.status_code == 401\n assert response.json['msg'] == 'Missing Authorization Header'", "def test_loggin_required(self):\n response = self.client.get(RESGATE_URL)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def giveup(exc):\n return isinstance(exc, aiohttp.client_exceptions.ClientResponseError) and exc.code in (403, 404)", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def test_auth_required(self):\n res = self.client.get(RECIPE_URL)\n self.assertEqual(res.status_code,status.HTTP_401_UNAUTHORIZED)", "def check_authentication(self):\n try:\n cookies = os.environ['HTTP_COOKIE'].split('; ')\n except KeyError:\n cookies = []\n for c in cookies:\n prefix = Auth.AUTH_COOKIE_NAME + '='\n if (c.startswith(prefix) and\n self.is_authentication_token(c[len(prefix):])):\n return True\n print 'Status: 403 Forbidden'\n print 'Content-Type: application/json'\n print self.logout_headers()\n print json.JSONEncoder().encode({'error': 'Not authenticated.'})\n sys.exit(1)", "def test_auth_required(self):\n res = self.client.get(RECIPE_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_auth_required(self):\n res = self.client.get(RECIPE_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_unauthorized_exception(exception_app):\n request, response = exception_app.test_client.get('/401')\n assert response.status == 401\n\n request, response = exception_app.test_client.get('/401/basic')\n assert response.status == 401\n assert response.headers.get('WWW-Authenticate') is not None\n assert response.headers.get('WWW-Authenticate') == \"Basic realm='Sanic'\"\n\n request, response = exception_app.test_client.get('/401/digest')\n assert response.status == 401\n\n auth_header = response.headers.get('WWW-Authenticate')\n assert auth_header is not None\n assert auth_header.startswith('Digest')\n assert \"qop='auth, auth-int'\" in auth_header\n assert \"algorithm='MD5'\" in auth_header\n assert \"nonce='abcdef'\" in auth_header\n assert \"opaque='zyxwvu'\" in auth_header\n\n request, response = exception_app.test_client.get('/401/bearer')\n assert response.status == 401\n assert response.headers.get('WWW-Authenticate') == \"Bearer\"", "def response_unauthorised():\n\n response = {\n 'status': 'failed',\n 'error': 'Not Authorised'\n }\n\n return response_json(response, status=401)", "def test_get_unauthenticated(self):\n del self.client.request_kwargs['auth']\n self.verify_get_response(self.client.get(STATUS_PATH))", "def test_auth_required(self):\n res = self.client.get(INGREDIENTS_URL)\n\n 
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def basic_auth_error():\n logger.debug(\"Basic authentication failed.\")\n return unauthorized(\"Invalid credentials.\")", "def test_authentication_is_not_required(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_unhappy_path_unauthorized(self):\n\n response = self.client.get(self.url)\n expected_data = {\"detail\": \"Authentication credentials were not provided.\"}\n\n self.assertDictEqual(response.data, expected_data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def _non_authorized_error(error_msg):\n return _error(error_msg, NON_AUTHORIZED_STATUS_CODE)", "def check_auth_interactive_response(self, responses):\n return AUTH_FAILED", "def unauthorized(error=None):\n return jsonify({\n 'Message': 'You are not authorized to make this request. Check if you are logged in.'\n })", "def test_auth_neg(self):\n username, password = 'new12312', '1231231231'\n response = self.api_client.login(username, password)\n assert response == 'UNAUTHORIZED'", "def test_auto_auth_disabled(self):\r\n response = self.client.get(self.url)\r\n self.assertEqual(response.status_code, 404)", "def test_status_unauthenticated(self):\n rv = self.client.post('/statusize/', data={'message': 'foo'},\n follow_redirects=True)\n eq_(rv.status_code, 403)", "def test_auth_code_negative(self, api):\n resp = api.login_user(\"QWERTY\", \"QWERTY\")\n assert resp.status_code == 400", "def test_not_authenticated_uri(self):\n request = self.factory.get(self.uri)\n response = self.view(request)\n response.render()\n self.assertEqual(response.status_code, 401,\n 'Expected Response Code 401, received {0} instead.'\n .format(response.status_code))", "def test_unhappy_path_unauthorized(self):\n\n response = self.client.post(self.url)\n expected_data = {\"detail\": \"Authentication credentials were not provided.\"}\n\n self.assertDictEqual(response.data, expected_data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_failure(self):\n \n result = self.authenticator.authenticate(\n username=u'thruflo', \n password=u'wrong'\n )\n self.assertTrue(result is None)", "def test_not_auth(self):\n rv = self.get('/queue/')\n self.assertJSONError(rv, 'TagalleryMissingLoginInformation')\n return", "def test_unauthorized_access(flask_test_client, http_method, endpoint):\n response = flask_test_client.open(\n method=http_method, path=endpoint, headers=get_headers()\n )\n assert response.status == \"401 UNAUTHORIZED\"\n assert response.content_type == \"application/json\"\n assert response.json[\"message\"] == \"Access token is invalid or expired.\"", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def is_error(self):\r\n if self.status not in [STATUS_CODES['200'], ]:\r\n return True\r\n else:\r\n return False", "def server_failure(self, resp):\n return resp[0] in FAILURE_CODES", "def unauthorized(error):\n return jsonify({\n 'success': False,\n 'error': STATUS_UNAUTHORIZED,\n 'message': ERROR_MESSAGES[STATUS_UNAUTHORIZED]\n }), STATUS_UNAUTHORIZED", "def _handle_authentication_error(self):\n response = make_response('Access Denied')\n response.headers['WWW-Authenticate'] = self.auth.get_authenticate_header()\n response.status_code = 401\n return response", "def test_retrieve_users_unauthorized(setup_client):\n client = setup_client\n res = client.get(ME_URL)\n assert 
res.status_code == status.HTTP_401_UNAUTHORIZED", "def test_post_answer_if_not_autheticated(self):\n response = self.post_answer()\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def _authenticated_or_die(self):\n if not self._authenticated:\n raise Exception('The client is not authenticated!')", "def authenticate():\n return abort(401)", "def test_unauthorized_user(self):\n response_decoded_json = requests.post(URL_AUTH['url_login'], \n data=json.dumps(AUTH_PAYLOADS['payload_unauth']),\n headers=HEADER['header'])\n mes = response_decoded_json.json()\n assert 400 == response_decoded_json.status_code, \"You have BAD REQUEST\"\n assert \"User not found\" == mes, \"There is unexpected ability to login as unknown user\"", "def rest_test_no_auth():\n # Permission check is not applicable here\n return jsonify({\"answer\": 42})", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})", "def test_login_required(self):\n res = self.client.get(RETETA_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def check_authentication():\r\n\r\n #TODO: Reservation based authentication\r\n try:\r\n authenticated_user()\r\n except Exception as e:\r\n return e\r\n\r\n return True", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def _check_token_is_revoked(self, jti: str) -> None:\n redis = self._conn_redis()\n entry = redis.get(jti)\n if entry and entry == 'true':\n raise HTTPException(status_code=401,detail=\"Token has been revoked\")", "def check_auth_token_validity(self):\n endpoint = self.url + 'api/v1/readiness'\n response = self.perform_get_request(endpoint)\n\n if response.status_code != 200:\n self.print_error_response(response, \"error\")\n return response.status_code == 200", "def error_invalid_response(self):\r\n return self.type() == 0x00", "def _is_error_call(self, response):\n status = response.get('ResponseMetadata', {}).get('HTTPStatusCode')\n return status != 200", "def test_read_not_authenticated(self):\n response = self.client.get(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n format='json',\n )\n\n content = {'detail': 'Authentication credentials were not provided.'}\n\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_restaurant_unauthorized(self):\n resp = self.test_client.get(self.API_BASE, headers={})\n self.assertEqual(resp.status_code, 401)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['success'], False)", "def should_skip_auth(flask_request):\n return flask_request.method in ['HEAD', 'OPTIONS']", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", 
"def check_auth(self):\n if self.enterprise_url is not None:\n return True\n try:\n if self.api is not None:\n # Throws AuthenticationFailed if invalid credentials but\n # does not deduct from the rate limit.\n self.api.ratelimit_remaining\n return True\n else:\n self.print_auth_error()\n except AuthenticationFailed:\n self.print_auth_error()\n return False", "def test_unauthorized_request(self):\n # test false token\n user_id = self.create_user()[0]\n question_id = int(self.create_question(user_id)[0])\n false_token = self.post_data(question_id, headers={\"Authorization\":\"Bearer wrongtoken\"})\n self.assertEqual(false_token.status_code, 401)", "def noaccess(self):\n self.assertEqual(self.client.get(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.post(self.ENDPOINT).status_code, 403)\n self.assertEqual(self.client.delete(self.ENDPOINT).status_code, 403)", "def error_on_unauthorized():\n\n username = get_jwt_identity()\n user = Login.query.filter_by(username=username).first()\n\n if user is None:\n raise APIError(400, \"User {username} does not exist on this server\".format(username=username))\n elif user.role is not Role.admin:\n raise APIError(401, \"Only administrators have access to this page\")", "def test_authorization_header_not_present(self, _get_key_secret):\n request = Request(self.environ)\n request.body = self.get_request_body()\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': 'OAuth verification error: Malformed authorization header',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def check_response_errors(self, resp):\n return True", "def unauthorized(self, message=None):\n return self.send_message(message, status=401)", "def test_no_auth(self):\n url = 'https://domain.com/project/objects.inv'\n expected = 'https://domain.com/project/objects.inv'\n actual = _strip_basic_auth(url)\n assert expected == actual", "def test_no_token_auth_required(self, client):\n assert_hook_status(client, status=401)", "def server_failure(self, resp, ignore_codes=[]):\n return (resp[0] in FAILURE_CODES and resp[0] not in ignore_codes)", "def authenticate():\n\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n get_auth_headers())", "def test_get_household_not_successful(self):\n res = self.client.get(HOUSEHOLD_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_no_images_unauthorized(self):\n res = self.client.get(IMAGE_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_get_without_token(self):\n client = Client()\n response = client.get('/reviews/')\n self.assertEqual(response.status_code, 401)", "def authenticate():\n return Response(\n 'Could not verify your credentials for that url', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def test_login_required(self):\n res = self.client.get(INGREDIENT_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)" ]
[ "0.7120319", "0.70609397", "0.7014564", "0.6881835", "0.6881835", "0.6855839", "0.67338675", "0.67045474", "0.66409737", "0.66409737", "0.66238505", "0.6619812", "0.66152024", "0.659969", "0.65859014", "0.6582713", "0.6556535", "0.6549733", "0.6540464", "0.6489589", "0.6488741", "0.6488231", "0.64701575", "0.6464409", "0.6462344", "0.6462344", "0.64472467", "0.6370041", "0.63248056", "0.63104796", "0.6294968", "0.6282944", "0.62798834", "0.62642056", "0.6261444", "0.62571365", "0.6256713", "0.6248483", "0.6248483", "0.6240356", "0.62390554", "0.62386227", "0.623074", "0.6224856", "0.6220053", "0.62195516", "0.62186205", "0.62000793", "0.6191518", "0.6180963", "0.617167", "0.6165507", "0.61604095", "0.6150506", "0.615018", "0.61497813", "0.6115771", "0.61107224", "0.60940444", "0.6091264", "0.6085116", "0.60814637", "0.608057", "0.60720485", "0.6060415", "0.6049202", "0.6028709", "0.6027842", "0.60110873", "0.6008582", "0.6005613", "0.5996507", "0.59823585", "0.5965032", "0.59644717", "0.5954127", "0.5952359", "0.594034", "0.5931493", "0.5928558", "0.59221405", "0.5919756", "0.5919756", "0.5919756", "0.590957", "0.5908119", "0.5898808", "0.58983374", "0.588913", "0.58883506", "0.5878218", "0.5873451", "0.58713", "0.5870025", "0.5849854", "0.5845011", "0.5842832", "0.5836369", "0.5830202", "0.58280337" ]
0.81090945
0
Write a custom auth property where we grab the auth token and put it in the headers
def authenticate(self): #it's weird i have to do this here, but the code makes this not simple auth_json={'email':self.user, 'password':self.password} #send a post with no auth. prevents an infinite loop auth_response = self.post('/auth', data = json.dumps(auth_json), auth = None) _token = auth_response.json['token'] self._token = _token self._wrapped.auth = SpringAuth(_token)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, r):\n r.headers[\"x-aims-auth-token\"] = self._token\n return r", "def auth_header_value(self):\n return f\"token {self.API_TOKEN}\"", "def authorization(self):\n return {'auth-token': '{token}'.format(token=self.token)}", "def authorization(self):\n return {'auth-token': '{token}'.format(token=self.token)}", "def add_auth_to_headers(self):\n if not hasattr(self, \"headers\"):\n self.headers = {\"Content-Type\": \"application/json\"}\n\n login = {\"account_number\": self.account[\"account_number\"],\n \"pin\": self.account[\"pin\"]}\n token = json.loads(self.client.post(\n \"/accounts/login\",\n data=json.dumps(login),\n headers=self.headers).get_data())[\"token\"]\n self.headers[\"Authorization\"] = \"Bearer \" + token", "def _auth_headers(self):\n if self.token_str:\n return {'Authorization': 'Bearer {}'.format(self.token_str)}\n else:\n return {}", "def get_authenticate_header(self):\n return f'Basic realm=\"{self.www_authenticate_realm}\"'", "def auth_token(self):", "def add_header(response):\n response.headers['Authorization'] = response\n return response", "def _get_authorization_header(self):\n return f\"token {self._context.get_github_token()}\"", "def auth_header(self):\n return self._auth_header", "def create_auth_header(api_token):\n return {'Authorization': f'token {api_token}'}", "def get_authorization_header(self):\n return {\"Authorization\": \"Bearer {}\".format(self.get_jwt())}", "def auth_headers(current_user_token: str) -> Dict[str, str]:\n return {\"Authorization\": f\"Bearer {current_user_token}\"}", "def __call__(self, r):\n r.headers['Authorization'] = 'Bearer %s' % self.get_access_token()\n return r", "def __call__(self, r):\n if (self.token):\n r.headers['access-token'] = self.token\n return r", "def __call__(self, r):\n r.headers['Authorization'] = 'OAuth ' + self._access_token\n return r", "def authenticate_header(self, request):\n return '{0} realm=\"{1}\"'.format(settings.JWT_AUTH_HEADER_PREFIX,\n self.www_authenticate_realm)", "def bearer_authentication(self, token: str) -> None:\n self.api_session.headers.update({'Authorization': f'Bearer {token}'})", "def header_token(token):\n return {'Authorization': '{0} {1}'.format('JWT', token)}", "def apply(self, headers):\n headers['Authorization'] = 'Bearer ' + self._metadata_service.auth_token", "def __call__(self, request):\n request.headers['Authorization'] = f'Token {self.token}'\n return request", "def _add_auth_header(\n self,\n headers: t.Union[None, t.Dict[str, t.Any]] = None,\n ) -> t.Dict[str, t.Any]:\n if headers is None:\n headers = {}\n headers[self.AUTH_HEADER_NAME] = f'{self.AUTH_PREFIX}{self.API_TOKEN}'\n return headers", "def _make_header(self, token):\n header = HEADER.copy()\n header['Authorization'] = \"Bearer {}\".format(token)\n\n return header", "def __call__(self, resp):\r\n if not self.auth_token:\r\n self.auth()\r\n resp.register_hook('response', self.handle_error)\r\n resp.headers['X-Auth-Token'] = self.auth_token\r\n return resp", "def buildHeader(self):\n if self.key:\n userString = self.user+b\":\"+self.key\n else:\n userString = self.user+b\":\"\n \n encodedUserString = b64encode(userString)\n decodedUserString = encodedUserString.decode(\"ascii\")\n self.basicAuthHeader = {\"Authorization\": \"Basic \" + decodedUserString}", "def _headers(self) -> dict[str, str]:\n headers = super()._headers()\n headers[\"Authorization\"] = f\"Bearer {self.__token}\"\n return headers", "def build_header(self):\n authstring = \"Bearer \" + self.auth_token\n header = {\n 
\"Authorization\": authstring,\n \"Content-Type\": \"application/json\",\n \"User-Agent\": self.user_agent,\n \"Accept-Encoding\": \"gzip\"\n }\n return header", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def api_client_authz_header():\n return assemble_authorization_header(API_TOKEN)", "def create_authorization_header(self, **kwargs):\n return {\"Authorization\": \"Bearer {}\".format(self.create_jwt(**kwargs))}", "def set_auth_headers(self, access_token, client_id):\n\t\tself.headers['X-Udemy-Bearer-Token'] = access_token\n\t\tself.headers['X-Udemy-Client-Id'] = client_id\n\t\tself.headers['Authorization'] = \"Bearer \" + access_token\n\t\tself.headers['X-Udemy-Authorization'] = \"Bearer \" + access_token", "def add_auth(self, http_request):\r\n pass", "def get_authenticate_header(self):\n pass", "def get_auth_header(self) -> Mapping[str, Any]:\n return {}", "def _set_auth_header(self, refresh=False):\n if isinstance(self._token_man, TokenManager):\n self._api_client.set_default_header(Headers.authorization,\n self._token_man.get_header(refresh=refresh))\n else:\n self._api_client.set_default_header(Headers.x_auth_token,\n self._token_man.get_session_token(refresh=refresh))", "def get_headers(self):\n return {\n 'Authorization': 'JWT {}'.format(self.token)\n }", "def _token_header(token=None):\n if not token:\n return None\n\n message = '{token}:Ignored'.format(token=token)\n headers = {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}\n return headers", "def _create_auth_headers(self):\n auth_headers = {**self.get_headers()}\n auth_headers['Authorization'] = 'Bearer ' + self.get_access_token()\n return auth_headers", "def basic_header(self):\n self.auth = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\\n', '')\n return { \n #\"Authorization\" : \"Basic %s\" % self.auth, \n \"Content-type\": \"text/plain\" }", "def _make_header(self, token: str) -> dict:\n\n header = HEADER.copy()\n # modify to represent how to build the header\n header['Authorization'] = f\"Bearer {token}\"\n\n return header", "def generate_headers_with_auth(self, token_type: str = 'access'):\n if re.search('access', token_type, re.I):\n bearer_token = self._access_token\n elif re.search('refresh', token_type, re.I):\n bearer_token = self._refresh_token\n else:\n raise (Exception('Please check docstrings and change token_type value'))\n\n return {\n 'accept': 'application/json',\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + bearer_token\n }", "def _addAuthenticationToRequestHeader(request, client):\n request.addAuthorization(client.id, client.secret)", "def add_auth_token(self):\n auth_token = json.loads(os.getenv('AUTH_TOKEN'))\n self.driver.add_cookie(auth_token)", "def __header_base64(self):\n header_base64 = base64.b64encode(f'{self.client_id}:{self.client_secret}'.encode('ascii'))\n header_base64 = str(header_base64).split(\"'\")[1]\n return {'Authorization': f'Basic {header_base64}'}", "def test_headers(self):\n token = 'abc123'\n requests.get(self.url, auth=BearerAuth(token))\n self.assertEqual(httpretty.last_request().headers['Authorization'], 'Bearer {}'.format(token))", "def request_http_header( self ) -> dict:\n return {'content-type': 'application/json','Authorization':f'NLAuth nlauth_account={self._acct_number},nlauth_email={self._auth_email},nlauth_signature={self._acct_signature},nlauth_role=1090'}", "async def gen_headers(auth_string):\n return {\n \"Authorization\": f\"Basic 
{str(b64encode(bytearray(auth_string, 'utf8')), 'utf-8')}\"\n }", "def get_api_header(token):\n return {\n 'Authorization': 'Token ' + str(token)}", "def auth_headers(self, path, payload=\"\"):\n rand = hexlify(Random.new().read(16))\n auth = self.souma.sign(\"\".join([self.souma.id, rand, path, payload]))\n return [(\"Glia-Rand\", rand), (\"Glia-Auth\", auth), (\"Glia-Souma\", self.souma.id)]", "def get_auth_header(self):\n if not self.verify():\n return None\n\n auth_val = self.encode_auth_header_val()\n if not auth_val:\n return None\n\n return {'Authorization': auth_val.replace('\\n', '')}", "def get_basic_auth_token(self):\n return f'Basic {self.key}'", "def build_header(token: str = None):\n return {\n \"Content-Type\": \"application/json\",\n \"X-Auth-Token\": token or get_project_token(),\n }", "def authenticationToken(self):\n return self.authToken", "def token_header(token):\n message = '{token}:ignored'.format(token=token)\n return {'Authorization': 'Basic {code}'.format(\n code=base64.b64encode(message))}", "def _get_auth_string(self):", "def __MakeHeaders(self, auth):\n\n headers = dict()\n headers[\"X-Blip-api\"] = BLIP_API_VERSION\n headers[\"Accept\"] = JSON\n if (auth and self.userName != None and self.password != None):\n credentials = self.userName + \":\" + self.password;\n headers[\"Authorization\"] = \"Basic \"+base64.b64encode(credentials)\n if (self.userAgent != None):\n headers[\"User-Agent\"] = self.userAgent\n\n return headers", "def authenticate(self):\n\n headers = {\n 'Authorization': 'Bearer ' + self.access_token,\n 'ClientId': self.client_id,\n }\n self.headers.update(headers)", "def _headers(self, **kwargs):\n headers = BASE_HEADERS.copy()\n if self._token:\n headers['X-Plex-Token'] = self._token\n headers.update(kwargs)\n return headers", "def _headers(self, **kwargs):\n headers = BASE_HEADERS.copy()\n if self._token:\n headers['X-Plex-Token'] = self._token\n headers.update(kwargs)\n return headers", "def init_headers(token):\n headers = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + token\n }\n return headers", "def __http_build_headers(self, with_authentication):\n\n dynamic_headers = {\n 'timestamp': str(self.__current_milli_time())\n }\n if with_authentication and self.__login_token:\n dynamic_headers['Authorization'] = 'Bearer ' + self.__login_token\n \n dynamic_headers.update(self.__http_default_headers)\n return dynamic_headers", "def __init__(self, token):\n self.token = token\n self.session = requests.Session()\n self.session.headers.update({\"Authorization\": \"Bearer {token}\".format(token=self.token)})", "def add_headers():\n # the actual access token -\n g.x_tapis_token = request.headers.get('X-Tapis-Token')\n\n # the tenant associated with the subject of the request; used, for instance, when the subject is different\n # from the subject in the actual access_token (for example, when the access_token represents a service account).\n g.x_tapis_tenant = request.headers.get('X-Tapis-Tenant')\n\n # the user associated with the subject of the request. Similar to x_tapis_tenant, this is used, for instance, when\n # the subject is different from the subject in the actual access_token (for example, when the access_token\n # represents a service account).\n g.x_tapis_user = request.headers.get('X-Tapis-User')\n\n # a hash of the original user's access token. 
this can be used, for instance, to check if the original user's\n # access token has been revoked.\n g.x_tapis_user_token_hash = request.headers.get('X-Tapis-User-Token-Hash')", "def EstablishAuthToken(self, opener):\n raise NotImplementedError()", "def __call__(self, context, callback):\r\n\r\n callback((('authorization', 'Bearer ' + self.token_hash ),), None)", "def polling_header(self):\n self.auth = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\\n', '')\n return { \n #\"Authorization\" : \"Basic %s\" % self.cmd.auth,\n #\"X-Atmosphere-Transport\" : \"long-polling\",\n #\"X-Atmosphere-tracking-id\" : self.atmos_id,\n \"X-Atmosphere-Framework\" : \"1.0\",\n \"Accept\" : \"application/json\" }", "def get_headers(self):\r\n return {\r\n 'authenticate': {\r\n 'complexType': 'PortalLoginToken',\r\n 'userId': self.user_id,\r\n 'authToken': self.auth_token,\r\n }\r\n }", "def set_access_token(self, token):\n\n self.__current_request_mock.headers['Authorization'] = token", "def _headers(self):\n auth = AuthenticationProvider.currentAuth()\n\n return {\n 'Authorization': '%s %s' % (auth.tokenType, auth.accessToken),\n 'Content-Type': 'application/json'}", "def get_auth_headers(self,email,passwd):\n #获取认证后的http头\n\n postdata = urllib.urlencode({'Email':email,'Passwd':passwd,'service':'reader','source':self.ClIENT})\n req = urllib2.Request(self.AUTH_URL,postdata)\n if self.host:\n req.set_proxy(self.host, self.type)\n f = urllib2.urlopen(req)\n auth_value = f.read().split()[2][5:]\n f.close()\n self.Author_Headers = {'Authorization':'GoogleLogin auth=%s'%auth_value}", "def set_auth_header(self):\n self.auth_header = self.get_auth_header(self.login, self.password)\n return True if self.auth_header else False", "def get_headers():\n return {'Authorization': f'token {settings.GITHUB_AUTH_TOKEN}'}", "def get_token_header(cls, token):\n if token is EMPTY_KNOX_TOKEN:\n return {}\n else:\n return {'HTTP_AUTHORIZATION': 'token {}'.format(token)}", "def authorization(self):\n token = self.create_auth_token(\n self.api_key.user, self.api_key.key, self.api_key.secret\n )\n return f'JWT {token}'", "def auth_key(event):\n headers = event.get('header')\n if not headers:\n raise RestException(\"Headers are missing\", 400)\n auth = headers.get('Authorization')\n if not auth:\n raise RestException('Header Authorization is missing', 400)\n if not auth.lower().startswith('bearer '):\n raise RestException(\"Authorization missing Bearer keyword\", 400)\n auth = auth.replace('Bearer ', '')\n auth = auth.replace('bearer ', '')\n return auth.strip()", "def auth(self):\n return self.api(self.token)", "def get_headers(self):\r\n return {\r\n 'authenticate': {\r\n 'username': self.username,\r\n 'apiKey': self.api_key,\r\n }\r\n }", "def __init__(self, authtoken, organization_id):\n self.headers = {\n 'Authorization': 'Zoho-oauthtoken ' + authtoken,\n }\n self.details = {\n 'organization_id': organization_id\n }", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n if not auth:\n return \"authorization_header_missing\"\n\n parts = auth.split()\n\n if parts[0].lower() != \"bearer\":\n return \"invalid_header\"\n elif len(parts) == 1:\n return \"invalid_header\"\n elif len(parts) > 2:\n return \"invalid_header\"\n\n token = parts[1]\n return token", "def __init__(\n self,\n uri,\n audience,\n get_token,\n **kwargs\n ):\n super(JWTTokenAuth, self).__init__(uri, audience, kwargs.pop(\"token_type\", TOKEN_TYPE_JWT), get_token)\n self.get_token = get_token", "def 
__call__(self, request):\n self._logger.debug(f'__call__, {request.url} adding Authorization header')\n request.headers[\"Authorization\"] = self._get_auth_value()\n request.register_hook(\"response\", self._handle_401)\n return request", "def get_header( self ):\n\t\tkey = self.key\n\t\tvalue = self.value\n\t\tpath = self.path\n\t\texpires = self.expires.strftime( \"%a, %d-%m-%y %H:%M:%S GMT\" )\n\t\treturn ( \"Set-Cookie\", \"%(key)s=%(value)s; Path=%(path)s; Expires=%(expires)s;\" % locals() )", "def bearer_oauth(r):\n\n r.headers[\"Authorization\"] = f\"Bearer {bearer_token}\"\n r.headers[\"User-Agent\"] = \"S-n-Tweet Alpha\"\n return r", "def headers(self):\n headers = BASE_HEADERS\n if self.token:\n headers['X-Plex-Token'] = self.token\n return headers", "def asterisk_in_header():\n auth_token = get_auth_token()\n\n headers = '{\"Host\":\"$host\",\"User-Agent\":\"$user_agent\",\"Date\":\"DATE\",'\n headers += '\"Accept\": \"*/*\",\"Accept-Encoding\": \"gzip\",'\n headers += '\"X-Project-ID\": \"$project_id\",'\n headers += '\"X-Auth-Token\": \"$token\"}'\n headers = string.Template(headers)\n\n return headers.substitute(host=CFG.host, user_agent=CFG.user_agent,\n project_id=CFG.project_id, token=auth_token)", "def authenticate_by_token(self, apitoken):\n self.__headers.update({'Authorization': 'APIToken {}'.format(apitoken)})", "def for_authenticate_only(self):\n self.token['type'] = 'auth'\n\n return self", "def auth(self):\n return auth.get_auth()", "def _authenticate_for(self, resp):\n # Get the auth. info from the headers\n scheme, params = resp.headers['Www-Authenticate'].split(None, 1)\n assert (scheme == 'Bearer')\n info = {k: v.strip('\"') for k, v in (i.split('=')\n for i in params.split(','))}\n\n # Request a token from the auth server\n params = {k: v for k, v in info.items() if k in ('service', 'scope')}\n auth = HTTPBasicAuth(self.username, self.password)\n r2 = requests.get(info['realm'], params=params,\n auth=auth, verify=self.verify_ssl)\n\n if r2.status_code == 401:\n raise RuntimeError(\"Authentication Error\")\n r2.raise_for_status()\n\n self.auth = BearerAuth(r2.json()['token'])", "def set_auth_token_header(self):\n\n username = 'test-user'\n passwd = 'testuserpass1234'\n user = User.objects.create(username=username)\n user.set_password(passwd)\n user.save()\n\n assert Account.objects.get(user=user) is not None\n url = reverse('token_obtain_pair')\n res = self.client.post(url,\n data={'username': username, 'password': passwd})\n self.client.credentials(HTTP_AUTHORIZATION=\n f\"Bearer {res.data['access']}\")\n return user", "def bearer_auth():\n authorization = request.headers.get(\"Authorization\")\n if not (authorization and authorization.startswith(\"Bearer \")):\n response = app.make_response(\"\")\n response.headers[\"WWW-Authenticate\"] = \"Bearer\"\n response.status_code = 401\n return response\n slice_start = len(\"Bearer \")\n token = authorization[slice_start:]\n\n return jsonify(authenticated=True, token=token)", "def get_token(self):\n self.session.headers.pop(\"Authorization\", None) # delete old token if was\n\n data = json.dumps({\"password\": self.password, \"username\": self.username})\n answer = self.server_request(self._authTokenPath, data=data)\n\n try:\n self.token = json.loads(answer)[\"token\"]\n self.session.headers.update({\"Authorization\": \"Token \" + self.token})\n except KeyError as err:\n print_unexpected_json_error_key(err, answer, self._authTokenPath)\n exit(1)", "def authentication_header():\n with open(KEY_FILE, \"r\") as file:\n 
header = json.load(file)\n return header", "def get_token_auth_header():\n auth = request.headers.get(\"Authorization\", None)\n print(auth)\n\n if not auth:\n raise AuthError({\"code\": \"authorization_header_missing\",\n \"description\":\n \"Authorization header is expected\"}, 401)\n \n parts = auth.split()\n \n if parts[0].lower() != \"bearer\":\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must start with\"\n \" Bearer\"}, 401)\n elif len(parts) == 1:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\": \"Token not found\"}, 401)\n elif len(parts) > 2:\n raise AuthError({\"code\": \"invalid_header\",\n \"description\":\n \"Authorization header must be\"\n \" Bearer token\"}, 401)\n\n token = parts[1]\n return token", "def get_token_auth_header():\n auth = request.headers.get('Authorization', None)\n if not auth:\n raise AuthError({'code': 'authorization_header_missing',\n 'description': 'Authorization header is expected'}, 401)\n\n parts = auth.split()\n\n if parts[0].lower() != 'bearer':\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header must start with Bearer'}, 401)\n\n if len(parts) < 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Token not found after Bearer'}, 401)\n\n if len(parts) > 2:\n raise AuthError({'code': 'invalid_header',\n 'description': 'Authorization header is an invalid token structure'}, 401)\n\n return parts[1]", "def oxe_set_headers(token, method=None):\n\n # basic method GET\n headers = {\n 'Authorization': 'Bearer ' + token,\n 'accept': 'application/json'\n }\n\n # addition for POST & PUT\n if method in ('POST', 'PUT'):\n headers.update({'Content-Type': 'application/json'})\n # addition for DELETE\n elif method == 'DELETE':\n headers.update({'Content-Type': 'text/plain'})\n return headers", "def set_token(self, token):\n # type: (Token) -> None\n self.token = token\n self._token_header = \"Bearer \" + token[\"access_token\"]", "def headers(group_id, token):\n return { \n \"active-group\": group_id,\n \"Authorization\" : \"Bearer: {}\".format(token) \n }", "def set_requests_auth(self):\n self.__auth = OAuth2(token=self.bearer_token)" ]
[ "0.7161087", "0.7057166", "0.6970341", "0.6970341", "0.6921795", "0.68667036", "0.67996633", "0.6784417", "0.6764053", "0.66727406", "0.66410804", "0.6630715", "0.66150844", "0.6613414", "0.6583362", "0.65666574", "0.6546947", "0.6528426", "0.6521229", "0.6509429", "0.64885724", "0.64683807", "0.6465539", "0.64653766", "0.6446665", "0.637545", "0.6356642", "0.63558835", "0.63277066", "0.63277066", "0.63096464", "0.6301908", "0.6295405", "0.628945", "0.6274963", "0.6246223", "0.62333673", "0.62333494", "0.6230232", "0.618832", "0.6172884", "0.616688", "0.61577165", "0.61508465", "0.61501974", "0.6137991", "0.6101909", "0.60997856", "0.60972965", "0.6083374", "0.60781026", "0.60656196", "0.60457563", "0.6039963", "0.6032903", "0.60067856", "0.6006133", "0.6000747", "0.5990458", "0.5990458", "0.5986704", "0.5985856", "0.59825397", "0.5976816", "0.59273535", "0.5917832", "0.5908729", "0.59062403", "0.58950627", "0.58925265", "0.5880798", "0.58785415", "0.5878321", "0.58720535", "0.5870622", "0.5855475", "0.5851044", "0.5825631", "0.58075994", "0.5797097", "0.57726085", "0.5767756", "0.5765293", "0.5757139", "0.5747815", "0.5746138", "0.57403547", "0.5737745", "0.5729536", "0.57282186", "0.5725681", "0.5724671", "0.5722894", "0.57221377", "0.5689014", "0.5678615", "0.5666714", "0.56666636", "0.566492", "0.56508875" ]
0.5789005
80
Returns the token from the api to tell us that we have been logged in
def token(self): if not self._token: self._token = self.authenicate().token return self._token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_token(self):\n return user.get_token()", "def get_token(self):\n response = self.client.post(\n url_for('auth.login'),\n data=json.dumps({'username': '[email protected]', 'password': 'denno'}),\n headers={'content_type': 'application/json'})\n return json.loads(response.data)['token']", "def get_token(self):\n\t\tself.client.post('/api/v1/auth/signup', data=json.dumps(self.signup_user), content_type='application/json')\n\t\tresponse = self.client.post('/api/v1/auth/login', data=json.dumps(self.login_user), content_type='application/json')\n\t\tresp = json.loads(response.data.decode())\n\t\treturn 'Bearer ' + resp['access_token']", "def login_user(self):\n response = self.client.post(self.login_url, self.login_data, format='json')\n return response.data['token']", "def auth_token(self):", "def getUser():\n\n if 'token' in session:\n return \"Authenticated\"\n else:\n return \"Unauthenticated\"", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get_token():\n if g.current_user.is_anonymous or g.token_used:\n return unauthorized('Invalid credentials')\n return jsonify({'token': g.current_user.generate_auth_token(\n expiration=3600), 'expiration': 3600})", "def get(self):\n if current_user and not current_user.is_anonymous:\n user = current_user\n tok = Token(user, 3600)\n return tok\n return jsonify({404: 'User not found'})", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "async def token(request: Request):\n return get_token()", "def UserToken(self) -> object:", "def get_auth_token():\n token = g.user.generate_auth_token(24*3600)\n return jsonify({'user_id': g.user.id, 'token': token.decode('ascii')})", "def get_token(self):\n self.session.headers.pop(\"Authorization\", None) # delete old token if was\n\n data = json.dumps({\"password\": self.password, \"username\": self.username})\n answer = self.server_request(self._authTokenPath, data=data)\n\n try:\n self.token = json.loads(answer)[\"token\"]\n self.session.headers.update({\"Authorization\": \"Token \" + self.token})\n except KeyError as err:\n print_unexpected_json_error_key(err, answer, self._authTokenPath)\n exit(1)", "def GetToken(self):\n if self.auth_token_:\n return self.auth_token_\n raise RuntimeError('ClientLoginAuthPolicy is not logged in.')", "def getToken(self):\n \n data = '''\n {\n \"auth\": \n {\n \"username\" : \"%s\",\n \"password\" : \"%s\"\n }\n }\n ''' % (self.username, self.password)\n \n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Host': 'api.appnexus.com'\n }\n r = requests.post(self.auth_url, data=data, \n headers=headers)\n ac_data = r.json()\n \n if ac_data['response']['status'] != 'OK':\n self.stream_logger.error('Error while retrieving access token')\n self.stream_logger.error('Status code {0}'\\\n .format(ac_data['response']['status']))\n return False\n\n return ac_data['response']['token']", "def _lookup_token(self):\n path = '/authn/{account}/{login}/authenticate'.format(\n account=self.account, login='admin'\n )\n res = self._post(path, data=self.api_token, skip_auth=True)\n return base64.b64encode(res.text)", "def get_token(self):\n self.register_user(self.user_data)\n result = self.login_user(self.login_data)\n header_access_token = json.loads(result.data.decode())['header_access_token']\n return header_access_token", "def get_token(self):\n token = 
self._session.token\n return token", "def get_token():\n params = {'get_token': 'get_token'}\n return load_page(API, params=params, headers={'content-type': 'application/json'})['token']", "def authenticationToken(self):\n return self.authToken", "def get_auth_token(cls):\n return jsonify({\n 'user': current_user.serialize(),\n 'token': current_user.get_auth_token(),\n })", "def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')", "def get_token(self):\n url = '/auth-token/'\n data = self._http_post(url, self.credentials)\n token = data['token']\n assert len(token) == 40, 'The length of seahub api auth token should be 40'\n self.token = 'Token ' + token", "def auth_token(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_token\")", "def auth(self):\n return self.api(self.token)", "def get_token(self):\n self.token = self._session.fetch_token(\n token_url=CLOUD_URLS[\"get_token\"][1],\n client_id=self._client_id,\n client_secret=self._client_secret\n )", "def get(self):\n # Login of authorized user stores in Flask g object\n user = User.query.filter_by(username=g.user.username).first()\n # Generate token\n token = user.generate_auth_token()\n # Send token in ASCII format\n return {'token': token.decode('ascii')}", "def get_new_token(self):\n self.register_user(self.user_data2)\n result = self.login_user(self.login_data2)\n header_access_token = json.loads(result.data.decode())['header_access_token']\n return header_access_token", "def _get_auth_token(self):\n\n __logger__.debug(\"Getting auth Token\")\n return self.keystone_client.auth_ref['token']['id']", "def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 'notrealtoken'\n\n return auth_token", "def get_api_token(self):\n integration_context = demisto.getIntegrationContext()\n api_token = integration_context.get('api_token')\n valid_until = integration_context.get('valid_until')\n\n # Return api token from integration context, if found and not expired\n if api_token and valid_until and time.time() < valid_until:\n demisto.debug('Retrieved api-token from integration cache.')\n return api_token\n\n headers = {'Accept': CONTENT_TYPE_JSON}\n\n demisto.debug('Calling authentication API for retrieve api-token')\n resp = self.http_request(\n method='POST', url_suffix=URL_SUFFIX['GET_TOKEN'], headers=headers\n )\n integration_context = self.set_integration_context(resp)\n\n return integration_context.get('api_token')", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n token = getpass.getpass('Paste in your RDR API token and press Enter:')\n return {'Authorization': 'token ' + token}", "def get_auth_token_student():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def api_token(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_token\")", "def get_auth_token(self):\r\n\r\n self._authenticating = True\r\n\r\n auth_data = {\r\n \"auth\": {\r\n \"identity\": {\r\n \"methods\": [\r\n \"password\"\r\n ],\r\n \"password\": {\r\n \"user\": {\r\n \"domain\": {\r\n \"name\": self._config['user_domain'] if 'user_domain' in self._config else self._config[\r\n 'domain']\r\n },\r\n \"name\": 
self._config['user'],\r\n\r\n \"password\": self._config['password']\r\n }\r\n }\r\n },\r\n \"scope\": {\r\n \"project\": {\r\n \"domain\": {\r\n \"name\": self._config['domain']\r\n },\r\n \"name\": self._config['project'],\r\n }\r\n }\r\n }\r\n }\r\n\r\n # profile = prof,\r\n # user_agent = 'toil',\r\n # auth_url = self._config['auth_url'],\r\n # project_name = self._config['project'],\r\n # project_domain_name = self._config['domain'],\r\n # user_domain_name = self._config['domain'],\r\n # username = self._config['user'],\r\n # password = self._config['password']\r\n\r\n response = self.post(None, self.URL_AUTH_TOKEN, data=json.dumps(auth_data))\r\n\r\n self._authenticating = False\r\n\r\n json_response = response.json()\r\n self._token = json_response['token']\r\n self._token_x_subject = response.headers['x-subject-token']\r\n\r\n catalog = json_response['token']['catalog']\r\n\r\n for service in catalog:\r\n self._services[service['name']] = service", "def token(self):\n return self[\"token\"]", "def get_token(): \n \n # Token url\n token_endpoint = \"https://api.signicat.io/oauth/connect/token\"\n # Setting the grant type to client_credentials\n data = {'grant_type':'client_credentials', 'scope':'identify'}\n # Posting to token url with HTTP basic authentication\n token = requests.post(token_endpoint, data=data,allow_redirects=True, auth=(config.CLIENT_ID, config.CLIENT_SECRET))\n # Converting json string to json\n token_json = json.loads(token.text)\n \n # Returning the access_token\n return token_json['access_token']", "def getUser(self, authenticationToken):\r\n pass", "def getToken(email, password):\n r = requests.post(r\"https://opendata.hopefully.works/api/login\", json={\"email\":email, \"password\":password})\n if r.status_code == 200: \n return r.json()[\"accessToken\"]\n else:\n return \"\"", "def login(self):\n r = self._login_token()", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def get_token(self):\n client_auth = requests.auth.HTTPBasicAuth(self.client, self.secret)\n post_data = {'grant_type': 'password', 'username': self.user, 'password': self.password}\n headers = {'User-Agent': self.user_agent}\n response = requests.Session()\n response2 = response.post(self.token_url, auth=client_auth, data=post_data, headers=headers)\n self.token = response2.json()['access_token']\n self.t_type = response2.json()['token_type']", "def __get_authentication_token(self):\n cache = load_json(self._tokenPath)\n return cache[\"authentication_token\"]", "def BearerToken():\n\t\tif bpformation._bearer_token: return(bpformation._bearer_token)\n\n\t\tif not bpformation._CONTROL_COOKIES: \n\t\t\tWeb._LoginScrape()\n\n\t\t# Ping - validate if we need to login\n\t\ttry:\n\t\t\tr = bpformation.web.CallScrape(\"GET\",\"/\")\n\t\t\tif not re.search(\"<title> Control Portal Dashboard </title>\",r.text):\n\t\t\t\traise(bpformation.BPFormationLoginException)\n\t\texcept requests.exceptions.ConnectionError:\n\t\t\traise\n\t\t\traise(bpformation.BPFormationLoginException)\n\t\t\n\t\t# Extract token\n\t\tm = 
re.search(\"\"\"shell.user.set\\(\\{\"token\":\"(.+?)\",\"userName\":\"(.+?)\"\\}\\);\"\"\",r.text)\n\t\tusername = m.group(2)\n\t\ttoken = m.group(1)\n\n\t\treturn(token)", "def token(self):\n if not self._token or self._expires <= datetime.now():\n self._request_token()\n return self._token", "def get_token(user, password):\n url = urljoin(PivotalTrackerService.URI, \"me\")\n auth = (user, password)\n response = PivotalTrackerService.get_response(\"get\", url, auth=auth)\n\n try:\n response.raise_for_status()\n data = response.json()\n ret_val = data[\"api_token\"]\n except RequestException:\n ret_val = None\n\n return ret_val", "def token(self):\n\n if not self.requests:\n return None\n return self.requests[0].token", "def _getApiAuthToken(self):\n return settings.EBAY_API_AUTH_TOKEN", "def refresh_token(self):\n # basic function to get an access token\n api_response = requests.get(\n self.api_config.get_api_url() + \"authentication/g?username=\" + self.api_config.get_api_username() + \"&password=\" + self.api_config.get_api_password())\n\n if api_response.status_code >= 200:\n self.API_TOKEN = api_response.content.decode()\n\n return self.API_TOKEN\n else:\n return None", "def get_oauth_token():\n return session.get('remote_oauth')", "def login():\n token = request.form.get('idtoken')\n if verify_token(token):\n session['logged_in'] = True\n return '', 204\n else:\n return '', 401", "def login_get(self):\n return json.dumps({\"user_email\": auth.current_user()[0]}), 200", "def auth_header_value(self):\n return f\"token {self.API_TOKEN}\"", "def EstablishAuthToken(self, opener):\n url = 'https://www.pivotaltracker.com/services/v3/tokens/active'\n data = parse.urlencode((('username', self.username),\n ('password', self.password)))\n try:\n req = opener.open(url, data.encode())\n except error.HTTPError as e:\n if e.code == 404:\n raise NoTokensAvailableException(\n 'Did you create any? 
Check https://www.pivotaltracker.com/profile')\n else:\n raise\n\n res = req.read()\n\n dom = minidom.parseString(res)\n token = dom.getElementsByTagName('guid')[0].firstChild.data\n\n return token", "def get_auth_token(self):\n data = [str(self.id), self.password]\n return login_serializer.dumps(data)", "def _get_token() -> str:\n username = si.get_env_var(\"EODDATA_USERNAME\")\n password = si.get_env_var(\"EODDATA_PASSWORD\")\n\n _LOG.info(\"Logging into EODData API ...\")\n\n response = get_client().service.Login(Username=username, Password=password)\n\n if response[\"Token\"] is None:\n dbg.dfatal(\"Login Failed: '%s'\", response[\"Message\"])\n\n return str(response[\"Token\"])", "def login_to_api(self):\n\n # set the API endpoint and POST the username/password to it\n endpoint = app.config['API']['url'] + 'login'\n response = requests.post(\n endpoint,\n verify = app.config['API']['verify_ssl'],\n json = {\n 'username': self.username,\n 'password': self.password\n }\n )\n\n # if the response is good, return True\n if response.status_code == 200:\n user = response.json()\n self._id = ObjectId(user['_id'])\n self.token = user['access_token']\n return True", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def get_token():\n\n try:\n url = params.BASE_URL\n payload={}\n headers = {}\n response = requests.request(\"GET\", url, headers=headers, data=payload)\n response = json.loads(response.text)\n base_url = response.get(params.CONTEXT)\n token = base_url.split(\"/\")[-2]\n return (True, token)\n except Exception as e:\n return (False, str(e))", "def authenticate():\n token = request.form[\"token\"]\n decoded_token = firebase_admin.auth.verify_id_token(token)\n user_uid = decoded_token['uid']\n user = firebase_admin.auth.get_user(user_uid)\n session_login(user)\n return user_uid", "def get_api_token(self, app, user, pwd):\n authorization = ('Basic ' + base64.b64encode(user + \":\" + pwd))\n api_token_resp = app.post('/v1/api_token', headers={'Authorization': authorization})\n if api_token_resp.status != '200 OK':\n raise ValueError(api_token_resp.status)\n api_token = json.loads(api_token_resp.data)['api_token']\n return api_token", "def get_auth_token_teacher():\n\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600})", "def get_token(self):\n oauth_provider = UserSocialAuth.objects.get(provider='drchrono')\n access_token = oauth_provider.extra_data['access_token']\n return access_token", "def get_user_token(self):\n user = User.objects.get(username=self.user.username)\n social = user.social_auth.get(provider='github')\n return social.extra_data['access_token']", "def access_token(self):\n return self.access_token_str", "def _get_token(self): # pragma: no cover\n\n tokenCookie = None\n for cookie in self._session.cookies:\n if \"mast_token\" in cookie.name:\n tokenCookie = cookie\n break\n\n if not tokenCookie:\n warnings.warn(\"No auth token found.\", AuthenticationWarning)\n\n return 
tokenCookie", "def auth(self):\n return self.user.get('current')", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def authcheck():\n user = get_user()\n return jsonify({'current_identity': user.username})", "def check_user():\n token = request.headers['Authorization'].replace('Bearer ', '')\n return jsonify({\"access_token\": token}), 200", "def get_token() -> str:\n try:\n bearer, authorization = request.headers['Authorization'].split()\n if 'bearer' not in bearer.lower():\n raise Forbidden('Invalid token. Please login!')\n return authorization\n\n except Exception:\n raise Forbidden('Token is required. Please login!')", "def connect(self):\n r = authentication.token(connection=self)\n\n\n self.auth_token = r.json().get('token')", "def get_current(self):\n auth_token = session.get(\"auth_token\")\n print(auth_token)\n if not auth_token:\n return None\n user = db.user.find_one({\"auth_token\":auth_token})\n\n return user", "def get_token(self):\n message = {\n \"request\": \"access_token\",\n \"account\": self.account,\n \"min_valid_period\": self.validity,\n \"application_hint\": \"orpy\",\n }\n try:\n self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self._sock.connect(self.socket_path)\n self._sock.sendall(json.dumps(message).encode())\n\n data = \"\"\n while True:\n recv = self._sock.recv(16).decode()\n if recv:\n data += recv\n else:\n break\n except socket.error as err:\n raise exceptions.AuthExceptiob(\n err=\"Cannot communicate with the \" \"oidc-agent: %s\" % err\n )\n finally:\n self._sock.close()\n\n token = json.loads(data)\n if token.get(\"status\") == \"failure\":\n raise exceptions.AuthError(err=token.get(\"error\"))\n return token", "def get_token(request):\n request_json = request.get_json()\n # response = dict()\n if request.authorization and 'password' in request.authorization and 'username' in request.authorization:\n pwd = request.authorization.get('password')\n user = request.authorization.get('username')\n if pwd == 'password':\n token = jwt.encode({\"user\": user,\n 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=20)}, SECRET_KEY,\n algorithm=\"HS256\")\n\n return jsonify({'token': token})\n\n return make_response(\"basic login required\", 404, {\"www-authenticate\": \"basic login required\"})", "def get_token(id=None, name=None):\n\tif id is None and name is None:\n\t\tname = config['username']\n\treturn get_user(id=id, name=name, get_missing=False).token", "def get_auth_token(self, username, password):\n url = '/'.join([self.base_url, self.TOKEN_ENDPOINT])\n r = requests.get(url, auth=(username, password))\n if r.status_code == 200:\n return r.content\n return r", "def get_token(username, password):\n\t\ttoken = cf.get_token(username, password)\n\t\treturn token", "def generate_token(self):\n self.__get_auth_token_and_secret()\n return self.get_token()", "def get_token(self):\r\n token = {'id': self.catalog['access']['token']['id'],\r\n 'expires': self.catalog['access']['token']['expires'], }\r\n try:\r\n token['user_id'] = self.catalog['access']['user']['id']\r\n token['tenant_id'] = (\r\n self.catalog['access']['token']['tenant']['id'])\r\n except Exception:\r\n # just leave the tenant and user out if it doesn't exist\r\n pass\r\n return token", "def get_auth_token(self):\n return self.do_rpc('get_authorization',\n username=self._username,\n password=self._password)", "def _getAuth(self):\r\n parameters = {\r\n 'service' : 'reader',\r\n 'Email' : 
self.username,\r\n 'Passwd' : self.password,\r\n 'accountType' : 'GOOGLE'}\r\n req = requests.post(ClientAuthMethod.CLIENT_URL, data=parameters)\r\n if req.status_code != 200:\r\n raise IOError(\"Error getting the Auth token, have you entered a\"\r\n \"correct username and password?\")\r\n data = req.text\r\n #Strip newline and non token text.\r\n token_dict = dict(x.split('=') for x in data.split('\\n') if x)\r\n return token_dict[\"Auth\"]", "def token():\n return os.environ.get('TOKEN', None)", "def get_token():\n req = request.get_json()\n username = str(req['username'])\n password = str(req['password'])\n if User.username_password_match(username, password):\n expiration_date = datetime.datetime.utcnow() + \\\n datetime.timedelta(seconds=100)\n token = jwt.encode({'exp': expiration_date}, app.config['SECRET_KEY'], algorithm='HS256')\n return token\n return Response('', 401, mimetype='application/json')", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def log_in(jwt):\n return current_app.library_registry.admin_controller.log_in(jwt)", "def get_token():\n\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\",\n scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"],\n client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")\n raise Exception", "def get_admin_token(self):\n\t\tself.client.post('/api/v1/auth/signup', data=json.dumps(self.admin_user), content_type='application/json')\n\t\tresponse = self.client.post('/api/v1/auth/login', data=json.dumps(self.admin_user), content_type='application/json')\n\t\tresp = json.loads(response.data.decode())\n\t\treturn 'Bearer ' + resp['access_token']", "def get_juicebox_token(self, save=False):\n logger.debug('Getting JB token from Public API')\n url = '{}/token/'.format(get_public_api())\n data = {\n 'data': {\n 'attributes': {\n 'username': self.username,\n 'password': self.password,\n 'endpoint': self.endpoint\n },\n 'type': 'auth'\n }\n }\n headers = {'content-type': 'application/json'}\n response = jb_requests.post(url, data=json.dumps(data),\n headers=headers)\n if response.status_code != 201:\n logger.debug(response)\n raise AuthenticationError('I was unable to authenticate you with '\n 'those credentials')\n token = response.json()['data']['attributes']['token']\n self.token = token\n logger.debug('Successfully retrieved JB token')\n\n if save:\n logger.debug('Saving token to netrc')\n self.update_netrc()", "def auth_authenticate():\n data = {'LoginName': username, 'Password': password}\n parameters = data_to_json(data)\n url = base_url + 'general/authentication/authenticate'\n response = make_request(url, parameters)\n r_value = ''\n if response['Status'] == 0:\n r_value = response['Value']['Token']\n return r_value", "def get_auth_token():\n headers = {\n 'Content-Type': 'text/plain;charset=UTF-8', }\n data = '{ \\\n \"auth\": { \\\n \"identity\": { \\\n \"methods\": [ \\\n 
\"password\" \\\n ], \\\n \"password\": { \\\n \"user\": { \\\n \"name\": \"zheng_zhao\", \\\n \"password\": \"ZhaoZheng0426\", \\\n \"domain\": { \\\n \"name\": \"hwstaff_y00465251\" \\\n } \\\n } \\\n } \\\n }, \\\n \"scope\": { \\\n \"project\": { \\\n \"id\": \"454add6b26d04f53ae5c593551acf1ff\" \\\n } \\\n } \\\n } \\\n }'\n\n r = requests.post('https://iam.cn-north-1.myhuaweicloud.com/v3/auth/tokens',\n headers=headers, data=data)\n\n # print(r.status_code)\n # print(r.headers)\n token = r.headers.get('X-Subject-Token')\n\n return token" ]
[ "0.7961458", "0.7928275", "0.78518677", "0.7729143", "0.7523873", "0.750345", "0.7439859", "0.7439859", "0.73954666", "0.7394398", "0.7386385", "0.736755", "0.7338549", "0.7287521", "0.7270584", "0.7262894", "0.72374797", "0.7226831", "0.71970665", "0.71939373", "0.7190025", "0.71311516", "0.7121867", "0.71033096", "0.7098925", "0.70963466", "0.708599", "0.7083708", "0.7083055", "0.7082412", "0.70691377", "0.70600724", "0.7059497", "0.7059497", "0.7059497", "0.7058413", "0.7058413", "0.70497715", "0.70458436", "0.7036429", "0.7032804", "0.6990911", "0.698755", "0.6978599", "0.6932621", "0.69305503", "0.6926032", "0.6926032", "0.69256186", "0.69036436", "0.6902249", "0.6879262", "0.6872634", "0.6866028", "0.6864764", "0.6863825", "0.685354", "0.6848693", "0.6847755", "0.6824079", "0.68185896", "0.679847", "0.6791231", "0.67859846", "0.6776731", "0.67708784", "0.67625153", "0.675663", "0.6752443", "0.6741153", "0.6732942", "0.6712293", "0.67108583", "0.6710438", "0.67092615", "0.66994464", "0.66949123", "0.66781664", "0.66736215", "0.66704607", "0.66580755", "0.6654649", "0.6645972", "0.66449684", "0.6640174", "0.6640025", "0.6633304", "0.66287833", "0.6621802", "0.6619165", "0.6618987", "0.66189855", "0.66189855", "0.66189855", "0.6618568", "0.66149384", "0.66032064", "0.6597092", "0.6596662", "0.6590593" ]
0.72815895
14
Open the website and verify that it loads successfully.
def test_should_open_website(self):
    search_str = "//*[text()='Generate']"
    els = self.driver.find_elements_by_xpath(search_str)
    self.assertGreater(len(els), 0, 'Page loads failed!')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_open(self):\n page, resources = self.ghost.open(base_url)\n self.assertEqual(page.url, base_url)\n \n self.ghost.click(\"#run\")", "def simple_test_open_url(url):\n try:\n return requests.get(url, headers={\"User-Agent\": random.choice(useragents.useragents())}).status_code\n except Exception as _:\n return False", "def open_url(self, url):\n try:\n if url != \"\":\n self.driver.maximize_window()\n self.driver.get(url)\n print(url + \" : url is opened\")\n else:\n print(\"Please enter valid url\")\n except Exception as e:\n print(str(e))", "def check_is_alive(url):\n print(url)\n try:\n requests.get(url, allow_redirects=False, timeout=10.0)\n except requests.exceptions.ConnectionError as exc:\n print('- Website doesn\\'t exists: ', exc)\n with open('result_test.txt', 'a') as result_test: # Appending urls\n result_test.write(url + '\\n')", "def test_url():\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1", "def open_website(url):\n browser = webdriver.Firefox()\n browser.get(url)\n return browser", "def openSite(url):\n\timport webbrowser\n\twebbrowser.open('http://www.' + url + '.com', 2)", "def __open_page(self, url):\n try:\n # Opens the url\n page = request.urlopen(url)\n except Exception as e:\n print(e, url)\n return ''\n else:\n # Avoid that None will be returned to that, try to open the web page again.\n return page if page is not None else self.__open_page(url)", "def test_urls_work(url):\n with requests.get(url) as r:\n assert r.status_code == 200", "def open_url(self, url):\n\n self.driver.get(url)", "def open(url):\r\n webbrowser.open(url)", "def i_am_on_the_zoo_website():\n driver.get(\"http://www.thetestroom.com/webapp/\")", "def go_to_url(self, url):\n if self.browser is not None:\n self.browser.get(url)\n else:\n print('Browser is not running')", "def test_open_mainpage(open_browser, url_param):\n open_browser.get(url_param)\n assert open_browser.current_url == url_param\n open_browser.close()", "def open_url(self, url: str):\n self.driver.get(url)", "def openurl(url):\n\n # Open the URL\n webbrowser.open(url)", "def allocine_connect(url):\n #go to allocine page\n driver.get(url)\n #sleep until the page load\n sleep(10)\n #click on cookies button\n print(\"cookies checking\")\n cookies_check_v2()\n sleep(1)\n driver.get(url)", "def test_load_crawl():\n\n cmdline = [\"kivy\", \"-m\", \"SimpleHTTPServer\", \"8866\"]\n\n web_server = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=os.path.join(os.getcwd(), \"tests\", \"test_data\"))\n try:\n\n # Let the web server wake up in another process\n time.sleep(1.0)\n\n web_server.poll()\n if web_server.returncode is not None:\n raise AssertionError(\"Web server process did not start up: {}\".format(\" \".join(cmdline)))\n\n result = load_and_run(\"http://localhost:8866#hello1:hello\")\n assert result == \"Hello there\"\n finally:\n if web_server.returncode is None:\n web_server.terminate()", "def open_web_browser(url: str):\n Popen(web_browser + [url], stdout=DEVNULL, 
stderr=DEVNULL)", "def test_get_indexhtml(self):\n url = self.baseurl + \"/main\"\n req = urllib2.urlopen(url, None, 3)\n self.assertTrue( req.getcode() == 200 , \"200 OK Not FOUND!\")", "def check_url(url_link):\n res = requests.get(url_link, allow_redirects =True)\n if res.status_code == 200:\n print('valid URL \\n')\n return url_link\n else:\n print('Oupps there is something wrong with your URL. Run the program again!! ')\n return res.status_code", "def open_link(self):\n try:\n # webbrowser.open(self.url) # if you are on Windows OS\n webbrowser.get('safari').open_new_tab(self.url) # if you are on Mac OS\n except(AttributeError):\n self.ids.label.text = self.error_msg", "def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']", "def run(self, url=''):\n if url:\n webbrowser.open(url)", "def test_loadUrl(self):\n urllib2.urlopen = self.urlopen_mock\n self.assertEquals(self.html, lunchr.loadUrl('http://www.example.com'))", "def test_home(self):\n self.selenium.get('{}/'.format(self.live_server_url))", "def open_news_url(self, url):\n\n try:\n if not webbrowser.open_new_tab(url):\n raise webbrowser.Error\n except webbrowser.Error:\n print('Unable to open a web browser, try accessing this URL manually instead:\\n{0}'.format(url))", "def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200", "def open_location(self):\n try:\n self.assertEqual(self.test_location, self.selenium.get_location())\n except AssertionError, self.e:\n self.verificationErrors.append(str(self.e))", "def open_url(name):\n url = localReadConfig.get_webServer(name)\n browser = open_browser()\n browser.get(url)\n return browser", "def internet_on(): \n try:\n urlopen('http://www.google.com', timeout=2)\n return True\n except urlopen.URLError as err: \n return False", "def check_conn():\n try:\n urllib2.urlopen(\"http://www.google.com\", timeout=5)\n return True\n except urllib2.URLError:\n pass\n return False", "def test_grab_url_success(self):\n self.mini_spider_thread.grab_url('http://example.com/graburl/success')\n self.assertTrue(self.mini_spider_thread.grab_url_success)\n self.assertEqual(self.mini_spider_thread.url_response.read(), 'Grab url success.')", "def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)", "def start_test(url):\n \n Debug.user(' ################# start Test ######################')\n App.open('firefox --private-window '+url)\n wait(\"1501595436606.png\", 10)\n\n click(\"1501595453560.png\")\n\n if exists():\n \n click()\n else:\n click()\n \n\n\n if exists(\"1499781534684.png\"):\n click(\"1499781552298.png\")\n type('root')\n click(\"1499781563870.png\")\n else:\n pass\n click(\"1499781591282.png\")", "def test_connect_to_http_uri(self):\n\n self.browser.proxy_client.new_har(\"page\")\n\n po = self.catalog.load_pageobject('GenericPage')\n po.goto_page(self.http_authority)\n\n har_entry = self.browser.page_load_details(self.http_authority)\n\n assert har_entry is not None, \\\n \"failed to load the uri %s. http archive unavailable.\" \\\n % (self.http_authority)\n\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"failed to load uri %s. 
http archive response follows:\\n%s\" \\\n % (self.http_authority,pprint.pformat(har_entry))", "def is_internet_on() -> bool:\n try:\n urlopen('https://www.google.com/', timeout=5)\n return True\n except: \n return False", "def check_site_availability(url):\n\n try:\n conn = urllib.request.urlopen(url)\n except urllib.error.HTTPError as e:\n # Return code error (e.g. 404, 501, ...)\n print('HTTPError: {}'.format(e.code))\n logging.info('HTTPError: {}'.format(e.code))\n return int(e.code)\n except urllib.error.URLError as e:\n # Not an HTTP-specific error (e.g. connection refused)\n print('URLError: {}'.format(e.reason))\n logging.info('URLError: {}'.format(e.reason))\n return -7\n except Exception as e:\n # other reasons such as \"your connection is not secure\"\n print(e)\n logging.info(e)\n return -8\n\n # check if redirected\n if conn.geturl() != url:\n print(\"Redirected to {}\".format(conn.geturl()))\n logging.info(\"Redirected to {}\".format(conn.geturl()))\n return 302\n\n # reaching this point means it received code 200\n print(\"Return code 200\")\n logging.info(\"Return code 200\")\n return 200", "def is_alive(self):\n conn = HTTPConnection(self.browser.host, self.browser.port)\n conn.request(\"HEAD\", \"/invalid\")\n res = conn.getresponse()\n return res.status == 404", "def test_visit(self, client, site, landing_page):\n response = client.get(landing_page.relative_url(site))\n assert response.status_code == 200", "def _execute_action_http(self, action):\n browser = self._get_browser()\n\n # Open URL.\n browser.open(url=action['url'])\n\n while browser.is_busy is True:\n self._logger.debug(\"[~] Firefox is busy.\")\n time.sleep(1)\n\n self._logger.info('[+] HTTP: Opened URL %s.', action['url'])\n time.sleep(5)", "def open_browser():\n def _open_browser():\n if AIPS_WEBSERVER_HOST == \"localhost\":\n webbrowser.open(WEBSERVER_URL + '/%s' % FILE)\n thread = threading.Timer(0.5, _open_browser)\n thread.start()", "def verify(self):\n if self.geturl():\n return True\n return False", "def test_visit(self, client, site, content_page):\n response = client.get(content_page.relative_url(site))\n assert response.status_code == 200", "def url_is_alive(url: str) -> object:\n from ssl import _create_unverified_context\n from urllib.error import HTTPError, URLError\n from urllib.request import urlopen\n\n try:\n return urlopen(url, context=_create_unverified_context())\n except HTTPError:\n return False\n except URLError:\n return False", "def test_access_url(url, time_pause_sec=5):\n r = requests.get(url)\n if r.status_code != requests.codes.ok:\n time.sleep(5)\n r = requests.get(url)\n if r.status_code != requests.codes.ok:\n raise Exception('Error: status code is %s for URL: %s' %\n (str(r.status_code), url))", "def test_link(link):\n r = requests.get(link)\n if (r.status_code != 200):\n return False\n else:\n return True", "def go(self, url):\n self.driver.get(url)", "def open_webpage(browser, url, case, version, package):\n browser_obj = Browser(browser, version, case, package, url)\n if browser == \"firefox\":\n firefox(browser_obj)\n elif browser == \"opera\":\n opera(browser_obj)\n elif package == \"chromium\":\n chromium(browser_obj)\n elif browser == \"ie\":\n iexplorer(browser_obj)\n elif browser == \"edge\":\n edge(browser_obj)", "def step_impl(context):\r\n context.browser.get('https://opensource-demo.orangehrmlive.com/')\r\n time.sleep(10)", "def tela_inicial_do_challenge_1():\r\n # primeiro\r\n _url_site = \"http://rpachallenge.com/\"\r\n _current_url = 
_browser.current_url\r\n\r\n assert _current_url == _url_site", "def check_link(url):\n try:\n\n r = requests.get(url)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n print('Connection Failed!!!')", "def is_alive(self, site):\n try:\n return requests.get(site).status_code == 200\n except Exception:\n pass", "def onAboutLeoUrl(self,event=None):\n \n try:\n import webbrowser\n webbrowser.open(self.url)\n except:\n g.es(\"not found: \" + self.url)", "def goto_url(self, url):\n try:\n self._browser.get(url)\n except Exception as e:\n self.logger.error(\"Error going to url '\" + url + \"' : \" + str(e))\n raise", "def test_url(quartus, part, url):\n print(\"\\rChecking %s/%s \" % (quartus, part), end='')\n try:\n response = urllib.request.urlopen(url)\n headers = response.getheaders()\n return True\n except KeyboardInterrupt:\n sys.exit(1)\n except:\n return False", "def check_connection(url=\"http://example.com/\"):\n try:\n requests.head(url)\n return True\n except requests.ConnectionError:\n spinner.warn(\"No internet connecction 🤭\")\n sys.exit(1)", "def check_mitm_status_page(self, check_url):\n response = requests.get(check_url)\n if response.status_code == 200:\n return response\n else:\n sys.exit(2)", "def check_heartbeat(self):\n try:\n req = request(self.values['url'].data)\n response = urllib.urlopen(req)\n the_page = response.read()\n return True\n except urllib.HTTPError as e:\n if e.code == 400:\n return True\n else:\n logger.exception('[%s] - Exception when checking heartbeat')\n return False\n except Exception:\n logger.exception('[%s] - Exception when checking heartbeat')\n return False", "def open_in_browser(self):\n webbrowser.open(self.url)", "def test_main_page(remote_browser, base_url, logger_fixture):\n logger_fixture.info(\"===== test_main_page =====\")\n main_page = MainPage(remote_browser, base_url)\n main_page\\\n .open(logger_fixture)\\\n .check_featured_block_is_not_empty()", "def open_url(url):\n logger.debug('Opening %s', url)\n _stderr = os.dup(2)\n os.close(2)\n _stdout = os.dup(1)\n os.close(1)\n fd = os.open(os.devnull, os.O_RDWR)\n os.dup2(fd, 2)\n os.dup2(fd, 1)\n try:\n webbrowser.open(url)\n finally:\n os.close(fd)\n os.dup2(_stderr, 2)\n os.dup2(_stdout, 1)", "def url_checker(url_str):\n file_msg = fd.Program_Msg(__file__)\n ## Checking input parameters\n if not (isinstance(url_str, str)):\n msg = '{0} `url_str` ({1}) is not a STRING!'.format(file_msg,\n type(url_str))\n raise LSSUtils_Error(msg)\n ##\n ## Checking Website\n request_url = requests.get(url_str)\n if (request_url.status_code != 200):\n msg = '{0} `url_str` ({1}) does not exist!'.format(file_msg, url_str)\n raise LSSUtils_Error(msg)", "def test_unfetchable_url(self):\r\n url = u'file://test.html'\r\n read = readable.ReadUrl.parse(url)\r\n self.assertEqual(read.status, 901)", "def urlopen(url):\n response = object()\n try :\n request = urllib2.Request(url.encode(\"ascii\",\"ignore\"))\n request.add_header('User-agent',\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36')\n request.add_header(\"Accept-Language\", \"en-US\")\n request.add_header('Accept-Encoding', 'gzip')\n response = urllib2.urlopen(request)\n #print(\"LEN =\",response.info()[\"Content-length\"])\n return(response)\n except urllib2.URLError as e:\n # TEST\n #print(\"EROOR ->\", e)\n #print(\"EROOR ->\", e.code)\n return ThinBrowser.urlopen(url)\n except RuntimeError as e:\n return False", "def open_firefox():\r\n driver = 
install_firefox_proxy(LOCALHOST, PROXY_PORT_NUMBER)\r\n driver.get(STARTING_WEBSITE)", "def open_browser(url):\n import webbrowser\n webbrowser.open_new(url)", "def test_pages_are_valid(self):\n\n url = 'http://www.example.com'\n\n r = LiveRedirect(url=url,duration=HALF_DAY)\n r.save()\n\n TEST_URLS = [\n '%s/' % self.live_server_url,\n '%s/%s' % (self.live_server_url,r.slug),\n '%s/%s/' % (self.live_server_url,r.slug),\n ]\n\n for url in TEST_URLS:\n self.browser.get(url)\n\n body = self.browser.find_element_by_tag_name('body')\n title = self.browser.find_element_by_tag_name('title')\n\n # Check that it is not a 404 or 500\n self.assertNotIn('404',body.text,\"%s returns 404!\" % url)\n self.assertNotIn('500',body.text,\"%s returns 500!\" % url)\n\n # Check that title is valid\n\n self.assertNotIn('NO-TITLE',title.text,\"%s is using default base title!\" % url)\n self.assertIsNotNone(title.text, \"%s has no title!\" % url)\n self.assertNotEquals('',title.text, \"%s has no title!\" % url)", "def the_service_page_should_open(driver):\n assert wait_on_element(driver, 5, xpaths.services.title)\n time.sleep(1)", "def test_get_page(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def test_get_page(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def url_is_good(url):\n return website_re.match(url)\n # possible validation of reachability of website\n # http_response = requests.get(url)\n # return http_response < 400:", "def test_main_page_load(self):\n response = self.client.get(reverse(\"index\"))\n self.assertEqual(response.status_code, 200)", "def assertHttpOk(self, url):\n host = urlsplit(url)\n conn = HTTPConnection(host.netloc, timeout=4)\n conn.request('HEAD', host.path)\n res = conn.getresponse()\n self.assertEqual(res.status, 200)", "def prescreenUrl(url):\n if url == '':\n return False\n\n cmd = 'wget --spider -t 1 -T 10 {0} -O /dev/null'.format(url)\n p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)\n out, err = p.communicate()\n\n if p.returncode != 0:\n return False\n\n failed = ['404 Not Found', 'Giving up', 'failed', 'FAILED',\n 'Connection timed out']\n if any(x in out for x in failed):\n return False\n return True", "def browser_open(url):\n FNULL = open(os.devnull, 'w')\n subprocess.Popen([udata.browser, url], stdout=FNULL, stderr=subprocess.STDOUT )", "def call_website(link: str) -> str:\n r = requests.get(link)\n\n if r.status_code != 200:\n sys.exit(1)\n\n return r.text", "def load_page(url):\n try:\n response = urllib2.urlopen(url)\n html = response.read()\n\n if response.code == 200:\n body_text = html\n return html\n return \"\"\n except Exception:\n return \"\"", "def check_site(self, url, checks):\n success = False\n parse = urlparse.urlparse(url)\n connection = httplib.HTTPConnection(parse.netloc, timeout=30)\n retry_count = 0 \n\n while not success and retry_count < self.retries:\n try: \n connection.request(\"GET\", parse.path) #TODO: More complex in the future?\n success = True\n except:\n connection = httplib.HTTPConnection(parse.netloc)\n retry_count += 1\n time.sleep(self.secs_between_retries)\n\n if not success:\n return {\"error\":True, \"reason\":\"Failed to contact website\"}\n\n response = connection.getresponse()\n \n if response.status != 200:\n error_explanation = str(response.status)+\" \"+response.reason\n return {\"error\":True, \"reason\":error_explanation}\n \n url_content = response.read()\n for check in checks:\n if url_content.find(check) 
== -1:\n error_explanation = \"Failed to find '\"+check+\"' in page\"\n return {\"error\":True, \"reason\":error_explanation}\n \n return {\"error\":False, \"reason\":\"Site Seems Up\"}", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_view_success_code(self):\n tmp = self.get_url()\n response = self.client.get(tmp)\n self.assertEqual(response.status_code, 200)", "def test_connect_to_https_uri(self):\n\n self.browser.proxy_client.new_har(\"page\")\n\n po = self.catalog.load_pageobject('GenericPage')\n po.goto_page(self.https_authority)\n\n har_entry = self.browser.page_load_details(self.https_authority)\n\n assert har_entry is not None, \\\n \"failed to load the uri %s. http archive unavailable.\" \\\n % (self.https_authority)\n\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"failed to load uri %s. http archive response follows:\\n%s\" \\\n % (self.https_authority,pprint.pformat(har_entry))", "def can_connect(test_url):\n try:\n requests.get(test_url)\n except (OSError):#connection error\n logger.warning('couldn\\'t reach server on: {test_url}')\n return False\n return True", "def test_visit(self, client, site, homepage):\n response = client.get(homepage.relative_url(site))\n assert response.status_code == 200", "def is_ok(url: str) -> bool:\n try:\n resp = requests.get(url)\n except:\n return False\n return True if math.floor(resp.status_code / 100) == 2 else False", "def test_if_home_is_successful(client):\n\n url = reverse(\"home\")\n response = client.get(url)\n assert response.status_code == 200", "def test_can_create_redirect(self):\n\n #Homepage\n self.browser.get(self.live_server_url)\n\n #Look for the submit button and the two inputs\n url_field = self.browser.find_element_by_id('id_url')\n\n #duration is a slider now, so find that\n duration_field = self.browser.find_element_by_id('slider')\n\n # Enter something into the inputs\n url_field.send_keys('www.example.com')\n duration_field.send_keys(Keys.RIGHT)\n\n submit_button = self.browser.find_element_by_tag_name('input')\n submit_button.submit()\n\n # Ensure that the submit doesn't redirect the user somewhere stupid\n body = self.browser.find_element_by_tag_name('body')\n\n # Check that it is not a 404 or 500\n self.assertNotIn('404',body.text)\n self.assertNotIn('500',body.text)", "def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data", "def do_status(self, args):\n webbrowser.open(f\"{args.host}:{args.port}\")", "def isonline():\n\n conn = httplib.HTTPConnection(\"www.google.com\", timeout=5)\n try:\n conn.request(\"HEAD\", \"/\")\n conn.close()\n return True\n except:\n conn.close()\n return False", "def checkStatus(url):\n def checkForIndexPage(r):\n \"\"\"Checks whether it a given url is actually an Index Of page. 
Takes in a Request object\"\"\"\n soup = BeautifulSoup(r.text, 'lxml')\n head = soup.find('h1')\n if head != None and head.string != None and (\"Index of \" in head.string):\n return \"Shows 'Index Of' page ✘\" \n else:\n return \"Displays properly ✓\"\n\n returnString = \"\"\n try:\n r = requests.get(url)\n returnString += str(r.status_code) \n if r.status_code == 200: # if the page is accessible, then check whether it displays properly\n returnString += \"\\n\\t\" + checkForIndexPage(r)\n return returnString\n except Exception as e:\n return(e)", "def open(self):\r\n try:\r\n print('open( open_url=%s, force_refresh=%s, scraper_url=%s)'%( str(self.open_url), str(self.force_refresh), str(self.get_scraper_url())))\r\n if ((not self.open_url) or self.force_refresh) and not self.driver is None and not self.get_scraper_url() is None:\r\n current_url = self.web_driver.driver.current_url\r\n scraper_url = self.get_scraper_url()\r\n self.driver = self.web_driver.driver\r\n if '?' in scraper_url:\r\n scraper_url = scraper_url.split('?')[0]\r\n if not scraper_url in current_url:\r\n print( 'refresh %s ' % scraper_url )\r\n print('---------------------------------- open value is scraper url %s --------------------------- ' % scraper_url)\r\n self.driver.execute_script('window.open();')\r\n handles = self.driver.window_handles\r\n self.driver.switch_to.window(handles[-1])\r\n self.driver.get(scraper_url)\r\n print(self.driver.title)\r\n self.driver.switch_to.window(handles[0])\r\n self.driver.close()\r\n self.driver.switch_to.window(handles[-1])\r\n #self.driver.refresh()\r\n self.driver.implicitly_wait(15)\r\n self.parser_kwargs['driver'] = self.driver\r\n self.parser_kwargs['web_driver'] = self.web_driver\r\n try:\r\n self.default_parser = self.parser_kwargs.get('default_parser', None)\r\n if self.default_parser is None:\r\n self.default_parser = self.cache_manager.cache_output_component_func( self.parser_kwargs.get('default_parser', 'BeautifulSoupParser'), **self.parser_kwargs)\r\n self.default_parser.driver = self.web_driver.driver\r\n self.default_parser.reload_content()\r\n except:\r\n print('exception making parser')\r\n traceback.print_exc()\r\n self.open_url = True\r\n return True\r\n else:\r\n return False\r\n except:\r\n traceback.print_exc()\r\n return False", "def check_if_exist(self,url):\r\n\t\t\"\"\" verefier si un lien existe \"\"\"\r\n\t\trequest = mechanize.Request(url)\r\n\t\tBAD_REQ = [400,401,404]\r\n\t\ttry :\r\n\t\t\tresponse = mechanize.urlopen(request)\r\n\t\t\tif response.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True\r\n\t\texcept urllib2.HTTPError, error:\r\n\t\t\tif error.code in BAD_REQ:\r\n\t\t\t\treturn False\r\n\t\t\telse:\r\n\t\t\t\treturn True", "def Open(self, url):\n fp = self._opener.open(url)\n response = fp.read()\n fp.close()\n return response", "def _verify_page(self):", "def open(webpage_url):\n\twith youtube_dl.YoutubeDL(dict(forceurl=True)) as ydl:\n\t\tr = ydl.extract_info(webpage_url, download=False)\n\t\tmedia_url = r['formats'][-1]['url']\n\twebbrowser.open('googlechromes://' + media_url[8:] )", "def do_get(self, url):\n self.driver.get(url)" ]
[ "0.7061345", "0.6986661", "0.681532", "0.67337334", "0.6537021", "0.64900905", "0.64784175", "0.6409724", "0.6388426", "0.63827145", "0.63675433", "0.6326356", "0.6323878", "0.6320799", "0.631518", "0.62944365", "0.6293973", "0.62766635", "0.62705076", "0.6237263", "0.6229745", "0.6227832", "0.621815", "0.62038827", "0.61845624", "0.6147639", "0.61460435", "0.61057884", "0.60797256", "0.60634", "0.60575795", "0.6048104", "0.60356635", "0.6027001", "0.601555", "0.60115117", "0.5996046", "0.59872085", "0.59843946", "0.59798986", "0.59748346", "0.5965213", "0.5958871", "0.59397984", "0.5921852", "0.59012383", "0.589668", "0.5890764", "0.58848494", "0.5882415", "0.5867595", "0.5866432", "0.58659995", "0.5864511", "0.58522904", "0.5841976", "0.58387375", "0.5828643", "0.58243394", "0.58140904", "0.57911086", "0.5790972", "0.57758784", "0.57753754", "0.57740355", "0.5771291", "0.57679564", "0.57668865", "0.5766028", "0.57634026", "0.57634026", "0.5745737", "0.5736355", "0.57314503", "0.5727791", "0.57252216", "0.57118446", "0.5703788", "0.57036793", "0.5698149", "0.5698149", "0.5698149", "0.5698149", "0.5698149", "0.56886816", "0.5686549", "0.5679775", "0.5671326", "0.5661313", "0.56586593", "0.56578207", "0.56567514", "0.5653499", "0.5651874", "0.5637378", "0.5634823", "0.56300175", "0.5625149", "0.5621893", "0.5619548" ]
0.65583813
4
Verify that the Maven option can be selected.
def test_should_choose_maven(self):
    search_str = "//*[text()='Maven Project']"
    els = self.driver.find_elements_by_xpath(search_str)
    self.assertGreater(len(els), 0, 'Maven project is not found!')
    els[0].click()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_install_project(self):\n return True", "def test_target_repo(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check bennr01:dev\", exitcode=None)\n self.assertIn(\"Target: bennr01:dev\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)", "def test_project_administrator(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n return False", "def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")", "def Checktest(self, expectedoutput):\n\n if expectedoutput == 0:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"The configuration file does not exist.\", result.output)\n return\n\n if expectedoutput == 1:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"Name: Name\", result.output)\n self.assertIn(\"Email: [email protected]\", result.output)\n self.assertIn(\"Github username: GhUser\", result.output)", "def checkBuildStatus(self):\n pass", "def test_default_repo(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check dev\", exitcode=None)\n self.assertIn(\"Target: ywangd:dev\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)", "def test_functionality(self):\n self.browserObject = globalVars.browserObject\n \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Read only', loginAsUser=True)\n \n #Navigate to Repositories Page\n self.get_RepositoriesPage(\"Firmware\")\n \n self.logout()\n \n #Verify Options", "def test_version():\n with open(\"pyproject.toml\") as f:\n tomllines = f.read().splitlines()\n tomlversion = set([l for l in tomllines if \"version =\" in l])\n initversion = set([f'version = \"{mei2volpiano.__version__}\"'])\n # set is there to catch any duplicate/additional entries\n assert initversion == tomlversion", "def mvn(version):\n\tif version is None:\n\t\tmvn_list = get_mvn_list()\n\t\t_err('Available Maven versions: {0}'.format(mvn_list))\n\tget_mvn(version)", "def test_project_reader(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n if is_project_writer(project):\n return True\n if is_project_reader(project):\n return True\n return False", "def test_default(self):\r\n self.assertEqual(self.option.default, False)", "def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)", "def test_installed(self):\n check_output('unity --help', shell=True)", "def test_version_dropdown(plugin_dialog):\n widget = plugin_dialog.available_list.item(1).widget\n assert widget.version_choice_dropdown.currentText() == \"3\"\n # switch from PyPI source to conda one.\n widget.source_choice_dropdown.setCurrentIndex(1)\n assert widget.version_choice_dropdown.currentText() == \"4.5\"", "def test_build_tools(self):\n #raise AssertionError(\"%s not implemented\" % sys._getframe().f_code.co_name)\n if self.status: self.status.Warning(\"By default build tools is Xilinx this can be changed in demo/nysa_platform.py\")\n if find_xilinx_path() is None:\n return False\n return True", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate 
--check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def verify_package_status(self):\n pass", "def verify_package_status(self):\n pass", "def check_options(options, parser):\n if not options.get('release_environment', None):\n print(\"release environment is required\")\n parser.print_help()\n return os.EX_USAGE\n\n return 0", "def _in_travis(): # pragma: no cover\n return 'TRAVIS' in os.environ", "def package_version_check(args, parser):\n if (args.build or args.check) and args.package_version:\n parser.error('--package-version works only with --create')", "def test_default(self):\r\n self.assertEqual(self.option.default, 'testing')", "def test_default_select():\n # Arange\n REPO = \"https://foo.bar/foobar\"\n\n # Act\n rm = gcbo.RepoManager(REPO)\n\n # Assert\n assert rm.select() == REPO", "def test_check_no_download(self):\n output = self.run_command(\"selfupdate --check\", exitcode=0)\n contains_latest_version = (\"Already at latest version\" in output)\n contains_new_version = (\"New version available\" in output)\n assert (contains_latest_version or contains_new_version)\n self.assertNotIn(\"Url: \", output)\n self.assertNotIn(\"Update completed.\", output)\n self.assertNotIn(\"Failed to update. Please try again.\", output)", "def __gitVerify(self):\n self.vcs.gitVerify(self.project.getProjectPath())", "def test_project_writer(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n if is_project_writer(project):\n return True\n return False", "def check(self, context):\r\n return context.config.preset is not None", "def _cmake_needed():\n if \"NOX_INSTALL_CMAKE\" in os.environ:\n return True\n\n return shutil.which(\"cmake\") is None", "def check_requirement(self):\n raise NotImplementedError", "def soft_assert_cannot_make_proposal(info_page, soft_assert):\n soft_assert.expect(not info_page.is_propose_changes_btn_exists,\n \"'Propose Changes' button should not be displayed.\")", "def _check_version(self, project, targetdir):\r\n versionfile = os.path.join(targetdir, 'project.version')\r\n if (os.path.exists(versionfile)):\r\n file_ = open(versionfile, \"r\")\r\n projectname = file_.read().strip()\r\n file_.close()\r\n if (projectname == project.objectname):\r\n return True\r\n return False", "def test_default(self):\r\n self.assertEqual(self.option.default, 'hello')", "def test_has_role_public(self):\n self.project.set_public()\n self.assertFalse(self.category.has_role(self.user_bob))\n self.assertTrue(self.project.has_role(self.user_bob))", "def check_in_repo():\n if not os.path.isfile(\"setup.py\"):\n return \"Not in root-level PyTorch repo, no setup.py found\"\n with open(\"setup.py\") as f:\n s = f.read()\n if \"PyTorch\" not in s:\n return \"Not in PyTorch repo, 'PyTorch' not found in setup.py\"", "def in_travis_pr():\n # NOTE: We're a little extra cautious and make sure that the\n # PR environment variable is an integer.\n try:\n int(os.getenv(TRAVIS_PR_ENV, ''))\n return True\n except ValueError:\n return False", "def test_is_revoked(self):\n self.assertEqual(self.project.is_revoked(), False)", "def test_is_remote(self):\n self.assertEqual(self.project.is_remote(), False)", "def checkAttributes(self):\n if len(self.lSteps) == 0:\n msg = \"ERROR: 
missing compulsory option --step\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if len(self.lSteps) > 1:\n msg = \"ERROR: --step takes a single step\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.lSteps[0] not in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]:\n msg = \"ERROR: unknown --step %s\" % self.lSteps[0]\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"1\" in self.lSteps or \"2\" in self.lSteps or \"3\" in self.lSteps \\\n or \"4\" in self.lSteps:\n if not self.project1Id:\n msg = \"ERROR: missing compulsory option --proj1\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"4\" in self.lSteps or \"5\" in self.lSteps or \"6\" in self.lSteps \\\n or \"7\" in self.lSteps or \"8\" in self.lSteps:\n if not self.project2Id:\n msg = \"ERROR: missing compulsory option --proj2\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.project1Id and \"_\" in self.project1Id:\n msg = \"ERROR: forbidden underscore '_' in project identifier '%s'\" \\\n % self.project1Id\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.project2Id and \"_\" in self.project2Id:\n msg = \"ERROR: forbidden underscore '_' in project identifier '%s'\" \\\n % self.project2Id\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not self.samplesFile:\n msg = \"ERROR: missing compulsory option --samples\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not os.path.exists(self.samplesFile):\n msg = \"ERROR: can't find file %s\" % self.samplesFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not self.scheduler:\n msg = \"ERROR: missing compulsory option --schdlr\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.scheduler == \"OGE\":\n self.scheduler = \"SGE\"\n if not self.queue:\n msg = \"ERROR: missing compulsory option --queue\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.lSteps == []:\n msg = \"ERROR: missing compulsory option --step\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"1\" in self.lSteps:\n if not Utils.isProgramInPath(\"fastqc\"):\n msg = \"ERROR: can't find 'fastqc' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"2\" in self.lSteps:\n if not Utils.isProgramInPath(\"demultiplex.py\"):\n msg = \"ERROR: can't find 'demultiplex.py' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n obsMajVer, obsMinVer = ProgVersion.getVersion(\"demultiplex.py\")\n if not (obsMajVer == 1 and obsMinVer >= 14):\n msg = \"ERROR: 'demultiplex.py' is in version %s.%s\" % \\\n (obsMajVer, obsMinVer)\n msg += \" instead of >= 1.14.0\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"3\" in self.lSteps:\n if not Utils.isProgramInPath(\"cutadapt\"):\n msg = \"ERROR: can't find 'cutadapt' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not self.adpFile:\n msg = \"ERROR: missing compulsory option --adp\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not os.path.exists(self.adpFile):\n msg = \"ERROR: can't find file %s\" % self.adpFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.maxNPerc < 0 or self.maxNPerc > 1:\n msg = \"ERROR: --maxNp %f should be between 0 and 1\" \\\n % self.maxNPerc\n 
sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"4\" in self.lSteps:\n if not Utils.isProgramInPath(\"bwa\"):\n msg = \"ERROR: can't find 'bwa' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not Utils.isProgramInPath(\"samtools\"):\n msg = \"ERROR: can't find 'samtools' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not Utils.isProgramInPath(\"picard.jar\"):\n msg = \"ERROR: can't find 'picard.jar' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not self.dictFile:\n msg = \"ERROR: missing compulsory option --dict\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not os.path.exists(self.dictFile):\n msg = \"ERROR: can't find file %s\" % self.dictFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if os.path.dirname(self.dictFile) == '':\n self.dictFile = \"%s/%s\" % (os.getcwd(), self.dictFile)\n if not self.queue2:\n msg = \"ERROR: missing compulsory option --queue2\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"5\" in self.lSteps:\n if not Utils.isProgramInPath(\"GenomeAnalysisTK.jar\"):\n msg = \"ERROR: can't find 'GenomeAnalysisTK.jar' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n obsMajVer, obsMinVer = ProgVersion.getVersionGatk()\n expMajVer = 3\n expMinVer = 5\n if not (obsMajVer == expMajVer and obsMinVer >= expMinVer):\n msg = \"ERROR: 'GATK' is in version %s.%s\" % \\\n (obsMajVer, obsMinVer)\n msg += \" instead of >= %i.%i\" % (expMajVer, expMinVer)\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.knownIndelsFile and not os.path.exists(self.knownIndelsFile):\n msg = \"ERROR: can't find file %s\" % self.knownIndelsFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"6\" in self.lSteps or \"7\" in self.lSteps or \"8\" in self.lSteps or \\\n \"9\" in self.lSteps:\n if not Utils.isProgramInPath(\"GenomeAnalysisTK.jar\"):\n msg = \"ERROR: can't find 'GenomeAnalysisTK.jar' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n obsMajVer, obsMinVer = ProgVersion.getVersionGatk()\n if not (obsMajVer == 3 and obsMinVer >= 5):\n msg = \"ERROR: 'GATK' is in version %s.%s\" % \\\n (obsMajVer, obsMinVer)\n msg += \" instead of >= 3.5\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"4\" in self.lSteps or \"5\" in self.lSteps or \"6\" in self.lSteps or \\\n \"7\" in self.lSteps or \"8\" in self.lSteps or \"9\" in self.lSteps:\n if not self.pathToPrefixRefGenome:\n msg = \"ERROR: missing compulsory option --ref\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not os.path.exists(\"%s.bwt\" % self.pathToPrefixRefGenome):\n msg = \"ERROR: can't find file %s.bwt\" % self.pathToPrefixRefGenome\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not os.path.exists(\"%s.fa.fai\" % self.pathToPrefixRefGenome):\n msg = \"ERROR: can't find file %s.fa.fai\" % self.pathToPrefixRefGenome\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if os.path.dirname(self.pathToPrefixRefGenome) == \"\":\n self.pathToPrefixRefGenome = \"%s/%s\" % (os.getcwd(),\n self.pathToPrefixRefGenome)\n if \"8\" in self.lSteps or \"9\" in self.lSteps:\n if not self.jointGenoId:\n msg = \"ERROR: missing compulsory option --jgid\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n \n if 
\"9\" in self.lSteps:\n if self.restrictAllelesTo not in [\"ALL\", \"BIALLELIC\",\n \"MULTIALLELIC\"]:\n msg = \"ERROR: unknown option --rat %s\" % self.restrictAllelesTo\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.famFile:\n if not os.path.exists(self.famFile):\n msg = \"ERROR: can't find file %s\" % self.famFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.excludeSampleFile:\n if not os.path.exists(self.excludeSampleFile):\n msg = \"ERROR: can't find file %s\" % self.excludeSampleFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)", "def _check_options(self, options):\r\n xmi_file = options.get(\"xmi_file\")\r\n if not xmi_file or not os.path.exists(xmi_file):\r\n self._error(\"Select XMI file\")\r\n return \r\n\r\n target_folder = options[\"target_folder\"]\r\n if not target_folder:\r\n self._error(\"Select target folder\")\r\n return\r\n \r\n if not os.path.exists(target_folder):\r\n self._error(\"Target folder not exists\")\r\n return \r\n \r\n return True", "def _check_config(self):", "def test_getboolean(self):\n self.assertEqual(self.config.getboolean('advanced','bool'),True)", "def test_get_project(self):\n pass", "def test_passes_check():\n from django.core.management import call_command\n\n call_command('check', 'django_recommend')", "def test_default(self):\r\n self.assertEqual(self.option.default, 1234)", "def check_settings(self):\r\n pass", "def test_installed(self):\n self.assertTrue(self.qi.isProductInstalled(PROJECTNAME))", "def _check_for_license_acceptance(self, dep):\n if \"license\" in self.dependency_dict[dep]:\n license_name = self.dependency_dict[dep][\"license\"]\n else:\n license_name = \"restrictive\"\n if \"license_file\" in self.dependency_dict[dep]:\n license_text = Path(\n self.dependency_dict[dep][\"license_file\"]\n ).read_text()\n logger.warning(license_text)\n while \"invalid answer\":\n reply = (\n str(\n input(\n f\"Do you accept this {license_name} license? 
(y/n): \"\n )\n )\n .lower()\n .strip()\n )\n if len(reply) > 0:\n if reply[0] == \"y\":\n return True\n if reply[0] == \"n\":\n return False", "def toolHasOptions(*args, **kwargs)->bool:\n pass", "def test_quick_build(self):\n pass", "def check(self) -> bool:\n return self.run_poetry([\"check\", \"-vvv\"])", "def test_test_property():\n\n contents = (\"[Info]\\n\"\n \"sdk = 23\")\n\n testutils.deploy_config_raw(contents)\n\n assert prop.test_prop('info', 'sdk') == 1\n\n testutils.undeploy()\n\n return 0", "def test_quick_build1(self):\n pass", "def test_get_repo_url_OK(self):\n repo_url = doxi.get_repo_url()\n self.assertEqual(repo_url,TestDoxygenIFX.repo_url)", "def test_ensure_xcode_available(self, mock_run):\n build_cmake_project.ensure_xcode_available()\n mock_run.assert_called_once_with(\n args='xcode-select --print-path', check=True, shell=True)", "def test_scan_and_find_dependencies_maven():\n manifests = [{\n \"filename\": \"dependencies.txt\",\n \"filepath\": \"/bin/local\",\n \"content\": open(str(Path(__file__).parent / \"data/dependencies.txt\")).read()\n }]\n res = DependencyFinder().scan_and_find_dependencies(\"maven\", manifests)\n assert \"result\" in res\n resolved = res['result'][0]['details'][0]['_resolved'][0]\n assert resolved['package'] == \"io.vertx:vertx-core\"\n assert len(resolved['deps']) == 15", "def test_version(self):\n pass", "def check(self):\n json = JsonBackend(\"../src/builder/projects.json\")\n json.load()\n\n TM_ITSELF = 1\n expected_files = TM_ITSELF + sum(p.downloadable is True\n for p in json.projects)\n self.downloads_for_project('tots', expected_files)\n\n expected_files = TM_ITSELF + sum(p.softcatala is True and\n p.downloadable is True\n for p in json.projects)\n\n self.downloads_for_project('softcatala', expected_files)\n\n expected_files = 1\n for project_dto in json.projects:\n if not project_dto.downloadable:\n continue\n\n self.downloads_for_project(project_dto.name, expected_files)\n self.check_project_link(project_dto.projectweb)", "def check_packages(options):\n print '\\033[1;33m# Checking direct VIKI dependencies\\033[1;m'\n installed_ok = dependencies.check_installed_packages()\n print '\\n\\033[1;33m# Checking second level ROS dependencies, using rosdep\\033[1;m'\n second_level_ok = dependencies.get_second_level_dependencies()\n\n if installed_ok and second_level_ok:\n print '\\033[1;32mAll dependencies satisfied!\\033[1;m'\n else:\n print '\\033[1;31mTry running [viki install-dependencies] to install the dependencies\\033[1;m'", "def test_pm_Completeness(self):\n pass", "def verify_inputs(self):\n if self.has_source():\n raise Exception(\"Installation from source is only available for \"\n \"`virtualenv` manager\")\n if self.has_extras():\n raise Exception(\"Installation of extras only possible for \"\n \"`virtualenv` manager\")", "def test_check_version(self):\n\n # Check if pandaseq version is supported for this test\n accepted_version = (2,4)\n command = \"pandaseq -v\"\n version_cmd = Popen(command, shell=True, universal_newlines=True,\n stdout=PIPE,stderr=STDOUT)\n stdout = version_cmd.stdout.read()\n #print stdout\n version_string = stdout.strip().split()[1]\n #print version_string\n try:\n version = tuple(map(int, version_string.split('.')))\n #print version\n pass_test = version == accepted_version\n except ValueError:\n pass_test = False\n version_string = stdout\n self.assertTrue(pass_test,\n \"Unsupported pandaseq version. 
%s is required, but running %s.\" \n %('.'.join(map(str, accepted_version)), version_string))", "def test_get_repo_name_OK(self):\n repo_name = doxi.get_repo_name(TestDoxygenIFX.repo_url)\n self.assertEqual(repo_name,'repository-name')", "def dependencies_met():\n # Check Java VM command line runner.\n try:\n Popen(['java'], shell=False, stderr=PIPE).communicate()[1]\n except:\n print 'Dependecy unmet. Java virtual machine command line runner not ' \\\n 'found.'\n return False\n # Check selenium-server.jar is ready to run.\n output = Popen(('java -jar %s -unrecognized_argument' % SELENIUM_RC_PATH\n ).split(), shell=False, stderr=PIPE).communicate()[1]\n if not re.search('Usage: java -jar selenium-server.jar', output):\n print 'Dependecy unmet. Selenium RC server (selenium-server.jar) not ' \\\n 'found.'\n return False\n # Check selenium RC python driver is available.\n try:\n import selenium\n except:\n print 'Dependecy unmet. Selenium RC python driver (selenium.py) not ' \\\n 'found.'\n return False\n # Check CherryPy wsgi server is available.\n try:\n import wsgiserver\n except:\n print 'Dependecy unmet. CherryPy wsgi server (wsgiserver.py) not found.'\n return False\n # Check fixture support is implemented for the database engine.\n if not settings.DATABASE_ENGINE in ['sqlite3', 'postgresql_psycopg2']:\n print 'Dependecy unmet. Fixture support for database engine %s not ' \\\n 'implemented.' % settings.DATABASE_ENGINE\n return False\n return True", "def in_travis():\n return os.getenv(IN_TRAVIS_ENV) == 'true'", "def check_config(self):\n # Check if tool is at all included in workflow\n if \"external\" not in self.config[\"tools\"][\"dna\"]:\n return # External not run, don't check configuration # pragma: no cover", "def test_is_installed():\n assert _is_installed('coverage') is True # regular dependency\n assert _is_installed('pytest') is True # dev dependency\n assert _is_installed('missing') is False # missing dependency", "def test_2x_only_python_version_deploy():\n pass", "def test_input1(self):\n in1 =\"elephant\"\n result = options.check(in1)\n self.assertEqual(result,True)", "def main() -> int:\n version: str | None = None\n\n if (path_pyproject := Path(\"pyproject.toml\")).is_file():\n with open(path_pyproject, \"rb\") as fp:\n data = tomllib.load(fp)\n\n try:\n version = data[\"project\"][\"version\"]\n except KeyError:\n pass\n\n if version is None and (path_setup_cfg := Path(\"setup.cfg\")).is_file():\n parser = configparser.ConfigParser()\n parser.read(path_setup_cfg)\n\n try:\n version = parser[\"metadata\"][\"version\"]\n except KeyError:\n pass\n\n if version is None:\n return 1\n print(version)\n return 0", "def test_default_repo_branch(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check\", exitcode=None)\n self.assertIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Target: ywangd:dev\", output)", "def test_verify_recursive_and_transverse_acl_options():", "def check_config(cfg):", "def check_settings(self):\n pass", "def check_cmake():\n chk = Popen(\"cmake --version\", shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderr = chk.communicate()\n stdout = stdout.decode()\n if stderr and OS_VERSION[0] == \"Windows\":\n stdout, stderr = check_cmake_windows()\n if stderr:\n out_error(\"CMake could not be found. 
See \"\n \"https://github.com/deepfakes/faceswap/blob/master/INSTALL.md#cmake \"\n \"for instructions\")\n return\n cmake = [re.sub(\" +\", \" \", line.strip())\n for line in stdout.splitlines()\n if line.lower().strip().startswith(\"cmake\")][0]\n version = cmake[cmake.rfind(\" \") + 1:]\n out_info(\"CMake version: {}\".format(version))", "def check(self):\n with working_dir(self.build_directory):\n self._if_ninja_target_execute(\"test\", parallel=False)", "def test_3x_only_python_versions_deploy():\n pass", "def testJava(self):\n self.assertEqual(\n self.java,\n self.config.java\n )", "def test_is_remote_source(self):\n self.assertEqual(self.project.is_remote(), False)", "def testNeedProjectSetup(self, mock_ans):\n # Test need project setup.\n self.gcp_env_runner.project = \"\"\n self.gcp_env_runner.zone = \"\"\n self.assertTrue(self.gcp_env_runner._NeedProjectSetup())\n # Test no need project setup and get user's answer.\n self.gcp_env_runner.project = \"test_project\"\n self.gcp_env_runner.zone = \"test_zone\"\n self.gcp_env_runner._NeedProjectSetup()\n mock_ans.assert_called_once()", "def postCheckDeps(self):\n if( self.mode == \"install\" ):\n\n # check for make\n if( not isinPath( \"make\" )):\n self.abort( \"make not found on your system!!\" )\n\n # check for tee\n if( not isinPath( \"tee\" )):\n self.abort( \"tee not found on your system!!\" )", "def check_phenotools():\n if not os.path.exists(PHENOTOOLS):\n os.system('../setup.py')\n else:\n print(\"[INFO] we found \")", "def test_package_check_download(self, isfile):\n isfile.return_value = False\n r = self.client.get(self.check_download_url)\n eq_(r.status_code, 200)\n eq_(r.content, '{\"ready\": false}')\n isfile.return_value = True\n r = self.client.get(self.check_download_url)\n eq_(r.status_code, 200)\n eq_(r.content, '{\"ready\": true}')", "def test_get_project(self):\n self.assertEqual(self.remote_project.get_project(), self.project)", "def test_build_property_ro(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n try:\n v1.build = 9001\n except AttributeError:\n passed = True\n else:\n passed = False\n\n self.assertTrue(passed)", "def test_version() -> None:\n assertion.assert_(Version, nanoqm.__version__)", "def test_0030_check_workflow_repository(self):\n repository = self.test_db_util.get_repository_by_name_and_owner(workflow_repository_name, common.test_user_1_name)\n strings_displayed = ['Workflows', 'New workflow for 0060_filter', '0.1']\n strings_not_displayed = ['Valid tools', 'Invalid tools']\n self.display_manage_repository_page(repository, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed)", "def test_dependencies_installed(self):\n installer = getattr(self.portal, 'portal_quickinstaller')\n self.assertTrue(installer.isProductInstalled('plone.app.dexterity'))", "def test_input2(self):\n in1 =\"aple\"\n result = options.checkw(in1)\n self.assertEqual(result,False)", "def test_semantic_version():\n semantic_version.Version(settings.VERSION)", "def test_version(self) -> None:\n with open(\"pyproject.toml\") as f:\n for line in f:\n if \"version\" in line:\n version = line.split()[-1].replace('\"', \"\")\n break\n self.assertEqual(__version__, version)", "def enforce_clean_option(args, run):\n repos = run.experiment_info[\"repositories\"]\n if not repos:\n raise RuntimeError(\n \"No version control detected. 
\"\n \"Cannot enforce clean repository.\\n\"\n \"Make sure that your sources under VCS and the \"\n \"corresponding python package is installed.\"\n )\n else:\n for repo in repos:\n if repo[\"dirty\"]:\n raise RuntimeError(\n \"EnforceClean: Uncommited changes in \"\n 'the \"{}\" repository.'.format(repo)\n )", "def test_read_project(self):\n pass", "def test_read_project(self):\n pass", "def test_is_prime_valid(self):\n sol = solution.Solution();\n self.assertTrue(sol.isPrime(2))\n self.assertTrue(sol.isPrime(3))\n self.assertTrue(sol.isPrime(7))\n #self.assertTrue(sol.isPrime(863))", "async def verifyset(self, ctx: commands.Context):\n if ctx.invoked_subcommand is None:\n guild = ctx.guild\n data = await self.config.guild(guild).all()\n color = await ctx.embed_color()\n role_config = data[\"temprole\"], data[\"autoroles\"]\n logs, toggle = data[\"logs\"], data[\"toggle\"]\n temprole = \"No temporary role set, use `[p]verifyset temprole` to use one.\"\n autoroles = \"See `{prefix}verifyset autorole list` for a list of roles given.\".format(\n prefix=ctx.prefix\n )\n if role_config[0]:\n temprole = discord.utils.get(ctx.guild.roles, id=role_config[0])\n\n if logs is None:\n log_info = (\n \"No channel for logging has been set, use `{prefix}verifyset log`\"\n \"first.\".format(prefix=ctx.prefix)\n )\n else:\n log_info = discord.utils.get(ctx.guild.text_channels, id=int(logs))\n\n embed = discord.Embed(color=color)\n embed.title = \"{}'s Settings\".format(guild.name)\n embed.description = (\n \"Please make sure you setup the Verification Channel and Selected Role.\\nOnce \"\n \"that's done, make sure to set the Active to True or else this won't work.\"\n )\n embed.add_field(name=\"Active:\", value=toggle, inline=False)\n embed.add_field(name=\"Temporary Role:\", value=temprole, inline=True)\n embed.add_field(name=\"Role to give after verification:\", value=autoroles, inline=True)\n embed.add_field(name=\"Logging Channel:\", value=log_info, inline=True)\n await ctx.send(embed=embed)", "def args_ok(inoptions, pos_args):\n\n if inoptions.factory_properties:\n for key in inoptions.factory_properties:\n setattr(inoptions, key, inoptions.factory_properties[key])\n\n if inoptions.list_masters:\n return True\n\n if inoptions.build_properties and not inoptions.master_dir:\n if inoptions.build_properties['mastername']:\n inoptions.mastername = inoptions.build_properties['mastername']\n else:\n print >>sys.stderr, 'error: build properties did not specify a ',\n print >>sys.stderr, 'mastername'\n return False\n else:\n if not (inoptions.master_dir or pos_args):\n print >>sys.stderr, 'error: you must provide a mastername or ',\n print >>sys.stderr, 'directory!'\n return False\n else:\n if not inoptions.master_dir:\n inoptions.mastername = pos_args.pop(0)\n\n if inoptions.stepfilter:\n if inoptions.stepreject:\n print >>sys.stderr, ('Error: can\\'t specify both stepfilter and '\n 'stepreject at the same time!')\n return False\n\n try:\n inoptions.step_regex = re.compile(inoptions.stepfilter)\n except re.error as e:\n print >>sys.stderr, 'Error compiling stepfilter regex \\'%s\\': %s' % (\n inoptions.stepfilter, e)\n return False\n if inoptions.stepreject:\n if inoptions.stepfilter:\n print >>sys.stderr, ('Error: can\\'t specify both stepfilter and '\n 'stepreject at the same time!')\n return False\n try:\n inoptions.stepreject_regex = re.compile(inoptions.stepreject)\n except re.error as e:\n print >>sys.stderr, 'Error compiling stepreject regex \\'%s\\': %s' % (\n inoptions.stepfilter, e)\n return 
False\n\n if inoptions.list_builders:\n return True\n\n if inoptions.build_properties and not (inoptions.slavehost or\n inoptions.builder):\n if inoptions.build_properties['buildername']:\n inoptions.builder = inoptions.build_properties['buildername']\n else:\n print >>sys.stderr, 'error: build properties did not specify a '\n print >>sys.stderr, 'buildername!'\n return False\n else:\n if not (pos_args or inoptions.slavehost or inoptions.builder):\n print >>sys.stderr, 'Error: you must provide a builder or slave hostname!'\n return False\n\n inoptions.spec = {}\n if inoptions.builder:\n inoptions.spec['builder'] = inoptions.builder\n elif inoptions.slavehost:\n inoptions.spec['hostname'] = inoptions.slavehost\n else:\n inoptions.spec['either'] = pos_args.pop(0)\n\n if inoptions.list_steps:\n return True\n\n if inoptions.logfile == '-' or inoptions.annotate:\n inoptions.log = sys.stdout\n else:\n try:\n inoptions.log = open(inoptions.logfile, 'w')\n except IOError as err:\n errno, strerror = err\n print >>sys.stderr, 'Error %d opening logfile %s: %s' % (\n inoptions.logfile, errno, strerror)\n return False\n\n if hasattr(inoptions, 'build_properties') and not hasattr(\n inoptions, 'svn_rev'):\n if inoptions.build_properties['revision']:\n try:\n setattr(inoptions, 'revision', int(\n inoptions.build_properties['revision']))\n except ValueError:\n setattr(inoptions, 'revision', None)\n\n if not (hasattr(inoptions, 'revision') and inoptions.revision) and (\n inoptions.build_properties['got_revision']):\n try:\n setattr(inoptions, 'revision', int(\n inoptions.build_properties['got_revision']))\n except ValueError:\n setattr(inoptions, 'revision', None)\n\n if not inoptions.revision or inoptions.revision < 1:\n print >>sys.stderr, 'Error: revision must be a non-negative integer!'\n return False\n else:\n print >>sys.stderr, 'error: build properties did not specify a revision!'\n return False\n\n print >>sys.stderr, 'using revision: %d' % inoptions.revision\n inoptions.build_properties['revision'] = '%d' % inoptions.revision\n else:\n if inoptions.svn_rev:\n try:\n inoptions.revision = int(inoptions.svn_rev)\n except ValueError:\n inoptions.revision = None\n\n if not inoptions.revision or inoptions.revision < 1:\n print >>sys.stderr, 'Error: svn rev must be a non-negative integer!'\n return False\n\n if not inoptions.annotate:\n print >>sys.stderr, 'using revision: %d' % inoptions.revision\n else: # nothing specified on command line, let's check LKGR\n inoptions.revision, errmsg = get_lkgr()\n if not inoptions.revision:\n print >>sys.stderr, errmsg\n return False\n if not inoptions.annotate:\n print >>sys.stderr, 'using LKGR: %d' % inoptions.revision\n\n return True", "def __gitVerifyBundle(self):\n self.vcs.gitVerifyBundle(self.project.getProjectPath())", "def test_getVersion(self):\n version = Version('foo', 2, 1, 0)\n project = self.makeProject(version)\n self.assertEquals(project.getVersion(), version)", "def check_requirements(self): # pylint: disable=no-self-use\n self.is_skipped = False" ]
[ "0.56208634", "0.5418946", "0.5402192", "0.5377991", "0.53591835", "0.53054255", "0.5300273", "0.52879065", "0.52866113", "0.5282876", "0.52460307", "0.5236804", "0.5199385", "0.51885027", "0.5179016", "0.5172662", "0.5153789", "0.5144154", "0.5144154", "0.51356703", "0.50819343", "0.5067656", "0.5066592", "0.5057324", "0.50542575", "0.50278634", "0.50239307", "0.5019979", "0.50186574", "0.50186473", "0.5017039", "0.5013133", "0.5011863", "0.5007255", "0.500432", "0.5001735", "0.49974167", "0.49913672", "0.49897492", "0.49892962", "0.49862337", "0.49821925", "0.4977226", "0.4976591", "0.4976579", "0.49754527", "0.49681485", "0.49669418", "0.4955426", "0.49482456", "0.4947297", "0.49410823", "0.49387097", "0.4937102", "0.49368513", "0.4932951", "0.49238497", "0.49163303", "0.4914062", "0.4911446", "0.49048924", "0.4903034", "0.48935255", "0.48922887", "0.4891449", "0.48903897", "0.48899361", "0.48868832", "0.48838347", "0.4879476", "0.4879022", "0.487198", "0.48675567", "0.48662376", "0.48636046", "0.48506236", "0.48441076", "0.48374918", "0.48286498", "0.48220223", "0.4816657", "0.480961", "0.4809066", "0.48052534", "0.47977263", "0.4795495", "0.47918743", "0.4776642", "0.47757208", "0.4771791", "0.4771504", "0.47651538", "0.4757617", "0.4757617", "0.47558546", "0.47554868", "0.4750294", "0.4744804", "0.47443035", "0.47439796" ]
0.7265652
0
Checks that all transformers in self.transformer_list are compatible with methods fit, transform and fit_transform.
def _check_transformers(self): assert all([hasattr(trf, "fit") for trf in self.transformer_list]), "At least one transformer object is not " \ "compatible with 'fit' method." assert all([hasattr(trf, "transform") for trf in self.transformer_list]), "At least one classifier object " \ "is not compatible with " \ "'transform' method." assert all([hasattr(trf, "fit_transform") for trf in self.transformer_list]), "At least one classifier " \ "object is not compatible with " \ "'fit_transform' method."
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_transforms(self):\n if len(self.transforms) > 1:\n for transform in self.transforms:\n if transform.applies is None:\n raise ValueError(\n 'If more than one transform is provided, each '\n 'provided transform must provide an apply field.',\n )", "def _validate_transforms(self):\n if len(self.transforms) > 1:\n for transform in self.transforms:\n if transform.applies is None:\n raise ValueError(\n 'If more than one transform is provided, each '\n 'provided transform must provide an apply field.',\n )", "def _ensure_transform(\n self, message: dict, transformers: Optional[List[Callable]] = None\n ) -> None:\n required_transformers = self.__requiredtransformers__\n\n missing_transformers = None\n if required_transformers and not transformers:\n missing_transformers = required_transformers\n\n called = set()\n if transformers:\n for func in transformers:\n if isinstance(func, functools.partial):\n called.add(func.func.__name__)\n else:\n called.add(func.__name__)\n\n func(message=message)\n\n if required_transformers != called:\n missing_transformers = required_transformers.difference(called)\n\n if missing_transformers:\n raise MissingTransformersError(self.__class__.__name__, missing_transformers)", "def check_if_it_can_fit(object):\n if hasattr(object, \"fit\") and hasattr(object, \"predict\") and hasattr(object, \"get_params\") and hasattr(object,\n \"set_params\"):\n return object\n else:\n raise Exception(\"Pass an estimator that has methods fit predict set_params get_params\")", "def _check_integrity(self):\n for f in self.list_func:\n if(not(isinstance(f, (pFunc_collec, pFunc_base, pFunc_fromcallable)))):\n raise ValueError('type %s while expecting pFunc_base or collection'\n ' ' % (str(type(f))))\n f._check_integrity()", "def validate_bettertransformer(self):\n if self.num_heads is None:\n raise ValueError('Number of heads not set for `BetterTransformer` integration.')\n if self.embed_dim is None:\n raise ValueError('Embedding dimension not set for `BetterTransformer` integration.')\n if self.norm2_eps is None or self.norm1_eps is None:\n raise ValueError('`norm2_eps` and `norm1_eps` not set for `BetterTransformer` integration.')\n if self.pos_emb_type is not None and self.pos_emb_type != 'absolute':\n raise ValueError(f'Positional embedding type {self.pos_emb_type} not supported for `BetterTransformer` integration')\n if self.norm1_eps != self.norm2_eps:\n raise ValueError('norm1_eps and norm2_eps must be equal for `BetterTransformer` integration.')\n if self.act_fn in USE_AT_OWN_RISK_ACTIVATION_FUNCTIONS:\n logger.warning(f'Overridding {self.act_fn} activation with gelu. Use the transformed model at your own risk, the output logits could be significantly different.')\n self.act_fn = 'gelu'\n elif self.act_fn not in SUPPORTED_ACTIVATION_FUNCTIONS:\n raise ValueError(f'Activation function {self.act_fn} not supported for `BetterTransformer` integration.')\n self.use_gelu = self.act_fn == 'gelu' or self.act_fn == 'gelu_new'\n if self.num_heads % 2 == 1:\n raise ValueError(f'Number of heads {self.num_heads} is not supported for `BetterTransformer` integration. 
Number of heads must be even.')", "def assert_transformation_available(self, data):\n mt = ModelType.from_view_type(data)\n\n for record in self._validators:\n mt_other = ModelType.from_view_type(record.view)\n if not mt.has_transformation(mt_other):\n raise AssertionError(\n 'Could not validate %s using %r because there was no'\n ' transformation from %r to %r' %\n (self.concrete_type, record.validator.__name__,\n mt._view_name, mt_other._view_name)\n )", "def validate(self):\n for validator in self.exttype_validators:\n validator.validate(self.ext_type)", "def fit(self, X, y=None, **fitparams):\n \n self.fitted_transformers_ = []\n for transformer in self.list_of_transformers:\n fitted_trans = clone(transformer).fit(X, y=None, **fitparams)\n self.fitted_transformers_.append(fitted_trans)\n return self", "def _check_is_fitted(self):\n check_is_fitted(self, ['w', 'b'])", "def _verify_fit(self) -> None:\n if not hasattr(self, 'X_train') or not hasattr(self, 'Y_train'):\n raise ValueError('Training data not set. Call `fit` and pass training data first.')", "def _is_transformable(self):\n if not self._app.get_paths():\n raise NotTransformable(\"No image to\")\n elif not edit_supported(self._app.get_path()):\n raise NotTransformable(\"Filetype not supported for\")\n # Some operations only make sense if we are allowed to save to file\n elif not settings[\"autosave_images\"].get_value():\n message = \"\"\n if self._app[\"thumbnail\"].toggled:\n message = 'When operating in thumbnail mode ' \\\n '\"autosave_images\" must be enabled for'\n elif self._app[\"mark\"].marked:\n message = 'When images are marked ' \\\n '\"autosave_images\" must be enabled for'\n if message:\n raise NotTransformable(message)", "def is_sklearn_transformer(obj):\n return is_sklearn_estimator(obj) and sklearn_scitype(obj) == \"transformer\"", "def test_class_methods(self):\n\n x = BaseTransformer()\n\n h.test_object_method(obj=x, expected_method=\"fit\", msg=\"fit\")\n\n h.test_object_method(obj=x, expected_method=\"transform\", msg=\"transform\")\n\n h.test_object_method(\n obj=x, expected_method=\"columns_set_or_check\", msg=\"columns_set_or_check\"\n )\n\n h.test_object_method(\n obj=x, expected_method=\"columns_check\", msg=\"columns_check\"\n )", "def test_arguments(self):\n\n h.test_function_arguments(\n func=BaseTransformer.fit,\n expected_arguments=[\"self\", \"X\", \"y\"],\n expected_default_values=(None,),\n )", "def _check_initialized(self):\n check_is_fitted(self, 'estimators_')", "def _check_is_fitted(self):\n # Do not check `b` as some classifiers do not set it\n check_is_fitted(self, 'w')\n super(CClassifierLinear, self)._check_is_fitted()", "def check_regressor(self):\n\n # Sklearn and Mlxtend stacking regressors, as well as \n # LightGBM, XGBoost, and CatBoost regressor \n # do not adhere to the convention.\n try:\n super().check_regressor\n except:\n print(f'{_MODEL_DICT[self.regressor_choice]} does not adhere to sklearn conventions.')", "def test_compatibility_with_sklearn(self) -> type(None):\n check_estimator(StackingClassifier)", "def check_transforms_match(self, transform: Mapping) -> None:\n xform_id = transform.get(TraceKeys.ID, \"\")\n if xform_id == id(self):\n return\n # TraceKeys.NONE to skip the id check\n if xform_id == TraceKeys.NONE:\n return\n xform_name = transform.get(TraceKeys.CLASS_NAME, \"\")\n warning_msg = transform.get(TraceKeys.EXTRA_INFO, {}).get(\"warn\")\n if warning_msg:\n warnings.warn(warning_msg)\n # basic check if multiprocessing uses 'spawn' (objects get recreated so 
don't have same ID)\n if torch.multiprocessing.get_start_method() in (\"spawn\", None) and xform_name == self.__class__.__name__:\n return\n raise RuntimeError(\n f\"Error {self.__class__.__name__} getting the most recently \"\n f\"applied invertible transform {xform_name} {xform_id} != {id(self)}.\"\n )", "def validate(self):\n validation_methods = get_validation_methods(Layout)\n\n for method in validation_methods:\n getattr(self, method)()", "def check_for_fit(cls, method):\n\n @wraps(method)\n def _check_for_fit(self, *args, **kwargs):\n klass = type(self).__name__\n if not self._is_fitted:\n raise PipelineNotYetFittedError(\n f\"This {klass} is not fitted yet. You must fit {klass} before calling {method.__name__}.\"\n )\n\n return method(self, *args, **kwargs)\n\n return _check_for_fit", "def test_arguments(self):\n\n h.test_function_arguments(\n func=BaseTransformer.transform, expected_arguments=[\"self\", \"X\"]\n )", "def test_scikit_learn_compatibility():\n\n # sklearn tests in:\n # https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/utils/estimator_checks.py\n\n skip_tests = {\n \"check_dtype_object\", # the error message required to pass is too specific and incorrect for us\n \"check_classifiers_one_label\", # TODO: fix this! We should accept 1 category\n \"check_classifiers_regression_target\", # we're more permissive and convert any y values to str\n \"check_supervised_y_no_nan\", # error message too specific\n \"check_supervised_y_2d\", # we ignore useless added dimensions\n \"check_fit2d_predict1d\", # we accept 1d for predict\n \"check_fit2d_1sample\", # TODO: we allow fitting on 1 sample, but this kind of input is likely a bug from the caller, so change this\n \"check_regressors_no_decision_function\", # TODO: fix this!\n }\n for estimator, check_func in check_estimator(\n ExplainableBoostingClassifier(), generate_only=True\n ):\n f = check_func.func\n module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()\n\n for estimator, check_func in check_estimator(\n ExplainableBoostingRegressor(), generate_only=True\n ):\n f = check_func.func\n module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()\n\n for estimator, check_func in check_estimator(\n DPExplainableBoostingClassifier(), generate_only=True\n ):\n f = check_func.func\n module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()\n\n for estimator, check_func in check_estimator(\n DPExplainableBoostingRegressor(), generate_only=True\n ):\n f = check_func.func\n module = f.__module__\n shortname = f.__name__\n fullname = f\"{module}.{shortname}\"\n if shortname not in skip_tests:\n try:\n check_func(estimator)\n except BaseException as e:\n print(fullname)\n print(f\"{type(e).__name__}: {e}\")\n print()", "def test_arguments(self):\n\n h.test_function_arguments(\n func=ScalingTransformer.fit,\n expected_arguments=[\"self\", \"X\", \"y\"],\n expected_default_values=(None,),\n )", "def fit(self, data):\n if not self._transformers:\n return\n\n 
transformed_data = self._preprocess(data)\n final_step = self._transformers[-1]\n final_step[1].fit(transformed_data)", "def check_sklearn_attributes(sklearn_preprocess:object):\n\t\tcoder_type = str(type(sklearn_preprocess))\n\t\tstringified_coder = str(sklearn_preprocess)\n\n\t\tif (inspect.isclass(sklearn_preprocess)):\n\t\t\traise ValueError(dedent(\"\"\"\n\t\t\tYikes - The encoder you provided is a class name, but it should be a class instance.\\n\n\t\t\tClass (incorrect): `OrdinalEncoder`\n\t\t\tInstance (correct): `OrdinalEncoder()`\n\t\t\t\\n\"\"\"))\n\n\t\tif ('sklearn.preprocessing' not in coder_type):\n\t\t\traise ValueError(dedent(\"\"\"\n\t\t\tYikes - At this point in time, only `sklearn.preprocessing` encoders are supported.\n\t\t\thttps://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing\n\t\t\t\\n\"\"\"))\n\t\telif ('sklearn.preprocessing' in coder_type):\n\t\t\tif (not hasattr(sklearn_preprocess, 'fit')): \n\t\t\t\traise ValueError(dedent(\"\"\"\n\t\t\t\tYikes - The `sklearn.preprocessing` method you provided does not have a `fit` method.\\n\n\t\t\t\tPlease use one of the uppercase methods instead.\n\t\t\t\tFor example: use `RobustScaler` instead of `robust_scale`.\n\t\t\t\t\\n\"\"\"))\n\n\t\t\tif (hasattr(sklearn_preprocess, 'sparse')):\n\t\t\t\tif (sklearn_preprocess.sparse == True):\n\t\t\t\t\traise ValueError(dedent(f\"\"\"\n\t\t\t\t\tYikes - Detected `sparse==True` attribute of {stringified_coder}.\n\t\t\t\t\tFYI `sparse` is True by default if left blank.\n\t\t\t\t\tThis would have generated 'scipy.sparse.csr.csr_matrix', causing Keras training to fail.\\n\n\t\t\t\t\tPlease try again with False. For example, `OneHotEncoder(sparse=False)`.\n\t\t\t\t\t\"\"\"))\n\n\t\t\tif (hasattr(sklearn_preprocess, 'encode')):\n\t\t\t\tif (sklearn_preprocess.encode == 'onehot'):\n\t\t\t\t\traise ValueError(dedent(f\"\"\"\n\t\t\t\t\tYikes - Detected `encode=='onehot'` attribute of {stringified_coder}.\n\t\t\t\t\tFYI `encode` is 'onehot' by default if left blank and it results in 'scipy.sparse.csr.csr_matrix',\n\t\t\t\t\twhich causes Keras training to fail.\\n\n\t\t\t\t\tPlease try again with 'onehot-dense' or 'ordinal'.\n\t\t\t\t\tFor example, `KBinsDiscretizer(encode='onehot-dense')`.\n\t\t\t\t\t\"\"\"))\n\n\t\t\tif (hasattr(sklearn_preprocess, 'copy')):\n\t\t\t\tif (sklearn_preprocess.copy == True):\n\t\t\t\t\traise ValueError(dedent(f\"\"\"\n\t\t\t\t\tYikes - Detected `copy==True` attribute of {stringified_coder}.\n\t\t\t\t\tFYI `copy` is True by default if left blank, which consumes memory.\\n\n\t\t\t\t\tPlease try again with 'copy=False'.\n\t\t\t\t\tFor example, `StandardScaler(copy=False)`.\n\t\t\t\t\t\"\"\"))\n\t\t\t\n\t\t\tif (hasattr(sklearn_preprocess, 'sparse_output')):\n\t\t\t\tif (sklearn_preprocess.sparse_output == True):\n\t\t\t\t\traise ValueError(dedent(f\"\"\"\n\t\t\t\t\tYikes - Detected `sparse_output==True` attribute of {stringified_coder}.\n\t\t\t\t\tPlease try again with 'sparse_output=False'.\n\t\t\t\t\tFor example, `LabelBinarizer(sparse_output=False)`.\n\t\t\t\t\t\"\"\"))\n\n\t\t\tif (hasattr(sklearn_preprocess, 'order')):\n\t\t\t\tif (sklearn_preprocess.sparse_output == 'F'):\n\t\t\t\t\traise ValueError(dedent(f\"\"\"\n\t\t\t\t\tYikes - Detected `order=='F'` attribute of {stringified_coder}.\n\t\t\t\t\tPlease try again with 'order='C'.\n\t\t\t\t\tFor example, `PolynomialFeatures(order='C')`.\n\t\t\t\t\t\"\"\"))\n\n\t\t\t\"\"\"\n\t\t\t- Attempting to automatically set this. 
I was originally validating based on \n\t\t\t whether or not the encoder was categorical. But I realized, if I am going to \n\t\t\t rule them out and in... why not automatically set it?\n\t\t\t- Binners like 'KBinsDiscretizer' and 'QuantileTransformer'\n\t\t\t will place unseen observations outside bounds into existing min/max bin.\n\t\t\t- Regarding a custom FunctionTransformer, assuming they wouldn't be numerical\n\t\t\t as opposed to OHE/Ordinal or binarizing.\n\t\t\t\"\"\"\n\t\t\tcategorical_encoders = [\n\t\t\t\t'OneHotEncoder', 'LabelEncoder', 'OrdinalEncoder', \n\t\t\t\t'Binarizer', 'MultiLabelBinarizer'\n\t\t\t]\n\t\t\tonly_fit_train = True\n\t\t\tfor c in categorical_encoders:\n\t\t\t\tif (stringified_coder.startswith(c)):\n\t\t\t\t\tonly_fit_train = False\n\t\t\t\t\tbreak\n\t\t\treturn only_fit_train", "def can_retransform(self):\r\n return self._can_retransform", "def _check_inputs(self):\n\n # Check if attributes exists\n if self.attributes is None:\n print(\"attributes is missing; call set_attributes(new_attributes) to fix this! new_attributes should be a\",\n \"populated dataset of independent variables.\")\n return False\n\n # Check if labels exists\n if self.labels is None:\n print(\"labels is missing; call set_labels(new_labels) to fix this! new_labels should be a populated dataset\",\n \"of dependent variables.\")\n return False\n\n # Check if attributes and labels have same number of rows (samples)\n if self.attributes.shape[0] != self.labels.shape[0]:\n print(\"attributes and labels don't have the same number of rows. Make sure the number of samples in each\",\n \"dataset matches!\")\n return False\n\n # Type-checking for fit_intercept, normalize, and copy_X isn't needed; these can accept truthy/falsy values\n\n # Check if n_jobs is an integer or None\n if self.n_jobs is not None and not isinstance(self.n_jobs, int):\n print(\"n_jobs must be None or an integer; call set_n_jobs(new_n_jobs) to fix this!\")\n return False\n\n # Check if test_size is a float or None\n if self.test_size is not None and not isinstance(self.test_size, (int, float)):\n print(\"test_size must be None or a number; call set_test_size(new_test_size) to fix this!\")\n return False\n\n return True", "def test_sklearn_compatible_estimator(estimator: Any, check: Any) -> None:\n check(estimator)", "def check_signature(cls, name, bases, attr):\n check_bases = []\n for base in bases:\n all_bases = base.__mro__\n for i in all_bases:\n if (\n i is not object\n and \"sign_check\" in i.__dict__\n and i not in check_bases\n ):\n check_bases.append(i)\n\n for methodName in attr:\n f = attr[methodName]\n if not isinstance(f, types.FunctionType):\n continue\n\n for baseClass in check_bases:\n try:\n fBase = getattr(baseClass, methodName)\n if isinstance(fBase, types.FunctionType):\n if not inspect.signature(f) == inspect.signature(fBase):\n debtcollector.deprecate(\n \"{}.{} Method signature are not identical with base class {}\".format(\n name, methodName, baseClass\n ),\n category=UserWarning,\n )\n break\n else:\n debtcollector.deprecate(\n \"{}.{} Method is not FunctionType in base class {}\".format(\n name, methodName, baseClass\n ),\n category=UserWarning,\n )\n break\n except AttributeError:\n # This method was not defined in this base class,\n # So just go to the next base class.\n continue", "def test_compatibility_with_sklearn(self) -> type(None):\n check_estimator(StackingRegressor)", "def clean(self):\n if not self.is_pipeline() and not self.is_method():\n raise ValidationError(\"Transformation with 
pk={} is neither Method nor Pipeline\".format(self.pk))\n\n for curr_input in self.inputs.all():\n curr_input.clean()\n for curr_output in self.outputs.all():\n curr_output.clean()\n self.check_input_indices()\n self.check_output_indices()", "def test_check_numeric_columns_call(self, mocker):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\", copy=True)\n\n x.fit(df)\n\n expected_call_args = {0: {\"args\": (d.create_df_2(),), \"kwargs\": {}}}\n\n with h.assert_function_call(\n mocker,\n tubular.base.BaseTransformer,\n \"transform\",\n expected_call_args,\n return_value=d.create_df_2(),\n ):\n\n x.transform(df)", "def _check_parameters_support(self, parameters=()):\n for parameter in parameters:\n assert parameter in self._supported, \"Estimation %s is not implemented yet\" % parameter", "def is_compatible(self, function, arguments):", "def _register_builtin_coercers(self):\n type(self).__registry.extend(\n [\n # Check if the annotaion is a date-type\n Coercer(checks.isdatetype, self.cls._coerce_datetime),\n # Check if the annotation maps directly to a builtin-type\n # We use the raw annotation here, not the origin, since we account for\n # subscripted generics later.\n Coercer(\n checks.isbuiltintype, self.cls._coerce_builtin, check_origin=False\n ),\n # Check for a class with a ``from_dict()`` factory\n Coercer(checks.isfromdictclass, self.cls._coerce_from_dict),\n # Enums are iterable and evaluate as a Collection,\n # so we need to short-circuit the next set of checks\n Coercer(checks.isenumtype, self.cls._coerce_enum),\n # Check for a subscripted generic of the ``Mapping`` type\n Coercer(checks.ismappingtype, self.cls._coerce_mapping),\n # Check for a subscripted generic of the ``Collection`` type\n # This *must* come after the check for a ``Mapping`` type\n Coercer(checks.iscollectiontype, self.cls._coerce_collection),\n # Finally, try a generic class coercion.\n Coercer(inspect.isclass, self.cls._coerce_class),\n ]\n )", "def test_all_estimators(name, Estimator):\n allow_nan = (hasattr(checks, 'ALLOW_NAN') and\n Estimator().get_tags()[\"allow_nan\"])\n if allow_nan:\n checks.ALLOW_NAN.append(name)\n if name in [\"ShapeletModel\"]:\n # Deprecated models\n return\n check_estimator(Estimator)", "def check_for_fit(cls, method):\n\n @wraps(method)\n def _check_for_fit(self, X=None, y=None):\n klass = type(self).__name__\n if not self._is_fitted and self.needs_fitting:\n raise ComponentNotYetFittedError(\n f\"This {klass} is not fitted yet. 
You must fit {klass} before calling {method.__name__}.\"\n )\n else:\n return method(self, X, y)\n\n return _check_for_fit", "def test_arguments(self):\n\n h.test_function_arguments(\n func=ScalingTransformer.transform,\n expected_arguments=[\"self\", \"X\"],\n expected_default_values=None,\n )", "def test_super_transform_called(self, mocker):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\")\n\n x.fit(df)\n\n expected_call_args = {0: {\"args\": (d.create_df_2(),), \"kwargs\": {}}}\n\n with h.assert_function_call(\n mocker,\n tubular.base.BaseTransformer,\n \"transform\",\n expected_call_args,\n return_value=d.create_df_2(),\n ):\n\n x.transform(df)", "def test_inheritance(self):\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\")\n\n h.assert_inheritance(x, tubular.base.BaseTransformer)", "def check(self):\n raise NotImplementedError", "def fitTranformers(self, lGraph,lY=None):\n self._node_transformer[0].fit([nd for g in lGraph for nd in g.getNodeListByType(0)])\n self._node_transformer[1].fit([nd for g in lGraph for nd in g.getNodeListByType(1)])\n \n self._edge_transformer[0].fit([e for g in lGraph for e in g.getEdgeListByType(0, 0)])\n self._edge_transformer[1].fit([e for g in lGraph for e in g.getEdgeListByType(0, 1)])\n #self._edge_transformer[2].fit([e for g in lGraph for e in g.getEdgeListByType(1, 0)])\n #self._edge_transformer[3].fit([e for g in lGraph for e in g.getEdgeListByType(1, 1)])\n \n return True", "def verify_tools(self):\n super().verify_tools()\n if self.use_docker:\n Docker.verify(tools=self.tools)", "def fit_transform(self, data):\n if not self._transformers:\n return self._preprocess(data)\n\n transformed_data = self._preprocess(data)\n final_step = self._transformers[-1]\n return final_step[1].fit_transform(transformed_data)", "def __check_attributes(self, attr_list, **kwargs):\n dependancies = [\n # I & Q will need A_masq\n ([\"I\", \"Q\"], [\"I\", \"Q\", \"A_masq\"]),\n # Calibration data depends on the I, Q & A_masq raw data\n ([\"calfact\", \"Icc\", \"Qcc\", \"P0\", \"R0\", \"interferograms\", \"continuum\"], [\"I\", \"Q\", \"A_masq\"]),\n # For any requested telescope position, read them all\n ([\"F_tl_Az\", \"F_tl_El\", \"F_sky_Az\", \"F_sky_El\"], [\"F_tl_Az\", \"F_tl_El\"]),\n ]\n\n _dependancies = self._KidsRawData__check_attributes(attr_list, dependancies=dependancies, **kwargs)\n\n if _dependancies is not None:\n self.calib_raw()", "def _check_extend_function(self):\n for function_name, parameters in self._extend_function_dic.iteritems():\n if not apply(function_name, parameters):\n return False\n return True", "def _check_do_transform(df, reference_im, affine_obj):\n try:\n crs = getattr(df, 'crs')\n except AttributeError:\n return False # if it doesn't have a CRS attribute\n\n if not crs:\n return False # return False for do_transform if crs is falsey\n elif crs and (reference_im is not None or affine_obj is not None):\n # if the input has a CRS and another obj was provided for xforming\n return True", "def _check_do_transform(df, reference_im, affine_obj):\n try:\n crs = getattr(df, 'crs')\n except AttributeError:\n return False # if it doesn't have a CRS attribute\n\n if not crs:\n return False # return False for do_transform if crs is falsey\n elif crs and (reference_im is not None or affine_obj is not None):\n # if the input has a CRS and another obj was provided for xforming\n return True", "def test_required_methods(self):\n\n required_methods = ('__init__', 'load')\n\n for method in 
required_methods:\n self.assertIn(method, dir(DatasetLoader_Jakob2019))", "def check_compliance(objects):\n for name, obj in objects.items():\n if isinstance(obj, SaveableInterface):\n continue\n # explicitly check for required methods\n for attr_to_check in {\"state_dict\", \"load_state_dict\"}:\n if not hasattr(obj, attr_to_check):\n raise TypeError(\"{} of {} needs to implement the {} fn\".format(\n obj, type(obj), attr_to_check))", "def _cross_validate(self, fit_params={}):\n\n # Flatten the true labels for the training data\n y_train = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n\n if self.model.estimator_type == \"classifier\":\n\n # Get unique labels for classification\n labels = np.unique(y_train)\n\n # Set up a dictionary for the scoring metrics\n scoring = {'accuracy':'accuracy'}\n\n # Prepare arguments for the scorers\n metric_args = self.model.metric_args\n \n if 'average' in metric_args and metric_args['average'] is not None:\n # If the score is being averaged over classes a single scorer per metric is sufficient\n scoring['precision'] = metrics.make_scorer(metrics.precision_score, **metric_args)\n scoring['recall'] = metrics.make_scorer(metrics.recall_score, **metric_args)\n scoring['fscore'] = metrics.make_scorer(metrics.f1_score, **metric_args)\n\n output_format = \"clf_overall\"\n else:\n # If there is no averaging we will need multiple scorers; one for each class\n for label in labels:\n metric_args['pos_label'] = label\n metric_args['labels'] = [label]\n scoring['precision_'+str(label)] = metrics.make_scorer(metrics.precision_score, **metric_args)\n scoring['recall_'+str(label)] = metrics.make_scorer(metrics.recall_score, **metric_args)\n scoring['fscore_'+str(label)] = metrics.make_scorer(metrics.f1_score, **metric_args)\n \n output_format = \"clf_classes\"\n\n elif self.model.estimator_type == \"regressor\":\n scoring = ['r2', 'neg_mean_squared_error', 'neg_mean_absolute_error', 'neg_median_absolute_error', 'explained_variance']\n \n # Perform cross validation using the training data and the model pipeline\n scores = cross_validate(self.model.pipe, self.X_train, y_train, scoring=scoring, cv=self.model.cv, fit_params=fit_params, return_train_score=False)\n\n # Prepare the metrics data frame according to the output format\n if self.model.estimator_type == \"classifier\": \n # Get cross validation predictions for the confusion matrix\n y_pred = cross_val_predict(self.model.pipe, self.X_train, y_train, cv=self.model.cv, fit_params=fit_params)\n\n # Prepare the confusion matrix and add it to the model\n self._prep_confusion_matrix(y_train, y_pred, labels)\n\n # Create an empty data frame to set the structure\n metrics_df = pd.DataFrame(columns=[\"class\", \"accuracy\", \"accuracy_std\", \"precision\", \"precision_std\", \"recall\",\\\n \"recall_std\", \"fscore\", \"fscore_std\"])\n\n if output_format == \"clf_overall\": \n # Add the overall metrics to the data frame\n metrics_df.loc[0] = [\"overall\", np.average(scores[\"test_accuracy\"]), np.std(scores[\"test_accuracy\"]),\\\n np.average(scores[\"test_precision\"]), np.std(scores[\"test_precision\"]),\\\n np.average(scores[\"test_recall\"]), np.std(scores[\"test_recall\"]),\\\n np.average(scores[\"test_fscore\"]), np.std(scores[\"test_fscore\"])]\n\n elif output_format == \"clf_classes\":\n # Add accuracy which is calculated at an overall level\n metrics_df.loc[0] = [\"overall\", np.average(scores[\"test_accuracy\"]), np.std(scores[\"test_accuracy\"]),\\\n np.NaN, np.NaN, np.NaN, 
np.NaN, np.NaN, np.NaN]\n\n # Add the metrics for each class to the data frame\n for i, label in enumerate(labels):\n metrics_df.loc[i+1] = [label, np.NaN, np.NaN, np.average(scores[\"test_precision_\"+str(label)]),\\\n np.std(scores[\"test_precision_\"+str(label)]), np.average(scores[\"test_recall_\"+str(label)]),\\\n np.std(scores[\"test_recall_\"+str(label)]), np.average(scores[\"test_fscore_\"+str(label)]),\\\n np.std(scores[\"test_fscore_\"+str(label)])]\n \n # Finalize the structure of the result DataFrame\n metrics_df.loc[:,\"model_name\"] = self.model.name\n metrics_df = metrics_df.loc[:,[\"model_name\", \"class\", \"accuracy\", \"accuracy_std\", \"precision\", \"precision_std\", \"recall\",\\\n \"recall_std\", \"fscore\", \"fscore_std\"]]\n\n # Add the score to the model\n self.model.score = metrics_df[\"accuracy\"].values[0]\n\n elif self.model.estimator_type == \"regressor\":\n # Create an empty data frame to set the structure\n metrics_df = pd.DataFrame(columns=[\"r2_score\", \"r2_score_std\", \"mean_squared_error\", \"mean_squared_error_std\",\\\n \"mean_absolute_error\", \"mean_absolute_error_std\", \"median_absolute_error\", \"median_absolute_error_std\",\\\n \"explained_variance_score\", \"explained_variance_score_std\"])\n \n # Add the overall metrics to the data frame\n metrics_df.loc[0] = [np.average(scores[\"test_r2\"]), np.std(scores[\"test_r2\"]),\\\n np.average(scores[\"test_neg_mean_squared_error\"]), np.std(scores[\"test_neg_mean_squared_error\"]),\\\n np.average(scores[\"test_neg_mean_absolute_error\"]), np.std(scores[\"test_neg_mean_absolute_error\"]),\\\n np.average(scores[\"test_neg_median_absolute_error\"]), np.std(scores[\"test_neg_median_absolute_error\"]),\\\n np.average(scores[\"test_explained_variance\"]), np.std(scores[\"test_explained_variance\"])]\n \n # Finalize the structure of the result DataFrame\n metrics_df.loc[:,\"model_name\"] = self.model.name\n metrics_df = metrics_df.loc[:,[\"model_name\", \"r2_score\", \"r2_score_std\", \"mean_squared_error\", \"mean_squared_error_std\",\\\n \"mean_absolute_error\", \"mean_absolute_error_std\", \"median_absolute_error\", \"median_absolute_error_std\",\\\n \"explained_variance_score\", \"explained_variance_score_std\"]]\n\n # Add the score to the model\n self.model.score = metrics_df[\"r2_score\"].values[0]\n\n # Save the metrics_df to the model\n self.model.metrics_df = metrics_df", "def validate(self):\n # should this just be folded into the constructor for ProgramNode?\n for func in self.functions:\n func.validate()\n self.validated = True", "def version_check(self):\n # anchor_matcher --> matcher\n if hasattr(self, \"anchor_matcher\"):\n self.matcher = self.anchor_matcher\n if hasattr(self, \"head_in_features\"):\n self.in_features = self.head_in_features\n if hasattr(self, \"test_topk_candidates\"):\n self.topk_candidates = self.test_topk_candidates\n if hasattr(self, \"test_score_thresh\"):\n self.score_threshold = self.test_score_thresh", "def test_valid_method(method: str) -> None:\n mapie = MapieClassifier(method=method)\n mapie.fit(X_toy, y_toy)\n check_is_fitted(\n mapie,\n [\n \"single_estimator_\",\n \"n_features_in_\",\n \"n_samples_val_\",\n \"scores_\"\n ]\n )", "def test_check_numeric_columns_call(self, mocker):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\")\n\n expected_call_args = {0: {\"args\": (d.create_df_2(),), \"kwargs\": {}}}\n\n with h.assert_function_call(\n mocker,\n tubular.numeric.ScalingTransformer,\n \"check_numeric_columns\",\n 
expected_call_args,\n return_value=d.create_df_2(),\n ):\n\n x.fit(df)", "def _check_mapper(self, mapper):\n if not hasattr(mapper, 'parse') or not callable(mapper.parse):\n raise ValueError('mapper must implement parse()')\n if not hasattr(mapper, 'format') or not callable(mapper.format):\n raise ValueError('mapper must implement format()')", "def _check_if_estimator(estimator):\n msg = (\"This %(name)s instance has no attribute \\\"fit\\\".\")\n if not hasattr(estimator, \"fit\"):\n raise AttributeError(msg % {'name': type(estimator).__name__})", "def checkCompatibility(self, *args):\n return _libsbml.SBase_checkCompatibility(self, *args)", "def test_super_fit_call(self, mocker):\n\n df = d.create_df_2()\n\n x = ScalingTransformer(columns=[\"a\"], scaler=\"standard\")\n\n expected_call_args = {0: {\"args\": (d.create_df_2(), None), \"kwargs\": {}}}\n\n with h.assert_function_call(\n mocker, tubular.base.BaseTransformer, \"fit\", expected_call_args\n ):\n\n x.fit(df)", "def _validate_estimator(self):\n\n if self.smote is not None:\n if isinstance(self.smote, SMOTE):\n self.smote_ = self.smote\n else:\n raise ValueError('smote needs to be a SMOTE object.'\n 'Got {} instead.'.format(type(self.smote)))\n else:\n self.smote_ = SMOTE(ratio=self.ratio, k_neighbors=3,\n random_state=self.random_state)\n\n if self.tomek is not None:\n if isinstance(self.tomek, TomekLinks):\n self.tomek_ = self.tomek\n else:\n raise ValueError('tomek needs to be a TomekLinks object.'\n 'Got {} instead.'.format(type(self.tomek)))\n else:\n self.tomek_ = TomekLinks(ratio=\"all\",\n random_state=self.random_state)", "def _check(self):\n if not isinstance(self.fc_layers, tuple):\n raise TypeError(f'fc_layers require tuple, get {type(self.fc_layers)}')\n if not isinstance(self.use_dropout, tuple):\n raise TypeError(f'use_dropout require tuple, get {type(self.use_dropout)}')\n if not isinstance(self.drop_prob, tuple):\n raise TypeError(f'drop_prob require tuple, get {type(self.drop_prob)}')\n if not isinstance(self.use_activation, tuple):\n raise TypeError(f'use_activation require tuple, get {type(self.use_activation)}')\n l_fc_layer = len(self.fc_layers)\n l_use_drop = len(self.use_dropout)\n l_drop_prob = len(self.drop_prob)\n l_use_activation = len(self.use_activation)\n pass_check = l_fc_layer >= 2 and l_use_drop < l_fc_layer and l_drop_prob < l_fc_layer and l_use_activation < l_fc_layer and l_drop_prob == l_use_drop\n if not pass_check:\n msg = 'Wrong BaseDiscriminator parameters!'\n raise ValueError(msg)", "def _sanity_check_implementations(self, other):\n if not cryptomath.m2cryptoLoaded:\n self._remove_all_matches(other.cipherImplementations, \"openssl\")\n if not cryptomath.pycryptoLoaded:\n self._remove_all_matches(other.cipherImplementations, \"pycrypto\")\n\n if not other.cipherImplementations:\n raise ValueError(\"No supported cipher implementations\")", "def __isFastener(f):\n\n if type(f) != Fastener:\n raise TypeError(\"FastnerGroups may contain only Fasteners\")\n else:\n return True", "def test_validate_compatibility(self):\r\n self.assertEqual(self.cs_overview._validate_compatibility(), None)\r\n\r\n self.cs_overview.DistanceMatrices = [self.single_ele_dm]\r\n self.assertRaises(ValueError, self.cs_overview._validate_compatibility)\r\n\r\n self.cs_overview.DistanceMatrices = [self.overview_dm]\r\n self.cs_overview.MetadataMap = self.test_map\r\n self.assertRaises(ValueError, self.cs_overview._validate_compatibility)", "def check(self):\n raise NotImplementedError('Must be implemented by subclass.')", 
"def check_supported_features(self):", "def __check(self):\n assert self.name is not None, \"Empty name!\"\n assert self.in_spc, \"Empty in_spc!\"\n assert self.out_spc, \"Empty out_spc!\"\n assert self.num_clss > 0, \"Invalid number of output classes!\"\n if not isinstance(self, SvmSklearnWrapper):\n assert self.los_fnc is not None, \"No loss function!\"\n assert self.opt is not None, \"No optimizer!\"", "def test_columns_set_or_check_called(self, mocker):\n\n df = d.create_df_1()\n\n x = BaseTransformer(columns=\"a\")\n\n expected_call_args = {0: {\"args\": (df,), \"kwargs\": {}}}\n\n with h.assert_function_call(\n mocker,\n tubular.base.BaseTransformer,\n \"columns_set_or_check\",\n expected_call_args,\n ):\n\n x.fit(X=df)", "def check_already_chain():\n all_are_already_rigids = True\n for transform, _ in self._pairs:\n if not transform.shape(type=\"rdRigid\"):\n all_are_already_rigids = False\n break\n\n assert not all_are_already_rigids, (\n \"Every transform is already dynamic\"\n )", "def check_inputs(self):\n if self.res_references is None or self.res_predictions is None:\n raise TypeError(\"Both predictions and references have to be passed.\")\n return", "def transform(self, inputs):\n msg = 'The function transform() is not available in the class ' \\\n '`EnsembleDetector`.'\n LOGGER.error(TAG, msg)\n raise NotImplementedError(msg)", "def verify(self):\n if len(self.headers) not in [1, 5]:\n raise IncorrectNumberOfExtensions(\"header\", \"5\", self)\n if len(self.pixeldata) not in [1, 2, 3]:\n raise IncorrectNumberOfExtensions(\"pixel\", \"1, 2, or 3\", self)\n if len(self.tabledata) not in [0,4]:\n raise IncorrectNumberOfExtensions(\"table\", \"4\", self)", "def test_arguments(self):\n\n h.test_function_arguments(\n func=BaseTransformer.columns_set_or_check, expected_arguments=[\"self\", \"X\"]\n )", "def check(self) -> None:\n\n raise NotImplementedError", "def _apply_transform(self):\n pass", "def test_valid_method(method: str) -> None:\n mapie = MapieRegressor(method=method)\n mapie.fit(X_toy, y_toy)\n check_is_fitted(\n mapie,\n [\n \"n_features_in_\",\n \"single_estimator_\",\n \"estimators_\",\n \"k_\",\n \"residuals_\"\n ]\n )", "def _validate_compatibility(self):\r\n for dm in self.DistanceMatrices:\r\n for samp_id in dm.ids:\r\n if samp_id not in self.MetadataMap.SampleIds:\r\n raise ValueError(\"The sample ID '%s' was not found in the \"\r\n \"metadata map.\" % samp_id)\r\n for cat in self.Categories:\r\n if cat not in self.MetadataMap.CategoryNames:\r\n raise ValueError(\"The category '%s' was not found in the \"\r\n \"metadata map.\" % cat)", "def check_parameters(self):\n\n torch = import_optional_dependency('torch')\n if not isinstance(self.model, torch.nn.Module):\n self._raise_format_error('self.model', 'torch.nn.Module', f'{ type(self.model) }')\n if not isinstance(self.optimizer, torch.optim.Optimizer):\n self._raise_format_error('self.optimizer', 'torch.optim.Optimizer', f'{ type(self.optimizer) }')\n if not isinstance(self.train_dataset, torch.utils.data.DataLoader):\n self._raise_format_error('self.train_dataset', 'torch.utils.data.DataLoader', f'{ type(self.train_dataset) }')\n if not isinstance(self.eval_dataset, torch.utils.data.DataLoader):\n self._raise_format_error('self.eval_dataset', 'torch.utils.data.DataLoader', f'{ type(self.eval_dataset) }')", "def checkCompatibility(self):\n return _libsbml.CobraToFbcConverter_checkCompatibility(self)", "def accepts(self, problem):\n # TODO check if is matrix stuffed.\n self.import_solver()\n if not 
problem.objective.args[0].is_affine():\n return False\n for constr in problem.constraints:\n if type(constr) not in MOSEK.SUPPORTED_CONSTRAINTS:\n return False\n for arg in constr.args:\n if not arg.is_affine():\n return False\n return True", "def test_scaler_fit_call(self, mocker, scaler, scaler_type_str):\n\n df = d.create_df_3()\n\n x = ScalingTransformer(\n columns=[\"b\", \"c\"], scaler=scaler, scaler_kwargs={\"copy\": True}\n )\n\n mocked = mocker.patch(\n f\"sklearn.preprocessing.{scaler_type_str}.fit\", return_value=None\n )\n\n x.fit(df)\n\n assert mocked.call_count == 1, \"unexpected number of calls to scaler fit\"\n\n call_args = mocked.call_args_list[0]\n call_pos_args = call_args[0]\n call_kwargs = call_args[1]\n\n expected_positional_args = (df[[\"b\", \"c\"]],)\n\n h.assert_equal_dispatch(\n expected=expected_positional_args,\n actual=call_pos_args,\n msg=f\"unexpected positional args in {scaler_type_str} fit call\",\n )\n\n assert call_kwargs == {}, f\"unexpected kwargs in {scaler_type_str} fit call\"", "def _check_path_availability(self, methods: typing.Iterable[str, ...]) -> None:\n\n for method in methods:\n self.analizer._check_path_availability(method)", "def test_has_validate(self):\n for klass in Event.__subclasses__():\n self.assertTrue(hasattr(klass, 'validate'),\n f'{klass.__name__} is missing validate() method')\n self.assertTrue(inspect.isfunction(klass.validate),\n f'{klass.__name__} is missing validate() method')", "def check_all_user_inputs_valid(self):\n self.check_RNN_layers_valid()\n self.check_activations_valid()\n self.check_embedding_dimensions_valid()\n self.check_initialiser_valid()\n self.check_y_range_values_valid()\n self.check_return_final_seq_only_valid()", "def _check_params(self):\n\n # verify that estimator1 and estimator2 have predict_proba\n if (not hasattr(self.estimator1_, 'predict_proba') or\n not hasattr(self.estimator2_, 'predict_proba')):\n raise AttributeError(\"Co-training classifier must be initialized \"\n \"with classifiers supporting \"\n \"predict_proba().\")\n\n if (self.p_ is not None and self.p_ <= 0) or (self.n_ is not None and\n self.n_ <= 0):\n raise ValueError(\"Both p and n must be positive.\")\n\n if self.unlabeled_pool_size <= 0:\n raise ValueError(\"unlabeled_pool_size must be positive.\")\n\n if self.num_iter <= 0:\n raise ValueError(\"num_iter must be positive.\")", "def _platform_auto_calibrate_check(self):\n if isinstance(self.pwm_controller, MockPWMController):\n raise AntennyMotorException(\"Can not auto calibrate without a motor\")\n if isinstance(self.imu, MockImuController):\n raise AntennyIMUException(\"Can not auto calibrate without an imu\")", "def __validate_inputs(self):\n if self.train_df is None:\n raise ValueError(\"Dataframe cannot be null\")\n\n if (\n self.test_df is not None\n and self.train_df.shape[1] != self.test_df.shape[1]\n ):\n raise KeyError(\n \"Target variable in still present in one of the datasets or\"\n \" the number of columns in both test and train are not equal.\"\n )\n\n # target_label should not be in list of columns\n if self.target_label is None:\n warnings.warn(\n \"Parameter 'target_label' is empty. If not provided and is present in dataframe, it may get encoded. 
\"\n \"To mitigate, provide the target_label from dataframe or provide explicit list of columns for encoding \"\n \"via the 'cat_cols' parameter\",\n UserWarning,\n )\n if (\n self.target_label is not None\n and self.cat_cols is not None\n and (self.target_label in self.cat_cols)\n ):\n raise ValueError(\n f\"Target column: {self.target_label} will be encoded. Remove it from cat_cols if in there.\"\n )\n\n if self.ord_dict is not None:\n for key, mapping in self.ord_dict.items():\n if mapping is None or mapping == {}:\n raise ValueError(\n f\"Expected a weight mapping for ordinal column {key}.\"\n f\" Received {self.ord_dict[key]}\"\n )", "def check_params(self, params):\n legal_params_fns = [\n Sequential.fit, Sequential.predict, Sequential.predict_classes,\n Sequential.evaluate\n ]\n if self.build_fn is None:\n legal_params_fns.append(self.__call__)\n elif (not isinstance(self.build_fn, types.FunctionType) and\n not isinstance(self.build_fn, types.MethodType)):\n legal_params_fns.append(self.build_fn.__call__)\n else:\n legal_params_fns.append(self.build_fn)\n\n legal_params = []\n for fn in legal_params_fns:\n legal_params += tf_inspect.getargspec(fn)[0]\n legal_params = set(legal_params)\n\n for params_name in params:\n if params_name not in legal_params:\n if params_name != 'nb_epoch':\n raise ValueError('{} is not a legal parameter'.format(params_name))", "def test_fit(self):\n data = pd.DataFrame({\n \"x\": np.random.random(size=100),\n \"y\": np.random.choice([\"yes\", \"no\"], size=100)\n })\n\n transformer = DataTransformer()\n transformer._fit_continuous = Mock()\n transformer._fit_continuous.return_value = ColumnTransformInfo(\n column_name=\"x\", column_type=\"continuous\", transform=None,\n transform_aux=None,\n output_info=[SpanInfo(1, 'tanh'), SpanInfo(3, 'softmax')],\n output_dimensions=1 + 3\n )\n\n transformer._fit_discrete = Mock()\n transformer._fit_discrete.return_value = ColumnTransformInfo(\n column_name=\"y\", column_type=\"discrete\", transform=None,\n transform_aux=None,\n output_info=[SpanInfo(2, 'softmax')],\n output_dimensions=2\n )\n\n transformer.fit(data, discrete_columns=[\"y\"])\n\n transformer._fit_discrete.assert_called_once()\n transformer._fit_continuous.assert_called_once()\n assert transformer.output_dimensions == 6", "def check_plugins(classes):\n class_names = get_module_class_names(classes)\n check_duplicate_class_names(class_names)\n for _class in classes:\n check_implemented_functions(_class)", "def transform(self, X):\n check_is_fitted(self, 'estimators_')\n return self._concatenate_predictions(X, [\n getattr(est, meth)(X)\n for est, meth in zip(self.estimators_,\n self.method_estimators_)\n if est is not None])", "def type_check(train_tensors: BaseDatasetInputType,\n val_tensors: Optional[BaseDatasetInputType] = None) -> None:\n for i in range(len(train_tensors)):\n check_valid_data(train_tensors[i])\n if val_tensors is not None:\n for i in range(len(val_tensors)):\n check_valid_data(val_tensors[i])", "def named_transformers_(self):\n ...", "def validate(self, trainingSet): \n if self.regression:\n return self._validateRegression(trainingSet) \n else:\n return self._validateClassification(trainingSet)", "def check(self) -> None:\n # validate training config\n super().check()", "def test_scaler_transform_call(self, mocker, scaler, scaler_type_str):\n\n df = d.create_df_3()\n\n x = ScalingTransformer(\n columns=[\"b\", \"c\"], scaler=scaler, scaler_kwargs={\"copy\": True}\n )\n\n x.fit(df)\n\n mocked = mocker.patch(\n 
f\"sklearn.preprocessing.{scaler_type_str}.transform\",\n return_value=df[[\"b\", \"c\"]],\n )\n\n x.transform(df)\n\n assert mocked.call_count == 1, \"unexpected number of calls to scaler fit\"\n\n call_args = mocked.call_args_list[0]\n call_pos_args = call_args[0]\n call_kwargs = call_args[1]\n\n expected_positional_args = (df[[\"b\", \"c\"]],)\n\n h.assert_equal_dispatch(\n expected=expected_positional_args,\n actual=call_pos_args,\n msg=f\"unexpected positional args in {scaler_type_str} transform call\",\n )\n\n assert (\n call_kwargs == {}\n ), f\"unexpected kwargs in {scaler_type_str} transform call\"", "def test_arguments(self):\n\n h.test_function_arguments(\n func=BaseTransformer.columns_check, expected_arguments=[\"self\", \"X\"]\n )", "def testMulti(self):\n affineClass = xyTransformRegistry[\"affine\"]\n wrapper0 = OneXYTransformConfig()\n wrapper0.transform.retarget(affineClass)\n affineConfig0 = wrapper0.transform\n affineConfig0.translation = (-2.1, 3.4)\n rotAng = 0.832 # radians\n xScale = 3.7\n yScale = 45.3\n affineConfig0.linear = (\n math.cos(rotAng) * xScale, math.sin(rotAng) * yScale,\n -math.sin(rotAng) * xScale, math.cos(rotAng) * yScale,\n )\n\n wrapper1 = OneXYTransformConfig()\n wrapper1.transform.retarget(affineClass)\n affineConfig1 = wrapper1.transform\n affineConfig1.translation = (26.5, -35.1)\n rotAng = -0.25 # radians\n xScale = 1.45\n yScale = 0.9\n affineConfig1.linear = (\n math.cos(rotAng) * xScale, math.sin(rotAng) * yScale,\n -math.sin(rotAng) * xScale, math.cos(rotAng) * yScale,\n )\n\n multiClass = xyTransformRegistry[\"multi\"]\n multiConfig = multiClass.ConfigClass()\n multiConfig.transformDict = {\n 0: wrapper0,\n 1: wrapper1,\n }\n with lsst.utils.tests.getTempFilePath(\".py\") as filePath:\n self.checkConfig(multiClass, multiConfig, filePath)\n multiXYTransform = multiClass(multiConfig)\n\n affine0 = affineClass(affineConfig0)\n affine1 = affineClass(affineConfig1)\n transformList = (affine0, affine1)\n refMultiXYTransform = RefMultiXYTransform(transformList)\n\n self.checkBasics(refMultiXYTransform)\n\n for fromPoint in self.fromIter():\n toPoint = multiXYTransform.forwardTransform(fromPoint)\n predToPoint = refMultiXYTransform.forwardTransform(fromPoint)\n for i in range(2):\n self.assertAlmostEqual(toPoint[i], predToPoint[i])" ]
[ "0.6600919", "0.6600919", "0.6245045", "0.60071653", "0.60061026", "0.597027", "0.5953561", "0.57135946", "0.566122", "0.5652529", "0.5621234", "0.55733705", "0.5559613", "0.5533336", "0.54827064", "0.5438628", "0.5429233", "0.5416639", "0.5386236", "0.53803736", "0.5358731", "0.53483915", "0.5336691", "0.53317505", "0.53241795", "0.5294224", "0.528637", "0.5285609", "0.5275569", "0.5274284", "0.5272805", "0.52544004", "0.5202129", "0.5196584", "0.5186116", "0.51729083", "0.51663744", "0.5147884", "0.5146909", "0.5144805", "0.5143069", "0.5125235", "0.51132905", "0.5109395", "0.51004964", "0.50932586", "0.5087195", "0.50630116", "0.5062596", "0.5062596", "0.50462747", "0.50457627", "0.5045327", "0.5041315", "0.50387305", "0.50382876", "0.5037513", "0.5028296", "0.5021594", "0.5017426", "0.50113195", "0.5005491", "0.49996433", "0.49916357", "0.49868748", "0.4982529", "0.4979062", "0.49623644", "0.49617988", "0.4958924", "0.49567363", "0.4944487", "0.49416587", "0.49380398", "0.49365515", "0.49330044", "0.4926662", "0.49207604", "0.4919934", "0.49186155", "0.4915057", "0.49133494", "0.49098873", "0.49059337", "0.49040833", "0.49020636", "0.48962232", "0.4886667", "0.4880217", "0.48725298", "0.48710978", "0.4868505", "0.48616642", "0.48573673", "0.48572862", "0.4852336", "0.48486865", "0.4845702", "0.48391423", "0.48310626" ]
0.9007204
0
Runs back and forth between the ball and a random point in the field.
def warm_up(self): self.velocity = self.steering_behaviours.calculate() self.pos += self.velocity self.pos = Point(int(self.pos.x), int(self.pos.y)) if not self.is_moving(): if self.steering_behaviours.target == self.soccer_field.ball.pos: # let's go back towards where I was. self.steering_behaviours.target = self.initial_pos else: # let's go towards the ball. self.steering_behaviours.target = self.soccer_field.ball.pos self.direction = Vec2d(self.steering_behaviours.target - self.pos).normalized()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def spawn_ball(direction):\n global ball_pos, ball_vel \n ball_pos = [WIDTH / 2, HEIGHT / 2]\n ball_vel = ball_generate_velocity(direction) # Ball velocity randomization ", "def move(self):\n if self._z >= 75:\n a = random.random()\n print(str(a))\n if a < 0.2:\n self._z += 1\n if a > 0.2 and a < 0.9:\n self._z -= 1\n if a > 0.9:\n self._z = self._z\n else: \n self._z -= 1\n \n b = random.random()\n print(str(b))\n if b < 0.1:\n self._y += 1\n if b > 0.1 and b < 0.2:\n self._y -= 1\n if b > 0.2 and b < 0.25:\n self._x -= 1\n if b > 0.25:\n self._x += 1", "def ball_reset(self):\n self.ball.x = (self.window.width - BALL_RADIUS)/2\n self.ball.y = (self.window.height - BALL_RADIUS)/2\n self.__dx = random.randint(1, MAX_X_SPEED)\n if random.random() > 0.5:\n self.__dx = -self.__dx", "def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100", "def bouncing(self):\n x = random.randint(-250, 250) # where the ball will bounce on the X axis\n left_x = -850\n right_x = 850\n rand_y = random.randint(-350, 350) # random height where the ball goes\n floor = -350 # bouncing floor\n\n if self.xcor() > 300:\n self.goto(x, floor)\n self.goto(left_x, rand_y)\n elif self.xcor() < -300:\n self.goto(x, floor)\n self.goto(right_x, rand_y)", "def update(self):\n if self.x<0:\n self.x = 0\n\n if self.y <0:\n self.y = 0\n\n if bool(randint(0, 1))==True:\n if self.walker == True:\n self.x += randint(-2, 2)\n self.y += randint(-2, 2)", "def move(self):\n self.x += self.speed_x / FPS\n self.y += self.speed_y / FPS\n self.draw_ball()\n if self.x >= 1100:\n self.speed_x = randint(-100, -10)\n if self.x <= 50:\n self.speed_x = randint(10, 100)\n if self.y >= 800:\n self.speed_y = randint(-100, -10)\n if self.y <= 50:\n self.speed_y = randint(10, 100)", "def MoveRandom(self):\n r = random.randint(0,3)\n if r == 0: self.x += 1\n elif r == 1: self.y += 1\n elif r == 2: self.x -= 1\n elif r == 3: self.y -= 1", "def throw(self):\n\n self.vx = (2 * random.random()) - 1\n self.vy = (4 * random.random()) + 4", "def make_ball():\n ball = Ball()\n # Starting position of the ball.\n # Take into account the ball size so we don't spawn on the edge.\n ball.x = random.randrange(BALL_SIZE, SCREEN_WIDTH - BALL_SIZE)\n ball.y = random.randrange(BALL_SIZE, SCREEN_HEIGHT - BALL_SIZE)\n\n # Speed and direction of rectangle\n ball.change_x = random.randrange(-2, 2)\n ball.change_y = random.randrange(-2, 2)\n\n return ball", "def __init__(self):\n #random.uniform(1, 10) = random float values for x coordinate to make sure ball spawns on left edge of screen with random values\n #random.unform(1, 330) = 1-330 was chosen to make sure the ball can spawn randomly either below or on top of left edge of the screen\n self.x = random.uniform(1, 10)\n self.y = random.uniform(1, 330)", "def spawn_ball(direction):\r\n \r\n global ball_pos, ball_vel # these are vectors stored as lists\r\n global strike_counter\r\n \r\n # clear strike counter each new run\r\n strike_counter = 0\r\n \r\n # determine new initial velocity\r\n ball_pos = [WIDTH / 2, HEIGHT / 2]\r\n if direction == RIGHT:\r\n ball_vel = [random.randrange(120, 240) / REFRESH_RATE, -random.randrange(60, 180) / REFRESH_RATE]\r\n elif direction == LEFT:\r\n ball_vel = [-random.randrange(120, 240) / REFRESH_RATE, -random.randrange(60, 180) / REFRESH_RATE]", "def bounce(self):\n self.__dx = random.randint(1, MAX_X_SPEED)\n self.__dy = 
-abs(self.__dy)\n if random.random() > 0.5:\n self.__dx = -self.__dx", "def move_aim(self):\n self.color = random.choice(COLORS)\n self.x += 3 * self.speed_x / FPS\n self.y += 3 * self.speed_y / FPS\n self.r -= 1\n self.draw_aim()\n if self.r <= 10:\n self.color = random.choice(COLORS)\n self.x = randint(100, 1000)\n self.y = randint(100, 800)\n self.r = randint(50, 100)\n self.speed_x = randint(-200, 200)\n self.speed_y = randint(-200, 200)\n if self.x >= 1100:\n self.speed_x = randint(-100, -10)\n if self.x <= 50:\n self.speed_x = randint(10, 100)\n if self.y >= 800:\n self.speed_y = randint(-100, -10)\n if self.y <= 50:\n self.speed_y = randint(10, 100)", "def go(self):\n # if we want to go to the right, we need to decrease x and increase y\n # if we want to go to the left, we need to increase x and decrease y\n h = random.randrange(2, 4)\n v = random.randrange(1, 3)\n if not bool(random.getrandbits(1)):\n h = - h\n self.velocity = [h, -v]\n self.explode.play()", "def update_ball(self):\n\t\tself.ball_x += self.velocity_x\n\t\tself.ball_y += self.velocity_y\n\t\tif self.ball_y < 0:\n\t\t\tself.ball_y = -self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_y > 1:\n\t\t\tself.ball_y = 2 - self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_x < 0:\n\t\t\tself.ball_x = -self.ball_x\n\t\t\tself.velocity_x = -self.velocity_x\n\t\tif self.ball_x < 1:\n\t\t\treturn 0\n\t\tif self.ball_y > self.paddle_y + State.paddle_height or self.ball_y < self.paddle_y:\n\t\t\treturn -1\n\t\tself.ball_x = 2 - self.ball_x\n\t\tself.velocity_x = random.uniform(-0.015, 0.015) - self.velocity_x\n\t\tif abs(self.velocity_x) < 0.03:\n\t\t\tself.velocity_x = 0.03 if self.velocity_x > 0 else -0.03\n\t\tself.velocity_y = random.uniform(-0.03, 0.03) - self.velocity_y\n\t\tself.velocity_x = max(min(self.velocity_x, 1.0), -1.0)\n\t\tself.velocity_y = max(min(self.velocity_y, 1.0), -1.0)\n\t\treturn 1", "def reset_ball(self, paddle_1, paddle_2):\r\n\r\n # Reset position and select new speeds\r\n self.x = 100\r\n self.y = np.random.randint(1, self.screen_Height-1)\r\n\r\n self.vx = np.random.randint(25, 30)\r\n self.vy = np.random.choice([-1, 1]) * np.random.randint(25, 30)\r\n\r\n\r\n p1_state, p2_state = self.state_observation(paddle_1, paddle_2)\r\n\r\n return p1_state, p2_state", "def _step_their_paddle(self):\n if random.random() < self.their_update_probability:\n if self.paddle_l.y < self.ball.y:\n if self.paddle_l.top_bound < self.top_bound:\n self.paddle_l.up()\n else:\n if self.paddle_l.bottom_bound > self.bottom_bound:\n self.paddle_l.down()", "def __set_ball_velocity(self):\n self.__dx = random.randint(1, MAX_X_SPEED) if random.random() < 0.5 else -random.randint(1, MAX_X_SPEED)\n self.__dy = INITIAL_Y_SPEED", "def run(self):\n MAX_ANGULAR_VELOCITY = 3.14/2 * 0.5\n\n # After 1.5 meters, we don't care about how far the ball is. 
It doesn't make us\n # approach it any faster.\n DISTANCE_THRESHOLD = 1.5\n \n # Factor to multiply thresholded distance by to get a maximum value equal to one\n DISTANCE_CONSTANT = 2/3.\n \n # Ball pursing thresholds\n MAX_FORWARD_VELOCITY = .75\n MIN_FORWARD_VELOCITY = 0.50\n \n if self.getTime() > 2.0:\n self.postSignal(\"restart\")\n \n ball = memory.world_objects.getObjPtr(core.WO_BALL)\n if not ball.seen:\n return\n \n # Reset the timer to act as a failsafe against losing the ball\n self.reset()\n \n # Ball in the bottom frame?\n if not ball.fromTopCamera:\n self.finish()\n \n # Ball coordinates\n ball_x, ball_y = ball.imageCenterX, ball.imageCenterY\n \n # Calculate forward velocity\n ball_distance = ball.visionDistance / 1000\n# print('Ball distance: {}'.format(ball_distance))\n ball_distance = min(ball_distance, DISTANCE_THRESHOLD)\n \n # Cache the ball distances\n PursueBall.ball_distances = (PursueBall.ball_distances + [ball_distance])[-30:]\n# print('Ball distances: {}'.format(PursueBall.ball_distances))\n slope = sum(PursueBall.ball_distances[-10:])/10 - sum(PursueBall.ball_distances[:10])/10\n# print('Slope: {} - {} = {}'.format(sum(PursueBall.ball_distances[-10:]) / 10,\n# sum(PursueBall.ball_distances[:10]) / 10,\n# slope))\n# print('Input: {}'.format(1 / slope if slope else 1))\n \n \n # Get the maximum velocity to be 1\n forward_vel = ball_distance * DISTANCE_CONSTANT\n forward_vel *= MAX_FORWARD_VELOCITY\n forward_vel = max(MIN_FORWARD_VELOCITY, forward_vel)\n# print('forward velocity: {}'.format(forward_vel))\n \n # Calculate sideways velocity\n angular_vel = -(ball_x-160.0) / 160.0 * MAX_ANGULAR_VELOCITY\n# print('Sideways Amount: {}'.format(angular_vel))\n \n commands.setWalkVelocity(forward_vel, 0, angular_vel)", "def advance(self):\n #x and y coordinates move and advance by adding the randomly generated velocity \n self.center.x += self.velocity.dx\n self.center.y += self.velocity.dy\n return", "def update(self):\n self.rect.right += self.raspspeed\n if self.rect.left >= SCREENRECT.right:\n self.rect.right = 0\n self.rect.top = self.randY()", "def step(self):\n\n self.ball_x = self.ball_x + self.vel_x\n self.ball_y = self.ball_y + self.vel_y\n if self.ball_y >= 480:\n self.vel_y *= -1\n elif self.ball_y <= 0:\n self.vel_y *= -1\n if self.ball_x >= 640:\n self.vel_x *= -1\n elif self.ball_x <= 0:\n self.vel_x *= -1", "def update(self):\r\n if self.rect.y > 500:\r\n self.rect.x = random.randrange(10,600) \r\n self.rect.y = random.randrange(-1000,-700)\r\n else:\r\n self.rect.y += 6", "def handle_collide(self):\r\n self.x = random.randrange(games.screen.width)\r\n self.y = random.randrange(games.screen.height)", "def main():\n pygame.init()\n\n # Set the height and width of the screen\n size = [SCREEN_WIDTH, SCREEN_HEIGHT]\n screen = pygame.display.set_mode(size)\n\n pygame.display.set_caption(\"Bouncing Balls\")\n\n # Loop until the user clicks the close button.\n done = False\n\n # Used to manage how fast the screen updates\n clock = pygame.time.Clock()\n\n ball_list = []\n ball_points = []\n\n for i in range(NUM_BALLS):\n ball_list.append(make_ball())\n\n # -------- Main Program Loop -----------\n while not done:\n # --- Event Processing\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n elif event.type == pygame.KEYDOWN:\n # Space bar! 
Spawn a new ball.\n if event.key == pygame.K_q:\n done = True\n if event.key == pygame.K_SPACE:\n ball_list.append(make_ball())\n if event.key == pygame.K_c:\n if len(ball_list) > 1:\n ball_list.pop()\n\n # --- Logic\n ball_points = []\n for ball in ball_list:\n if abs(ball.change_x) > 9:\n ball.change_x = random.choice([-1, 1])\n if abs(ball.change_y) > 9:\n ball.change_y = random.choice([-1, 1])\n ball_points.append([int(ball.x), int(ball.y)])\n # Move the ball's center\n ball.x += ball.change_x\n ball.y += ball.change_y\n\n # Bounce the ball if needed\n if ball.y >= SCREEN_HEIGHT - BALL_SIZE or ball.y <= BALL_SIZE:\n ball.change_y *= -1\n if ball.x >= SCREEN_WIDTH - BALL_SIZE or ball.x <= BALL_SIZE:\n ball.change_x *= -1\n if ball.y > SCREEN_HEIGHT + BALL_SIZE or ball.y < -BALL_SIZE or ball.x > SCREEN_WIDTH + BALL_SIZE or ball.x < -BALL_SIZE:\n ball.x = random.randrange(BALL_SIZE, SCREEN_WIDTH - BALL_SIZE)\n ball.y = random.randrange(BALL_SIZE, SCREEN_HEIGHT - BALL_SIZE)\n\n # --- Drawing\n # Set the screen background\n screen.fill(BLACK)\n\n # Draw the balls\n for ball in ball_list:\n pnts = closest_node(\n (int(ball.x), int(ball.y)), ball_points, LINE_NUM+1)\n pygame.draw.circle(\n screen, WHITE, [int(ball.x), int(ball.y)], BALL_SIZE)\n\n for line in range(0, len(pnts)):\n pygame.draw.line(screen, ball.color, ball_points[pnts[line]], [\n int(ball.x), int(ball.y)], 1)\n\n\n\n for _ in range(2):\n random.choice(ball_list).change_x += random.random() * \\\n random.choice([-1, 1])\n random.choice(ball_list).change_y += random.random() * \\\n random.choice([-1, 1])\n # --- Wrap-up\n # Limit to 60 frames per second\n clock.tick(60)\n\n # Go ahead and update the screen with what we've drawn.\n pygame.display.update()\n\n # Close everything down\n pygame.quit()", "def update(self):\r\n \r\n # Desplaza el bloque un píxel hacia abajo.\r\n if self.rect.y > 500:\r\n self.rect.x = random.randrange(10,600) \r\n self.rect.y = random.randrange(-300, -20) \r\n \r\n else:\r\n self.rect.y += 5\r\n \r\n # Si el bloque estuviera muy abajo, lo restablecemos a la parte superior de la pantalla.\r", "def __init__(self):\n # start x position\n self.x = random.randrange(size_x)\n # start y position\n self.y = - random.randrange(100)\n # drift x (amount of change each loop along the x axis)\n self.dx = random.randrange(3) - random.randrange(6)\n # drift y (amount of change each loop along the y axis)\n self.dy = random.randrange(1, 20) + random.randrange(4)\n # the size of the circular snowflake\n self.size = random.randrange(1, 4)\n # the colour of the snowflake (from sludgy grey to snowy white)\n c = random.randrange(200, 256)\n self.color = [c, c, c]", "def run(self):\n ball = memory.world_objects.getObjPtr(core.WO_BALL)\n if not ball.seen or not ball.fromTopCamera:\n return\n \n # Ball coordinates\n ball_x, ball_y = ball.imageCenterX, ball.imageCenterY\n \n # Calculate forward velocity\n ball_distance = ball.visionDistance / 1000\n print('Ball distance: {}'.format(ball_distance))\n ball_distance = min(ball_distance, DISTANCE_THRESHOLD)\n \n # Cache the ball distances\n PursueBall.ball_distances = (PursueBall.ball_distances + [ball_distance])[-30:]\n print('Ball distances: {}'.format(PursueBall.ball_distances))\n slope = sum(PursueBall.ball_distances[-10:])/10 - sum(PursueBall.ball_distances[:10])/10\n print('Slope: {} - {} = {}'.format(sum(PursueBall.ball_distances[-10:]) / 10,\n sum(PursueBall.ball_distances[:10]) / 10,\n slope))\n print('Input: {}'.format(1 / slope if slope else 1))\n \n \n # Get the 
maximum velocity to be 1\n forward_vel = ball_distance * DISTANCE_CONSTANT\n forward_vel *= MAX_FORWARD_VELOCITY\n forward_vel = max(MIN_FORWARD_VELOCITY, forward_vel)\n print('forward velocity: {}'.format(forward_vel))\n \n # Calculate sideways velocity\n angular_vel = -(ball_x-160.0) / 160.0 * MAX_ANGULAR_VELOCITY\n print('Sideways Amount: {}'.format(angular_vel))\n \n commands.setWalkVelocity(forward_vel, 0, angular_vel)", "def update(self):\r\n self.rect.y += 12\r\n\r\n if self.rect.y > 500:\r\n self.rect.y = random.randrange(-1000, -60)", "def make_ball(canvas):\n random_x = random.randint(0, canvas.get_canvas_width() - 2 * BALL_RADIUS)\n random_y = random.randint(0, canvas.get_canvas_height() - 2 * BALL_RADIUS)\n ball = canvas.create_oval(random_x, random_y, random_x + 2 * BALL_RADIUS, random_y + 2 * BALL_RADIUS)\n canvas.set_color(ball, 'blue')\n return ball", "def create_ball():\n balls.append(gen_ball())\n generate_velocity(balls)", "def __move_ball(self):\n while not self.__game_is_over():\n self.__ball.move(self.__dx, self.__dy)\n self.__handle_wall_collision()\n if self.__num_lives == 0:\n self.__game_over_picture()\n break\n elif self.__bricks_total == 0:\n self.__game_over_picture('You Win!!')\n break\n pause(FRAME_RATE)", "def goToBall(state):\n return goTo(state, state.ball_pos)", "def handle_collide(self):\n\t\tself.x = random.randrange(games.screen.width)\n\t\tself.y = random.randrange(games.screen.height)", "def movev2(self, abz_increase, abz_decrease, bbz_increase, bbz_decrease, north, south, west):\n if self._z >= 75:\n a = random.randint(1, 100)\n# print(str(a))\n if a <= abz_increase:\n self._z += 1\n if a > abz_increase and a <= (abz_increase + abz_decrease):\n self._z -= 1\n if a > (abz_increase + abz_decrease):\n self._z = self._z\n else:\n c = random.randint(1, 100)\n# print(str(c))\n if c <= bbz_increase:\n self._z += 1\n if c > bbz_increase and c <= (bbz_increase + bbz_decrease):\n self._z -= 1\n if c > (bbz_increase + bbz_decrease):\n self._z = self._z\n self._z -= 1\n \n b = random.randint(1, 100)\n# print(str(b))\n if b <= north:\n self._y += 1\n if b > north and b <= (north + south):\n self._y -= 1\n if b > (north + south) and b <= (north + south + west):\n self._x -= 1\n if b > (north + south + west):\n self._x += 1", "def __random_movement(self):\n\t\tself.__steps += 1 \t\t# Increment after every frame\n\t\t# When __steps greater than threshold reverse the direction\n\t\t# and set threshold to a new random value\n\t\tif self.__steps >= self.__threshold_steps:\t\n\t\t\tif self.direction == 'RIGHT':\n\t\t\t\tself.move_left()\n\t\t\t\tself.direction = 'LEFT'\n\t\t\telse:\n\t\t\t\tself.move_right()\n\t\t\t\tself.direction = 'RIGHT'\n\t\t\tself.__threshold_steps = random.randint(25,50)\n\t\t\tself.__steps = 0\n\t\t# Confines the Donkeys movement to within the boundary \n\t\tself.__check_boundary()", "def tick():\n move_balls(targets_speed)\n move_super_balls(targets_speed * 2)", "def run(self):\n memory.speech.say('Spinning!')\n ball = memory.world_objects.getObjPtr(core.WO_BALL)\n if ball.seen:\n self.finish()\n \n commands.setWalkVelocity(0, 0, -0.25)", "def _move_randomly(self):\n a, b = randint(0, len(self.state) - 1), randint(0, len(self.state) - 1)\n wiz1, wiz2 = self.state[a], self.state[b]\n self._swap_wizards(wiz1, wiz2)", "def __init__(self, screen_Size, paddle_Width):\r\n self.screen_Width, self.screen_Height = screen_Size\r\n\r\n # Setup x,y limits for ball position\r\n self.left_x = paddle_Width\r\n self.right_x = self.screen_Width - 
paddle_Width\r\n self.top_y = self.Radius\r\n self.bot_y = self.screen_Height - self.Radius\r\n\r\n self.x = self.screen_Width//2\r\n self.y = np.random.randint(self.Radius, self.screen_Height-self.Radius)\r\n\r\n self.vx = np.random.choice([-1, 1]) * np.random.randint(25, 30)\r\n self.vy = np.random.choice([-1, 1]) * np.random.randint(25, 30)\r\n\r\n # Ralley counter to see game progress\r\n self.rallies = 0", "def click_aim(self, pos):\n x, y = pos\n if (self.x - x) ** 2 + (self.y - y) ** 2 <= self.r ** 2:\n self.color = random.choice(COLORS)\n self.x = randint(100, 1000)\n self.y = randint(100, 800)\n self.r = randint(50, 100)\n self.speed_x = randint(-200, 200)\n self.speed_y = randint(-200, 200)\n return True\n else:\n return False", "def move_random(self, board: Board) -> None:\n rnd_move_idx = randint(0,4)\n # moves: stay, up, left, right, down\n moves = [[0,0], [0,-1], [-1,0], [1,0], [0,1]]\n\n if board.can_position_at(self.x + moves[rnd_move_idx][0], self.y + moves[rnd_move_idx][1]):\n board.set_element_at_position(0, self.x, self.y)\n self.x += moves[rnd_move_idx][0]\n self.y += moves[rnd_move_idx][1]\n board.set_element_at_position(3, self.x, self.y)\n print(\"Bomberman moved to [\", self.x, \",\", self.y, \"]\")", "def run(self):\n ball = memory.world_objects.getObjPtr(core.WO_BALL)\n if ball.seen:\n self.finish()\n \n commands.setWalkVelocity(0, 0, -0.25)", "def random_location(self):\r\n\r\n while True:\r\n pt = (random.uniform(self.worldbox.tl[0], self.worldbox.br[0]),\r\n random.uniform(self.worldbox.tl[1], self.worldbox.br[1]))\r\n if not self.is_wall(pt) and not self.is_target(pt):\r\n return pt", "def moveBall(self):\n \n #move ball one step\n vx = self._ball.get_vx()\n vy = self._ball.get_vy()\n self._ball.x = self._ball.x + vx\n self._ball.y = self._ball.y + vy\n \n #COLLISIONS\n if vy > 0:\n balltop = self._ball.y + BALL_DIAMETER\n if balltop >= GAME_HEIGHT:\n self._ball.set_vy(-vy)\n if (self._getCollidingObject() != None and\n self._getCollidingObject() != self._paddle):\n self._ball.set_vy(-vy)\n self._wall.removebrick(self._getCollidingObject())\n if vy < 0:\n ballbottom = self._ball.y\n if ballbottom <= 0:\n self._lostlife = True\n if self._getCollidingObject() == self._paddle:\n self._ball.set_vy(-vy)\n if (self._getCollidingObject() != None and\n self._getCollidingObject() != self._paddle):\n self._ball.set_vy(-vy)\n self._wall.removebrick(self._getCollidingObject())\n if vx > 0:\n ballright = self._ball.x + BALL_DIAMETER\n if ballright >= GAME_WIDTH:\n self._ball.set_vx(-vx)\n if vx < 0:\n ballleft = self._ball.x\n if ballleft <= 0:\n self._ball.set_vx(-vx)", "def handle_collide(self): # New for Rev2.0\r\n self.x = random.randrange(games.screen.width)\r\n self.y = random.randrange(games.screen.height)\r\n Sound.play()", "def __init__(self):\n #random.uniform(1, 5) = random float values from 1-5 which will determine the velocity \n self.dx = random.uniform(1, 5)\n self.dy = random.uniform(1, 5)", "def ball_generate_velocity(direction):\n x_comp = random.randrange(120,240) / 60.0\n y_comp = -(random.randrange(60,180) / 60.0)\n if direction != RIGHT:\n x_comp = -x_comp\n return [x_comp, y_comp]", "def move_to_random_pos(self):\n newpos = [(np.random.rand() - 0.5) * 0.1,\n (np.random.rand() - 0.5) * 0.1,\n np.random.rand() * 0.9 + 0.2]\n self.move_to(newpos)", "def step(self, crowd):\n\n for boid in crowd:\n random_int = random.randint(0, 5)\n\n # if random_int > 4:\n # random_int = random.randint(0, 5)\n # if random_int > 4:\n # for i in range (1, 500):\n # 
goalX, goalY = self.goals[boid.goalNr]\n # x, y = boid.position\n\n # if (goalX + 10 >= x >= goalX - 10) and (goalY + 10 >= y >= goalY - 10):\n # boid.reached_goal(goalX + 10, goalY + 10)\n\n # dx = random.randint(0, self.width) - x\n # dy = random.randint(0, self.height) - y\n\n # # Unit vector in the same direction\n # distance = math.sqrt(dx * dx + dy * dy)\n # dx /= distance\n # dy /= distance\n\n # # And now we move:\n # x += dx\n # y += dy\n\n # boid.set_goal(dx, dy)\n\n # boid.position += boid.velocity\n #else:\n # boid.position += boid.velocity\n \n # Vector from me to cursor\n\n\n goalX, goalY = self.goals[boid.goalNr]\n x, y = boid.position\n\n if (goalX + 10 >= x >= goalX - 10) and (goalY + 10 >= y >= goalY - 10):\n boid.reached_goal(goalX + 10, goalY + 10)\n\n else:\n dx = goalX - x\n dy = goalY - y\n\n # Unit vector in the same direction\n # distance = np.linalg.norm(dx * dx + dy * dy)\n distance = math.sqrt(dx * dx + dy * dy)\n dx /= distance\n dy /= distance\n\n # And now we move:\n x += dx\n y += dy\n\n boid.set_goal(dx, dy)\n\n boid.position += boid.velocity", "def update(self):\r\n # Desplaza el bloque un píxel hacia abajo. s\r\n if self.rect.left < 50 or self.rect.right > 600:\r\n self.speed[0] = -self.speed[0]\r\n if self.rect.top < 0 or self.rect.bottom > 200:\r\n self.speed[1] = -self.speed[1]\r\n self.rect.move_ip((self.speed[0], self.speed[1])) \r\n if self.rect.y > 500:\r\n self.rect.x = random.randrange(10,600) \r\n self.rect.y = random.randrange(-400,-200)\r\n self.rect.y += 5", "def move_ball():\n print(\"Current position: ({},{}). \"\n \"Direction: ({},{}). Value: {}\".format(shared.ball_yy, shared.ball_xx,\n shared.direction[0], shared.direction[1],\n map_data[shared.ball_yy][shared.ball_xx]))\n if does_apply_direction():\n shared.ball_yy += shared.direction[0]\n shared.ball_xx += shared.direction[1]\n else:\n pass\n # shared.ball_yy = shared.ball_yy + shared.direction[0] \\\n # if default_positions.get(collision)[0] == None else default_positions.get(collision)[0]\n # shared.ball_xx = shared.ball_xx + shared.direction[1] \\\n # if default_positions.get(collision)[1] == None else default_positions.get(collision)[1]", "def resetball(self):\n self._ball = Ball()", "def move_ball(self, from_point, to_point):\n color = self.grid.cells[from_point].ball_color\n self.grid.cells[to_point].place_ball(color)\n self.grid.cells[from_point].button.get_child().destroy()\n self.grid.cells[from_point].is_ball = False\n self.grid.cells[from_point].ball_color = None\n # sprawdzamy czy jest 5 kul w danej orientacji\n self.grid.check_balls()\n # sprawdzamy czy uzytkownik nie zapelnił całej planszy\n self.if_player_lose()\n # losujemy i ustawiamy kolejne kule\n self.grid.place_balls(BALLS_PER_CLICK)\n # sprawdzamy czy jest 5 kul w danej orientacji\n self.grid.check_balls()", "def bonus_food(self):\n self.penup()\n self.shape(\"turtle\")\n self.color(\"red\")\n self.x_cordinates = random.randint(-210, 210)\n self.y_cordinates = random.randint(-210, 210)\n self.goto(self.x_cordinates, self.y_cordinates)\n print(f\"This Is Bonus Food {self.x_cordinates} and {self.y_cordinates}\")", "def regenerate(self, random_state):\n self._walls_body.geom.clear()\n corridor_width = variation.evaluate(self._corridor_width,\n random_state=random_state)\n corridor_length = variation.evaluate(self._corridor_length,\n random_state=random_state)\n self._current_corridor_length = corridor_length\n self._current_corridor_width = corridor_width\n\n self._ground_plane.pos = [corridor_length / 2, 0, 0]\n 
self._ground_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, corridor_width / 2, 1]\n\n self._left_plane.pos = [\n corridor_length / 2, corridor_width / 2, _SIDE_WALL_HEIGHT / 2]\n self._left_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._right_plane.pos = [\n corridor_length / 2, -corridor_width / 2, _SIDE_WALL_HEIGHT / 2]\n self._right_plane.size = [\n corridor_length / 2 + _CORRIDOR_X_PADDING, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._near_plane.pos = [\n -_CORRIDOR_X_PADDING, 0, _SIDE_WALL_HEIGHT / 2]\n self._near_plane.size = [corridor_width / 2, _SIDE_WALL_HEIGHT / 2, 1]\n\n self._far_plane.pos = [\n corridor_length + _CORRIDOR_X_PADDING, 0, _SIDE_WALL_HEIGHT / 2]\n self._far_plane.size = [corridor_width / 2, _SIDE_WALL_HEIGHT / 2, 1]", "def random(self):\n adj = self.adjacent()\n self.switch(random.choice([pos for pos in adj if self.in_grid(pos) and pos != self.prev]))", "def retractionPoint(ball_loc, point, direction, t, delta = 0.999 ):\n # ball radius is given in meters\n ball_radius = 33.42\n force_direction = direction\n #print \"dir\", direction\n #print ball_radius\n # where to kick the ball\n contact_point = ball_loc - (direction * ball_radius) \n (retract_distance ,output) = g(point, contact_point, force_direction, ball_loc, t)\n \n return contact_point, (1 - delta) * retract_distance + delta * (output)", "def handle_collide(self):\n self.x = fuck.randrange(games.screen.width)\n self.y = fuck.randrange(games.screen.height)", "def handle_collide(self):\n self.x = fuck.randrange(games.screen.width)\n self.y = fuck.randrange(games.screen.height)", "def handle_collide(self):\n self.x = fuck.randrange(games.screen.width)\n self.y = fuck.randrange(games.screen.height)", "def move(self, direction):\n newx = self.x\n newy = self.y\n newy += random.randint(-1, 1)\n newx += random.randint(-1, 1)\n if self.tmap.contents[newy][newx] != '#':\n self.x = newx\n self.y = newy", "def hit_paddle(self):\n pass\n\n #Implement if collision with paddle is detected\n\n #Add randomness to how ball direction will change and return value", "def reset_ball(self):\n self.window.add(self.ball,x=(self.window.width - self.radius * 2) / 2,\n y=(self.window.height - self.radius * 2) / 2)\n self.window.add(self.start_label,x=self.window.width/5,y=self.window.height*0.666)\n self.__dx = 0\n self.__dy = 0", "def ball_move(self):\n self.ball.move(self.__dx, self.__dy)", "def gen_ball():\n ball_radius = randint(30, 80)\n ball_x = randint(ball_radius, screen_width - ball_radius)\n ball_y = randint(ball_radius, screen_height - ball_radius)\n ball_color = COLORS[randint(0, len(COLORS) - 1)]\n return [ball_color, ball_x, ball_y, ball_radius]", "def move():\n if randrange(40) == 0:\n y = randrange(-150, 150)\n target = vector(200, y)\n targets.append(target)\n\n for target in targets: # velocidad de los targets\n target.x -= target_speed\n\n if inside(ball):\n speed.y -= 0.35\n ball.move(speed)\n\n dupe = targets.copy()\n targets.clear()\n\n for target in dupe:\n if abs(target - ball) > 13:\n targets.append(target)\n\n for target in targets:\n if not inside(target):\n target.x = 200\n\n draw()\n\n ontimer(move, 50)", "def disappear(self):\n x_cor = random.randint(-270, 270)\n y_cor = random.randint(-270, 270)\n self.goto(x_cor, y_cor)", "def add_ball():\n global speed\n new_ball = Ball()\n balls.append(new_ball)\n if speed > 10: speed -= 5\n canvas.after(2500, add_ball)", "def click(self, pos):\n x, y = pos\n if (self.x - x) ** 2 + (self.y - y) ** 2 <= self.r ** 
2:\n self.color = random.choice(COLORS)\n self.x = randint(100, 1000)\n self.y = randint(100, 800)\n self.r = randint(30, 50)\n self.speed_x = randint(-100, 100)\n self.speed_y = randint(-100, 100)\n return True\n else:\n return False", "def main():\n pygame.init() # Prepare the pygame module for use\n surfaceSize = 480 # Desired physical surface size, in pixels.\n\n clock = pygame.time.Clock() #Force frame rate to be slower\n\n # Create surface of (width, height), and its window.\n mainSurface = pygame.display.set_mode((surfaceSize, surfaceSize))\n\n # Create the ball object using it's position, size and color\n redBall = Ball([50,100], 30, (255, 0, 0)) # A color is a mix of (Red, Green, Blue)\n redBall.setSpeed(1)\n redBall.setDirection(0)\n \n while True:\n ev = pygame.event.poll() # Look for any event\n if ev.type == pygame.QUIT: # Window close button clicked?\n break # ... leave game loop\n elif ev.type == pygame.MOUSEBUTTONUP:\n redBall.setDirectionToPoint(ev.pos)\n pass\n\n # Update your game objects and data structures here...\n\n\n # We draw everything from scratch on each frame.\n # So first fill everything with the background color\n mainSurface.fill((0, 200, 255))\n\n #Move the circle\n redBall.update()\n # Draw the circle on the surface\n redBall.draw(mainSurface)\n \n # Now the surface is ready, tell pygame to display it!\n pygame.display.flip()\n \n clock.tick(60) #Force frame rate to be slower\n\n pygame.quit() # Once we leave the loop, close the window.", "def during(self, robot):\n self.counter += 1\n randint = random.randint(1, 5)\n\n if 1 <= randint <= 4 and not robot.is_blocked():\n robot.forward()\n else:\n robot.start_rotate()", "def update(self):\n if self.iteration > self.rate:\n self.iteration = 0\n heading = (random.random() * 180) - 90\n self.speed = 0.1\n if heading >= 0:\n self.heading = heading\n else:\n self.heading = 360 + heading\n self.iteration += 1\n self.setVector(self.speed, self.heading)", "def step(self):\n prey_neighbors = [x for x in self.model.space.get_neighbors(self.pos, self.vision+ 20, False) if isinstance(x,boid.Boid)]\n nearby_obstacles = [x for x in self.model.space.get_neighbors(self.pos, self.vision + 15, False) if isinstance(x, Obstacle)]\n self.velocity += (self.avoid_collision(nearby_obstacles) * self.collision_separation +\n self.attack(prey_neighbors)) / 2\n self.velocity /= np.linalg.norm(self.velocity)\n new_pos = self.pos + self.velocity * self.speed\n self.model.space.move_agent(self, new_pos)\n self.eat(prey_neighbors)\n\n\n # update for drawing\n self.update()", "def main():\r\n\r\n # create a ball at the initial position\r\n ball.filled = True\r\n ball.fill_color = 'black'\r\n window.add(ball, x=START_X, y=START_Y)\r\n\r\n # Click to trigger the animation\r\n onmouseclicked(ball_dropping)", "def click_car(self, pos):\n a = self.h / 50\n x, y = pos\n if ((x > self.x) and (x < self.x + 260 * a) and (y > self.y - 40 * a)\n and (y < self.y + self.h + 25 * a)):\n self.x = randint(200, 500)\n self.y = randint(200, 500)\n self.h = randint(10, 50)\n self.dir = 1\n self.speed_x = randint(10, 200)\n return True\n else:\n return False", "def random_move(turtle, distance):\n angle = uniform(-90,90)\n d = uniform(0,distance)\n turtle.left(angle)\n turtle.forward(d)", "def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5", "def step_empty(self):\n if random.random() < self.world.p:\n self.set_state(\"green\")", "def run(self):\n while self.ball_size > 0:\n self.deflate() #balloon deflates\n 
sleep(self.pace_deflate) #delay\n self.ball_size = self.ball_size - self.size_decr\n #the radius decreases by one", "def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)", "def ml_loop(side: str):\n\n # === Here is the execution order of the loop === #\n # 1. Put the initialization code here\n ball_served = False\n blocker_last_x = 0\n\n class Pred:\n pred = 100\n blocker_pred_x = 0\n last_command = 0\n blocker_vx = 0\n\n \n def move_to(player, pred) : #move platform to predicted position to catch ball \n if player == '1P':\n if scene_info[\"platform_1P\"][0]+20 > (pred-10) and scene_info[\"platform_1P\"][0]+20 < (pred+10): return 0 # NONE\n elif scene_info[\"platform_1P\"][0]+20 <= (pred-10) : return 1 # goes right\n else : return 2 # goes left\n else :\n if scene_info[\"platform_2P\"][0]+20 > (pred-10) and scene_info[\"platform_2P\"][0]+20 < (pred+10): return 0 # NONE\n elif scene_info[\"platform_2P\"][0]+20 <= (pred-10) : return 1 # goes right\n else : return 2 # goes left\n\n def ml_loop_for_1P(): \n # ball slicing\n if scene_info[\"ball_speed\"][1] > 0 and (scene_info[\"ball\"][1]+scene_info[\"ball_speed\"][1]) >= 415 and Pred.last_command == 0:\n print(\"------\")\n ball_x = scene_info[\"ball\"][0]\n ball_y = scene_info[\"ball\"][1]\n ball_vx = scene_info[\"ball_speed\"][0]\n ball_slice_vx = scene_info[\"ball_speed\"][0]+np.sign(scene_info[\"ball_speed\"][0])*3\n ball_vy = scene_info[\"ball_speed\"][1] \n blocker_x = scene_info['blocker'][0] + Pred.blocker_vx\n \n y = abs((415 - ball_y) // ball_vy)\n pred_ball_1P = ball_x + ball_vx * y\n\n y = abs((415 - 260) // ball_vy)\n pred_ball_blocker = pred_ball_1P + ball_slice_vx * y\n bound = pred_ball_blocker // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n pred_ball_blocker = pred_ball_blocker - bound*200 \n else :\n pred_ball_blocker = 200 - (pred_ball_blocker - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n pred_ball_blocker = abs(pred_ball_blocker - (bound+1) *200)\n else :\n pred_ball_blocker = pred_ball_blocker + (abs(bound)*200)\n \n y = abs((415 - 260) // ball_vy)\n Pred.blocker_pred_x = blocker_x + Pred.blocker_vx * y \n if Pred.blocker_pred_x < 0: Pred.blocker_pred_x = abs(Pred.blocker_pred_x)\n elif Pred.blocker_pred_x > 170: Pred.blocker_pred_x = 170 - (Pred.blocker_pred_x - 170)\n \n if pred_ball_blocker >= Pred.blocker_pred_x-10 and pred_ball_blocker < Pred.blocker_pred_x+40:\n print(\"slice will hit blicker\")\n # don't slice \n # use origin ball vx to predict will hit blocker or not\n # if will hit blicker let ball go reverse direction\n y = abs((415 - 260) // ball_vy)\n pred_ball_blocker = pred_ball_1P + ball_vx * y\n bound = pred_ball_blocker // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n pred_ball_blocker = pred_ball_blocker - bound*200 \n else :\n pred_ball_blocker = 200 - (pred_ball_blocker - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n pred_ball_blocker = abs(pred_ball_blocker - (bound+1) *200)\n else :\n pred_ball_blocker = pred_ball_blocker + (abs(bound)*200)\n\n if pred_ball_blocker >= Pred.blocker_pred_x-10 and pred_ball_blocker < Pred.blocker_pred_x+40:\n print(\"will hit blocker, hit reversed direction\")\n if scene_info[\"ball_speed\"][0] > 0: return 2\n else: return 1\n else: \n print(\"will not hit blicker, 
do nothing\")\n return 0\n else:\n # slice\n print(\"slice will not hit blocker\")\n if scene_info[\"ball_speed\"][0] > 0: return 1\n else: return 2\n\n elif scene_info[\"ball_speed\"][1] > 0 : # 球正在向下 # ball goes down\n x = ( scene_info[\"platform_1P\"][1]-scene_info[\"ball\"][1] ) // scene_info[\"ball_speed\"][1] # 幾個frame以後會需要接 # x means how many frames before catch the ball\n Pred.pred = scene_info[\"ball\"][0]+(scene_info[\"ball_speed\"][0]*x) # 預測最終位置 # pred means predict ball landing site \n bound = Pred.pred // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n Pred.pred = Pred.pred - bound*200 \n else :\n Pred.pred = 200 - (Pred.pred - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n Pred.pred = abs(Pred.pred - (bound+1) *200)\n else :\n Pred.pred = Pred.pred + (abs(bound)*200)\n return move_to(player = '1P',pred = Pred.pred)\n \n else : # 球正在向上 # ball goes up\n return move_to(player = '1P',pred = 100)\n\n\n\n def ml_loop_for_2P(): # as same as 1P\n if scene_info[\"ball_speed\"][1] > 0 : \n return move_to(player = '2P',pred = 100)\n else : \n x = ( scene_info[\"platform_2P\"][1]+30-scene_info[\"ball\"][1] ) // scene_info[\"ball_speed\"][1] \n pred = scene_info[\"ball\"][0]+(scene_info[\"ball_speed\"][0]*x) \n bound = pred // 200 \n if (bound > 0):\n if (bound%2 == 0):\n pred = pred - bound*200 \n else :\n pred = 200 - (pred - 200*bound)\n elif (bound < 0) :\n if bound%2 ==1:\n pred = abs(pred - (bound+1) *200)\n else :\n pred = pred + (abs(bound)*200)\n return move_to(player = '2P',pred = pred)\n\n # 2. Inform the game process that ml process is ready\n comm.ml_ready()\n\n # 3. Start an endless loop\n while True:\n # 3.1. Receive the scene information sent from the game process\n scene_info = comm.recv_from_game()\n\n # 3.2. 
If either of two sides wins the game, do the updating or\n # resetting stuff and inform the game process when the ml process\n # is ready.\n if scene_info[\"status\"] != \"GAME_ALIVE\":\n # Do some updating or resetting stuff\n ball_served = False\n\n # 3.2.1 Inform the game process that\n # the ml process is ready for the next round\n comm.ml_ready()\n continue\n\n # 3.3 Put the code here to handle the scene information\n\n # 3.4 Send the instruction for this frame to the game process\n if not ball_served:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"SERVE_TO_LEFT\"})\n blocker_last_x = scene_info[\"blocker\"][0]\n Pred.last_command = 0\n ball_served = True\n else:\n if side == \"1P\":\n Pred.blocker_vx = scene_info[\"blocker\"][0] - blocker_last_x\n if scene_info[\"blocker\"][0] == 0: Pred.blocker_vx = 5\n elif scene_info[\"blocker\"][0] == 170: Pred.blocker_vx = -5\n command = ml_loop_for_1P()\n blocker_last_x = scene_info[\"blocker\"][0]\n Pred.last_command = command\n else:\n command = ml_loop_for_2P()\n\n if command == 0:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"NONE\"})\n elif command == 1:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"MOVE_RIGHT\"})\n else :\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"MOVE_LEFT\"})", "def specific_reset(self) -> None:\n self.old_velocity = 0.\n self.agent.specific_reset()\n max_dist_to_origin = 4.\n min_dist_to_origin = 2\n\n agent_pos = np.random.uniform(-max_dist_to_origin, max_dist_to_origin, 2)\n positioning_done = False\n while not positioning_done:\n agent_pos = np.random.uniform(-max_dist_to_origin,\n max_dist_to_origin, 2)\n if min_dist_to_origin <= np.linalg.norm(agent_pos) <= max_dist_to_origin:\n positioning_done = True\n\n # adjust the height of agent\n agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n\n # set agent orientation in forward run direction\n y = angle2pos(self.agent.get_position(), np.zeros(3)) + np.pi / 2\n y += self.agent.init_rpy[2]\n quaternion = self.bc.getQuaternionFromEuler([0, 0, y])\n self.agent.set_orientation(quaternion)", "def move_ball(self):\r\n self.canvas.move(self.ball, (self.x_speed * self.speed), (self.y_speed * self.speed))\r\n (leftPos, topPos, rightPos, bottomPos) = self.canvas.coords(self.ball)\r\n if leftPos <= 0 or rightPos >= 400:\r\n self.x_speed = -self.x_speed\r\n if topPos <= 0 or bottomPos >= 400:\r\n self.y_speed = -self.y_speed", "def set_velocity(self):\n self.__dx = random.randint(1, MAX_X_SPEED)\n self.__dy = INITIAL_Y_SPEED\n if random.random() > 0.5:\n self.__dx = -self.__dx\n if random.random() > 0.5:\n self.__dy = -self.__dy", "def draw():\n ant.move(aim)\n ant.x = wrap(ant.x)\n ant.y = wrap(ant.y)\n\n aim.move(random() - 0.5)\n aim.rotate(random() * 10 - 5)\n\n clear()\n goto(ant.x, ant.y)\n dot(4)\n\n ontimer(draw, 100)", "def move(self):\n a = random.randint(0, len(self.state) - 1)\n b = random.randint(0, len(self.state) - 1)\n self.state[a], self.state[b] = self.state[b], self.state[a]\n\n # change type of restoration for one state\n c = random.choice(self.restoration_types)\n self.state[a] = (self.state[a][0], c)", "def serveBall(self):\n self._ball = Ball(GAME_WIDTH/2, BALL_HEIGHT, BALL_VY, BALL_DIAMETER, colormodel.BLUE)\n self.draw()", "def reset_pos(self):\r\n self.rect.x = random.randrange(50, 640)\r\n self.rect.y = random.randrange(-300, -80)", "def setRandDirection(self):\n phi = 2*math.pi*random.random()\n u = 
2*random.random() - 1\n v = math.sqrt(1-u*u)*math.cos(phi)\n w = math.sqrt(1-u*u)*math.sin(phi)\n self.direction = (u,v,w)", "def draw_ball():\n\n draw_circle(ball, 'yellow')", "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def update(self):\n if self.left < 0 or self.right > games.screen.width:\n self.dx = -self.dx\n elif random.randrange(self.odds_change) == 0:\n self.dx = -self.dx\n \n self.fire_bullet()", "def run(self):\n move_cmd = Twist()\n move_cmd.linear.x = 0\n move_cmd.angular.z = 0\n\n while not rospy.is_shutdown():\n # bump logic as previous psets\n if self.bump:\n self.bump = False\n # move backwards\n move_cmd.linear.x = LIN_SPEED * -1\n for i in range(5):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n # turn randomly in a random direction\n move_cmd.linear.x = 0\n move_cmd.angular.z = ROT_SPEED * ((-1)**random.randint(1,2))\n\n if self.bump == 0:\n move_cmd.angular.z = ROT_SPEED * (-1)\n elif self.bump == 2:\n move_cmd.angular.z = ROT_SPEED\n\n for i in range(random.randint(5,15)):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n move_cmd.angular.z = 0\n # if somethin in the screen is really close\n elif self.min_val < MIN_THRESHOLD:\n # make sure it's not the sock/leg warmer, and is actually an obstacle\n if self.obstacle_x <= self.x or self.obstacle_x >= self.x + self.w or abs(self.min_val - self.dist) > 0.1:\n move_cmd.linear.x = 0\n # turn away\n if self.obstacle_x > 320:\n move_cmd.angular.z = ROT_SPEED / 2\n else:\n move_cmd.angular.z = -ROT_SPEED / 2\n # self.min_val = 100\n for i in range(10):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n self.last_move = rospy.Time.now()\n else:\n rospy.loginfo(\"Perimeter \" + str(self.perimeter_size))\n rospy.loginfo(\"Distance is \" + str(self.dist))\n\n # normalize angle error to rot speed\n ang_error_norm = -float(self.ang_error) / 100\n\n # set min and max rot speed\n if ang_error_norm < -ROT_SPEED:\n ang_error_norm = -ROT_SPEED\n elif ang_error_norm > ROT_SPEED:\n ang_error_norm = ROT_SPEED\n\n move_cmd.angular.z = ang_error_norm\n\n if RACE == False:\n # normalize dist error to lin speed\n self.dist_error = self.dist - 0.5\n dist_error_norm = float(self.dist_error) / 2\n\n if dist_error_norm < 0:\n # if NaN (self.dist gets set to -1)\n if dist_error_norm > -0.7:\n self.lost = 0\n # if too close\n else:\n self.lost += 1\n # if it's been more than 2 seconds\n if rospy.Time.now() > self.last_move + rospy.Duration(2):\n dist_error_norm = 0\n # if been lost for a while rotate and beep\n if self.lost > 20:\n move_cmd.angular.z = ROT_SPEED / 4\n self.beep.publish(4)\n else:\n # continue as previous\n dist_error_norm = self.last_speed\n else:\n # set max lin speed\n if dist_error_norm > LIN_SPEED:\n dist_error_norm = LIN_SPEED\n\n # reset lost stats\n self.lost = 0\n self.last_speed = dist_error_norm\n self.last_move = rospy.Time.now()\n\n move_cmd.linear.x = dist_error_norm\n else:\n move_cmd.linear.x = LIN_SPEED\n\n self.cmd_vel.publish(move_cmd)", "def move_coarse(self, direction, count=1):\n if self._direction != direction and self.simulate_backlash:\n self._direction = direction\n backlash_offset = randint(-maximum_backlash, maximum_backlash)\n self._move(direction, 1, 8 + backlash_offset)\n self._move(direction, count - 1, 8)\n self.backlash_count += 1\n else:\n self._direction = direction\n 
self._move(direction, count, 8)", "def polago(x , y, size, n, clr):\n # turtle setting\n turtle.screensize(1000)\n turtle.speed(30)\n turtle.setheading(0)\n turtle.color(clr)\n turtle.fillcolor(clr) \n turtle.goto(x, y)\n # draw random polagon \n while n > 1:\n # make random polagon\n turtle.pendown()\n turtle.begin_fill()\n # random size\n s = random.randint(10, size)\n a = random.randint(3, 8)\n for i in range (a):\n turtle.forward(s)\n turtle.left(360 / a) \n turtle.end_fill()\n n -= 1\n turtle.penup()\n turtle.goto(random.uniform(-300, 300), random.uniform(-300, 300))\n\n turtle.done", "def spinAround(self):", "def reset_pos(self):\n self.rect.y = random.randrange(-1000, -10)\n self.rect.x = random.randrange(0, WIDTH)", "def make_ball(id):\n ball = Ball()\n\n ball.id = id\n\n # Size of the ball\n # ball.size = random.randrange(10, 30)\n ball.size = 10\n\n # Starting position of the ball.\n # Take into account the ball size so we don't spawn on the edge.\n ball.x = random.randrange(ball.size, WINDOW_WIDTH - ball.size)\n ball.y = random.randrange(ball.size, WINDOW_HEIGHT - ball.size)\n\n # Speed and direction\n ball.speed(DEFAULT_SPEED)\n\n # Color\n ball.color = (0, 0, random.randrange(128,256))\n\n return ball" ]
[ "0.6942022", "0.6932368", "0.6906408", "0.6877452", "0.685227", "0.6844614", "0.68308836", "0.68072754", "0.6727945", "0.66941935", "0.6596098", "0.65557665", "0.6538567", "0.65164626", "0.64776844", "0.64438", "0.63699085", "0.6309126", "0.6304336", "0.62798154", "0.62781817", "0.62281877", "0.6204747", "0.6189616", "0.6169147", "0.6148562", "0.61464334", "0.6128239", "0.6113658", "0.61059517", "0.61015016", "0.60987765", "0.60874486", "0.60603845", "0.60562754", "0.6052552", "0.6050209", "0.6043956", "0.6040296", "0.6039989", "0.5992535", "0.59783405", "0.5977584", "0.59528", "0.5935689", "0.58911794", "0.5877429", "0.58698386", "0.5853145", "0.58514667", "0.5848332", "0.58441114", "0.5841686", "0.5837163", "0.58272576", "0.58175087", "0.57985294", "0.5797367", "0.5790152", "0.579003", "0.579003", "0.579003", "0.57895565", "0.5774942", "0.57740104", "0.5765234", "0.5762197", "0.5761108", "0.57572496", "0.57555383", "0.5748795", "0.5747288", "0.5724822", "0.5716387", "0.571341", "0.57116413", "0.57109004", "0.57090276", "0.5701553", "0.57013476", "0.5690279", "0.5673341", "0.56720686", "0.56690556", "0.5655772", "0.56356436", "0.56330675", "0.56302434", "0.56246394", "0.56196463", "0.56137365", "0.561225", "0.5599687", "0.55982316", "0.55957556", "0.55951375", "0.55924666", "0.5583983", "0.5577835", "0.55776197" ]
0.5636123
85
Redundant, implemented to demonstrate _find
def find(self, cargo):
        return self._find(cargo).cargo
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find(self, p):\n pass", "def find(self, sub) -> int:\n pass", "def find(self):\n raise NotImplementedError", "def rfind(self, sub) -> int:\n pass", "def testFind(self):\n\n N = randint(20,150)\n s = SplayTree()\n for i in xrange(N):\n self.s.insert(i,1)\n for i in xrange(N):\n a = self.s.find(i)\n self.assertTrue(a)\n N-=a\n\n self.assertEqual(N, 0)", "def find(f, seq):\n for item in seq:\n if f(item): \n return item", "def search(self, find_val):\n return False", "def _find(xs, predicate):\n for x in xs:\n if predicate(x):\n return x\n return None", "def find(self, egg):", "def find(iteratee, seq):\n for item in filter(iteratee, seq):\n return item", "def find(self, a):\n while a != self.ids[a]:\n a = self.ids[a]\n return a", "def find(f, seq):\n\tfor num,item in enumerate(seq):\n\t\tif f(item): return num\n\treturn -1", "def find(function, seq):\r\n for item in seq:\r\n if function(item): \r\n return item\r\n return None", "def main():\n print(search(range(1, 21, 2), 9)) # will print 4\n print(search(range(1, 21, 2), 0)) # will print -1", "def search(self, word):", "def _gen_find(subseq, generator):\n if isinstance(subseq, bytes):\n subseq = bytearray(subseq)\n subseq = list(subseq)\n pos = 0\n saved = []\n\n for c in generator:\n saved.append(c)\n if len(saved) > len(subseq):\n saved.pop(0)\n pos += 1\n if saved == subseq:\n return pos\n return -1", "def test_suite():\n ss = \"Python strings have some interesting methods.\"\n test(find(ss, \"s\") == 7)\n test(find(ss, \"s\", 7) == 7)\n test(find(ss, \"s\", 8) == 13)\n test(find(ss, \"s\", 8, 13) == -1)\n test(find(ss, \".\") == len(ss) - 1)\n \n test(ss.find(\"s\") == 7)\n test(ss.find(\"ave\", 3, 20) == 16)", "def compare():\n sequence, query = get_sequence_and_query(arguments['<input>'])\n\n start = time()\n for i in range(1000):\n for x in findall(sequence, query):\n pass\n print('''It took the string.find iteration method {0} seconds to \\\ncomplete 1000 repetitions\\n'''.format(time() - start))\n\n start = time()\n for i in range(1000):\n for x in find_by_startswith(sequence, query):\n pass\n print('''It took the find_by_startswith iteration method {0} seconds to \\\ncomplete 1000 repetitions\\n'''.format(time() - start))", "def find(self, *args):\n return _ida_hexrays.hexwarns_t_find(self, *args)", "def find(self, *args):\n self.find_count += 1\n self.total_ops += 1\n return super(BulkOperator, self).find(*args)", "def find(self, *args):\n return _ida_hexrays.qvector_ccase_t_find(self, *args)", "def list_find(f, items):\n for i, x in enumerate(items):\n if f(x):\n return i\n return None", "def test_find_string():\n assert pi_finder.find_string('j', hex_dict) == (5, 6)\n assert pi_finder.find_string('l', hex_dict) == (61, 62)\n assert pi_finder.find_string('c', hex_dict) == (72, 73)", "def find(self,i):\n if self.par[i]!=self.par[self.par[i]]: \n self.par[i] = self.find(self.par[i])\n return self.par[i]", "def search():\n pass", "def find_all(self):", "def find(f, seq):\n for item in seq:\n if f(item): \n return item\n\n \"\"\"\n Example usage of iterate: \n >>> c = []; \\\n c.append(node(0.5,1,'a')); \\\n c.append(node(0.25,2,'b')); \\\n c.append(node(0.125,3,'c')); \\\n c.append(node(0.125,4,'d')); \\\n iterate(c) ; reportcode(c) # doctest: +NORMALIZE_WHITESPACE\n #Symbol Count Codeword\n a (0.5) 1\n b (0.25) 01\n c (0.12) 000\n d (0.12) 001\n \"\"\"", "def find(self, sub, start=0, end=None):\n return find(self, sub, start, end)", "def find(function, iterable):\n for x in iterable:\n if function(x) == True:\n return x", 
"def find(self, *args):\n return _ida_hexrays.qvector_carg_t_find(self, *args)", "def lookup():", "def findInstance(self, text, term):\n\t\tindexList = set()\n\t\tindex = 0\n\t\ttext = text.upper()\n\t\tterm = \" {0} \".format(term.upper())\n\n\t\t# CALL THESE JUST ONCE BEFORE LOOP(S)\n\t\tadd = indexList.add\n\t\tfind = text.find\t \n\t\t# - - - - - - - - - - - - - - - - - -\n\n\t\twhile True:\n\t\t\tindex = find(term, index)\n\t\t\tif index == -1: \n\t\t\t\treturn sorted(indexList)\n\t\t\telse:\n\t\t\t\tadd(index + len(term[1:-1]) + 1)\n\t\t\t\tadd(index + 1)\n\t\t\t\tindex += len(term)", "def test_suite():\n ss = \"Python strings have some interesting methods.\"\n test(find(ss, \"s\") == 7)\n test(find(ss, \"s\", 7) == 7)\n test(find(ss, \"s\", 8) == 13)\n test(find(ss, \"s\", 8, 13) == -1)\n test(find(ss, \".\") == len(ss) - 1)", "def find(items, term, key=None):\n if key is None:\n key = lambda other: term == other\n \n for item in items:\n if key(item):\n return item", "def udcall_map_find(*args):\n return _ida_hexrays.udcall_map_find(*args)", "def doFind(self, str):\n for value in self.doId2do.values():\n if repr(value).find(str) >= 0:\n return value", "def find(self, value):\n return value in self.pair\n\n\n\n # Your TwoSum object will be instantiated and called as such:\n # obj = TwoSum()\n # obj.add(number)\n # param_2 = obj.find(value)", "def find(found_item, _):\n if found_item:\n return found_item[1]\n else:\n return default", "def find(node, arr):\n if arr[node] != node:\n arr[node] = find(arr[node], arr)\n return arr[node]", "def find(self, var):\n return self if (var in self) else self.outer.find(var)", "def search(d,key):\n\treturn dfs(d,key)", "def find_exact(self, **kwargs):\n results = list(self.find(**kwargs))\n if len(results) == 1:\n return results[0]\n return None", "def find_one(cls, *a, **ka):\n try:\n return cls.find(*a, **ka).next()\n except StopIteration:\n raise KeyError", "def eamap_find(*args):\n return _ida_hexrays.eamap_find(*args)", "def find(p):\n if p != parent[p]:\n parent[p] = find(parent[p])\n return parent[p]", "def _find(self, hashV):\n return self.table.search(hashV)", "def _find_by_key(self, key, find):\n index = hashId(key, self.capacity) # Get the index/ bucket based on hash code of the key\n \n hash_table_cell = self._entry[index]\n found_item = None\n for item in hash_table_cell: #Iterrate the entry array and check the key is matching and if key is same than get the value\n if item[0] == key:\n found_item = item\n break\n\n return find(found_item, hash_table_cell)", "def search(f):\n x = 0\n while not f(x):\n x += 1\n return x", "def _generic_find(controller, heading, patterns):\n msg.info(heading)\n msg.info(\"--------------------------\")\n msg.blank()\n for pattern in patterns:\n for entry in controller.find(pattern):\n if hasattr(entry, \"uuid\"):\n eid = entry.uuid\n elif hasattr(entry, \"fqn\"):\n eid = entry.fqn\n else:\n eid = entry.name\n text = \"{} | {} \".format(eid, entry.root)\n msg.arb(text, [msg.cenum[\"cwarn\"],\n msg.cenum[\"cstds\"]], '|')", "def _find(self, key, items, places, human_name, join, multi):\r\n if key in self:\r\n return self[key]\r\n\r\n human_name = human_name or key\r\n\r\n # expand env variables in `places` and split on colons\r\n places = itertools.chain.from_iterable(os.path.expandvars(p).split(os.pathsep) for p in places)\r\n places = map(os.path.expanduser, places)\r\n\r\n glob_places = itertools.chain.from_iterable(glob(p) for p in places)\r\n \r\n print 'Searching for', human_name, '...',\r\n results 
= []\r\n for p in glob_places:\r\n for i in items:\r\n path = os.path.join(p, i)\r\n if os.path.exists(path):\r\n result = path if join else p\r\n if not multi:\r\n print colorize(result, 'green')\r\n self[key] = result\r\n return result\r\n results.append(result)\r\n\r\n if results:\r\n if len(results) > 1:\r\n formatted_results = ''.join(['\\n - ' + x for x in results])\r\n print colorize('found multiple: %s' % formatted_results, 'green')\r\n else:\r\n print colorize(results[0], 'green')\r\n\r\n self[key] = results\r\n return results\r\n\r\n print colorize('FAILED', 'red')\r\n raise Abort(\"%s not found. Searched in following places: %s\" %\r\n (human_name, ''.join(['\\n - ' + p for p in places])))", "def find_op(self, *args):\n return _ida_hexrays.cexpr_t_find_op(self, *args)", "def test_only_pos_that_not_match(self):\n eq_(None, grepit(\"naranja\", [\"ob\"]))", "def _find(self, candidates, target, lb, rb):\n # we'v made sure there's no duplicate in candidates\n li, ri = lb, rb\n while li < ri:\n mi = (li + ri) // 2\n if candidates[mi] < target:\n li = mi + 1\n elif candidates[mi] > target:\n ri = mi - 1\n else:\n return mi\n\n if li == ri:\n if candidates[li] <= target:\n return li\n else:\n return li - 1\n\n if ri < lb:\n return ri\n\n if li == rb:\n return rb - 1\n\n # now it's like c[ri] < target < c[li]\n # actually these 3 cases are all ri...\n return ri", "def find(a, sub, start=0, end=None):\n return _vec_string(\n a, int_, 'find', [sub, start] + _clean_args(end))", "def linear_search(vlist, srchval): # somewhat different from book\n#Look at each item in list. If it equals the value you are looking for, stop.\n # linear_search_2.py\n index = 0\n for item in vlist:\n if item == srchval:\n return index # implicit break\n index += 1\n \n return -1", "def find(self,x):\n assert x in self\n if self.__parent[x]!=x:\n self.__parent[x] = self.find(self.__parent[x])\n return self.__parent[x]", "def find(func, list_seq):\n for list_item in list_seq:\n if func(list_item):\n return list_item", "def search_single_word(word):\n # YOUR CODE HERE #\n pass # delete this when you write your code", "def on_find(self, data: Any = None) -> Any:\n raise NotImplementedError", "def find(self, parent, i):\n if parent[i] == i:\n return i\n return self.find(parent, parent[i])", "def findWhere(cls, args):\n return cls.search(args)[0][0]", "def find(self, value):\n for n in self.num:\n if n + n == value:\n if self.num[n] >= 2:\n return True\n elif self.num.get(value - n):\n return True\n return False\n\n\n\n # Your TwoSum object will be instantiated and called as such:\n # obj = TwoSum()\n # obj.add(number)\n # param_2 = obj.find(value)", "def preorder_search(self, start, find_val):\n return False", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive beleft, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def find(self, predicate):\n return [d for d in self.iter_tree if predicate(d)]", "def find(self, var):\n if var in self: return self\n elif self.outer: return self.outer.find(var)\n else:\n raise Exception(\"Unresolved symbol: %s\", var)", "def search(self, find_val):\n return self.preorder_search(self.root, find_val)", "def search(self, find_val):\n return self.preorder_search(self.root, find_val)", "def find_offsets(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def linear_search(array, item):\n # implement 
linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def linear_search(array, item):\n # implement linear_search_iterative and linear_search_recursive below, then\n # change this to call your implementation to verify it passes all tests\n # return linear_search_iterative(array, item)\n return linear_search_recursive(array, item)", "def find(self, objectclass, **kwargs):\n raise NotImplementedError", "def search(self, search):\n raise NotImplementedError", "def cyclic_metasploit_find(subseq, sets = None):\n sets = sets or [ string.ascii_uppercase.encode(), string.ascii_lowercase.encode(), string.digits.encode() ]\n\n if isinstance(subseq, six.integer_types):\n subseq = packing.pack(subseq, 'all', 'little', False)\n\n return _gen_find(subseq, metasploit_pattern(sets))", "def finditem(func, seq):\n return next((item for item in seq if func(item)))", "def test_search_array_not_found(self):\r\n self.assertEqual(search_array([6, 4, 9, 10], 15), -1)", "def findSet(self, pai, i):\n if pai[i] == i:\n return i\n return self.findSet(pai,pai[i])", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def find(self,i):\r\n if self.parent[i]<0:\r\n return i\r\n return self.find(self.parent[i])", "def find(self, key, condition) -> list:\n pass", "def find_matches(self, match_fn):\n return\n yield # Turns this function into a generator but that is empty", "def find_matches(self, match_fn):\n return\n yield # Turns this function into a generator but that is empty", "def find(self, *args):\n return _ida_hexrays.qvector_lvar_t_find(self, *args)", "def search(self, *args, **kwargs): # real signature unknown\n pass", "def find(self, i):\n\n s = self._find(i)\n self._compress(i, s)\n return s", "def find(self,v):\n for i in range(len(self)):\n if near(self[i],v):\n return i\n return -1", "def search(self, term):", "def find(self, *args):\n return _ida_frame.xreflist_t_find(self, *args)", "def do_poortego_find(self, arg):\n poortego_find(self.my_interface, arg)", "def __contains__(self, item):", "def linear_search_foundornot(arr: IntList, query: int) -> bool:\n position: int = 0\n found: bool = False\n while position < len(arr) and not found:\n if arr[position] == query:\n found = True\n position += 1\n return found", "def found_needed_docstr(self):\n self.needed += 1\n self.found += 1", "def find(self, value):\n if value in self.s:\n return True\n\n for k in self.l:\n if value - k in self.l:\n if value - k == k and self.l[k] == 1:\n continue\n self.s[value] = value - k\n return True\n return False", "def _find_huc(source, shply, crs, 
hint):\n logging.debug('searching: %s'%hint)\n hint_level = len(hint)\n search_level = hint_level + 2\n if search_level > source.lowest_level:\n return hint\n\n _, subhus = get_hucs(source, hint, search_level, crs)\n \n for subhu in subhus:\n inhuc = _in_huc(shply, subhu)\n\n if inhuc == 2:\n # fully contained in try_huc, recurse\n hname = workflow.sources.utils.get_code(subhu, search_level)\n logging.debug(' subhuc: %s contains'%hname)\n return _find_huc(source, shply, crs, hname)\n elif inhuc == 1:\n hname = workflow.sources.utils.get_code(subhu, search_level)\n logging.debug(' subhuc: %s partially contains'%hname)\n # partially contained in try_huc, return this\n return hint\n else:\n hname = workflow.sources.utils.get_code(subhu, search_level)\n logging.debug(' subhuc: %s does not contain'%hname)\n assert(False)", "def find(listy, x):\n return listy.index(x) if x in listy else None", "def test_linked_list_search_failure(new_ll):\n from linked_list import Linked_List\n result = new_ll.search('owt')\n assert result is None", "def search(T,k):\r\n for t in T.data:\r\n if k == t.word:\r\n return t\r\n if T.isLeaf:\r\n return None\r\n return search(T.child[findChildB(T,k)],k)", "def searchOneShotPairBinaryNoOrient(_session, _beg, _end, _const):\n return searchOneShot(searchPairsBinaryNoOrient(_session, _beg, _end, _const))", "def _best_case(): # pragma no cover\n setup_code = \"\"\"\nfrom __main__ import _best_case\nfrom bst import BinarySearchTree\nfrom bst import Node\n \"\"\"\n test_code = \"\"\"\nnew_tree = BinarySearchTree([4,2,3,1,6,5,7])\nnew_tree.search(7)\n \"\"\"\n return(timeit.repeat(setup=setup_code, stmt=test_code, number=100000))" ]
[ "0.7247958", "0.70327836", "0.70220166", "0.66316307", "0.6484891", "0.6347973", "0.6303143", "0.6281496", "0.6251936", "0.62015027", "0.6197718", "0.5998828", "0.5998442", "0.5941339", "0.59164965", "0.5911454", "0.58913594", "0.5884457", "0.58645743", "0.584961", "0.57515025", "0.5750195", "0.5745287", "0.57425845", "0.5730028", "0.57175833", "0.5696791", "0.5692755", "0.56838244", "0.5667486", "0.56559205", "0.5641379", "0.5609672", "0.5606319", "0.56056094", "0.5601415", "0.5565207", "0.5545916", "0.552947", "0.5522281", "0.55151093", "0.54757774", "0.54629385", "0.54611546", "0.54555905", "0.5453968", "0.5443654", "0.54367155", "0.541065", "0.5407928", "0.5406541", "0.5406122", "0.5404171", "0.53929466", "0.538746", "0.53673273", "0.5367201", "0.5366309", "0.5361433", "0.53543377", "0.53543293", "0.53536546", "0.5337007", "0.53265214", "0.53265", "0.53264844", "0.5313077", "0.5313077", "0.53118926", "0.5311168", "0.5311168", "0.5311168", "0.5311168", "0.53104705", "0.53071624", "0.53000265", "0.52987194", "0.5292131", "0.5287362", "0.52846205", "0.52830726", "0.52753776", "0.5264072", "0.5264072", "0.52632993", "0.52563137", "0.5254491", "0.5244265", "0.5243068", "0.52370286", "0.52369905", "0.5235736", "0.52308995", "0.5210791", "0.5190123", "0.5186297", "0.5183704", "0.5177779", "0.516463", "0.51642257", "0.51636994" ]
0.0
-1
This is a raw test so there is only a single line of 32x32 blocks
def testPatternBasic(tiles=8, cols=16):
    matrix = Adafruit_RGBmatrix(32, tiles)  # cols, rows
    tileSize = (32, 32)
    sizeX = tileSize[0] * tiles / cols
    sizeY = 32
    # cols = int(tileSize[0] * tiles / sizeX)
    rows = 1
    imageToRender = Image.new("RGBA", (tileSize[0] * tiles, tileSize[1] * rows))
    draw = ImageDraw.Draw(imageToRender)
    print("imageToRender", imageToRender, sizeX)
    # Print tile numbers
    font = ImageFont.truetype("/home/pi/RPI/fonts/freefont/FreeSerifBold.ttf", 30)
    count = 0
    for n in range(0, tiles):
        xPos = count * 32
        yPos = -5
        draw.text((xPos, yPos), str(count), (255, 0, 0), font=font)
        count = count + 1
    iid = imageToRender.im.id
    matrix.SetImage(iid, 0, 0)
    time.sleep(4)
    # (0,200,0)
    wheel = [
        (255, 0, 0),
        (255, 125, 0),
        (255, 255, 0),
        (0, 255, 0),
        (0, 255, 125),
        (0, 0, 255),
        (125, 0, 255),
        (255, 0, 255),
    ]
    n = clri = 0
    b = 1
    cName1 = wheel[clri]
    cName2 = (10, 10, 10)
    t1 = time.clock()
    t2 = time.clock()
    for c in range(0, cols):
        xPos = c * sizeX + 0
        yPos = 0
        xPos2 = xPos + sizeX
        yPos2 = yPos + sizeY
        b = 1
        draw.rectangle(
            (xPos, yPos, xPos2, yPos2),
            fill=(int(cName1[0] * b), int(cName1[1] * b), int(cName1[2] * b)),
        )
        n += 1
        if n > len(wheel):
            b *= 0.8
        # print(n, clri, xPos, yPos, xPos2, yPos2)
        if clri < len(wheel) - 1:
            clri += 1
        else:
            clri = 0
        cName1 = wheel[clri]
        cName2 = (10, 10, 10)
    t2 = time.clock()
    print(t2 - t1)
    iid = imageToRender.im.id
    matrix.SetImage(iid, 0, 0)
    print(time.clock() - t2)
    time.sleep(10)
    draw.rectangle((0, 0, tileSize[0] * cols, tileSize[1] * rows), fill=0, outline=1)
    matrix.SetImage(iid, 0, 0)
    time.sleep(0.1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_blocks_to_line_1(self):\n grid = self.sudoku.inicializate_sudoku(9)\n\n # Assigment especify values\n grid[0][0] = 1 # Block 1\n grid[1][1] = 2 # Block 1\n grid[2][2] = 3 # Block 1\n grid[5][5] = 9 # Block 5\n grid[7][4] = 7 # Block 8\n grid[6][6] = 7 # Block 9\n\n values_grid = (grid[0][0],\n grid[1][1],\n grid[2][2],\n grid[5][5],\n grid[7][4],\n grid[6][6])\n\n # Transform blocks to lines\n line_blocks = self.sudoku.blocks_to_line(grid)\n\n values_blocks = (line_blocks[0][0], # Block 1, Line 1\n line_blocks[0][4], # Block 1, Line 1\n line_blocks[0][8], # Block 1, Line 1\n line_blocks[4][8], # Block 5, Line 5\n line_blocks[7][4], # Block 8, Line 8\n line_blocks[8][0], # Block 9, Line 9\n )\n\n # Compare some grid blocks with some lines of lines_blocks\n self.assertEqual(values_blocks, values_grid)", "def test_block_extra_batch(self):\n pass", "def test_blocks_to_line_2(self):\n grid = self.sudoku.inicializate_sudoku(9)\n\n # Assigment especify values\n grid[0][0] = 1 # Block 1\n grid[1][1] = 2 # Block 1\n grid[2][2] = 3 # Block 1\n grid[5][5] = 9 # Block 5\n grid[7][4] = 7 # Block 8\n grid[6][6] = 7 # Block 9\n\n grid = np.transpose(grid)\n\n values_grid = (grid[0][0],\n grid[1][1],\n grid[2][2],\n grid[5][5],\n grid[7][4],\n grid[6][6])\n\n # Transform blocks to lines\n line_blocks = self.sudoku.blocks_to_line(grid)\n\n values_blocks = (line_blocks[0][0], # Block 1, Line 1\n line_blocks[0][4], # Block 1, Line 1\n line_blocks[0][8], # Block 1, Line 1\n line_blocks[4][8], # Block 5, Line 5\n line_blocks[7][4], # Block 8, Line 8\n line_blocks[8][0], # Block 9, Line 9\n )\n\n # Compare some grid blocks with some lines of lines_blocks\n self.assertEqual(values_blocks, values_grid)", "def test01(self):\n N, blen = self.N, 100\n a = bcolz.fromiter(xrange(N), dtype=np.float64, count=N,\n rootdir=self.rootdir)\n l, s = 0, 0\n for block in bcolz.iterblocks(a, blen):\n if l == 0:\n self.assertEqual(len(block), blen)\n l += len(block)\n s += block.sum()\n self.assertEqual(l, N)", "def test04(self):\n a = np.arange(1e4)\n b = chunk(a, atom=a.dtype, cparams=bcolz.cparams())\n # print \"b[1:8000]->\", `b[1:8000]`\n assert_array_equal(a[1:8000], b[1:8000], \"Arrays are not equal\")", "def test03(self):\n N, blen = self.N, 100\n a = bcolz.fromiter(xrange(N), dtype=np.float64, count=N,\n rootdir=self.rootdir)\n l, s = 0, 0\n for block in bcolz.iterblocks(a, blen, blen-1, 3*blen+2):\n l += len(block)\n s += block.sum()\n mlen = min(N - (blen - 1), 2*blen + 3)\n self.assertEqual(l, mlen)\n slen = min(N, 3*blen + 2)\n self.assertEqual(s, np.arange(blen-1, slen).sum())", "def test_batch_size_pack_size():", "def test_scanmatrixlines_3x3_returns16lists():\n expected = [\n [1, 2, 3], [4, 5, 6], [7, 8, 9],\n [1, 4, 7], [2, 5, 8], [3, 6, 9],\n [7], [4, 8], [1, 5, 9], [2, 6], [3],\n [1], [2, 4], [3, 5, 7], [6, 8], [9]\n ]\n m = problem11.read_matrix('data/test_matrix4.txt')\n assert problem11.scan_matrix_lines(m) == expected", "def test_block_bad_batch(self):\n pass", "def test02(self):\n N, blen = self.N, 100\n a = bcolz.fromiter(xrange(N), dtype=np.float64, count=N,\n rootdir=self.rootdir)\n l, s = 0, 0\n for block in bcolz.iterblocks(a, blen, blen-1):\n l += len(block)\n s += block.sum()\n self.assertEqual(l, (N - (blen - 1)))\n self.assertEqual(s, np.arange(blen-1, N).sum())", "def test_4x4_no_mine():\r\n\r\n input = ('4 4\\n'\r\n '. . . .\\n'\r\n '. . . .\\n'\r\n '. . . .\\n'\r\n '. . . 
.\\n'\r\n '0 0')\r\n expected_output = ('Field #1:\\n'\r\n '0 0 0 0\\n'\r\n '0 0 0 0\\n'\r\n '0 0 0 0\\n'\r\n '0 0 0 0\\n'\r\n '\\n')\r\n expected_output = expected_output.replace('\\n', os.linesep)\r\n\r\n output = minesweeper_adaptor.run_minesweeper(input)\r\n assert output == expected_output", "def test_block_batches_order(self):\n pass", "def num_blocks(self): # -> int:\n ...", "def test_4x4_1mine_2():\r\n\r\n input = ('4 4\\n'\r\n '. . . .\\n'\r\n '. . . .\\n'\r\n '. * . .\\n'\r\n '. . . .\\n'\r\n '0 0')\r\n expected_output = ('Field #1:\\n'\r\n '0 0 0 0\\n'\r\n '1 1 1 0\\n'\r\n '1 * 1 0\\n'\r\n '1 1 1 0\\n'\r\n '\\n')\r\n expected_output = expected_output.replace('\\n', os.linesep)\r\n\r\n output = minesweeper_adaptor.run_minesweeper(input)\r\n assert output == expected_output", "def test_4x4_1mine_1():\r\n\r\n input = ('4 4\\n'\r\n '* . . .\\n'\r\n '. . . .\\n'\r\n '. . . .\\n'\r\n '. . . .\\n'\r\n '0 0')\r\n expected_output = ('Field #1:\\n'\r\n '* 1 0 0\\n'\r\n '1 1 0 0\\n'\r\n '0 0 0 0\\n'\r\n '0 0 0 0\\n'\r\n '\\n')\r\n expected_output = expected_output.replace('\\n', os.linesep)\r\n\r\n output = minesweeper_adaptor.run_minesweeper(input)\r\n assert output == expected_output", "def test_4x4_16mines():\r\n\r\n input = ('4 4\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '0 0')\r\n expected_output = ('Field #1:\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '* * * *\\n'\r\n '\\n')\r\n expected_output = expected_output.replace('\\n', os.linesep)\r\n\r\n output = minesweeper_adaptor.run_minesweeper(input)\r\n assert output == expected_output", "def num_47():\n\n def block_reshape(a, rows, cols, nodata=-1, as_masked=True):\n \"\"\" \"\"\"\n s = np.array(a.shape)\n w = np.array([rows, cols])\n m = divmod(s, w)\n new_shape = w*m[0] + w*(m[1]!=0)\n ypad, xpad = new_shape - a.shape\n pad = ((0, ypad), (0, xpad))\n p_with =((nodata, nodata), (nodata, nodata))\n b = np.pad(a, pad_width=pad, mode='constant', constant_values=p_with)\n w_y, w_x = w # Blocksize\n y, x = b.shape # padded array\n c = b.reshape((y//w_y, w_y, x//w_x, w_x))\n c = c.swapaxes(1, 2).reshape(-1, w_y, w_x)\n if as_masked:\n mask_val = nodata\n c = np.ma.masked_equal(c, mask_val)\n c.set_fill_value(mask_val)\n return b, c\n y, x = 5, 6\n rows, cols = [3, 4]\n nodata = -1\n a = np.arange(x*y).reshape(y,x)\n b, c = block_reshape(a, rows, cols, nodata)\n print(\"\\n{}\".format(num_47.__doc__))\n print(\"a\\n{}\\nb\\n{}\\nc\\n{}\".format(a, b, c))\n return a, b, c", "def test_32(self):\n assert 'False' == Api.requestBlock('test-32')", "def test03(self):\n a = np.arange(1e3)\n b = chunk(a, atom=a.dtype, cparams=bcolz.cparams())\n # print \"b[1:8:3]->\", `b[1:8:3]`\n assert_array_equal(a[1:8:3], b[1:8:3], \"Arrays are not equal\")", "def test_block_split(self):\n block1 = self.geographies.find({ 'geoid': '150010210051016' }) \n self.assertEqual(block1.count(), 1)\n block1 = block1[0]\n\n split_block_pop = 448 \n block1_land_pct = float(184458) / 587158 # AREALAND_INT / AREALAND_2000\n block1_pop_2000 = int(block1_land_pct * split_block_pop)\n block1_pop_2010 = 22 \n block1_pop_delta = block1_pop_2010 - block1_pop_2000\n block1_pop_pct_change = float(block1_pop_delta) / block1_pop_2000\n\n self.assertAlmostEqual(block1['xwalk']['150010210011337']['POPPCT00'], block1_land_pct, places=4)\n self.assertAlmostEqual(block1['xwalk']['150010210011337']['HUPCT00'], block1_land_pct, places=4)\n self.assertAlmostEqual(block1['data']['2000']['P1']['P001001'], block1_pop_2000)\n 
self.assertAlmostEqual(float(block1['data']['2010']['P1']['P001001']), block1_pop_2010)\n self.assertAlmostEqual(float(block1['data']['delta']['P1']['P001001']), block1_pop_delta)\n self.assertAlmostEqual(float(block1['data']['pct_change']['P1']['P001001']), block1_pop_pct_change)", "def test_parsing(self):\n truth = self.generate_fake_pos()\n batch_size = 4\n records = []\n for i in range(batch_size):\n record = b''\n for j in range(2):\n record += self.v4_record(*truth)\n records.append(record)\n\n parser = ChunkParser(ChunkDataSrc(records),\n shuffle_size=1,\n workers=1,\n batch_size=batch_size)\n batchgen = parser.parse()\n data = next(batchgen)\n\n batch = (np.reshape(np.frombuffer(data[0], dtype=np.float32),\n (batch_size, 112, 64)),\n np.reshape(np.frombuffer(data[1], dtype=np.int32),\n (batch_size, 1858)),\n np.reshape(np.frombuffer(data[2], dtype=np.float32),\n (batch_size, 3)),\n np.reshape(np.frombuffer(data[3], dtype=np.float32),\n (batch_size, 3)))\n\n fltplanes = truth[1].astype(np.float32)\n fltplanes[5] /= 99\n for i in range(batch_size):\n data = (batch[0][i][:104],\n np.array([batch[0][i][j][0] for j in range(104, 111)]),\n batch[1][i], batch[2][i], batch[3][i])\n self.assertTrue((data[0] == truth[0]).all())\n self.assertTrue((data[1] == fltplanes).all())\n self.assertTrue((data[2] == truth[2]).all())\n scalar_win = data[3][0] - data[3][-1]\n self.assertTrue(np.abs(scalar_win - truth[3]) < 1e-6)\n scalar_q = data[4][0] - data[4][-1]\n self.assertTrue(np.abs(scalar_q - truth[4]) < 1e-6)\n\n parser.shutdown()", "def test02(self):\n a = np.arange(1e3)\n b = chunk(a, atom=a.dtype, cparams=bcolz.cparams())\n # print \"b[1:3]->\", `b[1:3]`\n assert_array_equal(a[1:3], b[1:3], \"Arrays are not equal\")", "def test_1x1_mine():\r\n\r\n input = ('1 1\\n'\r\n '*\\n'\r\n '0 0')\r\n expected_output = ('Field #1:\\n'\r\n '*\\n'\r\n '\\n')\r\n expected_output = expected_output.replace('\\n', os.linesep)\r\n\r\n output = minesweeper_adaptor.run_minesweeper(input)\r\n assert output == expected_output", "def test_tiled_iterator_nogen(self):\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=0\n )\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # no overlap\n tile = next(tile_no_gen)\n img0 = self.test_data_1[65 : 2 * 65, 65 : 2 * 65]\n np.array_equal(tile, img0)\n\n # --- overlapping --- #\n tile_no_gen = TiledIterator(\n twod_image=self.test_file_1, overlap_log_2=2\n )\n\n tile = next(tile_no_gen)\n\n shape = tile.shape\n\n # defaults\n self.assertTrue(shape[0] == 32)\n self.assertTrue(shape[1] == 65)\n self.assertTrue(shape[2] == 65)\n self.assertTrue(shape[3] == 1)\n\n #\n img0 = self.test_data_1[0:65, 0:65]\n np.array_equal(tile, img0)\n\n # 64/(2**2) = 16\n tile = next(tile_no_gen)\n img0 = self.test_data_1[16 : 16 + 65, 16 : 16 + 65]\n np.array_equal(tile, img0)", "def test01(self):\n a = np.arange(1e3)\n b = chunk(a, atom=a.dtype, cparams=bcolz.cparams())\n # print \"b[1]->\", `b[1]`\n self.assertTrue(a[1] == b[1], \"Values in key 1 are not equal\")", "def block(array):\r\n grid = []\r\n for z in range(0,7,3): #0,3,6\r\n #vertical down 3\r\n for n in range(0,7,3): #0,3,6\r\n #horiz across 3\r\n line = []\r\n for i in range(3):\r\n for j in range(3):\r\n vert,hor = i+z,j+n\r\n line.append(array[vert][hor])\r\n grid.append(line)\r\n 
won = True\r\n for i in range(len(grid)):\r\n if won == True:\r\n if len(grid[i]) != len(set(grid[i])):\r\n won = False\r\n else:\r\n pass\r\n else:\r\n break\r\n return won", "def test_nrows_gtiff_array(self):\n self.assertEqual(_test_array(landsat_gtiff).shape[1], 224)", "def check_if_legal(row, blocks):\n counter = 0\n compare_lst = []\n for square in row:\n if square == Black:\n counter += 1\n else:\n if counter > 0:\n compare_lst.append(counter)\n counter = 0\n if counter > 0:\n compare_lst.append(counter)\n if compare_lst == blocks:\n return True\n return False", "def test_1x1_no_mine():\r\n\r\n input = ('1 1\\n'\r\n '.\\n'\r\n '0 0')\r\n expected_output = ('Field #1:\\n'\r\n '0\\n'\r\n '\\n')\r\n expected_output = expected_output.replace('\\n', os.linesep)\r\n\r\n output = minesweeper_adaptor.run_minesweeper(input)\r\n assert output == expected_output", "def num_44():\n def block_array(a, rows=3, cols=4, col_first=True, nodata=-1):\n \"\"\" a variant on array_split\n requires a N*m array\n \"\"\"\n s = np.array(a.shape)\n w = np.array([rows, cols])\n m = divmod(s, w)\n new_shape = w*m[0] + w*(m[1]!=0)\n ypad, xpad = new_shape - a.shape \n b = np.pad(a, pad_width=((0, ypad),(0, xpad)), \n mode='constant', \n constant_values=((nodata, nodata),(nodata, nodata)))\n rn, cn = new_shape\n x_s = np.arange(0, cn+cols, cols)[1:] #.tolist()\n y_s = np.arange(0, rn+rows, rows)[1:] #.tolist()\n print(\"x_s {}\\ny_s {}\".format(x_s, y_s))\n #c = np.array([i for i in np.hsplit(b, x_s) if len(i) > 0])\n c = np.array([i for i in np.split(b, x_s, axis=1) if len(i) > 0])\n d = np.array([i for i in np.split(c, y_s, axis=1) if len(i) > 0])\n e = d.swapaxes(0, 1)\n ix = np.in1d(e.ravel(), nodata).reshape(e.shape)\n f = np.ma.array(e, mask=ix, fill_value=-1)\n return b, c, d, e, f\n y, x = 9, 11\n a = np.arange(x*y).reshape(y,x)\n b, c, d, e, f = block_array(a)\n print(\"\\n{}\".format(num_44.__doc__)) \n for i in [a, b, c, d, e, f]:\n _f(i)\n return a, b, c, d, e, f", "def test_se_block(self):\n images = tf.zeros((10, 128, 128, 3), dtype=tf.float32)\n global_params = efficientnet_model.GlobalParams(\n 1.0,\n 1.0,\n 0,\n 'channels_last',\n num_classes=10,\n batch_norm=utils.TpuBatchNormalization)\n blocks_args = [\n efficientnet_model.BlockArgs(\n kernel_size=3,\n num_repeat=3,\n input_filters=3,\n output_filters=6,\n expand_ratio=6,\n id_skip=False,\n strides=[2, 2],\n se_ratio=0.8,\n conv_type=0,\n fused_conv=0,\n super_pixel=0)\n ]\n model = efficientnet_model.Model(blocks_args, global_params)\n outputs = model(images, training=True)\n self.assertEqual((10, 10), outputs[0].shape)", "def test_full_house_flush_ind(self):", "def test00(self):\n N = self.N\n a = bcolz.fromiter(xrange(N), dtype=np.float64, count=N,\n rootdir=self.rootdir)\n l, s = 0, 0\n for block in bcolz.iterblocks(a):\n l += len(block)\n s += block.sum()\n self.assertEqual(l, N)\n # as per Gauss summation formula\n self.assertEqual(s, (N - 1) * (N / 2))", "def test_block_missing_batch(self):\n pass", "def test_chunk_memory(self):\n layer = tl.Serial(tl.Dense(1024*1024), tl.Dense(128))\n chunked = tl.Chunk(layer, 256)\n x = np.random.uniform(size=(16*1024, 16))\n chunked.init(shapes.signature(x))\n y = chunked(x)\n z = tl.Accelerate(chunked)(x)\n self.assertEqual(y.shape, (16*1024, 128))\n self.assertEqual(z.shape, (16*1024, 128))", "def test_4x4_2mines():\r\n\r\n input = ('4 4\\n'\r\n '. . . .\\n'\r\n '. . . *\\n'\r\n '. * . .\\n'\r\n '. . . 
.\\n'\r\n '0 0')\r\n expected_output = ('Field #1:\\n'\r\n '0 0 1 1\\n'\r\n '1 1 2 *\\n'\r\n '1 * 2 1\\n'\r\n '1 1 1 0\\n'\r\n '\\n')\r\n expected_output = expected_output.replace('\\n', os.linesep)\r\n\r\n output = minesweeper_adaptor.run_minesweeper(input)\r\n assert output == expected_output", "def checkio(lines_list):\n row = [[0]*3,[0]*3,[0]*3,[0]*3]\n colume = [[0]*4,[0]*4,[0]*4]\n square = 0\n # save line in matrix\n for i in lines_list:\n if i[0]-i[1] in [-1,1]:\n row[int((i[0]-1)/4)][min(i[0],i[1])%4-1] = 1\n else:\n colume[int(((min(i[0],i[1])-1)/4))][min(i[0],i[1])%4-1] = 1\n\n for r in [0, 1, 2]:\n # r is the start point of row\n for c in [0, 1, 2]:\n # c is the start point of colume\n for line in range(1, 4-max(r,c)):\n # line is the length of square\n check = 0\n print(line)\n for i in range(0, line):\n check = row[r][c+i] + colume[r+i][c] + row[r+line][c+i] + colume[r+i][c+line] + check\n if check == line * 4:\n square += 1\n return square", "def test_nrows_vrt_array(self):\n self.assertEqual(_test_array(landsat_vrt).shape[1], 224)", "def getChunks():", "def main1():\n output = []\n\n def out(value):\n \"\"\"Append to output, don't yield.\"\"\"\n output.append(value)\n return False\n computer = Computer(\"day13-input\", out=out)\n computer.evaluate()\n board = render_tiles(output)\n for line in board:\n print(line)\n\n num_blocks = 0\n for line in board:\n for tile in line:\n if tile == \"+\":\n num_blocks += 1\n print(f\"{num_blocks} blocks.\")", "def test_bottleneck_block_with_superpixel_tranformation(self):\n images = tf.zeros((10, 128, 128, 3), dtype=tf.float32)\n global_params = efficientnet_model.GlobalParams(\n 1.0,\n 1.0,\n 0,\n 'channels_last',\n num_classes=10,\n batch_norm=utils.TpuBatchNormalization)\n blocks_args = [\n efficientnet_model.BlockArgs(\n kernel_size=3,\n num_repeat=3,\n input_filters=3,\n output_filters=6,\n expand_ratio=6,\n id_skip=True,\n strides=[2, 2],\n conv_type=0,\n fused_conv=0,\n super_pixel=2)\n ]\n model = efficientnet_model.Model(blocks_args, global_params)\n outputs = model(images, training=True)\n self.assertEqual((10, 10), outputs[0].shape)", "def _block_lower_triangular_dense(expected_shape, blocks):\n rows = []\n num_cols = 0\n for row_blocks in blocks:\n\n # Get the batch shape for the block.\n batch_row_shape = array_ops.shape(row_blocks[0])[:-1]\n\n num_cols += array_ops.shape(row_blocks[-1])[-1]\n zeros_to_pad_after_shape = array_ops.concat(\n [batch_row_shape, [expected_shape[-2] - num_cols]], axis=-1)\n zeros_to_pad_after = array_ops.zeros(\n zeros_to_pad_after_shape, dtype=row_blocks[-1].dtype)\n\n row_blocks.append(zeros_to_pad_after)\n rows.append(array_ops.concat(row_blocks, axis=-1))\n\n return array_ops.concat(rows, axis=-2)", "def colour_tetris_1D(rounds):\n charstr = 'RGB'\n blocklist = []\n stack = create_stack()\n count = 0\n for i in range(rounds):\n blocklist.append(charstr[random.randint(0,2)])\n #print('blocklist =', blocklist)\n prefix = ' '\n i = 1\n for block in blocklist:\n output = str(i) + ': Accept ' + block + '?'\n clocktime0 = time.time()\n ans = input(output)\n clocktime1 = time.time()\n elapsed = clocktime1 - clocktime0\n success = ' '\n if elapsed > 2:\n print('TOO LATE (', elapsed, ' sec), block accepted')\n ans = 'y'\n if ans == 'y' or ans == 'Y':\n if stack.top() == block:\n stack.pop()\n count = count + 1\n success = block + '-' + block + ' * '\n else:\n stack.push(block)\n print(success + 'Score = ' + str(count) + '; Stack: ' + str(stack))\n i = i+1\n print(stack.length(), 'still in stack')\n 
print('Score:', count - stack.length())", "def testByteTifDimensions(self):\n self.assertEqual(self.coal.fine_map.x_offset, 0)\n self.assertEqual(self.coal.fine_map.y_offset, 0)\n self.assertEqual(self.coal.fine_map.x_size, 24)\n self.assertEqual(self.coal.fine_map.y_size, 24)\n self.assertEqual(self.coal.fine_map.x_res, 1.0)\n self.assertEqual(self.coal.fine_map.y_res, 1.0)", "def test_tile_read_nodata():\n # Partial Tile 7-42-24\n bounds = [\n -6887893.4928338025,\n 12210356.646387195,\n -6574807.424977721,\n 12523442.714243278,\n ]\n tilesize = 16\n with rasterio.open(COG) as src_dst:\n arr, mask = reader.part(src_dst, bounds, tilesize, tilesize, nodata=1)\n assert arr.shape == (1, 16, 16)\n assert mask.shape == (16, 16)\n assert not mask.all()", "def build(self, block_size):", "def test_simple_block(self):\n if config.SUMLEV_BLOCK not in config.SUMLEVS:\n pass\n\n blocks = self.geographies.find({ 'geoid': '150010210053029' })\n\n self.assertEqual(blocks.count(), 1)\n\n block = blocks[0]\n\n self.assertEqual(block['sumlev'], config.SUMLEV_BLOCK)\n self.assertEqual(block['metadata']['NAME'], 'Block 3029')\n self.assertEqual(block['metadata']['STATE'], '15')\n self.assertEqual(block['metadata']['COUNTY'], '001')\n self.assertEqual(block['metadata']['TRACT'], '021005')\n\n pop_2000 = 33 \n pop_2010 = 93 \n self._test_totalpop(block, pop_2000, pop_2010)", "def test_split_cell_creates_four_more_cells(mock_amg):\n\n init_n_windows = mock_amg.n_windows\n init_n_cells = mock_amg.n_cells\n\n mock_amg.cells[0].split()\n\n assert init_n_windows + 5 == mock_amg.n_windows\n assert init_n_cells + 4 == mock_amg.n_cells", "def test_ThinDataMultiArray(self):\n for split in self.splits:\n y_data, x_data = data_process.thinData(y_testMultiple, xdim_test, split)\n for spec in y_data:\n self.assertTrue(len(spec) == len(x_data))", "def test_split_data(self):\n Xlists = tuple([[np.zeros((200,9)) for b in range(14)] for c in range(9)])\n ybinarylists = [np.zeros((14,12)) for c in range(9)]\n indices = slice(7, 9)\n x_test, y_test = tutorial_pamap2.split_data(Xlists, ybinarylists, \\\n indices)\n test = y_test[0].shape == (12,) and x_test[0].shape == (200, 9)\n assert test", "def test_create_lines():\n board = Board(640, 640, 8)\n exp_vert_lines = []\n exp_hort_lines = []\n for i in range(1, 8):\n exp_vert_lines.append((640//8)*i)\n exp_hort_lines.append((640//8)*i)\n board.create_lines()\n assert board.vert_lines == exp_vert_lines\n assert board.hort_lines == exp_hort_lines\n\n board = Board(640, 640, 4)\n exp_vert_lines = []\n exp_hort_lines = []\n for i in range(1, 4):\n exp_vert_lines.append((640//4)*i)\n exp_hort_lines.append((640//4)*i)\n board.create_lines()\n assert board.vert_lines == exp_vert_lines\n assert board.hort_lines == exp_hort_lines", "def part3e_0():\n xs = \"A A B C B A C\".split()\n blocks = submission.getLongRangeCRFBlocks(xs)\n \n # Make sure everything is there\n grader.requireIsEqual( set( it.chain.from_iterable( blocks ) ), set(xrange(len(xs))) )\n # Make sure that each block has identical symbols\n for block in blocks:\n grader.requireIsEqual( 1, len(set([xs[i] for i in block])) )", "def test_block_bad_consensus(self):\n pass", "def test_main(self):\r\n\r\n expected = \">FS8APND01D3TW3 | cluster size: 94 \\nCTCCCGTAGGAGTCTGGGCCGTATCTCAGTCCCAATGTGGCCGGTCACCCTCTCAGGCCGGCTACCCGTCAAAGCCTTGGTAAGCCACTACCCCACCAACAAGCTGATAAGCCGCGAGTCCATCCCCAACCGCCGAAACTTTCCAACCCCCACCATGCAGCAGGAGCTCCTATCCGGTATTAGCCCCAGTTTCCTGAAGTTATCCCAAAGTCAAGGGCAGGTTACTCACGTGTTACTCACCCGTTCGCC\\n\"\r\n\r\n expected_map = 
\"\"\"FS8APND01EWRS4:\r\nFS8APND01DXG45:\r\nFS8APND01D3TW3:\\tFS8APND01CSXFN\\tFS8APND01DQ8MX\\tFS8APND01DY7QW\\tFS8APND01B5QNI\\tFS8APND01CQ6OG\\tFS8APND01C7IGN\\tFS8APND01DHSGH\\tFS8APND01DJ17E\\tFS8APND01CUXOA\\tFS8APND01EUTYG\\tFS8APND01EKK7T\\tFS8APND01D582W\\tFS8APND01B5GWU\\tFS8APND01D7N2A\\tFS8APND01BJGHZ\\tFS8APND01D6DYZ\\tFS8APND01C6ZIM\\tFS8APND01D2X6Y\\tFS8APND01BUYCE\\tFS8APND01BNUEY\\tFS8APND01DKLOE\\tFS8APND01C24PP\\tFS8APND01EBWQX\\tFS8APND01ELDYW\\tFS8APND01B0GCS\\tFS8APND01D4QXI\\tFS8APND01EMYD9\\tFS8APND01EA2SK\\tFS8APND01DZOSO\\tFS8APND01DHYAZ\\tFS8APND01C7UD9\\tFS8APND01BTZFV\\tFS8APND01CR78R\\tFS8APND01B39IE\\tFS8APND01ECVC0\\tFS8APND01DM3PL\\tFS8APND01DELWS\\tFS8APND01CIEK8\\tFS8APND01D7ZOZ\\tFS8APND01CZSAI\\tFS8APND01DYOVR\\tFS8APND01BX9XY\\tFS8APND01DEWJA\\tFS8APND01BEKIW\\tFS8APND01DCKB9\\tFS8APND01EEYIS\\tFS8APND01DDKEA\\tFS8APND01DSZLO\\tFS8APND01C6EBC\\tFS8APND01EE15M\\tFS8APND01ELO9B\\tFS8APND01C58QY\\tFS8APND01DONCG\\tFS8APND01DVXX2\\tFS8APND01BL5YT\\tFS8APND01BIL2V\\tFS8APND01EBSYQ\\tFS8APND01CCX8R\\tFS8APND01B2YCJ\\tFS8APND01B1JG4\\tFS8APND01DJ024\\tFS8APND01BIJY0\\tFS8APND01CIA4G\\tFS8APND01DV74M\\tFS8APND01ECAX5\\tFS8APND01DC3TZ\\tFS8APND01EJVO6\\tFS8APND01D4VFG\\tFS8APND01DYYYO\\tFS8APND01D1EDD\\tFS8APND01DQUOT\\tFS8APND01A2NSJ\\tFS8APND01DDC8I\\tFS8APND01BP1T2\\tFS8APND01DPY6U\\tFS8APND01CIQGV\\tFS8APND01BPUT8\\tFS8APND01BDNH4\\tFS8APND01DOZDN\\tFS8APND01DS866\\tFS8APND01DGS2J\\tFS8APND01EDK32\\tFS8APND01EPA0T\\tFS8APND01CK3JM\\tFS8APND01BKLWW\\tFS8APND01DV0BO\\tFS8APND01DPNXE\\tFS8APND01B7LUA\\tFS8APND01BTTE2\\tFS8APND01CKO4X\\tFS8APND01DGGBY\\tFS8APND01C4NHX\\tFS8APND01DYPQN\r\nFS8APND01BSTVP:\r\nFS8APND01EFK0W:\r\nFS8APND01DCIOO:\r\nFS8APND01CKOMZ:\r\n\"\"\"\r\n\r\n command = \" \".join([\"denoiser.py\",\r\n \"--force\", \"-o\", self.test_dir, \"-i\",\r\n \"%s/qiime/support_files/denoiser/TestData/denoiser_test_set.sff.txt\" % PROJECT_HOME])\r\n\r\n result = Popen(command, shell=True, universal_newlines=True,\r\n stdout=PIPE, stderr=STDOUT).stdout.read()\r\n self.result_dir = self.test_dir\r\n\r\n observed = \"\".join(list(open(self.result_dir + \"centroids.fasta\")))\r\n self.assertEqual(observed, expected)\r\n\r\n self.assertEqual(\r\n len(list(parse_fasta(open(self.result_dir + \"singletons.fasta\")))),\r\n 6)\r\n\r\n observed = \"\".join(\r\n list(open(self.result_dir + \"denoiser_mapping.txt\")))\r\n self.assertEqual(observed, expected_map)", "def get_blocks(index):\r\n #call with -1 to get full blocklist\r\n #the reason this is a function instead of just a list is that originally\r\n #i had plans to support dynamic tilesets, for example if only a certain\r\n #number of each tile were available. 
in the end this didnt happen though\r\n all_blocks = [\r\n [[0,0,0],[1,1,1],[0,0,0]], #0 - (horizontal passage)\r\n [[0,1,0],[0,1,0],[0,1,0]], #1 | (vertical passage)\r\n \r\n [[0,0,0],[1,1,0],[0,1,0]], #2 >v various L-junctions\r\n [[0,1,0],[1,1,0],[0,0,0]], #3 >^\r\n [[0,0,0],[0,1,1],[0,1,0]], #4 ^>\r\n [[0,1,0],[0,1,1],[0,0,0]], #5 v>\r\n \r\n [[0,0,0],[0,0,0],[0,0,0]], #6 0 empty\r\n [[0,1,0],[1,1,1],[0,1,0]], #7 + cross\r\n \r\n [[0,1,0],[1,1,1],[0,0,0]], #8 _|_ various T-junctions\r\n [[0,0,0],[1,1,1],[0,1,0]], #9 T\r\n [[0,1,0],[1,1,0],[0,1,0]], #10 -|\r\n [[0,0,0],[1,1,1],[0,0,0]]] #11 |-\r\n \r\n# [[0,1,0],[0,1,0],[0,0,0]], #12 #unsued \"dead end\" pieces\r\n# [[0,0,0],[0,1,0],[0,1,0]], #13\r\n# [[0,0,0],[0,1,1],[0,0,0]], #14\r\n# [[0,0,0],[1,1,0],[0,0,0]] ]#15\r\n if index == -1:\r\n return all_blocks\r\n else:\r\n return all_blocks[index]", "def test_programs():\n yield 4, 4, 1\n yield 16, 12, 2", "def collect_blocks():\n\n # Below are the position of (c,r) in a block.\n\n #########################\n # (0,0) # (1,0) # (2,0) #\n #########################\n #########################\n # (0,1) # (1,1) # (2,1) #\n #########################\n #########################\n # (0,2) # (1,2) # (2,2) #\n #########################\n\n for x in range(72):\n r, c = x // 9 % 3, x % 3\n if r == 0:\n if c == 0:\n yield x, x + 10\n yield x, x + 11\n yield x, x + 19\n yield x, x + 20\n elif c == 1:\n yield x, x + 8\n yield x, x + 10\n yield x, x + 17\n yield x, x + 19\n else:\n yield x, x + 7\n yield x, x + 8\n yield x, x + 16\n yield x, x + 17\n elif r == 1:\n if c == 0:\n yield x, x + 10\n yield x, x + 11\n elif c == 1:\n yield x, x + 8\n yield x, x + 10\n else:\n yield x, x + 8\n yield x, x + 7", "def test_tile_read_valid():\n # Tile 7-43-24 - Full tile\n bounds = [\n -6574807.42497772,\n 12210356.646387195,\n -6261721.357121638,\n 12523442.714243278,\n ]\n with rasterio.open(COG) as src_dst:\n arr, mask = reader.part(\n src_dst, bounds, 16, 16, dst_crs=constants.WEB_MERCATOR_CRS\n )\n assert arr.shape == (1, 16, 16)\n assert mask.shape == (16, 16)\n\n # Read bounds at full resolution\n with rasterio.open(COG) as src_dst:\n arr, mask = reader.part(src_dst, bounds, dst_crs=constants.WEB_MERCATOR_CRS)\n assert arr.shape == (1, 893, 893)\n assert mask.shape == (893, 893)\n\n # set max_size for the returned array\n with rasterio.open(COG) as src_dst:\n arr, mask = reader.part(\n src_dst, bounds, max_size=50, dst_crs=constants.WEB_MERCATOR_CRS\n )\n assert arr.shape == (1, 50, 50)\n assert mask.shape == (50, 50)\n\n # If max_size is bigger than actual size, there is no effect\n with rasterio.open(COG) as src_dst:\n arr, mask = reader.part(\n src_dst, bounds, max_size=1000, dst_crs=constants.WEB_MERCATOR_CRS\n )\n assert arr.shape == (1, 893, 893)\n assert mask.shape == (893, 893)\n\n # Incompatible max_size with height and width\n with pytest.warns(UserWarning):\n with rasterio.open(COG) as src_dst:\n arr, mask = reader.part(\n src_dst,\n bounds,\n max_size=50,\n width=25,\n height=25,\n dst_crs=constants.WEB_MERCATOR_CRS,\n )\n assert arr.shape == (1, 25, 25)\n assert mask.shape == (25, 25)", "def test04a(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=16, rootdir=self.rootdir)\n sl = slice(1, 2)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def test_bottleneck_block(self):\n images = tf.zeros((10, 128, 128, 3), dtype=tf.float32)\n global_params = efficientnet_model.GlobalParams(\n 1.0,\n 1.0,\n 0,\n 'channels_last',\n num_classes=10,\n 
batch_norm=utils.batch_norm_class(False))\n blocks_args = [\n efficientnet_model.BlockArgs(\n kernel_size=3,\n num_repeat=3,\n input_filters=3,\n output_filters=6,\n expand_ratio=6,\n id_skip=True,\n strides=[2, 2],\n conv_type=0,\n fused_conv=0,\n super_pixel=0)\n ]\n model = efficientnet_model.Model(blocks_args, global_params)\n outputs = model(images, training=True)\n self.assertEqual((10, 10), outputs[0].shape)", "def test_4_2_5D_cube_splits(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0), (1, 1, 1, 0, 0), (1, 1, 1, 1, 0),\n (1, 1, 1, 0, 1), (1, 1, 0, 1, 0), (1, 1, 0, 1, 1),\n (1, 1, 0, 0, 1), (1, 0, 1, 0, 0), (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1), (1, 0, 0, 0, 1), (0, 1, 0, 0, 0),\n (0, 1, 1, 0, 0), (0, 1, 1, 1, 0), (0, 1, 1, 1, 1),\n (0, 1, 1, 0, 1), (0, 1, 0, 1, 0), (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1), (0, 0, 1, 0, 1), (0, 0, 0, 1, 0),\n (0, 0, 0, 1, 1), (0, 0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.0), (0.0, 0.0, 0.5, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5, 0.5), (0.0, 0.5, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.0, 0.0, 0.0), (0.0, 0.5, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.5, 0.0, 0.5), (0.0, 0.5, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0, 0.0), (0.5, 0.0, 0.0, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.0, 0.5), (0.5, 0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.0, 0.0), (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.25, 0.25, 0.25, 0.25, 0.25), (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5), (1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 0.5), (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 1.0, 0.5, 0.5, 1.0), (1.0, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0), (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 0.5, 1.0, 1.0), (1.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5), (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0), (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 1.0), (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5), (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0), (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5), (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5, 0.5), (1.0, 0.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.0, 0.5, 0.0),\n (1.0, 0.0, 0.5, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0, 0.0),\n (1.0, 0.0, 0.5, 0.5, 0.0), (1.0, 0.5, 0.0, 0.5, 0.5),\n (1.0, 0.5, 0.0, 0.0, 0.5), (1.0, 0.5, 0.0, 0.0, 0.0),\n (1.0, 0.5, 0.0, 0.5, 0.0), (1.0, 0.5, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25, 0.25), (1.0, 1.0, 0.0, 0.5, 0.5),\n (1.0, 1.0, 0.0, 0.0, 0.5), (1.0, 1.0, 0.0, 0.5, 0.0),\n (1.0, 1.0, 0.5, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0, 0.0),\n (1.0, 1.0, 0.5, 0.5, 0.0), (0.5, 1.0, 0.0, 0.5, 0.5),\n (0.5, 1.0, 0.0, 0.0, 0.5), (0.5, 1.0, 0.0, 0.0, 0.0),\n (0.5, 1.0, 0.0, 0.5, 0.0), (0.5, 1.0, 0.5, 0.0, 0.5),\n (0.5, 1.0, 
0.5, 0.0, 0.0), (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25, 0.25), (1.0, 1.0, 1.0, 0.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.0), (1.0, 0.5, 1.0, 0.0, 0.5),\n (1.0, 0.5, 1.0, 0.0, 0.0), (1.0, 0.5, 1.0, 0.5, 0.0),\n (0.5, 1.0, 1.0, 0.0, 0.5), (0.5, 1.0, 1.0, 0.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0), (0.5, 0.5, 1.0, 0.0, 0.5),\n (0.5, 0.5, 1.0, 0.0, 0.0), (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.75, 0.25, 0.25), (1.0, 1.0, 0.5, 1.0, 0.0),\n (1.0, 0.5, 1.0, 1.0, 0.0), (1.0, 0.5, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 1.0, 0.0), (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 0.5, 1.0, 1.0, 0.0), (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.75, 0.25), (1.0, 1.0, 0.5, 0.0, 1.0),\n (1.0, 0.5, 1.0, 0.0, 1.0), (1.0, 0.5, 0.5, 0.0, 1.0),\n (0.5, 1.0, 1.0, 0.0, 1.0), (0.5, 1.0, 0.5, 0.0, 1.0),\n (0.5, 0.5, 1.0, 0.0, 1.0), (0.5, 0.5, 0.5, 0.0, 1.0),\n (0.75, 0.75, 0.75, 0.25, 0.75), (1.0, 1.0, 0.0, 1.0, 0.5),\n (1.0, 0.5, 0.0, 1.0, 0.5), (1.0, 0.5, 0.0, 1.0, 0.0),\n (0.5, 1.0, 0.0, 1.0, 0.5), (0.5, 1.0, 0.0, 1.0, 0.0),\n (0.5, 0.5, 0.0, 1.0, 0.5), (0.5, 0.5, 0.0, 1.0, 0.0),\n (0.75, 0.75, 0.25, 0.75, 0.25), (1.0, 1.0, 0.0, 0.5, 1.0),\n (1.0, 0.5, 0.0, 1.0, 1.0), (1.0, 0.5, 0.0, 0.5, 1.0),\n (0.5, 1.0, 0.0, 1.0, 1.0), (0.5, 1.0, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0, 1.0), (0.5, 0.5, 0.0, 0.5, 1.0),\n (0.75, 0.75, 0.25, 0.75, 0.75), (1.0, 0.5, 0.0, 0.0, 1.0),\n (0.5, 1.0, 0.0, 0.0, 1.0), (0.5, 0.5, 0.0, 0.0, 1.0),\n (0.75, 0.75, 0.25, 0.25, 0.75), (1.0, 0.0, 1.0, 0.5, 0.5),\n (1.0, 0.0, 1.0, 0.0, 0.5), (1.0, 0.0, 1.0, 0.5, 0.0),\n (0.5, 0.0, 1.0, 0.5, 0.5), (0.5, 0.0, 1.0, 0.0, 0.5),\n (0.5, 0.0, 1.0, 0.0, 0.0), (0.5, 0.0, 1.0, 0.5, 0.0),\n (0.75, 0.25, 0.75, 0.25, 0.25), (1.0, 0.0, 1.0, 1.0, 0.5),\n (1.0, 0.0, 0.5, 1.0, 0.5), (1.0, 0.0, 0.5, 1.0, 0.0),\n (0.5, 0.0, 1.0, 1.0, 0.5), (0.5, 0.0, 1.0, 1.0, 0.0),\n (0.5, 0.0, 0.5, 1.0, 0.5), (0.5, 0.0, 0.5, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.75, 0.25), (1.0, 0.0, 1.0, 0.5, 1.0),\n (1.0, 0.0, 0.5, 1.0, 1.0), (1.0, 0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 1.0, 1.0, 1.0), (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0, 1.0), (0.5, 0.0, 0.5, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75, 0.75), (1.0, 0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 1.0, 0.0, 1.0), (0.5, 0.0, 0.5, 0.0, 1.0),\n (0.75, 0.25, 0.75, 0.25, 0.75), (1.0, 0.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 0.0, 1.0, 0.5), (0.5, 0.0, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.25, 0.75, 0.25), (1.0, 0.0, 0.0, 0.5, 1.0),\n (0.5, 0.0, 0.0, 1.0, 1.0), (0.5, 0.0, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.5), (0.0, 1.0, 0.0, 0.0, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.0), (0.0, 1.0, 0.5, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0, 0.0), (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.25, 0.75, 0.25, 0.25, 0.25), (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.0, 0.5), (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5), (0.0, 0.5, 1.0, 0.0, 0.5),\n (0.0, 0.5, 1.0, 0.0, 0.0), (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.25, 0.75, 0.75, 0.25, 0.25), (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5), (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5), (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.5), (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.75, 0.25), (0.0, 1.0, 1.0, 0.5, 1.0),\n (0.0, 1.0, 0.5, 1.0, 1.0), (0.0, 1.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 1.0, 1.0, 1.0), (0.0, 0.5, 1.0, 0.5, 1.0),\n (0.0, 0.5, 0.5, 1.0, 1.0), (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75, 0.75), (0.0, 1.0, 0.5, 0.0, 1.0),\n (0.0, 0.5, 1.0, 0.0, 1.0), (0.0, 0.5, 0.5, 0.0, 1.0),\n 
(0.25, 0.75, 0.75, 0.25, 0.75), (0.0, 1.0, 0.0, 1.0, 0.5),\n (0.0, 0.5, 0.0, 1.0, 0.5), (0.0, 0.5, 0.0, 1.0, 0.0),\n (0.25, 0.75, 0.25, 0.75, 0.25), (0.0, 1.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0, 1.0), (0.0, 0.5, 0.0, 0.5, 1.0),\n (0.25, 0.75, 0.25, 0.75, 0.75), (0.0, 0.5, 0.0, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.0, 0.5), (0.0, 0.0, 1.0, 0.5, 0.0),\n (0.25, 0.25, 0.75, 0.25, 0.25), (0.0, 0.0, 1.0, 1.0, 0.5),\n (0.0, 0.0, 0.5, 1.0, 0.5), (0.0, 0.0, 0.5, 1.0, 0.0),\n (0.25, 0.25, 0.75, 0.75, 0.25), (0.0, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.0, 0.5, 1.0, 1.0), (0.0, 0.0, 0.5, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75, 0.75), (0.0, 0.0, 0.5, 0.0, 1.0),\n (0.25, 0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(1, 1, 1, 1, 1): [(1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0),\n (1.0, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0.75, 0.75, 0.75, 0.75, 0.75),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0),\n (0.5, 1.0, 0.5, 1.0, 1.0)],\n (0.25, 0.75, 0.75, 0.75, 0.25): [(0.5, 1.0, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0, 1, 1, 1, 0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (\n 0.5, 0.5, 1.0, 0.5, 0.5)],\n (0.0, 0.0, 1.0, 0.5, 1.0): [(0.5, 0.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.0, 0.0, 0.5, 0.5, 1.0),\n (0, 0, 1, 1, 1),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.5, 1.0, 0.5, 1.0),\n (0, 0, 1, 0, 1),\n (0.5, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.25, 0.25, 0.75, 0.75, 0.75),\n (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (\n 0.25, 0.25, 0.75, 0.25, 0.75)]}\n\n init_triangulation(5, 1, check, nn_checks)", "def test_process_chunk_trimmed(self):\n foo = None\n chunk, order = self.chunk_procesor.process(foo, 1, 1, 1)\n expected = (12, 2, 14, 1)\n 
self.assertEqual(expected, chunk.shape)", "def test_segment_image(self):\n with Image.open(self.subject) as im:\n image = im.convert(\"RGB\")\n\n segment_generator = image_helper.segment_image(image, 200, 200)\n\n sizes = [\n ((200, 200), next(segment_generator)),\n ((200, 200), next(segment_generator)),\n ((200, 200), next(segment_generator)),\n ((200, 200), next(segment_generator)),\n ((200, 200), next(segment_generator)),\n ((200, 200), next(segment_generator)),\n ((200, 200), next(segment_generator)),\n ((200, 200), next(segment_generator)),\n ((200, 50), next(segment_generator)),\n ((200, 50), next(segment_generator)),\n ((200, 50), next(segment_generator)),\n ((200, 50), next(segment_generator))\n ]\n\n for size, segment in sizes:\n self.assertEqual(size, segment.size)", "def test_undohandle_read_block(self):\n data = \"\"\"\\\nThis\nis\na multi-line\nfile\"\"\"\n for block in [1, 2, 10]:\n s = StringIO(data)\n h = ParserSupport.UndoHandle(s)\n h.peekline()\n new = \"\"\n while True:\n tmp = h.read(block)\n if not tmp:\n break\n new += tmp\n self.assertEqual(data, new)\n h.close()", "def test_unittest_snapshots(self, shadow):\n T, n = shadow.bits.shape\n assert (T, n) == shadow.recipes.shape\n assert shadow.snapshots == T\n assert shadow.local_snapshots().shape == (T, n, 2, 2)\n assert shadow.global_snapshots().shape == (T, 2**n, 2**n)", "def test_04(self):\n assert 'False' == Api.requestBlock('test-04', charOrder='')", "def test02e(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=10, rootdir=self.rootdir)\n sl = slice(4, 3, 30)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n 
(0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)", "def hashcodeGrid4x4x4(hashcodestring):\n hashcodegrid = [\" \",\" \",\" \",\" \"]\n \n i1 = 0\n while i1 < 64: # every character.\n linenum = int(i1/16) # every 16th character.\n if i1 % 4 == 3: # every 4th character.\n hashcodegrid[linenum] = hashcodegrid[linenum] + hashcodestring[i1] + \" \"\n else: # every other character.\n hashcodegrid[linenum] = hashcodegrid[linenum] + hashcodestring[i1]\n i1=i1+1\n \n #print(\"- - - - - - - - - - -\")\n print(\"+---- ---- ---- ----+\")\n for line in hashcodegrid:\n print(line)\n print(\"+---- ---- ---- ----+\")\n #print(\"- - - - - - - - - - -\")", "def test_block_structure_scalar_on_elements_validity(n_rods):\n\n world_rods = [MockRod(np.random.randint(10, 30 + 1)) for _ in range(n_rods)]\n block_structure = MemoryBlockCosseratRod(\n world_rods, [i for i in range(len(world_rods))]\n )\n\n for i in range(n_rods):\n start_idx = block_structure.start_idx_in_rod_elems[i]\n end_idx = block_structure.end_idx_in_rod_elems[i]\n\n # radius\n assert np.shares_memory(block_structure.radius, world_rods[i].radius)\n assert np.shares_memory(\n block_structure.scalar_dofs_in_rod_elems, world_rods[i].radius\n )\n assert_allclose(block_structure.radius[start_idx:end_idx], world_rods[i].radius)\n\n # volume\n assert np.shares_memory(block_structure.volume, world_rods[i].volume)\n assert np.shares_memory(\n block_structure.scalar_dofs_in_rod_elems, world_rods[i].volume\n )\n assert_allclose(block_structure.volume[start_idx:end_idx], world_rods[i].volume)\n\n # density\n assert np.shares_memory(block_structure.density, world_rods[i].density)\n assert np.shares_memory(\n block_structure.scalar_dofs_in_rod_elems, world_rods[i].density\n )\n assert_allclose(\n block_structure.density[start_idx:end_idx], world_rods[i].density\n )\n\n # lengths\n assert np.shares_memory(block_structure.lengths, world_rods[i].lengths)\n assert np.shares_memory(\n block_structure.scalar_dofs_in_rod_elems, world_rods[i].lengths\n )\n assert_allclose(\n 
block_structure.lengths[start_idx:end_idx], world_rods[i].lengths\n )\n\n # rest lengths\n assert np.shares_memory(\n block_structure.rest_lengths, world_rods[i].rest_lengths\n )\n assert np.shares_memory(\n block_structure.scalar_dofs_in_rod_elems, world_rods[i].rest_lengths\n )\n assert_allclose(\n block_structure.rest_lengths[start_idx:end_idx], world_rods[i].rest_lengths\n )\n\n # dilatation\n assert np.shares_memory(block_structure.dilatation, world_rods[i].dilatation)\n assert np.shares_memory(\n block_structure.scalar_dofs_in_rod_elems, world_rods[i].dilatation\n )\n assert_allclose(\n block_structure.dilatation[start_idx:end_idx], world_rods[i].dilatation\n )\n\n # dilatation rate\n assert np.shares_memory(\n block_structure.dilatation_rate, world_rods[i].dilatation_rate\n )\n assert np.shares_memory(\n block_structure.scalar_dofs_in_rod_elems, world_rods[i].dilatation_rate\n )\n assert_allclose(\n block_structure.dilatation_rate[start_idx:end_idx],\n world_rods[i].dilatation_rate,\n )", "def test_tiled_data_generator(self):\n\n datagen = TiledDataGenerator(\n featurewise_center=True, featurewise_std_normalization=True\n )\n\n datagen.fit(self.test_file_2)\n\n mn = np.mean(self.test_data_2)\n std = np.std(self.test_data_2)\n\n self.assertAlmostEqual(mn / datagen.mean[0], 1, places=6)\n self.assertAlmostEqual(std / datagen.std[0], 1, places=6)\n\n tile_gen = TiledIterator(\n twod_image=self.test_file_1,\n overlap_log_2=1,\n image_data_generator=datagen,\n )\n\n next(tile_gen)\n next(tile_gen)\n tile = next(tile_gen)\n\n # 64//2**1 = 32\n ofst = 32 * 2\n img0 = self.test_data_1[ofst : ofst + 65, ofst : ofst + 65]\n\n np.allclose(tile, (img0 - mn) / std)", "def num_43():\n \n def block(a, r=3, cs=3, row_order=True):\n \"\"\"Block slice an array using a window of (rs, cs) size\n \"\"\"\n lenr = a.shape[0]//rs\n lenc = a.shape[1]//cs\n if row_order:\n iter = [(i, j) for (i, j) in np.ndindex(lenr, lenc)]\n else:\n iter = [(j, i) for (i, j) in np.ndindex(lenr, lenc)]\n b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] for (i,j) in iter])\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n return b\n r = 6\n c = 6\n a = np.arange(r*c).reshape(r, c)\n vs = np.array(np.vsplit(a, 2))\n hs = np.array(np.hsplit(a, 2))\n #a.squeeze(axis=(2,3))\n rs = 3\n cs = 4\n #lenr = a.shape[0]//rs\n #lenc = a.shape[1]//cs\n #b = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (i, j) in np.ndindex(lenr, lenc)])\n #b1 = np.array([a[i*rs:(i+1)*rs, j*cs:(j+1)*cs] \n # for (j, i) in np.ndindex(lenr, lenc)])\n e = block(a, 3, 4, row_first=False)\n b = block(a, rs, cs, True)\n b1 = block(a, rs, cs, False)\n c = np.array([np.vsplit(i, 2) for i in np.hsplit(a, 2)])\n d = np.array([np.hsplit(i, 2) for i in np.vsplit(a, 2)])\n #c = c.reshape(lenr*lenc, rs, cs) \n return a, b, b1, c, d, e", "def trial(test):\n clear, index, byte = test\n assert len(clear) == blocksize\n\n # Handle general case\n tmp = rand(index)\n pad = padding(tmp, blocksize)\n tmp = xor(tmp + pad, clear)\n tmp[index] = byte\n assert len(tmp) == blocksize\n if not query(tmp + block):\n return False\n\n # Handle cases like above\n if index == 0:\n return True\n tmp[index - 1] ^= 0xff\n return query(tmp + block)", "def test_bottleneck_block_with_superpixel_layer(self):\n images = tf.zeros((10, 128, 128, 3), dtype=tf.float32)\n global_params = efficientnet_model.GlobalParams(\n 1.0,\n 1.0,\n 0,\n 'channels_last',\n num_classes=10,\n batch_norm=utils.TpuBatchNormalization)\n blocks_args = [\n 
efficientnet_model.BlockArgs(\n kernel_size=3,\n num_repeat=3,\n input_filters=3,\n output_filters=6,\n expand_ratio=6,\n id_skip=True,\n strides=[2, 2],\n conv_type=0,\n fused_conv=0,\n super_pixel=1)\n ]\n model = efficientnet_model.Model(blocks_args, global_params)\n outputs = model(images, training=True)\n self.assertEqual((10, 10), outputs[0].shape)", "def test04c(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=100, rootdir=self.rootdir)\n sl = slice(8000, None)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def test_small_chunk(self):\n chunksize = 7 * (1024 ** 2)\n size = 8 * (1024 ** 2)\n self.assertEqual(find_chunksize(size, chunksize), chunksize)", "def test_06(self):\n assert 'False' == Api.requestBlock('test-06', charOrder=51)", "def test_main(self):\n\n expected = \"\"\">FS8APND01D3TW3 | cluster size: 94 \nCTCCCGTAGGAGTCTGGGCCGTATCTCAGTCCCAATGTGGCCGGTCACCCTCTCAGGCCGGCTACCCGTCAAAGCCTTGGTAAGCCACTACCCCACCAACAAGCTGATAAGCCGCGAGTCCATCCCCAACCGCCGAAACTTTCCAACCCCCACCATGCAGCAGGAGCTCCTATCCGGTATTAGCCCCAGTTTCCTGAAGTTATCCCAAAGTCAAGGGCAGGTTACTCACGTGTTACTCACCCGTTCGCC\n\"\"\"\n\n expected_map = \"\"\"FS8APND01EWRS4:\nFS8APND01DXG45:\nFS8APND01D3TW3:\\tFS8APND01CSXFN\\tFS8APND01DQ8MX\\tFS8APND01DY7QW\\tFS8APND01B5QNI\\tFS8APND01CQ6OG\\tFS8APND01C7IGN\\tFS8APND01DHSGH\\tFS8APND01DJ17E\\tFS8APND01CUXOA\\tFS8APND01EUTYG\\tFS8APND01EKK7T\\tFS8APND01D582W\\tFS8APND01B5GWU\\tFS8APND01D7N2A\\tFS8APND01BJGHZ\\tFS8APND01D6DYZ\\tFS8APND01C6ZIM\\tFS8APND01D2X6Y\\tFS8APND01BUYCE\\tFS8APND01BNUEY\\tFS8APND01DKLOE\\tFS8APND01C24PP\\tFS8APND01EBWQX\\tFS8APND01ELDYW\\tFS8APND01B0GCS\\tFS8APND01D4QXI\\tFS8APND01EMYD9\\tFS8APND01EA2SK\\tFS8APND01DZOSO\\tFS8APND01DHYAZ\\tFS8APND01C7UD9\\tFS8APND01BTZFV\\tFS8APND01CR78R\\tFS8APND01B39IE\\tFS8APND01ECVC0\\tFS8APND01DM3PL\\tFS8APND01DELWS\\tFS8APND01CIEK8\\tFS8APND01D7ZOZ\\tFS8APND01CZSAI\\tFS8APND01DYOVR\\tFS8APND01BX9XY\\tFS8APND01DEWJA\\tFS8APND01BEKIW\\tFS8APND01DCKB9\\tFS8APND01EEYIS\\tFS8APND01DDKEA\\tFS8APND01DSZLO\\tFS8APND01C6EBC\\tFS8APND01EE15M\\tFS8APND01ELO9B\\tFS8APND01C58QY\\tFS8APND01DONCG\\tFS8APND01DVXX2\\tFS8APND01BL5YT\\tFS8APND01BIL2V\\tFS8APND01EBSYQ\\tFS8APND01CCX8R\\tFS8APND01B2YCJ\\tFS8APND01B1JG4\\tFS8APND01DJ024\\tFS8APND01BIJY0\\tFS8APND01CIA4G\\tFS8APND01DV74M\\tFS8APND01ECAX5\\tFS8APND01DC3TZ\\tFS8APND01EJVO6\\tFS8APND01D4VFG\\tFS8APND01DYYYO\\tFS8APND01D1EDD\\tFS8APND01DQUOT\\tFS8APND01A2NSJ\\tFS8APND01DDC8I\\tFS8APND01BP1T2\\tFS8APND01DPY6U\\tFS8APND01CIQGV\\tFS8APND01BPUT8\\tFS8APND01BDNH4\\tFS8APND01DOZDN\\tFS8APND01DS866\\tFS8APND01DGS2J\\tFS8APND01EDK32\\tFS8APND01EPA0T\\tFS8APND01CK3JM\\tFS8APND01BKLWW\\tFS8APND01DV0BO\\tFS8APND01DPNXE\\tFS8APND01B7LUA\\tFS8APND01BTTE2\\tFS8APND01CKO4X\\tFS8APND01DGGBY\\tFS8APND01C4NHX\\tFS8APND01DYPQN\nFS8APND01BSTVP:\nFS8APND01EFK0W:\nFS8APND01DCIOO:\nFS8APND01CKOMZ:\n\"\"\"\n\n command = \" \".join( [\"%s/denoiser.py\" % get_qiime_scripts_dir(),\n \"--force\", \"-o\", self.test_dir, \"-i\",\n \"%s/qiime/support_files/denoiser/TestData/denoiser_test_set.sff.txt\" % PROJECT_HOME] );\n\n result = Popen(command,shell=True,universal_newlines=True,\\\n stdout=PIPE,stderr=STDOUT).stdout.read()\n self.result_dir = self.test_dir\n\n observed = \"\".join(list(open(self.result_dir+ \"centroids.fasta\")))\n self.assertEqual(observed, expected)\n \n self.assertEqual(len(list(MinimalFastaParser(open(self.result_dir + \"singletons.fasta\")))), 6)\n\n observed = \"\".join(list(open(self.result_dir+ \"denoiser_mapping.txt\")))\n 
self.assertEqual(observed, expected_map)", "def test_tile_read_nodata_and_alpha():\n bounds = (\n 13604568.04230881,\n -333876.9395496497,\n 13605791.034761373,\n -332653.9470970885,\n )\n\n tilesize = 16\n with rasterio.open(PIX4D_PATH) as src_dst:\n arr, mask = reader.part(src_dst, bounds, tilesize, tilesize, indexes=[1, 2, 3])\n assert arr.shape == (3, 16, 16)\n assert mask.shape == (16, 16)\n assert not mask.all()", "def test_with_2_lines():\n line = \"n\" * 15 + \"\\n\" + \"n\" * 60 + \" \" + \"n\" * 10\n assert wrap_line(line) == \"n\" * 15 + \" \" + \"n\" * 60 + \"\\n\" + \"n\" * 10", "def test_multi_area(self):\n pass", "def test_05(self):\n assert 'True' == Api.requestBlock('test-05', charOrder=50)", "def test_07(self):\n assert 'False' == Api.requestBlock('test-07', charOrder='~!@#$%%^&*()=_+<>?/')", "def test_chunk_size(self):\n for chunk_size, expected_n_chunks in [(1, 100), (3, 34), (200, 1), (None, 1)]:\n with self.subTest(chunk_size=chunk_size):\n iterable_of_args, iterable_len, chunk_size_, n_splits = apply_numpy_chunking(\n self.test_data_numpy, chunk_size=chunk_size, n_splits=1\n )\n\n # Materialize generator and test contents. The chunks should be of size chunk_size (expect for the last\n # chunk which can be smaller)\n iterable_of_args = list(iterable_of_args)\n self.assertEqual(len(iterable_of_args), expected_n_chunks)\n chunk_size = chunk_size or 100\n for chunk_idx, chunk in enumerate(iterable_of_args):\n self.assertIsInstance(chunk[0], np.ndarray)\n np.testing.assert_array_equal(chunk[0], self.test_data_numpy[chunk_idx * chunk_size:\n (chunk_idx + 1) * chunk_size])\n\n # Test other output\n self.assertEqual(iterable_len, expected_n_chunks)\n self.assertEqual(chunk_size_, 1)\n self.assertIsNone(n_splits)", "def test_52(self):\n assert 'False' == Api.requestBlock('test-52')", "def test_nrows_gtiff_object(self):\n self.assertEqual(_test_object(landsat_gtiff)[1], 224)", "def test_16(self):\n assert 'False' == Api.requestBlock('test-16')", "def get_docshtest_blocks(lines):\n\n block = []\n consecutive_empty = 0\n for line_nb, line in enumerate(lines):\n is_empty_line = not line.strip()\n if not is_empty_line:\n if not line.startswith(\" \"):\n if block:\n yield block[:-consecutive_empty] \\\n if consecutive_empty else block\n block = []\n continue\n else:\n line = line[4:]\n if line.startswith(\"$ \") or block:\n if line.startswith(\"$ \"):\n line = line[2:]\n if block:\n yield block[:-consecutive_empty] \\\n if consecutive_empty else block\n block = []\n if is_empty_line:\n consecutive_empty += 1\n else:\n consecutive_empty = 0\n block.append((line_nb + 1, line))\n if block:\n yield block[:-consecutive_empty] \\\n if consecutive_empty else block", "def test_large_block_index():\n\n # TODO: It would be nice to find a way to make this test faster. 
The\n # real bottleneck here is the enormous YAML section.\n\n buff = io.BytesIO()\n\n narrays = int(io.DEFAULT_BUFFER_SIZE / 4)\n\n arrays = []\n for i in range(narrays):\n arrays.append(np.array([i], np.uint16))\n\n tree = {\"arrays\": arrays}\n\n ff = asdf.AsdfFile(tree)\n # Since we're testing with small arrays, force all arrays to be stored\n # in internal blocks rather than letting some of them be automatically put\n # inline.\n ff.write_to(buff, all_array_storage=\"internal\")\n\n buff.seek(0)\n with asdf.open(buff) as ff2:\n assert isinstance(ff2._blocks._internal_blocks[0], block.Block)\n assert len(ff2._blocks._internal_blocks) == narrays", "def test04ab(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=100, rootdir=self.rootdir)\n sl = slice(1, 8000)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def block_repeat(self, board, n):\r\n this_block = []\r\n row_start = 3 * (n // 4)\r\n col_start = 3 * (n % 4)\r\n for r in range(3):\r\n this_row = board[(12 * (row_start + r) + col_start):(12 * (row_start + r) + col_start + 3)]\r\n for x in this_row:\r\n this_block.append(x)\r\n # println(n)\r\n # println(this_block)\r\n for letter in 'abcdef':\r\n if this_block.count(letter) > 1:\r\n # println(this_block)\r\n return True\r\n return False", "def test_block():\n b = common.Block(['1 1 1', '2 2 3 ', ''])\n expect = ['1 1 1', '2 2 3']\n c = b.compact()\n assert c == expect\n assert b != expect\n\n b.compact(inplace=True)\n assert b == expect", "def test04d(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=100, rootdir=self.rootdir)\n sl = slice(None, None, 2)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def test_block_structure_matrices_on_elements_validity(n_rods):\n world_rods = [MockRod(np.random.randint(10, 30 + 1)) for _ in range(n_rods)]\n block_structure = MemoryBlockCosseratRod(\n world_rods, [i for i in range(len(world_rods))]\n )\n\n for i in range(n_rods):\n start_idx = block_structure.start_idx_in_rod_elems[i]\n end_idx = block_structure.end_idx_in_rod_elems[i]\n\n # director collection\n assert np.shares_memory(\n block_structure.director_collection, world_rods[i].director_collection\n )\n assert np.shares_memory(\n block_structure.matrix_dofs_in_rod_elems, world_rods[i].director_collection\n )\n assert_allclose(\n block_structure.director_collection[..., start_idx:end_idx],\n world_rods[i].director_collection,\n )\n\n # mass second moment of inertia\n assert np.shares_memory(\n block_structure.mass_second_moment_of_inertia,\n world_rods[i].mass_second_moment_of_inertia,\n )\n assert np.shares_memory(\n block_structure.matrix_dofs_in_rod_elems,\n world_rods[i].mass_second_moment_of_inertia,\n )\n assert_allclose(\n block_structure.mass_second_moment_of_inertia[..., start_idx:end_idx],\n world_rods[i].mass_second_moment_of_inertia,\n )\n\n # inv mass second moment of inertia\n assert np.shares_memory(\n block_structure.inv_mass_second_moment_of_inertia,\n world_rods[i].inv_mass_second_moment_of_inertia,\n )\n assert np.shares_memory(\n block_structure.matrix_dofs_in_rod_elems,\n world_rods[i].inv_mass_second_moment_of_inertia,\n )\n assert_allclose(\n block_structure.inv_mass_second_moment_of_inertia[..., start_idx:end_idx],\n world_rods[i].inv_mass_second_moment_of_inertia,\n )\n\n # shear matrix\n assert np.shares_memory(\n block_structure.shear_matrix, world_rods[i].shear_matrix\n )\n assert np.shares_memory(\n block_structure.matrix_dofs_in_rod_elems, 
world_rods[i].shear_matrix\n )\n assert_allclose(\n block_structure.shear_matrix[..., start_idx:end_idx],\n world_rods[i].shear_matrix,\n )", "def test_2d_lowmem():\n dic, data = ng.bruker.read_lowmem(os.path.join(DATA_DIR, \"bruker_2d\"))\n assert dic['FILE_SIZE'] == 3686400\n assert data.shape == (600, 768)\n assert round(data[0, 40].real, 2) == 28.0\n assert round(data[0, 40].imag, 2) == -286.0\n assert round(data[13, 91].real, 2) == -7279.0\n assert round(data[13, 91].imag, 2) == -17680.0\n lowmem_write_readback(dic, data)", "def test_chunker(self):\n chunker = StringChunker(Protocol.sieve_function)\n\n self.assert_chunker_sample(chunker, self.RASFL_SAMPLE_DATA1)\n self.assert_chunker_sample_with_noise(chunker, self.RASFL_SAMPLE_DATA1)\n self.assert_chunker_fragmented_sample(chunker, self.RASFL_SAMPLE_DATA1)\n self.assert_chunker_combined_sample(chunker, self.RASFL_SAMPLE_DATA1) \n \n self.assert_chunker_sample(chunker, self.RASFL_STATUS_DATA)\n self.assert_chunker_sample_with_noise(chunker, self.RASFL_STATUS_DATA)\n self.assert_chunker_fragmented_sample(chunker, self.RASFL_STATUS_DATA)\n self.assert_chunker_combined_sample(chunker, self.RASFL_STATUS_DATA)", "def test_31(self):\n assert 'True' == Api.requestBlock('test-31')", "def test_block_bad_signature(self):\n pass", "def test_dxt5_colorblock_alpha_issue_4142():\n\n with Image.open(\"Tests/images/dxt5-colorblock-alpha-issue-4142.dds\") as im:\n px = im.getpixel((0, 0))\n assert px[0] != 0\n assert px[1] != 0\n assert px[2] != 0\n\n px = im.getpixel((1, 0))\n assert px[0] != 0\n assert px[1] != 0\n assert px[2] != 0", "def get_test_suite():\n MAX_COUNT = 3000\n EOF = -1\n\n count = 23\n block_of_bytes = b'hi, how are\\nyou?'\n suite = get_one_suite(count, block_of_bytes, MAX_COUNT, EOF)\n yield suite\n\n count = 300\n block_of_bytes = bytes([n for n in range(256)])\n suite = get_one_suite(count, block_of_bytes, MAX_COUNT, EOF)\n yield suite\n\n for count in [1, 2, 10]:\n block_of_bytes = b''\n suite = get_one_suite(count, block_of_bytes, MAX_COUNT, EOF)\n yield suite\n\n # count = 1\n # for i in list(range(0, 30)) + list(range(70, 75)) + list(range(100, 104)) \\\n # + list(range(251, 255)):\n # block_of_bytes = bytes([i])\n # suite = get_one_suite(count, block_of_bytes, MAX_COUNT, EOF)\n # yield suite\n\n for count in [2, 3, 127, 128, 129, 254, 255, 256, 260]:\n alist = [n for n in range(256)]\n random.shuffle(alist)\n block_of_bytes = bytes(alist)\n suite = get_one_suite(count, block_of_bytes, MAX_COUNT, EOF)\n yield suite\n\n for count in [2, 5, 127, 255, 300, 1000, 2000]:\n alist = [n for n in range(256)]*3\n random.shuffle(alist)\n block_of_bytes = bytes(alist)\n suite = get_one_suite(count, block_of_bytes, MAX_COUNT, EOF)\n yield suite", "def test_adjacent_bomb_count_3(self):\n index = 17\n adj_list = utils.adjacent_bomb_count(index)\n adj_list_2 = [\n index + x\n for x in utils.ADJ_LIST\n if 0 <= index + x <= (utils.TILE_COUNT - 1)\n ]\n self.assertEqual(adj_list, adj_list_2)" ]
[ "0.65974164", "0.6437619", "0.6378572", "0.6358274", "0.634844", "0.63045037", "0.6271461", "0.6197353", "0.6164198", "0.61533105", "0.6144925", "0.60612434", "0.60551167", "0.6042766", "0.6040426", "0.6015868", "0.60060424", "0.59940493", "0.5990534", "0.5964054", "0.595681", "0.5952927", "0.5930085", "0.5898428", "0.5897798", "0.5890067", "0.588314", "0.5836057", "0.583285", "0.5814758", "0.5756571", "0.5735809", "0.572649", "0.568947", "0.567681", "0.56763816", "0.5675343", "0.5671528", "0.5657643", "0.5627437", "0.5609965", "0.5576888", "0.55745566", "0.55426353", "0.5537334", "0.552916", "0.5527684", "0.55206704", "0.5519983", "0.5516619", "0.55141246", "0.55051374", "0.5504575", "0.5498738", "0.54976237", "0.548216", "0.54801834", "0.5467198", "0.5448932", "0.544809", "0.54477316", "0.54445493", "0.5442222", "0.54410505", "0.54367524", "0.54314977", "0.5424836", "0.5423934", "0.5422609", "0.54165417", "0.5403028", "0.540008", "0.53952414", "0.5394686", "0.5387568", "0.538611", "0.53840935", "0.5380438", "0.5376957", "0.53579146", "0.5357267", "0.53458005", "0.5339442", "0.5337702", "0.5334778", "0.5331827", "0.53290325", "0.5329001", "0.5323095", "0.531993", "0.5318415", "0.5306201", "0.53042215", "0.530065", "0.5286899", "0.52859163", "0.5273794", "0.5269512", "0.5269125", "0.5265167", "0.52598137" ]
0.0
-1
Sets the default values for the project
def __init__(self): # BASE_DIR:///artifice/scraper/ self.BASE_DIR = os.path.dirname(loc) # prototypes self._eth0 = '0.0.0.0' self._exposed_port = 8080 self._db_name = 'site.db' self._redis_pword = 'password' self._redis_host = 'localhost' self._redis_port = 6379 self._celery_broker_uname = 'michael' self._celery_broker_pword = 'michael123' self._celery_broker_host = 'localhost' self._celery_broker_virtual_host = 'michael_vhost' # flask self.TESTING = False self.URL_PREFIX = '' self.FLASK_PORT = self._exposed_port self.FLASK_HOST = '0.0.0.0' self.FLASK_DEBUG = False self.FLASK_USE_RELOADER = False self.FLASK_THREADED = True # logging self.LOG_FILE = 'flask.log' self.LOG_LEVEL = 'INFO' self.CELERY_LOG_LEVEL = 'ERROR' self.CELERY_LOG_FILE = 'celery.log' self.STDOUT = True # database self.DROP_TABLES = True self.SQLALCHEMY_TRACK_MODIFICATIONS = False self.SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format( os.path.join(self.BASE_DIR, self._db_name)) # redis self.REDIS_URL = 'redis://{}:@{}:{}/0'.format( self._redis_pword, self._redis_host, self._redis_port) self.REDIS_HIT_COUNTER = 'HIT_COUNTER' # defaults self.ARGS_DEFAULT_LIMIT = 10 self.ARGS_DEFAULT_STATUS = ['READY', 'TASKED', 'DONE'] self.SUPERVISOR_ENABLED = True self.SUPERVISOR_DEBUG = False self.SUPERVISOR_POLITE = 1 # celery self.CELERY_WORKERS = 8 self.CELERY_MODULE = 'background' self.CELERY_BROKER = 'amqp://{}:{}@{}/{}'.format( self._celery_broker_uname, self._celery_broker_pword, self._celery_broker_host, self._celery_broker_virtual_host) self.CELERY_BACKEND = 'rpc://' self.CELERY_INCLUDE = ['artifice.scraper.background.tasks'] # endpoints self.URL_FOR_STATUS = 'http://{}:{}/status'.format(self._eth0, self._exposed_port) self.URL_FOR_QUEUE = 'http://{}:{}/queue'.format(self._eth0, self._exposed_port) self.URL_FOR_CONTENT = 'http://{}:{}/content'.format(self._eth0, self._exposed_port)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_defaults(context: CreateCommandsContext):\n job_default_parameters: List[\n Parameter\n ] = context.settings.job_default_parameters\n logger.info(\n \"Please set default rows current value shown in [brackets]. Pressing enter\"\n \" without input will keep current value\"\n )\n try:\n project_name = click.prompt(\n \"Please enter default IDIS project name:\",\n show_default=True,\n default=job_default_parameters.project_name,\n )\n\n destination_path = click.prompt(\n \"Please enter default job destination directory:\",\n show_default=True,\n default=job_default_parameters.destination_path,\n )\n except Abort:\n logger.info(\"Cancelled\")\n\n job_default_parameters.project_name = project_name\n job_default_parameters.destination_path = destination_path\n context.settings.save_to()\n logger.info(\"Saved\")", "def set_defaults(self):\n self.plastic = False\n self.unset_output()\n self.reward = False\n self.patmod = config.impact_modulation_default", "def set_defaults(self):\n if self.main_win.working_dir is None or self.main_win.id is None or \\\n len(self.main_win.working_dir) == 0 or len(self.main_win.id) == 0:\n msg_window('Working Directory or Reconstruction ID not configured')\n else:\n self.reconstructions.setText('1')\n self.device.setText('(0,1)')\n self.alg_seq.setText('((3,(\"ER\",20),(\"HIO\",180)),(1,(\"ER\",20)))')\n self.beta.setText('.9')\n self.support_area.setText('(0.5, 0.5, 0.5)')\n self.cont.setChecked(False)", "def defaults(self):\n self.lib.iperf_defaults(self._test)", "def setdefaults(self):\n self.config = {\n 'dbuser': Infopage.DEFAULT_DBUSER,\n 'dbname': Infopage.DEFAULT_DBNAME,\n 'dbpassword': Infopage.DEFAULT_DBPASSWORD,\n 'dbhost': Infopage.DEFAULT_DBHOST\n }", "def _initialize_project_variables(self):\n self.Source = ''\n self.Regional = ''\n self.Vernacular = ''\n self.Fallback = dict()\n self.New_Target = dict()\n self.Biblical_Terms = dict()\n self.Old_Target = dict()\n\n# self.list_projects = []\n# self.project_lines = []\n# self.indent = 0\n# self.Treed = False\n self.root = etree.Element('root')\n# #add child 'settings', all user configurable bits under here\n self.settings = etree.SubElement(self.root, \"settings\")\n# self.old_mode = dict()\n# self.spreferred = etree.SubElement(self.settings, \"preferred\")\n# self.smode = etree.SubElement(self.settings, \"mode\")\n# self.stemp = etree.SubElement(self.settings, \"template\")\n self.sf0 = etree.SubElement(self.settings, \"f0\")\n self.sf1 = etree.SubElement(self.settings, \"f1\")\n self.sf2 = etree.SubElement(self.settings, \"f2\")\n self.trout = etree.SubElement(self.root, \"tree\")", "def init():\n defaults = _project_defaults()\n\n if Project.prompt:\n defaults['name'] = prompt(\"Enter the project's name:\", defaults['name'])\n defaults['package'] = prompt(\"Enter the project's package:\", defaults['package'])\n defaults['author'] = prompt(\"Enter the project's author:\", defaults['author'])\n defaults['author_email'] = prompt(\"Enter the project's author's email:\", defaults['author_email'])\n defaults['description'] = prompt(\"Enter the project's description:\", defaults['description'])\n\n # print(\"defaults:\\n{defaults}\".format(defaults=pformat(defaults)))\n\n if Project.use_templates:\n\n template = Template()\n\n for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))\n for herringlib in HerringFile.herringlib_paths]:\n\n info(\"template directory: %s\" % template_dir)\n # noinspection PyArgumentEqualDefault\n template.generate(template_dir, defaults, 
overwrite=False)", "def set_defaults(self):\r\n for name, option in self.options.iteritems():\r\n if not option.is_required():\r\n self.set_value(name, option, option.default)", "def set_initial_values(self):\n\n pass", "def defaults():\n return {}", "def set_defaults(self):\n\n for k, v in self.DEFAULTS.items():\n if not getattr(self, k, None):\n setattr(self, k, v)", "def set_default_parameters(self):\n super().set_default_parameters()", "def getDefaultSettings():\n return {}", "def set_defaults(self, **kw):\n group = kw.pop('group', None)\n for o, v in kw.items():\n self.cfg_fixture.set_default(o, v, group=group)", "def defaults():\n global __preset_staging\n \n t = TreeDict('Default_Parameter_Tree', __defaultpresettree__ = True)\n __preset_staging[id(t)] = t\n return t", "def test_Defaults(self):\n self._run(self._test_scenarios, \"Defaults\")", "def set_default_values_as_needed(self):\n if self.verbose:\n click.echo('Updating required default values')\n for field in ARGUMENTS_DEFAULT_VALUES:\n if self.__class__.__name__ in ARGUMENTS_DEFAULT_VALUES[field][1]:\n self.data[field] = ARGUMENTS_DEFAULT_VALUES[field][0]", "def setup_default_terms(self):\n # Setting config with pattern -\n # default_dict[\"json config key\"] = (\"Default Value\", \"Ask User\", \"Value Type\")\n\n self.default_terms[Keys.first_run] = (Values.first_run, False, None)\n self.default_terms[Keys.nas_mount] = (Values.nas_mount_path, True, str)\n self.default_terms[Keys.secs_between_checks] = (Values.check_time, True, int)\n self.default_terms[Keys.redmine_api_key] = ('none', False, str)", "def setDefaults(self) -> None:\n self.night_boundary = -12.0\n self.new_moon_phase_threshold = 20.0", "def defaults(self):\n\n return None", "def defaults(self):\n\n return None", "def defaults() -> dict:\n pass", "def default_setting():\n total_count.set(default_len)\n training_rate_clicked.set('50%')\n dimension_clicked.set('100 x 100')", "def setdefaults(self): # 3\n res = self.__obj.setdefaults()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def set_default_params(self):\n print('------------------')\n print('Setting default parameters with file ', self.input_file_name)\n if 'ssephem' not in self.__dict__:\n self.__dict__['ssephem'] = 'DE436'\n print('Setting default Solar System Ephemeris: DE436')\n if 'clock' not in self.__dict__:\n self.__dict__['clock'] = None\n print('Setting a default Enterprise clock convention (check the code)')\n if 'setupsamp' not in self.__dict__:\n self.__dict__['setupsamp'] = False\n if 'psrlist' in self.__dict__:\n self.psrlist = np.loadtxt(self.psrlist, dtype=np.unicode_)\n print('Only using pulsars from psrlist')\n else:\n self.__dict__['psrlist'] = []\n print('Using all available pulsars from .par/.tim directory')\n if 'psrcachefile' not in self.__dict__:\n self.psrcachefile = None\n if 'tm' not in self.__dict__:\n self.tm = 'default'\n print('Setting a default linear timing model')\n if 'inc_events' not in self.__dict__:\n self.inc_events = True\n print('Including transient events to specific pulsar models')\n if 'fref' not in self.__dict__:\n self.fref = 1400 # MHz\n print('Setting reference radio frequency to 1400 MHz')\n if 'mcmc_covm_csv' in self.__dict__ and os.path.isfile(self.mcmc_covm_csv):\n print('MCMC jump covariance matrix is available')\n self.__dict__['mcmc_covm'] = pd.read_csv(self.mcmc_covm_csv, index_col=0)\n else:\n self.__dict__['mcmc_covm'] = None\n # Copying default priors from StandardModels/CustomModels object\n # Priors are 
chosen not to be model-specific because HyperModel\n # (which is the only reason to have multiple models) does not support\n # different priors for different models\n for prior_key, prior_default in self.noise_model_obj().priors.items():\n if prior_key not in self.__dict__.keys():\n self.__dict__[prior_key] = prior_default\n\n # Model-dependent parameters\n for mkey in self.models:\n\n self.models[mkey].modeldict = dict()\n\n print('------------------')", "def set_studio_default(self):\n raise NotImplementedError(\n \"{} Method `set_studio_default` not implemented!\".format(\n repr(self)\n )\n )", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"replace_existing_files\" in vars(self):\n self.replace_existing_files = False\n if not \"num_files_per_point\" in vars(self):\n self.num_files_per_point = -1\n if not \"input_location_type\" in vars(self):\n self.input_location_type = \"local\"\n if not \"output_location_type\" in vars(self):\n self.output_location_type = \"local\"", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def setup_defaults(self):\n status = self._lib_vscf_ecc.vscf_ecc_setup_defaults(self.ctx)\n VscfStatus.handle_status(status)", "def default(self):\n raise Error(\"Missing mandatory setting:\", self.name)", "def loadDefaults(self):\n # (025) Merged into settings.RawSettings.\n pass", "def _create_defaults(self):\n return DefaultCommandOptionValues(\n min_confidence=3, output_format='vs7')", "def reset_defaults(self):\n self.domain_list = [{\"domain\": \"mywebsite%s.com\" % uuid.uuid1()}]\n self.origin_list = [{\"origin\": \"mywebsite1.com\",\n \"port\": 443,\n \"ssl\": False}]\n self.caching_list = [{\"name\": \"default\", \"ttl\": 3600},\n {\"name\": \"home\",\n \"ttl\": 1200,\n \"rules\": [{\"name\": \"index\",\n \"request_url\": \"/index.htm\"}]}]\n self.service_name = str(uuid.uuid1())\n self.flavor_id = self.test_config.default_flavor", "def defaults():\n\n #dummy = FieldTemplate.dummy\n\n return None", "def make_default_config(project):\n return {\n \"breathe_projects\": {\n project: \"./_doxygen/xml\"\n },\n \"breathe_default_project\": project,\n \"exhale_args\": {\n # required arguments\n \"containmentFolder\": \"./api\",\n \"rootFileName\": \"{0}_root.rst\".format(project),\n \"rootFileTitle\": \"``{0}`` Test Project\".format(project),\n \"doxygenStripFromPath\": \"..\",\n # additional arguments\n \"exhaleExecutesDoxygen\": True,\n \"exhaleDoxygenStdin\": \"INPUT = ../include\"\n }\n }", "def __init__(self):\n for name, default in self.defaults.items():\n value = getattr(django.conf.settings, name, default)\n setattr(self, name, value)", "def test_set_project_default_power_schedule(self):\n pass", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def 
_set_default_attributes(self):\n # Default input attributes\n self._has_studio_override = False\n self._had_studio_override = False\n\n self._is_overriden = False\n self._was_overriden = False\n\n self._is_modified = False\n self._is_invalid = False\n\n self._is_nullable = False\n self._as_widget = False\n self._is_group = False\n\n # If value should be stored to environments\n self._env_group_key = None\n\n self._any_parent_as_widget = None\n self._any_parent_is_group = None\n\n # Parent input\n self._parent = None\n\n # States of inputs\n self._state = None\n self._child_state = None\n\n # Attributes where values are stored\n self.default_value = NOT_SET\n self.studio_value = NOT_SET\n self.override_value = NOT_SET\n\n # Log object\n self._log = None\n\n # Only for develop mode\n self.defaults_not_set = False", "def setDefaultSettings():\n if PLATFORM == 'Windows':\n font = 'Consolas'\n else:\n font = 'Monospace'\n\n preferenceNode = nuke.toNode('preferences')\n # viewer settings\n preferenceNode['maxPanels'].setValue(5)\n preferenceNode['TextureSize'].setValue('2048x2048')\n preferenceNode['viewer_bg_color_3D'].setValue(1280068863)\n preferenceNode['viewer_fg_color_3D'].setValue(4294967295L)\n preferenceNode['Viewer3DControlEmulation'].setValue('Maya')\n preferenceNode['middleButtonPans'].setValue(False)\n preferenceNode['dot_node_scale'].setValue(1.5)\n\n # script editor settings\n preferenceNode['clearOnSuccess'].setValue(False)\n preferenceNode['echoAllCommands'].setValue(True)\n preferenceNode['ScriptEditorFont'].setValue(font)\n preferenceNode['ScriptEditorFontSize'].setValue(12.0)\n preferenceNode['kwdsFgColour'].setValue(2629566719L)\n preferenceNode['stringLiteralsFgColourDQ'].setValue(10354943)\n preferenceNode['stringLiteralsFgColourSQ'].setValue(10354943)\n preferenceNode['commentsFgColour'].setValue(2442236415L)", "def _use_default_params(self):\n self.params = {\n # Desktop window params\n 'pos': (100, 100),\n 'lock_pos': False,\n # Font params\n 'default_font': 'Sans 9',\n # Lessons colors\n 'lecture_color': '#009566660000',\n 'laboratory_color': '#987600000000',\n 'practice_color': '#188820eda89b',\n 'non_color': '#0000849acdf4',\n 'day_color': '#000000000000',\n # Window style\n 'full_transparent': True,\n 'window_color': '#5ad65ad65ad6',\n 'transparent_percent': 50.0,\n # View schedule settings\n 'view_sch': [True, True, True, True, True]\n }\n self.save_params()", "def defaults():\n\n return {\"disease_case_id\": FieldTemplate.dummy(\"case_id\"),\n }", "def set_app_defaults(self):\n self.curve_render = 0\n self.image_render = 0\n self.image_height = 200\n self.image_data = []\n self.auto_scale = True\n\n self.create_actions()\n self.setup_signals()\n self.reset_graph()\n\n self.fps = utils.SimpleFPS()\n\n # Click the live button\n self.ui.actionContinue_Live_Updates.trigger()", "def set_defaults(self):\n if not self.HAS_DS9: # pragma: no cover\n return\n self.run('frame delete all')\n self.run('wcs degrees')\n if self.disp_parameters['tile']:\n self.run('tile yes')\n else:\n self.run('tile no')\n self.cs = str(self.disp_parameters['lock_image']).lower()\n self.lock()", "def _set_defaults(self):\n self.api_protocol = 'https'\n self.api_host = 'nhl-score-api.herokuapp.com'\n self.current_score = 0\n self.sleep_seconds = 30 # Time to sleep after calling the API\n self.desired_game_state = 'LIVE' # Desired game state is LIVE", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"region_size\" in vars(self):\n self.region_size = 0.08\n if not 
\"RGB_bands\" in vars(self):\n self.RGB_bands = [\"B4\",\"B3\",\"B2\"]\n if not \"split_RGB_images\" in vars(self):\n self.split_RGB_images = True\n # in PROCESSED dir we expect RGB. NDVI, BWNDVI\n self.num_files_per_point = 3", "def default():\n raise NotImplementedError(\"Pvwattsv7 default file no longer exists!\")", "def create_default_settings():\n from flaskbb.fixtures.settings import fixture\n create_settings_from_fixture(fixture)", "def assign_defaults(self):\n\n def module_default_sort_key(module):\n sort_key = (\n 1 if module.marked_as_default else -1,\n module.version,\n module.variant,\n -self.index(module.modulepath),\n )\n return sort_key\n\n self.defaults = {}\n grouped = groupby(\n [module for path in self.path for module in path.modules], lambda x: x.name\n )\n for (_, modules) in grouped:\n for module in modules:\n module.is_default = False\n if len(modules) > 1:\n modules = sorted(modules, key=module_default_sort_key, reverse=True)\n modules[0].is_default = True\n self.defaults[modules[0].name] = modules[0]", "def default_settings(self, settings):\n return {}", "def get_defaults():\n\n # get package defaults\n with open(os.path.join(iLoop_RNAseq_pipeline.__path__[0], 'defaults', 'RNAseq_pipeline_defaults.txt')) as rpd:\n defaults = {}\n for line in rpd.readlines():\n if line.strip():\n defaults[line.split(',')[0].strip()] = line.split(',')[1].strip()\n\n try:\n with open(os.path.join(os.path.expanduser(\"~\"), 'RNAseq_pipeline_defaults.txt')) as rpd:\n for line in rpd.readlines():\n if line.strip():\n defaults[line.split(',')[0].strip()] = line.split(',')[1].strip()\n except FileNotFoundError:\n logger.warning('\"RNAseq_pipeline_defaults.txt\" does not exist under home path. An email address and project ID should be should be define in that file.')\n\n # replace with user defaults\n try:\n with open('RNAseq_pipeline_defaults.txt') as rpd:\n for line in rpd.readlines():\n if line.strip():\n defaults[line.split(',')[0].strip()] = line.split(',')[1].strip()\n except FileNotFoundError:\n logger.info(\n '\"RNAseq_pipeline_defaults.txt\" does not exist under this folder. 
Defaults from the package and home path will be used.')\n\n if 'email' not in defaults:\n if not validate_email(defaults['email']):\n while True:\n email = input('Enter a valid email address for job status: \\n')\n if validate_email(email):\n defaults['email'] = email\n print('Writing email to \"RNAseq_pipeline_defaults.txt\" under home path.')\n f = open(os.path.join(os.path.expanduser(\"~\"), 'RNAseq_pipeline_defaults.txt'), 'w+')\n f.write('\\nemail,{}'.format(email))\n f.close()\n break\n else:\n print('{} is not valid, try again.'.format(email))\n\n if ('project' not in defaults) or (defaults['project'] == 'projectid'):\n project = input('Enter Computerome project ID for billing: \\n')\n # TODO It is possible to validate this by checking folder name under \"/home/projects\".\n defaults['project'] = project\n print('Writing project ID to \"RNAseq_pipeline_defaults.txt\" under home path.')\n f = open(os.path.join(os.path.expanduser(\"~\"), 'RNAseq_pipeline_defaults.txt'), 'w+')\n f.write('\\nproject,{}'.format(project))\n f.close()\n\n return defaults", "def add_earlydefault_settings(self):\n self.add_default_settings_config()\n self.add_default_settings_aliases()", "def set_defaults(self):\n for key, constraints in self.__class__.MODEL.items():\n if key not in self.resource:\n self.resource[key] = constraints[3]", "def restore_defaults(self):\n # Entry Widgets\n self.name.delete(0, tk.END)\n self.num_invest.delete(0, tk.END)\n self.num_flows.delete(0, tk.END)\n\n self.name.insert(0, self.ini_name)\n self.num_invest.insert(0, self.ini_num_invest)\n self.num_flows.insert(0, self.ini_num_flows)\n\n # Checkboxes\n self.rand_data.set(self.ini_rand_data)\n self.mult.set(self.ini_mult)\n self.inflation.set(self.ini_inflation)\n self.taxes.set(self.ini_taxes)\n self.uncertainty.set(self.ini_uncertainty)\n\n # Radio Buttons\n self.depreciation.set(self.ini_depreciation)\n self.distribution.set(self.ini_distribution)\n self.estimate.set(self.ini_estimate)", "def get_defaults():\n\n return {\n \"numberofrules\": 0,\n \"datapath\": path_join_robust(BASEDIR_PATH, \"data\"),\n \"freshen\": True,\n \"replace\": False,\n \"backup\": False,\n \"skipstatichosts\": False,\n \"keepdomaincomments\": True,\n \"extensionspath\": path_join_robust(BASEDIR_PATH, \"extensions\"),\n \"extensions\": [],\n \"compress\": False,\n \"minimise\": False,\n \"outputsubfolder\": \"\",\n \"hostfilename\": \"hosts\",\n \"targetip\": \"0.0.0.0\",\n \"sourcedatafilename\": \"update.json\",\n \"sourcesdata\": [],\n \"readmefilename\": \"readme.md\",\n \"readmetemplate\": path_join_robust(BASEDIR_PATH, \"readme_template.md\"),\n \"readmedata\": {},\n \"readmedatafilename\": path_join_robust(BASEDIR_PATH, \"readmeData.json\"),\n \"exclusionpattern\": r\"([a-zA-Z\\d-]+\\.){0,}\",\n \"exclusionregexes\": [],\n \"exclusions\": [],\n \"commonexclusions\": [\"hulu.com\"],\n \"blacklistfile\": path_join_robust(BASEDIR_PATH, \"blacklist\"),\n \"whitelistfile\": path_join_robust(BASEDIR_PATH, \"whitelist\"),\n }", "def help_default_values():\n click.echo_via_pager(docgen.generate_default_value_help())", "def default_value(self, val):\n self.set_property(\"DefaultValue\", val)", "def __defaults__(self): \n self.tag = 'Constant-property atmosphere'\n self.composition = Data()\n self.composition.gas = 1.0", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def test_no_project_defaults(self):\n ep = exposed.ExposedProject()\n self.assertIsNone(ep.display)\n self.assertIsNone(ep.shared)\n 
self.assertIsNone(ep.settings)\n self.assertIsNone(ep.title)\n self.assertIsNone(ep.id)\n self.assertIsNone(ep.path())\n\n with self.assertRaises(RuntimeError):\n ep.title = 'Some Title'", "def _set_defaults(self):\n self._opts = {\n \"insecure\": [],\n \"header\": [],\n \"verbose\": [],\n \"nobody\": [],\n \"proxy\": [],\n \"resume\": [],\n \"ctimeout\": [\"--connect-timeout\", str(self.ctimeout)],\n \"timeout\": [\"-m\", str(self.timeout)],\n \"other\": [\"-s\", \"-q\", \"-S\"]\n }\n if self.insecure:\n self._opts[\"insecure\"] = [\"-k\"]\n if Msg().level > Msg.DBG:\n self._opts[\"verbose\"] = [\"-v\"]\n self._files = {\n \"url\": \"\",\n \"error_file\": FileUtil(\"execurl_err\").mktmp(),\n \"output_file\": FileUtil(\"execurl_out\").mktmp(),\n \"header_file\": FileUtil(\"execurl_hdr\").mktmp()\n }", "def set_default_values(self):\n\t\t\n\t\t# ajout liste de devises a la combobox\n\t\tself.cbb_devisesFrom.addItems(sorted(list(self.c.currencies)))\n\t\tself.cbb_devisesTo.addItems(sorted(list(self.c.currencies)))\n\t\t# affichage par defaut cbb\n\t\tself.cbb_devisesFrom.setCurrentText(\"EUR\")\n\t\tself.cbb_devisesTo.setCurrentText(\"EUR\")\n\t\t\n\t\t# choix montant max de la spinbox\n\t\tself.spn_montant.setRange(0,1000000)\n\t\tself.spn_montantConverti.setRange(0,1000000)\n\t\t# affichage par defaut spn\n\t\tself.spn_montant.setValue(1)\n\t\tself.spn_montantConverti.setValue(1)", "def set_defaults():\n\tglobal defaults\n\n\tdefaults = {}\n\tdefaults[\"aliase\"] = {}\n\n\tdefaults[\"nigiri\"] = {}\n\tdefaults[\"nigiri\"][\"wrap_input\"] = \"True\"\n\tdefaults[\"nigiri\"][\"command_char\"] = \"/\"\n\tdefaults[\"nigiri\"][\"shortcut_pattern\"] = \"meta [0-9]\"\n\tdefaults[\"nigiri\"][\"server_shortcuts\"] = \"true\"\n\tdefaults[\"nigiri\"][\"show_debug\"] = \"True\"\n\tdefaults[\"nigiri\"][\"logfile\"] = os.path.join(xdg_cache_home, \"sushi\", \"nigiri.txt\")\n\tdefaults[\"nigiri\"][\"locale_dir\"] = get_path(\"..\", \"locale\")\n\tdefaults[\"nigiri\"][\"plugin_dirs\"] = escape_join(\",\", (\n\t\t\tos.path.join(xdg_data_home, \"nigiri\", \"plugins\"),\n\t\t\tos.path.join(xdg_data_home, \"chirashi\"),\n\t\t\tget_path(\"plugins\"),\n\t\t\tget_path(\"..\", \"chirashi\")\n\t\t))\n\n\tdefaults[\"chatting\"] = {}\n\tdefaults[\"chatting\"][\"quit_message\"] = \"Quit.\"\n\tdefaults[\"chatting\"][\"part_message\"] = \"Part.\"\n\tdefaults[\"chatting\"][\"last_log_lines\"] = \"15\"\n\tdefaults[\"chatting\"][\"nick_separator\"] = \": \"\n\n\tdefaults[\"colors\"] = {}\n\tdefaults[\"colors\"][\"messages\"] = \"gray\"\n\tdefaults[\"colors\"][\"messages_own\"] = \"white\"\n\tdefaults[\"colors\"][\"messages_highlight\"] = \"red\"\n\tdefaults[\"colors\"][\"actions\"] = \"gray\"\n\tdefaults[\"colors\"][\"actions_own\"] = \"white\"\n\tdefaults[\"colors\"][\"informative\"] = \"blue\"\n\n\tdefaults[\"templates\"] = {}\n\t# misc\n\tdefaults[\"templates\"][\"datestring\"] = \"%H:%M\"\n\t# messages\n\tdefaults[\"templates\"][\"action\"] = \"%(time)s %(nick)s %(message)s\"\n\tdefaults[\"templates\"][\"action_own\"] = \"%(time)s %(nick)s %(message)s\"\n\tdefaults[\"templates\"][\"message\"] = \"%(time)s <%(prefix)s%(nick)s> %(message)s\"\n\tdefaults[\"templates\"][\"message_own\"] = \"%(time)s <%(prefix)s%(nick)s> %(message)s\"\n\tdefaults[\"templates\"][\"ctcp\"] = \"%(time)s -%(nick)s/%(target)s- %(message)s\"\n\tdefaults[\"templates\"][\"ctcp_own\"] = \"%(time)s -%(nick)s/%(target)s- %(message)s\"\n\tdefaults[\"templates\"][\"notice\"] = \"%(time)s *%(nick)s/%(target)s* 
%(message)s\"\n\tdefaults[\"templates\"][\"notice_own\"] = \"%(time)s *%(nick)s/%(target)s* %(message)s\"\n\n\t# actions\n\tdefaults[\"templates\"][\"invite\"] = \"%(time)s * %(nick)s invites %(who)s to %(channel)s\"\n\tdefaults[\"templates\"][\"invite_own\"] = \"%(time)s * You were invited by %(nick)s to %(channel)s.\"\n\tdefaults[\"templates\"][\"join\"] = \"%(time)s * %(nick)s (%(host)s) has joined %(channel)s.\"\n\tdefaults[\"templates\"][\"join_own\"] = \"%(time)s * You have joined %(channel)s.\"\n\tdefaults[\"templates\"][\"kick\"] = \"%(time)s * %(who)s got kicked from %(channel)s by %(nick)s (%(reason)s)\"\n\tdefaults[\"templates\"][\"kick_own\"] = \"%(time)s * You got kicked from %(channel)s by %(nick)s (%(reason)s)\"\n\tdefaults[\"templates\"][\"nick\"] = \"%(time)s * %(nick)s is now known as %(new_nick)s.\"\n\tdefaults[\"templates\"][\"nick_own\"] = \"%(time)s * You are now known as %(new_nick)s.\"\n\tdefaults[\"templates\"][\"mode_list\"] = \"%(time)s * Modes for %(target)s: %(modes)s.\"\n\tdefaults[\"templates\"][\"mode_list_own\"] = \"%(time)s * Modes set on you: %(modes)s.\"\n\tdefaults[\"templates\"][\"mode\"] = \"%(time)s %(nick)s set %(mode)s%(param)s on %(target)s.\"\n\tdefaults[\"templates\"][\"mode_own\"] = \"%(time)s You set %(mode)s%(param)s on %(target)s.\"\n\tdefaults[\"templates\"][\"oper\"] = \"%(time)s * %(nick)s is now an OPER.\"\n\tdefaults[\"templates\"][\"oper_own\"] = \"%(time)s * You are now an OPER.\"\n\tdefaults[\"templates\"][\"part\"] = \"%(time)s * %(nick)s has left %(channel)s (%(reason)s).\"\n\tdefaults[\"templates\"][\"part_own\"] = \"%(time)s * You have left %(channel)s (%(reason)s).\"\n\tdefaults[\"templates\"][\"quit\"] = \"%(time)s * %(nick)s has quit (%(reason)s).\"\n\tdefaults[\"templates\"][\"quit_own\"] = \"%(time)s * You have quit (%(reason)s).\"\n\tdefaults[\"templates\"][\"topic\"] = \"%(time)s * %(nick)s has set the topic on %(channel)s to \\\"%(topic)s\\\".\"\n\tdefaults[\"templates\"][\"topic_own\"] = \"%(time)s * You have set the topic on %(channel)s to \\\"%(topic)s\\\".\"\n\tdefaults[\"templates\"][\"topic_anonymous\"] = \"* Topic of %(channel)s: %(topic)s\"\n\n\t# informative\n\tdefaults[\"templates\"][\"banlist_begin\"] = \"%(time)s Begin of banlist on channel %(channel)s.\"\n\tdefaults[\"templates\"][\"banlist_item\"] = \">> %(who)s %(mask)s %(when)s\"\n\tdefaults[\"templates\"][\"banlist_end\"] = \"%(time)s End of banlist. 
(%(channel)s)\"\n\tdefaults[\"templates\"][\"cannot_join\"] = \"%(time)s Can't join channel %(channel)s: %(reason)s\"\n\tdefaults[\"templates\"][\"list_begin\"] = \"%(time)s Begin of list.\"\n\tdefaults[\"templates\"][\"list_item\"] = \">> %(channel)s %(user)s %(topic)s\"\n\tdefaults[\"templates\"][\"list_end\"] = \"%(time)s End of list.\"\n\tdefaults[\"templates\"][\"names_begin\"] = \"%(time)s Begin of names (%(channel)s).\"\n\tdefaults[\"templates\"][\"names_item\"] = \"[ %(row)s ]\"\n\tdefaults[\"templates\"][\"names_end\"] = \"%(time)s End of names.\"\n\tdefaults[\"templates\"][\"no_such\"] = \"%(time)s No such %(type)s: %(target)s.\"\n\tdefaults[\"templates\"][\"whois_begin\"] = \"%(time)s Begin of whois (%(target)s).\"\n\tdefaults[\"templates\"][\"whois_item\"] = \">> %(nick)s: %(message)s\"\n\tdefaults[\"templates\"][\"whois_end\"] = \"%(time)s End of whois.\"\n\tdefaults[\"templates\"][\"dcc_new_incoming\"] = \"%(time)s Incoming file transfer (ID:%(id)d) from %(sender)s: \\\"%(filename)s\\\" (%(size)d).\"\n\tdefaults[\"templates\"][\"dcc_file_auto_accept\"] = \"%(time)s Auto accepted file transfer from %(sender)s: \\\"%(filename)s\\\" (%(size)d).\"\n\n\t# Add default sections to config parser\n\t# so setting is easier\n\tfor section in defaults.keys():\n\t\ttry:\n\t\t\tconfig_parser.add_section(section)\n\t\texcept ConfigParser.DuplicateSectionError:\n\t\t\tcontinue\n\n\t# sections defined below are not added to the configParser and\n\t# can't be set by the set method (will raise NoSectionError)\n\tpass", "def set_defaults(self, compmgr=None):\n for section, default_options in self.defaults(compmgr).items():\n for name, value in default_options.items():\n if not ProductSetting.exists(self.env, self.product,\n section, name):\n if any(parent[section].contains(name, defaults=False)\n for parent in self.parents):\n value = None\n self.set(section, name, value)", "def ConfigureDefaults(area_bounds=None, \n area_bounds_format=['x_min','y_min','x_max','y_max'], \n area_bounds_range=None, years_are_bounds=False,\n dates_are_bounds=False, init_date_str_format='%y%m%d',\n member_name='realization', period_name='time', \n initialistion_time_name='forecast_reference_time'): \n global default_area_bounds\n global default_area_bounds_format\n global default_area_bounds_range\n global default_years_are_bounds\n global default_dates_are_bounds\n global default_init_date_str_format\n global default_member_name\n global default_period_name\n global default_initialistion_time_name\n \n default_area_bounds = area_bounds\n default_area_bounds_format = area_bounds_format\n default_area_bounds_range = area_bounds_range\n default_years_are_bounds = years_are_bounds\n default_dates_are_bounds = dates_are_bounds\n default_init_date_str_format = init_date_str_format\n default_member_name = member_name\n default_period_name = period_name\n default_initialistion_time_name = initialistion_time_name", "def set_project_values(project, data):\n project.hashtag = data['hashtag']\n if 'name' in data and len(data['name']) > 0:\n project.name = data['name']\n else:\n project.name = project.hashtag.replace('-', ' ')\n if 'summary' in data and len(data['summary']) > 0:\n project.summary = data['summary']\n has_longtext = 'longtext' in data and len(data['longtext']) > 0\n if has_longtext:\n project.longtext = data['longtext']\n if 'autotext_url' in data and data['autotext_url'].startswith('http'):\n project.autotext_url = data['autotext_url']\n if not project.source_url or project.source_url == '':\n project.source_url 
= data['autotext_url']\n # MAX progress\n if 'levelup' in data and 0 < project.progress + data['levelup'] * 10 < 50:\n project.progress = project.progress + data['levelup'] * 10\n # return jsonify(data=data)\n if project.autotext_url is not None and not has_longtext:\n # Now try to autosync\n project = AddProjectData(project)\n return project", "def set_value_to_default(self):\n self.setValue(self.default_value)", "def set_missing_defaults(self):\n if 'pub_options' not in self.config:\n self.config['pub_options'] = {\n 'acknowledge': True,\n 'retain': True\n }\n\n if 'sub_options' not in self.config:\n self.config['sub_options'] = {\n 'get_retained': False\n }\n\n if 'subscribed_topics' not in self.config:\n self.config['subscribed_topics'] = None\n\n if 'replay_events' not in self.config:\n self.config['replay_events'] = False\n\n if 'max_reconnect_retries' not in self.config:\n self.config['max_reconnect_retries'] = 10", "def reset(self):\n self.manager.delete_all()\n for name, val in DEFAULT_SETTINGS.items():\n val['name'] = name\n val['default_value'] = val['value']\n self.manager.from_dict(val)", "def save_defaults(self):\n\n pass", "def defaults():\n\n dummy = FieldTemplate.dummy\n\n return {\"disease_disease_id\": dummy(\"disease_id\"),\n \"disease_symptom_id\": dummy(\"symptom_id\"),\n \"disease_testing_device_id\": dummy(\"device_id\"),\n }", "def getDefaultSettings(self) -> ghidra.docking.settings.Settings:\n ...", "def test_default_options(self):\r\n\r\n settings.ASSETS_URL_EXPIRE = True\r\n assert get_env().config['url_expire'] == settings.ASSETS_URL_EXPIRE\r\n\r\n settings.ASSETS_ROOT = 'FOO_ASSETS'\r\n settings.STATIC_ROOT = 'FOO_STATIC'\r\n settings.MEDIA_ROOT = 'FOO_MEDIA'\r\n # Pointing to ASSETS_ROOT\r\n assert get_env().directory.endswith('FOO_ASSETS')\r\n get_env().directory = 'BAR'\r\n assert settings.ASSETS_ROOT == 'BAR'\r\n # Pointing to STATIC_ROOT\r\n delsetting('ASSETS_ROOT')\r\n assert get_env().directory.endswith('FOO_STATIC')\r\n get_env().directory = 'BAR'\r\n assert settings.STATIC_ROOT == 'BAR'\r\n # Pointing to MEDIA_ROOT; Note we only\r\n # set STATIC_ROOT to None rather than deleting\r\n # it, a scenario that may occur in the wild.\r\n settings.STATIC_ROOT = None\r\n assert get_env().directory.endswith('FOO_MEDIA')\r\n get_env().directory = 'BAR'\r\n assert settings.MEDIA_ROOT == 'BAR'", "def set_defaults(self, all_defaults):\r\n \r\n if all_defaults:\r\n # Set every value from the defaults.\r\n self.letters = probabilities.LETTERS\r\n self.word_constructions = probabilities.WORD_CONSTRUCTIONS\r\n self.word_sizes = probabilities.WORD_SIZES\r\n self.sentence_sizes = probabilities.SENTENCE_SIZES\r\n self.paragraph_sizes = probabilities.PARAGRAPH_SIZES\r\n self.punctuation_midline = probabilities.PUNCTUATION_MIDLINE\r\n self.punctuation_endline = probabilities.PUNCTUATION_ENDLINE\r\n self.punctuation_matched = probabilities.PUNCTUATION_MATCHED\r\n self.vowels = probabilities.VOWELS\r\n\r\n # Common values even when parsing imported text\r\n self.new_word_chance = probabilities.NEW_WORD_CHANCE\r\n self.capital_chance = probabilities.CAPITAL_CHANCE\r\n self.punctuation_midline_chance = probabilities.PUNCTUATION_MIDLINE_CHANCE\r\n self.punctuation_matched_chance = probabilities.PUNCTUATION_MATCHED_CHANCE\r\n self.optimal_word_count = probabilities.OPTIMAL_WORD_COUNT\r\n self.vowel_distance_threshold = probabilities.VOWEL_DISTANCE_THRESHOLD", "def initDefaults(self, kwargs):\n \n for k,v in self.defaults.iteritems():\n if k in kwargs: # use assigned values\n 
setattr(self, k, kwargs[k])\n else: # use default values\n setattr(self, k, v)\n \n for k,v in kwargs.iteritems():\n if k not in self.defaults:\n setattr(self, k, v)\n pass", "def add_default_options(self):\n\n options = getattr(self.parent, \"pyautodoc_set_default_option\", [])\n for option in options:\n self.set_default_option(option)", "def __init__(__self__, *,\n project: Optional[pulumi.Input[str]] = None):\n if project is not None:\n pulumi.set(__self__, \"project\", project)", "def test_should_set_default_properties(self): # pylint: disable=invalid-name\n initialize_semver_git_tag(self.project)\n self.assertEquals(\n self.project.get_property('semver_git_tag_increment_part'), 'patch')\n self.assertEquals(\n self.project.get_property('semver_git_tag_version_prefix'), '')", "def defaults(self, **kwargs):\n for i in kwargs:\n self._.setdefault(i, kwargs[i])\n return self", "def default(self, default):\n\n self._default = default", "def defaults():\n\n return {\"cr_shelter_flag_id\": S3ReusableField.dummy(\"flag_id\"),\n }", "def defaultPreset (self):\n assert False, \"To be implemented by child\"", "def default(self, default):\n self._default = default\n return self", "def init_defaults(self, defaults):\r\n for (sect, opt, default) in defaults:\r\n self._default(sect, opt, default)", "def default(self, value):\n # save {value} as the default\n self._default = value\n # all done\n return", "def initDefaults(self):\n return _libsbml.Unit_initDefaults(self)", "def write_default_values():\n values = default_values()\n write_files(values, path_to_data())\n return values", "def set_config_defaults(config):\n new_config = config.copy()\n\n new_config.setdefault(\"window_title\", \"Materials Cloud Tool\")\n new_config.setdefault(\n \"page_title\",\n \"<PLEASE SPECIFY A PAGE_TITLE AND A WINDOW_TITLE IN THE CONFIG FILE>\",\n )\n\n new_config.setdefault(\"custom_css_files\", {})\n new_config.setdefault(\"custom_js_files\", {})\n new_config.setdefault(\"templates\", {})\n\n return new_config", "def testDefaults(self, widget):\n assert isinstance(widget.highlight, PythonHighlighter)\n assert isinstance(widget.parameter_dict, dict)\n assert isinstance(widget.pd_parameter_dict, dict)\n\n assert len(widget.model) == 6\n assert \"filename\" in widget.model.keys()\n assert \"overwrite\" in widget.model.keys()\n assert \"description\" in widget.model.keys()\n assert \"parameters\" in widget.model.keys()\n assert \"pd_parameters\" in widget.model.keys()\n assert \"text\" in widget.model.keys()", "def default_input_fields(fields):\n set_defaults(fields, INPUTSPEC_DEFAULTS)", "def restore_defaults(self):\n temp_index = self.temp_dropdown.findText(self.default_units[\"Temperature\"])\n vol_index = self.volume_dropdown.findText(self.default_units[\"Volume\"])\n press_index = self.press_dropdown.findText(self.default_units[\"Pressure\"])\n energy_index = self.energy_dropdown.findText(self.default_units[\"Energy\"])\n amount_index = self.amount_dropdown.findText(self.default_units[\"Amount\"])\n speed_index = self.speed_dropdown.findText(self.default_units[\"Speed\"])\n\n self.temp_dropdown.setCurrentIndex(temp_index)\n self.volume_dropdown.setCurrentIndex(vol_index)\n self.press_dropdown.setCurrentIndex(press_index)\n self.energy_dropdown.setCurrentIndex(energy_index)\n self.amount_dropdown.setCurrentIndex(amount_index)\n self.speed_dropdown.setCurrentIndex(speed_index)", "def default(self, value):\n # also easy\n self._default = value\n # all done\n return", "def random_project(**overrides) -> Dict[str, 
Any]:\n data = dict(\n uuid=fake.uuid4(),\n name=fake.word(),\n description=fake.sentence(),\n prj_owner=fake.pyint(),\n thumbnail=fake.image_url(width=120, height=120),\n access_rights={},\n workbench={},\n published=False,\n )\n data.update(overrides)\n return data", "def set_lib_defaults():\n\n set_middleware_defaults()\n\n # TODO(gmann): Remove setting the default value of config policy_file\n # once oslo_policy change the default value to 'policy.yaml'.\n # https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49\n DEFAULT_POLICY_FILE = 'policy.yaml'\n policy_opts.set_defaults(CONF, DEFAULT_POLICY_FILE)", "def _populate_default_values(self):\n\n if 'input_data' not in self._definition:\n self._definition['input_data'] = []\n for input_dict in self._definition['input_data']:\n if 'required' not in input_dict:\n input_dict['required'] = True\n\n if 'jobs' not in self._definition:\n self._definition['jobs'] = []\n for job_dict in self._definition['jobs']:\n if 'recipe_inputs' not in job_dict:\n job_dict['recipe_inputs'] = []\n if 'dependencies' not in job_dict:\n job_dict['dependencies'] = []\n for dependency_dict in job_dict['dependencies']:\n if 'connections' not in dependency_dict:\n dependency_dict['connections'] = []", "def setup_default_arguments(self):\n self.add_argument('--clean', action='store_true',\n help='Cleans all generated files.')", "def defaults(argv=None):\n default_cfg = {\n \"random_seed\": 42,\n \"repo_age_in_days\": 10,\n \"fake\": Faker,\n \"team_size\": 3,\n \"developer_strategy\": \"random-uniform\",\n \"general_commit_words\": [\"Add\", \"an\", \"empty\", \"change\"],\n \"merge_commit_words\": [\"Introduce\", \"the\", \"feature\"],\n \"max_commits_per_branch\": 10,\n \"repo_dir\": \"repository\",\n \"datetime_format_template\": r\"%Y-%m-%dT%H:%M:%S\",\n \"ticket_id_template\": r\"ACME-%d\",\n \"message_template\": r\"%s %s\",\n }\n mixin_cfg = mixin(argv)\n cfg = {**default_cfg, **mixin_cfg}\n\n if not cfg.get(\"repo_dir\"):\n raise ValueError(\"empty repo_dir, no implicit current working dir use\")\n\n cfg = activate_model(cfg)\n cfg = seed_model(cfg)\n\n if not cfg.get(\"developers\"):\n if not cfg.get(\"developer_data\"):\n cfg[\"developer_data\"] = [\n (cfg[\"fake\"].name(), cfg[\"fake\"].email())\n for _ in range(cfg[\"team_size\"])\n ]\n cfg[\"developers\"] = pairs_to_actors(cfg[\"developer_data\"])\n\n if cfg[\"developer_strategy\"] not in DEVELOPER_STRATEGIES:\n raise ValueError(\n \"warning: developer selection strategy expected in {} but found ('{}') instead\".format(\n DEVELOPER_STRATEGIES, cfg[\"developer_strategy\"]\n )\n )\n\n return cfg", "def defaults():\n\n dummy = FieldTemplate.dummy\n\n return {\"disease_demographic_id\": dummy(\"demographic_id\"),\n }" ]
[ "0.6994468", "0.6959797", "0.68457776", "0.67833525", "0.66337454", "0.6582282", "0.6558582", "0.6526463", "0.6523897", "0.6491703", "0.6482176", "0.6464206", "0.6461486", "0.6424208", "0.6412936", "0.6357029", "0.63031733", "0.6299067", "0.6298111", "0.6293933", "0.6293933", "0.62914115", "0.62893933", "0.62871546", "0.6239317", "0.6230288", "0.62224793", "0.6221455", "0.6221455", "0.62111765", "0.62014663", "0.61793745", "0.61673343", "0.6151192", "0.61380684", "0.6137809", "0.6135187", "0.61226773", "0.6107539", "0.6107539", "0.6107539", "0.6083867", "0.604105", "0.6028358", "0.60153955", "0.59419596", "0.59386826", "0.5935187", "0.5931211", "0.59221184", "0.58989686", "0.5887775", "0.5874017", "0.58717746", "0.586201", "0.58586663", "0.58577204", "0.5851762", "0.5808723", "0.5807691", "0.5802714", "0.5795619", "0.579217", "0.5790654", "0.5789759", "0.57749164", "0.5768218", "0.57649", "0.57646257", "0.5764211", "0.57606703", "0.57557255", "0.5751164", "0.573981", "0.5738001", "0.5735663", "0.57341385", "0.572854", "0.57183653", "0.57016903", "0.5688813", "0.56826276", "0.56818926", "0.5666104", "0.5664735", "0.56533957", "0.5650121", "0.5647918", "0.56396055", "0.5634188", "0.5632772", "0.5631577", "0.5631219", "0.5625478", "0.5623114", "0.5621774", "0.5608856", "0.5605346", "0.56038356", "0.56004775", "0.5596172" ]
0.0
-1
Deduce correct spark dtype from pandas dtype for column col of pandas dataframe df
def infer_spark_dtype(df, col): logger = logging.getLogger(__name__ + ".infer_spark_dtype") pd_dtype = df.dtypes[col] # get a sample from column col sample = df[col].dropna() if sample.shape[0] == 0: logger.warning("column %s of dtype %s containing nulls found" % (col, pd_dtype)) sample = None else: sample = sample.iloc[0] # infer spark dtype # datetimes if pd.api.types.is_datetime64_any_dtype(pd_dtype): ret = T.TimestampType() # ints elif (pd_dtype == 'int8') or (pd_dtype == 'int16'): # int8, int16 ret = T.ShortType() elif pd_dtype == 'int32': ret = T.IntegerType() elif pd.api.types.is_int64_dtype(pd_dtype): ret = T.LongType() # uints elif pd_dtype == 'uint8': ret = T.ShortType() elif pd_dtype == 'uint16': ret = T.IntegerType() elif pd_dtype == 'uint32': ret = T.LongType() elif pd_dtype == 'uint64': logger.warning("converting column %s of type uint64 to spark LongType - overflows will be nulls" % col) ret = T.LongType() # floats elif (pd_dtype == 'float16') or (pd_dtype == 'float32'): ret = T.FloatType() elif pd_dtype == 'float64': # float64 ret = T.DoubleType() elif pd_dtype == 'bool': ret = T.BooleanType() # object elif pd_dtype == 'object': if (sample is None) or (isinstance(sample, str)): logger.warning("converting column %s of type object to spark StringType" % col) ret = T.StringType() elif isinstance(sample, tuple): raise NotImplementedError("cannot convert column %s containing tuples to spark" % col) else: raise NotImplementedError("values in column %s of type object not understood" % col) # category elif pd.api.types.is_categorical_dtype(pd_dtype): logger.warning("converting column %s of type category to spark StringType" % col) ret = T.StringType() else: raise NotImplementedError("column %s of type %s not understood" % (col, pd_dtype)) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_dtype(data_df, settings):\n data_df = data_df.astype(settings[\"dtype\"])\n return data_df", "def change_col_type(df,schema):\n d = {'int':IntegerType(),'str':StringType(),'float':FloatType(),'bool':BooleanType()}\n \n for c,t in schema.items():\n df = df.withColumn(c,col(c).cast(d[t]))\n return df", "def get_col_dtype(col):\n if col.dtype == \"object\":\n try:\n col_new = pd.to_datetime(col.dropna().unique())\n return ['timestamp', 'datetime']\n except:\n return [\"text\", 'string']\n elif col.dtype == 'float64':\n return ['float', 'float64']\n elif col.dtype == 'int64':\n return ['int', 'int64']\n elif col.dtype == 'datetime64[ns]':\n return ['timestamp', 'datetime']\n else:\n return ['text', 'string']", "def inspect_dtype_object(self, column: str) -> str:\n\n series = self.df[column].dropna()\n\n # check for bool\n try:\n conv = pd.to_numeric(series)\n return self.inspect_dtype(conv)\n except ValueError:\n pass\n\n # check for mixed dtypes\n dtypes = {type(x) for x in series}\n if len(dtypes) > 1:\n raise TypeError(\"Column `{}` has mixed dtypes: {}. Currently, \"\n \"this is not supported.\"\n .format(column, dtypes))\n\n # check for string\n if isinstance(series[0], str):\n return \"str\"\n\n # raise if unsupported dtype is encountered\n raise TypeError(\"Column `{}` has dtype `{}` which is currently \"\n \"not supported.\"\n .format(column, type(series[0])))", "def get_data_type(col_val):\n dtype = \"\"\n\n original_col_val = col_val\n digits_only = col_val.replace('-', '',1).replace(',', '', -1).replace(\".\", \"\")\n if digits_only.isdigit():\n try:\n int(original_col_val)\n dtype = TYPE_INT\n except ValueError:\n dtype = TYPE_FLOAT\n \n return dtype", "def ibis_schema_apply_to(schema, df):\n\n for column, dtype in schema.items():\n pandas_dtype = dtype.to_pandas()\n if isinstance(dtype, dt.Interval):\n df[column] = df[column].values.astype(pandas_dtype)\n else:\n df[column] = df[column].astype(pandas_dtype, errors='ignore')\n\n if PY2 and dtype == dt.string:\n df[column] = df[column].str.decode('utf-8', errors='ignore')\n\n return df", "def cast(elem, psql_type):\n if psql_type == 'real':\n return float(format(elem, '.6g'))\n elif psql_type == 'double precision':\n return float(format(elem, '.15g'))\n elif psql_type == 'timestamp':\n if isinstance(elem, pd.Timestamp):\n return elem.to_pydatetime()\n else:\n return elem\n elif psql_type == 'text':\n if type(elem) == float:\n return \"NaN\"\n return str(elem)\n else:\n return elem", "def test_df_all_types():\n return pd.DataFrame({\n 'intcol': [1, 2],\n 'strcol': ['three', 'four'],\n 'floatcol': [5.0, 6.0],\n 'boolcol': [True, False],\n 'datetimecol': [\n np.datetime64('2020-01-01'), np.datetime64('2020-01-02')],\n })", "def _infer_pa_column_type(self, column: pa.lib.ChunkedArray):\n # Validates the column to ensure that value types are consistent\n column.validate()\n return pa_to_feast_value_type(column)", "def convert_types(df):\n \n # Iterate through each column\n for c in df:\n \n # Convert ids and booleans to integers\n if ('SK_ID' in c):\n df[c] = df[c].fillna(0).astype(np.int32)\n \n # Convert objects to category\n elif (df[c].dtype == 'object') and (df[c].nunique() < df.shape[0]):\n df[c] = df[c].astype('category')\n \n # Booleans mapped to integers\n elif list(df[c].unique()) == [1, 0]:\n df[c] = df[c].astype(bool)\n \n # Float64 to float32\n elif df[c].dtype == float:\n df[c] = df[c].astype(np.float32)\n \n # Int64 to int32\n elif df[c].dtype == int:\n df[c] = df[c].astype(np.int32)\n \n return df", "def 
pandas_typecast(self) -> dict:\n res = {}\n for feat in self.data_features:\n res[feat.key] = ApiForm.typecast(feat.dtype)\n return res", "def get_dtype(col):\n dtype = col.dtype\n\n if isinstance(dtype, CategoricalDtype):\n col = col.astype(type(col.values[0]))\n out = get_dtype(col)\n elif np.issubdtype(dtype, np.floating):\n out = 'float32'\n elif np.issubdtype(dtype, np.integer):\n if col.max() < 32767:\n out = 'int16'\n else:\n out = 'int32'\n elif np.issubdtype(dtype, np.object_):\n size = int(col.astype(str).str.len().max())\n out = 'S{:}'.format(size)\n else:\n out = dtype\n\n return out", "def infer_dtype(self):\n raise NotImplementedError", "def cudf_dtype_from_pydata_dtype(dtype):\n\n if cudf.api.types.is_categorical_dtype(dtype):\n return cudf.core.dtypes.CategoricalDtype\n elif cudf.api.types.is_decimal32_dtype(dtype):\n return cudf.core.dtypes.Decimal32Dtype\n elif cudf.api.types.is_decimal64_dtype(dtype):\n return cudf.core.dtypes.Decimal64Dtype\n elif cudf.api.types.is_decimal128_dtype(dtype):\n return cudf.core.dtypes.Decimal128Dtype\n elif dtype in cudf._lib.types.SUPPORTED_NUMPY_TO_LIBCUDF_TYPES:\n return dtype.type\n\n return infer_dtype_from_object(dtype)", "def cast_type(cdm_column_type, value):\n if cdm_column_type in ('integer', 'int64'):\n # Regex check only relevant if submission dtype is 'object'\n if not re.match(SCIENTIFIC_NOTATION_REGEX, str(value)):\n return int(value)\n if cdm_column_type in ('character varying', 'text', 'string'):\n return str(value)\n if cdm_column_type == 'numeric':\n return float(value)\n if cdm_column_type == 'float' and isinstance(value, float):\n return value\n if cdm_column_type == 'date' and isinstance(value, datetime.date):\n return value\n if cdm_column_type == 'timestamp' and isinstance(\n value, datetime.datetime): # do not do datetime.datetime\n return value", "def dtype_to_pgtype(dtype, colname):\n if colname in ('the_geom', 'the_geom_webmercator'):\n return 'geometry'\n else:\n if dtype == 'float64':\n return 'numeric'\n elif dtype == 'int64':\n return 'int'\n elif dtype == 'datetime64[ns]':\n return 'date'\n elif dtype == 'bool':\n return 'boolean'\n else:\n return 'text'\n\n return None", "def inspect_dtype(series: pd.Series) -> str:\n\n mapping = {pd_types.is_bool_dtype: \"bool\",\n pd_types.is_integer_dtype: \"int\",\n pd_types.is_float_dtype: \"float\",\n pd_types.is_datetime64_any_dtype: \"datetime\"}\n\n for check, result in mapping.items():\n if check(series):\n return result\n\n raise TypeError(\"Type is not understand for column '{}'. 
Allowed \"\n \"types are bool, int, float, str and datetime.\"\n .format(series.name))", "def _parse_dtypes(data, table_meta):\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'datetime':\n datetime_format = field.get('format')\n data[name] = pd.to_datetime(data[name], format=datetime_format, exact=False)\n elif field_type == 'numerical' and field.get('subtype') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n elif field_type == 'id' and field.get('subtype', 'integer') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n\n return data", "def astype(self, dtype: Union[Dict[str, str], str]) -> 'DataFrame':\n\n def change_each_array(new_loc, new_kind, old_kind, arr, new_arr, cur_srm):\n missing_value_code = utils.get_missing_value_code(new_kind)\n if new_kind == 'S':\n if old_kind == 'b':\n arr = arr + 1\n cur_srm = [False, 'False', 'True']\n elif old_kind in 'i':\n cur_srm, arr = _va.convert_int_to_str(arr)\n elif old_kind == 'f':\n cur_srm, arr = _va.convert_float_to_str(arr)\n elif old_kind in 'mM':\n cur_srm, arr = _va.convert_datetime_str_to_str(arr.astype('str'))\n\n new_arr[:, new_loc] = arr\n new_srm[new_loc] = cur_srm\n else:\n if new_kind != old_kind:\n nas = utils.isna_array(arr, old_kind)\n if new_kind == 'b' and old_kind != 'b':\n arr = arr.astype('bool').astype('int8')\n new_arr[:, new_loc] = arr\n if new_kind != old_kind:\n new_arr[nas, new_loc] = missing_value_code\n\n if isinstance(dtype, str):\n new_dtype: str = utils.check_valid_dtype_convert(dtype)\n new_kind: str = utils.convert_numpy_to_kind(new_dtype)\n utils.check_astype_compatible(new_kind, self._data.keys())\n\n new_column_info: ColInfoT = {}\n new_arr = utils.create_empty_arr(new_kind, self.shape)\n new_data = {new_kind: new_arr}\n new_srm = {}\n col_iter = enumerate(self._col_info_iter(with_order=True, with_arr=True))\n for i, (col, old_kind, loc, order, arr) in col_iter:\n new_column_info[col] = utils.Column(new_kind, i, order)\n if old_kind == 'S':\n cur_srm = self._str_reverse_map[loc].copy()\n else:\n cur_srm = []\n change_each_array(i, new_kind, old_kind, arr, new_arr, cur_srm)\n elif isinstance(dtype, dict):\n col_kind_convert = {}\n for col, new_dtype in dtype.items():\n self._validate_column_name(col)\n new_dtype: str = utils.check_valid_dtype_convert(new_dtype)\n new_kind: str = utils.convert_numpy_to_kind(new_dtype)\n col_kind_convert[col] = new_kind\n old_kind = self._column_info[col].dtype\n utils.check_astype_compatible(new_kind, {old_kind})\n\n new_column_info: ColInfoT = {}\n cols_per_kind: Dict[str, int] = defaultdict(int)\n for col, old_kind, loc, order in self._col_info_iter(with_order=True):\n new_kind = col_kind_convert.get(col, old_kind)\n cur_loc = cols_per_kind[new_kind]\n new_column_info[col] = utils.Column(new_kind, cur_loc, order)\n cols_per_kind[new_kind] += 1\n\n # create empty arrays for each type\n new_data = {}\n for new_kind, num_cols in cols_per_kind.items():\n shape = len(self), num_cols\n new_data[new_kind] = utils.create_empty_arr(new_kind, shape)\n\n new_srm = {}\n for col, old_kind, loc, order, arr in self._col_info_iter(with_order=True, with_arr=True):\n new_kind = new_column_info[col].dtype\n new_loc = new_column_info[col].loc\n new_arr = new_data[new_kind]\n if old_kind == 'S':\n cur_srm = self._str_reverse_map[loc].copy()\n else:\n cur_srm = []\n change_each_array(new_loc, new_kind, old_kind, arr, new_arr, cur_srm)\n else:\n raise TypeError('Argument dtype must be either a string or a 
dictionary')\n\n new_columns = self._columns.copy()\n return self._construct_from_new(new_data, new_column_info, new_columns, new_srm)", "def get_column_dtypes(self) -> Tuple[List[str], List[str]]:\n\n columns, pyspark_dtypes = zip(*self.df.dtypes)\n\n # check unsupported pyspark dtypes\n unsupported = set(pyspark_dtypes).difference(self.TYPE_MAPPING.keys())\n if unsupported:\n raise ValueError(\"Unsupported dtype encountered: {}. Supported\"\n \"dtypes are: {}.\"\n .format(unsupported, self.TYPE_MAPPING.keys()))\n\n dtypes = [self.TYPE_MAPPING[dtype] for dtype in pyspark_dtypes]\n\n return columns, dtypes", "def datatype_conversion(self):\n\n category_cols = self.FEATURE_TYPES[\"category_cols\"]\n integer_cols = self.FEATURE_TYPES[\"integer_cols\"]\n float_cols = self.FEATURE_TYPES[\"float_cols\"]\n datetime_cols = self.FEATURE_TYPES[\"datetime_cols\"]\n string_cols = self.FEATURE_TYPES[\"string_cols\"]\n bool_cols = self.FEATURE_TYPES[\"bool_cols\"]\n data = self.data\n \n data[category_cols] = data[category_cols].astype('category',copy=False) \n data[integer_cols] = data[integer_cols].astype('int64',copy=False)\n data[float_cols] = data[float_cols].astype('float64',copy=False)\n data[datetime_cols] = data[datetime_cols].astype('datetime64[ns]',copy=False)\n data[string_cols] = data[string_cols].astype('str',copy=False)\n data[bool_cols] = data[bool_cols].astype('bool', copy=False)\n\n return data", "def get_data_type(self, col):\n if ((self.data_df[col].dtype == np.int64) or (self.data_df[col].dtype == np.int32)):\n return 'int'\n elif ((self.data_df[col].dtype == np.float64) or (self.data_df[col].dtype == np.float32)):\n return 'float'\n else:\n raise ValueError(\"Unknown data type of feature %s: must be int or float\" % col)", "def to_dtype(x, dtype):\n return x.type(dtype)", "def to_pandas(self) -> np.dtype:\n return self._pandas_type", "def test__convert_to_str_dtype(self):\n new_column_types = process_mutation._convert_to_str_dtype(\n self.column_types, [\"foo\"]\n )\n assert new_column_types == {\"foo\": \"object\", \"bar\": \"object\"}", "def set_dtypes(df):\n # drop rows where a column names appear (happened while appending to csv)\n df = df.loc[df[df.columns[0]] != df.columns[0]]\n # convert numerics\n df = df.apply(pd.to_numeric, errors='ignore')\n # parse query_timestamp\n df.query_timestamp = df.query_timestamp.apply(pd.to_datetime)\n\n df.reset_index(inplace=True, drop=True)\n\n return df", "def convertColumn(df, names, newType) -> pyspark.sql.dataframe.DataFrame:\n for name in names: \n df = df.withColumn(name, df[name].cast(newType))\n return df", "def data_all_types(df):\n \n printmd (\"**Type of every column in the data**\")\n print(\"\")\n print(df.dtypes)", "def convert_dtypes(rows):\n dtype_map = {pd.Timestamp: lambda x: x.to_pydatetime(),\n np.int8: lambda x: int(x),\n np.int16: lambda x: int(x),\n np.int32: lambda x: int(x),\n np.int64: lambda x: int(x),\n np.float16: lambda x: float(x),\n np.float32: lambda x: float(x),\n np.float64: lambda x: float(x),\n np.float128: lambda x: float(x)}\n for row in rows:\n yield [dtype_map.get(type(elem), lambda x: x)(elem) for elem in row]", "def _preprocess_temporal_columns(df: DataFrame) -> DataFrame:\n for col in df.select_dtypes(include=[\"datetime64[ns, UTC]\"]):\n df = df.astype({col: \"O\"})\n for col in df.select_dtypes(include=\"timedelta64[ns]\"):\n df = df.astype({col: \"O\"})\n return df", "def rep_dtypes(df):\n return \"(\" + re.sub(\", dtype.*\", \"\", re.sub(r\" +\", \": \", str(df.dtypes)).replace(\"\\n\", \", 
\")) + \")\"", "def test_mapping_column_types():\n\n dr1 = date_range(\"2020-01-01\", periods=3, freq=\"D\")\n dr2 = date_range(\"2019-06-23\", periods=3, freq=\"D\")\n df = DataFrame(\n {\n \"String\": list(\"abc\"),\n \"pd_String\": Series(list(\"abc\"), dtype=\"string\"),\n \"Int\": [1, 2, 3],\n \"Int16\": array([1, 2, 3], dtype=\"int16\"),\n \"pd_Int64\": Series([1, 2, 3], dtype=\"Int64\"),\n \"Float\": [4.0, 5.0, 6.0],\n \"Float32\": array([4, 4, 6], dtype=\"float32\"),\n \"Date\": dr1,\n \"Timedelta\": dr1 - dr2,\n \"Bool\": [True, False, True],\n }\n )\n adf_client, run_response = df_to_azure(\n df, tablename=\"test_df_to_azure\", schema=\"test\", method=\"create\"\n )\n wait_till_pipeline_is_done(adf_client, run_response)\n\n expected = DataFrame(\n {\n \"COLUMN_NAME\": [\n \"String\",\n \"pd_String\",\n \"Int\",\n \"Int16\",\n \"pd_Int64\",\n \"Float\",\n \"Float32\",\n \"Date\",\n \"Timedelta\",\n \"Bool\",\n ],\n \"DATA_TYPE\": [\n \"varchar\",\n \"varchar\",\n \"int\",\n \"int\",\n \"int\",\n \"real\",\n \"real\",\n \"datetime\",\n \"real\",\n \"bit\",\n ],\n \"CHARACTER_MAXIMUM_LENGTH\": [255, 255, nan, nan, nan, nan, nan, nan, nan, nan],\n \"NUMERIC_PRECISION\": [nan, nan, 10, 10, 10, 24, 24, nan, 24, nan],\n }\n )\n\n query = \"\"\"\n SELECT\n COLUMN_NAME,\n DATA_TYPE,\n CHARACTER_MAXIMUM_LENGTH,\n NUMERIC_PRECISION\n FROM\n INFORMATION_SCHEMA.COLUMNS\n WHERE\n TABLE_NAME = 'test_df_to_azure';\n \"\"\"\n\n with auth_azure() as con:\n result = read_sql_query(query, con=con)\n\n assert_frame_equal(expected, result)", "def convert_dtypes(\n self,\n infer_objects: bool = True,\n convert_string: bool = True,\n convert_integer: bool = True,\n convert_boolean: bool = True,\n convert_floating: bool = True,\n dtype_backend: DtypeBackend = \"numpy_nullable\",\n ):\n return DataFrameDefault.register(pandas.DataFrame.convert_dtypes)(\n self,\n infer_objects=infer_objects,\n convert_string=convert_string,\n convert_integer=convert_integer,\n convert_boolean=convert_boolean,\n convert_floating=convert_floating,\n dtype_backend=dtype_backend,\n )", "def _infer_dtype(val):\n if re.match(r'\\d{4}-\\d{2}(?:-\\d{2})?', val):\n return 'date'\n elif re.match(r'[+-]?\\d+$', val):\n return 'int'\n elif re.match(r'[+-]?\\d+%$', val):\n return 'pct'\n elif re.match(r'[a-zA-Z ]+', val):\n return 'text'\n else:\n msg = \"val={0} dtype not recognized\".format(val)\n raise ValueError(msg)", "def dtype(a):\n return a.dtype", "def datatype_map(dtype):\n # TODO: add datetype conversion\n if 'float' in dtype:\n return 'numeric'\n elif 'int' in dtype:\n return 'int'\n elif 'bool' in dtype:\n return 'boolean'\n else:\n return 'text'", "def element_type_from_dataframe(proxy, include_indexes=False):\n # type: (pd.DataFrame, bool) -> type\n return element_typehint_from_dataframe_proxy(proxy, include_indexes).user_type", "def assign_column_types(self):\n type_list = [\"category\" if u_input == 1 else float for u_input in self.user_column_label]\n self.df = self.df.astype(dict(zip(self.df.columns, type_list)))\n df_types = pd.DataFrame(self.df.dtypes).reset_index()\n df_types.columns = [\"column_name\", \"dtype\"]\n df_types.dtype = df_types.dtype.astype(str)\n self.column_dtypes = {list(df_types.column_name)[i]: list(df_types.dtype)[i] for i in range(len(df_types))}", "def pg2dtypes(pgtype):\n mapping = {\n 'bigint': 'float64',\n 'boolean': 'bool',\n 'date': 'datetime64[D]',\n 'double precision': 'float64',\n 'geometry': 'object',\n 'int': 'int64',\n 'integer': 'float64',\n 'number': 'float64',\n 'numeric': 
'float64',\n 'real': 'float64',\n 'smallint': 'float64',\n 'string': 'object',\n 'timestamp': 'datetime64[ns]',\n 'timestampz': 'datetime64[ns]',\n 'timestamp with time zone': 'datetime64[ns]',\n 'timestamp without time zone': 'datetime64[ns]',\n 'USER-DEFINED': 'object',\n }\n return mapping.get(str(pgtype), 'object')", "def test_pandas_dtypes():\n assert pd.DataFrame([1, 2]).dtypes.values[0] == np.dtype('int64') == np.int64\n assert pd.DataFrame([1, 2, None]).dtypes.values[0] == np.dtype('float64') == np.float64\n\n assert pd.DataFrame([1.0, 2.0]).dtypes.values[0] == np.dtype('float64') == np.float64\n assert pd.DataFrame([1.0, 2.0, None]).dtypes.values[0] == np.dtype('float64') == np.float64\n\n assert pd.DataFrame([True, False]).dtypes.values[0] == np.dtype('bool') == np.bool\n assert pd.DataFrame([True, False, None]).dtypes.values[0] == np.dtype('object') == np.object\n\n assert pd.DataFrame([\"A\", \"B\"]).dtypes.values[0] == np.dtype('object') == np.object\n assert pd.DataFrame([\"A\", \"B\", None]).dtypes.values[0] == np.dtype('object') == np.object", "def arrow_type_for(column_name, cx_oracle_type, precision, scale):\n\n # In the schema, these are bare NUMBER columns, so erroneously come back as floats.\n if column_name in ['AVPID', 'TUMOUR_AVPID']:\n return pa.int64()\n\n if cx_oracle_type == cx_Oracle.DB_TYPE_NUMBER:\n if scale == 0:\n if precision == 1:\n return pa.bool_()\n else:\n return pa.int64()\n else:\n return pa.float64()\n else:\n return type_map.get(cx_oracle_type)", "def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.Float()", "def _spark_data_type(self) -> sql_type.DataType:\n return self._spark_type_class()", "def _resolve_target_dtypes(self, dyf: DynamicFrame) -> DynamicFrame:\n resolve_choice_specs = [\n (col, f\"cast:{col_type}\") for col, col_type in self.target_table.get_dyf().toDF().dtypes\n ]\n\n return dyf.resolveChoice(resolve_choice_specs)", "def _transform_col(col, val):\n if dict_values(col.types)[0] in ('int', 'real'):\n return col.asnumeric(), float(val)\n\n # for enums, character, etc...\n return col, val", "def convert_variable_type_n(df):\n # available columns\n \"\"\"\n 'source_file', 'source_id', 'report_id', 'observation_id',\n 'record_timestamp', 'iday', 'station_id', 'lat@hdr', 'lon@hdr',\n 'vertco_reference_1@body', 'obsvalue@body', 'varno@body', 'units',\n 'number_of_pressure_levels'\n \"\"\"\n dic_var_type = { 'int32' : ['varno@body', 'number_of_pressure_levels' , 'units', 'z_coordinate_type' , 'vertco_type@body' ] ,\n 'float32' : ['lat@hdr', 'lon@hdr' , 'vertco_reference_1@body', 'obsvalue@body', 'iday' ] ,\n 'string' : ['source_id' , 'station_id' , 'source_file' , 'report_id', 'observation_id', ] ,\n 'int64' : ['report_timestamp' , 'date_time', 'record_timestamp'] } \n \n convert = { 'int32' : np.int32 , \n 'string' : np.bytes_ ,\n 'float32' : np.float32 ,\n 'float64' : np.float64\n \n }\n # creating a dictionary variable - nptype \n mapping = {}\n for k in dic_var_type.keys():\n for l in dic_var_type[k]:\n mapping[l] = k \n\n for c in df.columns:\n try:\n #print('converting ' , c , ' to type ' , mapping[c] )\n df[c] = df[c].astype( convert[mapping[c]] )\n #print('converted: ', c )\n \n except:\n #print('could not convert type column ' , c )\n pass \n \n return df", "def map_dtypes(pgtype):\n # may not be a complete list, could not find CARTO SQL API documentation\n # about data types\n dtypes = {'number': 'float64',\n 'date': 'datetime64',\n 'string': 'object',\n 'geometry': 'object',\n 'boolean': 'bool'}\n try:\n return 
dtypes[pgtype]\n except KeyError:\n # make it a string if not in dict above\n return 'object'", "def _coerce_and_store_data_types(tag_loop_dict):\n\n regex_format = re.compile(r\"\"\"\\d*\\.(?P<decimal>\\d+)(?:[Ee]?[+-]?(?P<exponent>\\d?))\"\"\")\n\n # Attempt to convert data columns from strings to integers or floats whenever possible\n # Skip any table with 'data_header' in its name because these contain mixed data\n for key in tag_loop_dict.keys():\n if u'data_header' not in key:\n tmp = tag_loop_dict[key].copy()\n tag_loop_dict[key] = tag_loop_dict[key].apply(lambda x: pd.to_numeric(x, errors=u'ignore'))\n \n # Preserve the formatting for all columns that were converted to floats\n float_cols = [x for x in tag_loop_dict[key].columns if tag_loop_dict[key][x].dtype == np.float]\n\n decimal_format = dict([(col, tmp[col].apply(lambda x: \n len(re.search(regex_format, x).group('decimal'))).max())\n for col in float_cols])\n\n exponent_format = dict([(col, tmp[col].apply(lambda x: \n len(re.search(regex_format, x).group('exponent'))).max())\n for col in float_cols])\n\n number_format = dict([(col,'f') if exponent_format[col] == 0 else (col,'E')\n for col in float_cols])\n\n formatter = dict([(col, '{:.' + str(decimal_format[col]) + number_format[col] + '}') \n for col in float_cols])\n \n # Save format instructions to dataframe\n tag_loop_dict[key]._print_format = formatter\n\n return tag_loop_dict", "def _parse_column_type(self) -> object:\n\n try:\n column_type = self.get_column_type(self.dest_options.pop(\"type_cast\"))\n type_length = self.dest_options.pop(\"length\")\n if type_length:\n column_type = column_type(type_length)\n return column_type\n except Exception as err:\n logger.error(\"_parse_column_type [error] -> %s\" % err)\n\n # logger.error(self.dest_options.get(\"length\"))\n type_length = self.dest_options.pop(\"length\")\n if type_length:\n column_type = column_type(type_length)\n return column_type", "def get_field_dtype(self, field=None):\n\n if field in self._fields_dtypes:\n return self._fields_dtypes[field]\n\n # initialize dbtypes for all fields\n field_type = pd.read_sql(\n 'select distinct column_name, type '\n 'from fields',\n self._get_db_engine())\n\n for row in field_type.itertuples():\n self._fields_dtypes[row.column_name] = row.type\n\n return self._fields_dtypes[field] if field in self._fields_dtypes else None", "def reduce_memory_footprint(df):\n for col in df.columns:\n if df[col].dtypes == 'float64':\n df[col] = df[col].astype('float32')\n elif df[col].dtypes == 'int64':\n df[col] = df[col].astype('int32')\n \n return df", "def convertColumnToInt32(dataFrame, columnName):\n\n dataFrame[columnName] = dataFrame[columnName].astype(np.int32)\n return dataFrame", "def _convert(frame):\n frame = frame.convert_objects(convert_numeric=True)\n for column in frame:\n if column in c.dates:\n frame[column] = frame[column].astype('datetime64')\n return frame", "def dtypes(self):\n return self.to_pandas().dtypes", "def _convert_field_type(row):\n return row", "def dtype_to_db_type(dtype):\n\n candidates = [dtype]\n\n # if we get a single character code we should normalize to a NumPy type\n if dtype in np.typeDict:\n dtype = np.typeDict[dtype]\n candidates.append(dtype.__name__)\n\n #if we get a dtype object i.e. 
dtype('int16'), then pull out its name\n if hasattr(dtype, 'name'):\n candidates.append(dtype.name)\n\n # for a dtype like dtype('S3') need to access dtype.type.__name__ to get 'string_'\n if hasattr(dtype, 'type'):\n candidates.append(dtype.type.__name__)\n\n # convert Python types by adding their type's name\n if hasattr(dtype, '__name__'):\n candidates.append(dtype.__name__)\n\n candidates.append(str(dtype))\n\n for candidate_key in candidates:\n if candidate_key in _dtype_to_db_type_dict:\n return _dtype_to_db_type_dict[candidate_key]\n\n assert False, \"Failed to find sqlite3 column type for %s\" % dtype", "def is_date_dtype(df, col_name):\n dtype = df.dtypes[col_name]\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)", "def predict_column_type(data):\n data_types = [type(item) for item in data]\n data_types = list(set(data_types))\n if len(data_types) == 1:\n return data_types[0].__name__\n elif str in data_types:\n return \"str\"\n elif float in data_types:\n return \"float\"\n elif int in data_types:\n return \"int\"\n else:\n return \"str\"", "def cudf_dtype_from_pa_type(typ):\n if pa.types.is_list(typ):\n return cudf.core.dtypes.ListDtype.from_arrow(typ)\n elif pa.types.is_struct(typ):\n return cudf.core.dtypes.StructDtype.from_arrow(typ)\n elif pa.types.is_decimal(typ):\n return cudf.core.dtypes.Decimal128Dtype.from_arrow(typ)\n else:\n return cudf.api.types.pandas_dtype(typ.to_pandas_dtype())", "def data_type(self, col_name: str, pa_dtype: pa.DataType) -> Optional[str]:\n set_type = self._type_dict.get(col_name)\n if set_type:\n return set_type\n\n mapped_type = self.convert_pa_dtype(pa_dtype)\n if mapped_type:\n return mapped_type\n\n return None", "def _can_cast(from_dtype, to_dtype):\n if cudf.utils.utils.is_na_like(from_dtype):\n return True\n if isinstance(from_dtype, type):\n from_dtype = cudf.dtype(from_dtype)\n if isinstance(to_dtype, type):\n to_dtype = cudf.dtype(to_dtype)\n\n # TODO : Add precision & scale checking for\n # decimal types in future\n\n if isinstance(from_dtype, cudf.core.dtypes.DecimalDtype):\n if isinstance(to_dtype, cudf.core.dtypes.DecimalDtype):\n return True\n elif isinstance(to_dtype, np.dtype):\n if to_dtype.kind in {\"i\", \"f\", \"u\", \"U\", \"O\"}:\n return True\n else:\n return False\n elif isinstance(from_dtype, np.dtype):\n if isinstance(to_dtype, np.dtype):\n return np.can_cast(from_dtype, to_dtype)\n elif isinstance(to_dtype, cudf.core.dtypes.DecimalDtype):\n if from_dtype.kind in {\"i\", \"f\", \"u\", \"U\", \"O\"}:\n return True\n else:\n return False\n elif isinstance(to_dtype, cudf.core.types.CategoricalDtype):\n return True\n else:\n return False\n elif isinstance(from_dtype, cudf.core.dtypes.ListDtype):\n # TODO: Add level based checks too once casting of\n # list columns is supported\n if isinstance(to_dtype, cudf.core.dtypes.ListDtype):\n return np.can_cast(from_dtype.leaf_type, to_dtype.leaf_type)\n else:\n return False\n elif isinstance(from_dtype, cudf.core.dtypes.CategoricalDtype):\n if isinstance(to_dtype, cudf.core.dtypes.CategoricalDtype):\n return True\n elif isinstance(to_dtype, np.dtype):\n return np.can_cast(from_dtype._categories.dtype, to_dtype)\n else:\n return False\n else:\n return np.can_cast(from_dtype, to_dtype)", "def _convert_dtype_value(val):\n\n convert_dtype_map = {\n 21: \"int8\",\n 20: \"uint8\",\n 6: \"float64\",\n 5: \"float32\",\n 4: \"float16\",\n 3: \"int64\",\n 2: \"int32\",\n 1: \"int16\",\n 0: \"bool\",\n }\n if val not in convert_dtype_map:\n msg = f\"Paddle 
data type value {val} is not handled yet.\"\n raise NotImplementedError(msg)\n return convert_dtype_map[val]", "def test_mixed_dtypes(suffix: str) -> None:\n path = rsc / mixed_dtypes_file\n df = read_ods(path.with_suffix(suffix), 1)\n\n assert isinstance(df, pd.DataFrame)\n assert len(df) == 10\n assert len(df.columns) == 5\n\n type_list = [float, object, float, float, object]\n assert df.dtypes.tolist() == type_list\n col_b_types = [type(v) for v in df.B.values]\n assert str in col_b_types and float in col_b_types", "def cudf_dtype_to_pa_type(dtype):\n if cudf.api.types.is_categorical_dtype(dtype):\n raise NotImplementedError()\n elif (\n cudf.api.types.is_list_dtype(dtype)\n or cudf.api.types.is_struct_dtype(dtype)\n or cudf.api.types.is_decimal_dtype(dtype)\n ):\n return dtype.to_arrow()\n else:\n return np_to_pa_dtype(cudf.dtype(dtype))", "def to_cudf_compatible_scalar(val, dtype=None):\n\n if cudf._lib.scalar._is_null_host_scalar(val) or isinstance(\n val, cudf.Scalar\n ):\n return val\n\n if not cudf.api.types._is_scalar_or_zero_d_array(val):\n raise ValueError(\n f\"Cannot convert value of type {type(val).__name__} \"\n \"to cudf scalar\"\n )\n\n if isinstance(val, Decimal):\n return val\n\n if isinstance(val, (np.ndarray, cp.ndarray)) and val.ndim == 0:\n val = val.item()\n\n if (\n (dtype is None) and isinstance(val, str)\n ) or cudf.api.types.is_string_dtype(dtype):\n dtype = \"str\"\n\n if isinstance(val, str) and val.endswith(\"\\x00\"):\n # Numpy string dtypes are fixed width and use NULL to\n # indicate the end of the string, so they cannot\n # distinguish between \"abc\\x00\" and \"abc\".\n # https://github.com/numpy/numpy/issues/20118\n # In this case, don't try going through numpy and just use\n # the string value directly (cudf.DeviceScalar will DTRT)\n return val\n\n tz_error_msg = (\n \"Cannot covert a timezone-aware timestamp to timezone-naive scalar.\"\n )\n if isinstance(val, pd.Timestamp):\n if val.tz is not None:\n raise NotImplementedError(tz_error_msg)\n val = val.to_datetime64()\n elif isinstance(val, pd.Timedelta):\n val = val.to_timedelta64()\n elif isinstance(val, datetime.datetime):\n if val.tzinfo is not None:\n raise NotImplementedError(tz_error_msg)\n val = np.datetime64(val)\n elif isinstance(val, datetime.timedelta):\n val = np.timedelta64(val)\n\n val = _maybe_convert_to_default_type(\n cudf.api.types.pandas_dtype(type(val))\n ).type(val)\n\n if dtype is not None:\n if isinstance(val, str) and np.dtype(dtype).kind == \"M\":\n # pd.Timestamp can handle str, but not np.str_\n val = pd.Timestamp(str(val)).to_datetime64().astype(dtype)\n else:\n val = val.astype(dtype)\n\n if val.dtype.type is np.datetime64:\n time_unit, _ = np.datetime_data(val.dtype)\n if time_unit in (\"D\", \"W\", \"M\", \"Y\"):\n val = val.astype(\"datetime64[s]\")\n elif val.dtype.type is np.timedelta64:\n time_unit, _ = np.datetime_data(val.dtype)\n if time_unit in (\"D\", \"W\", \"M\", \"Y\"):\n val = val.astype(\"timedelta64[ns]\")\n\n return val", "def get_data_type(self, idx):\n return(self.data[idx].dtype)", "def typecast(dtype: Any) -> str:\n if dtype is int:\n return \"Int64\"\n elif dtype is float:\n return \"Float64\"\n elif dtype is bool:\n return \"bool\"\n return \"string\"", "def get_column_type(cls, **kwargs: Any) -> Any:\n precision = kwargs.get(\"precision\")\n scale = kwargs.get(\"scale\")\n return sqlalchemy.DECIMAL(precision=precision, scale=scale)", "def test_wrong_type_of_cast_non_numeric_values(currency_df):\n with pytest.raises(TypeError):\n _ = 
currency_df.currency_column_to_numeric(\n \"d_col\",\n cast_non_numeric={\"foo\": \"zzzzz\"},\n )", "def astype(self, col_dtypes, errors: str = \"raise\"): # noqa: PR02\n return DataFrameDefault.register(pandas.DataFrame.astype)(\n self, dtype=col_dtypes, errors=errors\n )", "def numerical(df):\r\n numerical_var=df.select_dtypes(include =['float64','int64']).columns.tolist()\r\n return numerical_var", "def create_data_types(self):\n for col in self.all_columns:\n try:\n if float(self.train[col].iloc[-3]):\n self.train[col] = self.train[col].astype(np.float32)\n except:\n pass\n self.d_types = self.train.dtypes", "def _convert_timestamp(self, df: DataFrame, timestamp_column: str = \"ts\") -> DataFrame:\n return df.withColumn(timestamp_column, (col(timestamp_column) / 1000).cast(TimestampType()))", "def _astype_internal(self, column: str, numpy_dtype: str) -> None:\n new_kind: str = utils.convert_numpy_to_kind(numpy_dtype)\n dtype, loc, order = self._get_col_dtype_loc_order(column) # type: str, int, int\n\n srm = []\n\n if dtype == new_kind:\n return None\n col_data: ndarray = self._data[dtype][:, loc]\n nulls = utils.isna_array(col_data, dtype)\n\n if numpy_dtype == 'S':\n col_data = col_data.astype('U')\n col_data, _, srm = _va.convert_str_to_cat(col_data)\n col_data[nulls] = 0\n elif numpy_dtype == 'b':\n col_data = col_data.astype('bool').astype('int8')\n col_data[nulls] = -1\n elif numpy_dtype == 'i':\n col_data = col_data.astype('int64')\n col_data[nulls] = MIN_INT\n elif numpy_dtype == 'f':\n col_data = col_data.astype('int64')\n col_data[nulls] = np.nan\n elif col_data.dtype.kind == 'M':\n col_data = col_data.astype('datetime64[ns]')\n col_data[nulls] = NaT\n elif col_data.dtype.kind == 'm':\n col_data = col_data.astype('timedelta64[ns]')\n col_data[nulls] = NaT\n\n self._remove_column(column)\n self._write_new_column_data(column, new_kind, col_data, srm, order)", "def add_primitive_column(self, colname, coltype, func, *col_names):\n from pyspark.sql.types import _parse_datatype_string\n t = _parse_datatype_string(coltype)\n\n @pandas_udf(t)\n def pudf(*cols):\n return func(*cols)\n columns = [self._df[cn] for cn in col_names]\n return wrap(self._df.withColumn(colname, pudf(*columns)), self._table_info)", "def inferdtypes(df):\n dev_lst = df[DEVICE].unique()\n res_lst = []\n for dev in dev_lst:\n vals = df[df[DEVICE] == dev][VAL]\n dtype = infer_dtype(vals)\n res_lst.append((dev, dtype))\n return res_lst", "def _column_type(t):\n return 'bigint' if datastore_type[t].numeric else 'text'", "def _maybe_convert_to_default_type(dtype):\n if cudf.get_option(\"default_integer_bitwidth\"):\n if cudf.api.types.is_signed_integer_dtype(dtype):\n return cudf.dtype(\n f'i{cudf.get_option(\"default_integer_bitwidth\")//8}'\n )\n elif cudf.api.types.is_unsigned_integer_dtype(dtype):\n return cudf.dtype(\n f'u{cudf.get_option(\"default_integer_bitwidth\")//8}'\n )\n if cudf.get_option(\n \"default_float_bitwidth\"\n ) and cudf.api.types.is_float_dtype(dtype):\n return cudf.dtype(f'f{cudf.get_option(\"default_float_bitwidth\")//8}')\n\n return dtype", "def sql_type(dtype):\n if dtype.kind in (\"i\",\"u\",\"f\"):\n # It's a numeric type\n if dtype == np.int32:\n return \"integer\"\n elif dtype == np.int64:\n return \"bigint\"\n elif dtype == np.float32:\n return \"real\"\n elif dtype == np.float64:\n return \"float\"\n else:\n raise ValueError(\"Unsupported data type \"+str(dtype))\n elif dtype.kind == \"S\":\n # It's a string\n # Note: this assumes 1 byte = 1 character!\n return (\"char(%d)\" % 
dtype.itemsize)\n else:\n # Not numeric or string, don't know what to do with this!\n raise ValueError(\"Unsupported data type \"+str(dtype))", "def get_postgres_column_type(\n self, tablename: str, column_name: str\n ) -> str:\n return self.get_postgres_column_definition(\n tablename=tablename, column_name=column_name\n ).data_type.upper()", "def identify_numeric_columns(dataset):\n return identify_columns_by_type(dataset, include=['int64', 'float64'])", "def as_spark_schema(self):\n if self.is_tensor_spec():\n raise MlflowException(\"TensorSpec cannot be converted to spark dataframe\")\n if len(self.inputs) == 1 and self.inputs[0].name is None:\n return self.inputs[0].type.to_spark()\n from pyspark.sql.types import StructField, StructType\n\n return StructType(\n [\n StructField(name=col.name or str(i), dataType=col.type.to_spark())\n for i, col in enumerate(self.inputs)\n ]\n )", "def as_correct_dtype(obj: ndarray, required_dtype: dtype) -> ndarray:\n if obj.dtype != required_dtype:\n return obj.astype(required_dtype)\n return obj", "def to_numeric_and_downcast_data(df: pd.DataFrame):\n fcols = df.select_dtypes('float').columns\n \n icols = df.select_dtypes('integer').columns\n\n df[fcols] = df[fcols].apply(pd.to_numeric, downcast='float')\n \n df[icols] = df[icols].apply(pd.to_numeric, downcast='integer')\n\n return df", "def apply_to(self, df: pd.DataFrame) -> pd.DataFrame:\n schema_names = self.names\n data_columns = df.columns\n\n assert len(schema_names) == len(\n data_columns\n ), \"schema column count does not match input data column count\"\n\n for column, dtype in zip(data_columns, self.types):\n pandas_dtype = dtype.to_pandas()\n\n col = df[column]\n col_dtype = col.dtype\n\n try:\n not_equal = pandas_dtype != col_dtype\n except TypeError:\n # ugh, we can't compare dtypes coming from pandas,\n # assume not equal\n not_equal = True\n\n if not_equal or not dtype.is_primitive():\n new_col = convert(col_dtype, dtype, col)\n else:\n new_col = col\n df[column] = new_col\n\n # return data with the schema's columns which may be different than the\n # input columns\n df.columns = schema_names\n return df", "def byte_to_literal_strings(dataframe):\n # Select the str columns:\n str_df = dataframe.select_dtypes([np.object])\n\n if not str_df.empty:\n # Convert all of them into unicode strings\n str_df = str_df.stack().str.decode('utf-8').unstack()\n # Swap out converted cols with the original df cols\n for col in str_df:\n dataframe[col] = str_df[col]\n\n return dataframe", "def is_numeric_dtype(df, col_name):\n dtype = df.dtypes[col_name]\n return np.issubdtype(dtype, np.number)", "def test_df():\n return pd.DataFrame({\n 'intcol': [1, 2, 3],\n 'strcol': ['four', 'five', 'six'],\n 'floatcol': [7.0, 8.0, 9.0]\n })", "def get_inferred_dtypes(self, dtypes_validated: TYPE_DSTR) -> TYPE_DSTR:\n\n dtypes_inferred = {}\n\n for column in self.df.columns:\n if column in dtypes_validated:\n continue\n\n dtypes_inferred[column] = self.inspect_dtype(self.df[column])\n\n return dtypes_inferred", "def _is_object_type(df, field):\n return df[field].dtype.name == 'object'", "def dtypes(self) -> 'DataFrame':\n dtype_list: List[str] = [utils.convert_kind_to_dtype(self._column_info[col].dtype)\n for col in self._columns]\n arr: ndarray = np.array(dtype_list, dtype='O')\n columns: List[str] = ['Column Name', 'Data Type']\n data, str_reverse_map = _va.convert_str_to_cat_list_2d([self._columns, arr])\n new_data: Dict[str, ndarray] = {'S': data}\n new_column_info: ColInfoT = {'Column Name': utils.Column('S', 
0, 0),\n 'Data Type': utils.Column('S', 1, 1)}\n return self._construct_from_new(new_data, new_column_info, np.array(columns, dtype='O'),\n str_reverse_map)", "def get_dtype(path,nrows=10):\n if nrows is not None:\n train = pd.read_csv(path,nrows=nrows)\n else:\n train = pd.read_pickle(path.replace('.csv','.pkl'))\n col2dtype = OrderedDict()\n for col in train.columns:\n if train[col].dtype=='O':\n col2dtype[col] = 'str'\n elif train[col].dtype==np.int64:\n col2dtype[col] = 'int32'\n else:\n col2dtype[col] = 'float32'\n return col2dtype", "def get_column_type(\n self,\n table: exp.Table | str,\n column: exp.Column,\n dialect: DialectType = None,\n normalize: t.Optional[bool] = None,\n ) -> exp.DataType:", "def dtype(self):\n return self.dataset.dtype", "def test_wrong_type_of_fill_all_non_numeric(currency_df):\n with pytest.raises(TypeError):\n _ = currency_df.currency_column_to_numeric(\n \"d_col\",\n fill_all_non_numeric=\"zzzzz\",\n )", "def to_scalar_df(df: pd.DataFrame) -> pd.DataFrame:\n scalar_df = df\n column_ordering = []\n for c, s in df.items():\n if s.dtype == \"object\":\n s_list = s.to_list()\n try:\n ncols = s_list[0].shape[0]\n split_cols = [f\"{c}_{k}\" for k in range(ncols)]\n sdf = pd.DataFrame(s_list, columns=split_cols)\n scalar_df = pd.concat([scalar_df, sdf], axis=1)\n column_ordering += split_cols\n except AttributeError as e:\n raise ValueError(f\"Expected series of lists, but found {s_list[0]}\") from e\n else:\n column_ordering.append(c)\n return scalar_df[column_ordering]", "def as_column(arbitrary):\n from . import numerical, categorical\n\n if isinstance(arbitrary, Column):\n if not isinstance(arbitrary, TypedColumnBase):\n # interpret as numeric\n return arbitrary.view(numerical.NumericalColumn,\n dtype=arbitrary.dtype)\n else:\n return arbitrary\n elif isinstance(arbitrary, pd.Categorical):\n return categorical.pandas_categorical_as_column(arbitrary)\n elif isinstance(arbitrary, Buffer):\n return numerical.NumericalColumn(data=arbitrary, dtype=arbitrary.dtype)\n elif cuda.devicearray.is_cuda_ndarray(arbitrary):\n return as_column(Buffer(arbitrary))\n elif isinstance(arbitrary, np.ndarray):\n return as_column(Buffer(arbitrary))\n else:\n return as_column(np.asarray(arbitrary))", "def dtype(self):\n return self._vars[0].dtype", "def convert_type(data):\n# Categorical features\n columns = ['Browser', 'OperatingSystems', 'Region', 'TrafficType']\n for col in columns:\n data[col] = data[col].apply(lambda x: str(x))\n return data", "def convert_type(self, value, schema_type, **kwargs):" ]
[ "0.72055167", "0.70074373", "0.67383504", "0.66940576", "0.66150796", "0.65282935", "0.64637434", "0.64026904", "0.63992226", "0.63900924", "0.63755214", "0.63500774", "0.6339335", "0.63325894", "0.6327806", "0.63060737", "0.62946403", "0.62688094", "0.6235217", "0.6229606", "0.62216234", "0.6220546", "0.6207605", "0.6205551", "0.618993", "0.6183051", "0.616651", "0.6109026", "0.60733336", "0.607028", "0.6066966", "0.606524", "0.6061678", "0.6053425", "0.6039371", "0.60374105", "0.60351", "0.60187846", "0.5986433", "0.59859824", "0.59666127", "0.5950173", "0.5947038", "0.5913827", "0.58832216", "0.5876434", "0.58578897", "0.5852664", "0.5851989", "0.5835813", "0.58353156", "0.58243525", "0.5823382", "0.5822718", "0.5810129", "0.5809219", "0.58045137", "0.5803344", "0.579727", "0.57771033", "0.577634", "0.5768496", "0.5766407", "0.57613206", "0.57594764", "0.5758032", "0.5756342", "0.57533145", "0.57513136", "0.57391036", "0.57374877", "0.57354504", "0.57312864", "0.5725831", "0.5717602", "0.5712458", "0.5708492", "0.5705891", "0.57017446", "0.5700735", "0.5695873", "0.56883264", "0.568695", "0.568402", "0.5656267", "0.5651409", "0.5650776", "0.5640392", "0.5617212", "0.56112415", "0.56017435", "0.5591067", "0.5578665", "0.5574793", "0.5550054", "0.5549199", "0.5545072", "0.5541873", "0.55414563", "0.5537471" ]
0.7871534
0
Model function for CNN.
def cnn_model_fn(input_data): # Input Layer # [batch_size, image_height, image_width, channels] input_layer = tf.reshape(input_data, [-1, 100, 100, 3]) # Convolutional Layer #1 conv1 = tf.layers.conv2d( inputs=input_layer, filters=32, kernel_size=[5, 5], padding="same", activation=tf.nn.relu) # Pooling Layer #1 pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2) # Convolutional Layer #2 and Pooling Layer #2 conv2 = tf.layers.conv2d( inputs=pool1, filters=64, kernel_size=[5, 5], padding="same", activation=tf.nn.relu) pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2) # Dense Layer pool2_flat = tf.reshape(pool2, [-1, 25 * 25 * 64]) dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) dropout = tf.layers.dropout( inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN) # Logits Layer logits = tf.layers.dense(inputs=dropout, units=PREDICT_CLASSES) predictions = { # Generate predictions (for PREDICT and EVAL mode) "classes": tf.argmax(input=logits, axis=1), # Add `softmax_tensor` to the graph. It is used for PREDICT and by the # `logging_hook`. "probabilities": tf.nn.softmax(logits, name="softmax_tensor") } return logits, predictions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CNN_model():\n prob = 0.1\n model = Sequential()\n # model.add(Conv2D(filters = 32, kernel_size = (5,5),padding = 'Same',\n # activation ='relu', input_shape = (28,28,1)))\n # model.add(Conv2D(filters = 64, kernel_size = (5,5),padding = 'Same',\n # activation ='relu'))\n # model.add(BatchNormalization())\n # model.add(MaxPooling2D(pool_size=(2,2)))\n # model.add(Dropout(0.25))\n\n # model.add(Flatten())\n model.add(Dense(512, activation = \"relu\"))\n model.add(Dropout(prob))\n\n model.add(Dense(1024, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(2048, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(2048, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(4096, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(2048, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(2048, activation = \"relu\"))\n model.add(Dropout(prob))\n model.add(Dense(1024, activation = \"relu\"))\n model.add(Dropout(prob))\n\n # model.add(Dense(2048, activation = \"relu\"))\n # model.add(Dropout(prob))\n #\n # model.add(Dense(2048, activation = \"relu\"))\n # model.add(Dropout(prob))\n\n model.add(Dense(1024, activation = \"relu\"))\n model.add(Dropout(prob))\n\n # model.add(BatchNormalization())\n model.add(Dense(512, activation = \"relu\"))\n model.add(Dropout(prob))\n # model.add(BatchNormalization())\n # model.add(Dropout(0.5))\n model.add(Dense(2, activation = \"softmax\"))\n\n return model", "def build_model_cnn(x_train, n=32, d=0.25, k=5):\n model = Sequential()\n model.add(Dropout(rate = d, input_shape = (x_train.shape[1], 1)))\n model.add(Conv1D(filters=n, kernel_size=(5), strides=1, activation = 'relu', kernel_constraint=max_norm(4)))\n model.add(Dropout(rate = d))\n model.add(MaxPooling1D(pool_size=k))\n model.add(Conv1D(filters=n, kernel_size=(5), strides=1, activation = 'relu', kernel_constraint=max_norm(4)))\n model.add(Dropout(rate = d))\n model.add(MaxPooling1D(pool_size=k))\n model.add(Flatten())\n model.add(Dense(n, activation ='relu', kernel_constraint=max_norm(4)))\n model.add(Dense(4, activation = 'softmax')) # eliminated Q label\n model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['categorical_accuracy'])\n return model", "def cnn_model(model_name, img_size):\n\tinput_size = (img_size, img_size, 3)\n\n\tif model_name == \"xception\":\n\t\tbaseModel = Xception(\n\t\t\tweights=\"imagenet\", include_top=False, input_shape=(img_size, img_size, 3)\n\t\t)\n\telif model_name == \"efn0\":\n\t\tbaseModel = efn.EfficientNetB0(weights=\"imagenet\", include_top=False,\n\t\t\tinput_shape=input_size)\n\telif model_name == \"efn_noisy\":\n\t\tbaseModel = 
efn.EfficientNetB5(weights=\"noisy-student\", include_top=False,\n\t\t\tinput_shape=input_size)\n\n\theadModel = baseModel.output\n\theadModel = GlobalAveragePooling2D()(headModel)\n\theadModel = Dense(1024, activation=\"relu\", kernel_initializer=\"he_uniform\")(\n\t\theadModel\n\t)\n\theadModel = Dropout(0.4)(headModel)\n\tpredictions = Dense(\n\t\t200,\n\t\tactivation=\"softmax\",\n\t\tkernel_initializer=\"he_uniform\")(\n\t\theadModel\n\t)\n\tmodel = Model(inputs=baseModel.input, outputs=predictions)\n\n\tfor layer in baseModel.layers:\n\t\tlayer.trainable = False\n\n\toptimizer = Nadam(\n\t\tlr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004\n\t)\n\tmodel.compile(\n\t\t# loss=\"categorical_crossentropy\",\n\t\tloss=joint_loss,\n\t\toptimizer=optimizer,\n\t\tmetrics=[\"accuracy\"]\n\t)\n\treturn model", "def build_cnn(self):\n model = Sequential()\n model.add(Conv2D(24, (1, 3), activation = 'relu', input_shape = (1, grid_size*grid_size+2, 1)))\n model.add(Conv2D(24, (1, 3), activation = 'relu', input_shape = (1, grid_size*grid_size+2, 1)))\n model.add(Flatten())\n model.add(Dense(len(ACTIONS), activation = 'linear'))\n model.compile(loss = 'mse', optimizer = Adam(lr = alpha))\n\n return model", "def make_model():\n \n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu',\n input_shape=(150, 150, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu'))\n model.add(layers.Dense(37, activation='softmax'))\n \n #model.add(layers.Dense(1, activation='sigmoid'))\n \n\n model.compile(loss='sparse_categorical_crossentropy',\n optimizer=optimizers.RMSprop(lr=1e-4),\n metrics=['acc'])\n return model", "def CNN_ClassModel(input_shape=(1, 72, 72),n_class=3,lr=0.001):\n model=Sequential()\n model.add(Convolution2D(filters=16,nb_row=5,nb_col=5, \n # kernel_size=5,strides=1,\n padding='same',data_format='channels_first',\n input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',data_format='channels_first'))\n model.add(Dropout(0.9))\n model.add(Flatten())\n model.add(Dense(200))\n model.add(Activation('relu'))\n\n model.add(Dense(n_class))\n model.add(Activation('softmax'))\n\n adam=Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=False)\n model.compile(optimizer=adam,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n return model", "def construct_model():\n # model = Sequential()\n # model.add(Dense(units=64, activation='relu', input_dim=100))\n # model.add(Dense(units=10, activation='softmax'))\n # model.compile(loss='categorical_crossentropy',\n # optimizer='sgd',\n # metrics=['accuracy'])\n # return model\n\n model = Sequential()\n # Input Layer\n model.add(Conv2D(64, 3, data_format='channels_last', activation='relu', padding='same',\n input_shape=(img_width, img_height, 3)))\n model.add(MaxPool2D(pool_size=2, strides=2))\n # Hidden Layer 1\n model.add(Conv2D(64, 3, activation='relu', padding='same'))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 2\n model.add(Conv2D(128, 3, activation='relu', padding='same'))\n model.add(Conv2D(128, 3, activation='relu', 
padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n # Hidden Layer 3\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same'))\n model.add(Conv2D(256, 3, activation='relu', padding='same', strides=2))\n model.add(MaxPool2D(pool_size=2, strides=2))\n\n\n # Fully Connected Layer\n model.add(Flatten())\n # 512 Neuron Layer\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.5))\n # Output Layer\n model.add(Dense(num_of_classes))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model", "def cnn_model(model_name, img_size, weights):\n input_size = (img_size, img_size, 3)\n if model_name == \"xception\":\n baseModel = Xception(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"iv3\":\n baseModel = InceptionV3(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"irv2\":\n baseModel = InceptionResNetV2(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"resnet\":\n baseModel = ResNet50(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"nasnet\":\n baseModel = NASNetLarge(\n weights=\"imagenet\",\n include_top=False,\n input_shape=(img_size, img_size, 3)\n )\n elif model_name == \"ef0\":\n baseModel = EfficientNetB0(\n input_size,\n weights=\"imagenet\",\n include_top=False\n )\n elif model_name == \"ef5\":\n baseModel = EfficientNetB5(\n input_size,\n weights=\"imagenet\",\n include_top=False\n )\n\n headModel = baseModel.output\n headModel = GlobalAveragePooling2D()(headModel)\n headModel = Dense(\n 512,\n activation=\"relu\",\n kernel_initializer=\"he_uniform\",\n name=\"fc1\")(\n headModel\n )\n headModel = Dropout(0.4)(headModel)\n predictions = Dense(\n 2,\n activation=\"softmax\",\n kernel_initializer=\"he_uniform\")(\n headModel\n )\n model = Model(inputs=baseModel.input, outputs=predictions)\n\n model.load_weights(weights)\n print(\"Weights loaded...\")\n model_lstm = Model(\n inputs=baseModel.input,\n outputs=model.get_layer(\"fc1\").output\n )\n\n for layer in baseModel.layers:\n layer.trainable = True\n\n optimizer = Nadam(\n lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004\n )\n model.compile(\n loss=\"categorical_crossentropy\",\n optimizer=optimizer,\n metrics=[\"accuracy\"]\n )\n return model_lstm", "def create_model():\n \n cnn = tf.keras.Sequential()\n \n cnn.add(tf.keras.layers.InputLayer(input_shape=(img_height,img_width,1)))\n \n # Normalization\n cnn.add(tf.keras.layers.BatchNormalization())\n \n # Conv + Maxpooling\n cnn.add(tf.keras.layers.Convolution2D(64, (4, 4), padding='same', activation='relu'))\n cnn.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n\n # Dropout\n cnn.add(tf.keras.layers.Dropout(0.1))\n \n # Conv + Maxpooling\n cnn.add(tf.keras.layers.Convolution2D(64, (4, 4), activation='relu'))\n cnn.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n\n # Dropout\n cnn.add(tf.keras.layers.Dropout(0.3))\n\n # Converting 3D feature to 1D feature Vektor\n cnn.add(tf.keras.layers.Flatten())\n\n # Fully Connected Layer\n cnn.add(tf.keras.layers.Dense(256, activation='relu'))\n\n # Dropout\n cnn.add(tf.keras.layers.Dropout(0.5))\n \n # Fully Connected Layer\n cnn.add(tf.keras.layers.Dense(64, activation='relu'))\n \n # 
Normalization\n cnn.add(tf.keras.layers.BatchNormalization())\n\n cnn.add(tf.keras.layers.Dense(num_classes, activation='softmax'))\n cnn.compile(loss='sparse_categorical_crossentropy', optimizer=tf.compat.v1.train.AdamOptimizer(), metrics=['accuracy'])\n\n return cnn", "def build_model():\n\n if K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\n else:\n input_shape = (img_width, img_height, 3)\n\n model = Sequential()\n model.add(Conv2D(32, (3, 3), input_shape=input_shape))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(32, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(64, (3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Flatten())\n\n # FC layer\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n\n model.compile(loss='binary_crossentropy',\n optimizer='adadelta',\n metrics=['accuracy'])\n\n return model", "def build_model():\n model = models.Sequential()\n\n # # Anti-overfit methods\n # model.add(layers.BatchNormalization())\n # model.add(layers.Dropout(0.5))\n # regularizers.l1_l2(l1=0.01, l2=0.01)\n\n model.add(layers.Conv2D(200, (3, 3), activation='relu',\n input_shape=nnc.INPUT_SHAPE))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(200, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(150, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Conv2D(100, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.BatchNormalization())\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(256, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(512, activation='relu',))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(7, activation='sigmoid'))\n model.compile(optimizer=nnc.OPTIMIZER, loss=nnc.LOSS, metrics=nnc.METRICS)\n\n # # Print the model to the console\n model.summary()\n # # Print the model to a png file\n # utils.plot_model(model, show_shapes=True, to_file=nnc.MODEL_PLOT_PATH)\n # # Turn into multi-gpu model\n # model = utils.multi_gpu_model(model, gpus=2)\n\n return model", "def build_model(num_classes=43):\n model = models.Sequential()\n model.add(layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n\n model.add(layers.Conv2D(128, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.Conv2D(128, (3, 3), activation='relu',\n input_shape=(32, 32, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n \n\n model.add(layers.Flatten())\n model.add(layers.Dense(num_classes, activation='softmax'))\n model.summary()\n\n return model", "def create_cnn_model() -> tf.keras.Sequential:\n model = Sequential(\n [\n layers.Convolution1D(filters=16, kernel_size=7, input_shape=[186, 1]),\n layers.LeakyReLU(),\n 
layers.MaxPool1D(pool_size=2),\n layers.Convolution1D(filters=16, kernel_size=5),\n layers.LeakyReLU(),\n layers.Convolution1D(filters=16, kernel_size=5),\n layers.LeakyReLU(),\n layers.Convolution1D(filters=16, kernel_size=5),\n layers.LeakyReLU(),\n layers.MaxPool1D(pool_size=2),\n layers.Flatten(),\n layers.Dense(128),\n layers.LeakyReLU(),\n layers.Dense(5, activation=\"softmax\"),\n ]\n )\n return model", "def create_cnn(num_features: int = None) -> models.Model:\n nclass = num_features or 5\n inp = Input(shape=(187, 1))\n img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding=\"valid\")(inp)\n # img_1 = Convolution1D(16, kernel_size=5, activation=activations.relu, padding=\"valid\")(img_1)\n img_1 = MaxPool1D(pool_size=2)(img_1)\n img_1 = Dropout(rate=0.1)(img_1)\n img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n # img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n img_1 = MaxPool1D(pool_size=2)(img_1)\n img_1 = Dropout(rate=0.1)(img_1)\n img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n # img_1 = Convolution1D(32, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n img_1 = MaxPool1D(pool_size=2)(img_1)\n img_1 = Dropout(rate=0.1)(img_1)\n img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n # img_1 = Convolution1D(256, kernel_size=3, activation=activations.relu, padding=\"valid\")(img_1)\n img_1 = GlobalMaxPool1D()(img_1)\n img_1 = Dropout(rate=0.2)(img_1)\n\n dense_1 = Dense(64, activation=activations.relu, name=\"dense_1\")(img_1)\n dense_1 = Dense(64, activation=activations.relu, name=\"dense_2\")(dense_1)\n dense_1 = Dense(nclass, activation=activations.softmax, name=\"dense_3_mitbih\")(dense_1)\n\n model = models.Model(inputs=inp, outputs=dense_1)\n opt = optimizers.Adam(learning_rate=0.001)\n\n model.compile(optimizer=opt,\n loss=losses.sparse_categorical_crossentropy,\n metrics=['accuracy'])\n model.summary()\n return model", "def modelbuilder():\n model = Sequential()\n # Add a convolution layer with with a sigmoid activation function\n model.add(layers.Conv2D(1, (2, 2), strides=(1, 1), activation='sigmoid', padding='same', input_shape=(256, 256, 3)))\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\n model.summary()\n return model", "def create_model(input_shape=None):\n\n model = Sequential()\n #n,height,width,chennel = input_shape\n height = 146\n width = 243\n chennel = 3\n\n model.add(Conv2D(filters=4, input_shape=(width, height, chennel), kernel_size=(3, 3), padding='same'))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(filters=4,kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(filters=4, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=16, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Conv2D(filters=8, kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=8, 
kernel_size=(3, 3)))\n model.add(Activation('relu'))\n model.add(Conv2D(filters=16, kernel_size=(5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(0.25))\n model.add(Dense(32))\n model.add(Activation('relu'))\n model.add(Dropout(0.25))\n model.add(Dense(8))\n model.add(Activation('softmax'))\n\n sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.87, nesterov=True)\n model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=[\"accuracy\"])\n return model", "def get_model(summary=False):\n\timage_input=Input(shape=(220,220,5),name='image_input')\n\tbranch1_conv1=Conv2D(64, kernel_size=(3, 3), border_mode='same', input_shape=(220,220,5), activation='relu')(image_input)\n\tbranch1_conv2=Conv2D(64, kernel_size=(1, 1), border_mode='same', activation='relu')(branch1_conv1)\t\n\tbranch1_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch1_conv1)\n\tbranch2_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch1_pool1)\n\tbranch2_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch2_conv1)\t\n\tbranch2_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch2_conv2)\n\tbranch3_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch2_pool1)\n\tbranch3_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch3_conv1)\t\n\tbranch3_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch3_conv2)\n\tbranch4_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch3_pool1)\n\tbranch4_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch4_conv1)\t\n\tbranch4_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch4_conv2)\n\tbranch5_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch4_pool1)\n\tbranch5_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch5_conv1)\t\n\tbranch5_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch5_conv2)\n\tbranch6_conv1=Conv2D(128, kernel_size=(3, 3), border_mode='same', activation='relu')(branch5_pool1)\n\tbranch6_conv2=Conv2D(128, kernel_size=(1, 1), border_mode='same', activation='relu')(branch6_conv1)\t\n\tbranch6_pool1=MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(branch6_conv2)\n\tbranch1_flat=Flatten()(branch6_pool1)\n\tdrop=Dropout(.3)(branch1_flat)\n\t# FC layers group\n\tdense1=Dense(512, activation='relu', name='fc1')(drop)\n\tdrop1=Dropout(.3)(dense1)\n\tdense2=Dense(256, activation='relu', name='fc2')(drop1)\n\tdrop3=Dropout(.3)(dense2)\n\tout=Dense(2, activation='softmax', name='fc4')(drop3)\n\tmodel=Model(inputs=image_input,outputs=out)\n\treturn model", "def cnn_model(input_shape, lr, num_classes):\n\n model = Sequential()\n model.add(Conv2D(filters=16, kernel_size=(3, 3), activation=\"relu\", input_shape=(input_shape)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Conv2D(filters=32, kernel_size=(3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Conv2D(filters=32, kernel_size=(3, 3), activation=\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n model.add(Flatten())\n 
model.add(Dense(64, activation='relu'))\n model.add(Dropout(0.1))\n model.add(Dense(32, activation='relu'))\n model.add(Dropout(0.1))\n model.add(Dense(num_classes, activation='sigmoid'))\n\n opt = keras.optimizers.Adam(learning_rate=lr)\n\n model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])\n\n model.summary()\n\n return model", "def get_model():\r\n model = Sequential([\r\n\r\n Lambda(normalize, input_shape=(66, 200, 3)),\r\n\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(16, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n MaxPooling2D(pool_size=(3, 3)),\r\n\r\n Convolution2D(48, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(32, (3, 3), padding='same', activation='relu', strides=1),\r\n Convolution2D(24, (3, 3), padding='same', activation='relu', strides=2),\r\n Convolution2D(1, (3, 3), padding='same', activation='relu', strides=2),\r\n Flatten(),\r\n\r\n\r\n ])\r\n\r\n model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])\r\n return model", "def build_model():\n model = keras.Sequential()\n\n model.add(Conv2D(32, (5, 5), activation='relu', input_shape=(32, 32, 1)))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.1))\n\n model.add(Conv2D(64, (5, 5), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(43, activation='softmax'))\n\n model.compile(optimizer=keras.optimizers.Adam(),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\n return model", "def create_model():\n model = models.Sequential()\n model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(1024, 1024, 1)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Dropout(0.25))\n\n model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Dropout(0.25))\n\n model.add(layers.Flatten())\n model.add(layers.Dense(64, activation='relu'))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(2, activation='softmax'))\n\n model.summary()\n\n model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n return model", "def model_CNN(x_train, y_train, x_test=None, y_test=None, kwargs={}):\n \"\"\"\n Notes on Input shape\n 4D tensor with shape (batch_size, timesteps, features, `colors`).\n 4D tensor with shape: (samples, rows, cols, channels)\n `channels_last` (default)\n Output 4D tensor with shape: (samples, new_rows, new_cols, filters)\n \"\"\"\n ######## CNN for stocks\n # create and fit CNN\n # input_shape = StockDate x Lookback x Features\n from keras.layers import Conv2D, MaxPooling2D\n from keras.optimizers import SGD\n\n\n layers = kwargs.get('layers', 10 ) #TODO\n nodes = kwargs.get('nodes', None) #TODO\n\n if nodes is None or nodes==0 or nodes==[0]:\n nodes = [np.shape(x_train)[1]*3]\n elif isinstance(nodes, (int, 
np.integer)): # turn int to list\n        nodes = [nodes]\n\n    if layers > 1 and len(nodes) < layers:\n        nodes = list(np.pad(nodes,[0,layers-len(nodes)], mode='constant',constant_values=nodes[-1]))\n\n    ndim = np.max([2,len(np.shape(x_train))]) # Min 2D\n    if ndim==2:\n        input_shape=(x_train.shape[1],)\n    elif ndim==3:\n        input_shape=(x_train.shape[1],x_train.shape[2])\n    elif ndim==4:\n        input_shape=(x_train.shape[1],x_train.shape[2],x_train.shape[3])\n    else:\n        input_shape=x_train.shape[1:]\n    if kwargs.get('learning_rate', False):\n        lr = kwargs.get('learning_rate')\n    else:\n        lr = False\n\n    if False:\n        conv = (3, 3)\n    else:\n        conv = (2, 2)\n    n_conv = 5\n\n    if np.ndim(y_train)==1:\n        n_out = 1 #e.g. forecast y as float, just 1 step ahead.\n    else:\n        n_out = np.shape(y_train)[1] #e.g. onehot encoded, or n-steps ahead.\n\n    dropout = kwargs.get('dropout',0) # dropout rate between 0 and 1.\n    #stateful = kwargs.get('stateful',True)\n    actvn = 'relu' #kwargs.get('actvn','relu')\n    actvl = kwargs.get('actvl','sigmoid')\n    model=[]\n    model = Sequential() # https://keras.io/models/sequential/\n    model.reset_states()\n    # input: 100x100 images with 3 channels -> (100, 100, 3) tensors.\n    # this applies 32 convolution filters of size 3x3 each.\n    model.add(Conv2D(n_conv, conv, activation=actvn, input_shape=input_shape))\n    #model.add(Conv2D(n_conv, conv, activation=actvn))\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(dropout ))\n\n    model.add(Conv2D(n_conv*2, conv, activation=actvn))\n    #model.add(Conv2D(n_conv*2, conv, activation=actvn))\n    #model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(dropout ))\n\n    model.add(Flatten())\n    model.add(Dense(np.min(input_shape), activation=actvn))\n    model.add(Dropout(dropout*2))\n    model.add(Dense(n_out, activation=actvl))\n\n    if 'optimizer' in kwargs:  # kwargs is a dict, so membership test (not hasattr) is needed\n        optimizer = kwargs['optimizer']\n    elif lr:\n        optimizer = SGD(lr=lr, decay=1e-6, momentum=0.01, nesterov=True)\n    else:\n        optimizer = 'Nadam' #keras.optimizers.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)\n\n    if is_bool_dtype(y_train):\n        model.compile(loss='binary_crossentropy', optimizer=optimizer)\n    elif is_categorical_dtype(y_train) or kwargs.get('onehot',False):\n        #TODO Multiple Category\n        model.compile(loss='categorical_crossentropy', optimizer=optimizer)\n    else:\n        #model.compile(loss='mean_squared_error', optimizer=optimizer)\n        model.compile(loss='mean_squared_error', optimizer=optimizer, metrics=[r2_keras])\n\n\n    if kwargs.get('verbose',False) > 1:\n        model.summary()\n        print(\"Inputs: {}\".format(model.input_shape))\n        print(\"Outputs: {}\".format(model.output_shape))\n        print(\"Actual input: {}\".format(x_train.shape))\n        print(\"Actual output: {}\".format(y_train.shape))\n        print('Model Loss: ' + model.loss)\n\n    # For compatibility with other models;\n    model.score = model.evaluate\n\n    return model #self.model=model", "def build_model(self):\n        self.model = models.Sequential()\n        self.model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))\n        self.model.add(layers.MaxPooling2D((2, 2)))\n        self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n        self.model.add(layers.MaxPooling2D((2, 2)))\n        self.model.add(layers.Conv2D(64, (3, 3), activation='relu'))\n        self.model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss='mse', metrics=['mae'])\n        self.model.add(layers.Flatten())\n        self.model.add(layers.Dense(64, activation='relu'))\n        self.model.add(layers.Dense(10, activation='softmax'))\n        self.model.compile(optimizer='rmsprop',\n                           loss='categorical_crossentropy',\n
metrics=['accuracy'])", "def CNN_RegModel(input_shape=(1,72,72),output_dim=6,lr=0.0008,model_path='model.h5'):\n model=Sequential()\n model.add(Convolution2D(filters=16,nb_row=5,nb_col=5, padding='same',data_format='channels_first',input_shape=input_shape))\n\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',data_format='channels_first'))\n\n model.add(Convolution2D(filters=32,nb_row=5,nb_col=5, padding='same',data_format='channels_first'))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',data_format='channels_first'))\n \n model.add(Convolution2D(filters=64,nb_row=5,nb_col=5, padding='same',data_format='channels_first'))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',data_format='channels_first'))\n \n model.add(Convolution2D(filters=128,nb_row=5,nb_col=5, padding='same',data_format='channels_first'))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2),padding='same',data_format='channels_first'))\n \n model.add(Flatten())\n model.add(Dense(5184))\n model.add(Activation('relu'))\n\n model.add(Dense(500))\n model.add(Activation('relu'))\n\n model.add(Dense(100))\n model.add(Activation('relu'))\n model.add(Dense(output_dim))\n model.add(Activation('tanh'))\n\n adam=Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=None, decay=1e-6, amsgrad=False)\n model.compile(loss='mse',optimizer=adam,metrics=['acc']) \n # define the checkpoint\n filepath = model_path\n checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')\n callbacks_list = [checkpoint]\n # print(model.summary())\n return model, callbacks_list", "def compile_model(network, nb_classes, input_shape):\r\n nb_layers = network['nb_layers']\r\n layer = network['layer']\r\n nb_neurons = network['nb_neurons']\r\n activation = network['activation']\r\n optimizer = network['optimizer']\r\n\r\n model = Sequential()\r\n\r\n for i in range(nb_layers):\r\n if i == 0:\r\n model.add(Conv2D(nb_neurons, activation=activation, input_shape=input_shape))\r\n else:\r\n model.add(layer(nb_neurons, activation=activation))\r\n \r\n model.add(Dropout(0.2))\r\n\r\n model.add(Dense(nb_classes, activation='softmax'))\r\n\r\n model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\r\n\r\n return model", "def trainNet():", "def first_model():\n model=Sequential()\n # model.add(Flatten(input_shape=(160,320,3)))\n model.add(Lambda(lambda x: (x-128.0)/128.0,input_shape=(160, 320, 3)))\n model.add(Cropping2D(cropping=((70,25), (0,0))))\n model.add(Convolution2D(32, 3, 3))\n model.add(MaxPooling2D((2, 2)))\n model.add(Dropout(0.5))\n model.add(Activation('relu'))\n model.add(Flatten())\n model.add(Dense(128))\n model.add(Activation('relu'))\n model.add(Dense(1))\n\n model.compile(loss=\"mse\",optimizer=\"adam\")\n return model", "def buildModel(self, img_shape = 20, num_categories = 10):\n\n # Add first convolutional layer and max pooling\n self.model.add(Conv2D(32, # Number of features detectors (filters)\n (3, # 3 Row (conv. 
matrix)\n 3), # 3 Columns (conv.matrix usually 3x3)\n input_shape=(img_shape, img_shape, 3), # 3 Channels, img_shape x img_shape images\n activation='relu', # Activation function to increase nonlinearity of images\n name = 'FirstConv2DLayer')) # Layer name\n self.model.add(MaxPooling2D(pool_size=(2, 2))) # Size of pooling matrix (usually 2x2)\n\n # Add second convolutional layer and max pooling\n self.model.add(Conv2D(32, (3, 3), input_shape=(img_shape, img_shape, 3), activation='relu', name ='SecondConv2DLayer'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n\n # Flattening\n self.model.add(Flatten())\n\n # Full connection\n self.model.add(Dense(units=128, activation='relu'))\n\n # One output for each category we predicted want to predict\n self.model.add(Dense(units=num_categories, activation='softmax', name='Probabilities')) # Softmax for more than two outcome\n\n # Compile the CNN (Gradient descent)\n self.model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])", "def keras_model_fn(_, config):\n\n cnn_model = tf.keras.Sequential()\n cnn_model.add(layers.Embedding(input_dim=config['embeddings_dictionary_size'],\n output_dim=config['embeddings_vector_size'],\n input_length=config['padding_size']))\n # cnn_model.add(layers.Embedding(input_dim=50000,\n # output_dim=1,\n # input_length=20))\n cnn_model.add(layers.Conv1D(filters=100,kernel_size=2,padding='valid',activation='relu',strides=1))\n cnn_model.add(layers.GlobalMaxPooling1D())\n cnn_model.add(layers.Dense(100, activation='relu'))\n # cnn_model.add(Activation('relu'))\n cnn_model.add(layers.Dense(1, activation = 'sigmoid'))\n # cnn_model.add(Activation('sigmoid'))\n cnn_model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return cnn_model", "def build_cut_model(self):\n model = None\n if self.model_name == 'vgg16':\n model = vgg16(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'resnet':\n model = ResNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'densenet':\n model = DenseNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'mobilenet':\n model = MobileNet(img_dim=(self.ex_input_size, self.ex_input_size, self.ex_channels), num_classes=num_classes).create()\n elif self.model_name == 'efficientnet':\n model = efficientnet(input_shape=(self.ex_input_size, self.ex_input_size, self.ex_channels))\n elif self.model_name == 'vit':\n model = VisionTransformer(image_size=self.ex_input_size,\n patch_size=vit_settings['patch_size'],\n num_layers=vit_settings['num_layers'],\n num_classes=num_classes,\n d_model=vit_settings['d_model'],\n num_heads=vit_settings['num_heads'],\n mlp_dim=vit_settings['mlp_dim'],\n channels=self.ex_channels,\n dropout=vit_settings['dropout']).build_VisionTransformer()\n model.load_weights(self.path_to_weights)\n model = Model(model.input, model.get_layer(self.ex_last_conv_layer_name2).output)\n model.summary()\n return model", "def build_model():\n mdl = Sequential()\n\n # normalization\n mdl.add(Lambda(lambda x: x/128. 
- 1, input_shape=IMAGE_SHAPE, name=\"input\"))\n\n # trim image\n mdl.add(Lambda(lambda x: x[:, 10:-10, :, :]))\n\n # convolutions\n mdl.add(Convolution2D(24, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(36, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(48, 5, 5, subsample=(2, 2), border_mode='same',))\n mdl.add(MaxPooling2D(pool_size=(2, 2)))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same',))\n mdl.add((Dropout(0.5)))\n mdl.add(Activation('relu'))\n\n mdl.add(Flatten())\n\n mdl.add(Dense(128, activation='relu'))\n mdl.add(Dense(64, activation='relu'))\n mdl.add(Dense(1, name=\"output\"))\n\n mdl.summary()\n\n return mdl", "def c3d(self):\n model = Sequential()\n # 1st layer group\n model.add(Conv3D(64, 3, 3, 3, activation='relu',\n border_mode='same', name='conv1',\n subsample=(1, 1, 1),\n input_shape=self.input_shape))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),\n border_mode='valid', name='pool1'))\n # 2nd layer group\n model.add(Conv3D(128, 3, 3, 3, activation='relu',\n border_mode='same', name='conv2',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool2'))\n # 3rd layer group\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool3'))\n # 4th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool4'))\n\n # 5th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5b',\n subsample=(1, 1, 1)))\n model.add(ZeroPadding3D(padding=(0, 1, 1)))\n # model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n # border_mode='valid', name='pool5', dim_ordering=\"tf\"))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='same', name='pool5', dim_ordering=\"tf\"))\n model.add(Flatten())\n\n # FC layers group\n model.add(Dense(4096, activation='relu', name='fc6'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu', name='fc7'))\n model.add(Dropout(0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n for layer in model.layers:\n print(layer.output_shape)\n return model", "def genModel():\n inp = (160, 320, 3) # initial image size\n oup1 = (160, 320, 1) # gray image size\n oup2 = (80, 320, 1) # cropped image size\n\n model = Sequential()\n model.add(Lambda(color2gray, input_shape = inp, output_shape= oup1))\n # crop top 50 pixels, bottom 30 pixels, left/right 0 pixels\n 
model.add(Cropping2D(cropping=((50,30), (0,0))))\n    # Preprocess incoming data, centered around zero with small standard deviation \n    model.add(Lambda(lambda x: x/127.5 - 1., output_shape= oup2))\n    model.add(Convolution2D(24,5,5,subsample=(1,2), activation=\"relu\"))\n    model.add(Convolution2D(36,5,5,subsample=(2,2), activation=\"relu\"))\n    model.add(Convolution2D(48,5,5,subsample=(2,2), activation=\"relu\"))\n    model.add(Convolution2D(64,3,3, activation=\"relu\"))\n    model.add(Convolution2D(64,3,3, activation=\"relu\"))\n    model.add(Flatten())\n    model.add(Dropout(0.3))\n    model.add(Dense(180, activation=\"relu\"))\n    model.add(Dense(60))\n    model.add(Dense(10, activation=\"relu\"))\n    model.add(Dense(1))\n    # print layer size for each model layer\n    for layer in model.layers:\n        print(layer.get_output_at(0).get_shape().as_list())\n    return model", "def train_CNN(self,member,input_data):\n        trainX,trainY,validX,validY = input_data\n        \n        print('\\nTraining {0} models'.format(member))\n        print('Training data shape {0}'.format(np.shape(trainX)))\n        print('Training label data shape {0}\\n'.format(np.shape(trainY)))\n        print('Validation data shape {0}'.format(np.shape(validX)))\n        print('Validation label data shape {0}\\n'.format(np.shape(validY)))\n        \n        \n        model_file = self.model_path + f'/{member}_{self.model_args}_CNN_model.h5'\n        print(model_file)\n        if not os.path.exists(model_file):\n            # Clear graphs\n            tf.keras.backend.clear_session()\n            \n            #Initialize Convolutional Neural Net (CNN)\n            model = models.Sequential()\n            input_shape = np.shape(trainX[0])\n            \n            #First layer: input shape (y,x,# variables) \n            #Add noise\n            model.add(layers.GaussianNoise(0.01, input_shape=(input_shape)))\n            for filters in [32,64,128]:\n                model.add(layers.Conv2D(filters, (3,3),padding='same'))\n                model.add(layers.Conv2D(filters, (3,3),padding='same'))\n                model.add(layers.BatchNormalization())\n                model.add(layers.LeakyReLU(alpha=0.3))\n                model.add(layers.MaxPooling2D())\n            \n            #Flatten the last convolutional layer \n            model.add(layers.Flatten())\n            model.add(layers.Dense(256))\n            model.add(layers.LeakyReLU(alpha=0.3))\n            model.add(layers.Dense(4,activation='softmax'))\n            #Compile neural net\n            model.compile(optimizer='adam',loss='categorical_crossentropy',\n                metrics=[tf.keras.metrics.AUC()])\n            print(model.summary())\n            #fit neural net\n            n_epochs = 10\n            bs = 256\n\n            #augment data\n            aug = ImageDataGenerator(\n                rotation_range=10,zoom_range=0.15,\n                width_shift_range=0.2,height_shift_range=0.2,\n                fill_mode=\"nearest\")\n            \n            train_generator = aug.flow(trainX,trainY,batch_size=bs)\n            conv_hist = model.fit(\n                train_generator,steps_per_epoch=len(trainX) // bs,\n                epochs=n_epochs,verbose=1,class_weight=self.class_percentages)\n            #save trained model\n            model.save(model_file)\n            print(f'Writing out {model_file}')\n        else:\n            model = tf.keras.models.load_model(model_file)\n            print(f'\\nOpening {model_file}\\n')\n\n        del trainY,trainX\n        \n        threshold_file = self.model_path + f'/{member}_{self.model_args}_CNN_model_threshold.h5'\n        if os.path.exists(threshold_file): \n            del validX,validY\n            return\n        \n        self.validate_CNN(model,validX,validY,threshold_file)\n        return", "def create_model():\n    model = Sequential()\n\n    model.add(Lambda(lambda x: x/127.5-1.0, input_shape=INPUT_SHAPE))\n\n    # Now we are going to add some Convolution Layers identical to paper\n\n    model.add(Conv2D(24, (5, 5), activation='elu', strides=(2, 2)))\n    model.add(BatchNormalization()) \n    model.add(Conv2D(36, (5, 5), activation='elu', strides=(2, 2)))\n    model.add(BatchNormalization()) \n    model.add(Conv2D(48, (5, 5), 
activation='elu', strides=(2, 2)))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n model.add(BatchNormalization()) \n model.add(Conv2D(64, (3, 3), activation='elu'))\n\n # And now finally we will Flatten our layers and eventually use Fully Connected Layers to reduce features.\n\n model.add(Dropout(0.4))\n model.add(Flatten())\n\n model.add(Dense(256, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(100, activation='elu'))\n model.add(Dropout(0.2))\n model.add(Dense(25, activation='elu'))\n model.add(Dense(1))\n\n model.summary()\n\n return model", "def get_model():\n\n # Create a convolutional neural network\n model = tf.keras.models.Sequential([\n\n tf.keras.layers.Conv2D(32, (3,3), activation=\"relu\", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)), # Convolutional layer with 32 filters of a 3 x 3 kernel\n\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), # Max-pooling layer with a 2 x 2 pool size\n\n tf.keras.layers.Conv2D(64, (3, 3), activation=\"relu\"), # Convolutional layer with 64 filters of a 3 x 3 kernel\n \n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)), # Max-pooling layer with a 2 x 2 pool size\n\n tf.keras.layers.Flatten(), # Flatten units\n\n tf.keras.layers.Dense(256, activation=\"relu\"), # Hidden layer with 256 neurons\n\n tf.keras.layers.Dropout(0.25), # Dropout layer with a rate of 0.25\n\n tf.keras.layers.Dense(NUM_CATEGORIES, activation=\"softmax\") # Output layer with an output unit for each image category\n ])\n\n # Compile model\n model.compile(\n optimizer=\"adam\",\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"]\n )\n\n return model", "def cnn_model_fn(features):\n print(\"features shape\", features.shape)\n\n input_layer = tf.reshape(features, [-1, 28, 28, 1])\n\n conv1 = tf.layers.conv2d(inputs=input_layer, filters=64, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu)\n print(conv1)\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, padding=\"same\")\n print(pool1)\n conv2 = tf.layers.conv2d(inputs=pool1, filters=128, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu)\n print(conv2)\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2, padding=\"same\")\n print(pool2)\n conv3 = tf.layers.conv2d(inputs=pool2, filters=256, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu)\n print(conv3)\n conv4 = tf.layers.conv2d(inputs=conv3, filters=256, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu)\n print(conv4)\n pool3 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2, padding=\"same\")\n print(pool3)\n pool3_flat = tf.reshape(pool3, [-1, 4 * 4 * 256])\n print(pool3_flat)\n fc1 = tf.layers.dense(inputs=pool3_flat, units=1024, activation=tf.nn.relu)\n print(fc1)\n fc2 = tf.layers.dense(inputs=fc1, units=1024, activation=tf.nn.relu)\n print(fc2)\n fc2_bn = tf.nn.batch_normalization(x=fc2, mean=0, variance=1, scale=1, offset=0, variance_epsilon=1e-6)\n print(fc2_bn)\n fc3 = tf.layers.dense(inputs=fc2_bn, units=10)\n print(fc3)\n return fc3", "def compile_model_cnn(genome, nb_classes, input_shape):\n # Get our network parameters.\n nb_layers = genome.geneparam['nb_layers' ]\n nb_neurons = genome.nb_neurons()\n activation = genome.geneparam['activation']\n optimizer = genome.geneparam['optimizer' ]\n\n logging.info(\"Architecture:%s,%s,%s,%d\" % (str(nb_neurons), activation, optimizer, nb_layers))\n\n model = Sequential()\n\n # Add each layer.\n for i in range(0,nb_layers):\n # Need input shape for first 
layer.\n if i == 0:\n model.add(Conv2D(nb_neurons[i], kernel_size = (3, 3), activation = activation, padding='same', input_shape = input_shape))\n else:\n model.add(Conv2D(nb_neurons[i], kernel_size = (3, 3), activation = activation))\n \n if i < 2: #otherwise we hit zero\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Dropout(0.2))\n\n model.add(Flatten())\n # always use last nb_neurons value for dense layer\n model.add(Dense(nb_neurons[len(nb_neurons) - 1], activation = activation))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes, activation = 'softmax'))\n\n #BAYESIAN CONVOLUTIONAL NEURAL NETWORKS WITH BERNOULLI APPROXIMATE VARIATIONAL INFERENCE\n #need to read this paper\n\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n\n return model", "def neural_network():\n model = Sequential()\n model.add(Conv2D(64, kernel_size=3, activation=\"relu\", input_shape=(28, 28, 1)))\n model.add(Conv2D(64, kernel_size=3, activation=\"relu\"))\n model.add(Flatten())\n model.add(Dense(10, activation=\"softmax\"))\n model.compile(optimizer='adam', loss='categorical_crossentropy')\n\n return model", "def setup_model(self) -> (nn.Module, int):", "def cnn_model_fn(features, labels, mode,params):\n\n # Input Layer\n input_layer = tf.reshape(features[\"x\"], params['input_layer_dim'] )\n\n # Convolutional Layer #1\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=32,\n kernel_size=params['conv_dim'],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Q3\n # Norm Layer #1\n norm1 = tf.layers.batch_normalization(inputs=conv1)\n\n # Pooling Layer #1\n pool1 = tf.layers.max_pooling2d(inputs=norm1, pool_size=params['pool_dim'], strides=2)\n\n # Convolutional Layer #2 and Pooling Layer #2\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=params['conv_dim'],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Q3\n # Norm Layer #2\n norm2 = tf.layers.batch_normalization(\n inputs=conv2)\n\n pool2 = tf.layers.max_pooling2d(inputs=norm2, pool_size=params['pool_dim'], strides=2)\n\n # Dense Layer 1\n pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])\n dense = tf.layers.dense(inputs=pool2_flat, units=params['dense_units'], activation=tf.nn.relu)\n\n # Q3\n # Dropout Layer\n dropout = tf.layers.dropout(\n inputs=dense, rate=params['dropout_rate'])\n\n # Dense Layer 2\n dense2 = tf.layers.dense(inputs=dropout, units=params['dense_units'], activation=tf.nn.relu)\n\n # Logits Layer\n logits = tf.layers.dense(inputs=dense2, units=10)\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\n\n predictions = {\n \"classes\": tf.argmax(input=logits, axis=1),\n \"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\"),\n \"loss\" : tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n estimatorSpec = tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n # Configure the Training Op (for TRAIN mode)\n elif mode == tf.estimator.ModeKeys.TRAIN:\n global_step = tf.Variable(0, trainable=False)\n starter_learning_rate = params['learning_rate']\n\n #Q5b\n if params['optimizer'] == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate=starter_learning_rate)\n else:\n # Q5a\n learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,\n params['lr_reduce_every_n'], params['lr_reduce_by'], staircase=True)\n optimizer = 
tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n #Q1e\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n\n tensors_to_log = {\"loss\": loss}\n logging_hook = tf.train.LoggingTensorHook(\n tensors=tensors_to_log, every_n_iter=params['iter_prints'])\n\n estimatorSpec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op, training_hooks=[logging_hook])\n accuracy = tf.metrics.accuracy(labels=labels, predictions=tf.argmax(input=logits, axis=1))\n tf.summary.scalar('train_accuracy', accuracy[1])\n # Add evaluation metrics (for EVAL mode)\n else:\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n estimatorSpec= tf.estimator.EstimatorSpec( mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)\n\n return estimatorSpec", "def build_cnn_classification_model(self, input_shape):\n\n input_image = Input(shape=input_shape)\n\n conv_1 = Conv2D(32, (3, 3), activation='relu')(input_image)\n pool_1 = MaxPooling2D((2, 2))(conv_1)\n\n conv_2 = Conv2D(32, (3, 3), activation='relu')(pool_1)\n pool_2 = MaxPooling2D((2, 2))(conv_2)\n\n conv_3 = Conv2D(64, (3, 3), activation='relu')(pool_2)\n pool_3 = MaxPooling2D((2, 2))(conv_3)\n\n flatten = Flatten()(pool_3)\n dense = Dense(64, activation='relu')(flatten)\n dropout = Dropout(0.5)(dense)\n\n prediction = Dense(1, activation='sigmoid')(dropout)\n\n cnn_classification_model = Model(inputs=input_image, outputs=prediction)\n\n cnn_classification_model.compile(loss=self._loss_function, optimizer='rmsprop', metrics=['accuracy'])\n\n return cnn_classification_model", "def model_build(self):\n\n # Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!\n X_input = Input(self.inputData[0].shape)\n\n '''\n # CONV -> BN -> RELU Block applied to X\n X = Conv2D(8, (8, 8), name='conv0')(X_input)\n X = BatchNormalization(name='bn0')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool0')(X)\n X = Dropout(0.1, name='dropout0')(X)\n\n X = Conv2D(16, (16, 16), name='conv1')(X)\n X = BatchNormalization(name='bn1')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool1')(X)\n X = Dropout(0.1, name='dropout1')(X)\n\n X = Conv2D(16, (32, 32), name='conv2')(X)\n X = BatchNormalization(name='bn2')(X)\n X = Activation('relu')(X)\n X = MaxPooling2D((2, 4), name='max_pool2')(X)\n X = Dropout(0.1, name='dropout2')(X)\n' '''\n\n X = Dense(500, activation='relu', name='fc0')(X_input)\n X = Dropout(0.1, name='dropout1')(X)\n X = Dense(500, activation='relu', name='fc1')(X)\n X = Dropout(0.1, name='dropout2')(X)\n X = Dense(3, activation='softmax', name='fc2')(X)\n\n # Create model. 
This creates your Keras model instance, you'll use this instance to train/test the model.\n self.model = Model(inputs=X_input, outputs=X, name='acouModel')", "def create_model():\n model = tf.keras.Sequential([\n Conv2D(256, 5, input_shape=[28, 28, 1]),\n BatchNormalization(),\n Activation('relu'),\n \n Conv2D(256, 5),\n BatchNormalization(),\n Activation('relu'),\n \n Conv2D(256, 5),\n MaxPooling2D(2),\n BatchNormalization(),\n Activation('relu'),\n \n Flatten(),\n Dense(256),\n Dropout(.2),\n \n Dense(10),\n Activation('softmax')\n ])\n \n return model", "def model(self, input_shape=(3, 224, 224), nclasses=3):\n inputs = Input(input_shape)\n\n pad1_1 = ZeroPadding2D(padding=(1, 1))(inputs)\n conv1_1 = Conv2D(64, 3, 3, activation='relu', name='conv1_1')(pad1_1)\n pad1_2 = ZeroPadding2D(padding=(1, 1))(conv1_1)\n conv1_2 = Conv2D(64, 3, 3, activation='relu', name='conv1_2')(pad1_2)\n pool1 = MaxPooling2D((2, 2), strides=(2, 2))(conv1_2)\n\n pad2_1 = ZeroPadding2D((1, 1))(pool1)\n conv2_1 = Conv2D(128, 3, 3, activation='relu', name='conv2_1')(pad2_1)\n pad2_2 = ZeroPadding2D((1, 1))(conv2_1)\n conv2_2 = Conv2D(128, 3, 3, activation='relu', name='conv2_2')(pad2_2)\n pool2 = MaxPooling2D((2, 2), strides=(2, 2))(conv2_2)\n\n pad3_1 = ZeroPadding2D((1, 1))(pool2)\n conv3_1 = Conv2D(256, 3, 3, activation='relu', name='conv3_1')(pad3_1)\n pad3_2 = ZeroPadding2D((1, 1))(conv3_1)\n conv3_2 = Conv2D(256, 3, 3, activation='relu', name='conv3_2')(pad3_2)\n pad3_3 = ZeroPadding2D((1, 1))(conv3_2)\n conv3_3 = Conv2D(256, 3, 3, activation='relu', name='conv3_3')(pad3_3)\n pool3 = MaxPooling2D((2, 2), strides=(2, 2))(conv3_3)\n\n pad4_1 = ZeroPadding2D((1, 1))(pool3)\n conv4_1 = Conv2D(512, 3, 3, activation='relu', name='conv4_1')(pad4_1)\n pad4_2 = ZeroPadding2D((1, 1))(conv4_1)\n conv4_2 = Conv2D(512, 3, 3, activation='relu', name='conv4_2')(pad4_2)\n pad4_3 = ZeroPadding2D((1, 1))(conv4_2)\n conv4_3 = Conv2D(512, 3, 3, activation='relu', name='conv4_3')(pad4_3)\n pool4 = MaxPooling2D((2, 2), strides=(2, 2))(conv4_3)\n\n pad5_1 = ZeroPadding2D((1, 1))(pool4)\n conv5_1 = Conv2D(512, 3, 3, activation='relu', name='conv5_1')(pad5_1)\n pad5_2 = ZeroPadding2D((1, 1))(conv5_1)\n conv5_2 = Conv2D(512, 3, 3, activation='relu', name='conv5_2')(pad5_2)\n pad5_3 = ZeroPadding2D((1, 1))(conv5_2)\n conv5_3 = Conv2D(512, 3, 3, activation='relu', name='conv5_3')(pad5_3)\n pool5 = MaxPooling2D((2, 2), strides=(2, 2))(conv5_3)\n\n # fc5 = base_model.layers[-8].output\n fc6 = Flatten()(pool5)\n fc7_1 = Dense(256, activation='relu', name='fc7_1')(fc6)\n dropout7_1 = Dropout(0.3, name='dropout7_1')(fc7_1)\n fc7_2 = Dense(128, activation='relu', name='fc7_2')(dropout7_1)\n # dropout7_2 = Dropout(0.2, name='dropout7_2')(fc7_2)\n # fc7_3 = Dense(128, activation=\"relu\", name=\"fc7_pre3\")(dropout7_2)\n prediction = Dense(nclasses, activation='softmax')(fc7_2)\n model = Model(inputs, prediction)\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n # model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model", "def compile_model(self, data):\n model=Sequential()\n\n model.add(Conv2D(self.conv_1_mapnum, \n (self.filter_width, self.filter_width),\n input_shape=data.shape[1:], \n strides=self.strides_len,\n padding='same', data_format='channels_last',\n dilation_rate=1, activation=self.acti_1_func, use_bias=True, \n kernel_initializer='glorot_uniform', bias_initializer='zeros', \n kernel_regularizer=l2(0.001), bias_regularizer=None, \n 
activity_regularizer=None, kernel_constraint=None, \n bias_constraint=None))\n\n if self.batch_norm:\n model.add(BatchNormalization(axis=3, momentum=0.99, epsilon=0.001, \n center=True, scale=True, \n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', \n moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None,\n beta_constraint=None, gamma_constraint=None))\n\n if self.spatial_drop:\n model.add(SpatialDropout2D(rate=self.spatial_drop_perc, data_format='channels_last'))\n\n if self.pool_method=='mean':\n model.add(AveragePooling2D(pool_size=(2, 2), strides=None, padding='same', \n data_format='channels_last'))\n if self.pool_method=='max':\n model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same', \n data_format='channels_last'))\n \n model.add(Conv2D(self.conv_2_mapnum, \n (self.filter_width, self.filter_width),\n strides=self.strides_len,\n padding='same', data_format='channels_last',\n dilation_rate=1, activation=self.acti_2_func, use_bias=True, \n kernel_initializer='glorot_uniform', bias_initializer='zeros', \n kernel_regularizer=l2(0.001), bias_regularizer=None, \n activity_regularizer=None, kernel_constraint=None, \n bias_constraint=None))\n\n if self.batch_norm:\n model.add(BatchNormalization(axis=3, momentum=0.99, epsilon=0.001, \n center=True, scale=True, \n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', \n moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None,\n beta_constraint=None, gamma_constraint=None))\n\n if self.spatial_drop:\n model.add(SpatialDropout2D(rate=self.spatial_drop_perc, data_format='channels_last'))\n\n if self.pool_method=='mean':\n model.add(AveragePooling2D(pool_size=(2, 2), strides=None, padding='same', \n data_format='channels_last'))\n if self.pool_method=='max':\n model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same', \n data_format='channels_last'))\n\n model.add(Conv2D(self.conv_3_mapnum, \n (self.filter_width, self.filter_width),\n strides=self.strides_len,\n padding='same', data_format='channels_last',\n dilation_rate=1, activation=self.acti_3_func, use_bias=True, \n kernel_initializer='glorot_uniform', bias_initializer='zeros', \n kernel_regularizer=l2(0.001), bias_regularizer=None, \n activity_regularizer=None, kernel_constraint=None, \n bias_constraint=None))\n\n if self.batch_norm:\n model.add(BatchNormalization(axis=3, momentum=0.99, epsilon=0.001, \n center=True, scale=True, \n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', \n moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None,\n beta_constraint=None, gamma_constraint=None))\n\n if self.spatial_drop:\n model.add(SpatialDropout2D(rate=self.spatial_drop_perc, data_format='channels_last'))\n\n if self.pool_method=='mean':\n model.add(AveragePooling2D(pool_size=(2, 2), strides=None, padding='same', \n data_format='channels_last'))\n if self.pool_method=='max':\n model.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='same', \n data_format='channels_last'))\n \n model.add(Flatten())\n \n if self.additional_dense:\n model.add(Dense(units=self.additional_dense_units, activation=self.additional_dense_activation))\n\n if self.batch_norm:\n model.add(BatchNormalization(axis=1, momentum=0.99, epsilon=0.001, \n center=True, scale=True, \n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', \n moving_variance_initializer='ones',\n 
beta_regularizer=None, gamma_regularizer=None,\n beta_constraint=None, gamma_constraint=None))\n if self.spatial_drop:\n model.add(Dropout(rate=self.spatial_drop_perc))\n \n model.add(Dense(units=self.denseshape, activation=self.output_activation))\n\n model.compile(optimizer=Adam(lr=self.learning_rate), loss=self.loss_func, metrics=['accuracy', 'mean_squared_error', 'mean_absolute_error'])\n print(model.summary())\n return model", "def buildClassifier(input_shape=(100, 100, 3)):\n # Initialising the CNN\n classifier = Sequential()\n classifier.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding='same'))\n classifier.add(MaxPooling2D(pool_size=(3, 3), padding='same'))\n classifier.add(Dropout(0.5)) # added extra Dropout layer\n classifier.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\n classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n classifier.add(Conv2D(128, (3, 3), padding='same', activation='relu'))\n classifier.add(Dropout(0.5)) # added extra dropout layer\n classifier.add(Conv2D(256, (3, 3), activation='relu', padding='same'))\n classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n classifier.add(Dropout(0.2)) # antes era 0.25\n classifier.add(Conv2D(512, (3, 3), padding='same', activation='relu'))\n classifier.add(Conv2D(1024, (3, 3), activation='relu', padding='same'))\n classifier.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n classifier.add(Dense(units=1024, activation='relu')) # added new dense layer\n classifier.add(Dropout(0.2)) # antes era 0.25\n # Step 3 - Flattening\n classifier.add(Flatten())\n classifier.add(Dense(units=1024, activation='relu')) # added new dense layer\n classifier.add(Dense(units=256, activation='relu')) # added new dense layer\n # Step 4 - Full connection\n classifier.add(Dropout(0.2))\n classifier.add(Dense(units=1, activation='sigmoid'))\n classifier.summary()\n\n # Compiling the CNN\n classifier.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n plot_model(classifier, to_file='model_plot.png', show_shapes=True, show_layer_names=True)\n return classifier", "def nn_model():\n seed = 321\n np.random.seed(seed)\n rmsprop = RMSprop(lr=0.0001)\n # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n # kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)\n # for train, test in kfold.split(X, y):\n model_nn = Sequential()\n model_nn.add(Dense(100, input_shape=(117,), activation='relu'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(125, activation='relu', kernel_initializer='normal'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(30, activation='relu', kernel_initializer='normal'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(1, activation='sigmoid'))#softmax\n model_nn.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=rmsprop)\n #model_nn.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer=rmsprop)\n # Compile model\n model_nn.compile(optimizer=rmsprop, loss='binary_crossentropy', metrics=['accuracy'])\n return model_nn", "def __init__(self):\n self.model = Sequential()\n self.model.add(AveragePooling2D(pool_size=(4, 4), input_shape=(224, 224, 3)))\n self.model.add(Conv2D(16, (9, 9)))\n self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Conv2D(16, (5, 5)))\n self.model.add(Activation('relu'))\n self.model.add(MaxPooling2D(pool_size=(2, 2)))\n self.model.add(Flatten())\n self.model.add(Dropout(0.5))\n 
self.model.add(Dense(1, activation='sigmoid'))\n self.model.compile(loss=binary_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])", "def build(self, hp):\n\n model = Sequential()\n model.add(Conv2D(filters=hp.Choice('num_filters_0', values=[8, 16, 32, 64]),\n kernel_size=hp.Choice('kernel_size_0', values=[3, 4, 5]),\n activation=hp.Choice('activation_0', values=['relu', 'tanh']),\n input_shape=self.input_shape))\n\n for i in range(hp.Int('num_layers', 1, 3)):\n model.add(Conv2D(filters=hp.Choice('num_filters_%d' % (i + 1), values=[8, 16, 32, 64]),\n kernel_size=hp.Choice('kernel_size_%d' % (i + 1), values=[3, 4, 5]),\n activation=hp.Choice('activation_%d' % (i + 1), values=['relu', 'tanh'])))\n model.add(Flatten())\n model.add(Dense(N_zern))\n model.summary()\n\n model.compile(optimizer=keras.optimizers.Adam(hp.Choice('learning_rate', values=[1e-3, 5e-4, 1e-4])),\n loss='mean_squared_error')\n return model", "def model_create_cnn(input_dim, output_dim, n_features, n_houses, x_train, y_train, x_test, y_test, early=None):\r\n model = Sequential()\r\n for _ in range(nn_hparams['num_layers']):\r\n model.add(Conv1D(filters=nn_hparams['filters'], kernel_size=nn_hparams['kernel_size'], activation=nn_hparams['activation'], input_shape=(input_dim,n_features)))\r\n model.add(MaxPooling1D(pool_size=nn_hparams['pool_size']))\r\n model.add(Flatten())\r\n model.add(Dropout(nn_hparams['dropout']))\r\n model.add(Dense(nn_hparams['units'], activation=nn_hparams['activation']))\r\n model.add(Dropout(nn_hparams['dropout']))\r\n model.add(Dense(nn_hparams['units'], activation=nn_hparams['activation']))\r\n model.add(Dropout(nn_hparams['dropout']))\r\n model.add(Dense(y_train.shape[1]*y_train.shape[2]))\r\n custom_optimizer = getattr(optimizers, nn_hparams['optimizer'])(lr=nn_hparams['learning_rate'], beta_1=nn_hparams['beta_1'], beta_2=nn_hparams['beta_2'])\r\n model.compile(optimizer=custom_optimizer, loss=nn_hparams['loss'])\r\n y_train = y_train.reshape((y_train.shape[0], y_train.shape[1]*y_train.shape[2]))\r\n y_test = y_test.reshape((y_test.shape[0], y_test.shape[1]*y_test.shape[2]))\r\n if early:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1, callbacks=[early])\r\n else:\r\n model.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=nn_hparams['epochs'], verbose=1)\r\n model_loss = model.evaluate(x_train, y_train, verbose=0)\r\n \r\n return model, model_loss", "def keras_sequential_conv_net():\n model = tf.keras.Sequential([\n tf.keras.layers.Input(shape=(28, 28, 3)),\n tf.keras.layers.Conv2D(4, kernel_size=3, activation=None),\n tf.keras.layers.BatchNormalization(),\n tf.keras.layers.Activation(\"relu\"),\n tf.keras.layers.AvgPool2D(),\n tf.keras.layers.Dense(10)\n ])\n return model", "def get_model_2(parameters):\n # Parameters\n BANDS = parameters['num_features']\n CLASSES = parameters['num_classes']\n \n # Sequential model\n model = keras.models.Sequential()\n \n # Add convolution (1)\n # -------------------\n # Input_shape (batch, rows, cols, channels) = (-, 9, BANDS, 1)\n # Output_shape (batch, rows, cols, channels) = (-, BANDS - 15, 1, 32)\n model.add(keras.layers.Conv2D(filters=32,\n kernel_size=(9, 16),\n padding='same',\n data_format=\"channels_last\",\n activation='tanh',\n input_shape=(9, BANDS,1)))\n \n # Add convolution (2)\n # -------------------\n # Input_shape (batch, rows, cols, channels) = (-, BANDS - 15, 1, 32)\n # Output_shape (batch, rows, cols, channels) = (-, BANDS - 30, 1, 32)\n 
model.add(keras.layers.Conv2D(filters=32,\n kernel_size=(1, 16),\n padding='same',\n data_format=\"channels_last\",\n activation='tanh'))\n \n # Add convolution (3)\n # -------------------\n # Input_shape (batch, rows, cols, channels) = (-, BANDS - 30, 1, 32)\n # Output_shape (batch, rows, cols, channels) = (-, BANDS - 45, 1, 32)\n model.add(keras.layers.Conv2D(filters=32,\n kernel_size=(1, 16),\n padding='same',\n data_format=\"channels_last\",\n activation='tanh'))\n \n # Flatten before dense layer\n model.add(keras.layers.Flatten())\n \n # Add fully connected (4)\n # -----------------------\n # Intput_shape (batch, rows, cols, channels) = (-, (BANDS - 45) x 1 x 32)\n # Output_shape (batch, dim) = (-, 800)\n model.add(keras.layers.Dense(units=800,\n activation='tanh'))\n \n # Add fully connected (5)\n # -----------------------\n # Intput_shape (batch, dim) = (-, 800)\n # Output_shape (batch, dim) = (-, 800)\n model.add(keras.layers.Dense(units=800,\n activation='softmax'))\n \n # Add fully connected to reduce to number of categories\n # -----------------------------------------------------\n # Intput_shape (batch, dim) = (-, 800)\n # Output_shape (batch, dim) = (-, CLASSES)\n model.add(keras.layers.Dense(units=CLASSES,\n activation='softmax'))\n \n # Compile model\n model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n \n # Print the model summary to output file\n # To print to stdout: model.summary()\n with open(OUTPUT_FILE, 'a') as f:\n # Pass the file handle in as a lambda function to make it callable\n model.summary(print_fn=lambda x: f.write(x + '\\n'))\n \n # Return the model\n return model", "def cnn_model_fn(features, labels, mode, num_classes=20):\n # Write this function\n # Input Layer\n input_layer = tf.reshape(features[\"x\"], [-1, 256, 256, 3])\n img_num = input_layer.get_shape().as_list()[0]\n input_image_layer = input_layer\n\n '''\n if img_num is not None:\n for img_idx in range(img_num):\n image = input_layer[img_idx,:]\n image = tf.random_crop(value = image, size = [224, 224, 3])\n image = tf.image.flip_left_right(image)\n image = tf.image.resize_image_with_crop_or_pad(image=image,target_height = 224, target_width = 224)\n input_image_layer.append(image)\n\n input_image_layer = tf.convert_to_tensor(input_image_layer, dtype=tf.float32)\n else:\n input_image_layer = input_layer\n print('img_num shape {}: input_layer is {} '.format(img_num, np.shape(input_layer.get_shape().as_list())))\n print(\"img_num is None\")\n '''\n\n # Convolutional Layer #1\n conv1_1 = tf.layers.conv2d(\n inputs=input_image_layer,\n filters=64,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n # pad = 1\n conv1_2 = tf.layers.conv2d(\n inputs=conv1_1,\n filters=64,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n pool1 = tf.layers.max_pooling2d(inputs=conv1_2, pool_size=[2, 2], strides=2)\n\n\n conv2_1 = tf.layers.conv2d(\n inputs=pool1,\n filters=128,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n\n conv2_2 = tf.layers.conv2d(\n inputs= conv2_1,\n filters=128,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n pool2 = tf.layers.max_pooling2d(inputs=conv2_2, pool_size=[2, 2], strides=2)\n\n conv3_1 = tf.layers.conv2d(\n inputs= pool2,\n filters=256,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv3_2 = tf.layers.conv2d(\n inputs= conv3_1,\n filters=256,\n 
kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv3_3 = tf.layers.conv2d(\n inputs= conv3_2,\n filters=256,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n pool3 = tf.layers.max_pooling2d(inputs=conv3_3, pool_size=[2, 2], strides=2)\n\n\n conv4_1 = tf.layers.conv2d(\n inputs= pool3,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv4_2 = tf.layers.conv2d(\n inputs= conv4_1,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv4_3 = tf.layers.conv2d(\n inputs= conv4_2,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n pool4 = tf.layers.max_pooling2d(inputs=conv4_3, pool_size=[2, 2], strides=2)\n\n conv5_1 = tf.layers.conv2d(\n inputs= pool4,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv5_2 = tf.layers.conv2d(\n inputs= conv5_1,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n conv5_3 = tf.layers.conv2d(\n inputs= conv5_2,\n filters=512,\n kernel_size=[3, 3],\n strides=(1, 1),\n padding=\"same\",\n activation=tf.nn.relu)\n\n pool5 = tf.layers.max_pooling2d(inputs=conv5_3, pool_size=[2, 2], strides=2)\n\n # Dense Layer\n pool5_shape = pool5.get_shape()\n pool5_list = pool5_shape.as_list()\n pool5_product = np.int32(pool5_list[1]*pool5_list[2]*pool5_list[3])\n pool5_flat = tf.reshape(pool5, [-1, pool5_product])\n\n dense6 = tf.layers.dense(inputs=pool5_flat, units=4096,activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(mean=0.0,stddev=0.01),bias_initializer=tf.zeros_initializer(),)\n dropout6 = tf.layers.dropout(\n inputs=dense6, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n\n dense7 = tf.layers.dense(inputs=dropout6, units= 4096, activation=tf.nn.relu, kernel_initializer=tf.random_normal_initializer(mean=0.0,stddev=0.01),\n bias_initializer=tf.zeros_initializer(),)\n dropout7 = tf.layers.dropout(\n inputs=dense7, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n # Logits Layer\n logits = tf.layers.dense(inputs=dropout7, units=20)\n\n\n predictions = {\n # Generate predictions (for PREDICT and EVAL mode)\n \"classes\": tf.argmax(input=logits, axis=1),\n # Add `softmax_tensor` to the graph. 
It is used for PREDICT and by the\n # `logging_hook`.\n \"probabilities\": tf.nn.sigmoid(logits, name=\"sigmoid_tensor\")\n }\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n\n # Calculate Loss (for both TRAIN and EVAL modes)\n\n loss = tf.identity(tf.losses.sigmoid_cross_entropy(multi_class_labels=labels,logits=logits))\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n grad_input = tf.gradients(loss,input_layer)\n grad_conv1_1 = tf.gradients(loss, conv1_1)\n grad_conv2_1 = tf.gradients(loss, conv2_1)\n grad_conv3_1 = tf.gradients(loss, conv3_1)\n grad_conv4_1 = tf.gradients(loss, conv4_1)\n grad_conv5_1 = tf.gradients(loss, conv5_1)\n grad_dense6 = tf.gradients(loss, dense6)\n grad_dense7 = tf.gradients(loss, dense7)\n\n starter_learning_rate = 0.001\n global_step = tf.train.get_global_step()\n learning_rate = tf.train.exponential_decay(learning_rate= starter_learning_rate, global_step = global_step,\n decay_steps = 100000, decay_rate= 0.5, staircase=True)\n optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n #tf.summary()\n # print(\"Training\")\n tf.summary.scalar(name= 'train_loss', tensor = loss )\n tf.summary.scalar(name= 'learning rate', tensor = learning_rate)\n tf.summary.histogram(name='grad_dense7', values=grad_input)\n tf.summary.histogram(name='grad_conv1_1', values= grad_conv1_1)\n tf.summary.histogram(name='grad_conv2_1', values=grad_conv2_1)\n tf.summary.histogram(name='grad_conv3_1', values=grad_conv3_1)\n tf.summary.histogram(name='grad_conv4_1', values=grad_conv4_1)\n tf.summary.histogram(name='grad_conv5_1', values=grad_conv5_1)\n tf.summary.histogram(name='grad_dense6', values=grad_dense6)\n tf.summary.histogram(name='grad_dense7', values=grad_dense7)\n\n tf.summary.image(name='image', tensor= input_layer)\n\n summary_hook = tf.train.SummarySaverHook(\n 10,\n output_dir='./models/03_VGG_Test0301',\n summary_op=tf.summary.merge_all())\n\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, train_op=train_op, training_hooks = [summary_hook])\n\n # Add evaluation metrics (for EVAL mode)\n eval_metric_ops = {\n \"accuracy\": tf.metrics.accuracy(\n labels=labels, predictions=predictions[\"classes\"])}\n return tf.estimator.EstimatorSpec(\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def get_model(img_width,img_height):\n \n model = Sequential()\n model.add(Convolution2D(32, 3, 3, input_shape=(img_width, img_height,3)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Convolution2D(32, 3, 3))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n last_conv_layer = Convolution2D(64, 3, 3)\n model.add(last_conv_layer)\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Flatten())\n model.add(Dense(64))\n model.add(Activation('relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n model.add(Activation('sigmoid'))\n \n model.compile(loss='binary_crossentropy',\n optimizer='rmsprop',\n metrics=['accuracy'])\n \n return model, last_conv_layer", "def _model_definition(self, net):\n \n # Input filtering and downsampling with max pooling\n print(net.shape) #channels must be specified first otherwise keras assumes channels last\n print('resnet17_scp')\n \n net = Conv2D( filters=128, kernel_size=5, activation=None, 
padding='same', \n data_format=\"channels_first\", input_shape=(1, 100, 100))(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3, activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels\n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n net = Conv2D( filters=64, kernel_size=3,activation=None, padding='same', data_format=\"channels_first\")(net)\n net = BatchNormalization(axis=1)(net) #axis is set to the dimension which hold the colour channels \n net = LeakyReLU()(net)\n net= MaxPooling2D(pool_size=(2,2))(net)\n \n\n\n \n return net", "def build(width, height, depth, classes):\n model = Sequential()\n inputShape = (width, height, depth)\n\n # defining 1st layer : conv => relu\n model.add(Conv2D(32, (3, 3), padding='same', input_shape=inputShape))\n model.add(Activation('relu'))\n\n # softmax classifier\n model.add(Flatten())\n model.add(Dense(classes, activation='softmax'))\n return model", "def build_CNNflexi_Modular(input):\n\n params_3D = input['params']\n sample_shape = input['sample_shape']\n inputA = Input(shape=sample_shape[1:])\n clf = params_3D['clf']\n\n # Model head\n for i_layers in range(0,params_3D['n_layers']):\n if i_layers == 0:\n x = conv3D_block(params_3D,inputA)\n else:\n x = conv3D_block(params_3D, x)\n\n # Classifiers\n if clf == 'dense':\n x = Flatten()(x)\n x = Dense(256, activation='relu', kernel_initializer='he_uniform',activity_regularizer=l1(params_3D['l1_den']))(x)\n x = Dense(128, activation='relu', kernel_initializer='he_uniform',activity_regularizer=l1(params_3D['l1_den']))(x)\n x = Dense(64, activation='relu', kernel_initializer='he_uniform',activity_regularizer=l1(params_3D['l1_den']))(x)\n elif clf == 'gap':\n x = GlobalAveragePooling3D()(x)\n else:\n raise('Invalid classifier choice!')\n\n\n model = Model(inputs=inputA, outputs=x)\n return model", "def CNN_model(ftrs, d, c, r):\n\n# Input Layer\n l1 = tf.reshape(ftrs, [-1, d, d, 1])\n\n# First 2D convolutional layer\n c1 = tf.layers.conv2d(\n inputs=l1,\n filters=d,\n kernel_size=[8, 8],\n padding=\"same\",\n activation=tf.nn.relu)\n\n# First max pooling layer (2,2)\n p1 = tf.layers.max_pooling2d(inputs=c1, pool_size=[2, 2], strides=2)\n\n# Second 2D convolution layer\n c2 = tf.layers.conv2d(\n inputs=p1,\n filters=2*d,\n kernel_size=[8, 8],\n padding=\"same\",\n activation=tf.nn.relu)\n\n# Second max pooling layer (2,2)\n p2 = tf.layers.max_pooling2d(inputs=c2, pool_size=[2, 2], strides=2)\n\n# Flatten Layer\n flat = tf.reshape(p2, [-1, 128*d])\n\n# Dense Layer\n dense = tf.layers.dense(inputs=flat, units=256, activation=tf.nn.relu)\n\n# Dropout layer\n fc = tf.layers.dropout(inputs=dense, rate=r, seed=None)\n\n# Return the fully connected Layer\n output = tf.layers.dense(inputs=fc, units=c)\n\n return output", "def classifier(model):\n \n model.classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(model.classifier[0].in_features, 4096)),\n ('fc2', nn.Linear(4096, 102)),\n ('relu', nn.ReLU()),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n return model", "def cnn_model_fn(features, labels, mode):\n # Input Layer\n # input layer should be of shape [:, NXCHANNELS, NVCHANNELS, 1]\n # NVCHANNELS: number of velocity bins\n \n NVCHANNELS=64\n NXCHANNELS=64\n \n input_layer = tf.reshape(features[\"x\"], 
[-1,NXCHANNELS, NVCHANNELS, 1])\n \n # Intermediate Layers are specified in different function \n if USE_TWO_LAYER:\n flat_layer = two_layers_cnn(input_layer) \n else:\n flat_layer = three_layers_cnn(input_layer)\n \n # Dense Layer\n # Densely connected layer with 1024 neurons\n # Input Tensor Shape: [batch_size, 4x4x32]\n # Output Tensor Shape: [batch_size, 32]\n dense = tf.layers.dense(inputs=flat_layer, units=32, activation=tf.nn.relu)\n \n # Add dropout operation; 0.7 probability that element will be kept\n dropout = tf.layers.dropout(\n inputs=dense, rate=0.3, training=mode == tf.estimator.ModeKeys.TRAIN)\n # Logits Layer\n \n # Logits layer\n # Input Tensor Shape: [batch_size, 1024]\n # Output Tensor Shape: [batch_size, 1]\n if PREDICT_BOTH:\n n_outputs = 2\n else:\n n_outputs = 1\n logits = tf.layers.dense(inputs=dropout, units=n_outputs)\n \n \n if PREDICT_BOTH:\n beta_pred = logits[:,0]\n gamma_pred = logits[:,1]\n predictions = { \"beta_pred\": beta_pred, \"gamma_pred\": gamma_pred }\n else:\n beta_pred = logits[:,0]\n predictions = {\"beta_pred\": beta_pred}\n\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n \n # Calculate Loss (for both TRAIN and EVAL modes) \n # Simple Mean Squared\n if USE_MEAN_SQUARED:\n if PREDICT_BOTH: \n loss_gamma = tf.losses.mean_squared_error( labels['gamma'] , predictions['gamma_pred'] )\n else:\n loss_gamma = 0.0\n loss_beta = tf.losses.mean_squared_error( labels['beta'] , predictions['beta_pred'] )\n loss = loss_beta + loss_gamma\n else:\n # Compute Weighted Loss\n # Fractional difference from its true value\n ones = tf.ones( tf.shape( labels['beta'] ) , dtype=tf.float64 )\n if PREDICT_BOTH:\n inverse_gamma = tf.div( ones, labels['gamma'] )\n loss_gamma = tf.losses.mean_squared_error( labels['gamma'], predictions['gamma_pred'], weights= inverse_gamma )\n else:\n loss_gamma = 0.0\n inverse_beta = tf.div( ones, labels['beta'] )\n loss_beta = tf.losses.mean_squared_error( labels['beta'], predictions['beta_pred'], weights= inverse_beta )\n loss = loss_beta + loss_gamma \n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n starter_learning_rate = 1.0e-3\n learning_rate = tf.train.exponential_decay(starter_learning_rate, \n tf.train.get_global_step(), 1000000, 0.96, staircase=True)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n train_op = optimizer.minimize(\n loss=loss,\n global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n if USE_MEAN_SQUARED:\n if PREDICT_BOTH: \n gamma_accuracy = tf.metrics.mean_squared_error( labels['gamma'] , predictions['gamma_pred'] )\n beta_accuracy = tf.metrics.mean_squared_error( labels['beta'] , predictions['beta_pred'] )\n else:\n # Compute Weighted Loss\n # Fractional difference from its true value\n ones = tf.ones( tf.shape( labels['beta'] ) , dtype=tf.float64 )\n if PREDICT_BOTH:\n inverse_gamma = tf.div( ones, labels['gamma'] )\n gamma_accuracy = tf.metrics.mean_squared_error( labels['gamma'], predictions['gamma_pred'], weights= inverse_gamma )\n inverse_beta = tf.div( ones, labels['beta'] )\n beta_accuracy = tf.metrics.mean_squared_error( labels['beta'], predictions['beta_pred'], weights= inverse_beta )\n\n\n if PREDICT_BOTH: \n eval_metric_ops = { \"beta_accuracy\": beta_accuracy, \"gamma_accuracy\": gamma_accuracy }\n else:\n eval_metric_ops = { \"beta_accuracy\": 
beta_accuracy } \n print(eval_metric_ops)\n\n return tf.estimator.EstimatorSpec( mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def create_basic_cnn_model(num_classes: int):\n model = Sequential()\n\n # Convolutional + spooling layers\n model.add(Conv2D(64, (5, 5), input_shape=(config.ROI_IMG_SIZE['HEIGHT'], config.ROI_IMG_SIZE['WIDTH'], 1)))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n model.add(Conv2D(32, (5, 5), padding='same'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2)))\n model.add(Flatten())\n\n # Dropout\n model.add(Dropout(0.5, seed=config.RANDOM_SEED, name=\"Dropout_1\"))\n\n # FC\n model.add(Dense(1024, activation='relu', name='Dense_2'))\n\n # Output\n if num_classes == 2:\n model.add(Dense(1, activation='sigmoid', kernel_initializer=\"random_uniform\", name='Output'))\n else:\n model.add(Dense(num_classes, activation='softmax', kernel_initializer=\"random_uniform\", name='Output'))\n\n # Print model details if running in debug mode.\n if config.verbose_mode:\n print(model.summary())\n\n return model", "def _build_model(self):\n \n #convolutional part\n conv_inputs = keras.Input(shape = self._state_shape[0])\n c1 = layers.Conv2D(filters = 4, kernel_size = 2, strides = (2,2), padding = \"same\", activation = 'relu')(conv_inputs)\n c2 = layers.Conv2D(filters = 8, kernel_size = 2, strides = (1,1), padding = \"same\", activation = 'relu')(c1)\n flat = layers.Flatten()(c2)\n\n\n #current green phase layer\n # phase_inputs = keras.Input(shape = (self._state_shape[1],))\n \n #elapsed green time layer\n elapsed_time_inputs = keras.Input(shape = (self._state_shape[2],))\n \n \n #combine elapsed time and green time layer\n # combined_green = layers.concatenate([phase_inputs, elapsed_time_inputs])\n # green_dense = layers.Dense(10, activation='relu')(elapsed_time_inputs)\n \n #combine green layer with conv layer\n all_combined = layers.concatenate([elapsed_time_inputs, flat])\n dense = layers.Dense(32, activation='relu')(all_combined)\n dense = layers.Dense(16, activation='relu')(dense)\n outputs = layers.Dense(self._output_dim, activation='linear')(dense)\n \n model = keras.Model(inputs = [conv_inputs, elapsed_time_inputs], outputs = outputs, name='simple_CNN') \n model.compile(loss=losses.mean_squared_error, optimizer=Adam(lr=self._learning_rate))\n \n return model", "def generate_cnn_model(num_classes, num_words):\n def cnn_model(features, target):\n # Create embeddings and map\n\n target = tf.one_hot(target, num_classes, 1, 0)\n word_vectors = tf.contrib.layers.embed_sequence(\n features, vocab_size=num_words, embed_dim=EMBEDDING_SIZE, scope='words')\n word_vectors = tf.expand_dims(word_vectors, 3)\n\n # First Layer here!!!!!!!\n with tf.variable_scope('CNN_MODEL_layer1'):\n # First layer convolution filtering on sequence\n conv1 = tf.contrib.layers.convolution2d(\n word_vectors, N_FILTERS, FILTER_SHAPE1, padding='VALID')\n # First layler adding a RELU for non linearity.\n conv1 = tf.nn.relu(conv1)\n # First layler Max pooling\n pool1 = tf.nn.max_pool(\n conv1,\n ksize=[1, POOLING_WINDOW, 1, 1],\n strides=[1, POOLING_STRIDE, 1, 1],\n padding='SAME')\n pool1 = tf.transpose(pool1, [0, 1, 3, 2])\n\n # Second Layer here!!!!!!!\n with tf.variable_scope('CNN_MODEL_layer2'):\n conv2 = tf.contrib.layers.convolution2d(\n pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')\n # Max across each filter to get useful features for classification.\n pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])\n\n # Fully_conncted pool2 and classes\n logits = 
tf.contrib.layers.fully_connected(pool2, num_classes, activation_fn=None)\n loss = tf.contrib.losses.softmax_cross_entropy(logits, target)\n\n train_op = tf.contrib.layers.optimize_loss(\n loss,\n tf.contrib.framework.get_global_step(),\n optimizer='Adam',\n learning_rate=LEARNING_RATE)\n\n return ({\n 'class': tf.argmax(logits, 1),\n 'prob': tf.nn.softmax(logits)\n }, loss, train_op)\n\n return cnn_model", "def make_cloning_model(input_shape=(66, 200, 3)):\n # Create the Sequential model\n print(\"input shape\", input_shape)\n model = Sequential()\n model.add(Lambda(lambda x: x / 128. - 1., output_shape=input_shape, input_shape=input_shape))\n add_conv_type1(model, 12, input_shape)\n add_conv_type1(model, 18)\n add_conv_type1(model, 24)\n add_conv_type2(model, 30)\n add_conv_type2(model, 30)\n model.add(Flatten(input_shape=(13, 33, 30)))\n model.add(Dense(2000, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(500, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(100, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1))\n return model", "def create_model(X, y, it=1, no_of_filters=32, kern_size=3,\n max_p_size=3, drop_perc_conv=0.3, drop_perc_dense=0.2,\n dens_size=128, val_split_perc=0.1, no_of_epochs=30,\n optimizer=\"adam\", random_search=False, batch_size=64):\n\n y_train_cat = to_categorical(y)\n\n model = Sequential()\n\n model.add(Conv2D(no_of_filters,\n kernel_size=(kern_size, kern_size),\n activation='relu',\n input_shape=(56, 56, 1),\n padding='same'))\n\n model.add(Conv2D(no_of_filters,\n kernel_size=(kern_size, kern_size),\n activation='relu',\n padding='same'))\n model.add(MaxPooling2D((max_p_size, max_p_size)))\n model.add(Dropout(drop_perc_conv))\n\n model.add(Conv2D(no_of_filters,\n kernel_size=(kern_size, kern_size),\n activation='relu',\n padding='same'))\n model.add(Conv2D(no_of_filters,\n kernel_size=(kern_size, kern_size),\n activation='relu',\n padding='same'))\n model.add(MaxPooling2D((max_p_size, max_p_size)))\n model.add(Dropout(drop_perc_conv))\n\n model.add(Flatten())\n\n model.add(Dense(dens_size, activation='relu'))\n model.add(Dropout(drop_perc_dense))\n\n model.add(Dense(36, activation='softmax'))\n\n model.compile(optimizer=optimizer,\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\n early_stopping_monitor = EarlyStopping(patience=5)\n rlrop = ReduceLROnPlateau(monitor='val_acc', factor=0.5,\n patience=3, verbose=1, min_lr=0.00001)\n\n history = model.fit(X,\n y_train_cat,\n validation_split=val_split_perc,\n epochs=no_of_epochs,\n callbacks=[early_stopping_monitor, rlrop],\n batch_size=batch_size)\n\n history_dict = history.history\n\n if random_search:\n\n np.save(r\"./models/random_search/hist/history_dict_{}.npy\".format(it),\n history_dict)\n model.save(r\"./models/random_search/models/CNN_{}.h5\".format(it))\n\n else:\n\n np.save(r\"./logs/history_dict_{}.npy\".format(it), history_dict)\n model.save(r\"./models/CNN_FF_{}.h5\".format(it))\n\n return history_dict", "def build_model(self) -> nn.Module:\n pass", "def count_model(input_shape):\n kernel_size = (3, 3)\n pool_size = (2, 2)\n first_filters = 32\n second_filters = 64\n third_filters = 128\n dropout_conv = 0.3\n\n model = Sequential()\n\n model.add(Conv2D(first_filters, kernel_size, activation='relu',\n input_shape=input_shape))\n\n model.add(ZeroPadding2D(padding=(3, 3), data_format=None))\n\n model.add(Conv2D(first_filters, kernel_size, activation='relu'))\n model.add(Conv2D(first_filters, kernel_size, activation='relu'))\n 
model.add(MaxPooling2D(pool_size=pool_size))\n model.add(Dropout(dropout_conv))\n\n model.add(Conv2D(second_filters, kernel_size, activation='relu'))\n model.add(Conv2D(second_filters, kernel_size, activation='relu'))\n model.add(Conv2D(second_filters, kernel_size, activation='relu'))\n model.add(MaxPooling2D(pool_size=pool_size))\n model.add(Dropout(dropout_conv))\n\n model.add(Conv2D(third_filters, kernel_size, activation='relu'))\n model.add(Conv2D(third_filters, kernel_size, activation='relu'))\n model.add(Conv2D(third_filters, kernel_size, activation='relu'))\n model.add(MaxPooling2D(pool_size=pool_size))\n model.add(Dropout(dropout_conv))\n\n model.add(Conv2D(third_filters, kernel_size, activation='relu'))\n model.add(Conv2D(third_filters, kernel_size, activation='relu'))\n model.add(Conv2D(third_filters, kernel_size, activation='relu'))\n model.add(MaxPooling2D(pool_size=pool_size))\n model.add(Dropout(dropout_conv))\n\n model.add(Flatten())\n\n model.add(Dense(1024))\n model.add(LeakyReLU())\n model.add(BatchNormalization())\n\n model.add(Dense(512))\n model.add(LeakyReLU())\n model.add(BatchNormalization())\n\n # set activation='relu' to keep all values positive\n model.add(Dense(1, activation='relu'))\n\n return model", "def char_cnn_model(x, y):\n y = tf.one_hot(y, NUMBER_OF_CATEGORIES, 1, 0)\n byte_list = tf.reshape(learn.ops.one_hot_matrix(x, 256),\n [-1, MAX_DOCUMENT_LENGTH, 256, 1])\n with tf.variable_scope('CNN_Layer1'):\n # Apply Convolution filtering on input sequence.\n conv1 = tf.contrib.layers.convolution2d(byte_list, N_FILTERS,\n FILTER_SHAPE1, padding='VALID')\n # Add a RELU for non linearity.\n conv1 = tf.nn.relu(conv1)\n # Max pooling across output of Convolution+Relu.\n pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],\n strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')\n # Transpose matrix so that n_filters from convolution becomes width.\n pool1 = tf.transpose(pool1, [0, 1, 3, 2])\n with tf.variable_scope('CNN_Layer2'):\n # Second level of convolution filtering.\n conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,\n FILTER_SHAPE2,\n padding='VALID')\n # Max across each filter to get useful features for classification.\n pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])\n\n # Apply regular WX + B and classification.\n prediction, loss = learn.models.logistic_regression(pool2, y)\n\n train_op = tf.contrib.layers.optimize_loss(\n loss, tf.contrib.framework.get_global_step(),\n optimizer='Adam', learning_rate=0.01)\n\n return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op", "def generate_model():\n model = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(\n 32,\n (3, 3),\n padding=\"same\",\n activation=\"relu\",\n input_shape=(IMG_SIZE, IMG_SIZE, IMG_CHANNELS)\n ),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Conv2D(32, (3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Conv2D(64, (3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Conv2D(64, (3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Conv2D(64, (3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Conv2D(128, (3, 3), activation=\"relu\"),\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\n\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dropout(0.3),\n 
tf.keras.layers.Dense(LABEL_COUNT, activation=\"softmax\")\n ])\n model.compile(\n optimizer=\"adam\",\n loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"]\n )\n return model", "def context(model: Sequential) -> Sequential:\n model.add(ZeroPadding2D(padding=(33, 33)))\n model.add(Conv2D(42, (3, 3), activation='relu', name='ct_conv1_1'))\n model.add(Conv2D(42, (3, 3), activation='relu', name='ct_conv1_2'))\n model.add(AtrousConvolution2D(84, 3, 3, atrous_rate=(2, 2), activation='relu', name='ct_conv2_1'))\n model.add(AtrousConvolution2D(168, 3, 3, atrous_rate=(4, 4), activation='relu', name='ct_conv3_1'))\n model.add(AtrousConvolution2D(336, 3, 3, atrous_rate=(8, 8), activation='relu', name='ct_conv4_1'))\n model.add(AtrousConvolution2D(672, 3, 3, atrous_rate=(16, 16), activation='relu', name='ct_conv5_1'))\n model.add(Conv2D(672, (3, 3), activation='relu', name='ct_fc1'))\n model.add(Conv2DTranspose(256, (2, 2), strides=(2, 2), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n model.add(Conv2D(256, (3, 3), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n model.add(Conv2DTranspose(128, (7, 7), strides=(7, 7), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n model.add(Conv2D(64, (3, 3), padding='same'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n \n # last conv\n model.add(Conv2D(1, (3, 3), activation='sigmoid', padding='same'))\n \n return model", "def sub_model_net(self):\r\n # define input\r\n x = keras.Input(shape=(960,), name='input')\r\n fc_2 = keras.layers.Dense(160, name='fc_2')(x)\r\n add_1 = keras.layers.Activation('relu')(fc_2)\r\n drop = keras.layers.Dropout(0.5)\r\n # output\r\n y_hat = keras.layers.Dense(1283, activation='softmax', name='output')(add_1)\r\n model = keras.Model(inputs=x, outputs=y_hat)\r\n\r\n return model", "def init_model(self) -> keras.Model:\n model_input = keras.Input(shape=(self.num_classes, self.nun_models))\n\n layer_out = Conv1D(64, kernel_size=self.num_classes, activation=\"sigmoid\")(\n model_input\n )\n layer_out = Dropout(0.2)(layer_out)\n\n layer_out = Dense(128)(layer_out)\n layer_out = Dropout(0.2)(layer_out)\n\n layer_out = Flatten()(layer_out)\n\n layer_out = Dense(128)(layer_out)\n layer_out = Dropout(0.2)(layer_out)\n output = Dense(self.num_classes, activation=\"softmax\")(layer_out)\n\n return keras.Model(inputs=model_input, outputs=output)", "def demoModel(dim, num_classes):\n import numpy as np\n from keras.models import Sequential, Model\n from keras.layers import Input\n from keras.layers import Conv2D, ZeroPadding2D, MaxPooling2D, Conv2DTranspose, Cropping2D\n from keras.layers import concatenate, UpSampling2D, Reshape\n import keras.backend as K\n\n # Build model\n input_image = Input(shape=(dim, dim, 3))\n\n conv = Conv2D(24, (3, 3), activation='relu', padding='same')(input_image)\n\n pool = MaxPooling2D((2, 2), strides=(2, 2), name=\"pool\")(conv)\n\n conv1x1 = Conv2D(24, (1, 1), padding='same', activation='relu')(pool)\n\n up = UpSampling2D(size=(2,2))(conv1x1)\n up_conv = Conv2D(24, 2, activation = 'relu', padding = 'same')(up)\n merge = concatenate([conv,up_conv], axis = 3)\n\n conv = Conv2D(12, 3, activation = 'relu', padding = 'same')(merge)\n\n activation = Conv2D(num_classes, (1, 1), activation = \"softmax\")(conv)\n\n # need to reshape for training\n output = Reshape((dim*dim, 3))(activation)\n\n model = Model(inputs=[input_image], outputs=output)\n\n model.summary()\n\n return 
model", "def build_model(keep_prob):\n model=Sequential()\n #normalization\n model.add(Lambda(lambda x: x/127.5-1.0, input_shape=glob_image_shape))\n #convolutional layers\n model.add(Conv2D(24, 5, 5, activation='elu', subsample=(2, 2)))\n model.add(Conv2D(36, 5, 5, activation='elu', subsample=(2, 2)))\n model.add(Conv2D(48, 5, 5, activation='elu', subsample=(2, 2)))\n model.add(Conv2D(64, 3, 3, activation='elu'))\n model.add(Conv2D(64, 3, 3, activation='elu'))\n #drop out to prevent over fitting\n model.add(Dropout(keep_prob))\n model.add(Flatten())\n #fully connected layers\n model.add(Dense(100, activation='elu'))\n model.add(Dense(50, activation='elu'))\n model.add(Dense(10, activation='elu'))\n model.add(Dense(1))\n return model", "def keras_functional_conv_net():\n inputs = tf.keras.layers.Input(shape=(28, 28, 3))\n x = tf.keras.layers.Conv2D(4, kernel_size=3, activation=None)(inputs)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation(\"relu\")(x)\n x = tf.keras.layers.Conv2D(16, kernel_size=3, activation=None)(x)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.PReLU()(x)\n x = tf.keras.layers.Conv2D(16, kernel_size=3, activation=None)(x)\n outputs = tf.keras.layers.Conv2D(32, kernel_size=3, activation=\"relu\")(x)\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n\n return model", "def main():\n nn = CarsClassifierModel()\n train_x, train_y, test_x, test_y = nn.load_data_preprocess()\n history = nn.run(train_x,train_y)\n nn.evaluate(test_x, test_y)\n nn.save(\"keras_nn_5\")\n #nn.plots(history)\n #print(train_x.shape)\n #plt.imshow(train_x[52])\n #plt.title(\"Car\")\n #plt.show()\n #print(train_y[52])", "def conv_3d(self):\n # Model.\n model = Sequential()\n model.add(Conv3D(32, (3,3,3), activation='relu', input_shape=self.input_shape))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n model.add(Conv3D(64, (3,3,3), activation='relu'))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n model.add(Conv3D(128, (3,3,3), activation='relu'))\n model.add(Conv3D(128, (3,3,3), activation='relu'))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n model.add(Conv3D(256, (2,2,2), activation='relu'))\n model.add(Conv3D(256, (2,2,2), activation='relu'))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2)))\n\n model.add(Flatten())\n model.add(Dense(1024))\n model.add(Dropout(0.5))\n model.add(Dense(1024))\n model.add(Dropout(0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n\n return model", "def build_critic_x(input_shape ,num_filters: int = 64, num_cnn_blocks: int = 4) -> tf.keras.Model:\n input = tf.keras.layers.Input(shape=input_shape, name=\"critic_x_input\")\n #create a convolutional layer with num_filters filters\n conv = tf.keras.layers.Conv1D(filters=num_filters, kernel_size=5)(input)\n conv = tf.keras.layers.LeakyReLU(alpha=0.2)(conv)\n conv = tf.keras.layers.Dropout(0.25)(conv)\n\n for _ in range(num_cnn_blocks):\n conv = tf.keras.layers.Conv1D(filters=num_filters, kernel_size=5, padding=\"same\")(conv)\n conv = tf.keras.layers.LeakyReLU(alpha=0.2)(conv)\n conv = tf.keras.layers.Dropout(0.25)(conv)\n\n #flatten the output and create a de#nse output layer\n conv = tf.keras.layers.Flatten()(conv)\n conv = tf.keras.layers.Dense(units=1)(conv)\n\n return tf.keras.Model(inputs=input, outputs=conv, name=\"critic_x\")", "def get_model(width=128, height=128, depth=64):\n inputs = keras.Input((width, height, depth, 1))\n x = layers.Conv3D(filters=64, 
kernel_size=3, activation=\"relu\")(inputs)\n x = layers.MaxPool3D(pool_size=2)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Conv3D(filters=64, kernel_size=3, activation=\"relu\")(x)\n x = layers.MaxPool3D(pool_size=2)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Conv3D(filters=128, kernel_size=3, activation=\"relu\")(x)\n x = layers.MaxPool3D(pool_size=2)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Conv3D(filters=256, kernel_size=3, activation=\"relu\")(x)\n x = layers.MaxPool3D(pool_size=2)(x)\n x = layers.BatchNormalization()(x)\n x = layers.Dense(units=512, activation=\"relu\")(x)\n x = layers.Dropout(0.3)(x)\n outputs = layers.Dense(3, activation=\"softmax\")(x)\n # Define the model.\n model = keras.Model(inputs, outputs, name=\"3dcnn\")\n return model", "def _model(self):\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(units=64, input_dim=self.state_size, activation=\"relu\"))\n\t\tmodel.add(Dense(units=32, activation=\"relu\"))\n\t\tmodel.add(Dense(units=16, activation=\"relu\"))\n\t\tmodel.add(Dense(units=8, activation=\"relu\"))\n\t\tmodel.add(Dense(self.action_size, activation=\"linear\"))\n\t\tmodel.compile(loss=\"mse\", optimizer=Adam(lr=self.learning_rate))\n\n\t\treturn model", "def build_model(embedding_size=512, attribute_vector_size=33, depth=5, \n\t\t\t\tscale_output=0.05, padding=False, \n\t\t\t\tmol_conv_inner_activation='tanh',\n mol_conv_outer_activation='softmax',\n\t\t\t\thidden=50, hidden_activation='tanh',\n\t\t\t\toutput_activation='linear', output_size=1, \n\t\t\t\tlr=0.01, optimizer='adam', loss='mse'):\n\t\n\tmodel = Sequential()\n\n\tmodel.add(MoleculeConv(units=embedding_size, \n\t\tinner_dim=attribute_vector_size-1, \n\t\tdepth=depth,\n\t\tscale_output=scale_output,\n\t\tpadding=padding,\n\t\tactivation_inner=mol_conv_inner_activation,\n\t\tactivation_output=mol_conv_outer_activation))\n\t\n\tlogging.info('cnn_model: added MoleculeConv layer ({} -> {})'.format('mol', embedding_size))\n\tif hidden > 0:\n\t\t\n\t\tmodel.add(Dense(hidden, activation=hidden_activation))\n\t\tlogging.info('cnn_model: added {} Dense layer (-> {})'.format(hidden_activation, hidden))\n\t\t\n\tmodel.add(Dense(output_size, activation=output_activation))\n\tlogging.info('cnn_model: added {} Dense layer (-> {})'.format(output_activation, output_size))\n\n\t# Compile\n\tif optimizer == 'adam':\n\t\toptimizer = Adam(lr=lr)\n\telif optimizer == 'rmsprop':\n\t\toptimizer = RMSprop(lr=lr)\n\telse:\n\t\tlogging.info('Can only handle adam or rmsprop optimizers currently')\n\t\tquit(1)\n\n\tif loss == 'custom':\n\t\tloss = mse_no_NaN\n\n\tlogging.info('compiling cnn_model...')\n\tmodel.compile(loss=loss, optimizer=optimizer)\n\tlogging.info('done compiling.')\n\n\treturn model", "def keras_model():\n\n model = Sequential([\n Conv2D(8, (2, 2), input_shape=(16, 16, 3,)),\n BatchNormalization(momentum=.3, epsilon=.65),\n AvgPool2D(),\n MaxPool2D(),\n BatchNormalization(momentum=.4, epsilon=.25),\n Conv2D(4, (2, 2), activation=tf.nn.tanh, kernel_regularizer=tf.keras.regularizers.l2(0.5)),\n Flatten(),\n Dense(2, activation='softmax', name=\"keras_model\")])\n return model", "def __call__(self, inputs, training):\n\n self.training = training\n input_shape = inputs.shape\n if self.data_format == 'channels_first':\n img_size = (input_shape[2], input_shape[3])\n else:\n img_size = (input_shape[1], input_shape[2])\n\n with self._model_variable_scope('ssd300_model'):\n if self.data_format == 'channels_first':\n # Convert the inputs from channels_last (NHWC) to channels_first 
(NCHW).\n # This provides a large performance boost on GPU. See\n # https://www.tensorflow.org/performance/performance_guide#data_formats\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n net = super(Model, self).__call__(inputs, training)\n\n with self._model_variable_scope('ssd300_model'):\n\n net = self._atrous_convolution_2d(net, filters=1024,\n kernel_size=3,\n atrous_rate=6, name='fc6')\n\n net = self._conv2d(net, filters=1024, kernel_size=1,\n padding='same', name='fc7')\n\n net = self._conv2d(net, filters=256, kernel_size=1,\n padding='same', name='conv6_1')\n\n net = self._conv2d(net, filters=512, kernel_size=3,\n strides=2,\n padding='same', name='conv6_2')\n\n net = self._conv2d(net, filters=128, kernel_size=1,\n padding='same', name='conv7_1')\n\n net = self._conv2d(fixed_padding(net, 3, self.data_format),\n filters=256, kernel_size=3,\n strides=2,\n padding='valid', name='conv7_2')\n\n net = self._conv2d(net, filters=128, kernel_size=1,\n padding='same', name='conv8_1')\n\n net = self._conv2d(net, filters=256, kernel_size=3,\n strides=2,\n padding='same', name='conv8_2')\n\n if self.data_format == 'channels_first':\n net = tf.reduce_mean(net, [2, 3])\n else:\n net = tf.reduce_mean(net, [1, 2])\n self.layers['pool6'] = net\n\n # Prediction from conv4_3\n conv4_3_norm = self._normalize(net, 20, name='conv4_3_norm')\n num_priors = 3\n x = self._conv2d(conv4_3_norm, filters=num_priors * 4, kernel_size=3,\n padding='same', name='conv4_3_norm_mbox_loc')\n self.layers['conv4_3_norm_mbox_loc_flat'] = tf.layers.flatten(x, name='conv4_3_norm_mbox_loc_flat')\n\n x = self._conv2d(conv4_3_norm, filters=num_priors * self.num_classes,\n kernel_size=3, padding='same',\n name='conv4_3_norm_mbox_conf')\n self.layers['conv4_3_norm_mbox_conf_flat'] = tf.layers.flatten(x, name='conv4_3_norm_mbox_conf_flat')\n\n prior_box = PriorBox(img_size, min_size=30.0, aspect_ratios=[2],\n variances=[0.1, 0.1, 0.2, 0.2],\n name='conv4_3_norm_mbox_priorbox')\n net['conv4_3_norm_mbox_priorbox'] = prior_box(conv4_3_norm)\n\n return net", "def cnn_model_fn(features, labels, mode):\n\t# Input Layer\n\t# Reshape X to 4-D tensor: [batch_size, width, height, channels]\n\t# MNIST images are 28x28 pixels, and have one color channel\n\t#input_layer = tf.reshape(features[\"x\"], [-1, 28, 28, 1])\n\tinput_layer = tf.reshape(features, [-1, 32, 32, 3])\n\t\n\t# Convolutional Layer #1\n\t# Computes 32 features using a 5x5 filter with ReLU activation.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 32, 32, 3]\n\t# Output Tensor Shape: [batch_size, 32, 32, 32]\n\tconv1 = tf.layers.conv2d(\n\t\tinputs=input_layer,\n\t\tfilters=32,\n\t\tkernel_size=[5, 5],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.relu)\n\n\t# Pooling Layer #1\n\t# First max pooling layer with a 2x2 filter and stride of 2\n\t# Input Tensor Shape: [batch_size, 32, 32, 32]\n\t# Output Tensor Shape: [batch_size, 16, 16, 32]\n\tpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\t\n\t# Convolutional Layer #2\n\t# Computes 64 features using a 5x5 filter.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 16, 16, 32]\n\t# Output Tensor Shape: [batch_size, 16, 16, 64]\n\tconv2 = tf.layers.conv2d(\n\t\tinputs=pool1,\n\t\tfilters=64,\n\t\tkernel_size=[5, 5],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.relu)\n\n\t# Pooling Layer #2\n\t# Second max pooling layer with a 2x2 filter and stride of 2\n\t# Input Tensor Shape: [batch_size, 16, 16, 64]\n\t# Output Tensor 
Shape: [batch_size, 8, 8, 64]\n\tpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\t\n\t# Flatten tensor into a batch of vectors\n\t# Input Tensor Shape: [batch_size, 8, 8, 64]\n\t# Output Tensor Shape: [batch_size, 8 * 8 * 64]\n\tpool2_flat = tf.reshape(pool2, [-1, 8 * 8 * 64])#*****\n\t\n\t# Dense Layer\n\t# Densely connected layer with 1024 neurons\n\t# Input Tensor Shape: [batch_size, 8 * 8 * 64]\n\t# Output Tensor Shape: [batch_size, 1024]\n\tdense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n\t\n\t# Add dropout operation; 0.6 probability that element will be kept\n\tdropout = tf.layers.dropout(\n\t\tinputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n\t# Logits layer\n\t# Input Tensor Shape: [batch_size, 1024]\n\t# Output Tensor Shape: [batch_size, 10]\n\t#logits = tf.layers.dense(inputs=dropout, units=10)\n\tlogits = tf.layers.dense(inputs=dropout, units=1001)\n\n\tpredictions = {\n\t\t# Generate predictions (for PREDICT and EVAL mode)\n\t\t\"classes\": tf.argmax(input=logits, axis=1),\n\t\t# Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n\t\t# `logging_hook`.\n\t\t\"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n\t}\n\tif mode == tf.estimator.ModeKeys.PREDICT:\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n\t# Calculate Loss (for both TRAIN and EVAL modes)\n\tloss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)#oneshot , number_lable\n\n # Configure the Training Op (for TRAIN mode)\n\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n\t\ttrain_op = optimizer.minimize(\n\t\t\tloss=loss,\n\t\t\tglobal_step=tf.train.get_global_step())\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n\teval_metric_ops = {\n\t\t\"accuracy\": tf.metrics.accuracy(\n\t\tlabels=labels, predictions=predictions[\"classes\"])} \n\treturn tf.estimator.EstimatorSpec(\n\t\tmode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def create_nn(self):\n\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(32, input_dim=self.state_size, activation='relu'))\n\t\tmodel.add(Dense(32, activation='relu'))\n\t\tmodel.add(Dense(64, activation='relu'))\n\t\tmodel.add(Dense(self.action_size, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n\t\treturn model", "def get_network(x):\n n_classes = 5\n batch_size = x.get_shape().as_list()[0]\n channels = x.get_shape().as_list()[3]\n\n # Model Helpers --------------------------------------------------------\n\n # https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#conv2d\n def conv2d(img, w, b):\n \n x = tf.nn.conv2d(img, w, strides=[1, 1, 1, 1], padding='VALID')\n z = tf.nn.bias_add(x, b)\n return tf.nn.relu(z)\n\n # https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#max_pool\n def max_pool(img, k):\n ks = [1, k, k, 1]\n return tf.nn.max_pool(img, ksize=ks, strides=ks, padding='VALID')\n\n # TODO implement\n def maxout(x):\n raise NotImplemented()\n\n def fc(x, w, b, act):\n if act:\n return act(tf.add(tf.matmul(x, w), b))\n else:\n return tf.add(tf.matmul(x, w), b)\n\n def conv_net(_X, _weights, _biases):\n # First convolution layer\n #print 'x: {}'.format(_X.get_shape())\n \n conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'])\n # k used to be 2\n conv1 = max_pool(conv1, k=2)\n\n #print 'conv1: 
{}'.format(conv1.get_shape())\n\n # Second Covolution layer\n conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'])\n conv2 = max_pool(conv2, k=2)\n\n #print 'conv2: {}'.format(conv2.get_shape())\n\n # Thrid Convolution Layer\n conv3 = conv2d(conv2, _weights['wc3'], _biases['bc3'])\n\n #print 'conv3: {}'.format(conv3.get_shape())\n\n # Fourth Convolution Layer\n conv4 = conv2d(conv3, _weights['wc4'], _biases['bc4'])\n conv4 = max_pool(conv4, k=2)\n\n #print 'conv4: {}'.format(conv4.get_shape())\n\n # In the paper the FC layers suggest that you use maxout, but\n # there isn't a native maxout in TensorFlow, so I used ReLU for now.\n\n # First Fully Connected Layer, flatten out filters first\n fc1 = tf.reshape(conv4, [batch_size, -1])\n # https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#relu\n fc1 = fc(fc1, _weights['wf1'], _biases['bf1'], tf.nn.relu)\n # TODO dropout should be a parameter\n fc1 = tf.nn.dropout(fc1, tf.Variable(tf.constant(0.5)))\n\n\n # Second Fully Connected Layer\n fc2 = fc(fc1, _weights['wf2'], _biases['bf2'], tf.nn.relu)\n # TODO dropout should be a parameter\n fc2 = tf.nn.dropout(fc2, tf.Variable(tf.constant(0.5)))\n\n # Output\n # https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#sigmoid\n output = fc(fc2, _weights['out'], _biases['out'], None)\n return output\n\n # Model Helpers --------------------------------------------------------\n\n\n # Model weights and biases\n weights = {\n # 6x6 conv, 3-channel input, 32-channel outputs\n 'wc1': tf.Variable(tf.truncated_normal([3, 3, channels, 32], stddev=0.01)), #0.01\n # 5x5 conv, 32-channel inputs, 64-channel outputs\n 'wc2': tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.01)), #0.01\n # 3x3 conv, 64-channel inputs, 128-channel outputs\n 'wc3': tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.01)), #0.01\n # 3x3 conv, 128-channel inputs, 128-channel outputs\n 'wc4': tf.Variable(tf.truncated_normal([3, 3, 128, 128], stddev=0.1)), #0.1\n # fully connected, 512 inputs, 2048 outputs\n # was 4608 for 84x84\n 'wf1': tf.Variable(tf.truncated_normal([6272, 2048], stddev=0.001)), #0.001\n # fully coneected 2048 inputs, 2048 outputs\n 'wf2': tf.Variable(tf.truncated_normal([2048, 2048], stddev=0.001)), #0.001\n # 2048 inputs, 5 outputs (class prediction)\n 'out': tf.Variable(tf.truncated_normal([2048, n_classes], stddev=0.01)) #0.01\n }\n\n biases = {\n 'bc1': tf.Variable(tf.constant(0.1, shape=[32])),\n 'bc2': tf.Variable(tf.constant(0.1, shape=[64])),\n 'bc3': tf.Variable(tf.constant(0.1, shape=[128])),\n 'bc4': tf.Variable(tf.constant(0.1, shape=[128])),\n 'bf1': tf.Variable(tf.constant(0.01, shape=[2048])),\n 'bf2': tf.Variable(tf.constant(0.01, shape=[2048])),\n 'out': tf.Variable(tf.constant(0.1, shape=[n_classes]))\n }\n\n return conv_net(x, weights, biases)", "def cnn_model_fn(features, labels, mode):\n\t# Input Layer\n\t# Reshape X to 4-D tensor: [batch_size, width, height, channels]\n\t# Our images are 400x400 pixels, and have one color channel (greyscale)\n\tinput_layer = tf.reshape(features[\"x\"], [-1, 400, 400, 1])\n\n\t# Convolutional Layer #1\n\tconv1 = tf.layers.conv2d(\n\t\tinputs=input_layer,\n\t\tfilters=32,\n\t\tkernel_size=[20, 20],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.leaky_relu)\n\n\t\n\n\n\t# Pooling Layer #1\n\t# First max pooling layer with a 2x2 filter and stride of 2\n\t# Input Tensor Shape: [batch_size, 400, 400, 32]\n\t# Output Tensor Shape: [batch_size, 200, 200, 32]\n\tpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], 
strides=2)\n\n\t# Convolutional Layer #2\n\t# Computes 64 features using a 5x5 filter.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 200, 200, 32]\n\t# Output Tensor Shape: [batch_size, 200, 200, 64]\n\tconv2 = tf.layers.conv2d(\n\t\tinputs=pool1,\n\t\tfilters=64,\n\t\tkernel_size=[10, 10],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.leaky_relu)\n\n\t# Pooling Layer #2\n\t# Second max pooling layer with a 2x2 filter and stride of 2\n\t# Input Tensor Shape: [batch_size, 200, 200, 64]\n\t# Output Tensor Shape: [batch_size, 100, 100, 64]\n\tpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n\n\t# Convolutional Layer #3\n\t# Computes 64 features using a 10x10 filter.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 100, 100, 64]\n\t# Output Tensor Shape: [batch_size, 100, 100, 64]\n\tconv3 = tf.layers.conv2d(\n\t\tinputs=pool2,\n\t\tfilters=64,\n\t\tkernel_size=[10, 10],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.leaky_relu)\n\n\t# Pooling Layer #3\n\t# Second max pooling layer with a 4x4 filter and stride of 4\n\t# Input Tensor Shape: [batch_size, 100, 100, 64]\n\t# Output Tensor Shape: [batch_size, 50, 50, 64]\n\tpool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)\n\n\n\t# Convolutional Layer #4\n\t# Computes 64 features using a 10x10 filter.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 50, 50, 64]\n\t# Output Tensor Shape: [batch_size, 50, 50, 128]\n\tconv4 = tf.layers.conv2d(\n\t\tinputs=pool3,\n\t\tfilters=128,\n\t\tkernel_size=[5, 5],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.leaky_relu)\n\n\n\t# Convolutional Layer #4\n\t# Computes 64 features using a 10x10 filter.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 50, 50, 128]\n\t# Output Tensor Shape: [batch_size, 50, 50, 64]\n\tconv5 = tf.layers.conv2d(\n\t\tinputs=conv4,\n\t\tfilters=64,\n\t\tkernel_size=[10, 10],\n\t\tpadding=\"same\",\n\t\tactivation=tf.nn.leaky_relu)\n\n\t# Pooling Layer #4\n\t# Second max pooling layer with a 4x4 filter and stride of 4\n\t# Input Tensor Shape: [batch_size, 50, 50, 64]\n\t# Output Tensor Shape: [batch_size, 25, 25, 64]\n\tpool4 = tf.layers.max_pooling2d(inputs=conv5, pool_size=[2, 2], strides=2)\n\n\n\t# Flatten tensor into a batch of vectors\n\t# Input Tensor Shape: [batch_size, 25, 25, 128]\n\t# Output Tensor Shape: [batch_size, 25 * 25 * 128]\n\tpool4_flat = tf.reshape(pool4, [-1, 25 * 25 * 64])\n\n\t# Dense Layer\n\t# Densely connected layer with 1024 neurons\n\t# Input Tensor Shape: [batch_size, 25 * 25 * 96]\n\t# Output Tensor Shape: [batch_size, 1024]\n\tdense1 = tf.layers.dense(inputs=pool4_flat, units=1024, activation=tf.nn.leaky_relu)\n\n\t# Dense Layer\n\t# Densely connected layer with 512 neurons\n\t# Input Tensor Shape: [batch_size, 1024]\n\t# Output Tensor Shape: [batch_size, 512]\n\tdense2 = tf.layers.dense(inputs=dense1, units=512, activation=tf.nn.leaky_relu)\n\n\t# Dense Layer\n\t# Densely connected layer with 512 neurons\n\t# Input Tensor Shape: [batch_size, 512]\n\t# Output Tensor Shape: [batch_size, 256]\n\tdense3 = tf.layers.dense(inputs=dense2, units=256, activation=tf.nn.leaky_relu)\n\n\t# Add dropout operation; 0.5 probability that element will be kept\n\tdropout = tf.layers.dropout(\n\t\tinputs=dense3, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n\t# Logits layer\n\t# Input Tensor Shape: [batch_size, 512]\n\t# Output Tensor Shape: 
[batch_size, 6]\n\tlogits = tf.layers.dense(inputs=dropout, units=NUM_CLASSES)\n\n\t# Avoid NaN loss error by perturbing logits\n\tepsilon = tf.constant(1e-8)\n\tlogits = logits + epsilon \n\n\t\n\n\n\tpredictions = {\n\t\t# Generate predictions (for PREDICT and EVAL mode)\n\t\t\"classes\": tf.argmax(input=logits, axis=1),\n\t\t# Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n\t\t# `logging_hook`.\n\t\t\"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n\t}\n\n\tif mode == tf.estimator.ModeKeys.PREDICT:\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n\t# Calculate Loss (for both TRAIN and EVAL modes)\n\tonehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=NUM_CLASSES)\n\tloss = tf.losses.softmax_cross_entropy(\n\t\tonehot_labels=onehot_labels, logits=logits)\n\n\t# Configure the Training Op (for TRAIN mode)\n\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\t# optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.04)\n\t\toptimizer = tf.train.AdamOptimizer(learning_rate=0.000006)\n\t\ttrain_op = optimizer.minimize(\n\t\t\tloss=loss,\n\t\t\tglobal_step=tf.train.get_global_step())\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n\t# Add evaluation metrics (for EVAL mode)\n\teval_metric_ops = {\n\t\t\"accuracy\": tf.metrics.accuracy(\n\t\t\tlabels=labels, predictions=predictions[\"classes\"])}\n\treturn tf.estimator.EstimatorSpec(\n\t\tmode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def build(self):\n input_shape_img = (None, None, 3)\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(None, 4))\n shared_layers = self.cnn_model.nn_base(img_input, trainable=True)\n num_anchors = len(self.C.anchor_scales) * len(self.C.anchor_ratios)\n \n output_region_proposal = self.region_proposal_net(shared_layers, num_anchors)\n output_classifier = self.classifier(shared_layers,\n self.cnn_model.classifier_layers, \n roi_input, self.C.num_roi, \n num_class=len(self.class_count), trainable=True)\n \n self.model_region_proposal = Model(img_input, output_region_proposal[:2])\n self.model_classifier = Model([img_input, roi_input], output_classifier)\n self.model_all = Model([img_input, roi_input], output_region_proposal[:2] + output_classifier)\n\n optimizer = Adam(lr=1e-5)\n self.model_region_proposal.compile(optimizer=optimizer, \n loss=[losses.rpn_loss_cls(num_anchors), \n losses.rpn_loss_regr(num_anchors)])\n self.model_classifier.compile(optimizer=optimizer, \n loss=[losses.class_loss_cls, \n losses.class_loss_regr(len(self.class_count)-1)], \n metrics={'dense_class_{}'.format(len(self.class_count)): 'accuracy'})\n self.model_all.compile(optimizer='sgd', loss='mae')\n\n # print(self.model_all.summary())\n plot_model(self.model_region_proposal, show_shapes=True, to_file='./frcnn/images/region_proposal.png')\n plot_model(self.model_classifier, show_shapes=True, to_file='./frcnn/images/classifier.png')\n plot_model(self.model_all, show_shapes=True, to_file='./frcnn/images/model_all.png')", "def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero 
= lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)", "def get_network(x):\n n_classes = 5\n batch_size = x.get_shape().as_list()[0]\n channels = x.get_shape().as_list()[3]\n \n # split channels to process separately\n c1, c2, c3, c4 = tf.split(3, channels, x)\n \n # Model Helpers --------------------------------------------------------\n\n # https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#conv2d\n def conv2d(img, w, b):\n \n x = tf.nn.conv2d(img, w, strides=[1, 1, 1, 1], padding='VALID')\n z = tf.nn.bias_add(x, b)\n return tf.nn.relu(z)\n\n # https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#max_pool\n def max_pool(img, k):\n ks = [1, k, k, 1]\n return tf.nn.max_pool(img, ksize=ks, strides=ks, padding='VALID')\n\n # TODO implement\n def maxout(x):\n raise NotImplemented()\n\n def fc(x, w, b, act):\n return act(tf.add(tf.matmul(x, w), b))\n\n def conv_net(_x):\n # First convolution layer\n #print 'x: {}'.format(_X.get_shape())\n weights = {\n # 6x6 conv, 3-channel input, 32-channel outputs\n 'wc1': tf.Variable(tf.truncated_normal([10, 10, 1, 32], stddev=0.01)),\n # 5x5 conv, 32-channel inputs, 64-channel outputs\n 'wc2': tf.Variable(tf.truncated_normal([7, 7, 32, 64], stddev=0.01)),\n # 3x3 conv, 64-channel inputs, 128-channel outputs\n 'wc3': tf.Variable(tf.truncated_normal([3, 3, 64, 128], stddev=0.01)),\n # 3x3 conv, 128-channel inputs, 128-channel outputs\n 'wc4': 
tf.Variable(tf.truncated_normal([3, 3, 128, 128], stddev=0.1)),\n }\n \n biases = {\n 'bc1': tf.Variable(tf.constant(0.1, shape=[32])),\n 'bc2': tf.Variable(tf.constant(0.1, shape=[64])),\n 'bc3': tf.Variable(tf.constant(0.1, shape=[128])),\n 'bc4': tf.Variable(tf.constant(0.1, shape=[128])),\n } \n \n \n conv1 = conv2d(_x, weights['wc1'], biases['bc1'])\n # k used to be 2\n conv1 = max_pool(conv1, k=4)\n\n #print 'conv1: {}'.format(conv1.get_shape())\n\n # Second Covolution layer\n conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])\n conv2 = max_pool(conv2, k=2)\n\n # Thrid Convolution Layer\n conv3 = conv2d(conv2, weights['wc3'], biases['bc3'])\n\n #print 'conv3: {}'.format(conv3.get_shape())\n\n # Fourth Convolution Layer\n conv4 = conv2d(conv3, weights['wc4'], biases['bc4'])\n conv4 = max_pool(conv4, k=2)\n\n return tf.reshape(conv4, [batch_size, -1])\n\n \n fc_weights = {\n 'wf1': tf.Variable(tf.truncated_normal([512, 2048], stddev=0.001)),\n # fully coneected 2048 inputs, 2048 outputs\n 'wf2': tf.Variable(tf.truncated_normal([2048, 2048], stddev=0.001)),\n # 2048 inputs, 5 outputs (class prediction)\n 'out': tf.Variable(tf.truncated_normal([2048, n_classes], stddev=0.01))\n }\n \n fc_biases = {\n 'bf1': tf.Variable(tf.constant(0.01, shape=[2048])),\n 'bf2': tf.Variable(tf.constant(0.01, shape=[2048])),\n 'out': tf.Variable(tf.constant(0.1, shape=[n_classes]))\n }\n\n c1 = conv_net(c1)\n c2 = conv_net(c2)\n c3 = conv_net(c3)\n c4 = conv_net(c4)\n \n # feed this into one fully connected layer\n cmb = tf.concat(1, [c1,c2,c3,c4]) \n \n # fully connected\n fc1 = fc(cmb, fc_weights['wf1'], fc_biases['bf1'], tf.nn.relu)\n fc2 = fc(fc1, fc_weights['wf2'], fc_biases['bf2'], tf.nn.relu)\n \n # output\n output = fc(fc2, fc_weights['out'], fc_biases['out'], tf.nn.softmax)\n \n return output", "def __init__(self, X_train, y_train, input_shape, filters, kernel_size,\n maxpool, loss_function='categorical_crossentropy', nb_classes= 2, droput_iteration=20, dropout = 0.05):\n\n # We normalize the training data to have zero mean and unit standard\n # deviation in the training set if necessary\n\n # if normalize:\n # self.std_X_train = np.std(X_train, 0)\n # self.std_X_train[ self.std_X_train == 0 ] = 1\n # self.mean_X_train = np.mean(X_train, 0)\n # else:\n # self.std_X_train = np.ones(X_train.shape[ 1 ])\n # self.mean_X_train = np.zeros(X_train.shape[ 1 ])\n\n self.droput_iteration = droput_iteration\n self.nb_classes = nb_classes\n self.mean_y_train = np.mean(y_train)\n self.std_y_train = np.std(y_train)\n\n\n\n # model = Sequential()\n # model.add(Conv2D(filters, (kernel_size, kernel_size), padding='same',\n # input_shape=input_shape))\n # model.add(Activation('relu'))\n # model.add(Conv2D(filters, (kernel_size, kernel_size)))\n # model.add(Activation('relu'))\n # model.add(MaxPooling2D(pool_size=(maxpool, maxpool)))\n # model.add(Dropout(dropout))\n # c = 3.5\n # Weight_Decay = c / float(X_train.shape[0])\n # model.add(Flatten())\n # model.add(Dense(128, W_regularizer=l2(Weight_Decay)))\n # model.add(Activation('relu'))\n # model.add(Dropout(dropout))\n # model.add(Dense(nb_classes))\n # model.add(Activation('softmax'))\n\n # model.compile(loss=loss_function, optimizer='adam')\n\n c = 3.5\n Weight_Decay = c / float(X_train.shape[0])\n\n model = Sequential()\n model.add(Dense(256, input_shape =input_shape))\n model.add(Activation('relu'))\n model.add(Dropout(dropout))\n model.add(Dense(256, W_regularizer=l2(Weight_Decay)))\n model.add(Activation('relu'))\n model.add(Dropout(dropout))\n 
model.add(Flatten())\n model.add(Dense(nb_classes))\n model.add(Activation('softmax'))\n\n\n model.compile(loss=loss_function, optimizer='adam')\n\n\n self.model = model\n # # We iterate the learning process\n # model.fit(X_train, y_train, batch_size=self.batch_size, nb_epoch=n_epochs, verbose=0)\n\n # #function for bayesian inference using dropouts\n # self.f = K.function([model.layers[0].input, K.learning_phase()],\n # [model.layers[-1].output])", "def keras_model_fn(model_config, vocab_size, embedding_size, embeddings):\n ## hyperparams\n model_name = model_config['model_name']\n num_class = model_config['num_class']\n lstm_hs = model_config['lstm_hs']\n gru_hs = model_config['gru_hs']\n learning_rate = model_config['learning_rate']\n \n ## build model - , weights=[embeddings[1]]\n inputs = ks.Input(shape=(None,), dtype='int32', name='inputs')\n embedded_sequences_ft1 = layers.Embedding(vocab_size, embedding_size, trainable = True, mask_zero = False)(inputs)\n embedded_sequences_ft2 = layers.Embedding(vocab_size, embedding_size, trainable = True, mask_zero = False)(inputs)\n concat_embed = layers.concatenate([embedded_sequences_ft1 ,embedded_sequences_ft2])\n concat_embed = layers.SpatialDropout1D(0.5)(concat_embed)\n x = layers.Bidirectional(layers.CuDNNLSTM(lstm_hs, return_sequences = True))(concat_embed)\n x, x_h, x_c = layers.Bidirectional(layers.CuDNNGRU(gru_hs, return_sequences = True, return_state = True))(x)\n x_1 = layers.GlobalMaxPool1D()(x)\n x_2 = layers.GlobalAvgPool1D()(x)\n x_out = layers.concatenate([x_1 ,x_2, x_h])\n x_out = layers.BatchNormalization()(x_out)\n outputs = layers.Dense(num_class, activation = 'softmax', name = 'outputs')(x_out) # outputs\n model = ks.Model(inputs, outputs, name = model_name)\n \n ## compile\n model.compile(loss = 'categorical_crossentropy', \n optimizer = ks.optimizers.Adam(lr=learning_rate, clipnorm=.25, beta_1=0.7, beta_2=0.99), \n metrics = ['categorical_accuracy', ks.metrics.TopKCategoricalAccuracy(k=3)]) # metric what?\n return model", "def my_hom_cnn_model_fn(features, labels, mode):\n # Input Layer\n # Reshape X to 4-D tensor: [batch_size, width, height, channels]\n\n #HOM Images are 128x128, and have two channels\n input_layer = tf.reshape(features[\"x\"], [-1, pSize, pSize, 2])\n\n # Convolutional Layer #1\n # Computes 32 features using a 3x3 filter with ReLU activation.\n # Padding is added to preserve width and height.\n conv1 = tf.layers.conv2d(inputs=input_layer, filters=64,kernel_size=[3, 3], padding=\"same\",activation=tf.nn.relu) #None\n conv2 = tf.layers.conv2d(inputs=conv1, filters=64, kernel_size=[3, 3], padding=\"same\", activation=tf.nn.relu)\n pool1 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n conv3 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[3, 3], padding=\"same\",activation=tf.nn.relu)\n conv4 = tf.layers.conv2d(inputs=conv3,filters=64,kernel_size=[3, 3],padding=\"same\",activation=tf.nn.relu)\n pool2 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)\n\n conv5 = tf.layers.conv2d(inputs=pool2, filters=128, kernel_size=[3, 3], padding=\"same\",activation=tf.nn.relu)\n conv6 = tf.layers.conv2d(inputs=conv5,filters=128,kernel_size=[3, 3],padding=\"same\",activation=tf.nn.relu)\n pool3 = tf.layers.max_pooling2d(inputs=conv6, pool_size=[2, 2], strides=2)\n\n conv7 = tf.layers.conv2d(inputs=pool3, filters=128, kernel_size=[3, 3], padding=\"same\",activation=tf.nn.relu)\n conv8 = tf.layers.conv2d(inputs=conv7,filters=128,kernel_size=[3, 
3],padding=\"same\",activation=tf.nn.relu)\n\n # Dense Layer\n # Densely connected layer with 1024 neurons\n conv2_flat = tf.reshape(conv8, [-1, 128 * 128 * 2])\n # Add dropout operation; 0.5 probability that element will be kept\n dropout = tf.layers.dropout(inputs=conv2_flat, rate=0.5, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n fully_connect = tf.layers.dense(inputs=dropout, units=1024, activation=None) #activation=None\n predictions = tf.layers.dense(inputs=fully_connect, units=8, activation=None)\n\n #predictions\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(mode=mode, predictions = predictions)\n \n loss = tf.losses.mean_squared_error(labels=labels, predictions = predictions)\n \n #loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=predictions, name=\"softmax_tensor\")\n\n # Configure the Training Op (for TRAIN mode)\n if mode == tf.estimator.ModeKeys.TRAIN:\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n # Add evaluation metrics (for EVAL mode)\n #eval_metric_ops = {\"accuracy\": tf.metrics.accuracy(labels=labels, predictions=predictions[\"coord\"])}#predictions=predictions[\"classes\"])}\n \n if mode == tf.estimator.ModeKeys.EVAL:\n eval_metric_ops = {\n \"mean_square_error\": tf.metrics.mean_squared_error(labels=labels, predictions = predictions)}#predictions=predictions[\"classes\"])}\n\n return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops, predictions = predictions)", "def prepare_model(ninputs=9600, nclass=5):\n lrmodel = Sequential()\n lrmodel.add(Dense(input_dim=ninputs, output_dim=nclass))\n lrmodel.add(Activation('softmax'))\n lrmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n return lrmodel", "def build_classifier_model():\n model = keras.Sequential([\n keras.layers.SimpleRNN(64, input_shape=(\n special_train_data.shape[1], special_train_data.shape[2])),\n keras.layers.Dense(64),\n keras.layers.Dense(64, activation=tf.nn.relu),\n keras.layers.Dense(64, activation=tf.nn.tanh),\n keras.layers.Dense(64, activation=tf.nn.relu),\n keras.layers.Dense(1, activation=tf.nn.tanh),\n keras.layers.Lambda(lambda x: (x+1)/2, output_shape=(1,)),\n ])\n\n try:\n optimizer = tf.optimizers.Adam(0.001)\n except:\n optimizer = tf.train.AdamOptimizer(0.001)\n\n model.compile(loss='mse',\n optimizer=optimizer,\n metrics=[keras.metrics.mae])\n return model", "def cnn_model_fn(features, labels, mode):\n\t# Input Layer\n\t# Reshape X to 4-D tensor: [batch_size, width, height, channels]\n\t# Modified MNIST images are 64x64 pixels, and have one color channel\n\tinput_layer = tf.reshape(features[\"x\"], [-1, 64, 64, 1])\n\n\t# Convolutional Layer #1\n\t# Computes 32 features using a 5x5 filter with ReLU activation.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 64, 64, 1]\n\t# Output Tensor Shape: [batch_size, 64, 64, 32]\n\tconv1 = tf.layers.conv2d(\n\t\t\tinputs=input_layer,\n\t\t\tfilters=32,\n\t\t\tkernel_size=[5, 5],\n\t\t\tpadding=\"same\",\n\t\t\tactivation=tf.nn.relu)\n\n\t# Pooling Layer #1\n\t# First max pooling layer with a 2x2 filter and stride of 2\n\t# Input Tensor Shape: [batch_size, 64, 64, 32]\n\t# Output Tensor Shape: [batch_size, 32, 32, 32]\n\tpool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)\n\n\t# Convolutional Layer 
#2\n\t# Computes 64 features using a 5x5 filter.\n\t# Padding is added to preserve width and height.\n\t# Input Tensor Shape: [batch_size, 32, 32, 32]\n\t# Output Tensor Shape: [batch_size, 32, 32, 64]\n\tconv2 = tf.layers.conv2d(\n\t\t\tinputs=pool1,\n\t\t\tfilters=64,\n\t\t\tkernel_size=[5, 5],\n\t\t\tpadding=\"same\",\n\t\t\tactivation=tf.nn.relu)\n\n\t# Pooling Layer #2\n\t# Second max pooling layer with a 2x2 filter and stride of 2\n\t# Input Tensor Shape: [batch_size, 32, 32, 64]\n\t# Output Tensor Shape: [batch_size, 16, 16, 64]\n\tpool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)\n\n\t# Flatten tensor into a batch of vectors\n\t# Input Tensor Shape: [batch_size, 16, 16, 64]\n\t# Output Tensor Shape: [batch_size, 16 * 16 * 64]\n\tpool2_flat = tf.reshape(pool2, [-1, 16 * 16 * 64])\n\n\t# Dense Layer\n\t# Densely connected layer with 1024 neurons\n\t# Input Tensor Shape: [batch_size, 16 * 16 * 64]\n\t# Output Tensor Shape: [batch_size, 1024]\n\tdense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)\n\n\t# Add dropout operation; 0.6 probability that element will be kept\n\tdropout = tf.layers.dropout(\n\t\t\tinputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)\n\n\t# Logits layer\n\t# Input Tensor Shape: [batch_size, 1024]\n\t# Output Tensor Shape: [batch_size, 40]\n\tlogits = tf.layers.dense(inputs=dropout, units=40)\n\n\tpredictions = {\n\t\t\t# Generate predictions (for PREDICT and EVAL mode)\n\t\t\t\"classes\": tf.argmax(input=logits, axis=1),\n\t\t\t# Add `softmax_tensor` to the graph. It is used for PREDICT and by the\n\t\t\t# `logging_hook`.\n\t\t\t\"probabilities\": tf.nn.softmax(logits, name=\"softmax_tensor\")\n\t}\n\tif mode == tf.estimator.ModeKeys.PREDICT:\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)\n\n\t# Calculate Loss (for both TRAIN and EVAL modes)\n\tonehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=40)\n\tloss = tf.losses.softmax_cross_entropy(\n\t\t\tonehot_labels=onehot_labels, logits=logits)\n\n\t# Configure the Training Op (for TRAIN mode)\n\tif mode == tf.estimator.ModeKeys.TRAIN:\n\t\toptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\n\t\ttrain_op = optimizer.minimize(\n\t\t\t\tloss=loss,\n\t\t\t\tglobal_step=tf.train.get_global_step())\n\t\treturn tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)\n\n\t# Add evaluation metrics (for EVAL mode)\n\teval_metric_ops = {\n\t\t\t\"accuracy\": tf.metrics.accuracy(\n\t\t\t\t\tlabels=labels, predictions=predictions[\"classes\"])}\n\treturn tf.estimator.EstimatorSpec(\n\t\t\tmode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def model(pretrained=False, **kwargs):\r\n\r\n layers = make_layers(cfg['O'], dilation=dilation['D1'])\r\n cnv = np.cumsum(cnvs['OI']) if kwargs['args'].IN or kwargs['args'].INL else np.cumsum(cnvs['O'])\r\n model = VGG(layers, cnvs=cnv, **kwargs)\r\n if pretrained:\r\n pre2local_keymap = [('features.{}.weight'.format(i), 'conv1_2.{}.weight'.format(i)) for i in range(10)]\r\n pre2local_keymap += [('features.{}.bias'.format(i), 'conv1_2.{}.bias'.format(i)) for i in range(10)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 10), 'conv3.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 10), 'conv3.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 17), 'conv4.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += 
[('features.{}.bias'.format(i + 17), 'conv4.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 24), 'conv5.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 24), 'conv5.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap = dict(pre2local_keymap)\r\n\r\n\r\n model_dict = model.state_dict()\r\n pretrained_file = os.path.join(kwargs['args'].pretrained_model_dir, kwargs['args'].pretrained_model)\r\n if os.path.isfile(pretrained_file):\r\n pretrained_dict = torch.load(pretrained_file)\r\n print('load pretrained model from {}'.format(pretrained_file))\r\n else:\r\n pretrained_dict = model_zoo.load_url(model_urls['vgg16'])\r\n print('load pretrained model from {}'.format(model_urls['vgg16']))\r\n # 0. replace the key\r\n pretrained_dict = {pre2local_keymap[k] if k in pre2local_keymap.keys() else k: v for k, v in\r\n pretrained_dict.items()}\r\n # *. show the loading information\r\n for k in pretrained_dict.keys():\r\n if k not in model_dict:\r\n print('Key {} is removed from vgg16'.format(k))\r\n print(' ')\r\n for k in model_dict.keys():\r\n if k not in pretrained_dict:\r\n print('Key {} is new added for DA Net'.format(k))\r\n # 1. filter out unnecessary keys\r\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\r\n # 2. overwrite entries in the existing state dict\r\n model_dict.update(pretrained_dict)\r\n # 3. load the new state dict\r\n model.load_state_dict(model_dict)\r\n return model", "def build_model(self):\n cfg = self.cfg\n\n print('Building model')\n self.model = SimpleNet(cfg, cfg.MODEL, 0, **cfg.MODEL.BACKBONE.PARAMS)\n self.model.to(self.device)\n print('# params: {:,}'.format(count_num_param(self.model)))\n self.optim = build_optimizer(self.model, cfg.OPTIM)\n self.sched = build_lr_scheduler(self.optim, cfg.OPTIM)\n self.register_model('model', self.model, self.optim, self.sched)\n\n fdim = self.model.fdim\n self.classifier = nn.Linear(fdim, self.num_classes)\n print('# params: {:,}'.format(count_num_param(self.classifier)))\n self.classifier.to(self.device)\n self.optim_classifier = build_optimizer(self.classifier, cfg.OPTIM)\n self.sched_classifier = build_lr_scheduler(self.optim_classifier, cfg.OPTIM)\n self.register_model('classifier', self.classifier, self.optim_classifier, self.sched_classifier)", "def get_model(self, number_of_classes, weight_path):\n\n if K.image_dim_ordering() == 'th':\n input_shape_img = (3, None, None)\n else:\n input_shape_img = (self.config.crop_height, self.config.crop_width, 3)\n\n img_input = Input(shape=input_shape_img, name=\"image_input\")\n\n # Define ResNet50 model Without Top\n net = ModelZoo()\n model_resnet50 = net.resnet50_base(img_input, trainable=True)\n model_resnet50 = GlobalAveragePooling2D(name='global_avg_pool')(model_resnet50)\n output_resnet50 = Dense(number_of_classes, kernel_initializer=\"he_normal\", activation='softmax', name='fc')(\n model_resnet50)\n\n # Define the model\n model = Model(inputs=img_input, outputs=output_resnet50, name='resnet50')\n # In the summary, weights and layers from ResNet50 part will be hidden, but they will be fit during the training\n model.summary()\n\n # Load pre-trained weights for ResNet50\n try:\n print(\"Start loading Weights\")\n model.load_weights(weight_path, by_name=True)\n print('Finished successfully loading weights from {}'.format(weight_path))\n\n except Exception as e:\n print('Could not load pretrained model weights. 
Weights can be found at {} and {}'.format(\n 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_th_dim_ordering_th_kernels_notop.h5',\n 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n ))\n raise Exception(e)\n\n print('Finished successfully loading Model')\n return model" ]
[ "0.7548566", "0.7336699", "0.7334111", "0.7312466", "0.7297639", "0.72836435", "0.7270212", "0.7219095", "0.7172809", "0.7157214", "0.7129322", "0.7123428", "0.7075449", "0.70725286", "0.70709413", "0.70399654", "0.70386714", "0.7026649", "0.69520336", "0.694842", "0.6929263", "0.6918556", "0.6914558", "0.690893", "0.687278", "0.68676305", "0.6846631", "0.6834375", "0.68102777", "0.68066967", "0.6805155", "0.68049896", "0.6800795", "0.67864025", "0.6786015", "0.67738134", "0.67507464", "0.6746528", "0.6736834", "0.67360103", "0.6721814", "0.6720901", "0.6713957", "0.67116636", "0.6703714", "0.6700961", "0.66874236", "0.66858363", "0.66842836", "0.66691047", "0.66665787", "0.66601163", "0.66573745", "0.6652831", "0.66528094", "0.6641128", "0.663945", "0.66368794", "0.6613317", "0.6606673", "0.6601687", "0.65967804", "0.6589931", "0.65731096", "0.6559694", "0.653244", "0.6522474", "0.6521691", "0.65113837", "0.6495604", "0.6486972", "0.6484136", "0.64820397", "0.6481478", "0.6478285", "0.6477306", "0.6470886", "0.6470291", "0.64681166", "0.64642525", "0.6461026", "0.6459535", "0.64585215", "0.6456184", "0.6455244", "0.64470744", "0.64414746", "0.6438649", "0.64341104", "0.64337903", "0.64299303", "0.6428543", "0.6425091", "0.64247626", "0.6421401", "0.64202946", "0.6419036", "0.64137095", "0.641185", "0.64115226" ]
0.6860718
26
Call API, get returned model output_text
def formatText(input_text): data = {"text": input_text} print 'Waiting for return ...' req = requests.post('http://34.212.39.136:5678/format', json = data) output_text = req.json()['result'] return output_text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_text(self):\n return self.output.getvalue()", "def get_text(self):\n\n return self.output['text']", "def _text_command(self, request):\n response = self._send(request)\n self._check_response(response)\n return response.text", "def get_output(self):\r\n return self._api.get_output()", "def api_call():\n\n json_str = load_input()\n output = {\n 'inputs': json_str,\n 'results': 'cool results'}\n\n return json.dumps(output), 200, {'Content-Type': 'text/plain;charset=utf-8'}", "def GetModelOutputInfo(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_text(self):\n return self.res.text", "def temp(request):\n if request.method == 'GET':\n response = {request.GET.get('text', None)}\n # Exception Block t handle errors\n try:\n # Try to get output from our model\n model = joblib.load(os.getcwd()+'/model.pkl')\n output_array = model.predict([main.spacy_cleaner(str(response))])\n return {\"Sucess\": True ,'Sentiment': output_array[0].item()}\n\n except (ValueError, TypeError) as e:\n # If any error occurs\n return {\"Sucess\": False ,'Sentiment':'Null'}", "def api():\n\treturn \"The API call\"", "def make_request_txt(self):\n #print (self.url)\n try:\n with closing(get(self.url, stream=True)) as resp: #returns b`txt`\n if self.is_txt(resp):\n return resp.content.decode(\"utf-8\")\n else:\n return None\n except RequestException as e:\n print('Error during requests to {0} : {1}'.format(url, str(e)))\n return None", "def get_model(self):\n url = self.resource()\n params = {'data': ''}\n resp = self._client.get(url, params=params)\n\n return resp.text", "def api():\n input = request.json\n app.logger.info(\"api_input: \" + str(input))\n # output_data = model_api(input_data)\n # get predictions\n prediction, _, losses = learn.predict(str(input))\n output = \"FOUL!!\" if prediction.obj == '1' else \"Not Foul\"\n app.logger.info(\"api_output: \" + str(output))\n data = {'input': input, 'output': output}\n response = jsonify(data)\n return response", "def get_text_prediction():\n json = request.get_json()\n print(json)\n if len(json['text']) == 0:\n return jsonify({'error': 'invalid input'})\n\n return jsonify({'you sent this': json['text']})", "def get_text(self):", "def api_convert():\n try:\n rest_data = request.get_json()\n text = rest_data.get('text')\n return jsonify({'text': process_text(text)})\n except Exception as e:\n response = jsonify({'error': 'API error'})\n response.status_code = 400\n return response", "async def get_model(self):\n\n # Display info message\n log.info(\"get_model\")\n\n # Get model\n output = await self.send_command(self.cmd_get_model)\n\n # Display info message\n log.info(f\"get_model: output: '{output}'\")\n\n # Remove the useless information in the returned string\n output = output.split('\"')[3]\n\n # Display info message\n log.info(f\"get_model: model found: '{output}'\")\n\n # Return the model of the device\n return output", "def output(self):\r\n return self.result", "def ez_derive_text(auth_token, model_id, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_derive_text\"\n payload = {\n \"model_id\": model_id,\n \"options\": options\n }\n headers = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = json.dumps(payload)\n )\n status_code = response.status_code\n 
try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def api_call():\n\tresponse = requests.get(URL_API)\n\treturn response", "def obtain_text():\n pass", "def text(request):\n data = []\n\n if request.method == \"GET\":\n response = {\"success\": False, \"error\": \"Get method is not allowed\"}\n return HttpResponse(json.dumps(response), status=501)\n\n elif request.method == \"POST\":\n ner_logger.debug(\"Fetching result\")\n\n try:\n verify_text_request(request)\n # if verify success get detection data\n data = get_text_entity_detection_data(request)\n\n except KeyError as err:\n response = {\"success\": False, \"error\": str(err)}\n # TODO: move to ner_logger.error\n ner_logger.exception(response)\n return HttpResponse(json.dumps(response), content_type='application/json',\n status=400)\n except TypeError as err:\n response = {\"success\": False, \"error\": str(err)}\n ner_logger.exception(response)\n return HttpResponse(json.dumps(response), content_type='application/json',\n status=400)\n except Exception as err:\n response = {\"success\": False, \"error\": str(err)}\n ner_logger.exception(response)\n return HttpResponse(json.dumps(response), content_type='application/json',\n status=400)\n\n if data:\n response = {\"success\": True, \"error\": None, \"data\": data}\n return HttpResponse(json.dumps(response), content_type='application/json', status=200)\n else:\n response = {\"success\": False, \"error\": \"Some error while parsing\"}\n return HttpResponse(json.dumps(response), status=400)", "def return_output(self):\n return self.output", "def dispatch_request(self):\n\n text = request.args.get('text')\n model_name = request.args.get('model')\n\n if 'text' in request.form:\n text = request.form['text']\n if 'model' in request.form:\n model_name = request.form['model']\n\n if text is None:\n message = \"The service accepts GET and POST requests containing a mandatory 'text' parameter\"\n raise InvalidAPIUsage(message, status_code=400)\n\n models = app.config['MODELS']\n\n if model_name is None:\n model_name = app.config['DEFAULT_MODEL']\n\n if model_name not in models:\n message = 'Unknown model: %s' % model_name\n raise InvalidAPIUsage(message, status_code=400)\n\n # Compute answer\n answer = None\n\n return jsonify(answer)", "def get_output(self):\n return self.output", "def get_output(self):\n return self.output", "def result(self): \n return self.body", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "def _get_output(self):\n return self.__output", "async def process(document_id, text, model, out='downloads/'):\n\n file_id = upload(text)\n data = {\n 'lpmn': 'any2txt|wcrft2({\"morfeusz2\":false})|liner2({\"model\":\"'+model+'\"})',\n 'user': 'geocoder',\n 'file': file_id\n }\n\n response, errors = start_task(data)\n\n if errors is not None:\n return {'errors': errors}\n\n if response is not None:\n response = response[0][\"fileID\"]\n content = urlopen(Request(url + '/download' + response)).read().decode()\n with open(out + os.path.basename(document_id) + '.' 
+ model, \"w\") as outfile:\n outfile.write(content)\n\n return {'model': model,\n 'path': out + os.path.basename(document_id) + '.' + model,\n 'errors': None}", "def api(self) -> str:", "def post(self):\n # use parser and find the user's query\n args = parser.parse_args()\n title = args['title']\n author = model.encode_author(args['author'])\n text = args['text']\n\n X = model.vector_and_stack(title=title, text=text, author=author)\n\n prediction = model.predict(X)\n\n # Output either 'Negative' or 'Positive' along with the score\n if round(prediction[0]) == 0:\n pred_text = 'Reliable News'\n else:\n pred_text = 'Unreliable News'\n\n # round the predict proba value and set to new variable\n confidence = round(prediction[0], 3)\n\n # create JSON object\n output = {'prediction': pred_text, 'fake_rate': confidence}\n\n return output, 200", "def get_console_output(self, req, id, body):\n context = req.environ['nova.context']\n instance = common.get_instance(self.compute_api, context, id)\n context.can(co_policies.BASE_POLICY_NAME,\n target={'project_id': instance.project_id})\n\n length = body['os-getConsoleOutput'].get('length')\n # TODO(cyeoh): In a future API update accept a length of -1\n # as meaning unlimited length (convert to None)\n\n try:\n output = self.compute_api.get_console_output(context,\n instance,\n length)\n # NOTE(cyeoh): This covers race conditions where the instance is\n # deleted between common.get_instance and get_console_output\n # being called\n except (exception.InstanceNotFound,\n exception.ConsoleNotAvailable) as e:\n raise webob.exc.HTTPNotFound(explanation=e.format_message())\n except exception.InstanceNotReady as e:\n raise webob.exc.HTTPConflict(explanation=e.format_message())\n except NotImplementedError:\n common.raise_feature_not_supported()\n\n # XML output is not correctly escaped, so remove invalid characters\n # NOTE(cyeoh): We don't support XML output with V2.1, but for\n # backwards compatibility reasons we continue to filter the output\n # We should remove this in the future\n remove_re = re.compile('[\\x00-\\x08\\x0B-\\x1F]')\n output = remove_re.sub('', output)\n\n return {'output': output}", "def format_model_output(self, output, batch_size=1):\r\n return output", "def get_console_text(self):\n console_text_api = '/consoleText'\n return self._api_request(self.url + console_text_api)", "def test_get_request_output(self):\n pass", "def get(self, request):\r\n data = {\r\n 'results': 'THIS IS THE PROTECTED STRING FROM SERVER',\r\n }\r\n return Response(data, status=status.HTTP_200_OK)", "def get_text(self):\n return ''.join(self.result)", "def transformation():\n data = None\n text = None\n\n if flask.request.content_type == \"application/json\":\n print(\"calling json launched\")\n data = flask.request.get_json(silent=True)\n\n text = data[\"text\"]\n try:\n bing_key = data[\"bing_key\"]\n except Exception:\n bing_key = None\n\n else:\n return flask.Response(\n response=\"This predictor only supports JSON data\",\n status=415,\n mimetype=\"text/plain\",\n )\n\n print(\"Invoked with text: {}.\".format(text.encode(\"utf-8\")))\n\n # Do the prediction\n predictions = ScoringService.predict(text, bing_key)\n\n result = json.dumps(predictions[:10])\n\n return flask.Response(response=result, status=200, mimetype=\"application/json\")", "def test_get_text(self):\n request = {\n 'jsonrpc': '2.0',\n 'id': 3,\n 'method': 'curl',\n 'params': ['http://' + os.environ['SERVER_NAME'] + '/curl_test/data?serve=mytext&key='+ str(self.item.key())]\n }\n response = 
self.send_request('&log=1',request)\n result = json.loads(response.content)\n self.assertEqual(result['result'], self.item.mytext)", "def get_response_for_api(self):\n coll_1 = \"I'm from Telegram\"\n coll_2 = \" Controller Class\"\n result = coll_1 + coll_2\n return {\n 'response': result\n }", "def get_response(text: str):\n # Step 01: Initialize the response.\n response = dict()\n results = dict()\n\n vectorized_text = dict()\n vectorized_text['test'] = (PredictionService.__vc.transform([text])) # see options in the above cell\n\n print ('DONE - [EMBEDDING] Apply Chosen Embeddings to the Tweets')\n # Step 02: Predict the label/class of the received text.\n predicted_sentiment = PredictionService.__model.predict(vectorized_text['test']).tolist()\n\n # Step 03: Parse the prediction result.\n if (predicted_sentiment[0] == 1):\n results[\"label\"] = \"Relevant\"\n else:\n results[\"label\"] = \"Not Relevant\"\n\n # Step 04: Prepare the response.\n response[\"status\"] = 200\n response[\"results\"] = results\n\n # Step 05: Return the response.\n return response", "def text(self) -> str:", "def get_output(self):\n raise NotImplementedError", "def get_output(self):\n print(\"DEPRECATED get_output\")\n return self._operations.get_output()", "def api_return(success, model_ws):\n fpth = os.path.join(model_ws, \"mfsim.stdout\")\n return success, open(fpth).readlines()", "def get_raw_output(self):\n return self._engine.get_raw_output()", "def download_text_command():\n # 1. Get input scan id from Demisto\n scanid = demisto.args().get('scanid')\n # 2. Get the forensic webpage text from SlashNext API\n response = download_text(scanid=scanid)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n text_base64 = response.get('textData').get('textBase64')\n text_data = base64.b64decode(text_base64)\n\n text_file = fileResult('slashnext_{}.txt'.format(scanid), text_data, entryTypes['file'])\n\n demisto.results({\n 'Type': entryTypes['file'],\n 'ContentsFormat': formats['text'],\n 'Contents': 'Forensics: Webpage Rendered Text for URL Scan ID = {}'.format(scanid),\n 'File': text_file.get('File'),\n 'FileID': text_file.get('FileID')\n })", "def get_text_prediction():\n json = request.get_json()\n # print(json)\n if len(json['image']) == 0:\n return jsonify({'error': 'invalid input'})\n imgdata = base64.b64decode(json['image'])\n filename = 'some_image.png' # I assume you have a way of picking unique filenames\n with open(filename, 'wb') as f:\n f.write(imgdata)\n idx, dis= x.search(querry_image = 'some_image.png')\n print(idx, dis)\n # print(idx)\n data_path = x.find(idx)\n json_results = x.return_json(data_path)\n print(data_path)\n # print(json_results)\n # json_results = jsonify(json_results)\n return jsonify(json_results)", "def fetch_ml_model_info() -> ApiResponse:\n return _api_response(settings.ML_MODELS)", "def get_text(self, caller):\n \n if caller == \"txt_search\":\n search_text = self.builder.get_object(\"txt_search\").get_text()\n return search_text\n elif caller == \"txt_tweet\":\n tweet_text = self.builder.get_object(\"txt_tweet\").get_text() \n return tweet_text", "def result(target_text):\n\n display_text(target_text)\n readability(target_text)", "def output_from_json(self, output: Dict[str, Any]) -> OutputInfo:", "def api_outputmodel(api: str, model: BaseModel, servicename: str,\n service_logger: logger) -> Callable:\n\n def decorator(func):\n @wraps(func)\n async def function_wrapper(request, *args, **kwargs):\n service_result = await func(request, *args, 
**kwargs)\n try:\n if isinstance(service_result, model):\n result = service_result\n else:\n result = model(**service_result)\n output = response.json(result.dict())\n except Exception as err:\n msg = ('an internal error occured (service: '\n f'{servicename}, api: {api}): {err}')\n raise ServerError(msg)\n service_logger.info(f'processed result {result} => '\n f'{output.content_type} [{output.status}] '\n f'{output.body}')\n return output\n\n return function_wrapper\n\n return decorator", "async def text(self, encoding=\"utf-8\", errors=\"strict\"):\n return self.response.decode(encoding, errors=errors)", "def via_response(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"via_response\")", "def getText(self):", "def __call__(self, *args, **kwargs):\n kwargs.setdefault(\"print_output\", self.PRINT_OUTPUT)\n kwargs.setdefault(\"return_output\", self.RETURN_OUTPUT)\n\n s = self.output(*args, **kwargs)\n if kwargs[\"print_output\"]:\n self.writeline(s)\n\n return s.strip() if kwargs[\"return_output\"] else None", "def getOutput(self):\r\n return self._output", "def output_handler(response, context):\n if response.status_code != 200:\n _return_error(response.status_code, response.content.decode(\"utf-8\"))\n response_content_type = context.accept_header\n prediction = response.content\n return prediction, response_content_type", "def predict_api():\n pass", "def echo(self):\r\n request = http.Request('GET', self.get_url() + '/echo')\r\n\r\n return request, parsers.parse_json", "def get_text(text_input):\r\n return text_input", "def get_model_output(\n model,\n batch_x\n):\n outputs = model(batch_x, training=False)\n return outputs", "def _processGETResp(self, output, request):\r\n msg = {'key' : output}\r\n\r\n self._render_GET(request, httplib.OK,\r\n 'application/json; charset=utf-8', json.dumps(msg))", "def LaunchRequest() -> str:\n response = {'outputSpeech' : {'text': 'What would you like to do?', 'type': 'PlainText'},\n 'shouldEndSession': False}\n # Add the response headers\n response = utils.add_response_headers(response)\n return response", "def get(self, *args, **kwargs):\n as_text = kwargs.pop('as_text', True)\n kwargs['follow_redirects'] = kwargs.get('follow_redirects', True)\n response = self.app.get(*args, **kwargs)\n if as_text:\n return response.get_data(as_text=True)\n return response", "def process_text(model_name: str, text: str) -> spacy.tokens.Doc:\r\n nlp = load_model(model_name)\r\n return nlp(text)", "def __call__(self, command):\n return self.execute(command).response.body", "def get_output(self):\n return self._output", "def output_text(text):\n print(text)", "def get_output(self, **kwargs):\n return self.out", "def post(self):\n data = request.json\n return analyze_text(data)", "def get_model_output(self):\n\n return self.model_output_file", "def get_Response(self):\n return self._output.get('Response', None)", "def get_Response(self):\n return self._output.get('Response', None)", "def get_Response(self):\n return self._output.get('Response', None)", "def get_Response(self):\n return self._output.get('Response', None)", "def get_Response(self):\n return self._output.get('Response', None)", "def get_Response(self):\n return self._output.get('Response', None)", "def get_Response(self):\n return self._output.get('Response', None)", "def get_Response(self):\n return self._output.get('Response', None)", "def get_Response(self):\n return self._output.get('Response', None)", "def get_Response(self):\n return self._output.get('Response', None)", "def 
get_Response(self):\n return self._output.get('Response', None)", "def get_Response(self):\n return self._output.get('Response', None)", "def get_api_result(self, url, params):\n return self.HANDLER_HTTP.send_get_request(url, params)", "def get(self, id=None):\n self.reset(id)\n url = ASSEMBLYAI_URL + '/model/' + str(self.id)\n response = requests.get(url, headers=self.headers)\n self.warning = handle_warnings(response, 'model')\n response = response.json()['model']\n # self.phrases = response['phrases']\n self.dict = response\n self.status = response['status']\n logging.debug('Model %s %s' % (self.id, self.status))\n return self", "def makeresponse(self, txt, result=[], dot=\", \", *args, **kwargs):\n res = []\n dres = []\n if type(txt) == types.DictType or type(txt) == types.ListType:\n result = txt\n if type(result) == types.DictType:\n for key, value in result.iteritems():\n dres.append(u\"%s: %s\" % (key, unicode(value)))\n if dres: target = dres\n else: target = result\n if target:\n txt = u\"<b>\" + txt + u\"</b>\"\n for i in target:\n if not i: continue\n if type(i) == types.ListType or type(i) == types.TupleType:\n try:\n res.append(dot.join(i))\n except TypeError: res.extend(i)\n elif type(i) == types.DictType:\n for key, value in i.iteritems():\n res.append(u\"%s: %s\" % (key, unicode(value)))\n else: res.append(unicode(i))\n ret = \"\"\n if txt: ret = unicode(txt) + dot.join(res) \n elif res: ret = dot.join(res)\n if ret: return ret\n return \"\"", "def output(self) -> List[str]:\n return self._model.output", "def text(self) -> str:\n return self._impl.get_text()", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "async def classify_text_endpoint(case: Case):\n\n # Use the pretrained model to classify the incoming text in the request.\n classified_text = classify_text(classifier, case.text)\n\n return classified_text" ]
[ "0.6547523", "0.64406526", "0.6372505", "0.63234127", "0.63181823", "0.62049085", "0.61882645", "0.6186225", "0.61427337", "0.61218154", "0.611173", "0.6090764", "0.60520905", "0.6046867", "0.60038114", "0.59461546", "0.58610237", "0.5852279", "0.5844785", "0.58447844", "0.58341223", "0.58249974", "0.581133", "0.5778573", "0.5778573", "0.5757767", "0.5747152", "0.5747152", "0.5747152", "0.5747152", "0.5747152", "0.5747152", "0.5740884", "0.5740263", "0.57181674", "0.57168275", "0.5711629", "0.5671503", "0.56566083", "0.5638384", "0.56261873", "0.5623162", "0.5614313", "0.5614254", "0.5608458", "0.55857486", "0.55841476", "0.5581937", "0.557769", "0.55739975", "0.5570074", "0.5569538", "0.5568629", "0.5551608", "0.5542358", "0.55356866", "0.5531043", "0.552924", "0.55204284", "0.54954624", "0.54943395", "0.548444", "0.5461234", "0.5446634", "0.54303914", "0.5428419", "0.5417027", "0.5413256", "0.53923565", "0.53910094", "0.5390021", "0.53886", "0.53758085", "0.53724796", "0.53709304", "0.5368574", "0.5364674", "0.53622264", "0.53622264", "0.53622264", "0.53622264", "0.53622264", "0.53622264", "0.53622264", "0.53622264", "0.53622264", "0.53622264", "0.53622264", "0.53622264", "0.53604335", "0.5360032", "0.5331778", "0.5328398", "0.5296679", "0.5294079", "0.5294079", "0.5294079", "0.5294079", "0.5294079", "0.52870303" ]
0.6135099
9
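A minimal Python 3 sketch of the API call made by the formatText document above; the endpoint URL and the {"text": ...} / {"result": ...} payload shapes come from that snippet, while the timeout and error check are assumptions added for illustration.

import requests

def format_text(input_text, url='http://34.212.39.136:5678/format'):
    # POST the text as JSON; the service is assumed to reply with {"result": <formatted text>}.
    response = requests.post(url, json={'text': input_text}, timeout=30)
    response.raise_for_status()  # assumed safeguard, not in the original
    return response.json()['result']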
Run a command and echo it first
def run_cmd(call, cmd, *, echo=True, **kwargs):
    if echo:
        print('$> ' + ' '.join(map(pipes.quote, cmd)))
    return call(cmd, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(command):\n if arguments['--dry-run']:\n print command\n else:\n subprocess.call(command, shell=True)", "async def terminal(event):\r\n command = utils.raw(event.message)\r\n await event.edit(f\"**Running command:**\\n`{command}`\")\r\n result = subprocess.getoutput(command)\r\n await event.edit(f\"**Running command:**\\n`{command}`\\n**Result:**\\n`{result}`\")", "def system_call(command):\n print(\"\\n### {}\".format(command))\n stderr = subprocess.STDOUT\n pipe = subprocess.Popen(\n command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n )\n stdout, stderr = pipe.communicate()\n print(stdout)", "def run_command(opts, cmd):\n print(cmd)\n if not opts.dryrun:\n print(check_output(cmd, shell=True))", "def shell(cmd):\n print('Running \"{}\"...'.format(cmd))\n subprocess.check_call(cmd, shell=True)", "def local_command(command):\n print('Executing command: {0}\\n'.format(command))\n p = Popen([command], stdout=PIPE, stderr=PIPE, shell=True)\n while True:\n line = p.stdout.readline()\n if not line:\n break\n line = line.strip()\n print(line)\n stdout, stderr = p.communicate()\n print(stdout)\n print(stderr)", "def shell(cmd, check=True):\n eprint(f\"+ {cmd}\")\n return run(cmd, shell=True, check=check)", "def runCommand(command):\n None", "def print_stdout(command):\n sys.stdout.write(\"%s\\n\" % command)\n sys.stdout.flush()", "def run(self, command):\n try:\n print(f\"RUNNING: {command}\")\n print(\"-\" * 80)\n print(subprocess.check_output(command, shell=True).decode('utf-8'))\n except subprocess.CalledProcessError as e:\n print(f\"ERROR calling '{command}'\")\n print(\"-\" * 20)\n print(e.output and e.output.decode('utf-8'))\n sys.exit(-1)", "def exec_cmd(command):\r\n global _verbose\r\n debug(\"Executing command: %s\" % command)\r\n if not _verbose:\r\n command = \"%s > /dev/null 2>&1\" % command\r\n resp = os.system(command)\r\n if resp != 0:\r\n exit(\"Command [%s] failed\" % command, resp)", "def run(cmd: str, verbose: bool = False):\n\n if verbose:\n print(cmd)\n\n out = subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n if verbose:\n print(out)\n\n return out", "def cmd(command):\n pflush(\"[%s]> %s\" % (HOSTNAME, command))\n code = os.system(command)\n if code != 0:\n raise RuntimeError(\"Error executing: \" + command)", "def run_command(self, command):\n subprocess.call(command, shell=True)", "def run(cmd, comment):\n print(\"―\" * 80)\n if comment:\n print(f\"💬 {comment}\")\n print(f\"➤ {cmd}\")\n proc = subprocess.run(cmd, shell=True) # nosec\n if proc.returncode == 0:\n print(\"✅ success\")\n else:\n print(f\"❌ ERROR! 
return code: {proc.returncode}\")\n sys.exit(proc.returncode)", "def run_command(self, command, timeout=None, stdout=True):\n print('Running \"{}\"...'.format(command))\n output = self._shell.run_command(\n command, timeout=timeout, async_=False\n )\n if stdout:\n print(output)\n print(\"Done!\")\n return output", "def run(cmd):\n cmd = str(cmd)\n\n if env['verbose']:\n sys.stdout.write('--> %s\\n' % cmd)\n\n cmd_list = shlex.split(cmd)\n\n p = subprocess.Popen(\n cmd_list,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n\n return p.communicate()", "def shell_cmd(self, cmd):\n cmd_ex = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT)\n output = cmd_ex.communicate()[0]", "def execute(cmd) :\n return os.system( cmd )", "def do_command(command):\n send_command(command)\n response = get_response()\n print(\"Rcvd: <<< \\n\" + response)\n return response", "def execute_command(self):\n return ''", "def execute_command(cmd):\n popen = Popen(cmd, stdout=PIPE, stderr=PIPE)\n stdout = b''\n while True: # Save output to youtube_stdout while this being echoed\n tmp = popen.stdout.read(1)\n stdout += tmp\n _print(tmp, end=\"\")\n sys.stdout.flush()\n # do it until the process finish and there isn't output\n if tmp == b\"\" and popen.poll() is not None:\n break", "def run_command(command):\n os.system('(echo {} | {})&'.format(command, SHELL))", "def _printAndRun(self, logger, prefix, command, check=False):\n logger.info(prefix + \"Run: {}\".format(command), False)\n subprocess.run(command, check=check)", "def run_command(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE)\n while True:\n output = process.stdout.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n print output.strip()\n\n rc = process.poll()\n return rc", "def issue(self, cmd):\n self.send([cmd])\n return self.read_until_prompt()[1:] # drop the echo", "def run(cmd):\n print(cmd)\n r = os.system(cmd)\n if r:\n print(\"ERROR: command returned {0}\".format(r))\n sys.exit(r)", "def cmd( self, *args, **kwargs ):\n verbose = kwargs.get( 'verbose', False )\n log = info if verbose else debug\n log( '*** %s : %s\\n' % ( self.name, args ) )\n self.sendCmd( *args, **kwargs )\n return self.waitOutput( verbose )", "def run_command(cmd):\n\n proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n for line in iter(proc.stdout.readline, b''):\n print(\">>> \" + line.rstrip())\n (stdout, stderr) = proc.communicate()\n return proc.returncode == 0, proc", "def do_shell(self, command):\n proc = subprocess.Popen(command, stdout=self.stdout, shell=True)\n proc.communicate()", "def run_cmd(cmd):\n return check_output(cmd, shell=True).decode('utf-8')", "def test_command(self):\n\n expected = \"PyFunceble has been written by Fun Ilrys.\"\n actual = Command(\"echo '%s'\" % expected).execute()\n\n self.assertEqual(expected + \"\\n\", actual)", "def Run(self, text):\n self.cli.command_count += 1\n status = self.coshell.Run(text)\n if status > 128:\n # command interrupted - print an empty line to clear partial output\n print()\n return status # currently ignored but returned for completeness", "def do_shell(self, command):\n os.system(command)", "def do_command(self, args):\n chk_arg_count(args, 0)\n sys.stdout.write('temare %s\\n' % (version.__version__, ))", "def exec_cmd(cmd):\n print(' '.join(str(e) for e in cmd))\n try:\n res = subprocess.run(cmd, capture_output=True, check=True)\n 
print(res.stdout.decode(\"utf8\"))\n return res\n except subprocess.CalledProcessError as err:\n logging.error(err.stderr)\n raise err", "def run_command(cmd_str):\n cmd = Command(\"Local Command\", cmd_str)\n cmd.run(validateAfter = True)\n results = cmd.get_results()\n\n if results.rc != 0:\n return results.stderr.strip()\n else:\n return results.stdout.strip()", "def do_command(command):\n send_command(command)\n # time.sleep(0.1) # may be required on slow machines\n response = get_response()\n print(\"Rcvd: <<< \" + response)\n return response", "def runCommand(command):\n p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=1)\n for line in p.stdout:\n print (line.decode(\"utf-8\"),end=\"\") # the end=\"\" argument to print prevents unwanted newlines after each line\n p.wait()", "def _execute_command(self, cmd):\n LOG.info(\"Executing: %s\" % cmd)\n status, stdout, stderr = self.client.execute(cmd)\n if status:\n raise RuntimeError(\"Failed executing command: \",\n cmd, stderr)\n return stdout", "def subprocess_run(cmd):\n print(shlex.join(cmd))\n try:\n ret = subprocess.run(cmd, capture_output=True,\n text=True, env=os.environ.copy(), check=True)\n if (ret.stdout):\n print(ret.stdout)\n return ret\n except subprocess.CalledProcessError as e:\n if (e.stderr):\n print(e.stderr)\n raise e", "def send_command(self, cmd, shell=None, silent=False):", "def call_command(self, name, *args, **kwargs):\r\n out = StringIO() # To Capture the output of the command\r\n call_command(name, *args, stdout=out, **kwargs)\r\n out.seek(0)\r\n return out.read()", "def echo_command(command: Sequence[str], *, save_to: Optional[Path]) -> Sequence[str]:\n output = \" \".join(shlex.quote(part) for part in command)\n print(output)\n if save_to is not None:\n with save_to.open(mode=\"a\", encoding=\"utf-8\") as save_to_file:\n print(output, file=save_to_file)\n return command", "def run_shell(cmd: str):\n print_color(f\"** RUNNING: {cmd}\")\n os.system(cmd)", "def Executingbysubprocess(command):\n result = subprocess.Popen(command, shell=True, stdout=PIPE).stdout\n output = result.read()\n print output", "def command():\n pass", "def run(command):\n\n out = \"\"\n try:\n out = str(subprocess.check_output(command,\n shell=True,\n universal_newlines=True))\n except subprocess.CalledProcessError as e:\n raise RuntimeError(\n 'Failed to execute command %s: %s' % (e.cmd, e.returncode))\n else:\n return out", "def run(self,command):\n #--------------------------------------------------------------------------\n res = subprocess.run(command,stdout=subprocess.DEVNULL,stderr=subprocess.STDOUT).returncode\n return res", "def run_command(cmd):\n return subprocess.call(cmd, shell=True)", "def cmd(self, command):\n self.enode.get_shell('bash').send_command(command, matches=self.scapy_prompt)\n response = self.enode.get_shell('bash').get_response()\n return response", "def run_command(command: str) -> str:\n path_command = f\"PATH={shell_path()} {command}\"\n status, output = getstatusoutput(path_command)\n if status == 0:\n return output\n raise ShellError(status, output)", "def run(cmd: str) -> None:\n subprocess.run(cmd, shell=True, check=True)", "def run_command(cmd, debug=False):\n if debug:\n msg = ' PWD: {}'.format(os.getcwd())\n print_warn(msg)\n msg = ' COMMAND: {}'.format(cmd)\n print_warn(msg)\n cmd()", "def shellcommand(command):\n\n subprocess.call(str(command))", "def run_human_command( self, command ):\n self.run_command( self._convert_command( command ) )", "def 
quick_execute(self,command,interactive=False,stdin=None,stdout=sys.stdout,stderr=sys.stderr):\n self.execute_command(command=command,interactive=interactive,stdin=stdin,stdout=stdout,stderr=stderr)", "def run(command):\n return Effect(Run(command=command))", "def do_shell(self, line):\n print 'Running shell command:', line\n output = os.popen(line).read()\n print output\n self.last_output = output", "def run_next_action():\n os.environ[\"BROWSER\"] = 'echo %s'\n result = subprocess.run(context.arguments, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding=\"utf-8\")\n return result.stdout + result.stderr", "def system_call(command):\n p = subprocess.Popen([command], stdout=subprocess.PIPE, shell=True)\n return p.stdout.read()", "def sys_exec(command):\n print('Running: {}'.format(command))\n return os.popen(command).read().rstrip()", "def shell(self, cmd):\n raise NotImplementedError", "def run(cmd):\n print('running', cmd)\n proc = sp.Popen([cmd], shell=True)\n proc.wait()\n assert proc.poll() == 0", "def _run_command(command):\n full_command = \"xcrun simctl %s\" % (command,)\n # Deliberately don't catch the exception - we want it to bubble up\n return subprocess.check_output(full_command, universal_newlines=True, shell=True)", "def run_cmd(cmd):\n logging.debug('Run command \"'+cmd+'\"')\n try:\n process = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n process.check_returncode()\n\n except Exception as e:\n logging.exception(str(e) +\"\\nCMD_SHELL : \"+cmd+\"\\nSTDOUT : \"+process.stdout.decode()+\"\\nSTDERR : \"+process.stderr.decode(), exc_info=True)\n #logging.critical(\"{CDM : \"+cmd+\", \"} : \"+cmd)\n #logging.critical(\"STDOUT : \"+process.stdout.decode())\n #logging.critical(\"STDERR : \"+process.stderr.decode())\n #raise e\n\n return process.stdout.decode()", "def executeCommand(command):\n time.sleep(1)\n #return os.system(command)\n subprocess.Popen(command, shell=True)", "def do_command(): # pragma: no cover\n args = parse_args(sys.argv[1:])\n status = run(args)\n sys.exit(status)", "def RunCommand(command):\n proc = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT, shell=False)\n return proc.communicate()[0]", "def _say(text):\n subprocess.call(['say', text])", "async def command(self,ctx):\n await ctx.send(\"Yes this is a command.\")", "def test_command(self):\n output, _error = self.executor.command(['echo', 'hello']).batch()\n self.assertEqual(output, 'hello\\n')", "def cmd(*args, **kwargs):\n cmd_s = ' '.join(args)\n print('+ {}'.format(cmd_s))\n proc = subprocess.Popen(cmd_s, shell=True, stdout=subprocess.PIPE, **kwargs)\n for line in iter(proc.stdout.readline, ''):\n sys.stdout.write('> {}'.format(line))\n while proc.poll() is None:\n time.sleep(0.5)\n if proc.returncode != 0:\n raise CmdError(cmd_s, proc.returncode)", "def myrun(cmd):\n\tp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\tstdout = []\n\twhile True:\n\t\tline = p.stdout.readline()\n\t\tstdout.append(line)\n\t\t#print line\n\t\tph1 = line[9:19]\n\t\t#print (ph1)\n\t\tif ph1 == 'no carrier':\n\t\t\tmail(\"NOT WORKING\")\n\t\t\ttime.sleep(60)", "def run_cmd(self):\r\n self.run = True", "def shell_command(context, cmd, err_msg=\"Shell command error\"):\n try:\n\n context.last_cmd = cmd\n output = check_output(cmd, shell=True, cwd=os.getcwd())\n context.output = output\n\n except:\n raise Exception(err_msg)", "def do_command(self, args):\n pass", "def system_call(command):\n process = 
subprocess.Popen(command.split(), stdout=subprocess.PIPE)\n return process.communicate()[0]", "def execute(self, cmd=\"\", msg=\"\", speak=False, duration=0):\n\n self.speak = speak\n\n if self.server or not self.testing:\n if self.speak:\n self.say(msg)\n try:\n subprocess.Popen([\"notify-send\", \"Dragonfire\", msg])\n except BaseException:\n pass\n if cmd != \"\":\n time.sleep(duration)\n try:\n subprocess.Popen(cmd, stdout=FNULL, stderr=FNULL)\n except BaseException:\n pass\n return msg", "def Run(cmd):\n return os.popen(cmd).read()", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "def runCommand(cmd):\n print cmd\n args = shlex.split(cmd)\n p = subprocess.Popen(args) # shell=bash is not recommended. Only use when '>' must be in cmd. \n return p.communicate()\n #p = Popen(cmd.split(' '), stdout=PIPE)\n #return p.communicate()", "def run_cmd(cmd_str, show=True):\n\n proc = subprocess.run(cmd_str.split(), capture_output=True, encoding=\"utf-8\")\n if proc.returncode != 0:\n print(f'*** ERROR occurred during subprocess.run call, command: ***\\n{\" \".join(proc.args)}'\n f'\\n*** stderr: ***\\n{proc.stdout.strip()}')\n raise RuntimeError\n else:\n if show:\n print(proc.stdout.strip())\n return proc.stdout.strip()", "def execCMD(self, cmd, arg):\n result = subprocess.check_output([cmd, arg])\n return result", "def RunCommand(command, debug=False, print_command=False):\n if( print_command ):\n print '[localhost] local: %s' % command\n process_handle = subprocess.Popen(shlex.split(str(command)), \n stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout_output, stderr_output = process_handle.communicate()\n\n return_string = '%s\\n%s' % (stderr_output, stdout_output)\n return_code = process_handle.returncode\n\n if( debug ):\n print return_string\n if( return_code is None ):\n return_code = 0\n return (return_string, return_code)", "def run(self, stdout=None, stderr=None):", "def print_command(self):\n self.success = False\n command = ['lame', '-h', '--silent']\n command.append('-b ' + str(self.bitrate))\n command.append(self.source)\n command.append(self.target)\n print(' '.join(command))", "def runCommand(AppCommand,quickLogger,run=True,printCmd=True):\n\n if(run):\n AppCommand.run(print_command=printCmd)\n quickLogger.info(AppCommand.command())\n else:\n print AppCommand.command()", "def system(cmd):\n print cmd\n try:\n output = subprocess.check_output(cmd, shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n print \"\"\"Command\n %s\nfailed\"\"\" % cmd\n print 'Return code:', e.returncode\n print e.output\n print(\"Hello\")\n sys.exit(1)\n print output\n f = open(logfile, 'a'); f.write(output); f.close()\n unix_command_recorder.append(cmd) # record command for bash script\n return output", "def do(self, line): \n self.interface.onecmd(line)", "def cli(username):\n cmd = 'echo Hello {0}'.format(username)\n return subprocess.call(cmd, shell=True)", "def run_command(command, env=None):\n merged_env = os.environ\n if env:\n merged_env.update(env)\n process = subprocess.Popen(\n 
command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n shell=True,\n env=merged_env,\n )\n while True:\n line = process.stdout.readline()\n line = str(line, \"utf-8\")[:-1]\n print(line)\n if line == \"\" and process.poll() is not None:\n break\n\n if process.returncode != 0:\n raise Exception(\n f\"Non zero return code: {process.returncode}\\n\" f\"{command}\\n\\n{process.stdout.read()}\"\n )", "def run_command(cmd):\n p = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)\n data = p.communicate()\n return p.returncode == 0", "def run_command(command: str) -> str:\n full_command = f\"xcrun simctl {command}\"\n # Deliberately don't catch the exception - we want it to bubble up\n return subprocess.run(\n full_command,\n universal_newlines=True,\n shell=True,\n check=True,\n stdout=subprocess.PIPE,\n ).stdout", "async def run_command(self, cmd: str) -> None:\n # pause logic\n if not self.running.is_set():\n self.add_to_output(\"Paused...\")\n await self.running.wait()\n result = None\n # get/create ssh connection to miner\n conn = await self.get_connection(\"root\", \"admin\")\n # send the command and store the result\n for i in range(3):\n try:\n result = await conn.run(cmd)\n except:\n if i == 3:\n self.add_to_output(f\"Unknown error when running the command {cmd}...\")\n return\n pass\n # let the user know the result of the command\n if result is not None:\n if result.stdout != \"\":\n self.add_to_output(result.stdout)\n if result.stderr != \"\":\n self.add_to_output(\"ERROR: \" + result.stderr)\n elif result.stderr != \"\":\n self.add_to_output(\"ERROR: \" + result.stderr)\n else:\n self.add_to_output(cmd)", "def run(cmd: list) -> str:\n\n try:\n s = subprocess.run(cmd, shell=True, check=True, capture_output=True)\n except subprocess.CalledProcessError as error:\n out = error.stderr or error.stdout\n raise Failure(out.decode().strip())\n\n return s.stdout.decode(\"iso-8859-1\").strip()", "def shell(cmd):\n return G.DEVICE.shell(cmd)", "def call(cmd):\n if isinstance(cmd, str):\n cmd = shlex.split(cmd)\n return check_call(cmd, stdout=sys.stdout, stderr=sys.stderr)", "def subprocess_cmd(command):\n process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n proc_stdout = process.communicate()[0].strip()\n\n return(proc_stdout)" ]
[ "0.69847924", "0.6959119", "0.69450945", "0.6919458", "0.6859296", "0.6812634", "0.67663026", "0.6756266", "0.67383146", "0.67201954", "0.670066", "0.6692517", "0.66810435", "0.6675754", "0.6665769", "0.6646724", "0.65908474", "0.6578724", "0.65752876", "0.6571929", "0.65608656", "0.6559181", "0.6520646", "0.64916915", "0.64899254", "0.6484098", "0.6469638", "0.6465695", "0.646413", "0.643645", "0.6435354", "0.6428957", "0.6422641", "0.64206606", "0.6419292", "0.64053476", "0.6398217", "0.6382091", "0.63790375", "0.63616306", "0.63614404", "0.6359364", "0.63526076", "0.6346588", "0.6340691", "0.6339104", "0.6338442", "0.6319333", "0.6316727", "0.631355", "0.63129807", "0.6311397", "0.6301927", "0.6301645", "0.62986517", "0.628627", "0.6286079", "0.62726545", "0.62724113", "0.62705576", "0.6259337", "0.6252888", "0.62524766", "0.6221792", "0.6216603", "0.6208291", "0.6204853", "0.6197294", "0.61901236", "0.61885154", "0.617645", "0.6172066", "0.61706305", "0.6165796", "0.6157693", "0.61554915", "0.6150256", "0.6145884", "0.61391085", "0.61388284", "0.6127139", "0.6127139", "0.6127139", "0.61178446", "0.6115428", "0.61140937", "0.6112993", "0.61111045", "0.6109135", "0.6097553", "0.6092747", "0.60910517", "0.6088366", "0.6084936", "0.6083647", "0.6073544", "0.60671663", "0.60610986", "0.6058069", "0.60543424" ]
0.696063
1
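A usage sketch for the run_cmd helper above, with shlex.quote standing in for the deprecated pipes.quote and subprocess.run as the runner — both are assumed substitutions for illustration, not part of the original.

import shlex
import subprocess

def run_cmd(call, cmd, *, echo=True, **kwargs):
    # Echo the shell-quoted command line before delegating to the supplied runner.
    if echo:
        print('$> ' + ' '.join(shlex.quote(part) for part in cmd))
    return call(cmd, **kwargs)

# Prints "$> git status --short", then runs it and raises on a non-zero exit.
run_cmd(subprocess.run, ['git', 'status', '--short'], check=True)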
Return the URL for remote git repository. Depending on the system setup it returns ssh or https remote.
def git_remote(git_repo):
    github_token = os.getenv(GITHUB_TOKEN_KEY)
    if github_token:
        return 'https://{0}@github.com/{1}'.format(
            github_token, git_repo)
    return 'git@github.com:{0}'.format(git_repo)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def git_remote_url(self):\n return self._git_remote_url", "def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))", "def get_repository_uri(self) -> str:\n try:\n url = subprocess.check_output(\n ['git', 'config', '--get', 'remote.origin.url']\n ).decode('utf-8').strip()\n return self.norm_uri(url)\n except subprocess.CalledProcessError as error:\n # no remote origin defined, log and continue\n logger.debug('Unable to get remote origin {}'.format(str(error)))\n return None", "def repo_url(self):\n return self._repo_url", "def get_gitlab_remote(self):\n return self.get_remote('gitlab')", "def get_api_url(self):\n\n url = 'https://api.{}/repos/{}/{}/git/'.format(HOST_GITHUB, \\\n self.repo, self.product)\n return url", "def remote_origin_url(self):\n if self._remote_origin_url:\n return self._remote_origin_url\n\n topleveldata = self.git(\"config\", \"--get\", \"remote.origin.url\")\n self._remote_origin_url = topleveldata[0]\n return self._remote_origin_url", "def get_remote(repo, name='origin'):\n config_name = 'remote.{}.url'.format(name)\n return subprocess.check_output(['git', 'config', '--get',\n config_name], cwd=repo).rstrip()", "def github_url(self):\n return self.github.replace('.git', '')", "def repo_link(repo):\n return \"https://github.com/\" + repo", "def get_git_upstream_remote():\n cmd = \"git remote get-url upstream\"\n if run_cmd(cmd):\n return \"upstream\"\n else:\n return \"origin\"", "def get_git_repo_url(path: str) -> Optional[str]:\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n\n try:\n repo = Repo(path, search_parent_directories=True)\n return next((remote.url for remote in repo.remotes), None)\n except Exception:\n return None", "def get_repo_url(package_name):\n package_info = get_package_info(package_name)\n\n if package_info and package_info.get('links'):\n links = package_info['links']\n\n if links.get('repository'):\n return links['repository']", "def get_repo_url(repo, access_protocol, github_login):\n prop = {\n 'https': repo.clone_url,\n 'ssh': repo.ssh_url\n }[access_protocol]\n if access_protocol == 'https' and github_login:\n # we were provided explicit github login. 
For ssh access it is\n # impossible to specify different login within ssh RI, but it is\n # possible to do so for https logins\n url = URL(prop)\n assert url.scheme in ('http', 'https')\n url.username = github_login\n prop = url.as_str()\n return prop", "def current_github_repo(remote='origin'):\n\n proc = run(['git', 'remote', 'get-url', remote], stdout=PIPE)\n url = proc.stdout.strip().decode('utf-8')\n\n if 'github.com' in url:\n match = re.search('([^:/]+)/([^/]+)\\.git$', url)\n org = match.group(1)\n repo = match.group(2)\n return (org, repo)\n else:\n return None", "def _getRemoteUrlTheOldWay(self):\n utool = getUtility(IURLTool)\n if self.remote_url:\n return utool() + '/' + self.remote_url\n else:\n return utool()", "def get_config_from_remote_git(git_url):\n raise ConfigError('%s is an URL to a git repo but this functionality is '\n 'currently unsupported' % (git_url))", "def get_repository_uri(self) -> str:\n raise NotImplementedError", "def GetGerritFetchUrl(host):\n return 'https://%s/' % host", "def base_branch_remote(self):\n return self.git.config('--get', 'branch.{}.remote'.format(self.base_branch))", "def git_remote_settings(self) -> Optional[pulumi.Input['RepositoryGitRemoteSettingsArgs']]:\n return pulumi.get(self, \"git_remote_settings\")", "def git_remote_settings(self) -> Optional[pulumi.Input['RepositoryGitRemoteSettingsArgs']]:\n return pulumi.get(self, \"git_remote_settings\")", "def git_remote(uri):\n remotes = git(['remote', '-v']).split('\\n')\n pattern = re.compile(r'(?iu)^(?P<name>[^ ]+)[\\t]+bzr::(?P<remote>[^ ]+)')\n for remote in remotes:\n log.debug('check remote: %s', remote)\n matches = pattern.match(remote)\n if matches and matches.group('remote') == uri:\n return matches.groups()\n return None, None", "def get_github_url(package_name: str, user_name: str):\n # Will keep ssh version for reference.\n # '%s @ git+ssh://[email protected]/draustin/%s.git'%(name, name)\n return '%s @ git+https://github.com/%s/%s.git'%(package_name, user_name, package_name)", "def get_repo_name(remote_name=\"origin\"):\n remote = check_output([\"git\", \"config\", \"--get\", \"remote.{0}.url\".format(remote_name)],\n cwd=get_repo_dir()).strip()\n remote = remote.split(\"/\")[-1]\n return remote[:-4] if remote.endswith(\".git\") else remote", "def RepositoryUrl(name):\n repository = ndb.Key(Repository, name).get()\n if not repository:\n raise KeyError('Unknown repository name: ' + name)\n return repository.urls[0]", "def git_remote_settings(self) -> pulumi.Output[Optional['outputs.RepositoryGitRemoteSettings']]:\n return pulumi.get(self, \"git_remote_settings\")", "def repo_full_name_from_remote(remote_url):\n # Check whether we have a https or ssh url\n if remote_url.startswith(\"https\"):\n path = urllib.parse.urlparse(remote_url)\n path = path.path\n # Remove the intial '/'\n path = path[1:]\n # Remove extension\n path = os.path.splitext(path)[0]\n else:\n # Remove the initial `git@``\n path = remote_url.split(\"@\")\n path = path[-1] if len(path) > 1 else path[0]\n path = urllib.parse.urlparse(path)\n path = path.path\n # Remove extension\n path = os.path.splitext(path)[0]\n return path", "def scm_url(self):\n return self._data.get('scm_url')", "def lookup_scm_url(package_location):\n scm_cfg = configparser.ConfigParser()\n if os.path.exists('%s/.git' % package_location):\n scm_cfg.read('%s/.git/config' % package_location)\n if 'remote \"origin\"' in scm_cfg:\n return scm_cfg['remote \"origin\"'].get('url')\n elif os.path.exists('%s/.hg' % package_location):\n 
scm_cfg.read('%s/.hg/hgrc' % package_location)\n if 'paths' in scm_cfg:\n return scm_cfg['paths'].get('default')", "def push_url(self):\n\n return maybe_string(C.git_remote_pushurl(self._remote))", "def _get_git_url_if_present(uri):\n if '#' in uri:\n # Already a URI in git repo format\n return uri\n try:\n from git import Repo, InvalidGitRepositoryError, GitCommandNotFound, NoSuchPathError\n except ImportError as e:\n print(\"Notice: failed to import Git (the git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\" % e, file=sys.stderr)\n return uri\n try:\n # Check whether this is part of a git repo\n repo = Repo(uri, search_parent_directories=True)\n\n # Repo url\n repo_url = \"file://%s\" % repo.working_tree_dir\n\n # Sub directory\n rlpath = uri.replace(repo.working_tree_dir, '')\n if (rlpath == ''):\n git_path = repo_url\n elif (rlpath[0] == '/'):\n git_path = repo_url + '#' + rlpath[1:]\n else:\n git_path = repo_url + '#' + rlpath\n return git_path\n except (InvalidGitRepositoryError, GitCommandNotFound, ValueError, NoSuchPathError):\n return uri", "def git_remote(**kw):\n return sh('git', 'remote', **kw).strip().split('\\n')", "def scm_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"scm_url\")", "def get_url(self):\n if self.url:\n return self.url\n # if we have a uuid and happen to know the URL for it, use that\n elif self.uuid and PathIdentifier.repo_hints.has_key(self.uuid):\n self.url = PathIdentifier.repo_hints[self.uuid] + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n return self.url\n # if we've only seen one rep, use that (a guess, but an educated one)\n elif not self.uuid and len(PathIdentifier.repo_hints) == 1:\n uuid, root = PathIdentifier.repo_hints.items()[0]\n if uuid:\n self.uuid = uuid\n PathIdentifier.locobjs['uuid://%s%s' % (uuid, self.repo_relative_path)] = self\n self.url = root + self.repo_relative_path\n PathIdentifier.locobjs[self.url] = self\n report(\"Guessing that '%s' refers to '%s'\" % (self, self.url))\n return self.url\n else:\n error(\"Cannot determine URL for '%s'; \" % self +\n \"Explicit source argument (-S/--source) required.\\n\")", "def test_fix_repo_url():\n repo_url_git = 'git://github.com/Tinche/bower-cache'\n repo_url_https = 'https://github.com/Tinche/bower-cache'\n fixed_url_https = 'https://:@github.com/Tinche/bower-cache'\n assert repo_url_git == gitwrapper._fix_repo_url(repo_url_git)\n assert fixed_url_https == gitwrapper._fix_repo_url(repo_url_https)", "def git_url():\n return \"https://github.com/tisnik/victimsdb-sample-data.git\"", "def scm_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"scm_url\")", "def remote(self, name: str, **kwargs: Any) -> Optional[GitRemote]:\n\n try:\n ret = self.cmd.remote.show(\n name=name, no_query_remotes=True, log_in_real_time=True\n )\n lines = ret.split(\"\\n\")\n remote_fetch_url = lines[1].replace(\"Fetch URL: \", \"\").strip()\n remote_push_url = lines[2].replace(\"Push URL: \", \"\").strip()\n if remote_fetch_url != name and remote_push_url != name:\n return GitRemote(\n name=name, fetch_url=remote_fetch_url, push_url=remote_push_url\n )\n else:\n return None\n except exc.LibVCSException:\n return None", "def __gitChangeRemoteUrl(self):\n remotes = self.vcs.gitGetRemotesList(self.project.getProjectPath())\n remote, ok = QInputDialog.getItem(\n None,\n self.tr(\"Rename\"),\n self.tr(\"Select a remote repository:\"),\n remotes,\n 0, False)\n if ok:\n 
self.vcs.gitChangeRemoteUrl(self.project.getProjectPath(), remote)", "def getRemoteUrl(self):\n # try getting the remote object by unique id\n remote_url = self._getRemoteUrlTheOldWay()\n remote_obj = self._getObjectByUid()\n if remote_obj:\n url = remote_obj.absolute_url()\n # update the url when changed (avoid unnecessary ZODB writes)\n if url != remote_url:\n self.edit(url)\n return url\n\n return remote_url", "def git(self):\n return self['git']", "def SvnUrl(self):\n return self._module.url", "def git_remote_url(self, git_remote_url):\n self._git_remote_url = git_remote_url", "def get_url(path, repo=None, rev=None, remote=None):\n with _make_repo(repo, rev=rev) as _repo:\n _require_dvc(_repo)\n out = _repo.find_out_by_relpath(path)\n remote_obj = _repo.cloud.get_remote(remote)\n return str(remote_obj.checksum_to_path_info(out.checksum))", "def _get_repo_url(self, descriptor):\n configured_repositories = config.get('repositories')\n\n # We need to remove the custom \"__name__\" element before we can show\n # which repository keys are defined in the configuration\n configured_repository_names = configured_repositories.keys()\n\n if '__name__' in configured_repository_names:\n configured_repository_names.remove('__name__')\n\n if descriptor['name'] not in configured_repositories:\n if len(configured_repository_names):\n logger.warning(\"Package repository '%s' used in descriptor is not \"\n \"available in Cekit configuration file. \"\n \"Available repositories: %s\"\n % (descriptor['name'], ' '.join(configured_repository_names)))\n else:\n logger.warning(\"Package repository '%s' used in descriptor is not \"\n \"available in Cekit configuration file. \"\n % descriptor['name'])\n return None\n\n return configured_repositories[descriptor['name']]", "def repo_value(url):\n if url == '^':\n return url\n tup = urlsplit(url)\n if tup.scheme or tup.netloc:\n return urlunsplit(tup[:3]+('', ''))\n raise ValueError('URL %(url)r doesn\\'t contain a scheme '\n 'nor a hostname'\n % locals())", "def address_remote(self):\n if self.remote_ip is None or self.port is None:\n return None\n return URL_API.format(ip=self.remote_ip, port=self.port)", "def git_url(fp: str) -> str:\n return f\"https://github.com/pantsbuild/pants/blob/release_{PANTS_SEMVER}/{fp}\"", "def get_repository(url):\n if not '://' in url:\n url = 'https://' + url\n\n parsed_url = urllib.parse.urlparse(url)\n if parsed_url.netloc.endswith('github.com'):\n g = get_github_auth_token()\n repo_url = parsed_url.path.strip('/')\n repo = GitHubRepository(g.get_repo(repo_url))\n return repo\n\n raise Exception('Unsupported url!')", "def _remote_path(self):\n return self._remote_dir", "def query_repo_url(repo_name):\n return buildapi.query_repo_url(repo_name)", "def test_remote(self):\n\n self.assertEqual(description.RepositoryDescription(\n '[email protected]:/example/remote', '/path/to/local').remote,\n implementation.RemoteRepository(\n '[email protected]:/example/remote'))", "def __fetch_remote_source(self):\n # type: () -> Union(Git, None)\n if self.source == 'git':\n return self.git_source_class(**self.configuration).fetch()\n return None", "def getUrls(self):\n # in case you need to move from a read only Url to a writeable one, here it gets replaced\n repopath = self.repositoryUrl().replace(\"[git]\", \"\")\n repoString = utils.replaceVCSUrl(repopath)\n [repoUrl, repoBranch, repoTag] = utils.splitVCSUrl(repoString)\n if not repoBranch and not repoTag:\n repoBranch = \"master\"\n print(\"|\".join([repoUrl, repoBranch, repoTag]))\n return 
True", "def parse_git_repo(potential_url: str) -> Optional[RepoUrl]:\n return RepoUrl.parse(potential_url)", "def add_repo_url(image, repository, repositories):\n try:\n path = repositories[repository]\n path = path.strip(\"/\").replace(\"https://\", \"\").replace(\"http://\", \"\")\n image = \"/\".join([path, image])\n except KeyError:\n raise KeyError(f\"Repository {repository} not defined!\")\n return image", "def git(self) -> Optional['outputs.FeatureMembershipConfigmanagementConfigSyncGit']:\n return pulumi.get(self, \"git\")", "def get_repository(self,options):\n if options.remote:\n try:\n self.remote_fbtest = rpyc.connect_by_service('fbtest',\n service=rpyc.SlaveService)\n except:\n if options.host:\n remote_host = options.host\n else:\n remote_host = 'localhost'\n self.remote_fbtest = rpyc.connect(remote_host,18861,service=rpyc.SlaveService)\n\n r = self.remote_fbtest.root\n repository = r.get_repository()\n else:\n repository = Repository(os.getenv('FBT_REPO',os.getcwd()))\n repository.load()\n return repository", "def svnurl(self):\r\n info = self.info()\r\n return py.path.svnurl(info.url)", "def origin(self):\n for item in os.popen('git remote -v'):\n split_item = item.strip().split()\n if split_item[0] == 'origin' and split_item[-1] == '(push)':\n return split_item[1]", "def get_url(self):\r\n if self.mod.filename:\r\n return self.mod.service.get_mirror() + self.mod.filename", "def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)", "def remote(self):\r\n return self._url.scheme in ('http', 'https')", "def gitHubConnect():\n return HUB", "def get_remote_host(request):\n \n return utilities.get_remote_host(request)", "def repository(self, host: (str), owner: (str), repo: (str)) -> Any:\n\n return search_api(\"repository\", host, owner, repo)", "def get_url_tag_commit(self, git_sha):\n\n url = 'https://{}/{}/{}/commit/{}'.format(\n HOST_GITHUB,\n self.repo,\n self.product,\n git_sha\n )\n return url", "def _get_base_url(self):\n template = config.baseurl_template\n # get distro name and arch\n base_url = template.format(\n host=config.gitbuilder_host,\n proj=self.project,\n pkg_type=self.pkg_type,\n arch=self.arch,\n dist=self.distro,\n flavor=self.flavor,\n uri=self.uri_reference,\n )\n return base_url", "def build_url(cls, config, namespace, name):\n return \"hxxp://mock.repo.url/\" + namespace + \"/\" + name + \".git\"", "def workspace_url(self):\n return os.environ.get('TEAMRAUM_URL', '').strip('/')", "def svn_info_t_repos_root_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def github_link(self):\n if self.test_type == TestType.commit:\n test_type = 'commit'\n test_id = self.commit\n else:\n test_type = 'pull'\n test_id = self.pr_nr\n\n return f\"{self.fork.github_url}/{test_type}/{test_id}\"", "def is_git_repo(template_repo):\n return template_repo.startswith(\"git@\") or \\\n template_repo.startswith(\"https://\")", "def is_git_repo(template_repo):\n return template_repo.startswith(\"git@\") or \\\n template_repo.startswith(\"https://\")", "def getRemoteHost():", "def get_reponame_from_git_url(url: str) -> Optional[str]:\n repo_url = parse_git_repo(url)\n if repo_url:\n return repo_url.repo\n return None", "def config_url(config):\n if 'url' not in config:\n raise Exception('The config file does not contain \"url\"')\n return config['url']", "def get_component_name_from_git(shell_runner):\n returncode, stdout, stderr = shell_runner.run('git config remote.origin.url', capture=True)\n if returncode != 0:\n if 
stdout == '':\n raise UserError('could not get component name from name of remote - '\n 'no remote returned from \"git config remote.origin.url\": ' + stderr)\n else:\n raise Exception(\n 'git returned non-zero exit status - ' + stderr)\n result = search(r'/([^/.\\n]+)(?:\\.git)?$', stdout)\n if result is None:\n raise Exception(\n 'could not get component name from remote \"%s\"' % (stdout))\n return result.group(1)", "def source_repo_url(branch_url_mode, vcs, source_repo, source_repo_branch):\n return {\n 'short': source_repo_branch,\n 'medium': '{source_repo.strpath}#{source_repo_branch}'.format(**locals()),\n 'long': '{vcs}+{source_repo.strpath}#{source_repo_branch}'.format(**locals())\n }[branch_url_mode]", "def getProjectURL():", "def remote_exists(location, remote):\n ensure_dir(location)\n with utils.cd(location):\n cmd = '/usr/bin/git config --local --get remote.{}.url'.format(remote)\n return subprocess.call(cmd, shell=True) == 0", "def name(self):\n\n return maybe_string(C.git_remote_name(self._remote))", "def remotehost_setup(request, integration_test_setup):\n git_command = request.param[0]\n configholder = request.param[1]\n target = request.param[2]\n get_remotehost_repos(git_command, configholder, target)", "def get_remotehost_repos(git_command, configholder, dest):\n os.chdir(dest)\n REMOTE_USER = configholder.get_config_value(\"REMOTE_USER\")\n REMOTE_ADDR = configholder.get_config_value(\"REMOTE_ADDR\")\n REMOTE_BARE_REPOS_DIR_PATH = configholder.get_config_value(\n \"REMOTE_BARE_REPOS_DIR_PATH\"\n )\n target = f\"{REMOTE_USER}@{REMOTE_ADDR}\"\n target_path = expand_target_path_on_host(\n target, REMOTE_BARE_REPOS_DIR_PATH\n )\n remote_script_target_path = (\n f\"{REMOTE_BARE_REPOS_DIR_PATH}{REMOTE_SCRIPT_GET_BARE_REPOS_NAME}\"\n )\n copy_script_to_host(target, target_path, REMOTE_SCRIPT_GET_BARE_REPOS_PATH)\n bare_repos = execute_script_on_host(\n target, target_path, remote_script_target_path\n )\n remove_script_on_host(target, remote_script_target_path)\n for bare_repo_name in bare_repos:\n git_command(\n join(f\"{target}:{target_path}\", bare_repo_name), bare_repo_name\n )\n os.chdir(\"..\")", "def config(args):\n uname = getpass.getuser()\n name = raw_input('Enter remote name (example: xfer): ') or 'xfer'\n if name in args.remotes:\n sys.exit('\\n{} is already listed as a remote.\\nPlease choose a different name or remove the remote using `git remote remove`\\n'.format(name))\n if args.type == 'ssh':\n server = raw_input('Enter remote url (example: {}@localhost): '.format(uname)) or uname + '@localhost'\n repo = os.path.join(args.home, os.path.basename(args.base))\n dest = raw_input('Enter remote destination for repo (default: {}): '.format(repo)) or repo\n dest = dest.replace('.git', '')\n port = raw_input('Enter port for server (default: 22): ') or 22\n remote = 'ssh://{}:{}{}.git'.format(server, port, dest)\n elif args.type == 's3':\n server = raw_input('Enter remote bucket name (example: mybucket): '.format(uname)) or uname\n remote = 's3://{}'.format(server)\n elif args.type == 'gs':\n server = raw_input('Enter remote bucket name (example: mybucket): '.format(uname)) or uname\n remote = 'gs://{}'.format(server)\n else:\n sys.exit('No rule for processing server type: {}'.format(args.type))\n run('git remote add {} {}'.format(name, remote))\n return", "def svn_url(svninfo=None):\n if svninfo is None:\n svninfo = svn_info()\n return svninfo.find('entry/url').text", "def get_public_baseurl(self):\n server_name = self.get_external_domain()\n tls = self.get_tls()\n 
if self.external_port == 80 and not tls:\n return \"http://{}\".format(server_name)\n elif tls:\n return \"https://{}\".format(server_name)\n return \"http://{}:{}\".format(server_name, self.external_port)", "def svn_info_t_URL_get(svn_info_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def fetch_url(self):\n return self._fetch_url", "def set_git_url(context, url):\n context.url = url", "def test_returns_cloned_repo_by_name_auto_host(self):\n # Need to set up a git repo with origin info.\n full_path = path.join(settings.REPO_ROOT, 'test')\n envoy.run('git init {0}'.format(full_path))\n fake_origin = 'git://localhost'\n envoy.run('git -C {0} remote add origin {1}'.format(full_path,\n fake_origin))\n url = reverse(\"find\", kwargs={'name': 'test'})\n\n del settings.REPO_URL\n\n response = self.client.get(url, HTTP_HOST='test-host')\n\n self.assertEqual(200, response.status_code)\n result = json.loads(response.content.decode())\n expected_url = 'git://test-host/test'\n self.assertEqual(result['url'], expected_url)\n self.assertEqual(result['name'], u'test')", "def as_url(self):\n\n if self.host.startswith(('http://', 'https://')):\n # Some persons have put HTTP details in an FtpUser. At least\n # partially any UI's fault, though still their fault...\n return self.host\n\n protocol, port, host = self.protocol, self.port, self.host\n\n if '://' in host:\n host = host.split('://', 1)[1]\n if '@' in host:\n # Probably already has the username and password embedded.\n # Sensible, I'd say, if contrary to the design of this thing.\n return self.host\n if ':' in host:\n host, port = host.split(':', 1)\n else:\n port = None\n else:\n protocol, port, host = self.protocol, self.port, self.host\n\n if (protocol, port) in (('ftp', 21), ('sftp', 22), ('ftps', 990)):\n port = None\n\n username = self.username\n password = self.decrypt_password()\n return '{scheme}://{auth}{host}{port}/'.format(\n scheme=protocol,\n auth='{}:{}@'.format(urlquote(username), urlquote(password))\n if username or password else '',\n host=host,\n port=':{}'.format(port) if port else '')", "def repo(self):\n return self._repo", "def getRepo(session, name=None, url=None):\r\n\r\n try:\r\n # Look up repository by name\r\n if name is not None:\r\n return session.get_repo(name)\r\n\r\n # Look up repository by clone URL\r\n if url is not None:\r\n # Parse URL\r\n url = urlparse(url)\r\n\r\n # Check that this is a github URL\r\n if not url.hostname.endswith(\"github.com\"):\r\n return None\r\n\r\n # Get repository name from clone URL\r\n name = url.path\r\n if name.startswith(\"/\"):\r\n name = name[1:]\r\n if name.endswith(\".git\"):\r\n name = name[:-4]\r\n\r\n # Look up repository by name\r\n return getRepo(session, name=name)\r\n\r\n except:\r\n pass\r\n\r\n return None", "def __gitShowRemote(self):\n remotes = self.vcs.gitGetRemotesList(self.project.getProjectPath())\n remote, ok = QInputDialog.getItem(\n None,\n self.tr(\"Show Remote Info\"),\n self.tr(\"Select a remote repository:\"),\n remotes,\n 0, False)\n if ok:\n self.vcs.gitShowRemote(self.project.getProjectPath(), remote)", "def test_repository(self):\n os.environ['GITHUB_REPOSITORY'] = 'repo/owner'\n self.assertIsNone(self.env.git_url)", "def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()", "def url(request):\n return request.config.getoption(\"--url\")", "def find_patches_url(configp, user, pkg):\n try:\n patchesbaseurl = configp.get('rhcephpkg', 'patchesbaseurl')\n except configparser.Error:\n log.info('no 
patchesbaseurl configured, skipping patches remote')\n return None\n # Ubuntu python packages are named eg. \"execnet\", whereas the RPM name is\n # \"python-execnet\".\n for module in [pkg, 'python-%s' % pkg]:\n patches_url = patchesbaseurl % {'user': user, 'module': module}\n if check_git_url(patches_url):\n return patches_url" ]
[ "0.8327677", "0.8047084", "0.74365985", "0.74162096", "0.70978475", "0.70588034", "0.7024028", "0.70150566", "0.69381243", "0.69171757", "0.6888431", "0.6873803", "0.6837741", "0.6745437", "0.6676825", "0.66446656", "0.6639019", "0.6631967", "0.6614763", "0.6497674", "0.6481872", "0.6481872", "0.647787", "0.64774543", "0.6455317", "0.6452108", "0.6451547", "0.64480895", "0.64265966", "0.64214766", "0.64090246", "0.64009774", "0.6381457", "0.63638055", "0.63424945", "0.6298732", "0.6267532", "0.624398", "0.6235971", "0.62114173", "0.6196914", "0.61756116", "0.6173882", "0.61501086", "0.61130905", "0.61056215", "0.6072118", "0.60668886", "0.6056623", "0.60358036", "0.6020736", "0.6020489", "0.6011986", "0.5987013", "0.59690505", "0.596579", "0.5962044", "0.5946995", "0.5943067", "0.5920782", "0.5902762", "0.58553725", "0.5847153", "0.5840284", "0.5801865", "0.58007014", "0.57840127", "0.57833785", "0.5780393", "0.57664216", "0.5762281", "0.5759059", "0.57526314", "0.5749288", "0.5749288", "0.5748593", "0.5745891", "0.57358366", "0.5730571", "0.57112765", "0.5694046", "0.56798726", "0.56790406", "0.5673556", "0.56662863", "0.56472415", "0.5642051", "0.5624213", "0.56172526", "0.5610065", "0.5601067", "0.5601034", "0.5596287", "0.5580927", "0.5578613", "0.55742735", "0.5563301", "0.5550305", "0.5544445", "0.55434614" ]
0.75394326
2
Get the last commit to modify the given paths
def last_modified_commit(*paths, **kwargs):
    return check_output([
        'git',
        'log',
        '-n', '1',
        '--pretty=format:%h',
        '--',
        *paths
    ], **kwargs).decode('utf-8')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_diff_to_last_commit(path_to_repository, ignore_subrepositories):\n repo = Repo(path_to_repository)\n if ignore_subrepositories==True:\n unstaged_diff = repo.index.diff(other=None, paths=None, create_patch=False, ignore_submodules=\"all\")\n staged_diff = repo.head.commit.diff(other=Diffable.Index, paths=None, create_patch=False, ignore_submodules=\"all\")\n else:\n unstaged_diff = repo.index.diff(other=None, paths=None, create_patch=False)\n staged_diff = repo.head.commit.diff(other=Diffable.Index, paths=None, create_patch=False)\n\n return unstaged_diff + staged_diff", "def get_first_last_commit_date(path):\n # %at specifies a UNIX time stamp\n process = subprocess.Popen(['git', 'log', '--format=%at'], cwd=path, stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n log = stdout.decode().strip('\\n').split('\\n')\n last = int(log[0])\n first = int(log[-1])\n return (first, last)", "def cur_commit():\n result = run(\n [\"git\", \"rev-parse\", \"HEAD\"], stdout=PIPE, stderr=PIPE, encoding=\"utf-8\",\n )\n result.check_returncode()\n return result.stdout.strip()", "def get_current_commit():\n import os\n import subprocess\n git_dir = \"{}/.git\".format(settings.BASE_DIR)\n if os.name == 'nt':\n git_dir = \"{}\\\\.git\".format(settings.BASE_DIR)\n return subprocess.check_output([\"git\", \"--git-dir={}\".format(git_dir), \"rev-parse\", \"--verify\", \"HEAD\", \"--short\"]).decode(\"utf-8\")", "def get_last_commit_id(commits):\n print(commits)\n if bool(commits):\n return commits[-1].get('id')\n return \"no commits\"", "def getLast():\n try:\n open(os.path.join(basepath, 'last'))\n except IOError:\n try:\n arguments.project\n except NameError:\n print(\"No current project. Start one with -p\")\n exit()\n else:\n f = open(os.path.join(basepath, 'last'), 'w')\n f.write(arguments.project[0])\n f.close()\n store = open(os.path.join(basepath, 'last'), 'r')\n last = store.readline().rstrip('\\n')\n last = [last, 's']\n store.close()\n path = getPath(last[0])\n with open(path, 'r') as log:\n reader = csv.reader(log)\n for row in reader:\n if row[1] == 'a' or row[1] == 's':\n line = row\n try:\n line\n except NameError:\n last[1] = 's'\n else:\n last[1] = line[1]\n return last", "async def get_last_commit(self) -> None:\n _endpoint = f\"/repos/{self.full_name}/branches/{self.default_branch}\"\n response = await self.client.get(endpoint=_endpoint)\n return AIOGitHubAPIReposCommit(response.get(\"commit\", {}))", "def get_last_commit_contains(self, file):\n\n commits = self.get_commits_contains(file)\n return commits[0] if commits else None", "def last_modified_date(*paths, **kwargs):\n return check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%cd',\n '--date=iso',\n '--',\n *paths\n ], **kwargs).decode('utf-8')", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def get_first_commit(repo, changes, since):\n if since:\n first = since\n else:\n first = get_latest_rev(changes)\n\n if first:\n try:\n return repo.rev_parse(first)\n except GitRepositoryError:\n if since:\n raise 
GbsError(\"Invalid commit: %s\" % (first))\n else:\n raise GbsError(\"Can't find last commit ID in the log, \"\\\n \"please specify it by '--since'\")", "def get_commit(self, cwd=None):\n cwd = cwd or self.path\n if isinstance(cwd, str):\n cwd = config.Path(cwd)\n if not cwd.exists():\n return None\n try:\n return subprocess.check_output([\n \"git\", \"rev-parse\", \"HEAD\"\n ], cwd=str(cwd)).decode(\"utf-8\").strip()\n except subprocess.CalledProcessError:\n return \"Failed\"", "def last_commit_date():\n return subprocess.check_output(['git', 'log', '-1', '--pretty=%ad',\n '--date=format:%d %b %H:%M', 'py/calendon']).decode().strip()", "def get_commit():\n cmd = \"git rev-parse HEAD\"\n result = subprocess.run(shlex.split(cmd), stdout=subprocess.PIPE)\n return result.stdout.decode(\"utf-8\").strip()", "def get_git_commit(path: str) -> Optional[str]:\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return None\n try:\n if os.path.isfile(path):\n path = os.path.dirname(path)\n repo = Repo(path, search_parent_directories=True)\n commit = repo.head.commit.hexsha\n return commit\n except Exception:\n return None", "def current_commit(self) -> str:\n # TODO: Do we want short ids?\n head = self.open_repo().head\n if head is None:\n return None # TODO: This is bad\n else:\n return str(head.target)", "def get_sha_commit(self):\n self.get_meta()\n filename = 'lastshacommit'\n # For unittest read from localfile\n if app.config['TEST']:\n filename = 'lastshacommittest'\n app.logger.debug(\"App config set to TEST. Reading shacommit from file \" + filename)\n\n try:\n handle = open(filename, \"r\")\n except Exception as e:\n app.logger.error(\"Error occurred when opening file \" + filename)\n app.logger.error(e)\n raise\n l_shacommit = handle.read().rstrip()\n handle.close()\n return l_shacommit", "def svn_client_commit_item_t_path_get(svn_client_commit_item_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def last_commit_short_log():\n subprocess.check_output('git log -1 --pretty=format:%h:%s'.split()).decode()", "def get_commit_message():\n return shell_output('git log HEAD -1 --pretty=%B')", "def __last_commit_date(self):\n return utils.run('git', ['log', '--all', '-1', '--format=%cI'],\n self.__project.location).rstrip()", "def git_get_mtime_at_commit(path: Path, revision: str, cwd: Path) -> str:\n cmd = [\"log\", \"-1\", \"--format=%ct\", revision, \"--\", path.as_posix()]\n lines = _git_check_output_lines(cmd, cwd)\n return datetime.utcfromtimestamp(int(lines[0])).strftime(GIT_DATEFORMAT)", "def svn_client_commit_item2_t_path_get(svn_client_commit_item2_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def get_commit_hash():\n git_dir = get_git_root()\n args = [\"git\", \"-C\", git_dir, \"rev-parse\", \"--short\", \"--verify\", \"HEAD\"]\n return subprocess.check_output(args).strip().decode()", "def get_current_commit_hash() -> FullCommitHash:\n return get_commit_hash(\"HEAD\")", "def getConfig(self, committed=False, ignore_last=False):\n # go back from the latest entry, find the most recent config entry\n for idx, entry in list(enumerate(self.log))[::-1]:\n if 'config' in entry.command:\n if not committed: break\n if entry.command['config'] == 'joint': break\n if self.commit_idx >= idx and not ignore_last: break\n # print('committed: %s, ignore_last: %s, FETCHED config: %s' % (committed, 
ignore_last, entry.command))\n return entry.command", "async def __last_commit(self, file_path: str) -> SourceResponses:\n files_api_url = await self._gitlab_api_url(\n f\"repository/files/{file_path}?ref={self._parameter('branch', quote=True)}\"\n )\n response = await self._session.head(files_api_url, headers=self._headers())\n last_commit_id = response.headers[\"X-Gitlab-Last-Commit-Id\"]\n commit_api_url = await self._gitlab_api_url(f\"repository/commits/{last_commit_id}\")\n return await super()._get_source_responses(commit_api_url)", "async def set_last_commit(self) -> None:\n _endpoint = f\"/repos/{self.full_name}/branches/{self.default_branch}\"\n response = await self.client.get(endpoint=_endpoint)\n self._last_commit = response[\"commit\"][\"sha\"][0:7]", "def last_rev(self, path, peg_revision, limit_revision=None):\n \n # Here's the plan, man. In the trivial case (where PEG_REVISION is\n # the same as LIMIT_REVISION), this is a no-brainer. If\n # LIMIT_REVISION is older than PEG_REVISION, we can use Subversion's\n # history tracing code to find the right location. If, however,\n # LIMIT_REVISION is younger than PEG_REVISION, we suffer from\n # Subversion's lack of forward history searching. Our workaround,\n # ugly as it may be, involves a binary search through the revisions\n # between PEG_REVISION and LIMIT_REVISION to find our last live\n # revision.\n peg_revision = self._getrev(peg_revision)\n limit_revision = self._getrev(limit_revision)\n if peg_revision == limit_revision:\n return peg_revision, path\n elif peg_revision > limit_revision:\n path = self.get_location(path, peg_revision, limit_revision)\n return limit_revision, path\n else:\n direction = 1\n while peg_revision != limit_revision:\n mid = (peg_revision + 1 + limit_revision) / 2\n try:\n path = self.get_location(path, peg_revision, mid)\n except vclib.ItemNotFound:\n limit_revision = mid - 1\n else:\n peg_revision = mid\n return peg_revision, path", "def get_latest_path(self):\n files = [fname for fname in os.listdir(self.checkpoint_dir) if fname.endswith(\".pth\")]\n filepaths = [os.path.join(self.checkpoint_dir, filepath) for filepath in files]\n latest_file = max(filepaths, key=os.path.getctime)\n return latest_file", "def get_latest_rev(changesfile):\n if os.path.exists(changesfile):\n with open(changesfile) as chlog:\n line = chlog.readline()\n return line.strip().split(\" \")[-1].split(\"@\")[-1]\n return ''", "def get_commits(git_path):\n\n proc = subprocess.Popen(\n [\"git\", \"--git-dir=%s\" % git_path, \"log\", \"--full-history\",\n \"--format=NEW COMMIT%n%ct%n%aN%n%aE\", \"--numstat\"],\n stdout=subprocess.PIPE)\n line_stack = []\n\n def peek_line():\n if not line_stack:\n line_stack.append(proc.stdout.readline())\n return line_stack[-1]\n\n def pop_line():\n if line_stack:\n return line_stack.pop()\n return proc.stdout.readline()\n\n def push_line(line):\n line_stack.append(line)\n\n def read_commit():\n while peek_line() and not peek_line().strip():\n pop_line()\n if not peek_line(): return None\n assert peek_line().strip() == \"NEW COMMIT\"\n pop_line()\n\n date = int(pop_line())\n name = pop_line().strip()\n email = pop_line().strip()\n author = sanitize_author(name, email)\n\n if peek_line().strip() == \"NEW COMMIT\":\n return date, author, 0, 0, 0\n\n pop_line()\n insertion_count = 0\n deletion_count = 0\n file_count = 0\n while peek_line().strip() and peek_line().strip() != \"NEW COMMIT\":\n insertions, deletions, path = pop_line().strip().split(None, 2)\n if insertions == \"-\": insertions = 
0\n if deletions == \"-\": deletions = 0\n insertion_count += int(insertions)\n deletion_count += int(deletions)\n file_count += 1\n\n return date, author, insertion_count, deletion_count, file_count\n\n while True:\n commit = read_commit()\n if commit is None:\n break\n yield commit", "def commit(self):\n return settings.GIT_COMMIT", "def get_commit(self, height=1):\n if height == 'latest' or not height:\n v = self.status()['latest_block_height']\n return self.call('commit', [v])\n if height <= 0:\n raise ValueError(\"Height must be greater then 0\")\n return self.call('commit', [height])", "def get_modified_files(repo, args):\n commit = repo.commit(args.commit)\n return commit.stats.files", "def get_commit_id():\n return about.get_commit_id()", "def _get_git_commit_id():\n from git import Repo\n from os.path import split, dirname\n path = split(dirname(__file__))[0]\n commit_id = Repo(path).head.object.hexsha\n return commit_id[:8]", "def lastPath(self, toNative=True):\n return self.paths(toNative=toNative)[-1]", "def getLastPath(self):\n return self.getSection(CFG_GENERAL, CFG_LASTPATH)", "def get_git_commit():\n try:\n commit = Popen([\"git\", \"describe\", \"--always\"],\n stdout=PIPE).communicate()[0].strip()\n # the following only works in Python 2.7\n # commit = subprocess.check_output(['git', 'describe']).strip()\n return commit\n except OSError:\n return \"unknown\"", "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def svn_client_commit_item3_t_path_get(svn_client_commit_item3_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def getLastFile(self):\n lastFile = None if len(self.recentFiles) == 0 else self.recentFiles[0]\n self.setLastPath(lastFile)\n return lastFile", "async def _get_commit(self: Self, checkout_dir: Path) -> str:\n git_sha_process = await create_subprocess_exec(\n *[\"git\", \"rev-parse\", \"HEAD\"],\n cwd=checkout_dir,\n stdout=PIPE,\n )\n git_sha_stdout, _ = await git_sha_process.communicate()\n assert (\n await git_sha_process.wait() == 0\n ), f\"Failed to retrieve commit sha at {checkout_dir}\"\n return git_sha_stdout.decode().strip()", "def last_rev(svnrepos, path, peg_revision, limit_revision=None):\n \n # Here's the plan, man. In the trivial case (where PEG_REVISION is\n # the same as LIMIT_REVISION), this is a no-brainer. If\n # LIMIT_REVISION is older than PEG_REVISION, we can use Subversion's\n # history tracing code to find the right location. If, however,\n # LIMIT_REVISION is younger than PEG_REVISION, we suffer from\n # Subversion's lack of forward history searching. 
Our workaround,\n # ugly as it may be, involves a binary search through the revisions\n # between PEG_REVISION and LIMIT_REVISION to find our last live\n # revision.\n peg_revision = svnrepos._getrev(peg_revision)\n limit_revision = svnrepos._getrev(limit_revision)\n try:\n if peg_revision == limit_revision:\n return peg_revision, path\n elif peg_revision > limit_revision:\n fsroot = svnrepos._getroot(peg_revision)\n history = fs.node_history(fsroot, path, svnrepos.scratch_pool)\n while history:\n path, peg_revision = fs.history_location(history,\n svnrepos.scratch_pool);\n if peg_revision <= limit_revision:\n return max(peg_revision, limit_revision), _cleanup_path(path)\n history = fs.history_prev(history, 1, svnrepos.scratch_pool)\n return peg_revision, _cleanup_path(path)\n else:\n ### Warning: this is *not* an example of good pool usage.\n orig_id = fs.node_id(svnrepos._getroot(peg_revision), path,\n svnrepos.scratch_pool)\n while peg_revision != limit_revision:\n mid = (peg_revision + 1 + limit_revision) / 2\n try:\n mid_id = fs.node_id(svnrepos._getroot(mid), path,\n svnrepos.scratch_pool)\n except core.SubversionException, e:\n if e.apr_err == core.SVN_ERR_FS_NOT_FOUND:\n cmp = -1\n else:\n raise\n else:\n ### Not quite right. Need a comparison function that only returns\n ### true when the two nodes are the same copy, not just related.\n cmp = fs.compare_ids(orig_id, mid_id)\n\n if cmp in (0, 1):\n peg_revision = mid\n else:\n limit_revision = mid - 1\n\n return peg_revision, path\n finally:\n svnrepos._scratch_clear()", "def get_commit_id():\n p_short = subprocess.Popen(['git describe --always'],\n stdout=subprocess.PIPE, shell=True)\n output_short, err = p_short.communicate()\n if err:\n print 'Error seen: {0}'.format(err)\n raise ValueError(\"Problem invoking 'git describe' command.\")\n\n p_long = subprocess.Popen(['git rev-parse HEAD'],\n stdout=subprocess.PIPE, shell=True)\n output_long, err = p_long.communicate()\n if err:\n print 'Error seen: {0}'.format(err)\n raise ValueError(\"Problem invoking 'git rev-parse HEAD' command.\")\n\n return output_short.rstrip(), output_long.rstrip()", "def main():\n smart_commit_msg_filename = SMART_COMMIT_MSG_FILENAME\n paths = get_staged_paths()\n if not len(paths):\n raise Exception(\"did you even add anything to staging\")\n paths += [smart_commit_msg_filename]\n mr_edited_file = max(paths, key=lambda k: os.path.getmtime(k))\n if mr_edited_file == smart_commit_msg_filename:\n print(git_commit())\n else:\n print(\"Update the patch notes!\")", "def get_first_commit_contains(self, file):\n\n commits = self.get_commits_contains(file)\n return commits[-1] if commits else None", "def __first_commit_date(self):\n return utils.run('git',\n ['log', '--all', '--format=%cI', '--first-parent',\n '--reverse', '--max-parents=0'],\n self.__project.location).splitlines()[0].rstrip()", "def get_current_commit_sha():\n return check_output(\n \"git rev-parse HEAD\".split(\" \")\n ).decode('utf-8').strip()", "def commit(which):\n return subprocess.Popen([p['paths']['w2l'],'lni',os.path.abspath(which)]).wait()", "def get_current_commit_sha(path_to_repository):\n return Repo(path_to_repository).active_branch.commit.hexsha", "def _get_git_hash(self):\n try:\n with open(os.path.join(self._base_dir, '.git', 'HEAD'), 'r') as head_file:\n ref = head_file.read().strip()\n if ref[:5] == 'ref: ':\n with open(os.path.join(self._base_dir, '.git', ref[5:]), 'r') as commit_file:\n return commit_file.read().strip()\n else:\n return ref[5:]\n except Exception as err:\n 
self._logger.warning('Couldnt read the git commit hash: %s :: %s',\n err.__class__.__name__, err)\n return 'UNKNOWN'", "def get_last_update(self):\n last_update = os.path.getmtime(self.parent_filepath)\n return last_update", "def get_changed_files(path_to_repository, ignore_subrepositories):\n diff = _get_diff_to_last_commit(path_to_repository, ignore_subrepositories)\n return [item.b_path for item in diff if item.change_type in _CHANGE_TYPES_CONSIDERED_FOR_PRECOMMIT]", "def get_last_cmd(self):\r\n return self.command_manager.get_last_command()", "def svn_info_t_last_changed_rev_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass", "def get_git_changeset(filename=None):\n dirname = os.path.dirname(filename or __file__)\n git_show = sh('git show --pretty=format:%ct --quiet HEAD',\n cwd=dirname)\n timestamp = git_show.partition('\\n')[0]\n try:\n timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))\n except ValueError:\n return None\n return timestamp.strftime('%Y%m%d%H%M%S')", "def commit_detail(self, commit):\n\n files_changes = {\n diff.a_path for diff in commit.diff()\n }\n\n return {\n 'id': commit.hexsha,\n 'date': time.strftime(\n \"%a %b %d %H:%M:%S %Y\",\n time.gmtime(commit.committed_date)\n ),\n 'message': commit.message,\n 'author_name': commit.author.name,\n 'author_email': commit.author.email,\n 'files_change_number': len(files_changes)\n }", "def getRepoRev(self, path):\r\n\r\n if self.verbose:\r\n print(\"INFO : Getting info in {}\".format(path))\r\n\r\n rev = None\r\n with workInDirectory(path):\r\n\r\n rev_cmd_args = ['git', 'rev-parse', 'HEAD']\r\n\r\n if self.verbose:\r\n print(\"INFO : Running command : {}\".format(\" \".join(rev_cmd_args)))\r\n\r\n rev = SubProcessUtility.runCommand(rev_cmd_args)\r\n\r\n if rev == None:\r\n print(\"Unable to get revision for {}, make sure config is correct\".format(path))\r\n\r\n return rev", "def get_latest_sha(repo):\n cwd = os.getcwd()\n command = \"git rev-list -1 HEAD -- {0}\".format(repo)\n os.chdir(repo)\n git_sha = process_helpers.run(command.split(\" \"))\n os.chdir(cwd)\n return git_sha.strip()", "def repo_rev(self, repository):\n sql = sa.select([sa.func.max(history.c.rev)]).where(history.c.path.like(repository + '%'))\n result = self.engine.execute(sql).first()[0]\n if result is None:\n result = -1\n return result", "def git_sha1_commit():\n return local('git rev-parse --short HEAD', capture=True)", "def current_git_hash():\n git_file = \".git/refs/heads/master\"\n git_path = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),\n os.pardir, os.pardir, git_file))\n\n if not os.path.exists(git_path):\n git_path = os.getcwd() + \"/\" + git_file\n if not os.path.exists(git_path):\n git_path = os.getcwd() + \"/../\" + git_file\n if not os.path.exists(git_path):\n return None\n\n with open(git_path, \"r\") as git:\n git_hash = git.read()\n\n return git_hash[0:5]", "def rev_if_git(ctx: \"PlanemoCliContext\", directory: str) -> Optional[str]:\n try:\n the_rev = rev(ctx, directory)\n is_dirty = is_rev_dirty(ctx, directory)\n if is_dirty:\n the_rev += \"-dirty\"\n return the_rev\n except RuntimeError:\n return None", "def updateLastCommitFile(self):\n f = open(self.last_released, 'w')\n f.write(self.new_rev)\n f.close()", "def get_commit_hash(self, directory):\n\n return (\n subprocess.check_output([\"git\", \"rev-parse\", \"HEAD\"], cwd=directory)\n .decode(\"utf-8\")\n .replace(\"\\n\", \"\")\n )", "def get_recently_modified_scratch_file(settings):\n dir_contents = 
os.listdir(settings.location)\n full_paths = map(lambda f: os.path.join(settings.location, f), dir_contents)\n files = filter(lambda f: os.path.isfile(str(f)), full_paths)\n if not files:\n return \"\"\n files = sorted(files, key=_get_mtime)\n return files[-1]", "def _get_last_modified_date(path):\n last_date = 0\n root_dir, subdirs, files = os.walk(path).next()\n # get subdirs and remove hidden ones\n subdirs = [s for s in subdirs if not s.startswith('.')]\n for subdir in subdirs:\n for root, _, _ in os.walk(join(path, subdir)):\n base = os.path.basename(root)\n # checking if is a hidden path\n if not base.startswith(\".\") and not base.startswith(\"/.\"):\n last_date = max(last_date, os.path.getmtime(root))\n\n # check files of interest in the skill root directory\n files = [f for f in files\n if not f.endswith('.pyc') and f != 'settings.json']\n for f in files:\n last_date = max(last_date, os.path.getmtime(os.path.join(path, f)))\n return last_date", "def get_commit(self, commit_id):\n commit = next((\n commit for commit in self.repo.iter_commits()\n if commit.hexsha == commit_id\n ), None)\n if commit is None:\n raise Exception(f'Commit {commit_id} not found!')\n return commit", "def get_current_timestamp(path_to_repository):\n repo = Repo(path_to_repository)\n return repo.head.commit.committed_date", "def _get_git_branch_and_commit() -> Tuple[str, str]:\n branch_name = \"NO_BRANCH\"\n commit = \"NO_COMMIT\"\n try:\n repo = Repo(__file__, search_parent_directories=True)\n try:\n branch_name = str(repo.active_branch)\n except TypeError:\n pass # Keep current/default branch_name\n commit = str(repo.commit())\n if repo.is_dirty():\n commit += \" + uncomitted changes\"\n except InvalidGitRepositoryError:\n pass # Keep current/default branch_name and commit\n return branch_name, commit", "def test_get_git_commit(self):\n git_commit = get_git_commit()\n # output format: ['fafdb957049917ede565cebc58b29899f597fb5a', 'Fri Mar 29 11:09:50 2019 -0400']\n self.assertEqual(len(git_commit[0]), 40)\n self.assertEqual(len(git_commit[1].split()), 6)", "def svn_fs_youngest_rev(*args):\r\n return _fs.svn_fs_youngest_rev(*args)", "def test_heads_pop_returns_commit(repository: Repository) -> None:\n heads = repository.heads\n heads[\"branch\"] = repository.head.commit\n commit = heads.pop(\"branch\")\n assert repository.head.commit == commit", "def path_touched(*paths, commit_range):\n return check_output([\n 'git', 'diff', '--name-only', commit_range, '--', *paths\n ]).decode('utf-8').strip() != ''", "def get_file_last_modification_date(filename=None):\n with open(filename, 'r') as fp:\n for line in fp:\n if line.startswith('Modify'):\n date_line = line.split()[1]\n file_date = datetime.strptime(date_line, \"%Y-%m-%d\")\n return filename, file_date", "def get_commit_history(self, max_history: int = 500) -> list:\n if max_history < 0:\n raise ValueError(\"max_history needs to be non-negative.\")\n if max_history > 1:\n limit_history = max_history - 1\n else:\n limit_history = 1\n woql_query = (\n WOQLQuery()\n .using(\"_commits\")\n .limit(limit_history)\n .select(\n \"v:cid\",\n \"v:author\",\n \"v:message\",\n \"v:timestamp\",\n \"v:cur_cid\",\n \"v:cur_author\",\n \"v:cur_message\",\n \"v:cur_timestamp\",\n )\n .triple(\"v:branch\", \"ref:branch_name\", self.checkout())\n .triple(\"v:branch\", \"ref:ref_commit\", \"v:commit\")\n .woql_or(\n WOQLQuery()\n .path(\n \"v:commit\",\n \"ref:commit_parent+\",\n \"v:target_commit\",\n \"v:path\",\n )\n .triple(\"v:target_commit\", \"ref:commit_id\", 
\"v:cid\")\n .triple(\"v:target_commit\", \"ref:commit_author\", \"v:author\")\n .triple(\"v:target_commit\", \"ref:commit_message\", \"v:message\")\n .triple(\"v:target_commit\", \"ref:commit_timestamp\", \"v:timestamp\")\n .triple(\"v:commit\", \"ref:commit_id\", \"v:cur_cid\")\n .triple(\"v:commit\", \"ref:commit_author\", \"v:cur_author\")\n .triple(\"v:commit\", \"ref:commit_message\", \"v:cur_message\")\n .triple(\"v:commit\", \"ref:commit_timestamp\", \"v:cur_timestamp\"),\n WOQLQuery()\n .triple(\"v:commit\", \"ref:commit_id\", \"v:cur_cid\")\n .triple(\"v:commit\", \"ref:commit_author\", \"v:cur_author\")\n .triple(\"v:commit\", \"ref:commit_message\", \"v:cur_message\")\n .triple(\"v:commit\", \"ref:commit_timestamp\", \"v:cur_timestamp\"),\n )\n )\n result = self.query(woql_query).get(\"bindings\")\n result_item = result[0]\n cid_list = [result_item[\"cur_cid\"][\"@value\"]]\n result_list = [\n {\n \"commit\": result_item[\"cur_cid\"][\"@value\"],\n \"author\": result_item[\"cur_author\"][\"@value\"],\n \"message\": result_item[\"cur_message\"][\"@value\"],\n \"timstamp\": datetime.fromtimestamp(\n int(result_item[\"cur_timestamp\"][\"@value\"])\n ),\n }\n ]\n if max_history > 1:\n for result_item in result:\n if (\n result_item[\"cid\"] != \"system:unknown\"\n and result_item[\"cid\"][\"@value\"] not in cid_list\n ):\n result_list.append(\n {\n \"commit\": result_item[\"cid\"][\"@value\"],\n \"author\": result_item[\"author\"][\"@value\"],\n \"message\": result_item[\"message\"][\"@value\"],\n \"timstamp\": datetime.fromtimestamp(\n int(result_item[\"timestamp\"][\"@value\"])\n ),\n }\n )\n cid_list.append(result_item[\"cid\"][\"@value\"])\n return result_list", "def get_changeset(self):\n if self._repository:\n return str(self._repository.parents()[0])", "def get_commit_ref(refenv, commit_hash):\n reftxn = TxnRegister().begin_reader_txn(refenv)\n try:\n cmtRefKey = commit_ref_db_key_from_raw_key(commit_hash)\n cmtSpecKey = commit_spec_db_key_from_raw_key(commit_hash)\n cmtParentKey = commit_parent_db_key_from_raw_key(commit_hash)\n\n cmtRefVal = reftxn.get(cmtRefKey, default=False)\n cmtSpecVal = reftxn.get(cmtSpecKey, default=False)\n cmtParentVal = reftxn.get(cmtParentKey, default=False)\n except lmdb.BadValsizeError:\n raise ValueError(f'No commit exists with the hash: {commit_hash}')\n finally:\n TxnRegister().abort_reader_txn(refenv)\n\n if (cmtRefVal is False) or (cmtSpecVal is False) or (cmtParentVal is False):\n raise ValueError(f'No commit exists with the hash: {commit_hash}')\n\n commitRefs = commit_ref_raw_val_from_db_val(cmtRefVal)\n commitSpecs = commit_spec_raw_val_from_db_val(cmtSpecVal)\n commitParent = commit_parent_raw_val_from_db_val(cmtParentVal)\n\n calculatedDigest = cmt_final_digest(\n parent_digest=commitParent.digest,\n spec_digest=commitSpecs.digest,\n refs_digest=commitRefs.digest)\n\n if calculatedDigest != commit_hash:\n raise IOError(\n f'Data Corruption Detected. On retrieval of stored references for '\n f'commit_hash: {commit_hash} validation of commit record/contents '\n f'integrity failed. Calculated digest: {calculatedDigest} != '\n f'expected: {commit_hash}. 
Please alert the Hangar development team to '\n f'this error if possible.')\n\n return commitRefs.db_kvs", "def get_commit(*args, **kwargs):\n return get_commit_async(*args, **kwargs).get_result()", "def git_get_modified_files(\n paths: Iterable[Path], revrange: RevisionRange, cwd: Path\n) -> Set[Path]:\n relative_paths = {p.resolve().relative_to(cwd) for p in paths}\n str_paths = [path.as_posix() for path in relative_paths]\n if revrange.use_common_ancestor:\n rev2 = \"HEAD\" if revrange.rev2 == WORKTREE else revrange.rev2\n merge_base_cmd = [\"merge-base\", revrange.rev1, rev2]\n rev1 = _git_check_output_lines(merge_base_cmd, cwd)[0]\n else:\n rev1 = revrange.rev1\n diff_cmd = [\n \"diff\",\n \"--name-only\",\n \"--relative\",\n rev1,\n # revrange.rev2 is inserted here if not WORKTREE\n \"--\",\n *str_paths,\n ]\n if revrange.rev2 != WORKTREE:\n diff_cmd.insert(diff_cmd.index(\"--\"), revrange.rev2)\n lines = _git_check_output_lines(diff_cmd, cwd)\n if revrange.rev2 == WORKTREE:\n ls_files_cmd = [\n \"ls-files\",\n \"--others\",\n \"--exclude-standard\",\n \"--\",\n *str_paths,\n ]\n lines.extend(_git_check_output_lines(ls_files_cmd, cwd))\n changed_paths = (Path(line) for line in lines)\n return {path for path in changed_paths if should_reformat_file(cwd / path)}", "def tree_lookup(self, target_path, commit):\n segments = target_path.split(\"/\")\n tree_or_blob = commit.tree\n path = ''\n while segments:\n dirent = segments.pop(0)\n if isinstance(tree_or_blob, pygit2.Tree):\n if dirent in tree_or_blob:\n tree_or_blob = self.repo[tree_or_blob[dirent].oid]\n # self.logger.debug('%s in %s' % (dirent, path))\n if path:\n path += '/'\n path += dirent\n else:\n # This is probably because we were called on a\n # commit whose parent added a new directory.\n self.logger.debug(' %s not in %s in %s' %\n (dirent, path, commit.hex[:8]))\n return None\n else:\n self.logger.debug(' %s not a tree in %s' %\n (tree_or_blob, commit.hex[:8]))\n return None\n return tree_or_blob", "def get_git_hash() -> Optional[str]:\n rv = _git('rev-parse', 'HEAD')\n if rv:\n return rv[:6]", "def get_git_timestamp(path):\n return int(_run_command(path, 'git log -1 --format=%ct'))", "def get_command(bare, path):\n\n if bare:\n cmd = [\"git\", \"fetch\"]\n return cmd\n\n directories = list_directories(os.path.join(path, git_signature))\n\n if \"svn\" in directories:\n cmd = [\"git\", \"svn\", \"rebase\"]\n else:\n cmd = [\"git\", \"pull\"]\n\n return cmd", "def getLast(self):\n if self.last != None:\n return self.last.filename\n else:\n return None", "def GetChangeCommit(host, change, revision='current'):\n path = '%s/revisions/%s/commit' % (_GetChangePath(change), revision)\n return FetchUrlJson(host, path)", "def svn_fs_history_location(*args):\r\n return _fs.svn_fs_history_location(*args)", "def fetch_last_model_file(self):\n try:\n filename = self.model_files[-1]\n return self.make_path(filename)\n except IndexError:\n return None", "def commit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"commit\")", "def get_one_path(commands):\n path = []\n last_position = (0, 0)\n for command in commands:\n path += list(apply_one_command(last_position, command))\n last_position = path[-1]\n return path", "def get_commit_hash(repo_location, commit='origin/HEAD'):\n if not os.path.exists(pjoin(repo_location, '.git')):\n raise ValueError\n ret, out = spawn_get_output(\n ['git', 'rev-parse', commit], cwd=repo_location)\n if ret != 0:\n raise ValueError(\n f'failed retrieving {commit} commit hash '\n f'for git repo: 
{repo_location}')\n return out[0].strip()", "def svn_fs_history_prev(*args):\r\n return _fs.svn_fs_history_prev(*args)", "def gitstr():\n try:\n return \"%s\" % (open('.git/refs/heads/master').read().strip()[0:10])\n except FileNotFoundError:\n return \"\"\n except IndexError:\n return \"\"", "def last_modified(path):\n\n date = os.path.getmtime(path)\n date = datetime.datetime.fromtimestamp(date)\n return time.mktime(date.timetuple())", "def get_tree(gitdir=\".\"):\n\n cmd = [\"git\", \"log\", \"--all\", \"--branches\", '--pretty=format:{ \"commit\": \"%H\", \"abbreviated_commit\": \"%h\", \"tree\": \"%T\", \"abbreviated_tree\": \"%t\", \"parent\": \"%P\", \"abbreviated_parent\": \"%p\", \"refs\": \"%d\", \"encoding\": \"%e\", \"subject\": \"%s\", \"sanitized_subject_line\": \"%f\", \"commit_notes\": \"\", \"author\": { \"name\": \"%aN\", \"email\": \"%aE\", \"date\": \"%ai\" }, \"commiter\": { \"name\": \"%cN\", \"email\": \"%cE\", \"date\": \"%ci\" }},']\n\n output = run(cmd)\n lines = output.split(\"\\n\")\n\n content = \"\"\n history = []\n for l in lines:\n try:\n revisedcontent = content + l\n if revisedcontent.count('\"') % 2 == 0:\n j = json.loads(revisedcontent[:-1])\n if \"Notes added by\" in j['subject']:\n content = \"\"\n continue\n history.append(j)\n content = \"\"\n else:\n content = revisedcontent\n except Exception as e:\n print(\"Error while parsing record\")\n print(revisedcontent)\n content = \"\"\n\n # Order by time. First commit first...\n history.reverse()\n\n #\n changes = get_change()\n\n for i in range(len(history)):\n abbrev_commit = history[i]['abbreviated_commit']\n if abbrev_commit not in changes:\n raise Exception(\"Missing changes for \" + abbrev_commit)\n\n history[i]['changes'] = changes[abbrev_commit]['changes']\n\n\n return history", "def last_modified(self) -> str:\n\t\tif not self._closed:\n\t\t\ttimestamp = self.ds.last_modified()\n\t\t\treturn timestamp\n\t\treturn None", "def GetLastBuildRevision(self):\n last_build_revision = None\n if os.path.exists(self.last_change_file):\n last_build_revision = int(open(self.last_change_file).read())\n\n if os.path.exists(self.revisions_path):\n fp = open(self.revisions_path)\n try:\n line = fp.readline()\n\n # TODO(markhuang): remove this block after all builders are updated\n line = line.replace('\\'', '\"')\n\n revisions_dict = simplejson.loads(line)\n if revisions_dict:\n self.last_chromium_revision = revisions_dict['chromium_revision']\n self.last_webkit_revision = revisions_dict['webkit_revision']\n self.last_v8_revision = revisions_dict['v8_revision']\n except (IOError, KeyError, ValueError), e:\n self.last_chromium_revision = None\n self.last_webkit_revision = None\n self.last_v8_revision = None\n print e\n fp.close()\n return last_build_revision", "def _last_roll_revision(self):\n if not self._cached_last_roll_revision:\n revinfo = subprocess2.check_output(['gclient', 'revinfo'],\n cwd=self._path_to_chrome)\n project_path = 'src/' + self._path_to_project\n for line in revinfo.splitlines():\n dep_path, source = line.split(': ', 1)\n if dep_path == project_path:\n self._cached_last_roll_revision = source.split('@')[-1]\n break\n assert len(self._cached_last_roll_revision) == 40\n return self._cached_last_roll_revision" ]
[ "0.66357094", "0.646586", "0.64447695", "0.6435589", "0.6427532", "0.6335761", "0.629556", "0.6295136", "0.6277516", "0.6207843", "0.6153193", "0.61308944", "0.6121598", "0.6120476", "0.60908526", "0.5947546", "0.5922661", "0.5920262", "0.5910456", "0.58968264", "0.5894823", "0.5834052", "0.58259857", "0.58242947", "0.58126366", "0.57875985", "0.57866156", "0.5778886", "0.5738794", "0.5732797", "0.57305837", "0.5729715", "0.5714778", "0.5691979", "0.56696296", "0.5650943", "0.56124973", "0.5590917", "0.5580614", "0.5561407", "0.5560053", "0.5556644", "0.5553563", "0.5519699", "0.5519107", "0.55102646", "0.5501737", "0.550107", "0.5491379", "0.5490835", "0.5484026", "0.548129", "0.5480082", "0.546701", "0.5459162", "0.54570335", "0.54563475", "0.5452439", "0.54431856", "0.54369056", "0.5435668", "0.5434593", "0.5431265", "0.53527623", "0.5347847", "0.5333667", "0.5330133", "0.5330055", "0.53254724", "0.53238523", "0.53192663", "0.5317473", "0.5313761", "0.53027904", "0.5292432", "0.5282829", "0.5272357", "0.5271433", "0.52709204", "0.52687985", "0.52670467", "0.5263718", "0.5262791", "0.52622765", "0.52588135", "0.52556103", "0.52530825", "0.52497834", "0.52440614", "0.5231291", "0.5230246", "0.5228889", "0.52263033", "0.5218852", "0.52176034", "0.52160186", "0.52130777", "0.5198685", "0.51887757", "0.5172938" ]
0.7615342
0
Return the last modified date (as a string) for the given paths
def last_modified_date(*paths, **kwargs):
    return check_output([
        'git',
        'log',
        '-n', '1',
        '--pretty=format:%cd',
        '--date=iso',
        '--',
        *paths
    ], **kwargs).decode('utf-8')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_date_modified(path):\n return str(datetime.datetime.fromtimestamp(os.path.getmtime(path)))", "def _get_last_modified_date(path):\n last_date = 0\n root_dir, subdirs, files = os.walk(path).next()\n # get subdirs and remove hidden ones\n subdirs = [s for s in subdirs if not s.startswith('.')]\n for subdir in subdirs:\n for root, _, _ in os.walk(join(path, subdir)):\n base = os.path.basename(root)\n # checking if is a hidden path\n if not base.startswith(\".\") and not base.startswith(\"/.\"):\n last_date = max(last_date, os.path.getmtime(root))\n\n # check files of interest in the skill root directory\n files = [f for f in files\n if not f.endswith('.pyc') and f != 'settings.json']\n for f in files:\n last_date = max(last_date, os.path.getmtime(os.path.join(path, f)))\n return last_date", "def last_modified(path):\n\n date = os.path.getmtime(path)\n date = datetime.datetime.fromtimestamp(date)\n return time.mktime(date.timetuple())", "def last_modified():\n return \"Last modified: %s\" % time.ctime(os.path.getmtime(FILE_NAME))", "def getDate(path):\n utime = ftp.stat(path=path).st_mtime\n last_modified = datetime.fromtimestamp(utime)\n return last_modified", "def mtime(path):", "def get_file_last_modification_date(filename=None):\n with open(filename, 'r') as fp:\n for line in fp:\n if line.startswith('Modify'):\n date_line = line.split()[1]\n file_date = datetime.strptime(date_line, \"%Y-%m-%d\")\n return filename, file_date", "def get_file_modification_date() -> str:\n file_modification_date = datetime.now().strftime(\"%d.%m.%Y\")\n print(file_modification_date)\n return file_modification_date", "def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetLastModifiedDate', self.handle)", "def last_modified_commit(*paths, **kwargs):\n return check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%h',\n '--',\n *paths\n ], **kwargs).decode('utf-8')", "def getmtime(self, path):\n return os.path.getmtime(path)", "def get_file_modified_date(filepath):\n return datetime.datetime.fromtimestamp(os.path.getmtime(filepath))", "def last_modified(self) -> str:\n\t\tif self.name == \"\":\n\t\t\tif \"last_modified\" in self.ds._file[\"/matrix\"].attrs:\n\t\t\t\treturn self.ds._file[\"/matrix\"].attrs[\"last_modified\"]\n\t\t\telif self.ds._file.mode == 'r+':\n\t\t\t\tself.ds._file[\"/matrix\"].attrs[\"last_modified\"] = timestamp()\n\t\t\t\tself.ds._file.flush()\n\t\t\t\treturn self.ds._file[\"/matrix\"].attrs[\"last_modified\"]\n\n\t\tif self.name != \"\":\n\t\t\tif \"last_modified\" in self.ds._file[\"/layers/\" + self.name].attrs:\n\t\t\t\treturn self.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"]\n\t\t\telif self.ds._file.mode == 'r+':\n\t\t\t\tself.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"] = timestamp()\n\t\t\t\tself.ds._file.flush()\n\t\t\t\treturn self.ds._file[\"/layers/\" + self.name].attrs[\"last_modified\"]\n\n\t\treturn timestamp()", "def last_modified(self):\n return os.path.getmtime(self.filename)", "def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")", "def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")", "def last_modified_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_modified_time\")", "def DateModified(filepath, stringformat=False):\n time_in_s = os.path.getmtime(filepath)\n if stringformat:\n return time.ctime(time_in_s)\n else:\n return time_in_s", "def get_modified_time(fname):\n return os.stat(fname).st_mtime", "def 
getmtime(path):\n return get_instance(path).getmtime(path)", "def last_modified(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_modified\")", "def get_mtime(path):\n try:\n return path.lstat().mtime\n except error.Error:\n pass", "def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")", "def get_last_modified() -> str:\n service = get_authenticated_service(\"drive\", \"v3\")\n response = (\n service.files().get(fileId=SPREADSHEET_ID, fields=\"modifiedTime\").execute()\n )\n return response[\"modifiedTime\"]", "def last_modified_date(when):\n\n # FIXME: this should use the Moira server timezone\n delta = datetime.datetime.now() - when\n if delta.days > 0:\n if delta.days > 365:\n return \"%.2f years\" % (delta.days / 365.25)\n else:\n return plural(delta.days, \"day\", \"days\")\n else:\n if delta.seconds > 3600:\n hours = delta.seconds / 3600\n minutes = (delta.seconds - hours * 3600) / 60\n return plural(hours, \"hour\", \"hours\") + ' ' + plural(minutes, \"minute\", \"minutes\")\n elif delta.seconds > 60:\n return plural(delta.seconds / 60, \"minute\", \"minutes\")\n else:\n return plural(delta.seconds, \"second\", \"seconds\")", "def get_recently_modified_scratch_file(settings):\n dir_contents = os.listdir(settings.location)\n full_paths = map(lambda f: os.path.join(settings.location, f), dir_contents)\n files = filter(lambda f: os.path.isfile(str(f)), full_paths)\n if not files:\n return \"\"\n files = sorted(files, key=_get_mtime)\n return files[-1]", "def last_modified_date_time(self):\n if \"lastModifiedDateTime\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"lastModifiedDateTime\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None", "def last_modified(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified\")", "def last_modified(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_access_time(path):\r\n return os.stat(path)[stat.ST_ATIME]", "def getLastModifiedTime(self): #$NON-NLS-1$\r", "def get_latest_timestamp_file_path(files: List[str]) -> str:\n\n logger = prefect.context.get(\"logger\")\n\n extract_fname = (\n lambda f: os.path.basename(f).replace(\".csv\", \"\").replace(\".parquet\", \"\")\n )\n file_names = [extract_fname(file) for file in files]\n latest_file_name = max(file_names, key=lambda d: datetime.fromisoformat(d))\n latest_file = files[file_names.index(latest_file_name)]\n\n logger.debug(f\"Latest file: {latest_file}\")\n\n return latest_file", "def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetLastModifiedDate', self.handle)", "def get_last_usage_date(path: str) -> datetime:\r\n date: datetime\r\n\r\n if not os.path.exists(path):\r\n raise FileNotFoundError(\"Path does not exist: \" + path)\r\n\r\n try:\r\n date = datetime.fromtimestamp(os.path.getmtime(path))\r\n 
except Exception:\r\n pass\r\n\r\n try:\r\n compare_date = datetime.fromtimestamp(os.path.getatime(path))\r\n if date.date() < compare_date.date():\r\n # compare date is newer\r\n date = compare_date\r\n except Exception:\r\n pass\r\n\r\n try:\r\n compare_date = datetime.fromtimestamp(os.path.getctime(path))\r\n if date.date() < compare_date.date():\r\n # compare date is newer\r\n date = compare_date\r\n except Exception:\r\n pass\r\n\r\n return date", "def last_modified_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_modified_time\")", "def get_file_modification_time(file_path):\n timestamp = os.path.getmtime(file_path)\n time_string = datetime.datetime.fromtimestamp(\n int(os.path.getmtime(file_path))\n ).strftime('%Y:%m:%d %H')\n return time_string", "def get_mtime(self):\n return os.path.getmtime(self.get_path())", "def get_timestamp(file_path):\n mtime = os.stat(file_path).st_mtime\n return datetime.datetime.fromtimestamp(mtime).isoformat()", "def __get_modification_time(filename: str) -> float:\n return os.stat(filename).st_mtime", "def creation_date(path_to_file):\n if platform.system() == 'Windows':\n print(\"last modified: %s\" % time.ctime(os.path.getmtime(path_to_file)))\n modtime = time.ctime(os.path.getmtime(path_to_file))\n \n print(\"created: %s\" % time.ctime(os.path.getctime(path_to_file)))\n modtime = datetime.datetime.strptime(modtime, \"%a %b %d %H:%M:%S %Y\")\n modtime = datetime.datetime.strftime(modtime, \"%Y-%m-%d\")\n return modtime", "def paths_sort(path):\n base_name = os.path.basename(path)\n \n stat_name = base_name.split('.')[0] \n\n date = base_name.split('.')[1]\n \n try:\n date = datetime.datetime.strptime(date, '%Y-%m-%d')\n \n return date, stat_name\n except Exception as e:\n print(e)", "def date_modified():\n return render_template(\"date_modified.html\", date_modified=last_modified())", "def last_modified(self) -> str:\n\t\tif not self._closed:\n\t\t\ttimestamp = self.ds.last_modified()\n\t\t\treturn timestamp\n\t\treturn None", "def last_modified_at(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_modified_at\")", "def get_modified_time(self, name):\n full_path = self.path(name)\n return self.__volume.getmtime(full_path)", "def last_modified_at(self):\n return self.viztrail.last_modified_at", "def creation_date(path_to_file):\n if platform.system() == 'Windows':\n return os.path.getctime(path_to_file)\n else:\n stat = os.stat(path_to_file)\n try:\n return stat.st_birthtime\n except AttributeError:\n return stat.st_mtime", "def getlmtime(self):\n if self.islink() and self.lexists():\n st = os.lstat(self.path)\n return st.st_mtime\n return Stat.getmtime(self)", "def get_last_update(self):\n last_update = os.path.getmtime(self.parent_filepath)\n return last_update", "def get_sta_last_date(sta_path):\n # ---------------------------------------------------------------------\n logger.debug(\"get_sta_last_date\")\n\n rest_dirs = sorted(glob.glob(os.path.join(sta_path, \"rest/*\")))\n\n if rest_dirs:\n _, rest_dir = os.path.split(rest_dirs[-1])\n return rest_dir\n\n return \"0000-00-00\"", "def creation_date(path):\n if platform.system() == 'Windows':\n return os.path.getctime(path)\n else:\n stat = os.stat(path)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. 
No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime", "def created(path):\n\n # TODO: Test this code block on other platforms (OS X/Linux)\n\n if platform.system() == 'Windows':\n date = datetime.fromtimestamp(os.path.getctime(path)).strftime('%Y-%m-%d')\n return date\n\n else:\n stat = os.stat(path)\n\n try:\n return stat.st_birthtime\n\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime", "def _creation_date(path_to_file):\n if platform.system() == \"Windows\":\n return os.path.getctime(path_to_file)\n else:\n stat = os.stat(path_to_file)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime", "def get_file_mtime(url, path):\n path = os.path.join(msettings['STATIC_ROOT'], path)\n try:\n return datetime.datetime.fromtimestamp(os.path.getmtime(os.path.abspath(path))).strftime('%S%M%H%d%m%y')\n except OSError:\n # If the file can't be found.\n return '0'", "def modified(self) -> datetime:\n # TODO: Should this be overridden for LocalDirectoryAsset?\n return datetime.fromtimestamp(self.filepath.stat().st_mtime).astimezone()", "def get_mod_time(self):\n if self.file_meta[:2] == b'bp':\n file_meta_plist = ccl_bplist.load(BytesIO(self.file_meta))\n raw_date_time = file_meta_plist['$objects'][1]['LastModified']\n converted_time = datetime.datetime.fromtimestamp(raw_date_time)\n converted_time = converted_time.timetuple()\n return converted_time\n else:\n file_meta_plist = plistlib.loads(self.file_meta)\n return file_meta_plist['modified'].timetuple()", "def get_inbound_statement_details_last_modified_date(self):\n return self.get_text_from_element(self.inbound_statements_details_last_modified_date_locator, False)", "def last_log(self) -> List:\n logs_list: List = os.listdir(LOGS_BASE_PATH)\n full_list = [os.path.join(LOGS_BASE_PATH, i) for i in logs_list]\n time_sorted_list: List = sorted(full_list, key=os.path.getmtime)\n return time_sorted_list[-1]", "def modified(filename: str) -> datetime.datetime:\n fs, relative_path = url_to_fs(filename)\n return cast(datetime.datetime, fs.modified(relative_path))", "def get_mtime(self):\n return max(asset.get_mtime() for asset in self._assets)", "def get_files_last_modified(self, files_list):\n filepath_hash_set = set()\n ret_dict = {}\n\n for e in files_list:\n repo_id, file_path, file_id = e\n path_hash = calc_file_path_hash(file_path)\n filepath_hash_set.add(path_hash)\n\n m_infos = super(FileLastModifiedInfoManager, self).filter(\n file_path_hash__in=list(filepath_hash_set))\n for f in files_list:\n repo_id, file_path, file_id = f\n for info in m_infos:\n if repo_id == info.repo_id and file_path == info.file_path:\n # Got the record in db\n ret_key = '|'.join(f)\n if file_id != info.file_id:\n # record is outdated, need re-calculate\n info.delete()\n email, last_modified = self._calc_file_last_modified(\n info.repo_id, info.file_path, info.file_path_hash,\n file_id)\n ret_dict[ret_key] = last_modified\n continue\n else:\n # record is valid\n ret_dict[ret_key] = info.last_modified\n continue\n \n # Process the remaining files.\n for f in files_list:\n ret_key = '|'.join(f)\n if ret_dict.has_key(ret_key):\n continue\n\n repo_id, file_path, file_id = f\n path_hash = calc_file_path_hash(file_path)\n email, 
last_modified = self._calc_file_last_modified(\n repo_id, file_path, path_hash, file_id)\n ret_dict[ret_key] = last_modified\n \n return ret_dict", "def _get_most_recent_file(path):\n allowed_ext = tuple(MIMES.keys())\n files_iter = (\n entry for entry in os.scandir(path)\n if entry.is_file() and entry.name.lower().endswith(allowed_ext)\n )\n for entry in sorted(files_iter, key=_newest_ctime):\n return entry.path\n return None", "def getmtime(self):\n if self.exists():\n return os.path.getmtime(self.path)\n return 0", "def recursive_mtime(path):\n if os.path.isfile(path):\n return mtime(path)\n current = 0\n for dirname, _, filenames in os.walk(path):\n if filenames:\n current = max(\n current, max(mtime(os.path.join(dirname, f)) for f in filenames)\n )\n return current", "def creation_date(path_to_file):\n if platform.system() == 'Windows':\n return os.path.getctime(path_to_file)\n else:\n stat = os.stat(path_to_file)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime", "def creation_date(path_to_file):\n if platform.system() == 'Windows':\n return os.path.getctime(path_to_file)\n else:\n stat = os.stat(path_to_file)\n try:\n return stat.st_birthtime\n except AttributeError:\n # We're probably on Linux. No easy way to get creation dates here,\n # so we'll settle for when its content was last modified.\n return stat.st_mtime", "def last_modified_dts(self):\n return self._last_modified_dts", "def get_last_modified_value(self):\n return self.get_text_from_element(self.last_modified_value_locator)", "def get_last_modified_from_first_matching_file(key_list, framework_slug, prefix):\n path_starts_with = '{}/{}'.format(framework_slug, prefix)\n return next((key for key in key_list if key.get('path').startswith(path_starts_with)), {}).get('last_modified')", "def get_file_mtime(local_path):\n mod_time = os.path.getmtime(local_path) * 1000\n return int(mod_time)", "def get_latest_file(files, current_directory):\n time_deltas = [\n time.time() - creation_date(Path(current_directory) / f) for f in files]\n latest_file = files.pop(time_deltas.index(min(time_deltas)))\n\n return Path(current_directory) / latest_file", "def most_recent_file(folder):\n files = ErrorLog.get_files(folder)\n files_with_mod_dates = [[os.path.abspath(file),\n datetime.datetime.fromtimestamp(os.path.getmtime(file))] # modified date\n for file in files]\n if not files_with_mod_dates:\n return None, None\n most_recent_file = files_with_mod_dates[0][0]\n most_recent_file_date = files_with_mod_dates[0][1]\n for file, mod_date in files_with_mod_dates:\n if mod_date > most_recent_file_date:\n most_recent_file = file\n most_recent_file_date = mod_date\n return most_recent_file, most_recent_file_date", "def last_modified_header(last_modified_date):\n return {\"Last-Modified\": last_modified_date}", "def mtime(self) -> str:\n return self._mtime", "def mtime(name):", "def mtime(self):\r\n return self.info().mtime", "def get_destinations_grid_last_modified_date(self):\n return self.get_specific_column_value_from_grid(self.destinations_grid_div_id, self.destinations_grid_row_count, self.last_modified_column_name)", "def _get_file_mtime(self, sentry_unit, filename):\n return sentry_unit.file_stat(filename)['mtime']", "def _get_dir_mtime(self, sentry_unit, directory):\n return sentry_unit.directory_stat(directory)['mtime']", "def get_dialed_digits_grid_last_modified_date(self):\n 
return self.get_specific_column_value_from_grid(self.dialed_digits_grid_div_id, self.dialed_digits_grid_row_count, self.modified_date_column_name)", "def last_file_updated(self):\n query = '*.xml'\n keymap_files = glob.glob(query)\n\n sorted_files = sorted(keymap_files, key=self.mtime, reverse=1)\n last_modified_file = sorted_files[0]\n second_last_modified_file = sorted_files[1]\n\n t1 = self.mtime(last_modified_file)\n t2 = self.mtime(second_last_modified_file)\n\n logger.debug('Last modified time: {0}'.format(t1))\n logger.debug('Second Last modified time: {0}'.format(t2))\n\n last_modified_time = self.mtime(last_modified_file)\n last_access_time = self.atime(last_modified_file)\n\n if sys.platform == \"win32\":\n logger.info('Detected Windows environment')\n # self.regenerate_osx(last_access_time, last_modified_time)\n elif sys.platform == 'darwin':\n logger.info('Detected OSX environment')\n # self.regenerate_windows(last_access_time, last_modified_time)\n else:\n logger.error('Unhandled platform: {0}'.format(sys.platform))\n pass", "def stat_timestamp(filename):\n return os.stat(filename).st_mtime", "def getctime(path):\n return get_instance(path).getctime(path)", "def time_modified(self) -> str:\n return pulumi.get(self, \"time_modified\")", "def time_modified(self) -> str:\n return pulumi.get(self, \"time_modified\")", "def getLastMod(page):\n return page.info().get(\"Last-Modified\")", "def get_mtime(self):\n if settings.DEBUG:\n return os.path.getmtime(self.get_path())\n return staticfiles_storage.modified_time(self.get_name())", "def getFileModifTime(filename):\n\tfilename = adaptPath(filename)\n\tfrom os import stat\n\ttry:\n\t\tresult = stat(filename).st_mtime\n\texcept:\n\t\tresult = 0\n\treturn result", "def _first_more_recent (f1, path) :\n import datetime\n import re\n import time\n s = str (f1.info ())\n da = re.compile (\"Last[-]Modified: (.+) GMT\").search (s)\n if da is None :\n return True\n \n da = da.groups () [0]\n gr = re.compile (\"[\\w, ]* ([ \\d]{2}) ([\\w]{3}) ([\\d]{4}) ([\\d]{2}):([\\d]{2}):([\\d]{2})\").search (da)\n if gr == None : return True\n gr = gr.groups ()\n da = datetime.datetime ( int (gr [2]), sys.hal_log_values [\"month_date\"] [gr [1].lower ()], int (gr [0]),\n int (gr [3]), int (gr [4]), int (gr [5]) )\n \n p = time.ctime (os.path.getmtime (path))\n gr = re.compile (\"[\\w, ]* ([\\w]{3}) ([ \\d]{2}) ([\\d]{2}):([\\d]{2}):([\\d]{2}) ([\\d]{4})\").search (p)\n if gr is None :\n return True\n gr = gr.groups ()\n da = datetime.datetime ( int (gr [5]), sys.hal_log_values [\"month_date\"] [gr [0].lower ()], int (gr [1]),\n int (gr [2]), int (gr [3]), int (gr [4]) )\n \n file = da\n \n return da > file", "def find_file_modified_time(file):\n#\n#--- find stat of the file. 
one of them is the file creation date\n#\n (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = os.stat(file)\n out = time.ctime(mtime)\n#\n#--- out is in \"Mon Dec 01 15:22:37 2014\" format\n#\n atemp = re.split('\\s+', out)\n \n month = changeMonthFormat(atemp[1])\n date = int(float(atemp[2]))\n year = int(float(atemp[4]))\n\n btemp = re.split(':', atemp[3])\n\n hours = int(float(btemp[0]))\n minutes = int(float(btemp[1]))\n seconds = int(float(btemp[2]))\n\n stime = convertDateToTime2(year, month, date, hours, minutes, seconds)\n \n return stime", "def get_rates_grid_last_modified_date(self):\n return self.get_specific_column_value_from_grid(self.rates_grid_div_id, self.rates_grid_row_count, self.last_modified_column_name)", "def git_get_mtime_at_commit(path: Path, revision: str, cwd: Path) -> str:\n cmd = [\"log\", \"-1\", \"--format=%ct\", revision, \"--\", path.as_posix()]\n lines = _git_check_output_lines(cmd, cwd)\n return datetime.utcfromtimestamp(int(lines[0])).strftime(GIT_DATEFORMAT)", "def get_file_modification_time(self, filename):\n mtime = None\n if os.path.exists(filename):\n mtime = datetime.datetime.fromtimestamp(os.path.getmtime(filename))\n return mtime" ]
[ "0.8046738", "0.7416479", "0.7245152", "0.7219578", "0.71944565", "0.6939524", "0.67891824", "0.6737202", "0.6734333", "0.67058384", "0.6656483", "0.6634494", "0.6624429", "0.6620233", "0.6591059", "0.6591059", "0.65882814", "0.6566373", "0.6562852", "0.6555743", "0.6548625", "0.653265", "0.64970225", "0.64970225", "0.6482359", "0.64482903", "0.64400184", "0.64279085", "0.63936424", "0.63936424", "0.6311749", "0.6311749", "0.6311749", "0.6311749", "0.6311749", "0.6311749", "0.63040894", "0.62987244", "0.62791926", "0.62751937", "0.6242109", "0.6215718", "0.6169163", "0.61656564", "0.6159834", "0.61462873", "0.6141946", "0.613375", "0.61257094", "0.61034626", "0.60916215", "0.6084548", "0.60732526", "0.60599273", "0.60399777", "0.60242397", "0.6022083", "0.5996619", "0.5992625", "0.59839475", "0.59794414", "0.596576", "0.59630924", "0.5924523", "0.592139", "0.59195966", "0.59114546", "0.5903405", "0.58940125", "0.589316", "0.5891363", "0.58874005", "0.58874005", "0.58802915", "0.5871229", "0.58625853", "0.58593947", "0.5842515", "0.58085394", "0.5808042", "0.5798278", "0.5796057", "0.5792857", "0.5788796", "0.5785506", "0.57795674", "0.5775286", "0.5766856", "0.576511", "0.5746789", "0.5735572", "0.5735572", "0.57351553", "0.5734794", "0.5725697", "0.5706173", "0.5705122", "0.5690408", "0.5681955", "0.56664115" ]
0.7935193
1
Return whether the given paths have been changed in the commit range. Used to determine if a build is necessary
def path_touched(*paths, commit_range):
    return check_output([
        'git', 'diff', '--name-only', commit_range, '--', *paths
    ]).decode('utf-8').strip() != ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_rev_dirty(ctx: \"PlanemoCliContext\", directory: str) -> bool:\n return io.shell([\"git\", \"diff\", \"--quiet\"], cwd=directory) != 0", "def _can_checkout(wit_path) -> bool:\n\n current_id = _get_head(wit_path)\n changes_to_be_committed = _return_as_string(_get_changes_to_be_committed, wit_path, current_id)\n changes_not_staged_for_commit = _return_as_string(_get_changes_not_staged_for_commit, wit_path)\n if changes_to_be_committed + changes_not_staged_for_commit == '':\n return True\n logging.error(FileNotSavedError('Some files are not saved. Try \"status\" command to view them.'))\n return False", "def has_changes(self):\n if self.repo_is_empty:\n return True\n\n tree = self.repo.get(self.index.write_tree(self.repo))\n diff = tree.diff_to_tree(self.repo.get(self.repo.head.target).tree)\n return bool(diff)", "def check_dirty(args):\n man = load_manifest()\n any_dirty = False\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n any_dirty = check_dirty_repo(repo) or any_dirty\n return any_dirty", "def should_build(target_platform, changed_files):\n return any(_should_file_trigger_build(target_platform, file) for file in changed_files)", "def ShouldBuild(self, src_files, dst_files):\n if self.force:\n return True\n\n oldest = None\n for dst in dst_files:\n if not os.path.exists(dst):\n self.DebugMsg(\"Build because %s does not exist\" % dst)\n return True\n modified = os.path.getmtime(dst)\n if oldest == None or modified < oldest:\n old = dst\n oldest = modified\n\n for src in src_files:\n modified = os.path.getmtime(src)\n if modified > oldest:\n self.DebugMsg(\"Build because %s is newer than %s\" % (src, old))\n return True\n\n self.DebugMsg(\"%s are up to date\" % \", \".join(dst_files))\n return False", "def check_dependency_change(targets: List[str], dependencies: List[str]) -> bool:\n min_target_mtime = min([get_mtime(path) for path in targets])\n max_dep_mtime = max([get_mtime(path) for path in dependencies])\n return max_dep_mtime > min_target_mtime", "def has_changes(directory=None):\n out = check_output('git status', shell=True, cwd=directory)\n if 'nothing to commit (working directory clean)' in out:\n return False\n if 'nothing to commit, working directory clean' in out:\n return False\n if 'nothing to commit, working tree clean' in out:\n return False\n if 'nothing added to commit' in out:\n return False\n return True", "def local_changes():\n result, output = popen('git status', False, False)\n try:\n return not output[-1].startswith(\"nothing to commit\")\n except IndexError:\n return True", "def _check_guts_toc_mtime(attr, old, toc, last_build, pyc=0):\n for (nm, fnm, typ) in old:\n if mtime(fnm) > last_build:\n print \"building because %s changed\" % fnm\n return True\n elif pyc and mtime(fnm[:-1]) > last_build:\n print \"building because %s changed\" % fnm[:-1]\n return True\n return False", "def index_is_dirty():\n result, output = popen('git diff --cached', False, False)\n return len(output) > 0", "def repo_has_incoming(*repo_paths):\n incoming = False\n\n for repo_path in repo_paths:\n try:\n subprocess.check_output(['hg', 'incoming', '-R', repo_path])\n print('Detected incoming changesets in \"{}\"'.format(repo_path))\n incoming = True\n except subprocess.CalledProcessError as e:\n if e.returncode != 1:\n raise\n\n return incoming", "def has_staged_changes(repo):\n return subprocess.call(['git', 'diff-index', '--cached', '--quiet', 'HEAD'],\n cwd=repo) != 0", "def different_required(old_required: Dict[str, 
UpToDate], new_required: Dict[str, UpToDate]) -> bool:\n for new_path in sorted(new_required.keys()):\n if new_path not in old_required:\n Logger.why(f\"Must run actions because changed to require: {new_path}\")\n return True\n\n for old_path in sorted(old_required.keys()):\n if old_path not in new_required:\n Logger.why(f\"Must run actions because changed to not require: {old_path}\")\n return True\n\n for path in sorted(new_required.keys()):\n old_up_to_date = old_required[path]\n new_up_to_date = new_required[path]\n if old_up_to_date.producer != new_up_to_date.producer:\n Logger.why(\n f\"Must run actions because the producer of the required: {path} \"\n f'has changed from: {old_up_to_date.producer or \"source file\"} '\n f'into: {new_up_to_date.producer or \"source file\"}'\n )\n return True\n if not is_exists(path) and old_up_to_date.mtime_ns != new_up_to_date.mtime_ns:\n Logger.why(\n f\"Must run actions \"\n f\"because the modification time of the required: {path} \"\n f\"has changed from: \"\n f\"{_datetime_from_nanoseconds(old_up_to_date.mtime_ns)} \"\n f\"into: \"\n f\"{_datetime_from_nanoseconds(new_up_to_date.mtime_ns)}\"\n )\n return True\n\n return False", "def dirty(self) -> bool:\n return len(self.detect_changed_files()) != 0", "def svn_fs_paths_changed(*args):\r\n return _fs.svn_fs_paths_changed(*args)", "def has_diff(ds, remote_branch_name, remote, paths):\n remote_ref = '/'.join((remote, remote_branch_name))\n if remote_ref not in ds.repo.get_remote_branches():\n lgr.debug(\"Remote '%s' has no branch matching %r. Will publish\",\n remote, remote_branch_name)\n # we don't have any remote state, need to push for sure\n return True\n\n lgr.debug(\"Testing for changes with respect to '%s' of remote '%s'\",\n remote_branch_name, remote)\n current_commit = ds.repo.get_hexsha()\n within_ds_paths = [p['path'] for p in paths if p['path'] != ds.path]\n commit_differ = current_commit != ds.repo.get_hexsha(remote_ref)\n # yoh: not sure what \"logic\" was intended here for comparing only\n # some files. By now we get a list of files, if any were changed,\n # from the commit on remote, and somehow diff says below that they didn't differ...\n # but if commit is different -- there must be differences and we\n # should publish. otherwise now skips publishing root dataset\n # although its master is behind by 1 commit. 
Moreover there could\n # be an empty commit -- shouldn't we publish then???\n if not commit_differ and within_ds_paths:\n # only if any paths is different from just the parentds root\n # in which case we can do the same muuuch cheaper (see below)\n # if there were custom paths, we will look at the diff\n lgr.debug(\"Since paths provided, looking at diff\")\n return any(r[\"state\"] != \"clean\"\n for r in ds.diff(path=within_ds_paths,\n fr=\"HEAD\",\n to=remote_ref,\n untracked=\"no\"))\n else:\n # if commits differ at all\n lgr.debug(\"Since no paths provided, comparing commits\")\n return commit_differ", "def hasChanged(self):\n return ((self.mtime != getmtime(self.path)) or\n (self.size != os.path.getsize(self.path)) )", "def is_commit_affecting_directory(self, commit, directory):\n exit_code = self.run([\n 'git', 'diff-tree', '--quiet', '--no-commit-id', '-r', commit,\n '--', directory\n ],\n return_exit_code=True)\n return exit_code == 1", "def needs_rebuild(source, target):\n return not os.path.isfile(target) or (\n os.path.getmtime(source) > os.path.getmtime(target))", "def files_are_modified(filenames, lastupdate):\n for filename in filenames:\n if file_is_modified(filename, lastupdate):\n return True\n return False", "def commits_exist(repo, commits):\n for commit in commits:\n if not commit_exists(repo, commit):\n return False\n return True", "def is_release_notes_changed(self):\n # there exists a difference between origin/master and current branch\n if self.master_diff:\n diff_releases = self.master_diff.split('##')\n unreleased_section = diff_releases[1]\n unreleased_section_lines = unreleased_section.split('\\n')\n\n adds_in_diff = 0\n removes_in_diff = 0\n\n for line in unreleased_section_lines:\n if line.startswith('+'):\n adds_in_diff += 1\n elif line.startswith('-') and not re.match(r'- *$', line):\n removes_in_diff += 1\n\n # means that at least one new line was added\n if adds_in_diff - removes_in_diff > 0:\n return True\n\n print_error(F'No new comment has been added in the release notes file: {self.release_notes_path}')\n return False", "def is_valid_commits(args):\n if args.commits is not None:\n return True\n return False", "def check_for_major_changes(cabal: CabalFile) -> bool:\n old_ver = cabal.get_version()\n old_tag = None\n if f'v{old_ver}' in get_tags():\n old_tag = f'v{old_ver}'\n if f'{old_ver}' in get_tags():\n old_tag = f'{old_ver}'\n if old_tag is None:\n print(f\"Couldn't find tag {old_tag} for current version; skipping revision check.\\n\")\n return False\n\n cmd = ['git', 'diff', '--name-only', f'{old_tag}..HEAD']\n changed_files = [ l.strip()\n for l in check_output(cmd).decode('UTF-8').split('\\n')\n if len(l.strip()) > 0 ]\n non_cabals = [ f\n for f in changed_files\n if not f.endswith('.cabal') ]\n print(f\"{len(changed_files)} files have changed since {old_tag}:\\n \",\n ' \\n'.join(changed_files))\n\n if len(non_cabals) > 0:\n return False\n else:\n print(dedent(f'''\n It appears that the only changes between {old_tag} and now are in the\n cabal file. 
Perhaps you want to make a revision instead?\n\n y = make a revision\n n = do a full release anyways\n d = show me a diff\n '''))\n while True:\n resp = prompt_for_char('How to proceed?', options='ynd')\n if resp == 'd':\n cmd = ['git', 'diff', f'{old_tag}..HEAD']\n print(' '.join(cmd))\n check_call(cmd)\n elif resp == 'y':\n return True\n elif resp == 'n':\n return False", "def has_unstaged_changes(repo):\n subprocess.check_call(['git', 'update-index', '-q', '--ignore-submodules',\n '--refresh'], cwd=repo)\n return subprocess.call(['git', 'diff-index', '--quiet', 'HEAD'],\n cwd=repo) != 0", "def _check_guts_eq(attr, old, new, last_build):\n if old != new:\n print \"building because %s changed\" % attr\n return True\n return False", "def _paths_are_consistent_with_hash_prefixes(self, paths,\n path_hash_prefixes):\n\n # Assume that 'paths' and 'path_hash_prefixes' are inconsistent until\n # proven otherwise.\n consistent = False\n\n if len(paths) > 0 and len(path_hash_prefixes) > 0:\n for path in paths:\n path_hash = self._get_target_hash(path)\n # Assume that every path is inconsistent until proven otherwise.\n consistent = False\n\n for path_hash_prefix in path_hash_prefixes:\n if path_hash.startswith(path_hash_prefix):\n consistent = True\n break\n\n # This path has no matching path_hash_prefix. Stop looking further.\n if not consistent: break\n\n return consistent", "def needs_update(self, *path):\n dt_fmt = \"%Y-%m-%d %H:%M:%S\"\n try:\n linfo = self.info(*path)\n dt_local = datetime.datetime.strptime(\n linfo[\"datetime\"][:19], dt_fmt)\n dt_server = datetime.datetime.strptime(\n self.serverfiles.info(*path)[\"datetime\"][:19], dt_fmt)\n return dt_server > dt_local\n except FileNotFoundError:\n return True\n except KeyError:\n return True", "def _any_files_newer(cls, files, check_mtime):\n for path in files:\n path_mtime = os.path.getmtime(path)\n if path_mtime > check_mtime:\n # This path was modified more recently than the\n # check_mtime.\n return True\n # If we made it here, nothing was newer than the check_mtime\n return False", "def _assets_are_stale(self, sourcedirectory, cachedirectory):\n comparison = filecmp.dircmp(sourcedirectory, cachedirectory, [], [])\n if comparison.left_only or comparison.right_only:\n # We have files in one directory and not the other\n return True\n if comparison.diff_files:\n # Some of the files have changed\n return True\n\n return False", "def _path_added(self, path, fecommit):\n # Because git-fast-export includes the entire tree in its output,\n # regardless of whether the requested commit is the first in the\n # branch or not, we need to check the repo itself to be certain if\n # this path was truly introduced in this commit, or simply existed\n # in the tree prior to the \"first\" commit.\n commit = self.ctx.repo.get(fecommit['sha1'])\n if commit is None:\n # empty repository?\n LOG.debug2(\"_path_added() commit {} is missing\".format(fecommit['sha1']))\n return True\n for parent in commit.parents:\n if p4gf_git.exists_in_tree(self.ctx.repo, path, parent.tree):\n LOG.debug2(\"_path_added() {} exists in parent tree {}\".format(\n path, p4gf_util.abbrev(p4gf_pygit2.object_to_sha1(parent))))\n return False\n return True", "def checkFileDirty(self, fn):\n for editor in self.editors:\n if Utilities.samepath(fn, editor.getFileName()):\n break\n else:\n return True\n \n res = self.checkDirty(editor)\n return res", "def repo_has_uncommitted():\n buff = subprocess.check_output(['hg', 'status'])\n\n if len(buff):\n print('Dirty / uncommitted changes in 
repository!')\n return True\n\n return False", "def _latest_checkpoints_changed(configs, run_path_pairs):\n for run_name, logdir in run_path_pairs:\n if run_name not in configs:\n config = ProjectorConfig()\n config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)\n if file_io.file_exists(config_fpath):\n file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')\n text_format.Merge(file_content, config)\n else:\n config = configs[run_name]\n\n # See if you can find a checkpoint file in the logdir.\n ckpt_path = latest_checkpoint(logdir)\n if not ckpt_path:\n # See if you can find a checkpoint in the parent of logdir.\n ckpt_path = latest_checkpoint(os.path.join(logdir, os.pardir))\n if not ckpt_path:\n continue\n if config.model_checkpoint_path != ckpt_path:\n return True\n return False", "def is_outdated(self):\n\n if not self.is_done:\n return False\n elif not (self.input_files and self.output_files):\n return False\n\n return fileutils.modified_after(self.input_files, self.output_files)", "def check_bisect_finished(self, revision):\n if (revision.bad and revision.previous_revision and\n revision.previous_revision.good): # pragma: no cover\n if revision.deps_change() and self._expand_deps_revisions(revision):\n return False\n self.culprit = revision\n return True\n if (revision.good and revision.next_revision and\n revision.next_revision.bad):\n if (revision.next_revision.deps_change()\n and self._expand_deps_revisions(revision.next_revision)):\n return False\n self.culprit = revision.next_revision\n return True\n return False", "def check_commit_problems(self, commit, diff):\n\n # Initialise\n self._license_problem = False\n self._commit_problem = False\n self._commit_notes = defaultdict(list)\n\n # Unsafe regex checks...\n unsafe_matches = list()\n unsafe_matches.append( r\"\\b(KRun::runCommand|K3?ShellProcess|setUseShell|setShellCommand)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"\\b(system|popen|mktemp|mkstemp|tmpnam|gets|syslog|strptime)\\b\\s*[\\(\\r\\n]\" )\n unsafe_matches.append( r\"(scanf)\\b\\s*[\\(\\r\\n]\" )\n valid_filename_regex = r\"\\.(cpp|cc|cxx|C|c\\+\\+|c|l|y||h|H|hh|hxx|hpp|h\\+\\+|qml)$\"\n\n # Retrieve the diff and do the problem checks...\n filename = unicode(\"\")\n filediff = list()\n for line in diff:\n file_change = re.match( \"^diff --(cc |git a\\/.+ b\\/)(.+)$\", line )\n if file_change:\n # Are we changing file? 
If so, we have the full diff, so do a license check....\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))\n\n filediff = list()\n filename = file_change.group(2)\n continue\n\n # Diff headers are bogus\n if re.match(\"@@ -\\d+,\\d+ \\+\\d+ @@\", line):\n filediff = list()\n continue\n\n # Do an incremental check for *.desktop syntax errors....\n if re.search(\"\\.desktop$\", filename) and re.search(\"[^=]+=.*[ \\t]$\", line) and line.startswith(\"+\") and not re.match(\"^\\+#\", line):\n self._commit_notes[filename].append( \"[TRAILING SPACE] **\" )\n self._commit_problem = True\n\n # Check for things which are unsafe...\n for safety_match in unsafe_matches:\n match = re.match(safety_match, line)\n if match:\n note = \"[POSSIBLY UNSAFE: {0}] **\".format( match.group(1) )\n self._commit_notes[filename].append(note)\n self._commit_problem = True\n\n # Store the diff....\n filediff.append(line)\n\n if filename != \"\" and commit.files_changed[ filename ][\"change\"] in ['A'] and re.search(valid_filename_regex, filename):\n self.check_commit_license(filename, ''.join(filediff))", "def svn_diff_contains_conflicts(diff):\n return _diff.svn_diff_contains_conflicts(diff)", "def source_changed(source, cache):\n return os.path.getmtime(source)>os.path.getmtime(cache)", "def check_dependency(self, repo, minhash=None):\n try:\n p = Project.objects.get(repo_url=repo)\n except Project.DoesNotExist:\n return False\n j = p.last_successful_job()\n\n if j:\n if minhash:\n if p.commit_in_history(minhash, j.commit):\n # We already have a successful job that is new enough\n return True\n else:\n return True\n\n return False", "def changed_in_diff(diff: PatchedFile, line_n: int):\n for hunk in diff:\n hunk: Hunk\n for line_change in hunk:\n line_change: Line\n if line_change.is_added and line_change.target_line_no == line_n:\n return True\n return False", "def svn_diff_contains_diffs(diff):\n return _diff.svn_diff_contains_diffs(diff)", "def is_git_dirty():\n dirty_status = local('git diff --quiet || echo \"*\"', capture=True)\n if dirty_status == '*':\n return True\n\n untracked_count = int(local('git status --porcelain 2>/dev/null| grep \"^??\" | wc -l', capture=True))\n if untracked_count > 0:\n return True\n\n return False", "def has_repos(self):\n for section in self.sections():\n if section not in self.reserved_sections:\n return True\n return False", "def checkAllDirty(self):\n for editor in self.editors:\n if not self.checkDirty(editor):\n return False\n \n return True", "def check_diff(filepath, original, formatted):\n diff = list(unified_diff(original, formatted))\n if diff:\n print('{} diff:'.format(filepath))\n print((\"\".join(diff)).replace('\\r', ''))\n print()\n\n return bool(diff)", "def _has_project_changed(self, pid, modified_at):\n # look up the existing project entry\n if not self.ts_projects_cache:\n self.ts_projects_cache = self.get_url('https://cdn.door43.org/v2/ts/catalog.json', True)\n\n if not self.ts_projects_cache:\n # The cache could not be built, so automatically consider changed\n return True\n try:\n projects = json.loads(self.ts_projects_cache)\n except:\n return False\n\n # check if the resource has been modified\n for p in projects:\n if p['slug'] != pid:\n continue\n if 'long_date_modified' in p:\n return date_is_older(p['long_date_modified'], modified_at)\n else:\n # backwards compatibility\n return date_is_older(p['date_modified'], 
make_legacy_date(modified_at))", "def has_changes(self):\n return self.has_state_change(\n \"select_col.value\", \"condition.value\", \"input_val.value\"\n )", "def check_branch(\n comp_name: str, branch_name: str, branch: Dict[str, defs.ComponentVersion]\n ) -> None:\n uptodate_files: Dict[pathlib.Path, Tuple[pathlib.Path, defs.ComponentFile]] = {}\n\n if not RE_BRANCH_NAME.match(branch_name):\n res.append(f\"{comp_name}: Invalid branch name: {branch_name}\")\n\n for ver, version in sorted(branch.items()):\n if not RE_VERSION_STRING.match(ver):\n res.append(f\"{comp_name}/{branch_name}: Invalid version string: {ver}\")\n\n other_cksums, driver_cksums = _split_by_existence(comp_name, branch_name, version.files)\n if version.outdated:\n update_to = [\n o_version\n for o_version in branch.values()\n if not o_version.outdated\n and _split_by_existence(comp_name, branch_name, o_version.files)[0]\n == other_cksums\n ]\n if len(update_to) != 1:\n res.append(\n f\"{comp_name}/{branch_name}/{ver}: Got {len(update_to)} possible \"\n f\"versions to update to instead of exactly one\"\n )\n else:\n bad_files = sorted(\n relpath\n for relpath, (path, fdata) in driver_cksums.items()\n if util.file_sha256sum(path) != fdata.sha256\n )\n if bad_files:\n res.append(f\"{comp_name}/{branch_name}/{ver}: Bad checksum for {bad_files}\")\n\n if not uptodate_files:\n uptodate_files = driver_cksums\n elif uptodate_files != driver_cksums:\n res.append(\n f\"{comp_name}/{branch_name}: All the up-to-date versions should \"\n f\"define the same set of files with the same checksums\"\n )\n\n if not any(not version.outdated for version in branch.values()):\n res.append(f\"{comp_name}/{branch_name}: No non-outdated versions\")", "def is_new(self, owner, repo, start, end):\n key = (owner, repo)\n if key not in self.pr_merge_range:\n return False\n merge_start, merge_end = self.pr_merge_range[key]\n return start <= merge_start.date() <= end", "def changelog_updated(target_branch):\n\n output = subprocess.getoutput(['git diff HEAD origin/{}'.format(target_branch)])\n return 'a/changelog.md b/changelog.md' in output.lower()", "def is_match_with_edit(self, old_log_file, new_log_file, repos_log_file):\n Gumtree.gumtree.setOldAndNewFile(old_log_file, new_log_file)\n Gumtree.gumtree.getEditedNodes()\n return Gumtree.gumtree.isMatchWithEdit(repos_log_file)", "def pending_update(*args):\n if len(args) == 0:\n return len(cf.symbolic_updates) > 0\n else:\n for x in _expand_args(args):\n if is_graph_object(x) and x in cf.symbolic_updates:\n return True\n return False", "def tracked(path):\n return not any(fnmatch(part, pattern) for pattern in untracked for part in path.split(os.sep))", "def has_changed(self) -> bool:\n # TODO: Add in change logic here\n state = None\n if state != self._file_state:\n self._changed_flag = True\n self._file_state = state\n return self._changed_flag", "def _check_overlap(self, fe_commit):\n # +++ Avoid O(b branches * r rev) checks when\n # overlap is impossible because current branch\n # overlaps no other branch.\n if self._current_branch not in self._overlapping_branch_list():\n return\n\n for fe_file in fe_commit['files']:\n gwt_path = fe_file['path']\n depot_path = self.ctx.gwt_to_depot_path(gwt_path)\n\n for branch in self._overlapping_branch_list():\n if branch == self._current_branch:\n continue\n if not branch.intersects_depot_path(depot_path):\n continue\n\n LOG.debug(\"_check_overlap() branch {br1} <> {br2}\"\n \" gwt={gwt:<40} {dp}\\n{view}\"\n .format(\n br1 = 
p4gf_util.abbrev(self._current_branch.branch_id)\n , br2 = p4gf_util.abbrev(branch.branch_id)\n , gwt = gwt_path\n , dp = depot_path\n , view = \"\\n\".join(branch.view_p4map.as_array())\n ))\n\n if self._current_branch.is_new_fp_from_push or branch.is_new_fp_from_push:\n current_branch_name = self._current_branch.git_branch_name\n if self._current_branch.is_new_fp_from_push:\n current_branch_name += '(new)'\n other_branch_name = branch.git_branch_name\n if branch.is_new_fp_from_push:\n other_branch_name += '(new)'\n human_msg = (_(\n \"Perforce: Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\\n\"\n \" You are attempting to push and create a new fully populated branch\\n\"\n \" with paths which overlap another branch. Contact your admin\\n\"\n \" to configure non-conflicting destination branch paths.\\n\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = current_branch_name\n , b2 = other_branch_name ))\n else:\n human_msg = (_(\n \"Cannot commit {sha1} '{gwt_path}' to '{depot_path}'.\"\n \" Paths that overlap multiple Git Fusion branches are read-only.\"\n \" Branches: '{b1}', '{b2}'\")\n .format( sha1 = p4gf_util.abbrev(fe_commit['sha1'])\n , gwt_path = gwt_path\n , depot_path = depot_path\n , b1 = self._current_branch.branch_id\n , b2 = branch.branch_id ))\n raise PreflightException(human_msg)", "def can_safely_release(*repo_paths):\n if repo_has_uncommitted():\n return False\n if repo_has_incoming(*repo_paths):\n return False\n if repo_has_outgoing():\n return continue_with_outgoing()\n return True", "def is_git_dir_modified(c):\n res = c.run('git status --porcelain', hide='both')\n for line in res.stdout:\n if line.startswith(' M '):\n print('Git directory {} is modified'.format(c.cwd))\n return True\n\n print('Git directory {} is unmodified'.format(c.cwd))\n return False", "def checkGit(directory):", "def valid(self):\n return (self.get(\"~#mtime\", 0) and\n self[\"~#mtime\"] == util.mtime(self[\"~filename\"]))", "def _blocks_in_components_changed(self):\n for name, component in self._components.items():\n if name in self._cached_components \\\n and _blocks_changed_in_config(self._cached_components[name], self._components[name]):\n return True\n return False", "def svn_fs_paths_changed2(*args):\r\n return _fs.svn_fs_paths_changed2(*args)", "def _is_updated(self, var):\n # unknown\n if var.find(\"#u\") != -1:\n return True\n # constant\n if AsmParser.is_constant(var):\n return False\n cvar = self.arch.expand_reg_expr(var)\n for wrt in self.syncinfo.wrt_set:\n # If the var is updated with the lexicographically same value,\n # then we consider that the var is not updated.\n if var == wrt:\n continue\n # Otherwise, check it is updated.\n if var.find(wrt) != -1:\n # not in write set: comparison in a lexical form\n return True\n elif AsmParser.is_register(wrt):\n # not in write set: comparison in a normalized form\n cwrt = self.arch.expand_reg_expr(wrt)\n if self._overlap_cvars(cvar, cwrt) != None:\n return True\n return False", "def _newer(a: str, b: str) -> bool:\n if not os.path.exists(a):\n return False\n if not os.path.exists(b):\n return True\n return os.path.getmtime(a) >= os.path.getmtime(b)", "def new_commits(repo, sha):\n from datetime import datetime\n\n dateformat = \"%a, %d %b %Y %H:%M:%S GMT\"\n release_commit = repo.get_commit(sha)\n since = datetime.strptime(release_commit.last_modified, dateformat)\n commits = repo.get_commits(since=since)\n if len(list(commits)) == 1:\n 
return False\n return reversed(list(commits)[:-1])", "def check(self, path: str) -> bool:\n return (\n any([self._check_inc(path, i) for i in self.include])\n and path not in self.exclude\n )", "def _r_env_needs_updating(local_history: History, remote_history: History) -> bool:\n if not local_history:\n new_actions = remote_history.actions\n else:\n new_actions = set(remote_history.actions) - set(local_history.actions)\n for action in new_actions:\n if action.startswith(R_COMMAND):\n return True\n return False", "def validate(cfg: defs.Config) -> List[str]: # noqa: C901\n res: List[str] = []\n\n def check_branch(\n comp_name: str, branch_name: str, branch: Dict[str, defs.ComponentVersion]\n ) -> None:\n \"\"\"Validate versions within a single branch.\"\"\"\n uptodate_files: Dict[pathlib.Path, Tuple[pathlib.Path, defs.ComponentFile]] = {}\n\n if not RE_BRANCH_NAME.match(branch_name):\n res.append(f\"{comp_name}: Invalid branch name: {branch_name}\")\n\n for ver, version in sorted(branch.items()):\n if not RE_VERSION_STRING.match(ver):\n res.append(f\"{comp_name}/{branch_name}: Invalid version string: {ver}\")\n\n other_cksums, driver_cksums = _split_by_existence(comp_name, branch_name, version.files)\n if version.outdated:\n update_to = [\n o_version\n for o_version in branch.values()\n if not o_version.outdated\n and _split_by_existence(comp_name, branch_name, o_version.files)[0]\n == other_cksums\n ]\n if len(update_to) != 1:\n res.append(\n f\"{comp_name}/{branch_name}/{ver}: Got {len(update_to)} possible \"\n f\"versions to update to instead of exactly one\"\n )\n else:\n bad_files = sorted(\n relpath\n for relpath, (path, fdata) in driver_cksums.items()\n if util.file_sha256sum(path) != fdata.sha256\n )\n if bad_files:\n res.append(f\"{comp_name}/{branch_name}/{ver}: Bad checksum for {bad_files}\")\n\n if not uptodate_files:\n uptodate_files = driver_cksums\n elif uptodate_files != driver_cksums:\n res.append(\n f\"{comp_name}/{branch_name}: All the up-to-date versions should \"\n f\"define the same set of files with the same checksums\"\n )\n\n if not any(not version.outdated for version in branch.values()):\n res.append(f\"{comp_name}/{branch_name}: No non-outdated versions\")\n\n def check_component(comp_name: str, comp: defs.Component) -> None:\n \"\"\"Validate the definition of a single component.\"\"\"\n if not RE_COMP_NAME.match(comp_name):\n res.append(f\"Invalid component name: {comp_name}\")\n\n for branch_name, branch in sorted(comp.branches.items()):\n check_branch(comp_name, branch_name, branch)\n\n for comp_name, comp in sorted(cfg.all_components.components.items()):\n check_component(comp_name, comp)\n\n return res", "def has_changed(self):\n timestamp = os.stat(self.filename).st_mtime\n if timestamp > self.last_timestamp:\n self.last_timestamp = timestamp\n return True\n return False", "def is_dirty(self):\n return True in [n.is_dirty for n in self.nodes]", "def is_reachable_mut(self, mut, prev_args):\n mut_args = mut.args()\n for arg in prev_args:\n if arg not in mut_args:\n return False\n return True", "def _is_commit_sha(commit):\n return len(commit) == 40 and all([\n ch.isdigit() or (ch >= \"a\" and ch <= \"f\")\n for ch in commit.elems()\n ])", "def _blocks_changed(block1, block2):\n if block1.name != block2.name:\n return True\n\n # Check for any changed blocks (symmetric difference operation of sets)\n block_diff = set(block1.to_dict().items()) ^ set(block2.to_dict().items())\n if len(block_diff) > 0:\n return True\n\n return False", "def 
is_log_path_valid(self):\n if self.log_paths:\n return self.path in self.log_paths\n else:\n # If .log_paths is empty, just assume all paths are legal\n return True", "def status():\n if not check_for_wit():\n raise NoWitError(f'No .wit folder exists in {os.getcwd()}')\n if not os.path.exists(refs_path):\n print('No files have been committed yet')\n return False\n print(f'Current commit ID: {get_current_commit_id()}')\n print('Changes to be committed:')\n print('-' * 20)\n for num, file in enumerate(get_files_to_be_committed()):\n print(f'{num + 1}: {file}')\n print('\\n')\n print('Changes not staged for commit')\n print('-' * 20)\n for num, file in enumerate(get_files_not_staged()):\n print(f'{num + 1}: {file}')\n for file in deleted_files:\n print(f'{file} - deleted from main folder')\n print('\\n')\n print('Untracked files')\n print('-' * 20)\n for num, file in enumerate(get_untracked_files()):\n print(f'{num + 1}: {file}')", "def git_get_modified_files(\n paths: Iterable[Path], revrange: RevisionRange, cwd: Path\n) -> Set[Path]:\n relative_paths = {p.resolve().relative_to(cwd) for p in paths}\n str_paths = [path.as_posix() for path in relative_paths]\n if revrange.use_common_ancestor:\n rev2 = \"HEAD\" if revrange.rev2 == WORKTREE else revrange.rev2\n merge_base_cmd = [\"merge-base\", revrange.rev1, rev2]\n rev1 = _git_check_output_lines(merge_base_cmd, cwd)[0]\n else:\n rev1 = revrange.rev1\n diff_cmd = [\n \"diff\",\n \"--name-only\",\n \"--relative\",\n rev1,\n # revrange.rev2 is inserted here if not WORKTREE\n \"--\",\n *str_paths,\n ]\n if revrange.rev2 != WORKTREE:\n diff_cmd.insert(diff_cmd.index(\"--\"), revrange.rev2)\n lines = _git_check_output_lines(diff_cmd, cwd)\n if revrange.rev2 == WORKTREE:\n ls_files_cmd = [\n \"ls-files\",\n \"--others\",\n \"--exclude-standard\",\n \"--\",\n *str_paths,\n ]\n lines.extend(_git_check_output_lines(ls_files_cmd, cwd))\n changed_paths = (Path(line) for line in lines)\n return {path for path in changed_paths if should_reformat_file(cwd / path)}", "def is_changed(self) -> bool:\n return self.selected_vms != self._initial_vms", "def is_changed(obj):\n revision_field = get_version_fieldname(obj)\n version = get_revision_of_object(obj)\n return not obj.__class__.objects.filter(**{obj._meta.pk.name: obj.pk,\n revision_field: version}).exists()", "def is_path_valid(self,path):\n null_state=[0 for i in range(len(self.node_names))]\n null_state_matrix=np.matrix(null_state).T\n new_state=np.matrix(self.state).T\n for index,edge in enumerate(path):\n #print index\n #print edge\n edge_position=self.edges.index(edge)\n move_matrix=self.edge_matrices[edge_position]\n #print move_matrix\n new_state=move_matrix*new_state\n if new_state.any()==null_state_matrix.any():\n #print new_state\n #print null_state_matrix\n return False\n return True", "def approved(self) -> bool:\n return all(d.approved for d in self.affected_directories)", "def exists_path(self, start, end):\n return end in self.paths(start)", "def changed(self) -> bool:\n for chunk_location, chunk in self._chunk_cache.items():\n if chunk is None:\n # if the chunk is None and the saved record is not None, the chunk has changed.\n if chunk_location not in self._chunk_index:\n return True\n _, save_chunk_index = self._chunk_index[chunk_location]\n chunk_storage = self._chunk_history[chunk_location]\n if chunk_storage[save_chunk_index] is not None:\n return True\n elif chunk.changed:\n return True\n for chunk_index, save_chunk_index in self._chunk_index.values():\n if chunk_index != 
save_chunk_index:\n return True\n return False", "def is_staging_clean() -> bool:\n c = cmd.run(\"git diff --no-ext-diff --cached --name-only\")\n return not bool(c.out)", "def has_conflict(self):\n for diffstat in self.diffstat():\n if diffstat.has_conflict:\n return True\n return False", "def is_modified(self):\n return self._original_sections != self._sections", "def _git_exists_in_revision(path: Path, rev2: str) -> bool:\n # Surprise: On Windows, `git cat-file` doesn't work with backslash directory\n # separators in paths. We need to use Posix paths and forward slashes instead.\n cmd = [\"git\", \"cat-file\", \"-e\", f\"{rev2}:{path.as_posix()}\"]\n result = run(cmd, check=False, stderr=DEVNULL, env={\"LC_ALL\": \"C\"})\n return result.returncode == 0", "def check_modified(self) -> bool:\n return bool(self._modified)", "def _compare_revisions(self, last_roll_revision, new_roll_revision):\n # Ensure that new_roll_revision is not an ancestor of old_roll_revision.\n try:\n subprocess2.check_call(['git', '--git-dir', self._project_git_dir,\n 'merge-base', '--is-ancestor',\n new_roll_revision, last_roll_revision])\n print ('Already at %s refusing to roll backwards to %s.' % (\n last_roll_revision, new_roll_revision))\n return False\n except subprocess2.CalledProcessError:\n pass\n return True", "def _abort_on_conflicting_untracked_paths(self) -> None:\n repo = get_git_repo()\n\n if not repo or self._base_commit is None:\n return\n\n changed_paths = set(\n self._status.added\n + self._status.modified\n + self._status.removed\n + self._status.unmerged\n )\n untracked_paths = {\n self._fname_to_path(repo, str(path))\n for path in (self._dirty_paths_by_status.get(StatusCode.Untracked, []))\n }\n overlapping_paths = untracked_paths & changed_paths\n\n if overlapping_paths:\n raise ActionFailure(\n \"Some paths that changed since the baseline commit now show up as untracked files. 
\"\n f\"Please commit or stash your untracked changes in these paths: {overlapping_paths}.\"\n )", "def check_build_status(owner, repository, ref):\n return get_hvcs().check_build_status(owner, repository, ref)", "def check_paths(self):\r\n\t\tself.check_line_edits_and_refresh_filestate()\r\n\t\t# paths\r\n\t\tsource_img_filename = self.source_img_entry.text().replace(\"\\\\\", \"/\")\r\n\t\tsink_dir_name = self.sink_dir_entry.text().replace(\"\\\\\", \"/\")\r\n\t\tsink_db_name_entry_text = self.sink_db_name_entry.text()\r\n\t\tdb_ext = \".db\" if not sink_db_name_entry_text.lower().endswith(\".db\") else \"\"\r\n\t\tsink_db_filename = os.path.join(sink_dir_name, sink_db_name_entry_text + db_ext).replace(\"\\\\\", \"/\")\r\n\t\tsource_db_filename = \"\"\r\n\r\n\t\t# check validity\r\n\t\tsource_img_filename_valid = self.filestate.is_valid(source_img_filename, SOURCE_IMG)\r\n\t\tsink_dir_name_valid = self.filestate.is_valid(sink_dir_name, SINK_DIR)\r\n\t\tsink_db_filename_valid = self.filestate.is_valid(sink_db_filename, SINK_DB)\r\n\t\tsource_db_filename_valid = True\r\n\r\n\t\tall_paths_valid = source_img_filename_valid and sink_dir_name_valid and sink_db_filename_valid\r\n\r\n\t\tif self.existing_case:\r\n\t\t\tsource_db_filename = self.source_db_entry.text()\r\n\t\t\tsource_db_filename_valid = self.filestate.is_valid(source_db_filename, SOURCE_DB)\r\n\t\t\tall_paths_valid = all_paths_valid and source_db_filename_valid\r\n\r\n\t\tif all_paths_valid:\r\n\t\t\tself.filestate.set_source_img_filename(source_img_filename)\r\n\t\t\tself.filestate.set_sink_dir_name(sink_dir_name)\r\n\t\t\tself.filestate.set_sink_db_filename(sink_db_filename)\r\n\t\t\tif self.existing_case:\r\n\t\t\t\tself.filestate.set_source_db_filename(source_db_filename)\r\n\t\t\tself.refresh_UI()\r\n\t\t\treturn True\r\n\r\n\t\t# in the case of invalidity\r\n\t\tif not source_img_filename_valid:\r\n\t\t\tif not self.filestate.source_img_file_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source image file at does not exist.\")\r\n\t\t\telif not self.filestate.source_img_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source image file type is invalid (must be .npy).\")\r\n\t\t\tself.filestate.set_source_img_filename(\"\")\r\n\t\tif not source_db_filename_valid: # only if existing case\r\n\t\t\tif not self.source_db_file_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source database file does not exist.\")\r\n\t\t\telif not self.filestate.source_db_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided source database file type is invalid (must be .db)\")\r\n\t\t\tself.filestate.set_source_db_filename(\"\")\r\n\t\tif not sink_dir_name_valid:\r\n\t\t\tif not self.filestate.sink_dir_exists:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided sink directory does not exist.\")\r\n\t\t\telif not self.sink_dir_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Provided sink directory format is invalid.\")\r\n\t\t\tself.filestate.set_sink_dir_name(\"\")\r\n\t\tif not sink_db_filename_valid:\r\n\t\t\tif sink_dir_name_valid and not self.filestate.sink_db_file_preexists and \\\r\n\t\t\t\t\tself.filestate.sink_db_file_format_valid and \\\r\n\t\t\t\t\tdisplay_yes_no_message(self, \"Create file at \" + sink_db_filename + \"?\"):\r\n\t\t\t\t# create file with read write permissions\r\n\t\t\t\t###########################################\r\n\t\t\t\ttry:\r\n\t\t\t\t\tsink_db_file = open(sink_db_filename, \"w+\")\r\n\t\t\t\t\tsink_db_file.close()\r\n\t\t\t\texcept IOError 
as error:\r\n\t\t\t\t\tdisplay_warning_message(self, \"Failed to create provided sink database file: \" + error)\r\n\t\t\t\t###########################################\r\n\t\t\t\t# set sink db filename\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.filestate.set_sink_db_filename(sink_db_filename)\r\n\t\t\t\t\tself.refresh_UI()\r\n\t\t\t\t\treturn True\r\n\t\t\telif not self.filestate.sink_db_file_format_valid:\r\n\t\t\t\tdisplay_warning_message(self, \"Be sure to specify a name for the sink database.\")\r\n\t\t\tself.filestate.set_sink_db_filename(\"\")\r\n\r\n\t\t# print(\"paths invalid\")\r\n\t\tself.refresh_UI()\r\n\t\treturn False", "def blocks_changed(self):\n return self._blocks_in_top_level_config_changed() \\\n or self._blocks_in_components_changed() \\\n or self._blocks_removed_from_top_level_config() \\\n or self._new_components_containing_blocks() \\\n or self._removed_components_containing_blocks()", "def is_legal(self, start, end) -> bool:\n return self.board(end) == 0 \\\n and self.board(start) > 0 \\\n and self._check_zone_locks(start, end) \\\n and self.exists_path(start, end)", "def file_is_modified(filename, lastupdate):\n now = datetime.datetime.utcnow()\n update = file_get_mdatetime(filename)\n return now >= update and update >= lastupdate", "def verify_git_clean(path):\n\n sys.stdout.write(\" - Checking for uncommitted changes:\")\n result = run_in_component(path, ['git', 'status', '--porcelain=v1'])\n\n lines = [x for x in result.splitlines() if len(x) > 0]\n\n if len(lines) == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"There are uncommitted changes in the component, please commit or stash them\")", "def has_unsaved_changes(self):\n return self._file_content != self.buffer.text", "def _have_strat_array_deps_changed(self, dependent_files=None, lut_file=None):\n def check_hash(file_path, hash):\n if file_path is None:\n return True\n else:\n return hash == ioutils.make_dependent_file_hash(file_path)\n\n dependent_files = self.list_strat_dependent_files() if dependent_files is None else dependent_files\n lut_file = self.get_strat_lut_file() if lut_file is None else lut_file\n\n with xr.open_dataset(lut_file) as ds:\n # First verify that the SHA1 hashes for the MLO and SMO match. If not, we should recalculate the strat array\n # rather than use one calculated with old MLO/SMO data\n for att_name, file_path in dependent_files.items():\n if att_name not in ds.attrs:\n logger.important('{dep_file} not listed as an attribute in {lut_file}, assuming strat LUT needs '\n 'regenerated'.format(dep_file=att_name, lut_file=lut_file))\n return True\n if not check_hash(file_path, ds.attrs[att_name]):\n logger.important('{dep_file} appears to have changed since the last time the {lut_file} was '\n 'generated'.format(dep_file=att_name, lut_file=lut_file))\n return True\n\n return False", "def _check_guts_toc(attr, old, toc, last_build, pyc=0):\n return _check_guts_eq (attr, old, toc, last_build) \\\n or _check_guts_toc_mtime(attr, old, toc, last_build, pyc=pyc)", "def _changes_are_easy(klass, event, series_data, date_data):\n # Changes in how the recurrence behaves are hard.\n for k, v in series_data.iteritems():\n if getattr(event.parent_series, k) != v:\n return False\n\n # Changes in start and end dates are hard.\n for k, v in date_data.iteritems():\n if getattr(event, k) != v:\n return False\n\n # Changes are easy\n return True" ]
[ "0.6485178", "0.64627033", "0.63761", "0.6337259", "0.6332157", "0.6283598", "0.6267245", "0.6246275", "0.6215795", "0.6191025", "0.6181917", "0.6179466", "0.6164216", "0.6145015", "0.6080553", "0.6079178", "0.6058201", "0.6047565", "0.6034595", "0.6026013", "0.6012013", "0.60063803", "0.59711635", "0.5956637", "0.5941676", "0.5896293", "0.5873296", "0.58268684", "0.58145463", "0.57890725", "0.5783472", "0.5775918", "0.5764553", "0.5744434", "0.5724406", "0.56982964", "0.56547755", "0.56202036", "0.5611878", "0.5611666", "0.55765176", "0.5571821", "0.55689114", "0.5567348", "0.55576396", "0.5557254", "0.55480915", "0.5517215", "0.5500637", "0.549926", "0.5498779", "0.5491383", "0.54822814", "0.54804647", "0.5478743", "0.54699093", "0.546547", "0.54360217", "0.5433928", "0.53849375", "0.53825206", "0.5382476", "0.5377063", "0.53758204", "0.5372892", "0.5356493", "0.53516716", "0.53495026", "0.53489244", "0.53454673", "0.5336967", "0.53364784", "0.53311217", "0.5327658", "0.53262734", "0.5323181", "0.5322112", "0.5306363", "0.53046334", "0.52921045", "0.52825785", "0.52785033", "0.5263253", "0.52620816", "0.5261322", "0.52432126", "0.52423286", "0.5237318", "0.52331495", "0.52282035", "0.5227946", "0.5226091", "0.52206475", "0.5211404", "0.5208203", "0.52074176", "0.5204629", "0.52046245", "0.52037525", "0.5202239" ]
0.8217679
0
Get docker build args dict, rendering any templated args.
def render_build_args(options, ns):
    build_args = options.get('buildArgs', {})
    for key, value in build_args.items():
        build_args[key] = value.format(**ns)
    return build_args
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def docker_build_context(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"docker_build_context\")", "def docker_build_context(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"docker_build_context\")", "def read_dockerfile_for_args(target):\n import colorama\n build_args = {}\n missing_args = {}\n empty_string = \"\"\n\n # read dockerfile for args that have no value\n try:\n with open(target + '/Dockerfile') as dockerfile:\n for line in dockerfile:\n if line.startswith(\"ARG \"):\n dockerfile_args = line.replace(\n \"ARG \", \"\").strip(\"\\n\").split(\"=\")\n\n arg_name = dockerfile_args[0]\n arg_value = \"\"\n\n if len(dockerfile_args) > 1:\n arg_value = dockerfile_args[1].strip(\"\\n\")\n\n env_value = os.environ.get(arg_name)\n\n build_args[arg_name] = arg_value\n if not env_value is None:\n build_args[arg_name] = env_value\n\n if build_args[arg_name] is empty_string:\n missing_args[arg_name] = arg_name\n except FileNotFoundError:\n exit(f\"Dockerfile not found: {target}/Dockerfile\")\n\n if len(missing_args) > 1:\n message = \"WARNING: Arguments found with no defined value \" \\\n \"found in Dockerfile or environment [{}]\"\n print(colorama.Fore.YELLOW + colorama.Style.BRIGHT +\n message.format(\", \".join(missing_args)))\n\n return build_args", "def cookiecutter_args(self) -> dict[str, str]:\n local_args = {\n \"add_golden\": \"y\" if self.golden_tests else \"n\",\n \"copyright_holder\": self.copyright_holder,\n \"copyright_year\": (\n self.today.strftime(\"%Y\")\n if not self.copyright_year\n else self.copyright_year\n ),\n \"github_owner\": self.github_owner,\n \"name\": self.name,\n \"slug\": self.slug,\n # The template expects the test cases in a single string separated by\n # spaces.\n \"test_cases\": \" \".join(self.test_cases),\n }\n cruft_json = self.target_dir / \".cruft.json\"\n if cruft_json.is_file():\n with open(cruft_json, \"r\", encoding=\"utf-8\") as f:\n cruft_json_data = json.load(f)\n args = cruft_json_data[\"context\"][\"cookiecutter\"]\n for k, v in local_args.items():\n args[k] = v\n else:\n args = local_args\n\n return args", "def build_docker_build_command(configuration):\n parts = configuration.pop('docker', 'docker').split()\n parts.append('build')\n\n build = configuration.pop('build')\n\n build['path'] = os.path.join(configuration['workspace'], build['path'])\n build['file'] = os.path.join(build['path'], build['file'])\n\n parts.extend(build_parameter_parts(\n build, 'tag', 'file', 'no-cache', 'quiet', 'cpu-shares', 'memory'))\n\n parts.extend(build_dict_parameter_parts(build, 'build-arg'))\n parts.append(build.pop('path'))\n\n return parts", "def build_args(self, project_update, private_data_dir, passwords):\n args = []\n if getattr(settings, 'PROJECT_UPDATE_VVV', False):\n args.append('-vvv')\n if project_update.job_tags:\n args.extend(['-t', project_update.job_tags])\n return args", "def _get_context(data):\n try:\n docker_options = DockerRunCommandOptions(cmd=\"docker run --help\",\n start=\"Options:\",\n end=None).get_options_json()\n except Exception as ex:\n print(ex)\n docker_options = {}\n context = DEFAULT_DATA.copy()\n context[\"docker_options\"] = docker_options\n context.update(data)\n context[\"registry\"][\"address_select\"] = \"\"\n if context[\"registry\"][\"address\"] in context[\"registry_options\"].keys():\n context[\"registry\"][\"address_select\"] = context[\"registry\"][\"address\"]\n return context", "def parse_arguments(self):\n self.args = self.argparser.parse_args(self.template_args) # noqa: 
T484\n\n # get values from args or defaults\n for name, (categ, rest) in self.data.items():\n if categ not in '<>?':\n continue\n val = getattr(self.args, name)\n if rest.get('type') == 'flag':\n val = str(rest.get('val')) if val else ''\n else:\n val = val if val is not None else rest.get('default')\n self.variables[name] = val\n\n # possibly fill in substitutions in the template variables\n findreplace = re.compile(r'{{\\s*(\\w+)\\s*}}')\n for name, val in self.variables.items():\n if findreplace.search(val):\n t = jinja2.Template(val)\n self.variables[name] = t.render(self.variables)", "def docker_params(self):\n return {}", "def _render_args(self, target, output_dir):\n args = []\n\n # Glossary of used aapt flags. Aapt handles a ton of action, this will continue to expand.\n # : 'package' is the main aapt operation (see class docstring for more info).\n # : '-m' is to \"make\" a package directory under location '-J'.\n # : '-J' Points to the output directory.\n # : '-M' is the AndroidManifest.xml of the project.\n # : '-S' points to the resource_dir to \"spider\" down while collecting resources.\n # : '-I' packages to add to base \"include\" set, here it is the android.jar of the target-sdk.\n args.extend([self.aapt_tool(target.build_tools_version)])\n args.extend(['package', '-m', '-J', output_dir])\n args.extend(['-M', target.manifest.path])\n args.extend(['-S', target.resource_dir])\n args.extend(['-I', self.android_jar_tool(target.manifest.target_sdk)])\n args.extend(['--ignore-assets', self.ignored_assets])\n logger.debug('Executing: {0}'.format(' '.join(args)))\n return args", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )", "def docker_build(c):\n cli_tasks.docker_build.run(c)", "def build(parser):\n parser.add_argument(\n '-i', '--identity-file',\n help=(\n 'A SSH private key file which may be used to pull down '\n 'repositories when building.'\n ),\n )\n parser.add_argument(\n '-e', '--env',\n action='append',\n default=[],\n help=(\n 'Add environ variables to the build. These may be accessed in '\n 'the build scripts. Each variable should be of the format '\n 'KEY=VALUE. This may be used to pass in credentials required '\n 'to access private repositories. May be specified more than once.'\n ),\n )\n parser.add_argument(\n '-b', '--build-dir',\n default=os.getcwd(),\n help=(\n 'This folder should be accessible from the docker instance.'\n ),\n )\n parser.add_argument(\n '--archive',\n help=(\n 'Archive the build files into a local tarball.'\n ),\n )\n parser.add_argument(\n '--archive-only',\n action='store_true',\n default=False,\n help=(\n 'Skip tagging and building the runner image.'\n ),\n )\n parser.add_argument(\n '-t', '--tag',\n help=(\n 'Tag to apply to the built image. '\n 'This will default to the current date/time.'\n ),\n )\n parser.add_argument(\n '--no-cache',\n dest='use_cache',\n action='store_false',\n default=True,\n help=(\n 'Do not mount a cache volume when compiling the app.'\n ),\n )\n parser.add_argument(\n '--cache',\n metavar='CONTAINER:PATH',\n help=(\n 'An optional volume or location for the cache. 
The format is '\n '\"<volume_id>:<path>\" where the \"volume_id\" must be the '\n 'name or hash of an existing volume. The \"path\" is an absolute '\n 'path to the cache folder/volume within the build container.'\n '\\n\\n'\n 'By default a container will be created by mangling the name of '\n 'the app by appending \"__buildcache\" (e.g. \"myapp__buildcache\").'\n '\\n\\n'\n 'This option is ignored if --no-cache is specified.'\n '\\n\\n'\n 'The \"volume_id\" may be an absolute path on the host filesystem.'\n '\\n\\n'\n 'The \"path\" may be dropped, in which case it will default to '\n '/tmp/cache inside the build container.'\n '\\n\\n'\n 'Examples:'\n '\\n\\n'\n ' # custom volume with default path\\n'\n ' --cache my_cache'\n '\\n\\n'\n ' # custom path inside of volume\\n'\n ' --cache my_cache:/tmp/cache'\n '\\n\\n'\n ' # host filesystem\\n'\n ' --cache /tmp/cache'\n ),\n )\n parser.add_argument(\n '--rebuild-cache',\n action='store_true',\n default=False,\n help=(\n 'Delete any cached artifacts prior to building.'\n ),\n )\n parser.add_argument(\n '--skip-cleanup',\n action='store_true',\n default=False,\n help=(\n 'Skip removal of images and containers.'\n ),\n )\n parser.add_argument(\n 'app',\n help=(\n 'Path to an application folder with a meta.yml file'\n ),\n )", "def _generateWindowsBuildArgs(\n self, logger, basetagOverride=None, isolationOverride=None\n ):\n\n # Determine the appropriate container image base tag for the host system release unless the user specified a base tag\n buildArgs = []\n hostBaseTag = WindowsUtils.getHostBaseTag()\n baseTag = basetagOverride if basetagOverride is not None else hostBaseTag\n\n if baseTag is None:\n raise RuntimeError(\n \"unable to determine Windows Server Core base image tag from host system. Specify it explicitly using -basetag command-line flag\"\n )\n\n buildArgs = [\"--build-arg\", \"BASETAG={}\".format(baseTag)]\n\n # Use the default isolation mode unless requested otherwise\n dockerInfo = DockerUtils.info()\n isolation = (\n isolationOverride\n if isolationOverride is not None\n else dockerInfo[\"Isolation\"]\n )\n buildArgs += [\"--isolation={}\".format(isolation)]\n\n # If the user specified process isolation mode and a different base tag to the host system then warn them\n prefix = self.getPrefix()\n if isolation == \"process\" and baseTag != hostBaseTag:\n logger.info(\n \"[{}] Warning: attempting to use different Windows container/host versions\".format(\n prefix\n ),\n False,\n )\n logger.info(\n \"[{}] when running in process isolation mode, this will usually break!\".format(\n prefix\n ),\n False,\n )\n\n # Set a sensible memory limit when using Hyper-V isolation mode\n if isolation == \"hyperv\":\n buildArgs += [\"-m\", \"4GiB\"]\n\n return buildArgs", "def main():\n parser = argparse.ArgumentParser(\n epilog=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\n \"-d\", \"--dry-run\", action=\"store_true\", default=0, help=\"Dry run mode.\"\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"Verbosity. 
Default is WARNING level.\",\n )\n\n subparsers = parser.add_subparsers(help=\"Sub commands\", dest=\"subparser\")\n subparsers.required = True\n\n build_parser = subparsers.add_parser(\n \"build\",\n description=\"Build an image from Dockerfile, caching image hierarchy\",\n help=\"Build an image from a Dockerfile\",\n )\n build_parser.add_argument(\n \"path\", metavar=\"PATH\", help=\"The build context directory\"\n )\n build_parser.add_argument(\n \"-f\",\n \"--file\",\n help=\"Name of the Dockerfile. If not provided, \"\n \"will use config.DOCKERFILE_PATH_PATTERN to compute. \",\n )\n build_parser.add_argument(\n \"-v\",\n \"--git-sha\",\n required=True,\n help=\"The version of code to build against, \" \"will pass as GIT_SHA variable\",\n )\n build_parser.add_argument(\n \"-n\", \"--name\", required=True, help=\"The name of the image to build\"\n )\n build_parser.add_argument(\n \"--build-arg\",\n metavar=\"ARG=VALUE\",\n nargs=\"*\",\n default=[],\n help=\"Set extra build-time variables. GIT_SHA, TIMESTAMP will be passed by default.\",\n )\n build_parser.add_argument(\n \"-r\",\n \"--raw\",\n action=\"store_true\",\n help=\"Whether to use raw docker build command to build, skipping caching logic\",\n )\n build_parser.add_argument(\n \"--registry\",\n default=config.DOCKER_REGISTRY,\n help=\"Docker registry use to determine the image identity, \"\n \"can be set via IMAGE_BUILDER_DOCKER_REGISTRY environment variable, \"\n 'or set DOCKER_REGISTRY in config.py. Default is \"%(default)s\"',\n )\n build_parser.add_argument(\n \"-t\",\n \"--tag-pattern\",\n default=config.GIT_SHA_TAG_PATTERN,\n help=\"Tag pattern, can only include one `{git_sha}` placeholder, \"\n 'such as \"{git_sha}-new\". If the tag exists, we won\\'t rebuild it. '\n 'Default is \"%(default)s\"',\n )\n build_parser.add_argument(\n \"-e\",\n \"--extra-tag\",\n nargs=\"*\",\n default=[],\n help=\"Extra tags to tag to the final images\",\n )\n build_parser.add_argument(\n \"--extra-name\",\n nargs=\"*\",\n default=[],\n help=\"Extra name and optionally with a tag in the 'name:tag' format\",\n )\n build_parser.add_argument(\n \"-o\", \"--output-hash\", help=\"The output filename of the files hash log.\"\n )\n build_parser.set_defaults(func=build)\n\n args = parser.parse_args()\n if args.dry_run:\n # DRY_RUN env will be read in image_builder.libs.process\n os.environ[\"DRY_RUN\"] = \"1\"\n\n if args.func == build:\n args.path = expand_path(args.path)\n if args.output_hash:\n args.output_hash = expand_path(args.output_hash)\n\n args.file = args.file or locate_dockerfile(args.name)\n args.file = expand_path(args.file)\n # set environ for main dockerfile for possibly retrieving later\n os.environ[\n config.DOCKERFILE_ENV_PATTERN.format(image_name=args.name)\n ] = args.file\n\n # change CWD to PATH\n os.chdir(args.path)\n\n if not args.registry:\n parser.error(\n \"--registry should be provied \"\n \"or specified by IMAGE_BUILDER_DOCKER_REGISTRY environment variable or set DOCKER_REGISTRY in config.py\"\n )\n if not all(\"=\" in kv for kv in args.build_arg):\n parser.error(\"--build_arg must be in ARG=VALUE format\")\n\n # set git_sha_tag\n try:\n args.git_sha_tag = args.tag_pattern.format(git_sha=args.git_sha)\n except KeyError:\n parser.error(\n 'Wrong --tag-pattern provided. 
Can only include one `{git_sha}` placeholder, such as \"{git_sha}-new\"'\n )\n\n # setup logging\n level = logging.WARNING - args.verbose * 10\n logging.basicConfig(\n level=level, format=\"%(asctime)s %(name)s %(levelname)s %(message)s\"\n )\n\n if args.output_hash:\n h = logging.FileHandler(args.output_hash)\n h.setLevel(logging.DEBUG)\n h.setFormatter(logging.Formatter(\"%(message)s\"))\n hash_logger.addHandler(h)\n\n # Suppress warning when we don't verify ssl\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n return args.func(args)", "def args(self):\n if not self.__args_updated:\n for inc in self.include_templates:\n self.__args.update(inc.args)\n self.__args_updated = True\n return self.__args", "def _translate_docker_properties(self):\n self.spec.setdefault(\"name\", self.spec.pop(\"container_name\", self.name))\n self.spec.setdefault(\"command\", shlex.split(self.spec.pop(\"entrypoint\", \"\")))\n self.spec.setdefault(\"args\", shlex.split(self.spec.pop(\"cmd\", \"\")))\n \n self.spec.setdefault(\"env\", _make_env(self.spec.pop(\"environment\", {})))\n for env in self.spec.get(\"env\", []):\n if \"value\" in env:\n env[\"value\"] = str(env[\"value\"])\n\n self.spec.setdefault(\"stdin\", self.spec.pop(\"stdin_open\", None))\n self.spec.setdefault(\"workingDir\", self.spec.pop(\"working_dir\", None))\n\n privileged = self.spec.pop(\"privileged\", None)\n if privileged:\n self.spec.setdefault(\"securityContext\", {})\n self.spec[\"securityContext\"].setdefault(\"privileged\", privileged)\n\n # Clean-up any empty fields\n self.spec = {k: v for k, v in self.spec.items() if v}", "def docker_info(args): # type: (CommonConfig) -> t.Dict[str, t.Any]\n stdout, _dummy = docker_command(args, ['info', '--format', '{{json .}}'], capture=True, always=True)\n return json.loads(stdout)", "def wrapper_environment(args):\n\n return {\n ENVIRONMENT_KEY: json.dumps({\n 'verbose': args.verbose,\n 'cc': shlex.split(args.cc),\n 'cxx': shlex.split(args.cxx)\n })\n }", "def build(context, cache=True, force_rm=False, hide=False):\n python_name = f\"{IMAGE_NAME}-{IMAGE_VER}\"\n docker_name = f\"{IMAGE_NAME}:{IMAGE_VER}\"\n\n print(f\"Building Python package {python_name}\")\n run_cmd(\n context=context,\n exec_cmd=\"poetry build\",\n pty=False,\n error_message=f\"Failed to build Python package {python_name}\",\n )\n\n print(f\"Building Docker image {docker_name}\")\n command = (\n f\"docker build --tag {docker_name} \"\n f\"--build-arg LMA_VERSION={IMAGE_VER} --build-arg WHEEL_DIR=dist \"\n f\"-f Dockerfile .\"\n )\n\n if not cache:\n command += \" --no-cache\"\n if force_rm:\n command += \" --force-rm\"\n\n run_cmd(\n context=context,\n exec_cmd=command,\n pty=False,\n hide=hide,\n error_message=f\"Failed to build Docker image {docker_name}\",\n )", "def build(args):\n\n logging.info(\"Parsing configuration...\")\n try:\n config = {\"site\": configurator.get_config(os.path.join(args.src, args.configfile))}\n except Exception as exc:\n sys.exit(\"Error during configuration: \" + str(exc))\n\n if (args.autobaseurl):\n config[\"site\"][\"baseurl\"] = os.path.abspath(args.dest)\n\n logging.info(\"Loading and pre-processing content...\")\n if (os.path.isdir(os.path.join(args.src, paths.POSTS_PATH))):\n try:\n config[\"posts\"] = loader.get_from_folder(os.path.join(args.src, paths.POSTS_PATH), config)\n except ValueError as exc:\n sys.exit(\"Error loading posts: \" + str(exc))\n else:\n config[\"posts\"] = {}\n\n if (os.path.isdir(os.path.join(args.src, 
paths.PAGES_PATH))):\n try:\n config[\"pages\"] = loader.get_from_folder(os.path.join(args.src, paths.PAGES_PATH), config)\n except ValueError as exc:\n sys.exit(\"Error loading pages: \" + str(exc))\n else:\n config[\"pages\"] = {}\n\n logging.debug(\"Configuring Jinja2 environment...\")\n jinjaEnv = configurator.configure_jinja(config[\"site\"][\"theme\"], args.src)\n\n logging.debug(\"Initializing builder...\")\n Builder(jinjaEnv, config, args.src, args.dest, args.noclean).build()", "def render(self, *args, **kwargs):\r\n for dictarg in args: kwargs.update(dictarg)\r\n stdout = []\r\n self.execute(stdout, kwargs)\r\n return ''.join(stdout)", "def build_arguments(self, *cmd_args, **cmd_kwargs):\n args = []\n args.extend(cmd_args)\n\n for raw_key, value in cmd_kwargs.items():\n if len(raw_key) == 1:\n args.append('-{}'.format(raw_key))\n else:\n key = raw_key.replace('_', '-')\n args.append('--{}'.format(key))\n\n if value is True:\n # If True, it is enough.\n # e.g.: system=True translates to --system\n continue\n\n args.append(str(value))\n\n return args", "def _dict_to_args(self, arg_dict):\n if arg_dict:\n yield \"--{}=data:application/json;charset=utf-8,{}\".format(\n self._CONFIG_FLAG.name,\n urllib.parse.quote(json_encode(arg_dict, pretty=False), encoding=\"utf-8\")\n )", "def cmd_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--image\",\n help=\"Full image path can be optionally supplied.\")\n args = parser.parse_args()\n return args", "def _generate_template_context(arguments: PackagingResourceArguments,\n manifest: OdahuProjectManifest,\n output_folder: str) -> DockerTemplateContext:\n logging.info('Building context for template')\n\n return DockerTemplateContext(\n model_name=manifest.model.name,\n model_version=manifest.model.version,\n odahuflow_version=manifest.odahuflowVersion,\n timeout=arguments.timeout,\n host=arguments.host,\n port=arguments.port,\n workers=arguments.workers,\n threads=arguments.threads,\n pythonpath=output_folder,\n wsgi_handler=f'{HANDLER_MODULE}:{HANDLER_APP}',\n model_location=ODAHU_SUB_PATH_NAME,\n entrypoint_target=ENTRYPOINT_TEMPLATE,\n handler_file=f'{HANDLER_MODULE}.py',\n base_image=arguments.dockerfileBaseImage,\n conda_file_name=CONDA_FILE_NAME,\n conda_server_file_name=CONDA_SERVER_FILE_NAME,\n entrypoint_docker=ENTRYPOINT_TEMPLATE\n )", "def get_args_from_console(args):\n return {\n \"cleaning_policy\": args.cleaning_policy,\n \"clear\": args.clear,\n \"content\": args.content,\n \"dry_run\": args.dry_run,\n \"force\": args.force,\n \"in_lines\": args.in_lines,\n \"max_size\": args.max_size,\n \"regex\": args.regex,\n \"restore\": args.restore,\n \"rmdir\": args.rmdir,\n \"short\": args.short,\n \"silent\": args.silent,\n \"storage_time\": args.storage_time,\n \"wastebasket_path\": args.wastebasket_path\n }", "def get_argdict(cls, toolchain, args):\n return {} # Empty must be overloaded (if required)", "def _create_container_args(kwargs):\n # Copy over kwargs which can be copied directly\n create_kwargs = {}\n for key in copy.copy(kwargs):\n if key in RUN_CREATE_KWARGS:\n create_kwargs[key] = kwargs.pop(key)\n host_config_kwargs = {}\n for key in copy.copy(kwargs):\n if key in RUN_HOST_CONFIG_KWARGS:\n host_config_kwargs[key] = kwargs.pop(key)\n\n # Process kwargs which are split over both create and host_config\n ports = kwargs.pop('ports', {})\n if ports:\n host_config_kwargs['port_bindings'] = ports\n\n volumes = kwargs.pop('volumes', {})\n if volumes:\n host_config_kwargs['binds'] = volumes\n\n 
network = kwargs.pop('network', None)\n network_driver_opt = kwargs.pop('network_driver_opt', None)\n if network:\n network_configuration = {'driver_opt': network_driver_opt} \\\n if network_driver_opt else None\n\n create_kwargs['networking_config'] = {network: network_configuration}\n host_config_kwargs['network_mode'] = network\n\n # All kwargs should have been consumed by this point, so raise\n # error if any are left\n if kwargs:\n raise create_unexpected_kwargs_error('run', kwargs)\n\n create_kwargs['host_config'] = HostConfig(**host_config_kwargs)\n\n # Fill in any kwargs which need processing by create_host_config first\n port_bindings = create_kwargs['host_config'].get('PortBindings')\n if port_bindings:\n # sort to make consistent for tests\n create_kwargs['ports'] = [tuple(p.split('/', 1))\n for p in sorted(port_bindings.keys())]\n if volumes:\n if isinstance(volumes, dict):\n create_kwargs['volumes'] = [\n v.get('bind') for v in volumes.values()\n ]\n else:\n create_kwargs['volumes'] = [\n _host_volume_from_bind(v) for v in volumes\n ]\n return create_kwargs", "def prepare():\n sh('docker build --rm -t {image} {dir}', image=IMAGE, dir=os.path.dirname(__file__))", "def args(hub, val: List[str] or str) -> Tuple[List[str], Dict[str, str]]:\n args = []\n kwargs = {}\n for v in hub.render.cli.render(val):\n if isinstance(v, dict):\n kwargs.update(v)\n else:\n args.append(v)\n\n return args, kwargs", "def templateargs(self, target_jar, confs=None):\r\n raise NotImplementedError()", "def parse_arguments():\n custom_config = config.read()\n arguments = docopt(__doc__, version='Montanus %s' % __version__)\n logger.debug(custom_config)\n conf_file = arguments.get('--with-conf')\n if conf_file is not None:\n conf_config = config.read(conf_file)\n\n for (k, v) in conf_config.items():\n if v is not None:\n custom_config[k] = v\n\n logger.debug(arguments)\n command_config = {\n 'templates_path': arguments.get('<templates_path>'),\n 'static_files_path': arguments.get('--with-static-files-path') \\\n if arguments.get('-with-static-files-path') is not None \\\n else arguments.get('<templates_path>'),\n 'delete_source': arguments.get('--delete'),\n 'protocol': arguments.get('--with-protocol'),\n 'domains': arguments.get('--with-domains').split(',') \\\n if arguments.get('--with-domains') is not None \\\n else None,\n 'md5_len': int(arguments.get('--with-md5-len')),\n 'md5_concat_by': arguments.get('--with-md5-concat-by')\n }\n logger.debug(command_config)\n\n for (k, v) in command_config.items():\n if v is not None:\n custom_config[k] = v\n\n logger.debug(custom_config)\n return DictWrapper(custom_config)", "def _get_docker_run_cmd(name, image,\n uidgid=None,\n commands=None,\n use_shell=True):\n tpl = (\n 'exec $TREADMILL/bin/treadmill sproc docker'\n ' --name {name}'\n ' --envdirs /env,/docker/env,/services/{name}/env'\n )\n\n # FIXME: hardcode volumes for now\n treadmill_bind = subproc.resolve('treadmill_bind_distro')\n volumes = [\n ('/var/log', '/var/log', 'rw'),\n ('/var/spool', '/var/spool', 'rw'),\n ('/var/tmp', '/var/tmp', 'rw'),\n ('/docker/etc/hosts', '/etc/hosts', 'ro'),\n ('/env', '/env', 'ro'),\n (treadmill_bind, TREADMILL_BIND_PATH, 'ro'),\n ]\n for volume in volumes:\n tpl += ' --volume {source}:{dest}:{mode}'.format(\n source=volume[0],\n dest=volume[1],\n mode=volume[2]\n )\n\n if uidgid is not None:\n tpl += ' --user {uidgid}'.format(uidgid=uidgid)\n\n tpl += ' --image {image}'\n\n # put entrypoint and image in the last\n if commands is not None:\n commands = 
shlex.split(commands)\n if not use_shell:\n tpl += ' --entrypoint {entrypoint}'\n entrypoint = commands.pop(0)\n else:\n entrypoint = None\n if commands:\n tpl += ' -- {cmds}'\n else:\n commands = []\n entrypoint = None\n\n return tpl.format(\n name=name,\n image=image,\n entrypoint=entrypoint,\n cmds=' '.join((shlex.quote(cmd) for cmd in commands))\n )", "def render_dockerfile(self):\n logger.info(\"Rendering Dockerfile...\")\n\n if self._params.get('redhat'):\n self._inject_redhat_defaults()\n\n self.image['pkg_manager'] = self._params.get('package_manager', 'yum')\n self.image.process_defaults()\n\n template_file = os.path.join(os.path.dirname(__file__),\n '..',\n 'templates',\n 'template.jinja')\n loader = FileSystemLoader(os.path.dirname(template_file))\n env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)\n env.globals['helper'] = TemplateHelper()\n env.globals['addhelp'] = self._params.get('addhelp')\n\n template = env.get_template(os.path.basename(template_file))\n\n dockerfile = os.path.join(self.target,\n 'image',\n 'Dockerfile')\n if not os.path.exists(os.path.dirname(dockerfile)):\n os.makedirs(os.path.dirname(dockerfile))\n\n with open(dockerfile, 'wb') as f:\n f.write(template.render(\n self.image).encode('utf-8'))\n logger.debug(\"Dockerfile rendered\")\n\n if self.image.get('help', {}).get('template', \"\"):\n help_template_path = self.image['help']['template']\n elif self._params.get('help_template'):\n help_template_path = self._params['help_template']\n else:\n help_template_path = os.path.join(os.path.dirname(__file__),\n '..',\n 'templates',\n 'help.jinja')\n\n help_dirname, help_basename = os.path.split(help_template_path)\n loader = FileSystemLoader(help_dirname)\n env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)\n env.globals['helper'] = TemplateHelper()\n help_template = env.get_template(help_basename)\n\n helpfile = os.path.join(self.target, 'image', 'help.md')\n with open(helpfile, 'wb') as f:\n f.write(help_template.render(\n self.image).encode('utf-8'))\n logger.debug(\"help.md rendered\")", "def get_command_arguments(self, format_vars):\n rval = {}\n for setting, value in self.settings.items():\n if setting in self.command_arguments:\n if value:\n rval[setting] = self.command_arguments[setting].format(**format_vars)\n else:\n rval[setting] = \"\"\n else:\n rval[setting] = value\n return rval", "def _getArgs():\n parser = getCommonArgsParser(\n 'Generate OpenShift deployment YAML file'\n )\n\n addArgOverlayUuid(parser)\n addArgOutputFile(parser, None)\n\n return parser.parse_args()", "def get_args(self):\n return {\n 'contents': self.get_formatted_code()\n }", "def template(self) -> str:\n arguments = []\n for arg in self.arg_list:\n flag = arg._flag\n arg = _flag_to_arg(flag)\n placeholder = _arg_to_empty(arg)\n arguments.append(placeholder)\n return ' '.join(arguments)", "def GenerateConfig(context):\n\n resources = [{\n 'name': 'my-build',\n 'action': 'gcp-types/cloudbuild-v1:cloudbuild.projects.builds.create',\n 'metadata': {\n 'runtimePolicy': ['UPDATE_ALWAYS']\n },\n 'properties': {\n 'steps': [\n {\n 'name': 'gcr.io/cloud-builders/gcloud',\n 'args': ['deployment-manager',\n context.properties['resourceToList'],\n 'list']\n }\n ],\n 'timeout': '120s'\n }\n }]\n return { 'resources': resources }", "def build_docker_run_command(configuration):\n parts = configuration.pop('docker').split()\n parts.append('run')\n\n run = configuration.pop('run')\n\n # Ensure all env-files have proper paths\n if 'env-file' in run:\n 
run['env-file'] = [os.path.join(configuration['workspace'], env_file)\n for env_file in run['env-file']]\n\n parts.extend(build_parameter_parts(\n run, 'user', 'workdir', 'rm', 'interactive', 'tty', 'env-file', 'cpu-shares', 'name',\n 'network', 'label', 'memory', 'entrypoint', 'runtime', 'privileged', 'group-add', 'gpus'\n ))\n\n # Add the mounts\n # The following code requires docker >= 17.06\n '''for mount in run.pop('mount', []):\n if mount['type'] == 'bind':\n mount['source'] = os.path.join(\n configuration['workspace'], mount['source'])\n parts.extend(['--mount', \",\".join([\"%s=%s\" % item for item in mount.items()])])'''\n\n # Add the mounts\n for mount in run.pop('mount', []):\n if mount['type'] == 'tmpfs':\n raise RuntimeError('tmpfs-mounts are currently not supported via the mount ' +\n 'directive in docker_interface. Consider using the tmpfs ' +\n 'directive instead.')\n if mount['type'] == 'bind':\n mount['source'] = os.path.abspath(\n os.path.join(configuration['workspace'], mount['source']))\n vol_config = '--volume=%s:%s' % (mount['source'], mount['destination'])\n if 'readonly' in mount and mount['readonly']:\n vol_config += ':ro'\n parts.append(vol_config)\n\n # Set or forward environment variables\n for key, value in run.pop('env', {}).items():\n if value is None:\n parts.append('--env=%s' % key)\n else:\n parts.append('--env=%s=%s' % (key, value))\n parts.append('--env=DOCKER_INTERFACE=true')\n\n # Forward ports\n for publish in run.pop('publish', []):\n parts.append('--publish=%s:%s:%s' % tuple([\n publish.get(key, '') for key in \"ip host container\".split()]))\n\n # Add temporary file systems\n for tmpfs in run.pop('tmpfs', []):\n destination = tmpfs['destination']\n options = tmpfs.pop('options', [])\n for key in ['mode', 'size']:\n if key in tmpfs:\n options.append('%s=%s' % (key, tmpfs[key]))\n if options:\n destination = \"%s:%s\" % (destination, \",\".join(options))\n parts.extend(['--tmpfs', destination])\n\n parts.append(run.pop('image'))\n parts.extend(run.pop('cmd', []))\n\n return parts", "def get_dockerfile_content(self):\n\n dockerfile_content: List[str] = [\n 'FROM nginx:latest',\n '# Update and install required packages',\n 'RUN apt-get update',\n 'RUN apt-get install vim -y',\n '',\n 'COPY ./.docker/config/nginx.conf /etc/nginx/conf.d/nginx.conf',\n '',\n 'ENTRYPOINT [\"nginx\"]',\n 'CMD [\"-g\",\"daemon off;\"]'\n ]\n return dockerfile_content", "def run(args):\n docker(' '.join(args))", "def _get_args(self) -> str:\n args = \"\"\n\n for config_key, config_option in self.config_options.items():\n if config_key in self.special_args:\n get_args_func = self.special_args[config_key]\n args += get_args_func(config_option)\n elif config_key in self.ignore_args:\n continue\n else:\n args += self._get_normal_args(config_key, config_option)\n\n return args.strip()", "def _get_args(self):\n parser = ArgumentParser(\n description=\"Dynamically generates Snakefiles for data \"\n \"integration and machine learning pipelines.\"\n )\n\n parser.add_argument(\n \"-c\",\n \"--config\",\n help=(\n \"Configuration filepath. 
(Will look for file named config.yml \"\n \"in current working directory, if none specified.)\"\n ),\n )\n\n parser.add_argument(\n \"-r\",\n \"--run\",\n default=False,\n help=(\n \"Runs pipeline, in addition to generating Snakefile.\"\n ),\n )\n\n # convert command-line args to a dict and return\n args = parser.parse_args()\n\n args = dict(\n (k, v) for k, v in list(vars(args).items()) if v is not None\n )\n\n return args", "def docker_version(args): # type: (CommonConfig) -> t.Dict[str, t.Any]\n stdout, _dummy = docker_command(args, ['version', '--format', '{{json .}}'], capture=True, always=True)\n return json.loads(stdout)", "def parse_bld_args(self, args: argparse.Namespace) -> RepoBuildArgs:", "def build_docker(c):\n tag = c.run('git describe', hide=True)\n docker_img = f'{docker_repo}:{tag.stdout.strip()}'\n c.run(f'docker build -t {docker_img} .')", "def get_frontend_args(self) -> Dict[str, Any]:\n return {\n \"task_description\": \"Placeholder Task Description - Javascript failed to load\",\n \"frame_height\": 650,\n \"num_subtasks\": self.opts[\"subtasks_per_unit\"],\n \"question\": self.opts[\"eval_question\"],\n \"block_mobile\": True,\n \"get_task_feedback\": False, # TODO(#95) make option\n \"additional_task_description\": self.opts['additional_task_description'],\n }", "def get_docker_compose_content(self):\n\n docker_compose_content: List[str] = [\n \" nginx:\",\n \" container_name: {}\".format(self.container_name),\n \" build:\",\n \" context: .\",\n \" dockerfile: .docker/{}\".format(self.dockerfile_name),\n \" volumes:\",\n \" - ./src:/var/www/src\",\n \" working_dir: /var/www/src\",\n \" ports:\",\n \" - '{}:{}'\".format(self.port, self.port),\n \" networks:\",\n \" - {}-network\".format(self.prefix)\n ]\n if self.depends_on_string != '':\n docker_compose_content.insert(2, self.depends_on_string)\n return docker_compose_content", "def get_args():\n # create the parser\n parser = argparse.ArgumentParser()\n # Add the arguments to be parsed\n parser.add_argument(\"--num_rollouts\", type=int, default=1, help=\"Number of times to rollout agent in env\")\n parser.add_argument(\"--render\", choices=('True','False'), help=\"Render the rollout\")\n parser.add_argument(\"--seed\", type=int, default=4)\n parser.add_argument(\"--x_thresh\", type=float, default=1.5)\n args = parser.parse_args()\n args.render = True if args.render == 'True' else False\n\n return args", "def get_context():\n context = {\"cookiecutter\": {\n {%- for key, value in cookiecutter.items()|sort %}\n \"{{ key }}\": {{ \"{0!r}\".format(value) }},\n {% endfor -%}", "def build_image(image, build_args):\n\n subprocess.check_call(['docker', 'build', '--no-cache', '--force-rm', '-t',\n image] + build_args)", "def build_args(self, job, private_data_dir, passwords):\n creds = job.machine_credential\n\n ssh_username, become_username, become_method = '', '', ''\n if creds:\n ssh_username = creds.get_input('username', default='')\n become_method = creds.get_input('become_method', default='')\n become_username = creds.get_input('become_username', default='')\n else:\n become_method = None\n become_username = \"\"\n # Always specify the normal SSH user as root by default. 
Since this\n # task is normally running in the background under a service account,\n # it doesn't make sense to rely on ansible-playbook's default of using\n # the current user.\n ssh_username = ssh_username or 'root'\n args = []\n if job.job_type == 'check':\n args.append('--check')\n args.extend(['-u', sanitize_jinja(ssh_username)])\n if 'ssh_password' in passwords:\n args.append('--ask-pass')\n if job.become_enabled:\n args.append('--become')\n if job.diff_mode:\n args.append('--diff')\n if become_method:\n args.extend(['--become-method', sanitize_jinja(become_method)])\n if become_username:\n args.extend(['--become-user', sanitize_jinja(become_username)])\n if 'become_password' in passwords:\n args.append('--ask-become-pass')\n\n # Support prompting for multiple vault passwords\n for k, v in passwords.items():\n if k.startswith('vault_password'):\n if k == 'vault_password':\n args.append('--ask-vault-pass')\n else:\n # split only on the first dot in case the vault ID itself contains a dot\n vault_id = k.split('.', 1)[1]\n args.append('--vault-id')\n args.append('{}@prompt'.format(vault_id))\n\n if job.forks:\n if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:\n logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')\n args.append('--forks=%d' % settings.MAX_FORKS)\n else:\n args.append('--forks=%d' % job.forks)\n if job.force_handlers:\n args.append('--force-handlers')\n if job.limit:\n args.extend(['-l', job.limit])\n if job.verbosity:\n args.append('-%s' % ('v' * min(5, job.verbosity)))\n if job.job_tags:\n args.extend(['-t', job.job_tags])\n if job.skip_tags:\n args.append('--skip-tags=%s' % job.skip_tags)\n if job.start_at_task:\n args.append('--start-at-task=%s' % job.start_at_task)\n\n return args", "def build_parms(args):\r\n readDir=args.dir\r\n #target_date=args.target_date\r\n target_date=args.target_date\r\n outdir=args.outdir \r\n parms = {\"readDir\":readDir,\r\n \"target_date\":target_date,\r\n \"outdir\":outdir}\r\n \r\n return(parms)", "def build_task_args(self, test_name):\n task_args = {'service_list': [test_name]}\n task_args['image_name'] = str(self.image.name)\n task_args['flavor_name'] = str(self.flavor.name)\n task_args['flavor_alt_name'] = str(self.flavor_alt.name)\n task_args['glance_image_location'] = str(self.filename)\n task_args['glance_image_format'] = str(self.image_format)\n task_args['tmpl_dir'] = str(self.template_dir)\n task_args['sup_dir'] = str(self.support_dir)\n task_args['users_amount'] = self.users_amount\n task_args['tenants_amount'] = self.tenants_amount\n task_args['use_existing_users'] = False\n task_args['iterations'] = self.iterations_amount\n task_args['concurrency'] = self.concurrency\n task_args['smoke'] = self.smoke\n task_args['volume_version'] = self.volume_version\n task_args['volume_service_type'] = self.volume_service_type\n task_args['block_migration'] = env.get(\"BLOCK_MIGRATION\").lower()\n task_args['username'] = self.username\n\n if self.ext_net:\n task_args['floating_network'] = str(self.ext_net.name)\n else:\n task_args['floating_network'] = ''\n\n if self.network:\n task_args['netid'] = str(self.network.id)\n else:\n LOGGER.warning(\n 'No tenant network created. 
'\n 'Trying EXTERNAL_NETWORK as a fallback')\n if env.get(\"EXTERNAL_NETWORK\"):\n network = self.cloud.get_network(env.get(\"EXTERNAL_NETWORK\"))\n task_args['netid'] = str(network.id) if network else ''\n else:\n task_args['netid'] = ''\n\n return task_args", "def build_task_args(self, test_name):\n task_args = {}\n if self.ext_net:\n task_args['floating_network'] = str(self.ext_net.name)\n else:\n task_args['floating_network'] = ''\n task_args['image_name'] = str(self.image.name)\n task_args['flavor_name'] = str(self.flavor.name)\n return task_args", "def _build(build_context,\n image_tag,\n image_name,\n nocache,\n credstore_env=None,\n registries=None):\n _logger.info('Starting build ...')\n\n # Build the image\n docker_builder = DockerBuilder(\n build_context=build_context,\n image_name=image_name,\n image_tag=image_tag,\n credstore_env=credstore_env,\n registries=registries,\n )\n docker_builder.login_private_registries()\n if docker_builder.check_image():\n # Image already built\n docker_builder.clean()\n return docker_builder\n if not docker_builder.build(nocache=nocache):\n docker_builder.clean()\n raise BuildException('The docker image could not be built.')\n return docker_builder", "def get_args(self):\n result = []\n submission_args = {}\n\n submission_args[\"local_upload\"] = self._upload[\"local\"]\n submission_args[\"upload_only\"] = self._upload[\"only\"]\n submission_args[\"force\"] = self._upload[\"force\"]\n submission_args[\"project\"] = self.project_name\n\n if self.email_addresses:\n addresses = \", \".join(self.email_addresses)\n submission_args[\"notify\"] = {\"emails\": addresses, \"slack\": []}\n else:\n submission_args[\"notify\"] = None\n\n for job in self._jobs:\n args = job.get_args()\n args.update(submission_args)\n result.append(args)\n return result", "def do_run(cs, args):\n opts = {}\n opts['name'] = args.name\n opts['image'] = args.image\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['environment'] = zun_utils.format_args(args.environment)\n opts['workdir'] = args.workdir\n opts['auto_remove'] = args.auto_remove\n opts['labels'] = zun_utils.format_args(args.label)\n opts['image_pull_policy'] = args.image_pull_policy\n opts['image_driver'] = args.image_driver\n opts['hints'] = zun_utils.format_args(args.hint)\n opts['nets'] = zun_utils.parse_nets(args.net)\n opts['mounts'] = zun_utils.parse_mounts(args.mount)\n opts['runtime'] = args.runtime\n opts['hostname'] = args.hostname\n opts['disk'] = args.disk\n opts['availability_zone'] = args.availability_zone\n opts['command'] = args.command\n opts['registry'] = args.registry\n opts['host'] = args.host\n if args.entrypoint:\n opts['entrypoint'] = zun_utils.parse_entrypoint(args.entrypoint)\n if args.healthcheck:\n opts['healthcheck'] = zun_utils.parse_health(args.healthcheck)\n\n if args.auto_heal:\n opts['auto_heal'] = args.auto_heal\n if args.security_group:\n opts['security_groups'] = args.security_group\n if args.expose_port:\n opts['exposed_ports'] = zun_utils.parse_exposed_ports(args.expose_port)\n if args.restart:\n opts['restart_policy'] = zun_utils.check_restart_policy(args.restart)\n if args.interactive:\n opts['interactive'] = True\n if args.privileged:\n opts['privileged'] = True\n opts = zun_utils.remove_null_parms(**opts)\n container = cs.containers.run(**opts)\n _show_container(container)\n container_uuid = getattr(container, 'uuid', None)\n if args.interactive:\n ready_for_attach = False\n while True:\n container = cs.containers.get(container_uuid)\n if 
zun_utils.check_container_status(container, 'Running'):\n ready_for_attach = True\n break\n if zun_utils.check_container_status(container, 'Error'):\n raise exceptions.ContainerStateError(container_uuid)\n print(\"Waiting for container start\")\n time.sleep(1)\n if ready_for_attach is True:\n response = cs.containers.attach(container_uuid)\n websocketclient.do_attach(cs, response, container_uuid, \"~\", 0.5)\n else:\n raise exceptions.InvalidWebSocketLink(container_uuid)", "def make_args(self, args):\n result_str = \"?\"\n for k, v in args.iteritems():\n result_str = result_str + k + \"=\" + v + \"&\"\n return result_str", "def build_show_tags(ctx, args):\n for build_id in args:\n data = ctx.obj.get_build_tags_by_build_id(build_id)\n output_json_data(data)", "def get_additional_args(self):\n additional = \"\"\n if not self.workflow.cleanup_scripts:\n additional += \" --skip-script-cleanup \"\n if self.workflow.shadow_prefix:\n additional += \" --shadow-prefix {} \".format(self.workflow.shadow_prefix)\n if self.workflow.use_conda:\n additional += \" --use-conda \"\n if self.workflow.conda_prefix:\n additional += \" --conda-prefix {} \".format(self.workflow.conda_prefix)\n if self.workflow.use_singularity:\n additional += \" --use-singularity \"\n if self.workflow.singularity_prefix:\n additional += \" --singularity-prefix {} \".format(\n self.workflow.singularity_prefix\n )\n if self.workflow.singularity_args:\n additional += ' --singularity-args \"{}\"'.format(\n self.workflow.singularity_args\n )\n\n if self.workflow.use_env_modules:\n additional += \" --use-envmodules\"\n\n return additional", "def build_show_parameters(ctx, output_format, args):\n column_names = ['name', 'value']\n for build_id in args:\n response = ctx.obj.get_build_parameters_by_build_id(build_id)\n data = response['property']\n if output_format == 'table':\n output_table(column_names, data)\n elif output_format == 'json':\n output_json_data(data)", "def get_args():\n\n parser = argparse.ArgumentParser(description=\"Get DC, Clusters, Hosts and VM in JSON.\")\n parser.add_argument('-H', '--host', nargs=1, required=True, help='The vCenter to connect to',\n dest='host', type=str)\n parser.add_argument('-p', '--password', nargs=1, required=False,\n help='The password with which to connect to the VC. If not specified, the user is prompted at runtime for a password',\n dest='password', type=str)\n parser.add_argument('-u', '--user', nargs=1, required=True, help='The username with which to connect to the host',\n dest='username', type=str)\n args = parser.parse_args()\n return args", "def setup_args():\n parser = ParlaiParser(add_parlai_args=True, description=\"Render data as HTML\")\n conv_render = parser.add_argument_group('Conversation Rendering Arguments')\n conv_render.add_argument(\n \"--input\", \"-i\", help=\"Input file to read conversations from\"\n )\n conv_render.add_argument(\n \"--output\",\n \"-o\",\n help=\"Output file to write conversations to. 
One of [.pdf, .png, .html] only\",\n )\n conv_render.add_argument(\n \"--width\", \"-wd\", help=\"Width of output file\", type=int, default=8\n )\n conv_render.add_argument(\n \"--height\", \"-ht\", help=\"Height of output file\", type=int, default=10\n )\n conv_render.add_argument(\n \"--user-icon\",\n \"-uic\",\n help=\"Absolute Path/URL to user image icon\",\n default=HUMAN_EMOJI_IMG,\n )\n conv_render.add_argument(\n \"--alt-icon\",\n \"-aic\",\n help=\"Absolute Path/URL to alternate image icon\",\n default=ALT_EMOJI_IMG,\n )\n conv_render.add_argument(\n \"--num-examples\",\n \"-ne\",\n help=\"Number of conversations to render\",\n type=int,\n default=10,\n )\n\n return parser", "def GetBuildInfoDict(self):\n build_info_dict = {\n key: val for key, val in utils.GetDictItems(self.build_info) if val}\n\n build_info_dict.update(\n {\"kernel_%s\" % key: val\n for key, val in utils.GetDictItems(self.kernel_build_info) if val}\n )\n build_info_dict.update(\n {\"system_%s\" % key: val\n for key, val in utils.GetDictItems(self.system_build_info) if val}\n )\n build_info_dict.update(\n {\"bootloader_%s\" % key: val\n for key, val in utils.GetDictItems(self.bootloader_build_info) if val}\n )\n return build_info_dict", "def _do_build(self) -> List[types.Action]:\n return [\n docker_command.DockerRun(\n command=[\"/entrypoint.sh\", self.tag],\n builder=builder.GO_BUILDER,\n run_config=docker_command.default_run_config(\n constants.STORAGE_OPERATOR_ROOT / \"entrypoint.sh\"\n ),\n mounts=[\n utils.bind_mount(\n target=Path(\"/storage-operator\"),\n source=constants.STORAGE_OPERATOR_ROOT,\n ),\n # This container (through operator-sdk) will call `docker\n # build`, so we need to expose our Docker socket.\n utils.bind_mount(\n target=Path(\"/var/run/docker.sock\"),\n source=Path(\"/var/run/docker.sock\"),\n ),\n ],\n )\n ]", "def _template_kwargs(*, logical_name: str, bucket: str, key: str) -> Dict[str, str]:\n if logical_name == \"ArtifactBuilder\":\n return dict(ArtifactBucketName=bucket, WorkersS3Key=key)\n elif logical_name == \"LayerBuilder\":\n return dict(ReplicationBucket=bucket, WorkersS3Key=key)\n else:\n raise ValueError(f\"Unknown logical name: {logical_name}\")", "def parse_arguments(\n input_args: List[str] = None, argument_parser: argparse.ArgumentParser = None\n) -> dict:\n if argument_parser is None:\n argument_parser = argparse.ArgumentParser()\n\n argument_parser.add_argument(\n \"--\" + FLAG_DOCKER_IMAGE_PREFIX.replace(\"_\", \"-\"),\n help=\"Provide a prefix for a Docker image, e.g. 'mltooling/' or even a repository path. 
When leaving blank, the default Dockerhub Repository is used.\",\n required=False,\n default=\"\",\n )\n\n return build_utils.parse_arguments(\n input_args=input_args, argument_parser=argument_parser\n )", "def CreateArgs(run_task_request, args):\n if getattr(args, \"ARGS\", None):\n args_ref = dataplex_api.FetchExecutionSpecArgs(args.ARGS)\n if len(args_ref) > 0:\n return run_task_request.ArgsValue(\n additionalProperties=[\n run_task_request.ArgsValue.AdditionalProperty(\n key=key, value=value\n )\n for key, value in sorted(args_ref.items())\n ]\n )\n return None", "def render_env(self):\n return {\n jinja2.Template(k).render({self.name: self}):\n jinja2.Template(v).render({self.name: self})\n for k, v in self.env.items()\n } if self.env else self.env", "def init_args(self):\n return {\n \"doc\": self.__doc__.format(name=colored(self.module_name, \"green\", attrs=['bold','underline'])),\n \"Url\": \"set a target url\",\n 'Type': \"set type to check , [php, asp, aspx, cgi, dir , mdb]\",\n }", "def _generate_run_args(self, args_list, kwargs):\n return _get_args_for_run(self, args_list, kwargs)", "def render_templates(self):\n\n # dockerfile\n try:\n t = self.templates.get_template(\n 'docker/dockerfiles/{}.dockerfile.template'.format(self.repo)\n )\n except TemplateNotFound:\n t = self.templates.get_template(\n 'docker/dockerfiles/default.dockerfile.template'\n )\n\n self.files.append({\n 'name': 'Dockerfile',\n 'content': t.render(commit=self.commit),\n })\n\n # gunicorn\n t = self.templates.get_template(\n 'docker/gunicorn/gunicorn.conf.py'\n )\n self.files.append({\n 'name': 'gunicorn.conf.py',\n 'content': t.render(),\n })\n\n t = self.templates.get_template(\n 'docker/gunicorn/gunicorn.sh'\n )\n self.files.append({\n 'name': 'gunicorn.sh',\n 'content': t.render(),\n 'mode': 0555,\n })\n\n # nginx\n t = self.templates.get_template(\n 'docker/nginx/app.nginx.conf'\n )\n self.files.append({\n 'name': 'app.nginx.conf',\n 'content': t.render(),\n })\n\n t = self.templates.get_template(\n 'docker/nginx/nginx.sh'\n )\n self.files.append({\n 'name': 'nginx.sh',\n 'content': t.render(),\n 'mode': 0555,\n })\n\n # cron/, etc/ iif there exists a `self.repo` directory\n def _filter(p):\n return (\"cron/\" in p or \"etc/\" in p) and (self.repo in p) and \\\n (not os.path.basename(p).startswith('.'))\n\n for t in self.templates.list_templates(\n filter_func=_filter):\n\n self.files.append({\n 'name': os.path.basename(t),\n 'content': self.templates.get_template(t).render(),\n })", "def _DetermineImageFromArgs(self, args):\n if args.tag:\n if (properties.VALUES.builds.check_tag.GetBool() and\n 'gcr.io/' not in args.tag):\n raise c_exceptions.InvalidArgumentException(\n '--tag',\n 'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')\n return args.tag\n\n elif args.image:\n if (properties.VALUES.builds.check_tag.GetBool() and\n 'gcr.io/' not in args.image):\n raise c_exceptions.InvalidArgumentException(\n '--image',\n 'Image value must be in the gcr.io/* or *.gcr.io/* namespace.')\n return args.image\n\n else: # Default tag\n if args.app_name:\n default_name = args.app_name\n elif os.path.isdir(args.source): # I.e., the source is not a tarball\n default_name = os.path.basename(os.path.abspath(args.source))\n else:\n raise c_exceptions.OneOfArgumentsRequiredException(\n ['--app-name', '--tag'],\n 'Cannot resolve default container image. 
Provide an app name with '\n '--app-name to use as the container image, or provide a full '\n 'tag using --tag.')\n\n if args.app_version:\n default_tag = args.app_version\n elif git.IsGithubRepository(\n args.source) and not git.HasPendingChanges(args.source):\n default_tag = git.GetGitHeadRevision(args.source)\n if not default_tag:\n raise c_exceptions.OneOfArgumentsRequiredException(\n ['--app-version', '--tag'],\n 'Cannot resolve default container tag using the Git commit SHA. '\n 'Provide an app version with --app-version to use as the '\n 'container tag, or provide a full tag using --tag.')\n else:\n raise c_exceptions.OneOfArgumentsRequiredException(\n ['--app-version', '--tag'],\n 'Cannot resolve default container tag. '\n 'Provide an app version with --app-version to use as the '\n 'container tag, or provide a full tag using --tag.')\n\n return 'gcr.io/$PROJECT_ID/{name}:{tag}'.format(\n name=default_name, tag=default_tag)", "def configs(self):\n yield \"singleimage\", build_config.BuildConfig()", "def get_args(self):\n rqst = self.request\n args = rqst.arguments()\n resp = {}\n for arg in args:\n resp[arg] = repr(rqst.get_all(arg))\n return resp", "def get_arguments_configuration(argv):\n arguments_config = {\n 'execution_arguments': str(argv),\n 'execution_config': {\n 'component': ExecutionConfig,\n 'component_args': ['env_layer', 'composite_logger'],\n 'component_kwargs': {\n 'execution_parameters': str(argv)\n }\n }\n }\n return arguments_config", "def get_jinja_context(include_dict=None):\n context = {\n \"app_name\": app_config[\"APP\"][\"app_name\"],\n \"app_version\": app_config[\"APP\"][\"app_version\"],\n \"app_description\": app_config[\"APP\"][\"app_description\"],\n \"app_author\": app_config[\"APP\"][\"app_author\"],\n \"app_author_website\": app_config[\"APP\"][\"app_author_website\"],\n \"is_user_logged_in\": True if get_user_id() else False\n }\n if include_dict:\n context = {**context, **include_dict} # merge dictionaries\n return context", "def build_args(self, ad_hoc_command, private_data_dir, passwords):\n creds = ad_hoc_command.credential\n ssh_username, become_username, become_method = '', '', ''\n if creds:\n ssh_username = creds.get_input('username', default='')\n become_method = creds.get_input('become_method', default='')\n become_username = creds.get_input('become_username', default='')\n else:\n become_method = None\n become_username = \"\"\n # Always specify the normal SSH user as root by default. Since this\n # task is normally running in the background under a service account,\n # it doesn't make sense to rely on ansible's default of using the\n # current user.\n ssh_username = ssh_username or 'root'\n args = []\n if ad_hoc_command.job_type == 'check':\n args.append('--check')\n args.extend(['-u', sanitize_jinja(ssh_username)])\n if 'ssh_password' in passwords:\n args.append('--ask-pass')\n # We only specify sudo/su user and password if explicitly given by the\n # credential. 
Credential should never specify both sudo and su.\n if ad_hoc_command.become_enabled:\n args.append('--become')\n if become_method:\n args.extend(['--become-method', sanitize_jinja(become_method)])\n if become_username:\n args.extend(['--become-user', sanitize_jinja(become_username)])\n if 'become_password' in passwords:\n args.append('--ask-become-pass')\n\n if ad_hoc_command.forks: # FIXME: Max limit?\n args.append('--forks=%d' % ad_hoc_command.forks)\n if ad_hoc_command.diff_mode:\n args.append('--diff')\n if ad_hoc_command.verbosity:\n args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))\n\n if ad_hoc_command.limit:\n args.append(ad_hoc_command.limit)\n else:\n args.append('all')\n\n return args", "def _dockerfile(self):\n return self.config.get('docker', {}).get('dockerfile', 'Dockerfile')", "def _CommonArgs(parser):\n image_args = parser.add_mutually_exclusive_group(required=True)\n image_building_args = image_args.add_argument_group()\n parser.add_argument(\n 'template_file_gcs_path',\n metavar='TEMPLATE_FILE_GCS_PATH',\n help=('The Google Cloud Storage location of the flex template file.'\n 'Overrides if file already exists.'),\n type=arg_parsers.RegexpValidator(r'^gs://.*',\n 'Must begin with \\'gs://\\''))\n\n image_args.add_argument(\n '--image',\n help=('Path to the any image registry location of the prebuilt flex '\n 'template image.'))\n\n parser.add_argument(\n '--sdk-language',\n help=('SDK language of the flex template job.'),\n choices=['JAVA', 'PYTHON'],\n required=True)\n\n parser.add_argument(\n '--metadata-file',\n help='Local path to the metadata json file for the flex template.',\n type=arg_parsers.FileContents())\n\n parser.add_argument(\n '--print-only',\n help=('Prints the container spec to stdout. Does not save in '\n 'Google Cloud Storage.'),\n default=False,\n action=actions.StoreBooleanProperty(\n properties.VALUES.dataflow.print_only))\n\n image_building_args.add_argument(\n '--image-gcr-path',\n help=('The Google Container Registry location to store the flex '\n 'template image to be built.'),\n type=arg_parsers.RegexpValidator(\n r'^(.*\\.){0,1}gcr.io/.*',\n ('Must begin with \\'[multi-region.]gcr.io/\\'. Please check '\n 'https://cloud.google.com/container-registry/docs/overview '\n 'for available multi-regions')),\n required=True)\n\n image_building_args.add_argument(\n '--jar',\n metavar='JAR',\n type=arg_parsers.ArgList(),\n action=arg_parsers.UpdateAction,\n help=('Local path to your dataflow pipeline jar file and all their '\n 'dependent jar files required for the flex template classpath. '\n 'You can pass them as a comma separated list or repeat '\n 'individually with --jar flag. Ex: --jar=\"code.jar,dep.jar\" or '\n '--jar code.jar, --jar dep.jar.'),\n required=True)\n\n image_building_args.add_argument(\n '--flex-template-base-image',\n help=('Flex template base image to be used while building the '\n 'container image. Allowed choices are JAVA8, JAVA11 or gcr.io '\n 'path of the specific version of the base image. For JAVA8 and '\n 'JAVA11 option, we use the latest base image version to build '\n 'the container. 
You can also provide a specific version from '\n 'this link https://gcr.io/dataflow-templates-base/'),\n type=arg_parsers.RegexpValidator(\n r'^JAVA11$|^JAVA8$|^gcr.io/.*',\n 'Must be JAVA11 or JAVA8 or begin with \\'gcr.io/\\''),\n required=True)\n\n image_building_args.add_argument(\n '--env',\n metavar='ENV',\n type=arg_parsers.ArgDict(),\n action=arg_parsers.UpdateAction,\n help=\n ('Environment variables to create for the Dockerfile. '\n 'You can pass them as a comma separated list or repeat individually '\n 'with --env flag. Ex: --env=\"A=B,C=D\" or --env A=B, --env C=D.'\n 'You can find the list of supported environment variables in this '\n 'link. https://cloud.google.com/dataflow/docs/guides/templates/'\n 'troubleshooting-flex-templates'\n '#setting_required_dockerfile_environment_variables'),\n required=True)", "def render_template(*args, **kwargs):\r\n params = {'cache_buster': cache_buster, 'user': {}, 'user_json': {}, 'PROD': PRODUCTION,\r\n 'static_route': 'http://cdn1.pythonhackers.com'}\r\n params.update(**kwargs)\r\n\r\n return template_render(*args, **params)", "def get_args():\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n\n arg('--raw_source_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/source_data'))\n arg('--meta_data_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/test_dir'))\n arg('--img_data_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/test_dir/input_data'))\n arg('--output_data_dir',\n default=os.path.expanduser(\n '~/Personal/Columbia/Applied_DL/Camelyon_Project/data/test_dir/output_data'))\n\n arg('--img_partition_option', default='zoom_1_256_256')\n\n input_args = parser.parse_known_args()[0]\n\n return input_args", "def get_args():\n # Strip anything other than characters listed\n starting_view = pattern.sub(\"\", request.form.get(\"starting_view\"))\n envelope_id = \"envelope_id\" in session and session[\"envelope_id\"]\n args = {\n \"envelope_id\": envelope_id,\n \"starting_view\": starting_view,\n \"account_id\": session[\"ds_account_id\"],\n \"base_path\": session[\"ds_base_path\"],\n \"access_token\": session[\"ds_access_token\"],\n \"ds_return_url\": url_for(\"ds.ds_return\", _external=True),\n }\n\n return args", "def pkgbuildContentVars( self, pars, directory ):\n\n ret = {\n 'pkgname' : '$(basename $(pwd))',\n 'pkgver' : '0.1',\n 'pkgrel' : '1',\n 'pkgdesc' : '\"' + pars['description'] + '\"',\n 'developer' : '\"' + pars['developer'] + '\"',\n 'url' : '\"' + pars['url'] + '\"',\n 'maintainer' : '\"' + pars['developer'] + '\"',\n 'arch' : '(\"any\")',\n 'license' : '(\"' + pars['license'] + '\")',\n 'depends' : \"\"\"\\\n(\n # Insert your UBOS package dependencies here as a bash array, like this:\n # 'perl-archive-zip'\n # 'ubos-perl-utils'\n)\\\n\"\"\",\n 'makedepends' : \"\"\"\\\n(\n)\\\n\"\"\",\n 'backup' : \"\"\"\\\n(\n # List any config files your package uses that should NOT be overridden\n # upon the next package update if the user has modified them.\n)\\\n\"\"\",\n 'source' : \"\"\"\\\n(\n # Insert URLs to the source(s) of your code here, usually one or more tar files\n # or such, like this:\n # \"https://download.nextcloud.com/server/releases/nextcloud-\\{pkgver}.tar.bz2\"\n)\\\n\"\"\",\n 'sha512sums' : \"\"\"\\\n(\n 'placeholder, generate with: makepkg -g'\n)\\\n\"\"\"\n }\n\n return ret", "async def dict(self):\n\n #pylint disable=no-member\n\n args = {}\n for item, value in 
self._args._asdict().items():\n args[item] = value\n\n message = {\n 'message_type': 'command',\n 'command_type': self.__class__.__name__,\n 'message': {\n 'arguments': args\n }\n }\n return message", "def get_project_and_build_from_arguments():\n parser = OptionParser()\n parser.add_option('-b', '--build', dest='build',\n help='The build number of the Deployment Pipeline to deploy a unique artifact to Nexus.')\n parser.add_option('-p', \"--project\", dest=\"project\",\n help='''Project name to be deployed to tomcat (e.g. jcommune, poulpe). Script looks up the app\n configs in `~/.jtalks/environments` by this name.''')\n parser.add_option('-e', \"--environment\", dest=\"env\",\n help='''Environment to be deployed. Environment MUST exist on current server. Run [list-envs] to\n see possible values''')\n parser.add_option('-g', '--grab-envs', dest=\"grab_envs\", default=\"false\",\n help='''Whether or not to clone configs from JTalks Git repository,\n requires appropriate SSH access allowed. Possible values - true and false.\n False by default''')\n parser.add_option('-t', '--sanity-test-timeout-sec', dest='sanity_test_timeout_sec', default=120,\n help='''After the app is deployed, scripts check whether it was deployed successfully by sending\n an HTTP request. This argument says for how long to wait before we consider the\n deployment failed.''')\n parser.add_option('-d', '--debug', dest='debug', default='off',\n help='Whether to show additional errors and logs or not. Possible values: on/off.')\n (options, args) = parser.parse_args()\n if len(args) == 0:\n logger.error(\"No command was specified, you can use: [deploy], [upload-to-nexus]\")\n sys.exit(1)\n return options, args", "def prepare_args(config, bootstrap):\n config = copy.deepcopy(config)\n environ = dict(copy.deepcopy(os.environ))\n\n data = {'env': bootstrap['env'],\n 'pip': pip_cmd(bootstrap['env'], '', return_path=True),\n 'requirements': bootstrap['requirements']}\n environ.update(data)\n\n if isinstance(config, string_types):\n return config.format(**environ)\n\n for key, value in iteritems(config):\n if not isinstance(value, string_types):\n continue\n config[key] = value.format(**environ)\n\n return config_to_args(config)", "def dict2argstr(args_dict):\n arg_str = \"\"\n for arg, value in args_dict.items():\n if value is not None:\n arg_str += \" --{} {}\".format(str(arg), str(value))\n return arg_str", "def full_args():\n return setup_args()", "def collect_args() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', help=\"Config file\", type=str, default=Path(__file__).parent / \"data/params.yaml\")\n args = parser.parse_args()\n return args", "def get_kwargs():\n\treturn get_kwargs_raw(sys.argv)", "def _get_reproducing_arguments(self):\n reproducing_arguments = {\n 'include': self.include,\n 'exclude': self.exclude,\n 'copy': self.copy,\n }\n args_names = {name: getattr(self, name) for name in self.args_names}\n reproducing_arguments.update(args_names)\n return reproducing_arguments", "def get_merged_args(args):\n config_dict = load_config(args.config)\n\n args_dict = {\n \"cleaning_policy\": args.cleaning_policy,\n \"clear\": args.clear,\n \"content\": args.content,\n \"dry_run\": args.dry_run,\n \"force\": args.force,\n \"in_lines\": args.in_lines,\n \"max_size\": args.max_size,\n \"regex\": args.regex,\n \"restore\": args.restore,\n \"rmdir\": args.rmdir,\n \"short\": args.short,\n \"silent\": args.silent,\n \"storage_time\": args.storage_time,\n \"wastebasket_path\": 
args.wastebasket_path\n }\n\n for arg, value in args_dict.iteritems():\n if not value:\n args_dict[arg] = config_dict[arg]\n\n if args_dict[\"cleaning_policy\"] == POLICY:\n args_dict[\"cleaning_policy\"] = config_dict[\"cleaning_policy\"]\n\n if args_dict[\"storage_time\"] == STORAGE_TIME:\n args_dict[\"storage_time\"] = config_dict[\"storage_time\"]\n\n if args_dict[\"max_size\"] == MAX_SIZE:\n args_dict[\"max_size\"] = config_dict[\"max_size\"]\n\n return args_dict", "def build_args(self, parser):\n raise NotImplementedError('build_args() must be implemented')", "def get_args():\n return {\"id\": fields.UUID(required=True, location=\"view_args\")}", "def process_args(self, args, cache=None):\n\n new_args = dict()\n\n for k, v in args.items():\n if k == 'trial':\n if isinstance(v, str):\n hashid, rev = v.split('_')\n rev = int(rev)\n\n v = self.backend.get_trial(Trial(_hash=hashid, revision=rev))\n for i in v:\n if i.revision == rev:\n v = i\n break\n else:\n warning('Was not able to find the correct trial revision')\n\n v = from_json(v)\n\n elif k == 'project':\n if isinstance(v, str):\n v = self.backend.get_project(Project(name=v))\n\n v = from_json(v)\n\n elif k == 'group':\n if isinstance(v, str):\n v = self.backend.get_trial_group(TrialGroup(_uid=v))\n\n v = from_json(v)\n\n new_args[k] = v\n\n return new_args", "def build_args(self, inventory_update, private_data_dir, passwords):\n # Get the inventory source and inventory.\n inventory_source = inventory_update.inventory_source\n inventory = inventory_source.inventory\n\n if inventory is None:\n raise RuntimeError('Inventory Source is not associated with an Inventory.')\n\n args = ['ansible-inventory', '--list', '--export']\n\n # special case for constructed inventories, we pass source inventories from database\n # these must come in order, and in order _before_ the constructed inventory itself\n if inventory_update.inventory.kind == 'constructed':\n inventory_update.log_lifecycle(\"start_job_fact_cache\")\n for input_inventory in inventory_update.inventory.input_inventories.all():\n args.append('-i')\n script_params = dict(hostvars=True, towervars=True)\n source_inv_path = self.write_inventory_file(input_inventory, private_data_dir, f'hosts_{input_inventory.id}', script_params)\n args.append(to_container_path(source_inv_path, private_data_dir))\n # Include any facts from input inventories so they can be used in filters\n start_fact_cache(\n input_inventory.hosts.only(*HOST_FACTS_FIELDS),\n os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),\n inventory_id=input_inventory.id,\n )\n\n # Add arguments for the source inventory file/script/thing\n rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)\n container_location = os.path.join(CONTAINER_ROOT, rel_path)\n source_location = os.path.join(private_data_dir, rel_path)\n\n args.append('-i')\n args.append(container_location)\n # Added this in order to allow older versions of ansible-inventory https://github.com/ansible/ansible/pull/79596\n # limit should be usable in ansible-inventory 2.15+\n if inventory_update.limit:\n args.append('--limit')\n args.append(inventory_update.limit)\n\n args.append('--output')\n args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))\n\n if os.path.isdir(source_location):\n playbook_dir = container_location\n else:\n playbook_dir = os.path.dirname(container_location)\n args.extend(['--playbook-dir', playbook_dir])\n\n if inventory_update.verbosity:\n 
args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))\n\n return args" ]
[ "0.64995056", "0.6460428", "0.5992242", "0.59243613", "0.5775845", "0.56487375", "0.5570073", "0.5546533", "0.5522067", "0.5497052", "0.5493939", "0.54721403", "0.5428458", "0.5366059", "0.5361795", "0.535775", "0.5269126", "0.5204375", "0.5196358", "0.51938367", "0.518926", "0.51665103", "0.515709", "0.5149833", "0.51478", "0.51452595", "0.51444477", "0.51331633", "0.5128882", "0.50937015", "0.5091239", "0.50760007", "0.50711", "0.50638723", "0.5033535", "0.5028839", "0.5028382", "0.50199187", "0.50147", "0.5007748", "0.49912646", "0.49884826", "0.49849722", "0.49823225", "0.49810585", "0.49673855", "0.49585405", "0.49532053", "0.4940924", "0.493939", "0.49292374", "0.49091277", "0.4884996", "0.48798704", "0.4876488", "0.48758486", "0.48582357", "0.48488396", "0.4829783", "0.4827247", "0.48209545", "0.48160252", "0.48136804", "0.47944194", "0.47942486", "0.47941327", "0.47889584", "0.47761706", "0.4772736", "0.47606668", "0.47576687", "0.47533017", "0.47503048", "0.4749483", "0.47466376", "0.47453895", "0.47452068", "0.4745107", "0.47421613", "0.4741552", "0.47394177", "0.47378495", "0.4736585", "0.47258696", "0.47256252", "0.4723876", "0.47226265", "0.4722398", "0.47189066", "0.47178185", "0.4710978", "0.47074336", "0.4705623", "0.47050807", "0.47034562", "0.4696578", "0.46879107", "0.46830925", "0.46827057", "0.4682567" ]
0.7038579
0
Cached getter for docker client
def docker_client():
    return docker.from_env()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_docker_client(self) -> \"DockerClient\":", "def docker_client():\n client = docker.from_env()\n return client", "def get_client():\n info = {}\n host = os.environ.get('DOCKER_HOST')\n net_host = os.environ.get('DOCKER_NET_HOST')\n\n client_api_version = os.environ.get('DOCKER_API_VERSION')\n if not client_api_version:\n client_api_version = \"auto\"\n\n # IP to use for started containers\n if net_host:\n info['host'] = net_host\n elif host:\n info['host'] = urlparse.urlparse(host).netloc.split(':')[0]\n else:\n info['host'] = 'localhost'\n\n verify = os.environ.get('DOCKER_TLS_VERIFY') == '1'\n if verify: # use TLS\n assert_hostname = None\n cert_path = os.environ.get('DOCKER_CERT_PATH')\n if cert_path:\n client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem'))\n ca_cert = os.path.join(cert_path, 'ca.pem')\n else:\n client_cert = ca_cert = None\n\n tls_config = docker.tls.TLSConfig(\n client_cert=client_cert,\n ca_cert=ca_cert,\n verify=verify,\n assert_hostname=assert_hostname,\n )\n return docker.Client(base_url=host, tls=tls_config, version=client_api_version), info\n else:\n return docker.Client(base_url=host, version=client_api_version), info", "def get_docker_client(kard):\n return ComposePkr(kard, DOCKER_SOCK)", "def _cache(self):\n return self._class(self.client_servers, **self._options)", "def docker_client(request):\n client = docker.from_env()\n yield client\n client.close()", "def redis_client(self) -> Redis:\n return self.app.key_value_store.redis_client", "def _get_client(self):\n credentials = service_account.Credentials.from_service_account_info(self.service_account_info)\n client = googleapiclient.discovery.build('container', 'v1', credentials=credentials)\n\n return client", "def _connect_docker_client(self):\n # lets check if Docker ENV information is set and use local socket as fallback\n if os.environ.get(\"DOCKER_HOST\") is None:\n os.environ[\"DOCKER_HOST\"] = \"unix://var/run/docker.sock\"\n LOG.warning(\"ENV variable 'DOCKER_HOST' not set. 
Using %r as fallback.\" % os.environ[\"DOCKER_HOST\"])\n\n # lets connect to the Docker instance specified in current ENV\n # cf.: http://docker-py.readthedocs.io/en/stable/machine/\n dc = docker.from_env(assert_hostname=False)\n # do a call to ensure that we are connected\n dc.info()\n LOG.info(\"Connected to Docker host: %r\" % dc.base_url)\n return dc", "async def login(self) -> \"DockerClient\":", "def get_client():\n\n return MongoClientManager().client", "def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)", "def docker(self, obj):\n\n if self._dockerclient is not None:\n return self._dockerclient\n host = self.properties[self.HOST_NODE]\n host_ip = self.get_host_ip(self, obj, host)\n url = 'tcp://' + host_ip + ':2375'\n self._dockerclient = docker.Client(base_url=url)", "def redis_client(docker_ip, docker_services):\n client = redis.StrictRedis(host='localhost', port=6379, db=0)\n docker_services.wait_until_responsive(\n timeout=30.0, pause=0.1,\n check=lambda: is_responsive(client)\n )\n return client", "def get_redis_client():\n return redis.from_url(settings.REDIS_URI)", "def Get():\n return ServiceConfig() # Singleton decorator ensures there's only one", "def configure_client(self):\n self.client = self.get_redis_client()\n return self.client", "def get(key):\n return Cache.cache_connector.get(key)", "def _getMemcacheClient(self, refresh=False):\n if refresh or not hasattr(self, \"memcacheClient\"):\n\n if config.Memcached.Pools.Default.MemcacheSocket:\n client_addr = \"unix:{}\".format(config.Memcached.Pools.Default.MemcacheSocket)\n else:\n client_addr = \"{}:{}\".format(\n config.Memcached.Pools.Default.BindAddress,\n config.Memcached.Pools.Default.Port,\n )\n self.memcacheClient = ClientFactory.getClient([client_addr], debug=0, pickleProtocol=2)\n return self.memcacheClient", "def get_client() -> 'MongoCLient':\n client = pymongo.MongoClient()\n db = client['c3']\n c = db['json']\n return c", "def get_client():\n return Client(__address, authkey='strumamor')", "def _get_dask_client(client: Optional[Client]) -> Client:\n if client is None:\n return default_client()\n else:\n return client", "def test_redisdb_get_client():\n test_redisdb = RedisClient()\n test_redisdb.client = \"mock_client\"\n\n test_redisdb_client = test_redisdb.get_client()\n assert test_redisdb_client == \"mock_client\"", "def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)", "def _init_raw_client(self) -> None:\n if self.credentials:\n auth = HTTPBasicAuth(self.credentials['username'], self.credentials['password'])\n else:\n auth = None\n base_url = \"http://\" if self.untrusted else \"https://\"\n base_url += self.url\n self.raw_client = client.DockerRegistryClient(base_url=base_url, auth=auth)", "def get_client(self):\n return self.client", "def docker_client(environment, version=None, tls_config=None, host=None,\n tls_version=None):\n try:\n kwargs = kwargs_from_env(environment=environment, ssl_version=tls_version)\n except TLSParameterError:\n raise UserError(\n \"TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY \"\n \"and DOCKER_CERT_PATH are set correctly.\\n\"\n \"You might need to run `eval \\\"$(docker-machine env default)\\\"`\")\n\n if host:\n kwargs['base_url'] = host\n if tls_config:\n kwargs['tls'] = tls_config\n\n if version:\n kwargs['version'] = version\n\n timeout = environment.get('COMPOSE_HTTP_TIMEOUT')\n if timeout:\n 
kwargs['timeout'] = int(timeout)\n else:\n kwargs['timeout'] = HTTP_TIMEOUT\n\n kwargs['user_agent'] = generate_user_agent()\n\n # Workaround for\n # https://pyinstaller.readthedocs.io/en/v3.3.1/runtime-information.html#ld-library-path-libpath-considerations\n if 'LD_LIBRARY_PATH_ORIG' in environment:\n kwargs['credstore_env'] = {\n 'LD_LIBRARY_PATH': environment.get('LD_LIBRARY_PATH_ORIG'),\n }\n\n client = APIClient(**kwargs)\n client._original_base_url = kwargs.get('base_url')\n\n return client", "def get_cache(self):\n return self.cache", "def get_client(self, name):\n return self.get_clients(as_dict=True).get(name)", "def __getattr__(self, name):\n impl = self._get_client_impl()\n return getattr(impl, name)", "def _pull_image(self, docker_client: \"DockerClient\"):\n image, tag = self._get_image_and_tag()\n\n return docker_client.images.pull(image, tag)", "def __getattr__(self, name):\n return getattr(self._client, name)", "def _get_client(self):\n _client = KOPS(provider=self.provider, config=self.config)\n return _client", "def test_get_container(self):\n pass", "def get_cache(self):\n return self._instance._cache[self.name]", "def get(ctx, key):\n config = buildConfig(ctx.obj[\"HOST\"], ctx.obj[\"PORT\"])\n client = getClient(config)\n click.echo(client.get(key))", "def _get_client():\n\n return datastore.Client()", "def _get_client():\n\n return datastore.Client()", "def _get_client(self, requester_name: str) -> Any:\n return self.datastore.get_client_for_requester(requester_name)", "def redis_client(self) -> Redis:\n if self._redis_client is None:\n redis_client = Redis(connection_pool=self.redis_conn_pool)\n\n self._redis_client = redis_client\n\n self._logger.debug(\n \"[%s]: Initialized Redis client: %s\", self.__name__, self._redis_client\n )\n\n return self._redis_client", "def client(self):\n return self._client", "def get_redis_server():\n return redis_server", "def caching_getter(getter, name, *args, **kwargs):\n key = (self._caching_devices[i], name)\n if key in cache:\n return cache[key]\n\n v = getter(name, *args, **kwargs)\n with tf.device(self._caching_devices[i]):\n # ret = tf.identity(v._ref()) # pylint: disable=protected-access\n ret = v.read_value()\n _add_variable_proxy_methods(v, ret)\n cache[key] = ret\n return ret", "def CachedProxy(getter):\n container = LazyContainer(getter)\n return LocalProxy(container.get)", "def operations_client(self) -> operations_v1.OperationsClient:\n # Sanity check: Only create a new client if we do not already have one.\n if \"operations_client\" not in self.__dict__:\n self.__dict__[\"operations_client\"] = operations_v1.OperationsClient(\n self.grpc_channel\n )\n\n # Return the client from cache.\n return self.__dict__[\"operations_client\"]", "def get_docker_version() -> Union[version.LegacyVersion, version.Version]:\n output = subprocess.check_output(\n [\"docker\", \"version\", \"--format\", \"{{json .Client.Version }}\"],\n stderr=subprocess.DEVNULL,\n ).strip()\n return version.parse(output.strip(b'\"').decode(\"utf8\"))", "def get_product_caching_service():\n return APP.config['ProductCachingService']", "def _get_client_impl(self):\n api_version = self._get_api_version(None)\n if api_version not in self._client_impls:\n self._create_client_impl(api_version)\n return self._client_impls[api_version]", "def get_redis_client(self):\n\n client = Client(\n #connection_pool=connection_pool,\n host=self.backend_settings.get('HOST', 'localhost'),\n port=int(self.backend_settings.get('PORT', 6379)),\n io_loop=self.io_loop,\n 
password=self.backend_settings.get('PASSWORD', None),\n selected_db=int(self.backend_settings.get('DB', 0)),\n reconnect_callback=self.listen)\n\n return client", "def _get_container(self) -> Container:\n obj = self.get_container()\n return to_container(obj)", "def decapod_client(get_decapod_client):\n return get_decapod_client()", "def __getattr__(self, name: str):\n return getattr(self._client, name)", "def get_container(self) -> CNT:\n raise NotImplementedError()", "def raw_client(self):\r\n warnings.warn(\"raw_client is deprecated. use self.client.get_client instead\",\r\n DeprecationWarning, stacklevel=2)\r\n return self.client.get_client(write=True)", "def get_cache(self, key):\n return self.r.get(key)", "def expose_docker_socket(self):\n return self._expose_docker_socket", "def _http_client(self):\n\n self.__enforce_connected()\n return self.collection._http_client", "def _read_from_config(key, default):\n if config.has_option('docker', key):\n return config.get('docker', key)\n else:\n return default", "def _get_client(wsdl_url, cache_duration=(\"default\",)):\n global _suds_client\n\n print(wsdl_url)\n # Handle new or changed client request (create new client)\n if _suds_client is None or _suds_client.wsdl.url != wsdl_url:\n _suds_client = Client(wsdl_url)\n if cache_duration is None:\n _suds_client.set_options(cache=None)\n else:\n cache = _suds_client.options.cache\n # could add some error catching ...\n if cache_duration[0] == \"default\":\n cache.setduration(days=1)\n else:\n # noinspection PyTypeChecker\n cache.setduration(**dict([cache_duration]))\n\n return _suds_client", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n return self.__config", "def _get_config(self):\n 
return self.__config", "def _get_config(self):\n return self.__config" ]
[ "0.7949226", "0.6975132", "0.69293606", "0.6704719", "0.6433767", "0.6314111", "0.6047752", "0.59885114", "0.59635204", "0.5917807", "0.5916698", "0.5907519", "0.5857684", "0.58574104", "0.58456975", "0.5838481", "0.58354545", "0.58210045", "0.58168083", "0.58074665", "0.57086885", "0.56755704", "0.5672046", "0.56555134", "0.5648413", "0.5631415", "0.56311697", "0.56213063", "0.55941993", "0.55911475", "0.55791146", "0.5576172", "0.55737233", "0.55660516", "0.555038", "0.55408996", "0.5536702", "0.5536702", "0.55350053", "0.5519103", "0.55179816", "0.55097985", "0.5507288", "0.55072707", "0.5506658", "0.54930264", "0.54918146", "0.5488746", "0.5485208", "0.54806507", "0.5474044", "0.54666555", "0.54655313", "0.54616916", "0.5440218", "0.54347616", "0.54316974", "0.5430191", "0.5426878", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614", "0.54170614" ]
0.72880656
1
Return whether an image needs pushing
def image_needs_pushing(image):
    d = docker_client()
    try:
        d.images.get_registry_data(image)
    except docker.errors.APIError:
        # image not found on registry, needs pushing
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def PrePush(self, image):\n pass", "def hasImage(self):\n if self.getImage():\n return True\n return False", "def _pushing(pushop):\n return bool(\n pushop.outgoing.missing\n or pushop.outdatedphases\n or pushop.outobsmarkers\n or pushop.outbookmarks\n )", "def hasImage(self):\n return self._image is not None", "def has_image(self):\n return hasattr(self, \"_image\") and self._image is not None", "def _check_trigger_sync(pre_image, image):\n return pre_image.status in ('saving', 'queued') and image.size and \\\n [l for l in image.locations if not utils.is_glance_location(l['url'])]", "def has_images(self):\n return len(self.images) > 0", "def image_needs_building(image):\n d = docker_client()\n\n # first, check for locally built image\n try:\n d.images.get(image)\n except docker.errors.ImageNotFound:\n # image not found, check registry\n pass\n else:\n # it exists locally, no need to check remote\n return False\n\n # image may need building if it's not on the registry\n return image_needs_pushing(image)", "def hasImages(self):\n return len(self.getImages()) > 0", "def hasImages(self):\n return len(self.getImages()) > 0", "def filter_push(move: dict):\n if move.get(\"pushes\") > 0:\n return True\n else:\n return False", "def push(self):\n return False", "def can_push(self) -> bool:\n return pulumi.get(self, \"can_push\")", "def request_image(self, source, connection):\n try:\n self.__image_queue.put_nowait((source, connection))\n return True\n except Queue.Full:\n return False", "def is_image(self, service_name: str) -> bool:\n return False if self.get_from_service(service_name, \"build\") else True", "def check_image_local(self, tag):\n tags = self.get_tags()\n return (tag in tags)", "def __contains__(self, image: Any) -> bool:\n return isinstance(image, self.native_image_type)", "def has_media(self):\r\n if self.image:\r\n return True\r\n return False", "def test_push_already_pushed(self, mock_docker_environment, snapshot, capsys):\n mock_docker_environment.api.push = mock.Mock(\n return_value=event_streams.PUSH_ALREADY_PRESENT\n )\n push_image(TEST_IMAGE_NAME)\n out, err = capsys.readouterr()\n snapshot.assert_match(out)", "def is_image(pos, image, start_pos, dim_square):\n # Grab image on real board\n im = region_grabber((start_pos[0] + pos[1] * dim_square[0],\n start_pos[1] - (pos[0] + 1.0) * dim_square[1],\n start_pos[0] + (pos[1] + 1.0) * dim_square[0],\n start_pos[1] - pos[0] * dim_square[1]))\n\n pos_image = imagesearcharea(image, 0, 0, 0, 0, 0.9, im)\n return pos_image != [-1, -1]", "def __nonzero__(self):\n if self._pushed:\n return True\n try:\n self.push(self.next())\n except StopIteration:\n return False\n return True", "def has_picture(self):\n try:\n first = self.picture_planets()[0]\n except IndexError:\n first = None\n\n return first is not None", "def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False", "def _is_push_command(self):\n return self._match_memory_pattern(\"push\")", "def images_exist(self):\n pass", "def is_valid_image(image):\n if image not in AVAILABLE_IMAGES.keys():\n return False\n\n return True", "def push_image(self, tag_list, push_to_defaults, additional_registries=[], version_release_tuple=None,\n push_late=False, dry_run=False):\n\n # Late pushes allow certain images to be the last of a group to be\n # pushed to mirrors. CI/CD systems may initiate operations based on the\n # update a given image and all other images need to be in place\n # when that special image is updated. 
The special images are there\n # pushed \"late\"\n # Actions that need to push all images need to push all images\n # need to make two passes/invocations of this method: one\n # with push_late=False and one with push_late=True.\n\n is_late_push = False\n if self.config.push.late is not Missing:\n is_late_push = self.config.push.late\n\n if push_late != is_late_push:\n return True\n\n push_names = []\n\n if push_to_defaults:\n push_names.extend(self.metadata.get_default_push_names())\n\n push_names.extend(self.metadata.get_additional_push_names(additional_registries))\n\n # Nothing to push to? We are done.\n if not push_names:\n return True\n\n with Dir(self.distgit_dir):\n\n if version_release_tuple:\n version = version_release_tuple[0]\n release = version_release_tuple[1]\n else:\n\n # History\n # We used to rely on the \"release\" label being set in the Dockerfile, but this is problematic for several reasons.\n # (1) If 'release' is not set, OSBS will determine one automatically that does not conflict\n # with a pre-existing image build. This is extremely helpful since we don't have to\n # worry about bumping the release during refresh images. This means we generally DON'T\n # want the release label in the file and can't, therefore, rely on it being there.\n # (2) People have logged into distgit before in order to bump the release field. This happening\n # at the wrong time breaks the build.\n\n # If the version & release information was not specified,\n # try to detect latest build from brew.\n # Read in version information from the Distgit dockerfile\n _, version, release = self.metadata.get_latest_build_info()\n\n try:\n record = {\n \"distgit_key\": self.metadata.distgit_key,\n \"distgit\": '{}/{}'.format(self.metadata.namespace, self.metadata.name),\n \"image\": self.config.name,\n \"version\": version,\n \"release\": release,\n \"message\": \"Unknown failure\",\n \"status\": -1,\n # Status defaults to failure until explicitly set by success. This handles raised exceptions.\n }\n\n # pull just the main image name first\n image_name_and_version = \"%s:%s-%s\" % (self.config.name, version, release)\n brew_image_url = \"/\".join((constants.BREW_IMAGE_HOST, image_name_and_version))\n pull_image(brew_image_url)\n record['message'] = \"Successfully pulled image\"\n record['status'] = 0\n except Exception as err:\n record[\"message\"] = \"Exception occurred: %s\" % str(err)\n self.logger.info(\"Error pulling %s: %s\" % (self.metadata.name, err))\n raise\n finally:\n self.runtime.add_record('pull', **record)\n\n push_tags = list(tag_list)\n\n # If no tags were specified, build defaults\n if not push_tags:\n push_tags = self.metadata.get_default_push_tags(version, release)\n\n for image_name in push_names:\n try:\n\n repo = image_name.split('/', 1)\n\n action = \"push\"\n record = {\n \"distgit_key\": self.metadata.distgit_key,\n \"distgit\": '{}/{}'.format(self.metadata.namespace, self.metadata.name),\n \"repo\": repo, # ns/repo\n \"name\": image_name, # full registry/ns/repo\n \"version\": version,\n \"release\": release,\n \"message\": \"Unknown failure\",\n \"tags\": \", \".join(push_tags),\n \"status\": -1,\n # Status defaults to failure until explicitly set by success. 
This handles raised exceptions.\n }\n\n for push_tag in push_tags:\n push_url = '{}:{}'.format(image_name, push_tag)\n\n if dry_run:\n rc = 0\n self.logger.info('Would have tagged {} as {}'.format(brew_image_url, push_url))\n self.logger.info('Would have pushed {}'.format(push_url))\n else:\n rc, out, err = exectools.cmd_gather([\"docker\", \"tag\", brew_image_url, push_url])\n\n if rc != 0:\n # Unable to tag the image\n raise IOError(\"Error tagging image as: %s\" % push_url)\n\n for r in range(10):\n self.logger.info(\"Pushing image to mirror [retry=%d]: %s\" % (r, push_url))\n rc, out, err = exectools.cmd_gather([\"docker\", \"push\", push_url])\n if rc == 0:\n break\n self.logger.info(\"Error pushing image -- retrying in 60 seconds\")\n time.sleep(60)\n\n if rc != 0:\n # Unable to push to registry\n raise IOError(\"Error pushing image: %s\" % push_url)\n\n record[\"message\"] = \"Successfully pushed all tags\"\n record[\"status\"] = 0\n\n except Exception as err:\n record[\"message\"] = \"Exception occurred: %s\" % str(err)\n self.logger.info(\"Error pushing %s: %s\" % (self.metadata.name, err))\n raise\n\n finally:\n self.runtime.add_record(action, **record)\n\n return True", "def check_got_promotion():\n im = region_grabber((550, 250, 815, 320)) # Hardcoded\n pos = imagesearcharea(\"Images/promotion_queen.jpg\", 0, 0, 0, 0, 0.9, im)\n if pos != [-1, -1]:\n print(\"Got promotion\")\n pos_image = [550 + pos[0], 250 + pos[1]]\n click_image(\"Images/promotion_queen.jpg\", pos_image, \"left\", 0.2)\n time.sleep(0.5)\n return True\n return False", "def is_finished(self):\n if self.task_index + 1 >= self.image_count:\n return True\n return False", "def is_use_pictures(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsUsePictures', self.handle))", "def is_new_based_on_imgs(soup):\n\n \n \n prev_hashes = get_prev_img_hashes()\n temp_hashes = get_temp_img_hashes(soup)\n\n if len(temp_hashes.difference(prev_hashes))>0:\n print(\"new, based on images\")\n return True\n else:\n return False", "def has_legacy_image(self):\n pass", "def has_legacy_image(self):\n pass", "def process_images(self):\n source_images = self.get_build_images()\n self.log.info(\"Got %s images for publishing. 
Processing..\", len(source_images))\n\n for image in source_images:\n self.fetch_image(image)\n\n for target in image.push_registries:\n for tag in image.release_tags:\n repository = \"%s/%s\" % (target, image.repository.name)\n self.tag_image(image, repository, tag)\n retry_count = 1\n while retry_count <= self.retry_limit:\n self.log.info(\"Pushing %s:%s to %s (%d/%d)\", repository, tag, target, retry_count, self.retry_limit)\n try:\n self.publish_image(target, repository, tag)\n break\n except ImagePushError as e:\n self.log.error(\"%s\", e.message)\n retry_count = retry_count + 1\n else:\n return False\n return True", "def image_present_check(self):\r\n if not self.master.images: # If no images present in the list\r\n messagebox.showerror(\"Error\", 'No image selected') # Throw up the error messagebox\r\n\r\n else:\r\n return True # If there are images present in the list, then return True value\r", "def is_depth_image(self):\n return False", "def is_push_enabled(self) -> bool:\n return pulumi.get(self, \"is_push_enabled\")", "def check_image(self, tag):\n image_name = self.build_image_name(tag)\n try:\n self.client.images.get_registry_data(image_name)\n return True\n except Exception as ex:\n print('Image {} does not exist: '.format(image_name), str(ex))\n return False", "def image_is_available(filename):\n # FIXME - Implement!\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n return os.path.isfile(file_path)", "def hasImageData(self,volumeNode):\n if not volumeNode:\n logging.debug('hasImageData failed: no volume node')\n return False\n return True", "def image(self):\n return self.any_image(-1)", "def image_check(kwargs) -> bool:\n\n # Kwarg argument check\n return kwarg_check(\n kwargs=kwargs,\n options=[\n \"min_captured_at\",\n \"max_captured_at\",\n \"radius\",\n \"image_type\",\n \"organization_id\",\n \"fields\",\n ],\n callback=\"image_check\",\n )", "def no_bricks(self):\n if self.brick_count == self.total_bricks:\n return True\n else:\n return False", "def isopen(self):\n return _image.image_isopen(self)", "def is_stack(self) -> bool:\n return self.layers > 1", "def is_image_local(self, image):\n result = self.execute_module(\"docker_image_facts\", {\"name\": image})\n return bool(result.get(\"images\")) and not result.get(\"failed\")", "def is_ringing(self) -> bool:", "def image_named(self, msg, name, target=None):\n return name in self._get_image_names(msg)", "def push(args, image_name_tag):\n if args.push is True:\n cmd_push = f\"docker push {image_name_tag}\"\n print(f\"COMMAND: {cmd_push}\")\n print(\"\", flush=True)\n return_code = subprocess.call(cmd_push, shell=True)\n if return_code != 0:\n exit(f\"Error with {cmd_push}\")\n return 0", "def image_cb(self, msg):\n self.has_image = True\n self.camera_image_msg = msg", "def tag_push_events(self) -> bool:\n return pulumi.get(self, \"tag_push_events\")", "def is_dead(self, img):\n\n crop_height = 20\n crop_width = 20\n threshold = 70\n pixels_percentage = 0.10\n\n pixels_required = (img.shape[1] - 2 * crop_width) * crop_height * pixels_percentage\n\n crop = img[-crop_height:, crop_width:-crop_width]\n\n r = crop[:, :, 0] < threshold\n g = crop[:, :, 1] < threshold\n b = crop[:, :, 2] < threshold\n\n pixels = (r & g & b).sum()\n\n # print(\"Pixels: {}, Required: {}\".format(pixels, pixels_required))\n\n return pixels < pixels_required", "def hasPng(self):\n\t\tif self.isPng:\n\t\t\treturn True\n\t\treturn textureFile( self.path.replace( self.extension, '.png' ) ).exists", "def 
hasImageData(self,volumeNode):\n if not volumeNode:\n logging.debug('no volume node')\n return False\n if volumeNode.GetImageData() is None:\n logging.debug('no image data')\n return False\n return True", "def _from_snapshot_request(pre_image, image):\n if pre_image.status == 'queued' and len(image.locations) == 1:\n loc_meta = image.locations[0]['metadata']\n return loc_meta and loc_meta.get('image_from', None) in ['snapshot',\n 'volume']", "def hasImageData(self,volumeNode):\n if not volumeNode:\n print('no volume node')\n return False\n if volumeNode.GetImageData() == None:\n print('no image data')\n return False\n return True", "def image_cb(self, msg):\n self.has_image = True\n self.camera_image = msg", "def not_in_image(self) -> bool:\n return not self.vector", "def pil_available():\n out = False\n try:\n from PIL import Image # noqa\n out = True\n except ImportError:\n pass\n return out", "def test_push_silent(self, mock_docker_environment, snapshot, capsys):\n push_image(TEST_IMAGE_NAME, silent=True)\n out, err = capsys.readouterr()\n snapshot.assert_match(out)", "def check_image_useful(image_path):\n lights_on = lights_are_on(image_path)\n if not lights_on:\n os.remove(image_path)", "def hasImageData(self,volumeNode):\n if not volumeNode:\n logging.debug('hasImageData failed: no volume node')\n return False\n if volumeNode.GetImageData() is None:\n logging.debug('hasImageData failed: no image data in volume node')\n return False\n return True", "def checkImages(self):\r\n\r\n self.leftImage, self.rightImage, res = self.receiver.getImageData()\r\n\r\n return res", "def check_if_actuall(self) -> bool:\n\n return self.last_date >= self.get_last_image_date()", "def rgb(self) -> bool:\n return self.image_shape[2] == 3", "def _is_push(self, words):\n if words[0] == 'push':\n if len(words) != 3:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_PUSH command.\".format(self._file_line))\n if words[1] not in ['constant', 'temp', 'pointer', 'static', 'local', 'argument', 'this', 'that']:\n raise SyntaxError(\"File line {}: Invalid second argument.\".format(self._file_line))\n return True\n else:\n return False", "def push(self, executor: Executor) -> bool:\n if self.episode >= self._wp.training_delay:\n self.get_logger().info('Pushing gradients...')\n self.push_gradients(executor)\n\n else:\n self.get_logger().info('Pushing empty gradients...')\n self.push_empty_gradients(executor)\n\n experiment_done = self._future_gradients.result().done\n\n if experiment_done:\n self.get_logger().warn('Experiment complete!')\n\n else:\n self.get_logger().info(f'Episode {self.episode} complete!')\n\n # Move to top `pull` section\n self.flag.reset()\n\n # Increment episodes\n self.episode += 1\n\n return experiment_done", "def test_push(self, mock_docker_environment, snapshot, capsys):\n push_image(TEST_IMAGE_NAME)\n out, err = capsys.readouterr()\n snapshot.assert_match(out)", "def reloadImage(*args, **kwargs)->bool:\n pass", "def IsOk(*args, **kwargs):\n return _gdi_.Brush_IsOk(*args, **kwargs)", "def option_image_nocomposite(self):\n return not rcParams['image.composite_image']", "def not_pushed_down(self):\n return (self.genus >= 2 and self.n != 0) or (self.genus == 1 and self.n > 1) or (self.genus == 0 and self.n > 3)", "def is_brush(self) -> bool:\n return len(self.solids) > 0", "def createBitmap(self):\n return self.level.has_redundancy and self.size >= 1000 and self.format.type != \"swap\"", "def image_cb(self, msg): # incoming image\n self.has_image = True\n self.camera_image = 
msg", "def any(self) -> bool:\n return len(self.queue) > 0", "def hasImg(img_name):\n try:\n Image.objects.raw({\"_id\": img_name}).first()\n return True\n except pymodm_errors.DoesNotExist:\n return False", "def push(self, repository, stream=False, raise_on_error=False, **kwargs):\n response = super(DockerClientWrapper, self).push(repository, stream=stream, **kwargs)\n if stream:\n result = self._docker_status_stream(response, raise_on_error)\n else:\n result = self._docker_status_stream(response.split('\\r\\n') if response else (), raise_on_error)\n return result and not result.get('error')", "def load_from_images(self):\n logging.debug(\"load_from_images called\")\n return True", "def create_octree_image() -> bool:\n return async_octree and (create_image_type != CREATE_IMAGE_NORMAL)", "def push_image(image):\n\n subprocess.check_call(['docker', 'push', image])", "def push_events(self) -> bool:\n return pulumi.get(self, \"push_events\")", "def game_active():\n im = region_grabber((0, 0, 110, 30))\n pos = imagesearcharea(\"Images/title.jpg\", 0, 0, 0, 0, 0.9, im) # Black background\n return pos != [-1, -1]", "def have_circ_pump(self):\n return bool(self.circ_pump)", "def track(self, image):\r\n \r\n # if the object was initialized correctrly\r\n if self.well_initialized:\r\n ok, self.object_bound_rect = self.tracker.update(image)\r\n \r\n return ok, self.object_bound_rect", "def developer_can_push(self) -> bool:\n return pulumi.get(self, \"developer_can_push\")", "def image_tests_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"image_tests_enabled\")", "def hasNext(self) -> bool:\n return self.stack != []", "def hasCurrentFrame(self):\n if self.currentFrame == []:\n return False\n return True", "def button_pushed(new_state, old_state):\n if new_state == Actuator.BUTTON_ON and old_state == Actuator.BUTTON_OFF:\n return True\n return False", "def detect_state(self, camera, image, send_q):\n print('Therefore, should never get to this print statement')\n pass", "def create_image_allowed(self):\n return self._create_image_allowed", "def is_image(mine=None, file=None):\n if file:\n mine = get_file_mine(file)\n print(mine)\n if mine:\n return mine.find('image') != -1\n\n return False", "def on_fruit(self):\r\n if self.grid_pos in self.app.fruit:\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0):\r\n return True\r\n # in the x-direction \r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1):\r\n return True\r\n # in the y-direction\r\n\r\n return False", "def push(state, value):\n if state[HEAD][MEM] == 0:\n state[HEAD][STATUS] = OOM\n return False\n else:\n state[STACK].append(value)\n state[HEAD][MEM] -= 1#XXX\n return True", "def builder_should_create_target_image(self, builder, target, image_id, template, parameters):", "def branches(image):\n return _neighbors_conv(image) > 2", "def is_shooting(self):\n if self.gun_interface:\n return self.gun_interface.is_preparing()\n return False", "def has_next(self):\n return len(self.pile) > 0", "def hasImageData(self,volumeNode):\n if not volumeNode:\n logging.debug('hasImageData failed: no volume node')\n return False\n if volumeNode.GetImageData() is None:\n logging.debug('hasImageData failed: no image data in volume node')\n return False\n return True" ]
[ "0.67209774", "0.66439164", "0.6631148", "0.656371", "0.64065194", "0.63889164", "0.6336889", "0.6315508", "0.6282437", "0.6282437", "0.62279326", "0.61669666", "0.6137131", "0.6032941", "0.6019985", "0.59767556", "0.5969536", "0.59661514", "0.5943", "0.5921205", "0.59130216", "0.5907948", "0.5905992", "0.59012645", "0.5897086", "0.58821714", "0.5851078", "0.58395475", "0.57877517", "0.57867956", "0.57384926", "0.573108", "0.573108", "0.57056105", "0.5693553", "0.568624", "0.5624381", "0.5616539", "0.56026363", "0.5573916", "0.5534898", "0.55239123", "0.5512597", "0.5510057", "0.55091536", "0.5506208", "0.54954356", "0.54951686", "0.5493653", "0.5493632", "0.5488732", "0.54740226", "0.54570866", "0.5453527", "0.5452603", "0.54405904", "0.54340816", "0.5419896", "0.54197043", "0.54021424", "0.5396594", "0.5386461", "0.53806686", "0.5359579", "0.53580403", "0.5355371", "0.53549415", "0.5345546", "0.5340807", "0.53355753", "0.53209907", "0.53204095", "0.5317038", "0.5309159", "0.53013176", "0.52979535", "0.5283988", "0.5281911", "0.527964", "0.52784485", "0.52712405", "0.5270423", "0.5264639", "0.526288", "0.52558553", "0.52541596", "0.5251377", "0.52471346", "0.524427", "0.5235687", "0.52313614", "0.5230287", "0.5229677", "0.52293134", "0.52251804", "0.5224363", "0.5222571", "0.5221547", "0.52212507", "0.5219869" ]
0.7235848
0
Return whether an image needs building. Checks if the image exists (ignores commit range), either locally or on the registry.
def image_needs_building(image):
    d = docker_client()

    # first, check for locally built image
    try:
        d.images.get(image)
    except docker.errors.ImageNotFound:
        # image not found, check registry
        pass
    else:
        # it exists locally, no need to check remote
        return False

    # image may need building if it's not on the registry
    return image_needs_pushing(image)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def image_needs_pushing(image):\n d = docker_client()\n try:\n d.images.get_registry_data(image)\n except docker.errors.APIError:\n # image not found on registry, needs pushing\n return True\n else:\n return False", "def check_image(self, tag):\n image_name = self.build_image_name(tag)\n try:\n self.client.images.get_registry_data(image_name)\n return True\n except Exception as ex:\n print('Image {} does not exist: '.format(image_name), str(ex))\n return False", "def is_image(self, service_name: str) -> bool:\n return False if self.get_from_service(service_name, \"build\") else True", "def test_image_exists_local_and_registry(self, mock_docker_environment):\n build_image_if_needed(TEST_IMAGE_NAME)\n mock_docker_environment.images.build.assert_not_called()", "def is_valid_image(image):\n if image not in AVAILABLE_IMAGES.keys():\n return False\n\n return True", "def check_molns_image(self):\n if 'molns_image_name' in self.config and self.config['molns_image_name'] is not None \\\n and self.config['molns_image_name'] != '':\n return self.docker.image_exists(self.config['molns_image_name'])\n return False", "def is_available_skopeo_image(self, image, registry, task_vars):\n\n cmd_str = \"skopeo inspect docker://{registry}/{image}\".format(\n registry=registry,\n image=image,\n )\n\n args = {\"_raw_params\": cmd_str}\n result = self.module_executor(\"command\", args, task_vars)\n return not result.get(\"failed\", False) and result.get(\"rc\", 0) == 0", "def docker_image_exists(args, image): # type: (EnvironmentConfig, str) -> bool\n try:\n docker_command(args, ['image', 'inspect', image], capture=True)\n except SubprocessError:\n return False\n\n return True", "def req_build(container):\n try:\n return 'dockerfile' in self.kard.env.get_container(container)\n except KeyError:\n return False", "def hasImage(self):\n if self.getImage():\n return True\n return False", "def is_image_local(self, image):\n result = self.execute_module(\"docker_image_facts\", {\"name\": image})\n return bool(result.get(\"images\")) and not result.get(\"failed\")", "def test_image_exists_local(self, mock_docker_environment):\n build_image_if_needed(TEST_IMAGE_NAME)\n mock_docker_environment.images.build.assert_not_called()", "def hasImage(self):\n return self._image is not None", "def is_image_exists(c, name):\n res = c.run('sudo docker images', hide='stdout')\n for image in res.stdout.split('\\n'):\n if name == image.split(' ')[0]:\n print('Image {name} exists'.format(name=name))\n return True\n\n print('Image {name} doesn\\'t exist'.format(name=name))\n return False", "def has_image(self):\n return hasattr(self, \"_image\") and self._image is not None", "def _check_build(self, gppkg_file, gppkg_spec):\n return gppkg_file == gppkg_spec.get_filename()", "def check_exist(self):\n helper.RbdImageOperator._check_rbd_image(self.real_path)", "def image_is_available(filename):\n # FIXME - Implement!\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)\n return os.path.isfile(file_path)", "def _check_trigger_sync(pre_image, image):\n return pre_image.status in ('saving', 'queued') and image.size and \\\n [l for l in image.locations if not utils.is_glance_location(l['url'])]", "def images_exist(self):\n pass", "def is_available_skopeo_image(self, image, default_registries):\n registries = default_registries\n\n # If image already includes a registry, only use that.\n # NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.\n # registry.access.redhat.com/rhel7 as if the registry were 
a namespace.\n # It's not clear that there's any way to distinguish them, but fortunately\n # the current set of images all look like [registry/]namespace/name[:version].\n if image.count(\"/\") > 1:\n registry, image = image.split(\"/\", 1)\n registries = [registry]\n\n for registry in registries:\n if registry not in self.reachable_registries:\n self.reachable_registries[registry] = self.connect_to_registry(registry)\n if not self.reachable_registries[registry]:\n continue\n\n args = {\"_raw_params\": self.skopeo_img_check_command.format(registry=registry, image=image)}\n result = self.execute_module_with_retries(\"command\", args)\n if result.get(\"rc\", 0) == 0 and not result.get(\"failed\"):\n return True\n if result.get(\"rc\") == 124: # RC 124 == timed out; mark unreachable\n self.reachable_registries[registry] = False\n\n return False", "def check_image(image):\n\n if not path.isfile(image):\n raise ImageException('Error: Singularity image \"%s\" not found.' % image)\n return True", "def has_image(self, container_name):\n name, tag = split_container_name(container_name)\n images = self._client.images(all=True)\n return any(container_name in image[\"RepoTags\"] for image in images)", "def BuildExists(buildname):\n for platform in platforms:\n if not os.path.exists(builds_basedir+'/'+platform+'/'+buildname):\n return False\n return True", "def test_image_exists_registry(self, mock_docker_environment):\n mock_docker_environment.images.get.side_effect = DockerNotFound(\"testing\")\n build_image_if_needed(TEST_IMAGE_NAME)\n mock_docker_environment.api.pull.assert_called_with(\n TEST_IMAGE_NAME, \"latest\", decode=True, stream=True\n )\n mock_docker_environment.images.build.assert_not_called()", "def test_image_exists_registry_no_pull(self, mock_docker_environment):\n mock_docker_environment.images.get.side_effect = DockerNotFound(\"testing\")\n build_image_if_needed(TEST_IMAGE_NAME, pull=False)\n mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)", "def pil_available():\n out = False\n try:\n from PIL import Image # noqa\n out = True\n except ImportError:\n pass\n return out", "def docker_available(): # type: () -> bool\n return bool(get_docker_command())", "def image_check(kwargs) -> bool:\n\n # Kwarg argument check\n return kwarg_check(\n kwargs=kwargs,\n options=[\n \"min_captured_at\",\n \"max_captured_at\",\n \"radius\",\n \"image_type\",\n \"organization_id\",\n \"fields\",\n ],\n callback=\"image_check\",\n )", "def check_image_local(self, tag):\n tags = self.get_tags()\n return (tag in tags)", "def has_legacy_image(self):\n pass", "def has_legacy_image(self):\n pass", "def is_built(args, task_name: str, artifact_name: str) -> bool:\n if task_name not in args._artifacts:\n return False\n\n for a in args._artifacts[task_name]:\n if a.name == artifact_name and a.built:\n return True\n elif a.name == artifact_name and not a.built:\n return False\n return False", "def has_images(self):\n return len(self.images) > 0", "def image_present_check(self):\r\n if not self.master.images: # If no images present in the list\r\n messagebox.showerror(\"Error\", 'No image selected') # Throw up the error messagebox\r\n\r\n else:\r\n return True # If there are images present in the list, then return True value\r", "def test_force_with_local_and_registry_image(self, mock_docker_environment):\n build_image_if_needed(TEST_IMAGE_NAME, force=True)\n mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)", "def exists(self):\n return 
self._repository is not None", "def hasImages(self):\n return len(self.getImages()) > 0", "def hasImages(self):\n return len(self.getImages()) > 0", "def hasImg(img_name):\n try:\n Image.objects.raw({\"_id\": img_name}).first()\n return True\n except pymodm_errors.DoesNotExist:\n return False", "def md5sum_check_if_build_is_needed(md5sum_cache_dir: Path, skip_provider_dependencies_check: bool) -> bool:\n build_needed = False\n modified_files, not_modified_files = calculate_md5_checksum_for_files(\n md5sum_cache_dir, update=False, skip_provider_dependencies_check=skip_provider_dependencies_check\n )\n if modified_files:\n get_console().print(\n f\"[warning]The following important files are modified in {AIRFLOW_SOURCES_ROOT} \"\n f\"since last time image was built: [/]\\n\\n\"\n )\n for file in modified_files:\n get_console().print(f\" * [info]{file}[/]\")\n get_console().print(\"\\n[warning]Likely CI image needs rebuild[/]\\n\")\n build_needed = True\n else:\n get_console().print(\n \"[info]Docker image build is not needed for CI build as no important files are changed! \"\n \"You can add --force-build to force it[/]\"\n )\n return build_needed", "def test_force_with_registry_image(self, mock_docker_environment):\n mock_docker_environment.images.get.side_effect = DockerNotFound(\"testing\")\n build_image_if_needed(TEST_IMAGE_NAME, force=True)\n mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)", "def image_not_exists(self):\n res = subprocess.run(\n \"{} inspect {}\".format(self.binary, self.vars['image']),\n shell=True, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)\n return res.returncode", "def allowed_image(self, module_id):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tself.log(\"In allowed_image: \" + module_id,level=logging.DEBUG)\n\t\tcfg = self.cfg\n\t\tif self.build['ignoreimage']:\n\t\t\tself.log(\"ignoreimage == true, returning true\" + module_id,level=logging.DEBUG)\n\t\t\treturn True\n\t\tself.log(str(cfg[module_id]['shutit.core.module.allowed_images']),level=logging.DEBUG)\n\t\tif cfg[module_id]['shutit.core.module.allowed_images']:\n\t\t\t# Try allowed images as regexps\n\t\t\tfor regexp in cfg[module_id]['shutit.core.module.allowed_images']:\n\t\t\t\tif not shutit_util.check_regexp(regexp):\n\t\t\t\t\tself.fail('Illegal regexp found in allowed_images: ' + regexp) # pragma: no cover\n\t\t\t\tif re.match('^' + regexp + '$', self.target['docker_image']):\n\t\t\t\t\treturn True\n\t\treturn False", "def GetOrBuild(self, image):\n full_image = self.GetFullRegistryTag(image)\n # Log in to the registry to see if image exists\n self.Login()\n if not FLAGS.force_container_build:\n # manifest inspect inpspects the registry's copy\n inspect_cmd = ['docker', 'manifest', 'inspect', full_image]\n _, _, retcode = vm_util.IssueCommand(\n inspect_cmd, raise_on_failure=False)\n if retcode == 0:\n return full_image\n self._Build(image)\n return full_image", "def image_exists(self, id=None, tag=None):\n exists = False\n if id and self.image_by_id(id):\n exists = True\n elif tag and self.image_by_tag(tag):\n exists = True\n\n return exists", "def BinaryExists(filename):\n return os.path.exists(os.path.join(self.options.build_dir, filename))", "def is_image(pos, image, start_pos, dim_square):\n # Grab image on real board\n im = region_grabber((start_pos[0] + pos[1] * dim_square[0],\n start_pos[1] - (pos[0] + 1.0) * dim_square[1],\n start_pos[0] + (pos[1] + 1.0) * dim_square[0],\n start_pos[1] - pos[0] * dim_square[1]))\n\n pos_image = 
imagesearcharea(image, 0, 0, 0, 0, 0.9, im)\n return pos_image != [-1, -1]", "def can_build(self, game_map) -> bool:\n if self.is_cart():\n return False\n cell = game_map.get_cell_by_pos(self.pos)\n if not cell.has_resource() and cell.citytile is None and self.can_act() and self.has_enough_resources_to_build:\n return True\n return False", "def can_load(self):\n\n try:\n return self._get_nearest_entry_with_artifact() is not None\n except InternalCacheStateError as e:\n self._raise_state_error_with_explanation(e)", "def hasPng(self):\n\t\tif self.isPng:\n\t\t\treturn True\n\t\treturn textureFile( self.path.replace( self.extension, '.png' ) ).exists", "def _is_new_repo_generating(module_build, koji_session):\n if not module_build.new_repo_task_id:\n return False\n\n log.debug(\n 'Checking status of newRepo task \"%d\" for %s', module_build.new_repo_task_id, module_build)\n task_info = koji_session.getTaskInfo(module_build.new_repo_task_id)\n\n active_koji_states = [\n koji.TASK_STATES[\"FREE\"], koji.TASK_STATES[\"OPEN\"], koji.TASK_STATES[\"ASSIGNED\"]]\n\n return task_info[\"state\"] in active_koji_states", "def __contains__(self, image: Any) -> bool:\n return isinstance(image, self.native_image_type)", "def check_build(self, bld_num):\n # QQQ In future this should be replaced with a query to the\n # build database\n bld_dir = os.path.join(self.ver_dir, str(bld_num))\n for plat in self.plats.keys():\n if self.plats[plat]:\n # QQQ Assumes format of filename unique to couchbase-server\n files = glob.glob(\"{}/couchbase-server-enterprise?{}*{}*\".format(\n bld_dir, self.version, plat\n ))\n files = [x for x in files if not (x.endswith(\".md5\") or x.endswith(\".sha256\"))]\n if len(files) == 0:\n print (\"Platform {} is missing\".format(plat))\n return False\n return True", "def test_image_exists_registry_pull_not_found(self, mock_docker_environment):\n mock_docker_environment.images.get.side_effect = DockerNotFound(\"testing\")\n mock_docker_environment.api.pull.side_effect = DockerNotFound(\"testing\")\n\n build_image_if_needed(TEST_IMAGE_NAME)\n mock_docker_environment.api.pull.assert_called_with(\n TEST_IMAGE_NAME, \"latest\", decode=True, stream=True\n )\n mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)", "def create_image_allowed(self):\n return self._create_image_allowed", "def is_image_exist(path_to_image, threshold=0.8, window_title=None):\n image_rgb = get_screenshot(window_title)\n image_rgb = np.array(image_rgb)\n image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)\n template = cv2.imread(path_to_image, 0)\n w, h = template.shape[::-1]\n res = cv2.matchTemplate(image_gray, template, cv2.TM_CCOEFF_NORMED)\n loc = np.where(res >= threshold)\n for pt in zip(*loc[::-1]):\n cv2.rectangle(image_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)\n flag = False\n if np.amax(res) > threshold:\n flag = True\n return flag", "def images_are_present(file_info):\n currentdir = os.path.join(WORKDIR, file_info['folder'])\n if not os.path.exists(currentdir):\n return False\n count = len([x for x in os.listdir(currentdir) if x.endswith('.png')])\n if count != file_info['size']:\n print([x for x in os.listdir(currentdir) if x.endswith('.png')])\n print('Count does not match')\n print(count)\n print(file_info['size'])\n return False\n return True", "def has_repos(self):\n for section in self.sections():\n if section not in self.reserved_sections:\n return True\n return False", "def test_image_does_not_exist(self, mock_docker_environment):\n 
mock_docker_environment.images.get.side_effect = DockerNotFound(\"testing\")\n mock_docker_environment.images.get_registry_data.side_effect = DockerNotFound(\"mocked\")\n build_image_if_needed(TEST_IMAGE_NAME)\n mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)\n\n build_image_if_needed(TEST_IMAGE_NAME, pull=True)\n mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)", "def test_unknown_registry(self, mock_docker_environment):\n mock_docker_environment.images.get.side_effect = DockerNotFound(\"testing\")\n build_image_if_needed(\"unknown.registry.com/foo/bar\")\n mock_docker_environment.images.build.assert_called_with(\n dockerfile=\"Dockerfile\",\n tag=\"unknown.registry.com/foo/bar:latest\",\n path=\"/opt/ixian_docker\",\n )", "def testCheckAvailable(self):\n img = self.img\n img.inspect()\n with converter.RootMounted(img.converter._h,\n '/dev/VolGroup00/LogVol00'):\n c = img.converter\n installer = redhat.LocalInstaller(\n c._h, '/dev/VolGroup00/LogVol00',\n db.DB(['{}/conf/guestconv.db'.format(env.topdir)]),\n log.get_logger_object(test_helper.logger)\n )\n\n kernel = redhat.Package('kernel',\n version='2.6.9', release='89.EL',\n arch='i686')\n self.assertTrue(installer.check_available([kernel]))", "def _build(registry, image_name, git_sha):\n\n image = f\"{registry}/{image_name}\"\n\n # If this image is not in our registry, nothing we can do.\n if registry != config.DOCKER_REGISTRY:\n return \"\"\n\n # If image:{git_sha} already exists, then return.\n logger.info(\n f\"Pulling git_sha tag {image}:{git_sha} to check if it already exists\"\n )\n if (\n not args.dry_run\n and docker_silent.pull(f\"{image}:{git_sha}\").returncode == 0\n ):\n # better to find the hash-{hash} of this image, and return hash\n # but currently, it is not easy to find all tags of the same image digest through registry API.\n # so we return image digest instead.\n digest = get_image_digest(f\"{image}:{git_sha}\")\n logger.info(\n f\"git_sha tag {image}:{git_sha} already exists, digest: %s\", digest\n )\n if not digest:\n raise Exception(\"Failed to get digest for existing image\")\n _tag_to_extra_tags(args, image, git_sha)\n return digest\n\n # Enter build context directory if it is specified\n build_context = enter_build_context(image_name)\n\n # Parse .dockerignore in build context\n dockerignore_files_set = parse_dockerignore(build_context)\n\n # Check if the dockerfile exists\n dockerfile_path = locate_dockerfile(image_name)\n if not os.path.isfile(dockerfile_path):\n logger.error(\n \"%s not exists or is not a file, so %s cannot get build\",\n dockerfile_path,\n image_name,\n )\n raise Exception(\"Building cannot continue\")\n\n dockerfile = Dockerfile(dockerfile_path, build_arg=build_arg)\n\n hasher = sha256()\n\n # Build parents, and calc parents hash\n for parent_image in dockerfile.parent_images:\n (\n parent_image_registry,\n parent_image_name,\n parent_image_tag,\n ) = parse_docker_image_identity(parent_image)\n parent_digest = _build(\n parent_image_registry, parent_image_name, parent_image_tag\n )\n if parent_digest is None:\n raise Exception(f\"Failed to get parent_digest for {image}\")\n hasher.update(parent_digest.encode())\n hash_logger.info(\n \"parent: %s, digest: (%s, %s), hash: %s\",\n parent_image,\n parent_digest,\n parent_digest.encode(),\n hasher.hexdigest(),\n )\n\n # Calc current image files hash\n\n def update_file_hash(f):\n if not os.path.isfile(f):\n return\n if f in dockerignore_files_set:\n 
hash_logger.debug(\"ignore: %s\", f)\n return\n with open(f, \"rb\") as open_file:\n buf = open_file.read(config.READ_FILE_BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = open_file.read(config.READ_FILE_BLOCKSIZE)\n hash_logger.info(\"update: %s, hash: %s\", f, hasher.hexdigest())\n\n srcs = [dockerfile_path] + dockerfile.copied_srcs + dockerfile.added_srcs\n # TODO: if the src is a url, download it and hash it (even crane didn't do that)\n for src in srcs:\n for f in sorted(glob.glob(src)):\n # We match every file in a directory recursively\n if os.path.isdir(f):\n for sub_f in sorted(glob.glob(f\"{f}/**\", recursive=True)):\n update_file_hash(sub_f)\n else:\n update_file_hash(f)\n\n files_hash = hasher.hexdigest()\n hash_logger.info(\"image: %s, hash: %s\", image, files_hash)\n\n hash_tag = config.FILES_HASH_TAG_PATTERN.format(files_hash=files_hash)\n # FIXME(harry): hack, remove this\n old_hash_image = f\"docker-registry.example.com:5000/{image_name}:{hash_tag}\"\n\n logger.info(\n f\"Pulling files_hash tag {image}:{hash_tag} to check if it already exists\"\n )\n # If image:hash-{hash} already exists,\n # then content didn't change, return.\n # We just need to tag it to latest code version.\n if (\n not args.dry_run\n and docker_silent.pull(f\"{image}:{hash_tag}\").returncode == 0\n ):\n logger.info(\n f\"files_hash tag {image}:{hash_tag} already exists, \"\n \"it means content didn't change, we can just tag the old image to new git_sha version tag\"\n )\n # FIXME(harry): hack, remove this\n elif not args.dry_run and docker_silent.pull(old_hash_image).returncode == 0:\n logger.info(f\"NOTE: files_hash tag {old_hash_image} already exists!\")\n # tag and push this hash image\n if docker.tag(old_hash_image, f\"{image}:{hash_tag}\").returncode != 0:\n logger.error(\"Failed to tag old hash image\")\n return\n if docker.push(f\"{image}:{hash_tag}\").returncode != 0:\n logger.error(\"Failed to push hash_tag image\")\n return\n # If image:hash-{hash} not exists, then build it from Dockerfile.\n else:\n logger.info(\n f\"files_hash tag {image}:{hash_tag} dosen't exists, \"\n \"it means content may changed, gonna build it from Dockerfile\"\n )\n if build_with_raw_command(args, image, dockerfile_path, hash_tag) != 0:\n logger.error(f\"Failed to build {image}:{hash_tag}\")\n return\n if docker.push(f\"{image}:{hash_tag}\").returncode != 0:\n logger.error(f\"Failed to push image\")\n return\n logger.info(f\"image files_hash tag {image}:{hash_tag} is pushed\")\n\n # tag and push this final image\n if docker.tag(f\"{image}:{hash_tag}\", f\"{image}:{git_sha}\").returncode != 0:\n logger.error(\"Failed to tag image\")\n return\n _tag_to_extra_tags(args, image, git_sha)\n if docker.push(f\"{image}:{git_sha}\").returncode != 0:\n logger.error(\"Failed to push image\")\n return\n digest = get_image_digest(f\"{image}:{git_sha}\")\n if not digest:\n logger.error(\"Failed to get digest for image\")\n return\n logger.info(f\"image {image}:{git_sha} is pushed, digest: {digest}\")\n\n return digest", "def check_build_status(owner, repository, ref):\n return get_hvcs().check_build_status(owner, repository, ref)", "def file_present(self,imagefile=None):\n import hashlib\n if self.filesize()==0:\n return False # empty files are never present\n if imagefile==None:\n imagefile=self.imagefile # use this one\n for hashname in ['md5','sha1']:\n oldhash = self.tag(hashname)\n if oldhash:\n newhash = hashlib.new(hashname,self.contents(imagefile=imagefile)).hexdigest()\n return oldhash==newhash\n raise 
ValueError,\"Cannot process file \"+self.filename()+\": no hash in \"+str(self)", "def _is_rage(self):\n\n# dir_list = os.listdir(os.environ['PWD'])\n\n# # Check .../*/pc/data/cdimages dir\n# for data_dir in dir_list:\n# if os.path.exists(os.path.join(os.environ['PWD'], data_dir, 'pc/data/cdimages')):\n# return True\n if os.path.exists(os.path.join(os.environ['PWD'], 'pc/data/cdimages')):\n return True\n\n return False", "def _create(self, imagespec):\n if not self.dockerioapi.is_repo_name(imagespec):\n Msg().err(\"Error: must specify image:tag or repository/image:tag\")\n return False\n (imagerepo, tag) = self._check_imagespec(imagespec)\n if imagerepo:\n return ContainerStructure(self.localrepo).create_fromimage(\n imagerepo, tag)\n return False", "def check_availability(img_path):\n # loading gray image\n gray_image = cv2.imread(img_path, 0)\n\n # check whether img give empty list or not\n flag = face_recognition.face_locations(gray_image)\n if flag:\n return True\n return False", "def build_container(\n self, odcs, repo_type, repo, push_to_defaults, additional_registries, terminate_event,\n scratch=False, retries=3):\n if self.org_image_name is None or self.org_version is None:\n if not os.path.isfile(os.path.join(self.distgit_dir, 'Dockerfile')):\n self.logger.info('No Dockerfile found in {}'.format(self.distgit_dir))\n else:\n self.logger.info('Unknown error loading Dockerfile information')\n return False\n\n action = \"build\"\n release = self.org_release if self.org_release is not None else '?'\n record = {\n \"dir\": self.distgit_dir,\n \"dockerfile\": \"%s/Dockerfile\" % self.distgit_dir,\n \"distgit\": self.metadata.name,\n \"image\": self.org_image_name,\n \"version\": self.org_version,\n \"release\": release,\n \"message\": \"Unknown failure\",\n \"task_id\": \"n/a\",\n \"task_url\": \"n/a\",\n \"status\": -1,\n \"push_status\": -1,\n # Status defaults to failure until explicitly set by success. This handles raised exceptions.\n }\n\n target_tag = \"-\".join((self.org_version, release))\n target_image = \":\".join((self.org_image_name, target_tag))\n\n try:\n if not scratch and self.org_release is not None \\\n and self.metadata.tag_exists(target_tag):\n self.logger.info(\"Image already built for: {}\".format(target_image))\n else:\n # If this image is FROM another group member, we need to wait on that group member\n # Use .get('from',None) since from is a reserved word.\n image_from = Model(self.config.get('from', None))\n if image_from.member is not Missing:\n self._set_wait_for(image_from.member, terminate_event)\n for builder in image_from.get('builder', []):\n if 'member' in builder:\n self._set_wait_for(builder['member'], terminate_event)\n\n # Allow an image to wait on an arbitrary image in the group. 
This is presently\n # just a workaround for: https://projects.engineering.redhat.com/browse/OSBS-5592\n if self.config.wait_for is not Missing:\n self._set_wait_for(self.config.wait_for, terminate_event)\n\n def wait(n):\n self.logger.info(\"Async error in image build thread [attempt #{}]\".format(n + 1))\n # Brew does not handle an immediate retry correctly, wait\n # before trying another build, terminating if interrupted.\n if terminate_event.wait(timeout=5 * 60):\n raise KeyboardInterrupt()\n\n exectools.retry(\n retries=3, wait_f=wait,\n task_f=lambda: self._build_container(\n target_image, odcs, repo_type, repo, terminate_event,\n scratch, record))\n\n # Just in case someone else is building an image, go ahead and find what was just\n # built so that push_image will have a fixed point of reference and not detect any\n # subsequent builds.\n push_version, push_release = ('','')\n if not scratch:\n _, push_version, push_release = self.metadata.get_latest_build_info()\n record[\"message\"] = \"Success\"\n record[\"status\"] = 0\n self.build_status = True\n\n except (Exception, KeyboardInterrupt):\n tb = traceback.format_exc()\n record[\"message\"] = \"Exception occurred:\\n{}\".format(tb)\n self.logger.info(\"Exception occurred during build:\\n{}\".format(tb))\n # This is designed to fall through to finally. Since this method is designed to be\n # threaded, we should not throw an exception; instead return False.\n finally:\n # Regardless of success, allow other images depending on this one to progress or fail.\n self.build_lock.release()\n\n self.push_status = True # if if never pushes, the status is True\n if not scratch and self.build_status and (push_to_defaults or additional_registries):\n # If this is a scratch build, we aren't going to be pushing. 
We might be able to determine the\n # image name by parsing the build log, but not worth the effort until we need scratch builds.\n # The image name for a scratch build looks something like:\n # brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/openshift3/ose-base:rhaos-3.7-rhel-7-docker-candidate-16066-20170829214444\n\n # To ensure we don't overwhelm the system building, pull & push synchronously\n with self.runtime.mutex:\n self.push_status = False\n try:\n self.push_image([], push_to_defaults, additional_registries, version_release_tuple=(push_version, push_release))\n self.push_status = True\n except Exception as push_e:\n self.logger.info(\"Error during push after successful build: %s\" % str(push_e))\n self.push_status = False\n\n record['push_status'] = '0' if self.push_status else '-1'\n\n self.runtime.add_record(action, **record)\n return self.build_status and self.push_status", "def test_force_with_local_image(self, mock_docker_environment):\n build_image_if_needed(TEST_IMAGE_NAME, force=True)\n mock_docker_environment.images.build.assert_called_with(**self.default_call_kwargs)", "def check_for_docker_compose_file():\n return os.path.isfile(DOCKER_COMPOSE_FILE)", "def create_octree_image() -> bool:\n return async_octree and (create_image_type != CREATE_IMAGE_NORMAL)", "def _from_snapshot_request(pre_image, image):\n if pre_image.status == 'queued' and len(image.locations) == 1:\n loc_meta = image.locations[0]['metadata']\n return loc_meta and loc_meta.get('image_from', None) in ['snapshot',\n 'volume']", "def exists_image_in_database(full_image_url):\r\n\r\n logging.debug('exists_image_in_database({})'.format(full_image_url))\r\n\r\n dir_path = os.path.join(os.environ['LOCALAPPDATA'],'WarietyWallpaperImages')\r\n db_file = os.path.join(dir_path,'wariety.db')\r\n conn = sqlite3.connect(db_file)\r\n c = conn.cursor()\r\n\r\n # Select a row\r\n c.execute(\"SELECT id FROM wallpapers WHERE iurl = ?\", (full_image_url,))\r\n\r\n if c.fetchone() is not None:\r\n conn.close()\r\n logging.debug('exists_image_in_database - True')\r\n return True\r\n else:\r\n conn.close()\r\n logging.debug('exists_image_in_database - False')\r\n return False", "def required_images(self):\n required = set()\n deployment_type = self.get_var(\"openshift_deployment_type\")\n host_groups = self.get_var(\"group_names\")\n # containerized etcd may not have openshift_image_tag, see bz 1466622\n image_tag = self.get_var(\"openshift_image_tag\", default=\"latest\")\n image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]\n\n # template for images that run on top of OpenShift\n image_url = \"{}/{}-{}:{}\".format(image_info[\"namespace\"], image_info[\"name\"], \"${component}\", \"${version}\")\n image_url = self.get_var(\"oreg_url\", default=\"\") or image_url\n if 'nodes' in host_groups:\n for suffix in NODE_IMAGE_SUFFIXES:\n required.add(image_url.replace(\"${component}\", suffix).replace(\"${version}\", image_tag))\n # The registry-console is for some reason not prefixed with ose- like the other components.\n # Nor is it versioned the same, so just look for latest.\n # Also a completely different name is used for Origin.\n required.add(image_info[\"registry_console_image\"])\n\n # images for containerized components\n if self.get_var(\"openshift\", \"common\", \"is_containerized\"):\n components = set()\n if 'nodes' in host_groups:\n components.update([\"node\", \"openvswitch\"])\n if 'masters' in host_groups: # name is \"origin\" or \"ose\"\n components.add(image_info[\"name\"])\n for component in components:\n 
required.add(\"{}/{}:{}\".format(image_info[\"namespace\"], component, image_tag))\n if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise\n required.add(\"registry.access.redhat.com/rhel7/etcd\") # and no image tag\n\n return required", "def _check_grib(self, url):\n head = requests.head(url)\n check_exists = head.ok\n if check_exists:\n check_content = int(head.raw.info()['Content-Length']) > 1_000_000\n return check_exists and check_content\n else:\n return False", "def _check_image_is_supported(self):\n\t\tSUPPORTED = {}\n\t\tSUPPORTED['RECORD_TYPE'] = 'FIXED_LENGTH',\n\t\tSUPPORTED['SAMPLE_BITS'] = 8, 16\n\t\tSUPPORTED['SAMPLE_TYPE'] = ( 'UNSIGNED_INTEGER',\n\t\t\t\t'MSB_UNSIGNED_INTEGER',\n\t\t\t\t'LSB_INTEGER',\n\t\t\t\t'MSB_INTEGER'\n\t\t\t\t)\n\n\t\timageIsSupported = True\n\n\t\tif not self.labels.has_key('IMAGE'):\n\t\t\tif self.log: self.log.warn(\"No image data found\")\n\t\t\timageIsSupported = False\n\n\t\trecordType = self.labels['RECORD_TYPE']\n\t\timageSampleBits = int(self.labels['IMAGE']['SAMPLE_BITS'])\n\t\timageSampleType = self.labels['IMAGE']['SAMPLE_TYPE']\n\n\t\tif recordType not in SUPPORTED['RECORD_TYPE']:\n\t\t\terrorMessage = (\"RECORD_TYPE '%s' is not supported\") % (recordType)\n\t\t\tif self.raisesImageNotSupportedError:\n\t\t\t\traise ImageNotSupportedError(errorMessage)\n\t\t\timageIsSupported = False\n\t\tif imageSampleBits not in SUPPORTED['SAMPLE_BITS']:\n\t\t\terrorMessage = (\"SAMPLE_BITS '%s' is not supported\") % (imageSampleBits)\n\t\t\tif self.raisesImageNotSupportedError:\n\t\t\t\traise ImageNotSupportedError(errorMessage)\n\t\t\timageIsSupported = False\n\t\tif imageSampleType not in SUPPORTED['SAMPLE_TYPE']:\n\t\t\terrorMessage = (\"SAMPLE_TYPE '%s' is not supported\") % (imageSampleType)\n\t\t\tif self.raisesImageNotSupportedError:\n\t\t\t\traise ImageNotSupportedError(errorMessage)\n\t\t\timageIsSupported = False\n\n\t\treturn imageIsSupported", "def hasImages(self):\n\n if len(self._images) > 0:\n return 1\n for s in self._subdirs:\n if s.hasImages():\n return 1\n return 0", "def is_openimageio_installed(raise_exception=False):\n\n try:\n import OpenImageIO # noqa\n\n return True\n except ImportError as error:\n if raise_exception:\n raise ImportError(('\"OpenImageIO\" related Api features '\n 'are not available: \"{0}\".').format(error))\n return False", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def _is_docker(self) -> bool:\n from hubble.executor.helper import is_valid_docker_uri\n\n uses = getattr(self.args, 'uses', '')\n return is_valid_docker_uri(uses)", "def cached(self, args) -> bool:\n return all([art.built for art in self.artifacts])", "def image_tests_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"image_tests_enabled\")", "def check_image_useful(image_path):\n lights_on = lights_are_on(image_path)\n if not lights_on:\n os.remove(image_path)", "def is_available(self):\n return bool(FileUtil(\"curl\").find_exec())", "def 
checkBuildStatus(self):\n pass", "def canBuild(self, game_map) -> bool:\n cell = game_map.getCellByPos(self.pos)\n if not cell.hasResource() and self.canAct() and (self.cargo[\"wood\"] + self.cargo[\"coal\"] + self.cargo[\"uranium\"]) >= GAME_CONSTANTS[\"PARAMETERS\"][\"CITY_BUILD_COST\"]:\n return True\n return False", "def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False", "def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False", "def check_pool_exist(pool_name: str) -> bool:\n if not pool_name:\n return False\n return os.path.exists(constant.work_dir + \"/pool/\" + pool_name)", "def exists(self):\n\t\tif self.hasUdim:\n\t\t\treturn len( self.udimPaths ) != 0\n\t\treturn super( textureFile, self ).exists", "def is_image(mine=None, file=None):\n if file:\n mine = get_file_mine(file)\n print(mine)\n if mine:\n return mine.find('image') != -1\n\n return False", "def available(self):\n return not os.path.exists(self.lockfile)", "def verify(image_path):\n try:\n with Image.open(image_path) as img:\n img.verify()\n return True\n except Exception as e:\n log.warn('Path [{}] does not point to an image: [{}]'.format(image_path, e))\n return False", "def DoAll(self):\n flags = ['--hwtest']\n date_str = datetime.date.today()\n description = 'master_%s_%s_%s' % (self._patches_string, self._build,\n date_str)\n _ = buildbot_utils.GetTrybotImage(\n self._chromeos_root,\n self._build,\n self._patches,\n description,\n other_flags=flags,\n async=True)\n\n return 0", "def validate_image(path):\n problems = False\n # Rasterio env is required to make sure that the gdal bindings are setup correctly.\n with rasterio.Env():\n try:\n dataset = rasterio.open(path)\n except Exception as e:\n logging.error(\"Could not open dataset\", e)\n return False\n\n # Check the bands have sort of sensible values\n if dataset.count != args.bands:\n logging.error(f\"There is not the required number of bands. Expected {args.bands} found {dataset.count}\")\n problems = True\n\n if not data_validation.check_data(dataset):\n problems = True\n\n # Validate coordinate box doesn't cover the origin.\n # Also make sure that it has valid coordinates.\n if dataset.transform:\n top_left = dataset.transform * (0, 0)\n bottom_right = dataset.transform * (dataset.width, dataset.height)\n if np.sign(bottom_right[0]) != np.sign(top_left[0]) and np.sign(bottom_right[1]) != np.sign(top_left[1]):\n logging.error(f\"Data set appears to be over the origin of the coordinate space.\")\n problems = True\n else:\n logging.error(f\"Dataset transform is missing.\")\n problems = True\n return not problems # return true if the image is valid", "def check_build_exists(self):\n path = self.base_dir + \"/\" + self.app_name + \"/\" + \"build\"\n from django_swagger_utils.core.utils.check_path_exists import check_path_exists\n build_dir = check_path_exists(path)\n if build_dir:\n raise Exception(\"Build Directory Already Exist, please run update_specs_build\")", "def built_file_must_exist(self, name, type=None, **kw):\n return self.must_exist(self.built_file_path(name, type, **kw))", "def is_active(self):\n deployment_type = self.get_var(\"openshift_deployment_type\")\n has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO\n\n return super(DockerImageAvailability, self).is_active() and has_valid_deployment_type" ]
[ "0.74606645", "0.7137432", "0.70654684", "0.6916333", "0.6890094", "0.6849104", "0.68168086", "0.6784783", "0.6744717", "0.66717255", "0.6600988", "0.6536723", "0.65264153", "0.65185374", "0.650834", "0.6502069", "0.6478776", "0.6386925", "0.6359315", "0.6355004", "0.6343484", "0.6323211", "0.632266", "0.62907976", "0.6287016", "0.62540287", "0.6188263", "0.615947", "0.6124011", "0.6123703", "0.6101383", "0.6101383", "0.6066439", "0.6063898", "0.60398036", "0.5990924", "0.59693485", "0.59639716", "0.59639716", "0.59284574", "0.59126776", "0.5886342", "0.5849621", "0.5847302", "0.58057517", "0.57994074", "0.5798516", "0.5797246", "0.57869756", "0.5763942", "0.57462263", "0.5741366", "0.57318074", "0.5695969", "0.5676758", "0.5657037", "0.56526697", "0.5638159", "0.56344914", "0.56333405", "0.5617468", "0.5617101", "0.5612994", "0.5611214", "0.56080997", "0.5603881", "0.5591065", "0.5577012", "0.5569944", "0.5562726", "0.5539967", "0.55360353", "0.5535294", "0.5535251", "0.55225813", "0.55208766", "0.55200714", "0.54995656", "0.5496546", "0.5494335", "0.5494335", "0.5489435", "0.54850197", "0.5481857", "0.5481434", "0.5478628", "0.5472268", "0.5470964", "0.5466167", "0.54657847", "0.54618335", "0.54549813", "0.54522437", "0.5447159", "0.5445383", "0.54311866", "0.5419355", "0.54129505", "0.54127324", "0.5407638" ]
0.8575527
0
Build a collection of docker images
def build_images(prefix, images, tag=None, commit_range=None, push=False, chart_version=None, skip_build=False): value_modifications = {} for name, options in images.items(): image_path = options.get('contextPath', os.path.join('images', name)) image_tag = tag # include chartpress.yaml itself as it can contain build args and # similar that influence the image that would be built paths = list(options.get('paths', [])) + [image_path, 'chartpress.yaml'] last_commit = last_modified_commit(*paths) if tag is None: if chart_version: image_tag = "{}-{}".format(chart_version, last_commit) else: image_tag = last_commit image_name = prefix + name image_spec = '{}:{}'.format(image_name, image_tag) value_modifications[options['valuesPath']] = { 'repository': image_name, 'tag': SingleQuotedScalarString(image_tag), } if skip_build: continue template_namespace = { 'LAST_COMMIT': last_commit, 'TAG': image_tag, } if tag or image_needs_building(image_spec): build_args = render_build_args(options, template_namespace) build_image(image_path, image_spec, build_args, options.get('dockerfilePath')) else: print(f"Skipping build for {image_spec}, it already exists") if push: if tag or image_needs_pushing(image_spec): check_call([ 'docker', 'push', image_spec ]) else: print(f"Skipping push for {image_spec}, already on registry") return value_modifications
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_docker_images(self):\n print(f\"+ building {len(self.neurodocker_specs)} Docker images\")\n self.docker_status = []\n for sha1, neurodocker_dict in self.neurodocker_specs.items():\n try:\n print(\"++ building image: {}\".format(neurodocker_dict))\n cg.docker_main(\n self.working_dir,\n neurodocker_dict,\n sha1,\n build_context=self.build_context,\n )\n self.docker_status.append(\"docker ok\")\n except Exception as e:\n self.docker_status.append(\n \"failed to build image with SHA1 {}: {}\".format(sha1, e)\n )", "def build_container(client):\n client.images.build(path=os.path.join(os.path.abspath(\"\"), \"docker\"), tag=\"scrape_light\")", "def build_all_images_for_release(lang, release):\n docker_images = []\n build_jobs = []\n\n env = {}\n # If we not using current tree or the sibling for grpc stack, do checkout.\n stack_base = \"\"\n if args.git_checkout:\n stack_base = checkout_grpc_stack(lang, release)\n var = {\n \"go\": \"GRPC_GO_ROOT\",\n \"java\": \"GRPC_JAVA_ROOT\",\n \"node\": \"GRPC_NODE_ROOT\",\n }.get(lang, \"GRPC_ROOT\")\n env[var] = stack_base\n\n for runtime in client_matrix.get_runtimes_for_lang_release(lang, release):\n job = build_image_jobspec(runtime, env, release, stack_base)\n docker_images.append(job.tag)\n build_jobs.append(job)\n\n jobset.message(\"START\", \"Building interop docker images.\", do_newline=True)\n print(\"Jobs to run: \\n%s\\n\" % \"\\n\".join(str(j) for j in build_jobs))\n\n num_failures, _ = jobset.run(\n build_jobs, newline_on_success=True, maxjobs=multiprocessing.cpu_count()\n )\n if num_failures:\n jobset.message(\n \"FAILED\", \"Failed to build interop docker images.\", do_newline=True\n )\n docker_images_cleanup.extend(docker_images)\n sys.exit(1)\n\n jobset.message(\n \"SUCCESS\",\n \"All docker images built for %s at %s.\" % (lang, release),\n do_newline=True,\n )\n\n if release != \"master\":\n commit_log = os.path.join(stack_base, \"commit_log\")\n if os.path.exists(commit_log):\n for image in docker_images:\n add_files_to_image(image, [commit_log], \"release=%s\" % release)\n return docker_images", "def build_all_images_for_lang(lang):\n if not args.git_checkout:\n if args.release != \"master\":\n print(\n \"Cannot use --release without also enabling --git_checkout.\\n\"\n )\n sys.exit(1)\n releases = [args.release]\n else:\n if args.release == \"all\":\n releases = client_matrix.get_release_tags(lang)\n else:\n # Build a particular release.\n if args.release not in [\"master\"] + client_matrix.get_release_tags(\n lang\n ):\n jobset.message(\n \"SKIPPED\",\n \"%s for %s is not defined\" % (args.release, lang),\n do_newline=True,\n )\n return []\n releases = [args.release]\n\n images = []\n for release in releases:\n images += build_all_images_for_release(lang, release)\n jobset.message(\n \"SUCCESS\",\n \"All docker images built for %s at %s.\" % (lang, releases),\n do_newline=True,\n )\n return images", "def build_docker(c):\n tag = c.run('git describe', hide=True)\n docker_img = f'{docker_repo}:{tag.stdout.strip()}'\n c.run(f'docker build -t {docker_img} .')", "def build(images, tag, quiet):\n # images to build\n build_images = None\n\n # list of available images\n image_list = build_image_map().keys()\n\n # image specified: check if it exists\n if images:\n build_images = []\n for check_name in images:\n if check_name in image_list:\n build_images.append(check_name)\n continue\n raise click.ClickException(\n \"invalid image name '{}'\".format(check_name))\n click.echo('building images {}'.format(', 
'.join(build_images)))\n # no image specified: build all\n else:\n click.echo('building images {}'.format(', '.join(image_list)))\n\n manager = Manager('build', tag, images=build_images, verbose=not quiet)\n manager.run()", "def containers_from_image(image_name):\n jobs = Queue(maxsize=0)\n containers = list(\n filter(\n lambda x: image_name in x.attrs['Config']['Image'],\n doxy.containers.list()\n )\n )\n for container in containers:\n jobs.put(container)\n return jobs", "def docker_build(c):\n cli_tasks.docker_build.run(c)", "def pull_dlc_images(images):\n for image in images:\n run(f\"docker pull {image}\", hide=\"out\")", "def get_images(name):\n url = \"/\".join([REGISTRY_BASE, name, \"/tags/list\"])\n response = req(url)\n image_list = []\n if response is not None:\n headers = {\"Accept\": \"application/vnd.docker.distribution.manifest.v2+json\"}\n tags = response[\"tags\"]\n for tag in tags:\n url = \"/\".join([REGISTRY_BASE, name, \"/manifests\", tag])\n response = req(url, headers)\n if response is not None:\n image = {}\n image[\"size\"] = response[\"config\"][\"size\"]\n for i in response[\"layers\"]:\n image[\"size\"] += i[\"size\"]\n image[\"size\"] = round(float(image[\"size\"]) / 1024 / 1024, 2)\n image[\"id\"] = response[\"config\"][\"digest\"][7:19]\n image[\"tag\"] = tag\n image[\"cmd\"] = \"docker pull uk8s.com/\" + name + \":\" + tag\n image_list.append(image)\n return sorted(image_list, reverse=True)", "def make(tag_masks: str = \"*\", poetry_version: str = \"master\"):\n tags = requests.get(\n \"https://registry.hub.docker.com/v1/repositories/python/tags\"\n ).json()\n\n def match_tag(tag) -> bool:\n tag_name = tag[\"name\"]\n return [\n tag_mask\n for tag_mask in tag_masks\n if tag_mask == \"*\" or fnmatch.fnmatch(tag_name, tag_mask)\n ]\n\n tags = list(filter(match_tag, tags))\n\n click.echo(f\"Found {len(tags)} tags.\")\n click.echo(\"Generating \", nl=False)\n\n docker_3_template = Path(\"./Dockerfile-3.template\").read_text(\"utf8\")\n docker_2_template = Path(\"./Dockerfile-2.template\").read_text(\"utf8\")\n\n for tag in tags:\n tag_name = tag[\"name\"]\n\n docker_template = docker_3_template\n\n try:\n tag_major_version = int(tag_name[0])\n tag_major_path = Path(str(tag_major_version))\n try:\n tag_major_path.mkdir()\n except FileExistsError:\n pass\n tag_path = tag_major_path / Path(tag_name)\n if tag_major_version == 2:\n docker_template = docker_2_template\n except ValueError:\n tag_path = Path(tag_name)\n\n try:\n tag_path.mkdir()\n except FileExistsError:\n pass\n\n (tag_path / \"Dockerfile\").write_text(\n docker_template.format(python_tag=tag_name, poetry_version=poetry_version)\n )\n click.echo(\".\", nl=False)\n click.echo(\" Done.\")", "def images_refresh(self, kwargs=None):\n scode, hosts = Rest.get('Host')\n filter = {}\n n = 1\n e = {}\n data = []\n for host in hosts:\n os.environ[\"DOCKER_HOST\"] = host['Ip'] + \":\" + str(host['Port'])\n filter['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n self.client = docker.from_env()\n try:\n images = self.client.images.list(**kwargs)\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(images) == 0:\n Console.info(\"No images exist\")\n continue\n\n for imagem in images:\n image = imagem.__dict__['attrs']\n image['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n data.append(image)\n d = {}\n d['Ip'] = os.environ[\"DOCKER_HOST\"].split(':')[0]\n d['Id'] = image['Id']\n if image['RepoTags'] == None:\n d['Repository'] = image['RepoDigests'][0]\n else:\n 
d['Repository'] = image['RepoTags'][0]\n # d['Size'] = image['Size']\n d['Size(GB)'] = round(image['Size'] / float(1 << 30), 2)\n e[n] = d\n n = n + 1\n Rest.delete('Image', filter)\n Rest.post('Image', data)\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Repository', 'Size(GB)'])))", "def images_list(self, kwargs=None):\n\n try:\n scode, images = Rest.get('Image')\n except docker.errors.APIError as e:\n Console.error(e.explanation)\n return\n\n if len(images) == 0:\n Console.info(\"No images exist\")\n return\n\n n = 1\n e = {}\n for image in images:\n d = {}\n d['Ip'] = image['Ip']\n d['Id'] = image['Id']\n if image['RepoTags'] == None:\n d['Repository'] = image['RepoDigests'][0]\n else:\n d['Repository'] = image['RepoTags'][0]\n # d['Size'] = image['Size']\n d['Size(GB)'] = round(image['Size'] / float(1 << 30), 2) # Converting the size to GB\n e[n] = d\n n = n + 1\n Console.ok(str(Printer.dict_table(e, order=['Ip', 'Id', 'Repository', 'Size(GB)'])))", "def build_container_image(self) -> None:\n print_version_of_tools()\n try:\n self.fs_watcher.start()\n runner = PluginsRunner(self,\n self.plugins_conf,\n self.plugin_files,\n self.keep_plugins_running,\n plugins_results=self.data.plugins_results)\n runner.run()\n finally:\n self.fs_watcher.finish()", "def imageutil(self) -> imageutil.ImageUtil:\n return imageutil.ImageUtil(self.data.dockerfile_images, self.conf)", "def list_docker_images():\n raw_result = subprocess.getstatusoutput('docker images')\n return result_handler(raw_result)", "def get_images_to_build(fuzzers, benchmarks):\n images = {}\n templates = _get_image_type_templates()\n for fuzzer in fuzzers:\n for benchmark in benchmarks:\n for name_templ, obj_templ in templates.items():\n name, obj = _instantiate_image_obj(name_templ, obj_templ,\n fuzzer, benchmark)\n images[name] = obj\n return images", "def main():\n parser = argparse.ArgumentParser(\n epilog=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\n \"-d\", \"--dry-run\", action=\"store_true\", default=0, help=\"Dry run mode.\"\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"Verbosity. Default is WARNING level.\",\n )\n\n subparsers = parser.add_subparsers(help=\"Sub commands\", dest=\"subparser\")\n subparsers.required = True\n\n build_parser = subparsers.add_parser(\n \"build\",\n description=\"Build an image from Dockerfile, caching image hierarchy\",\n help=\"Build an image from a Dockerfile\",\n )\n build_parser.add_argument(\n \"path\", metavar=\"PATH\", help=\"The build context directory\"\n )\n build_parser.add_argument(\n \"-f\",\n \"--file\",\n help=\"Name of the Dockerfile. If not provided, \"\n \"will use config.DOCKERFILE_PATH_PATTERN to compute. \",\n )\n build_parser.add_argument(\n \"-v\",\n \"--git-sha\",\n required=True,\n help=\"The version of code to build against, \" \"will pass as GIT_SHA variable\",\n )\n build_parser.add_argument(\n \"-n\", \"--name\", required=True, help=\"The name of the image to build\"\n )\n build_parser.add_argument(\n \"--build-arg\",\n metavar=\"ARG=VALUE\",\n nargs=\"*\",\n default=[],\n help=\"Set extra build-time variables. 
GIT_SHA, TIMESTAMP will be passed by default.\",\n )\n build_parser.add_argument(\n \"-r\",\n \"--raw\",\n action=\"store_true\",\n help=\"Whether to use raw docker build command to build, skipping caching logic\",\n )\n build_parser.add_argument(\n \"--registry\",\n default=config.DOCKER_REGISTRY,\n help=\"Docker registry use to determine the image identity, \"\n \"can be set via IMAGE_BUILDER_DOCKER_REGISTRY environment variable, \"\n 'or set DOCKER_REGISTRY in config.py. Default is \"%(default)s\"',\n )\n build_parser.add_argument(\n \"-t\",\n \"--tag-pattern\",\n default=config.GIT_SHA_TAG_PATTERN,\n help=\"Tag pattern, can only include one `{git_sha}` placeholder, \"\n 'such as \"{git_sha}-new\". If the tag exists, we won\\'t rebuild it. '\n 'Default is \"%(default)s\"',\n )\n build_parser.add_argument(\n \"-e\",\n \"--extra-tag\",\n nargs=\"*\",\n default=[],\n help=\"Extra tags to tag to the final images\",\n )\n build_parser.add_argument(\n \"--extra-name\",\n nargs=\"*\",\n default=[],\n help=\"Extra name and optionally with a tag in the 'name:tag' format\",\n )\n build_parser.add_argument(\n \"-o\", \"--output-hash\", help=\"The output filename of the files hash log.\"\n )\n build_parser.set_defaults(func=build)\n\n args = parser.parse_args()\n if args.dry_run:\n # DRY_RUN env will be read in image_builder.libs.process\n os.environ[\"DRY_RUN\"] = \"1\"\n\n if args.func == build:\n args.path = expand_path(args.path)\n if args.output_hash:\n args.output_hash = expand_path(args.output_hash)\n\n args.file = args.file or locate_dockerfile(args.name)\n args.file = expand_path(args.file)\n # set environ for main dockerfile for possibly retrieving later\n os.environ[\n config.DOCKERFILE_ENV_PATTERN.format(image_name=args.name)\n ] = args.file\n\n # change CWD to PATH\n os.chdir(args.path)\n\n if not args.registry:\n parser.error(\n \"--registry should be provied \"\n \"or specified by IMAGE_BUILDER_DOCKER_REGISTRY environment variable or set DOCKER_REGISTRY in config.py\"\n )\n if not all(\"=\" in kv for kv in args.build_arg):\n parser.error(\"--build_arg must be in ARG=VALUE format\")\n\n # set git_sha_tag\n try:\n args.git_sha_tag = args.tag_pattern.format(git_sha=args.git_sha)\n except KeyError:\n parser.error(\n 'Wrong --tag-pattern provided. 
Can only include one `{git_sha}` placeholder, such as \"{git_sha}-new\"'\n )\n\n # setup logging\n level = logging.WARNING - args.verbose * 10\n logging.basicConfig(\n level=level, format=\"%(asctime)s %(name)s %(levelname)s %(message)s\"\n )\n\n if args.output_hash:\n h = logging.FileHandler(args.output_hash)\n h.setLevel(logging.DEBUG)\n h.setFormatter(logging.Formatter(\"%(message)s\"))\n hash_logger.addHandler(h)\n\n # Suppress warning when we don't verify ssl\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n return args.func(args)", "def run_build_image(image_list):\n success_image_list = list()\n failed_image_list = list()\n for image in image_list:\n docker_build_success_flag = image.run_docker_build()\n if not docker_build_success_flag:\n failed_image_list.append(image)\n continue\n smoke_test_sccess_flag = image.run_smoke_test()\n if not smoke_test_sccess_flag:\n failed_image_list.append(image)\n continue\n success_image_list.append(image)\n\n logger.info(\"--- docker build summary ---\")\n logger.info(\"following image succeed:\")\n for image in success_image_list:\n logger.info(f\" {image.local_identifier}\")\n logger.info(\"following image failed:\")\n for image in failed_image_list:\n logger.info(f\" {image.local_identifier}\")\n return success_image_list, failed_image_list", "def build_and_push_3p_images():\n\n log.info(\n \"Building Lambda Stack docker images (these are not on DockerHub). \"\n \"FMI see https://github.com/lambdal/lambda-stack-dockerfiles\")\n run_cmd(\"\"\"\n cd /tmp &&\n (rm -rf lambda-stack-dockerfiles || true) &&\n git clone https://github.com/lambdal/lambda-stack-dockerfiles &&\n cd lambda-stack-dockerfiles &&\n git checkout d762400d61636c074533416674426a84cc4d8992 &&\n docker build -t oarphpy/lambda-stack:22.04 -f Dockerfile.jammy . 
&&\n docker push oarphpy/lambda-stack:22.04\n \"\"\")", "def pullall():\n\tprint(red('\\t\\tStarting download of QNIBTerminal images\\t\\t'))\n\t# pull all the needed images\n\tdocker_images={'fd20','terminal','helixdns','elk','slurm','compute'}\n\tfor image in docker_images:\n\t\tprint 'docker pull qnib/'+image\n\t\t# run('docker pull qnib/'+image)", "def prepare():\n sh('docker build --rm -t {image} {dir}', image=IMAGE, dir=os.path.dirname(__file__))", "def _create_dockerfile(self, commands):\n import Utils\n\n user_id = Utils.get_sudo_user_id()\n dockerfile = '''FROM ubuntu:14.04\\nRUN apt-get update\\n\\n# Add user ubuntu.\\nRUN useradd -u {0} -ms /bin/bash ubuntu\\n\n # Set up base environment.\\nRUN apt-get install -yy \\ \\n software-properties-common \\ \\n\n python-software-properties \\ \\n wget \\ \\n curl \\ \\n git \\ \\n ipython \\ \\n sudo \\ \\n\n screen \\ \\n iptables \\nRUN echo \"ubuntu ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers\n \\nWORKDIR /home/ubuntu\\n\\nUSER ubuntu\\nENV HOME /home/ubuntu'''.format(user_id)\n\n flag = False\n\n for entry in commands:\n if isinstance(entry, list):\n dockerfile += '''\\n\\nRUN '''\n first = True\n flag = False\n for sub_entry in entry:\n if first is True:\n dockerfile += self._preprocess(sub_entry)\n first = False\n else:\n dockerfile += ''' && \\ \\n ''' + self._preprocess(sub_entry)\n else:\n if flag is False:\n dockerfile += '''\\n\\nRUN '''\n flag = True\n dockerfile += self._preprocess(entry)\n else:\n dockerfile += ''' && \\ \\n ''' + self._preprocess(entry)\n\n dockerfile += '''\\n\\n\\n'''\n\n dockerfile_file = DockerProvider.__get_new_dockerfile_name()\n with open(dockerfile_file, 'w') as Dockerfile:\n Dockerfile.write(dockerfile)\n named_dockerfile = tempfile.NamedTemporaryFile()\n named_dockerfile.write(dockerfile)\n named_dockerfile.seek(0)\n\n return named_dockerfile, dockerfile_file", "def set_dockerfile_images(self, images: Union[DockerfileImages, List[str]]):\n if not isinstance(images, DockerfileImages):\n images = DockerfileImages(images)\n self.workflow.data.dockerfile_images = images\n return self", "def docker():\n try:\n client = docker_from_env(\n version=os.environ.get('DOCKER_API_VERSION', '1.24'))\n\n containers = []\n\n for container in client.containers.list():\n include_container = False\n if INTERESTING_CONTAINERS.search(container.name):\n include_container = True\n else:\n for tag in container.image.attrs.get('RepoTags', []):\n if INTERESTING_TAGS.match(tag):\n include_container = True\n break\n\n if not include_container:\n continue\n\n docker_metrics = {\n \"stats_type\": \"docker\",\n \"docker\": {\n \"id\": container.short_id,\n \"name\": container.name,\n \"status\": container.status,\n \"labels\": [\"%s=%s\" % (k, v)\n for k, v in container.labels.items()],\n \"tags\": container.image.attrs['RepoTags'],\n 'created': container.image.attrs['Created'],\n }\n }\n if 'version' in container.labels:\n docker_metrics['docker']['image_version'] = \\\n container.labels['version']\n containers.append(docker_metrics)\n\n except Exception as exc:\n logging.debug(\"Error gathering Docker info: %s\", exc)\n return []\n\n return containers", "def pull_base_images(ctx, dockerfile, signed_pull=True):\n images = set()\n stages = set()\n\n with open(dockerfile, \"r\") as f:\n for line in f:\n words = line.split()\n # Get source images\n if len(words) < 2 or words[0].lower() != \"from\":\n continue\n images.add(words[1])\n # Get stage names to remove them from pull\n if len(words) < 4 or words[2].lower() != \"as\":\n 
continue\n stages.add(words[3])\n\n if stages:\n print(\"Ignoring intermediate stage names: {}\".format(\", \".join(stages)))\n images -= stages\n\n print(\"Pulling following base images: {}\".format(\", \".join(images)))\n\n pull_env = {}\n if signed_pull:\n pull_env[\"DOCKER_CONTENT_TRUST\"] = \"1\"\n\n for i in images:\n ctx.run(\"docker pull {}\".format(i), env=pull_env)", "def docker_worker():", "def build(self):\n self._remove_swarm_keys()\n self._remove_pod_keys()\n self._set_image()\n self._translate_docker_properties()", "def all(tag=None, env=None):\n build_images(tag=tag)\n push_images(tag=tag)\n deploy(env=env, tag=tag)", "def build_filler_images(self):", "def build(parser):\n parser.add_argument(\n '-i', '--identity-file',\n help=(\n 'A SSH private key file which may be used to pull down '\n 'repositories when building.'\n ),\n )\n parser.add_argument(\n '-e', '--env',\n action='append',\n default=[],\n help=(\n 'Add environ variables to the build. These may be accessed in '\n 'the build scripts. Each variable should be of the format '\n 'KEY=VALUE. This may be used to pass in credentials required '\n 'to access private repositories. May be specified more than once.'\n ),\n )\n parser.add_argument(\n '-b', '--build-dir',\n default=os.getcwd(),\n help=(\n 'This folder should be accessible from the docker instance.'\n ),\n )\n parser.add_argument(\n '--archive',\n help=(\n 'Archive the build files into a local tarball.'\n ),\n )\n parser.add_argument(\n '--archive-only',\n action='store_true',\n default=False,\n help=(\n 'Skip tagging and building the runner image.'\n ),\n )\n parser.add_argument(\n '-t', '--tag',\n help=(\n 'Tag to apply to the built image. '\n 'This will default to the current date/time.'\n ),\n )\n parser.add_argument(\n '--no-cache',\n dest='use_cache',\n action='store_false',\n default=True,\n help=(\n 'Do not mount a cache volume when compiling the app.'\n ),\n )\n parser.add_argument(\n '--cache',\n metavar='CONTAINER:PATH',\n help=(\n 'An optional volume or location for the cache. The format is '\n '\"<volume_id>:<path>\" where the \"volume_id\" must be the '\n 'name or hash of an existing volume. The \"path\" is an absolute '\n 'path to the cache folder/volume within the build container.'\n '\\n\\n'\n 'By default a container will be created by mangling the name of '\n 'the app by appending \"__buildcache\" (e.g. 
\"myapp__buildcache\").'\n '\\n\\n'\n 'This option is ignored if --no-cache is specified.'\n '\\n\\n'\n 'The \"volume_id\" may be an absolute path on the host filesystem.'\n '\\n\\n'\n 'The \"path\" may be dropped, in which case it will default to '\n '/tmp/cache inside the build container.'\n '\\n\\n'\n 'Examples:'\n '\\n\\n'\n ' # custom volume with default path\\n'\n ' --cache my_cache'\n '\\n\\n'\n ' # custom path inside of volume\\n'\n ' --cache my_cache:/tmp/cache'\n '\\n\\n'\n ' # host filesystem\\n'\n ' --cache /tmp/cache'\n ),\n )\n parser.add_argument(\n '--rebuild-cache',\n action='store_true',\n default=False,\n help=(\n 'Delete any cached artifacts prior to building.'\n ),\n )\n parser.add_argument(\n '--skip-cleanup',\n action='store_true',\n default=False,\n help=(\n 'Skip removal of images and containers.'\n ),\n )\n parser.add_argument(\n 'app',\n help=(\n 'Path to an application folder with a meta.yml file'\n ),\n )", "def _build(build_context,\n image_tag,\n image_name,\n nocache,\n credstore_env=None,\n registries=None):\n _logger.info('Starting build ...')\n\n # Build the image\n docker_builder = DockerBuilder(\n build_context=build_context,\n image_name=image_name,\n image_tag=image_tag,\n credstore_env=credstore_env,\n registries=registries,\n )\n docker_builder.login_private_registries()\n if docker_builder.check_image():\n # Image already built\n docker_builder.clean()\n return docker_builder\n if not docker_builder.build(nocache=nocache):\n docker_builder.clean()\n raise BuildException('The docker image could not be built.')\n return docker_builder", "def get_build_images(self) -> List[Image]:\n images = []\n image_names = []\n conflicting_names = []\n for registry_name, registry in self.registries.items():\n # if the registry is not marked as source, skip it\n if not registry.source:\n continue\n\n images += self.get_images_from_registry(registry)\n\n if conflicting_names:\n raise RuntimeError(\"Images found in multiple 'source' repositories: %s\", conflicting_names)\n\n return images", "def build_images(formulas, dir_images, quality=100, density=200, down_ratio=2,\n buckets=None, n_threads=4):\n init_dir(dir_images)\n existing_idx = sorted(set([int(file_name.split('.')[0]) for file_name in\n get_files(dir_images) if file_name.split('.')[-1] == \"png\"]))\n\n pool = Pool(n_threads)\n result = pool.map(build_image, [(idx, form, dir_images, quality, density,\n down_ratio, buckets) for idx, form in formulas.items()\n if idx not in existing_idx])\n pool.close()\n pool.join()\n\n result += [(str(idx) + \".png\", idx) for idx in existing_idx]\n\n return result", "def containers_init(self):\n\n def test_container(name, state, status):\n \"\"\"Creates test container. 
\"\"\"\n return {\n 'Image': \"alpine:3.7\",\n 'Command': \"/bin/sleep 999\",\n 'Labels': {'out': ''},\n 'State': state,\n 'Created': 1524205394,\n 'Status': status,\n 'Names': [\"/\" + name]\n }\n\n state_created = 'created'\n state_running = 'running'\n\n status_created = 'Created'\n status_up = 'Up 15 minutes'\n\n self.containers_list = [\n test_container(self.container_to_run,\n state_created, status_created),\n test_container(self.container_running,\n state_running, status_up),\n test_container(self.container_to_remove,\n state_created, status_created),\n ]\n\n CLIENT.containers_list.extend(self.containers_list)", "def cli(ctx, image_file):\n if not image_file:\n return\n for pull_image in image_file.readline():\n pull_image = pull_image.rstrip('\\n')\n if len(pull_image) == 0:\n continue\n docker.pull(pull_image)\n push_image = '%s/%s/%s' % (DEFAULT_REGISTRY,\n DEFAULR_NAMESPACE,\n pull_image.split('/')[-1])\n docker.tag(pull_image, push_image)\n docker.push(push_image)", "def handle_api_list_images(self, http_context):\n\n command = self.docker + ['images', '--format', '\\'{{json .}}\\'', '--no-trunc', '-a']\n images = []\n for line in subprocess.check_output(command).decode().splitlines():\n image = json.loads(line)\n image['hash'] = image['ID'].split(':')[1][:12]\n images.append(image)\n return images", "def _get_docker_images_for_lint(\n self,\n script_obj: Dict,\n script_id: str,\n docker_image_flag: str,\n docker_image_target: Optional[str],\n ) -> List[str]:\n log_prompt = f\"{self._pack_name} - Get All Docker Images For Lint\"\n logger.info(\n f\"{log_prompt} - Requested docker image flag is: '{docker_image_flag}'\"\n )\n imgs = []\n\n if (\n docker_image_flag == DockerImageFlagOption.FROM_YML.value\n ): # the default option\n # Desirable docker images are the docker images from the yml file (alt-dockerimages included)\n logger.info(f\"{self._pack_name} - Get Docker Image from YML - Started\")\n if imgs := get_docker_images_from_yml(script_obj):\n logger.info(\n f\"{log_prompt} - Docker images to run on are: {', '.join(imgs)}\"\n )\n return imgs\n\n di_from_yml = script_obj.get(\"dockerimage\")\n # If the 'dockerimage' key does not exist in yml - run on native image checks will be skipped\n native_image_config = (\n NativeImageConfig()\n ) # parsed docker_native_image_config.json file (a singleton obj)\n supported_native_images_obj = ScriptIntegrationSupportedNativeImages(\n _id=script_id,\n native_image_config=native_image_config,\n docker_image=di_from_yml,\n )\n supported_native_images = set(\n supported_native_images_obj.get_supported_native_image_versions(\n only_production_tags=False\n )\n )\n\n if docker_image_flag.startswith(DockerImageFlagOption.NATIVE.value):\n # Desirable docker image to run on is a native image\n\n self._check_native_image_flag(docker_image_flag)\n\n image_support = docker_image_flag\n if docker_image_flag == DockerImageFlagOption.NATIVE_TARGET.value:\n image_support = DockerImageFlagOption.NATIVE_DEV.value\n\n if native_image := self._get_native_image_name_from_config_file(\n image_support\n ):\n\n if self._is_native_image_support_script(\n native_image, supported_native_images, script_id\n ): # Integration/Script is supported by the requested native image\n native_image_ref: Optional[str] = \"\"\n\n if (\n docker_image_flag == DockerImageFlagOption.NATIVE_TARGET.value\n and docker_image_target\n ):\n # Desirable docker image to run is the target image only on native supported content.\n native_image_ref = docker_image_target\n\n elif 
docker_image_flag == DockerImageFlagOption.NATIVE_DEV.value:\n # Desirable docker image to run on is the dev native image - get the latest tag from Docker Hub\n native_image_ref = self._get_dev_native_image(script_id)\n\n else:\n # Desirable docker image to run on is a versioned native image - get the docker ref from the\n # docker_native_image_config.json\n native_image_ref = self._get_versioned_native_image(\n native_image\n )\n\n if native_image_ref:\n imgs.append(native_image_ref)\n logger.info(\n f\"{log_prompt} - Native image to run on is: {native_image_ref}\"\n )\n\n elif docker_image_flag == DockerImageFlagOption.ALL_IMAGES.value:\n # Desirable docker images are the docker images from the yml file, the supported versioned native images\n # and the dev native image\n if imgs := self._get_all_docker_images(\n script_obj, script_id, supported_native_images\n ):\n logger.info(\n f\"{log_prompt} - Docker images to run on are: {', '.join(imgs)}\"\n )\n\n else:\n # The flag is a specific docker image (from Docker Hub) or an invalid input -\n # In both cases we will try to run on the given input, if it does not exist in docker hub the run of lint\n # will fail later on.\n imgs.append(docker_image_flag)\n logger.info(\n f\"{log_prompt} - Docker image to run on is: {docker_image_flag}\"\n )\n\n return imgs", "def _do_build(self) -> List[types.Action]:\n return [\n docker_command.DockerRun(\n command=[\"/entrypoint.sh\", self.tag],\n builder=builder.GO_BUILDER,\n run_config=docker_command.default_run_config(\n constants.STORAGE_OPERATOR_ROOT / \"entrypoint.sh\"\n ),\n mounts=[\n utils.bind_mount(\n target=Path(\"/storage-operator\"),\n source=constants.STORAGE_OPERATOR_ROOT,\n ),\n # This container (through operator-sdk) will call `docker\n # build`, so we need to expose our Docker socket.\n utils.bind_mount(\n target=Path(\"/var/run/docker.sock\"),\n source=Path(\"/var/run/docker.sock\"),\n ),\n ],\n )\n ]", "def _Build(self, image):\n image = _ContainerImage(image)\n build_start = time.time()\n if not FLAGS.local_container_build:\n try:\n # Build the image remotely using an image building service.\n self.RemoteBuild(image)\n self.remote_build_times[image.name] = time.time() - build_start\n return\n except NotImplementedError:\n pass\n\n self.PrePush(image)\n # Build the image locally using docker.\n build_start = time.time()\n self.LocalBuildAndPush(image)\n self.local_build_times[image.name] = time.time() - build_start", "def images(self, **kwargs):\n\n raise NotImplementedError", "def build(build_context,\n image_tag,\n image_name,\n nocache,\n credstore_env=None,\n registries=None,\n max_retries=3,\n sleep_interval=1):\n retry = 0\n is_done = False\n while retry < max_retries and not is_done:\n try:\n docker_builder = _build(build_context=build_context,\n image_tag=image_tag,\n image_name=image_name,\n nocache=nocache,\n credstore_env=credstore_env,\n registries=registries)\n is_done = True\n docker_builder.clean()\n return docker_builder\n except ReadTimeoutError:\n retry += 1\n time.sleep(sleep_interval)\n if not is_done:\n raise BuildException('The docker image could not be built, client timed out.')", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def run_docker_push(image_list, docker_client):\n logger.info(\"--- push image to registry ---\")\n if runtime == Runtime.local:\n logger.info(\"Detected local runtime, stop here.\")\n return\n\n if not len(success_image_list):\n logger.info(\"No success image to push, stop here.\")\n 
return\n\n if GIT_BRANCH != \"master\":\n logger.info(\"Not master branch, stop here\")\n return\n\n docker_client.login(username=DOCKER_HUB_USERNAME, password=DOCKER_HUB_PASSWORD)\n for image in image_list:\n image.run_docker_push(docker_client)\n print(\"Finished.\")", "def make_images(self):\n self._images = [tree.to_image() for tree in self.reaction_trees]\n self._update_route_dict(self._images, \"image\")", "def create_preset_images(self):\n for f in sorted(self.get_files_from_data()):\n photoInstances = {}\n for preset in self.generator.settings[\"GALLERY_PRESETS\"]:\n preset_dir = \"%s%s%s\" % (self.absolute_output_path,\n os.sep, \n preset[\"name\"])\n photoInstances[preset[\"name\"]] = Photo(self, f, preset_dir, preset)\n \n self.photos.append(photoInstances)", "def build_image_jobspec(runtime, env, gcr_tag, stack_base):\n basename = \"grpc_interop_%s\" % runtime\n tag = \"%s/%s:%s\" % (args.gcr_path, basename, gcr_tag)\n build_env = {\"INTEROP_IMAGE\": tag, \"BASE_NAME\": basename}\n build_env.update(env)\n image_builder_path = _IMAGE_BUILDER\n if client_matrix.should_build_docker_interop_image_from_release_tag(lang):\n image_builder_path = os.path.join(stack_base, _IMAGE_BUILDER)\n build_job = jobset.JobSpec(\n cmdline=[image_builder_path],\n environ=build_env,\n shortname=\"build_docker_%s\" % runtime,\n timeout_seconds=30 * 60,\n )\n build_job.tag = tag\n return build_job", "def process_images(self):\n source_images = self.get_build_images()\n self.log.info(\"Got %s images for publishing. Processing..\", len(source_images))\n\n for image in source_images:\n self.fetch_image(image)\n\n for target in image.push_registries:\n for tag in image.release_tags:\n repository = \"%s/%s\" % (target, image.repository.name)\n self.tag_image(image, repository, tag)\n retry_count = 1\n while retry_count <= self.retry_limit:\n self.log.info(\"Pushing %s:%s to %s (%d/%d)\", repository, tag, target, retry_count, self.retry_limit)\n try:\n self.publish_image(target, repository, tag)\n break\n except ImagePushError as e:\n self.log.error(\"%s\", e.message)\n retry_count = retry_count + 1\n else:\n return False\n return True", "def get_image_collection(images_directory, variant, dimensions):\n \n stains = variant.get(\"values\", [])\n primary_stain = variant.get(\"primary_value\", None) \n\n image_data = []\n\n images_directory_glob = images_directory\n if images_directory_glob[-1] != os.sep:\n images_directory_glob = images_directory_glob + os.sep\n images_directory_glob = images_directory_glob + \"*\"\n\n for image_file in glob.glob(images_directory_glob):\n file_name = os.path.basename(image_file)\n\n image_stain = None\n image_root = file_name\n for stain in stains:\n image_root = image_root.replace(stain, \"\")\n if stain in file_name:\n image_stain = stain\n\n image_dimensions = {}\n\n for key, value in dimensions.iteritems():\n if value[\"type\"] == \"String Match\":\n image_dimensions[key] = \\\n get_dimension_string_match(file_name, value.get(\"data\", []))\n elif value[\"type\"] == \"Date Parse\":\n image_dimensions[key] = get_dimension_date_parse(file_name)\n\n experiment_name = experiment.get_experiment_name(file_name)\n experiment_date = experiment.get_experiment_date(file_name)\n\n seed_source_image = file_name\n if primary_stain not in seed_source_image and primary_stain is not None:\n for stain in stains:\n seed_source_image = seed_source_image.replace(stain, primary_stain)\n \n image_data.append({\n \"file_name\": file_name,\n \"file_root\": image_root,\n \"stain\": image_stain, # 
TODO: Deprecate\n \"experiment_name\": experiment_name, # TODO: Deprecate\n \"experiment_date\": experiment_date, # TODO: Deprecate\n \"seed_source_image\": seed_source_image,\n \"variant\": image_stain,\n \"dimensions\": image_dimensions\n })\n\n return image_data", "def docker(all_, command) -> None:\n if all_ is not None:\n docker_manager = DockerManager()\n getattr(docker_manager, f\"{all_}_all\")()\n else:\n if len(command):\n DockerManager.execute_command(command)", "def process_images():\n create_dirs()\n for root, dirs, files in os.walk(IN):\n for name in files:\n if name[0] == '.':\n continue\n process_image(name)", "def fetch_images(client, images):\n return [fetch_image(client, image) for image in images]", "def build_base():\n with lcd(env.local_path):\n put('./requirements.txt', '/srv/build/requirements.txt')\n\n with cd('/srv/build'):\n run('docker build -t {base_image_name} .'.format(\n base_image_name=env.base_image_name,\n ))", "def build(config, version_tag):\n config_dict = get_config(config)\n image_name = config_dict['IMAGE'].split(':')[0]\n image = '{}:{}'.format(image_name, version_tag)\n base_image = config_dict['BASE_IMAGE']\n\n cmd = 'docker build -t {image} --build-arg base_image={base_image} .'.format(image=image,\n base_image=base_image)\n with cd(env.project_dir):\n run(cmd)\n return image", "def build(self, images):\n\n shape = images.get_shape().as_list()\n images = tf.reshape(images,\n [shape[0] * shape[1], shape[2], shape[3], shape[4]])\n\n with slim.arg_scope(\n [slim.conv2d, slim.fully_connected],\n activation_fn=tf.nn.relu,\n weights_regularizer=slim.l2_regularizer(self._params.weight_decay_rate),\n biases_initializer=tf.zeros_initializer()):\n with slim.arg_scope([slim.conv2d], padding='SAME'):\n # convert the image to one hot if needed.\n if self._params.to_one_hot:\n net = tf.one_hot(\n tf.squeeze(tf.to_int32(images), axis=[-1]),\n self._params.one_hot_length)\n else:\n net = images\n\n p = self._params\n # Adding conv layers with the specified configurations.\n for conv_id, kernel_stride_channel in enumerate(\n zip(p.conv_sizes, p.conv_strides, p.conv_channels)):\n kernel_size, stride, channels = kernel_stride_channel\n net = slim.conv2d(\n net,\n channels, [kernel_size, kernel_size],\n stride,\n scope='conv_{}'.format(conv_id + 1))\n\n net = slim.flatten(net)\n net = slim.fully_connected(net, self._params.embedding_size, scope='fc')\n\n output = tf.reshape(net, [shape[0], shape[1], -1])\n return output", "def setup_images(self, images):\n if isinstance(images, str):\n images = [images]\n self._image_list.extend(images)", "def load_images(self, files, sub_dir):\n\n for f in files:\n self.images.append(Image(f, sub_dir))", "def resize_images(self, images):\n \n img_list = []\n \n for img in images:\n \n yield np.resize(img, (64, 64, 3))", "def build_container(\n self, odcs, repo_type, repo, push_to_defaults, additional_registries, terminate_event,\n scratch=False, retries=3):\n if self.org_image_name is None or self.org_version is None:\n if not os.path.isfile(os.path.join(self.distgit_dir, 'Dockerfile')):\n self.logger.info('No Dockerfile found in {}'.format(self.distgit_dir))\n else:\n self.logger.info('Unknown error loading Dockerfile information')\n return False\n\n action = \"build\"\n release = self.org_release if self.org_release is not None else '?'\n record = {\n \"dir\": self.distgit_dir,\n \"dockerfile\": \"%s/Dockerfile\" % self.distgit_dir,\n \"distgit\": self.metadata.name,\n \"image\": self.org_image_name,\n \"version\": 
self.org_version,\n \"release\": release,\n \"message\": \"Unknown failure\",\n \"task_id\": \"n/a\",\n \"task_url\": \"n/a\",\n \"status\": -1,\n \"push_status\": -1,\n # Status defaults to failure until explicitly set by success. This handles raised exceptions.\n }\n\n target_tag = \"-\".join((self.org_version, release))\n target_image = \":\".join((self.org_image_name, target_tag))\n\n try:\n if not scratch and self.org_release is not None \\\n and self.metadata.tag_exists(target_tag):\n self.logger.info(\"Image already built for: {}\".format(target_image))\n else:\n # If this image is FROM another group member, we need to wait on that group member\n # Use .get('from',None) since from is a reserved word.\n image_from = Model(self.config.get('from', None))\n if image_from.member is not Missing:\n self._set_wait_for(image_from.member, terminate_event)\n for builder in image_from.get('builder', []):\n if 'member' in builder:\n self._set_wait_for(builder['member'], terminate_event)\n\n # Allow an image to wait on an arbitrary image in the group. This is presently\n # just a workaround for: https://projects.engineering.redhat.com/browse/OSBS-5592\n if self.config.wait_for is not Missing:\n self._set_wait_for(self.config.wait_for, terminate_event)\n\n def wait(n):\n self.logger.info(\"Async error in image build thread [attempt #{}]\".format(n + 1))\n # Brew does not handle an immediate retry correctly, wait\n # before trying another build, terminating if interrupted.\n if terminate_event.wait(timeout=5 * 60):\n raise KeyboardInterrupt()\n\n exectools.retry(\n retries=3, wait_f=wait,\n task_f=lambda: self._build_container(\n target_image, odcs, repo_type, repo, terminate_event,\n scratch, record))\n\n # Just in case someone else is building an image, go ahead and find what was just\n # built so that push_image will have a fixed point of reference and not detect any\n # subsequent builds.\n push_version, push_release = ('','')\n if not scratch:\n _, push_version, push_release = self.metadata.get_latest_build_info()\n record[\"message\"] = \"Success\"\n record[\"status\"] = 0\n self.build_status = True\n\n except (Exception, KeyboardInterrupt):\n tb = traceback.format_exc()\n record[\"message\"] = \"Exception occurred:\\n{}\".format(tb)\n self.logger.info(\"Exception occurred during build:\\n{}\".format(tb))\n # This is designed to fall through to finally. Since this method is designed to be\n # threaded, we should not throw an exception; instead return False.\n finally:\n # Regardless of success, allow other images depending on this one to progress or fail.\n self.build_lock.release()\n\n self.push_status = True # if if never pushes, the status is True\n if not scratch and self.build_status and (push_to_defaults or additional_registries):\n # If this is a scratch build, we aren't going to be pushing. 
We might be able to determine the\n # image name by parsing the build log, but not worth the effort until we need scratch builds.\n # The image name for a scratch build looks something like:\n # brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/openshift3/ose-base:rhaos-3.7-rhel-7-docker-candidate-16066-20170829214444\n\n # To ensure we don't overwhelm the system building, pull & push synchronously\n with self.runtime.mutex:\n self.push_status = False\n try:\n self.push_image([], push_to_defaults, additional_registries, version_release_tuple=(push_version, push_release))\n self.push_status = True\n except Exception as push_e:\n self.logger.info(\"Error during push after successful build: %s\" % str(push_e))\n self.push_status = False\n\n record['push_status'] = '0' if self.push_status else '-1'\n\n self.runtime.add_record(action, **record)\n return self.build_status and self.push_status", "def setup(self):\n\n exists = [i for i in self.client.images() if self.image in i['RepoTags']]\n\n # Only pull the image if we don't have it\n if not exists or self.pull:\n self.client.pull(self.image)\n self.logger.debug(\"Pulled {}\".format(self.image))\n\n self.container = self.client.create_container(\n image=self.image,\n host_config=self.host_config,\n name=self.name,\n command=self.command,\n environment=self.environment\n )\n self.logger.debug(\"Created container {}\".format(self.container['Id']))", "def build(path, tag):\n print (\"*** BUILDING IS INITIATED\")\n try:\n response = client.images.build(path = path, tag=tag, rm=True)\n except Exception as ex:\n template = \"An exception of type {0} occured. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n click.echo(message)\n print(\"*** BUILD OUTPUT:\")\n for object in response[1]:\n click.echo(object)\n click.echo('*** BUILDING FINISHED')", "def build(image_name, path='.'):\n try:\n doxy.images.build(path=path, tag=image_name)\n message = '[*] Image {} built.'\n print message.format(image_name)\n except Exception as err:\n print err\n raise", "def buildImages(files, targets, type):\n images = []\n for file in files:\n targets.append(file)\n with open(file, \"rb\") as f:\n if type == \"Byte\":\n images.append(bytePlot(list(f.read())))\n elif type == \"Markov\":\n images.append(markovPlot(list(f.read())))\n elif type == \"Hilbert\":\n images.append(hilbertPlot(list(f.read())))\n smp.imsave(\"{}.png\".format(file), images[-1])\n return images, targets", "def build(obj):\n logger.info(ASCII_LOGO)\n logger.info(\"Started building SageMaker Docker image. 
It will take some minutes...\\n\")\n\n try:\n config_file_path = os.path.join('.sagify.json')\n if not os.path.isfile(config_file_path):\n raise ValueError()\n\n config = ConfigManager(config_file_path).get_config()\n api_build.build(\n source_dir=config.sagify_module_dir,\n requirements_dir=config.requirements_dir,\n docker_tag=obj['docker_tag'],\n image_name=config.image_name,\n python_version=config.python_version)\n\n logger.info(\"Docker image built successfully!\")\n except ValueError:\n logger.info(\"This is not a sagify directory: {}\".format(dir))\n sys.exit(-1)\n except subprocess.CalledProcessError as e:\n logger.debug(e.output)\n raise\n except Exception as e:\n logger.info(\"{}\".format(e))\n sys.exit(-1)", "def dockerized_pip(\n work_dir: str,\n client: Optional[docker.DockerClient] = None,\n runtime: Optional[str] = None,\n docker_file: Optional[str] = None,\n docker_image: Optional[str] = None,\n python_dontwritebytecode: bool = False,\n **_: Any,\n) -> None:\n # TODO use kwargs to pass args to docker for advanced config\n if bool(docker_file) + bool(docker_image) + bool(runtime) != 1:\n # exactly one of these is needed. converting to bool will give us a\n # 'False' (0) for 'None' and 'True' (1) for anything else.\n raise InvalidDockerizePipConfiguration(\n \"exactly only one of [docker_file, docker_file, runtime] must be \"\n \"provided\"\n )\n\n if not client:\n client = docker.from_env()\n\n if docker_file:\n if not os.path.isfile(docker_file):\n raise ValueError(f'could not find docker_file \"{docker_file}\"')\n LOGGER.info('building docker image from \"%s\"', docker_file)\n response = cast(\n Union[Image, Tuple[Image, Iterator[Dict[str, str]]]],\n client.images.build(\n path=os.path.dirname(docker_file),\n dockerfile=os.path.basename(docker_file),\n forcerm=True,\n ),\n )\n # the response can be either a tuple of (Image, Generator[Dict[str, str]])\n # or just Image depending on API version.\n if isinstance(response, tuple):\n docker_image = response[0].id\n for log_msg in response[1]:\n if log_msg.get(\"stream\"):\n LOGGER.info(log_msg[\"stream\"].strip(\"\\n\"))\n else:\n docker_image = response.id\n LOGGER.info('docker image \"%s\" created', docker_image)\n if runtime:\n if runtime not in SUPPORTED_RUNTIMES:\n raise ValueError(\n f'invalid runtime \"{runtime}\" must be one of {SUPPORTED_RUNTIMES}'\n )\n docker_image = f\"lambci/lambda:build-{runtime}\"\n LOGGER.debug(\n 'selected docker image \"%s\" based on provided runtime', docker_image\n )\n\n if sys.platform.lower() == \"win32\":\n LOGGER.debug(\"formatted docker mount path for Windows\")\n work_dir = work_dir.replace(\"\\\\\", \"/\")\n\n work_dir_mount = docker.types.Mount(\n target=\"/var/task\", source=work_dir, type=\"bind\"\n )\n pip_cmd = \"python -m pip install -t /var/task -r /var/task/requirements.txt\"\n\n LOGGER.info('using docker image \"%s\" to build deployment package...', docker_image)\n\n docker_run_args: Dict[str, Any] = {}\n if python_dontwritebytecode:\n docker_run_args[\"environment\"] = \"1\"\n\n container = client.containers.run(\n image=cast(str, docker_image),\n command=[\"/bin/sh\", \"-c\", pip_cmd],\n auto_remove=True,\n detach=True,\n mounts=[work_dir_mount],\n **docker_run_args,\n )\n\n # 'stream' creates a blocking generator that allows for real-time logs.\n # this loop ends when the container 'auto_remove's itself.\n for log in container.logs(stdout=True, stderr=True, stream=True, tail=0):\n # without strip there are a bunch blank lines in the output\n 
LOGGER.info(log.decode().strip())", "def define_containers(self, *types: ContainerType) -> None:\n\n for container_type in types:\n self.containers[container_type] = self.onefuzz.utils.build_container_name(\n container_type=container_type,\n project=self.project,\n name=self.name,\n build=self.build,\n platform=self.platform,\n )", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def pyfunc_build_image(model_uri, extra_args=None):\n name = uuid.uuid4().hex\n cmd = [\"mlflow\", \"models\", \"build-docker\", \"-m\", model_uri, \"-n\", name]\n if extra_args:\n cmd += extra_args\n p = subprocess.Popen(cmd,)\n assert p.wait() == 0, \"Failed to build docker image to serve model from %s\" % model_uri\n return name", "def describe_image_builders(Names=None, MaxResults=None, NextToken=None):\n pass", "def create_containers(self):\n status = []\n for key, container in self.containers.items():\n result = container.create()\n status.append(result)\n\n return status", "def _iter_images(self):\n raise NotImplementedError", "def main(\n *,\n component: list[str],\n no_cache: bool,\n pull: bool,\n quiet: bool,\n release: str,\n sp_osi: str | None,\n tag_suffix: str | None,\n) -> None:\n\n def build_component(component: str) -> None:\n \"\"\"Rebuild the container for a single component.\"\"\"\n parts: Final = component.split(\"-\", maxsplit=1)\n if len(parts) != 2: # noqa: PLR2004 # this will go away with match/case\n sys.exit(f\"Internal error: build_component() invoked with {component=!r}\")\n kolla_component, kolla_service = parts\n build: Final = prepare.build_dockerfile(cfg, files, kolla_component, kolla_service)\n\n with tempfile.NamedTemporaryFile(\n mode=\"wt\", encoding=\"UTF-8\", prefix=\"Dockerfile.\"\n ) as dockerfile:\n dockerfile.write(build.dockerfile)\n dockerfile.flush()\n subprocess.check_call([\"ls\", \"-l\", \"--\", dockerfile.name])\n subprocess.check_call([\"cat\", \"--\", dockerfile.name])\n\n cmd: Final[list[str | pathlib.Path]] = [\n \"docker\",\n \"build\",\n \"-t\",\n f\"storpool/{build.container_name}{cfg.tag_suffix}\",\n \"--rm\",\n *([\"--no-cache\"] if no_cache else []),\n *([\"--pull\"] if pull else []),\n \"-f\",\n dockerfile.name,\n \"--\",\n datadir,\n ]\n cmd_str: Final = shlex.join(str(word) for word in cmd)\n cfg.diag(lambda: f\"Running `{cmd_str}`\")\n try:\n subprocess.run(cmd, check=True)\n except (OSError, subprocess.CalledProcessError) as err:\n sys.exit(f\"Could not run `{cmd_str}`: {err}\")\n\n if release not in prepare.ALL_RELEASES:\n sys.exit(\n f\"Unsupported release {release!r}, must be one of {' '.join(prepare.ALL_RELEASES)}\"\n )\n if any(comp for comp in component if comp not in ALL_COMPONENTS):\n sys.exit(f\"Unrecognized components, must be one or more of {' '.join(ALL_COMPONENTS)}\")\n cfg: Final = build_config(quiet=quiet, release=release, sp_osi=sp_osi, tag_suffix=tag_suffix)\n\n datadir: Final = cfg.topdir / defs.DATA_DIR\n files: Final = prepare.prepare_data_files(cfg, datadir)\n\n for comp in component:\n build_component(comp)", "def docker_runner_factory(image):\n\n mapping = {\n 'gunicorn': GunicornDockerRunner,\n 'redis': RedisDockerRunner,\n 'consul': ConsulDockerRunner,\n 'postgres': PostgresDockerRunner,\n 'registrator': RegistratorDockerRunner,\n 'solr': SolrDockerRunner\n }\n\n for key in mapping:\n if key in image:\n return mapping[key]\n\n return DockerRunner", "def build(self):\n # open json, len 161,260\n at_json = open_json(self.json_names[0])\n link_json = 
open_json(self.json_names[1])\n # if need preprocessing, do it\n if self.args.img_preprocessing:\n print(\"resize imgs\")\n for i in tqdm(range(len(link_json))):\n image_url = \"image/\" + link_json[i][\"image_url_4x\"].split('/')[-1]\n img = Image.open(image_url)\n img = img.resize((224, 224))\n img.save(image_url)\n\n # create dataset\n itemlen = 0\n previd = 0\n for i in tqdm(range(len(link_json))):\n image_url = link_json[i][\"image_url_4x\"].split('/')[-1]\n uid = image_url.split('-')[0]\n if previd != uid:\n self.label.append(list(at_json[i].values())[2:])\n if i != 0:\n self.itemlen.append(itemlen)\n itemlen = 0\n self.input.append(f\"{self.frontpath}dataset/image/\" + image_url)\n previd = uid\n itemlen += 1\n self.itemlen.append(itemlen)\n self.separate()\n self.dataset = {\n 'train': self.train,\n 'validation': self.val,\n 'test': self.test\n }\n\n print('finished dataset')", "def required_images(self):\n required = set()\n deployment_type = self.get_var(\"openshift_deployment_type\")\n host_groups = self.get_var(\"group_names\")\n # containerized etcd may not have openshift_image_tag, see bz 1466622\n image_tag = self.get_var(\"openshift_image_tag\", default=\"latest\")\n image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]\n\n # template for images that run on top of OpenShift\n image_url = \"{}/{}-{}:{}\".format(image_info[\"namespace\"], image_info[\"name\"], \"${component}\", \"${version}\")\n image_url = self.get_var(\"oreg_url\", default=\"\") or image_url\n if 'nodes' in host_groups:\n for suffix in NODE_IMAGE_SUFFIXES:\n required.add(image_url.replace(\"${component}\", suffix).replace(\"${version}\", image_tag))\n # The registry-console is for some reason not prefixed with ose- like the other components.\n # Nor is it versioned the same, so just look for latest.\n # Also a completely different name is used for Origin.\n required.add(image_info[\"registry_console_image\"])\n\n # images for containerized components\n if self.get_var(\"openshift\", \"common\", \"is_containerized\"):\n components = set()\n if 'nodes' in host_groups:\n components.update([\"node\", \"openvswitch\"])\n if 'masters' in host_groups: # name is \"origin\" or \"ose\"\n components.add(image_info[\"name\"])\n for component in components:\n required.add(\"{}/{}:{}\".format(image_info[\"namespace\"], component, image_tag))\n if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise\n required.add(\"registry.access.redhat.com/rhel7/etcd\") # and no image tag\n\n return required", "def _build_image_processing(self, shift_ratio=0):\n with tf.device(self.cpu_device):\n subset = 'train'\n image_producer_ops = []\n image_producer_stages = []\n images_splits, labels_splits = self.image_preprocessor.minibatch(\n self.dataset,\n subset=subset,\n use_datasets=self.params.use_datasets,\n cache_data=self.params.cache_data,\n shift_ratio=shift_ratio)\n images_shape = images_splits[0].get_shape()\n labels_shape = labels_splits[0].get_shape()\n for device_num in range(len(self.devices)):\n image_producer_stages.append(\n data_flow_ops.StagingArea(\n [images_splits[0].dtype, labels_splits[0].dtype],\n shapes=[images_shape, labels_shape]))\n return (image_producer_ops, image_producer_stages)", "def _get_pinned_docker_images() -> Mapping[str, Mapping[str, str]]:\n\n pinned_docker_images_file = resources_dir / \"pinned_docker_images.cfg\"\n all_pinned_docker_images = ConfigParser()\n all_pinned_docker_images.read(pinned_docker_images_file)\n return all_pinned_docker_images", "def 
test_docker_build(rule_runner: RuleRunner) -> None:\n rule_runner.write_files(\n {\n \"src/BUILD\": \"docker_image(name='test-image', image_tags=['1.0'])\",\n \"src/Dockerfile\": \"FROM python:3.8\",\n }\n )\n target = rule_runner.get_target(Address(\"src\", target_name=\"test-image\"))\n result = run_docker(rule_runner, target)\n assert len(result.artifacts) == 1\n assert len(result.artifacts[0].extra_log_lines) == 2\n assert \"Built docker image: test-image:1.0\" == result.artifacts[0].extra_log_lines[0]\n assert \"Docker image ID:\" in result.artifacts[0].extra_log_lines[1]\n assert \"<unknown>\" not in result.artifacts[0].extra_log_lines[1]", "def images_init(self):\n\n self.user_image = UserImage(\n user_id=self.user_id,\n tag=self.tag_image,\n image_id='sha256:342fea22',\n created=1524229897,\n size=191623983\n )\n self.user_image.save()\n\n CLIENT.images_list.append(\n {'Containers': -1,\n 'Created': 1524229897,\n 'Id': 'sha256:342fea22',\n 'Labels': None,\n 'ParentId': 'sha256:55d98c2',\n 'RepoDigests': None,\n 'RepoTags': [self.tag_image],\n 'SharedSize': -1,\n 'Size': 191623983,\n 'VirtualSize': 191623983}\n )", "def build_docker(params) -> None:\n print(\"Building docker image...\")\n cmd = \"cd bg_changer && docker build --tag bg_changer . >/dev/null 2>&1\"\n if os.system(cmd) == 0:\n print(\" Success !\")\n else:\n print(\" Failure !\")", "def define_tasks(self, imgcollections,\r\n description, dimensions, folder):\r\n n = imgcollections.size().getInfo()\r\n collections = imgcollections.toList(n) # this is server-object; not iterable\r\n tasks = []\r\n itr = np.arange(n).tolist()\r\n random.shuffle(itr)\r\n pbar = tqdm.tqdm(itr)\r\n for i in pbar:\r\n image = collections.get(i)\r\n task = self.define_task(ee.Image(image).float(),\r\n \"{0:05d}\".format(i),\r\n description,\r\n dimensions,\r\n folder)\r\n tasks.append(task)\r\n pbar.set_description(\"defining tasks {0:05d}/{1:05d}\".format(i, n)) \r\n return tasks", "def build_and_push(build_context,\n image_tag,\n image_name,\n nocache,\n credstore_env=None,\n registries=None,\n max_retries=3,\n sleep_interval=1):\n _logger.info('Starting build ...')\n\n # Build the image\n docker_builder = build(build_context=build_context,\n image_tag=image_tag,\n image_name=image_name,\n nocache=nocache,\n credstore_env=credstore_env,\n registries=registries,\n max_retries=max_retries,\n sleep_interval=sleep_interval)\n push(docker_builder, max_retries=max_retries, sleep_interval=sleep_interval)", "def load_images(pool, entries):\n start = time.perf_counter()\n images = pool.map(ski.io.imread, [x.path for x in entries])\n logger.info(\"Loaded %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def docker_container():\n if SETUP_SPLASH:\n dm = DockerManager()\n dm.start_container()\n\n try:\n requests.post('{}/_gc'.format(SPLASH_URL))\n except requests.exceptions.RequestException:\n pass\n\n yield", "def pull_image(self):\n status = []\n for key, container in self.containers.items():\n result = container.pull()\n status.append(result)\n return status", "def do_stage(self, images):\n\n for i, image in enumerate(images):\n pass\n # logging_tags = logs.image_config_to_tags(image, self.group_by_keywords)", "def _process_images(self, docname: pathlib.Path, images: List[nodes.image]) -> None:\n logger.debug(\"[nbtutorial]: Processing images for %s\", docname)\n\n if len(images) == 0:\n return\n\n img_dir = pathlib.Path(self.outdir, 
docname.parent, RESOURCE_DIR)\n\n if not img_dir.exists():\n img_dir.mkdir(parents=True)\n\n for img in images:\n fname = pathlib.Path(img[\"uri\"]).name\n\n source = pathlib.Path(self.app.confdir, img[\"uri\"])\n destination = pathlib.Path(img_dir, fname)\n\n shutil.copy(source, destination)", "def _build_container(\n self, target_image, odcs, repo_type, repo_list, terminate_event,\n scratch, record):\n self.logger.info(\"Building image: %s\" % target_image)\n cmd_list = [\"rhpkg\", \"--path=%s\" % self.distgit_dir]\n\n if self.runtime.user is not None:\n cmd_list.append(\"--user=%s\" % self.runtime.user)\n\n cmd_list += (\n \"container-build\",\n \"--nowait\",\n )\n\n if odcs:\n if odcs == 'signed':\n odcs = 'release' # convenience option for those used to the old types\n cmd_list.append('--signing-intent')\n cmd_list.append(odcs)\n else:\n if repo_type:\n repo_list = list(repo_list) # In case we get a tuple\n repo_list.append(self.metadata.cgit_url(\".oit/\" + repo_type + \".repo\"))\n\n if repo_list:\n # rhpkg supports --repo-url [URL [URL ...]]\n cmd_list.append(\"--repo-url\")\n cmd_list.extend(repo_list)\n\n if scratch:\n cmd_list.append(\"--scratch\")\n\n # Run the build with --nowait so that we can immediately get information about the brew task\n rc, out, err = exectools.cmd_gather(cmd_list)\n\n if rc != 0:\n # Probably no point in continuing.. can't contact brew?\n self.logger.info(\"Unable to create brew task: out={} ; err={}\".format(out, err))\n return False\n\n # Otherwise, we should have a brew task we can monitor listed in the stdout.\n out_lines = out.splitlines()\n\n # Look for a line like: \"Created task: 13949050\" . Extract the identifier.\n task_id = next((created_line.split(\":\")[1]).strip() for created_line in out_lines if\n created_line.startswith(\"Created task:\"))\n\n record[\"task_id\"] = task_id\n\n # Look for a line like: \"Task info: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=13948942\"\n task_url = next((info_line.split(\":\", 1)[1]).strip() for info_line in out_lines if\n info_line.startswith(\"Task info:\"))\n\n self.logger.info(\"Build running: {}\".format(task_url))\n\n record[\"task_url\"] = task_url\n\n # Now that we have the basics about the task, wait for it to complete\n error = watch_task(self.logger.info, task_id, terminate_event)\n\n # Looking for something like the following to conclude the image has already been built:\n # BuildError: Build for openshift-enterprise-base-v3.7.0-0.117.0.0 already exists, id 588961\n if error is not None and \"already exists\" in error:\n self.logger.info(\"Image already built against this dist-git commit (or version-release tag): {}\".format(target_image))\n error = None\n\n # Gather brew-logs\n logs_dir = \"%s/%s\" % (self.runtime.brew_logs_dir, self.metadata.name)\n logs_rc, _, logs_err = exectools.cmd_gather([\"brew\", \"download-logs\", \"-d\", logs_dir, task_id])\n\n if logs_rc != 0:\n self.logger.info(\"Error downloading build logs from brew for task %s: %s\" % (task_id, logs_err))\n\n if error is not None:\n # An error occurred. 
We don't have a viable build.\n self.logger.info(\"Error building image: {}, {}\".format(task_url, error))\n return False\n\n self.logger.info(\"Successfully built image: {} ; {}\".format(target_image, task_url))\n return True", "def list_images(self):\n raise NotImplementedError()", "def build_image(image, build_args):\n\n subprocess.check_call(['docker', 'build', '--no-cache', '--force-rm', '-t',\n image] + build_args)", "def main():\n client = docker.from_env()\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('--package_id', default='0',\n help='provide id for the work package, comma separated if multiple')\n parser.add_argument('--load_quicksave', default=\"no\", help='wanna load? -> yes/no')\n args = parser.parse_args()\n packages = args.package_id.split(\",\")\n print('Building docker container. This might take a while.')\n build_container(client)\n print('Build finished. Starting containers.')\n for package in packages:\n start_container(client, package, args.load_quicksave)\n print('Containers are running. Check Docker Dashboard for container health. Script will exit.')", "def test_build_image(self):\n labels = {\"apple\": \"red\", \"grape\": \"green\"}\n self.docker.images.build(\n path=\"test/python/docker/build_labels\",\n labels=labels,\n tag=\"labels\",\n isolation=\"default\",\n )\n image = self.docker.images.get(\"labels\")\n self.assertEqual(image.labels[\"apple\"], labels[\"apple\"])\n self.assertEqual(image.labels[\"grape\"], labels[\"grape\"])", "def add_files_to_image(image, with_files, label=None):\n tag_idx = image.find(\":\")\n if tag_idx == -1:\n jobset.message(\n \"FAILED\", \"invalid docker image %s\" % image, do_newline=True\n )\n sys.exit(1)\n orig_tag = \"%s_\" % image\n subprocess.check_output([\"docker\", \"tag\", image, orig_tag])\n\n lines = [\"FROM \" + orig_tag]\n if label:\n lines.append(\"LABEL %s\" % label)\n\n temp_dir = tempfile.mkdtemp()\n atexit.register(lambda: subprocess.call([\"rm\", \"-rf\", temp_dir]))\n\n # Copy with_files inside the tmp directory, which will be the docker build\n # context.\n for f in with_files:\n shutil.copy(f, temp_dir)\n lines.append(\"COPY %s %s/\" % (os.path.basename(f), _BUILD_INFO))\n\n # Create a Dockerfile.\n with open(os.path.join(temp_dir, \"Dockerfile\"), \"w\") as f:\n f.write(\"\\n\".join(lines))\n\n jobset.message(\"START\", \"Repackaging %s\" % image, do_newline=True)\n build_cmd = [\"docker\", \"build\", \"--rm\", \"--tag\", image, temp_dir]\n subprocess.check_output(build_cmd)\n dockerjob.remove_image(orig_tag, skip_nonexistent=True)", "def image_iter() -> iter:\r\n return ('Images/' + image for image in IMAGES)", "def get_images(self,\n collection,\n bounds=None,\n year=None,\n start_date=None,\n end_date=None,\n start_julian=1,\n end_julian=365,\n index_list=None,\n scale_factor=None,\n **kwargs):\n coll = ee.ImageCollection(collection)\n\n if year is not None:\n start_date = '{}-01-01'.format(str(year))\n end_date = '{}-12-31'.format(str(year))\n\n if bounds is not None:\n coll = coll.filterBounds(bounds)\n if (start_date is not None) and (end_date is not None):\n coll = coll.filterDate(start_date, end_date)\n\n coll = coll.filter(ee.Filter.calendarRange(start_julian, end_julian))\n\n if len(kwargs) > 0:\n for key, value in kwargs.items():\n if key == 'map':\n if value == 'add_indices':\n\n if index_list is not None:\n self.index_list = index_list\n\n if scale_factor is not None:\n self.scale_factor = scale_factor\n\n func = getattr(self, value, None)\n\n if func is not None:\n 
coll = coll.map(func)\n else:\n warnings.warn('The function {} is not implemented'.format(str(key)))\n return coll", "def create_image(images_steps):\n images = []\n\n def _create_image(image_name, *args, **kwgs):\n images_steps.create_image(image_name, *args, **kwgs)\n image = utils.AttrDict(name=image_name)\n images.append(image)\n return image\n\n yield _create_image\n\n for image in images:\n images_steps.delete_image(image.name)", "def images(request, get_glance_steps, uncleanable, credentials):\n params = {'count': 1}\n params.update(getattr(request, 'param', {}))\n names = utils.generate_ids('cirros', count=params['count'])\n with create_images_context(get_glance_steps,\n uncleanable,\n credentials,\n names,\n config.CIRROS_QCOW2_URL) as images:\n yield [utils.AttrDict(name=image['name']) for image in images]", "def run(args):\n docker(' '.join(args))" ]
[ "0.8046437", "0.73141617", "0.68958426", "0.6610562", "0.6573472", "0.65122867", "0.64818895", "0.64219844", "0.64116544", "0.63784987", "0.63149446", "0.6278506", "0.6173914", "0.61501855", "0.61332744", "0.61190534", "0.6108034", "0.61043316", "0.6097722", "0.60768604", "0.60623944", "0.6062093", "0.6042108", "0.60415363", "0.60392493", "0.6035408", "0.5994964", "0.5947068", "0.59328663", "0.593106", "0.58851856", "0.58802223", "0.5868655", "0.5834089", "0.58045167", "0.58012116", "0.57804924", "0.5773793", "0.5772279", "0.57198054", "0.5692639", "0.56802607", "0.56800884", "0.56800884", "0.56800884", "0.5676605", "0.5675687", "0.56756717", "0.5673288", "0.5655832", "0.564776", "0.5637407", "0.56318617", "0.5626563", "0.562143", "0.5613162", "0.5612654", "0.56014675", "0.5587354", "0.5586818", "0.55830985", "0.5577749", "0.55560374", "0.5551627", "0.55237895", "0.552284", "0.55136645", "0.55034745", "0.5493486", "0.54841524", "0.548029", "0.5470928", "0.5468354", "0.5457864", "0.5452737", "0.54526997", "0.5449885", "0.5445462", "0.5443408", "0.54432726", "0.5438861", "0.5434099", "0.54329", "0.5428074", "0.54247636", "0.54237336", "0.5423583", "0.54200506", "0.53872585", "0.5384811", "0.53833115", "0.5381634", "0.53779644", "0.53750265", "0.5371467", "0.5369448", "0.5364492", "0.5362204", "0.53604037", "0.53574544" ]
0.6628026
3
Update name/values.yaml with modifications
def build_values(name, values_mods): values_file = os.path.join(name, 'values.yaml') with open(values_file) as f: values = yaml.load(f) for key, value in values_mods.items(): parts = key.split('.') mod_obj = values for p in parts: mod_obj = mod_obj[p] print(f"Updating {values_file}: {key}: {value}") if isinstance(mod_obj, MutableMapping): keys = IMAGE_REPOSITORY_KEYS & mod_obj.keys() if keys: for key in keys: mod_obj[key] = value['repository'] else: possible_keys = ' or '.join(IMAGE_REPOSITORY_KEYS) raise KeyError( f'Could not find {possible_keys} in {values_file}:{key}' ) mod_obj['tag'] = value['tag'] else: raise TypeError( f'The key {key} in {values_file} must be a mapping.' ) with open(values_file, 'w') as f: yaml.dump(values, f)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _write_values(self, app_name, chart_dir, values):\n\n data = self._get_values(app_name, chart_dir)\n new_data = {**data, **values}\n new_raw = yaml.dump(new_data)\n\n values_path = \"%s/%s/values.yaml\" % (chart_dir, app_name)\n with open(values_path, mode=\"w\") as values_file:\n values_file.write(new_raw)", "def update_feature(selfs, k, v, cfg_path):\n with open(cfg_path, 'r') as cfg:\n file_dict = yaml.safe_load(cfg)\n # overprint the entries with the new config_dict\n file_dict['{}'.format(k)] = v\n with open(cfg_path, 'w') as w_file:\n w_file.write(yaml.dump(file_dict))", "def update_file(filename,d):\n if os.path.exists(filename):\n f_old = open(filename,'r')\n d_old = yaml.load(f_old)\n f_old.close()\n d_old.update(d)\n d = d_old\n f = open(filename, 'w')\n yaml.dump(d, f)\n f.close()", "def update_variables(old_contents):\n new_contents = []\n\n for line in old_contents:\n words = line.split()\n\n for word in words:\n # Using the whitespace split above, the keys in the yaml file will\n # have a : at the end, so we need to strip that off before\n # replacing\n if word.endswith(':'):\n word = word[:-1]\n\n if word in VAR_MAPPINGS.keys():\n line = line.replace(word, VAR_MAPPINGS[word])\n\n new_contents.append(line)\n\n return new_contents", "def _do_update(self, meta, k, v):\n self.runtime.logger.info('{}: [{}] -> {}'.format(meta.in_group_config_path, k, v))\n meta.config[k] = v\n meta.save()", "def update(self, values):\n for k, v in values.items():\n setattr(self, k, v)", "def update_values(self):\n for key in self.inputs.keys():\n value = self.inputs[key]['entry'].get()\n self.inputs[key]['value'] = value", "def set(self, name, path):\n self.yaml[IDK_YAML_GROUP][name] = path\n self.write()", "def update(self, values):\r\n for k, v in six.iteritems(values):\r\n setattr(self, k, v)", "def update(self, namein, nameout):\n\t\ttext = self.dict.sub(self.readFile(namein))\n\t\tself.writeFile(nameout, text)\n\t\treturn", "def update(self, values):\n for k, v in six.iteritems(values):\n setattr(self, k, v)", "def save(self, name, description, template, values):\n # Before attempting to write, ensure the directory exists\n self.directory.mkdir(parents = True, exist_ok = True)\n dest = self.directory / \"{}.yaml\".format(name)\n with dest.open('w') as f:\n yaml.dump(\n dict(\n description = description or '',\n template = template.name,\n values = values\n ),\n f\n )", "def write_data(filename: str, old_position: dict, new_position: dict) -> None:\n\n combined = {\"old_positions\": old_position, \"new_positions\": new_position}\n\n with open(filename, \"w\") as f:\n yaml.dump(combined, f)\n\n return", "def load_values(self, values: Context) -> None:\n for name, refers_to in values.items():\n self.logger.info(f\"load_values {name!r} : {refers_to!r}\")\n if not self.extended_name_path.match(name):\n raise ValueError(f\"Invalid name {name}\")\n\n context = self\n\n # Expand \"name1.name2....\": refers_to into [\"name1\", \"name2\", ...]: refers_to\n # Update NameContainer(\"name1\", NameContainer(\"name2\", NameContainer(..., refers_to)))\n *path, final = self.ident_pat.findall(name)\n for name in path:\n ref = context.setdefault(name, Referent())\n if ref.container is None:\n ref.container = NameContainer(parent=self.parent)\n context = ref.container\n context.setdefault(final, Referent()) # No annotation.\n context[final].value = refers_to", "def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system 
config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def set(self, key, value):\n try:\n if value.lower() in ['true', 'false']:\n value = value.lower() == 'true'\n except:\n pass\n\n try:\n if \".\" in key:\n keys = key.split(\".\")\n #\n # create parents\n #\n parents = keys[:-1]\n location = self.data\n for parent in parents:\n if parent not in location:\n location[parent] = {}\n location = location[parent]\n #\n # create entry\n #\n location[keys[-1]] = value\n else:\n self.data[key] = value\n\n except KeyError:\n raise ValueError(f\"The key '{key}' could not be found in the yaml file '{self.filename}'\")\n except Exception as e:\n print(e)\n raise ValueError(\"unkown error\")\n\n self.flush()", "def update(self, values, priority=\"project\"):\n\t\tself._assert_mutability()\n\t\tif isinstance(values, six.string_types):\n\t\t\tvalues = json.loads(values)\n\t\tif values is not None:\n\t\t\tif isinstance(values, BaseSettings):\n\t\t\t\tfor name, value in six.iteritems(values):\n\t\t\t\t\tself.set(name, value, values.getpriority(name))\n\t\t\telse:\n\t\t\t\tfor name, value in six.iteritems(values):\n\t\t\t\t\tself.set(name, value, priority)", "def update(context, namespace_name, id, values, session):\n namespace_api.get(context, namespace_name, session)\n\n metadata_tag = _get(context, id, session)\n metadef_utils.drop_protected_attrs(models.MetadefTag, values)\n # values['updated_at'] = timeutils.utcnow() - done by TS mixin\n try:\n metadata_tag.update(values.copy())\n metadata_tag.save(session=session)\n except db_exc.DBDuplicateEntry:\n LOG.debug(\"Invalid update. It would result in a duplicate\"\n \" metadata tag with same name=%(name)s\"\n \" in namespace=%(namespace_name)s.\",\n {'name': values['name'],\n 'namespace_name': namespace_name})\n raise exc.MetadefDuplicateTag(\n name=values['name'], namespace_name=namespace_name)\n\n return metadata_tag.to_dict()", "def update(self, values):\n pass", "def conversion_yaml():\r\n data ={\r\n 'name': 'george',\r\n 'age': 16,\r\n 'friends':\r\n [{'name': 'marry', 'age': 16}, {'name': 'jack', 'age': 17}]\r\n }\r\n yaml_data = yaml.dump(data)\r\n dirname = os.path.dirname(os.path.dirname(__file__))\r\n # data_dir = os.path.join(dirname, 'data')\r\n data_dir = '/'.join([dirname, 'data'])\r\n file_path = data_dir + '/' + 'test.yaml'\r\n with open(file_path, 'w') as fw:\r\n fw.write(yaml_data)\r\n print(yaml_data)", "def update(self, new_values):\n values_copy = new_values.copy()\n for key in self.SET_KEYS:\n if key in values_copy:\n values_copy[key] = set(values_copy[key])\n super(ConfigDict, self).update(values_copy)", "def _update_all_fields(self, name, value):\n for field in self._field_map.values():\n setattr(field, name, value)", "def set_values(self, new_values):\n for name, value in new_values.items():\n self.nodes_db.loc[name][\"node\"].set_value(value)", "def json_to_yaml(cls, name, filename=\"~/.cloudmesh/security/google.json\"):\n path = path_expand(filename)\n\n # Open and load the JSON file.\n with open(path, \"r\") as file:\n d = json.load(file)\n\n # Get the project id and client email.\n project_id = d[\"project_id\"]\n client_email = d[\"client_email\"]\n\n # Format the sample with json file details.\n format_sample = cls.sample.format_map(locals())\n # Convert the yaml sample to JSON.\n google_yaml = yaml.load(format_sample, Loader=yaml.SafeLoader)\n # Extract the google compute section\n google_config = 
google_yaml[\"cloudmesh\"][\"cloud\"]\n\n # Update the google cloud section of cloudmesh.yaml config file.\n config = Config()\n config[\"cloudmesh\"][\"cloud\"][name] = google_config\n config.save()\n banner(\"Result\")\n pprint(config[\"cloudmesh\"][\"cloud\"][name])", "def update(self):\n self.save_config_file()", "def update_config(self, data):\n self.config.data = dict_merge(self.config.data, data)\n self.config.save()", "def modify_res_value(name, delta):\n pass", "def upgrade_settings(self, keys):\n upgradable_keys = {\n \"project_dir\": \"%root_dir%\",\n \"source_folder\": \"%source_folder%\",\n \"packages_path\": \"%packages_path%\",\n \"sep\": \"%sep%\",\n \"$\": \"$\"\n }\n for key in keys:\n value, from_global = self.get(key, as_tuple=True)\n value = value.replace(\"%\", \"%%%\")\n for k in upgradable_keys:\n value = value.replace(\"$\" + k, upgradable_keys[k])\n self.set(key, value, to_global=from_global)", "def update(name, value, config_dir=None):\n if name not in Config.__ALLOWED:\n msg = f'Cannot update configuration; value \"{name}\" is not allowed.'\n raise ConfigurationError(msg)\n config_dir = Config.resolve_config_dir(config_dir)\n config_dat, config_file = Config.get_config_file(\n config_dir,\n round_trip_load=True,\n quiet=True,\n )\n config_dat.update({name: value})\n Config.write_config_file(config_dat, config_file)\n if Config.is_set:\n Config.__conf[name] = value", "def test_with_different_name(data_store_path):\n data_set = [\n {\"name\": \"Eric Idle\", \"phone\": \"123-456-7890\", \"address\": \"here\"},\n {\"name\": \"John Cleese\", \"phone\": \"111-222-3333\", \"address\": \"there\"},\n ]\n data_store_path.write_text(yaml.dump(data_set))\n data_store = YAMLDataStore(file_path=str(data_store_path))\n assert data_store._users == data_set\n\n updated_user = {\n \"name\": \"Terry Gilliam\",\n \"phone\": \"999-999-9999\",\n \"address\": \"not here\",\n }\n data_store.update(\"Eric Idle\", **updated_user)\n\n assert updated_user in data_store._users\n assert not [user for user in data_store._users if user[\"name\"] == \"Eric Idle\"]\n yaml_data = yaml.safe_load(data_store_path.read_text())\n assert updated_user in yaml_data\n assert not [user for user in yaml_data if user[\"name\"] == \"Eric Idle\"]", "def _config_file_content_substitute(self, filename, keys):\n # Open the file for substitution\n try:\n f = open(filename, \"r\")\n lines = f.readlines()\n f.close()\n except:\n self.log.warning(\"Error reading from parameter file \" + filename + \".\")\n raise\n\n try:\n # Backup the original file\n f = open(filename + \".original\", \"w\")\n f.write(\"\".join(lines))\n f.close()\n except:\n self.log.warning(\"Error making a backup file of \" + filename + \". 
Skipped.\")\n\n # Define the fields within the file\n fields = 2\n keyfield = 0\n datafield = 1\n if \"dacParameters\" in filename or \"tbmParameters\" in filename or \"tbParameters\" in filename:\n fields = 3\n keyfield = 1\n datafield = 2\n\n keys_replaced = []\n # iterate over all lines\n for i in range(len(lines)):\n line = lines[i].strip()\n if len(line) == 0 or line[0] == '-' or line[0] == '#':\n continue\n line = line.split(None, fields - 1)\n if len(line) != fields:\n continue\n # check whether this line matches a key\n if not line[keyfield] in keys:\n continue\n line[datafield] = keys[line[keyfield]]\n keys_replaced.append(line[keyfield])\n if line[datafield].startswith('DTB') and line[keyfield] == 'id':\n lines[i] = \" : \".join(line)\n lines[i] += '\\n'\n else:\n lines[i] = \" \".join(line)\n lines[i] += '\\n'\n try:\n RequireTestParametersExisting = self.init.get('VerifyTestParameters', 'CheckExistence').strip().lower() == 'true'\n except:\n RequireTestParametersExisting = False\n\n for key in keys:\n if not key in keys_replaced:\n WarningMessage = \"Warning: key '%s' in file '%s' does not exist! Update '%s' file in parameters directory or 'Tests *' section in ini file!\"%(key, filename, filename)\n self.log.warning(WarningMessage)\n if RequireTestParametersExisting:\n raise Exception(WarningMessage)\n\n try:\n # Write the new file\n f = open(filename, \"w\")\n f.write(\"\".join(lines))\n f.close()\n except:\n self.log.warning(\"Error saving parameters in \" + filename + \".\")\n raise", "def set_value(key: str, value):\n with open('config.json', 'r+') as f:\n config = json.load(f)\n config[key] = value\n f.seek(0)\n json.dump(config, f, indent=3)\n f.truncate()", "def conf_update(self):\n pass", "def update(self, config_dict):\r\n self._update(config_dict, allow_new_keys=True)", "def overwriteDataField(self, name, value): \n if not name in self.__examples: \n raise ValueError(\"Cannot overwrite a field that does not exist: \" + name)\n \n self.__storeDataField(name, value)", "def update_metadata(self, key, value):\n sp.verify(self.is_administrator(sp.sender), FA12_Error.NotAdmin)\n self.data.metadata[key] = value", "def update_config_db_json(dut, attr, val1, val2):\n cmd = \"sudo sed -i 's/\\\"{}\\\": \\\"{}\\\"/\\\"{}\\\": \\\"{}\\\"/g' /etc/sonic/config_db.json\"\\\n .format(attr,val1,attr,val2)\n st.config(dut, cmd)\n return", "def yaml_inventory(self):\n inventory_file = 'inventory_file'\n with open(inventory_file, 'w') as invfile:\n yaml.dump(self.inventory_dict, invfile, default_flow_style=False, sort_keys=False)", "def set_config(self, section, values):\n for option in values:\n self.data[section][option] = values[option]\n\n with open(self.filename, 'w') as configfile:\n self.data.write(configfile)", "def _file_update(self, filename):\n values = TaskInfo._parse_file(filename)\n self._load_dict(values)", "def setValue(self, name, value):\n values = self.__get('values')\n values[name] = value\n self.__set('values', values)", "def _update_sidecar(sidecar_fname, key, val):\n with open(sidecar_fname, \"r\") as fin:\n sidecar_json = json.load(fin)\n sidecar_json[key] = val\n with open(sidecar_fname, \"w\") as fout:\n json.dump(sidecar_json, fout)", "def replace_field_value_in_every_item_in_response_copy(context, name, value):\n items = context.response_copy['items']\n for item in items:\n print(item)\n if item['owner']['user_type'] == 'does_not_exist':\n continue\n if name in item:\n item[name] = value\n logging.debug(\n 'Successfully replaced value of field \"%s\" in item: 
%s with %s',\n name, item['question_id'], value) \n else:\n logging.debug(\n 'Item %s does not contain field \"%s', name, item) \n logging.debug(\n 'Response copy after replacing values of all \"%s\" fields with %s:\\n%s',\n name, value, pformat(context.response_copy))", "def replace_version(version):\n filename = 'conda.recipe/meta.yaml'\n pattern = r'version: .*'\n replacement = 'version: {version}'.format(version=version)\n lines = []\n with open(filename) as meta_file:\n for line in meta_file.readlines():\n lines.append(re.sub(pattern, replacement, line))\n with open(filename, 'w') as meta_file:\n for line in lines:\n meta_file.write(line)", "def __setattr__(self, name: str, value: Any) -> None:\n super().__setattr__(name, value)\n # update entry as well (to sync with CLI, etc. )\n if not name.startswith(\"_\") and name in self._entries:\n self._entries[name].value = value", "def _addAppYaml(self):\n if self.wc.exists(self._branchPath('app/app.yaml')):\n raise ObstructionError('app/app.yaml exists already')\n\n yaml_path = self._branchPath('app/app.yaml')\n self.wc.copy(yaml_path + '.template', yaml_path)\n\n yaml = io.fileToLines(self.wc.path(yaml_path))\n out = []\n for i, line in enumerate(yaml):\n stripped_line = line.strip()\n if 'TODO' in stripped_line:\n continue\n elif stripped_line == '# application: FIXME':\n out.append('application: socghop')\n elif stripped_line.startswith('version:'):\n out.append(line.lstrip() + 'g0')\n out.append('# * initial Google fork of Melange ' + self.branch)\n else:\n out.append(line)\n io.linesToFile(self.wc.path(yaml_path), out)\n\n self.wc.commit('Create app.yaml with Google patch version g0 '\n 'in branch ' + self.branch)", "def update(name=\"\", amount=0, execute=False):\n if name:\n bucket_metadata = get_bucket(name)\n if bucket_metadata:\n bucket = bucket_metadata[\"bucket\"]\n versioning = bucket_metadata[\"versioning\"] == \"Enabled\"\n lifecycle = bucket_metadata[\"lifecycle\"]\n update_bucket(name, bucket, versioning, lifecycle, execute)\n else:\n buckets = get_buckets(amount)\n for k, v in buckets.items():\n name = k\n bucket = v[\"bucket\"]\n versioning = v[\"versioning\"] == \"Enabled\"\n lifecycle = v[\"lifecycle\"]\n update_bucket(name, bucket, versioning, lifecycle, execute)", "def apply_config_file(self, filename):\n def extractor(template, options):\n \"\"\"Ignore things that are existing non default values\"\"\"\n for name, val in options:\n normalised = self.normalise_key(name)\n if normalised in self.values and not isinstance(self.values[normalised], Default):\n continue\n else:\n yield name, val\n\n items = json.load(open(filename)).items()\n self.use_options(items, extractor)", "def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"", "def update_toml(change_dict: Dict[str, Any], files: LocalPath):\n for f in files:\n t = toml.load(f)\n for path, val in change_dict.items():\n merge_dict(path_to_dict(path, val), t)\n toml.dump(t, f)", "def update_ansible_vars(remote_username: str, username: str, ip_address: str):\n\n with open(\"./ansible/vars/params.yml\") as yaml_file:\n params = yaml.load(yaml_file)\n\n params[\"remote_user\"] = remote_username\n params[\"ca_name\"] = f\"admin_{ip_address}\"\n\n with open(\"./ansible/vars/params.yml\", \"w\") as yaml_file:\n yaml.dump(params, yaml_file)", "def set(self, *, name: types.TSeedName, value: types.TSeedValue) -> 
None:\n (self._base_path / self._get_file_name(name)).write_text(value)", "def update_config_item(self, elements: Dict[str, Any]) -> None:\n ...", "def updateDictFile(self):\n if self.dictFile.vdata.get('version',0): return\n #--Update to version 1\n for name in self.data.keys():\n installer = self.data[name]\n if isinstance(installer,Installer):\n self.data[name] = installer.__copy__(InstallerArchive)\n self.dictFile.vdata['version'] = 1", "def ReplaceDepsVar(deps_file, variable_name, value):\n with open(deps_file, 'r') as file_handle:\n contents_old = file_handle.read()\n contents_new = re.sub(\n '\"%s\":.*,' % variable_name,\n '\"%s\": \"%s\",' % (variable_name, value),\n contents_old)\n with open(deps_file, 'w') as file_handle:\n file_handle.write(contents_new)", "def test_yaml_file_watch(self):\n # Set initial data\n _setup_template_value('yaml_file_test_values.tmp.yml', 'yaml_file_test_values_1.yml')\n\n with TemplateRenderThread('yaml_file_test.t', 'yaml_file_test.tmp.out') as renderer:\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_values_expected_1.out')\n\n # Set updated data\n print('Updating file..')\n _setup_template_value('yaml_file_test_values.tmp.yml', 'yaml_file_test_values_2.yml')\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_values_expected_2.out')", "def _update_cfg_from_files(self, files):\n\t\tfor file in files:\n\t\t\twith open(self.SettingsFolder + file) as f:\n\t\t\t\tself._add_cfg_to_list(file[:-4], yaml.load(f))", "def update_config(self, kv: dict):\n self._configs.update(kv)\n self._save()", "def user_create_yaml(self):\n pass", "def update_current_settings(file_name):\n new_settings = importlib.import_module(file_name)\n for k, v in new_settings.__dict__.items():\n if k.upper() == k:\n globals().update({k: v})", "def update_values(self, values):\n if values is not None:\n self.settings.update(values)\n\n # External (from MCU)\n self.label_smc1.configure(text=self.smc1_template % self.settings['s1'], font=self.font)\n self.label_smc2.configure(text=self.smc2_template % self.settings['s2'], font=self.font)\n self.label_smc3.configure(text=self.smc3_template % self.settings['s3'], font=self.font)\n self.label_smc4.configure(text=self.smc4_template % self.settings['s4'], font=self.font)\n self.label_ambient_min.configure(text=self.ambient_light_template % self.settings['p'], font=self.font)\n\n # Internal (from GUI)\n self.label_overhead_level.configure(text=self.overhead_level_template % self.overhead_level.get(), font=self.font)\n self.active_changes = True # (flag) Once changes are retrieved, we assume that they will be sent to the controller", "def register_yaml(self, yaml_text):\n\n defs = yaml.load_all(yaml_text)\n for def_set in defs:\n for name,_def in def_set.iteritems():\n # TODO: Hook into pyyaml's event emitting stuff to try to get the canonical form without re-dumping\n def_text = yaml.dump(_def, canonical=True, allow_unicode=True)\n self.register_def(name, _def, def_text)", "def _change_references(path, name, val):\n\n text = _open_file(path)\n for row in text.split('\\n'):\n if row.startswith(name + \"=\"):\n row = f'{name}={val}'\n yield row", "def old_values(basenames):\n p = path_to_data()\n if not p.isfile():\n if not p.parent.isdir():\n p.parent.makedirs_p()\n return write_default_values()\n else:\n return read_old_values(basenames)", "def test_with_same_name(data_store_path):\n data_set = [\n {\"name\": \"Eric Idle\", \"phone\": 
\"123-456-7890\", \"address\": \"here\"},\n {\"name\": \"John Cleese\", \"phone\": \"111-222-3333\", \"address\": \"there\"},\n ]\n data_store_path.write_text(yaml.dump(data_set))\n data_store = YAMLDataStore(file_path=str(data_store_path))\n assert data_store._users == data_set\n\n updated_user = {\"name\": \"Eric Idle\", \"phone\": \"999-999-9999\", \"address\": \"not here\"}\n data_store.update(\"Eric Idle\", **updated_user)\n\n assert updated_user in data_store._users\n assert updated_user in yaml.safe_load(data_store_path.read_text())", "def format_yaml(template, config):\n formatted = template\n for k, v in config.items():\n formatted = formatted.replace('${%s}' % k, v)\n return formatted", "def change_environment_variables():\n values = load('environment.yaml')\n\n for key in values.keys():\n os.environ[key] = values[key]\n\n info(f'Changed environment variables to {values}')", "def updateNamespace(self):\n import addict\n self.namespace['config'] = addict.Dict(self.namespace['config'])", "def settings_changed(self, name, value):\n return", "def update_file(filename, items):\n # TODO: Implement something in the templates to denote whether the value\n # being replaced is an XML attribute or a value. Perhaps move to dyanmic\n # XML tree building rather than string replacement.\n should_escape = filename.endswith('addon.xml')\n\n with open(filename, 'r') as inp:\n text = inp.read()\n\n for key, val in items.items():\n if should_escape:\n val = saxutils.quoteattr(val)\n text = text.replace('{%s}' % key, val)\n output = text\n\n with open(filename, 'w') as out:\n out.write(output)", "def register_from_yaml(self, path_to_yaml: str) -> None:\n self._manifests.append(path_to_yaml)\n self._sync = False", "def add_metadata (self, name, value):\n self.metadata[name] = value\n return self", "def update_overrides(self, app, name, namespace,\n flag='reset', override_values=None):\n if override_values is None:\n override_values = {}\n body = {'flag': flag, 'values': override_values, 'attributes': {}}\n return self._update(self._path(app) +\n '?name=' + name +\n '&namespace=' + namespace, body)", "def set_parameter_value(self, parameter_name, new_value):\n self.description[\"config\"][\"values\"][parameter_name][\"value\"] = new_value\n ## Update MongoDB\n #self.mongo_client.cps2_project.objects.update_one(\n #{\"_id\": self.mongo_id},\n #{\"$set\": {\"config.values.\" + parameter_name + \".value\": new_value,\n #\"last_modified.value\": str(datetime.utcnow())}\n #}\n #)\n print(\"Switched the parameter \" + parameter_name + \" to \" + new_value + \" and updated MongoDB.\")", "def put(self, name, val):\n pass", "def rewrite(self):\n for f in self.files:\n metadata = dict()\n metadata[\"description\"] = f.metadata.get(\"desc\", \"Unknown\")\n metadata[\"script\"] = os.path.basename(f.filename)\n metadata[\"requires\"] = []\n for package, component in f.requires:\n if package == self.key:\n metadata[\"requires\"].append(\"/\" + component)\n else:\n metadata[\"requires\"].append(package + \"/\" + component)\n metadata[\"provides\"] = [ p[1] for p in f.provides ]\n # Resolve symlinks\n real_filename = os.path.realpath(f.filename)\n LOG.info(\"Editing: \" + real_filename)\n new_filename = f.filename + \".new\"\n new = file(new_filename, \"w\")\n new.write(\"/*\\n---\\n\")\n new.write(yaml.dump(metadata))\n new.write(\"\\n...\\n*/\\n\")\n new.write(file(f.filename).read())\n new.close()\n os.rename(new_filename, real_filename)\n\n package_data = dict()\n package_data[\"name\"] = self.key\n 
package_data[\"sources\"] = []\n package_data[\"version\"] = \"Unknown\"\n package_data[\"copyright\"] = \"Unknown\"\n package_data[\"description\"] = \"Unknown\"\n target_dir = os.path.dirname(self.scripts_json_filename)\n # package.yml is typically in the parent of the scripts.json dir\n if os.path.basename(target_dir) == \"Source\":\n target_dir = os.path.dirname(target_dir)\n target_filename = os.path.join(target_dir, \"package.yml\")\n for f in self.files:\n common = os.path.commonprefix([target_filename, f.filename])\n source_file = f.filename[len(common):]\n package_data[\"sources\"].append(source_file)\n LOG.info(\"Writing: \" + target_filename)\n out = file(target_filename, \"w\")\n out.write(yaml.dump(package_data))\n out.close()", "def change_config(filename, changes_set):\n # Get configs from file.\n configs, yaml = load_yaml(filename)\n\n # Update configs\n for key, value in changes_set:\n if key in configs:\n value = YAML(typ='safe').load(value)\n logger.info(f\"Changing value of '{key}': {configs[key]} --> {value}.\")\n configs[key] = value\n else:\n logger.warning(f\"KEY = {key} not in config. Config not updated with set ({key}, {value})\")\n\n # TODO: Activate this when schema file has been added\n # Confirm that configs is valid.\n # schema_path = pkg_resources.resource_filename(\"dbspro\", SCHEMA_FILE)\n # validate(configs, schema_path)\n\n # Write first to temporary file then overwrite filename.\n tmpfile = filename + \".tmp\"\n with open(tmpfile, \"w\") as file:\n yaml.dump(configs, stream=file)\n os.rename(tmpfile, filename)", "def update_tempest_conf_file(conf_file, rconfig):\n with open(TempestCommon.tempest_conf_yaml, encoding='utf-8') as yfile:\n conf_yaml = yaml.safe_load(yfile)\n if conf_yaml:\n sections = rconfig.sections()\n for section in conf_yaml:\n if section not in sections:\n rconfig.add_section(section)\n sub_conf = conf_yaml.get(section)\n for key, value in sub_conf.items():\n rconfig.set(section, key, value)\n\n with open(conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def update_dict(new,old):", "def updateVariables(self) -> None:\n ...", "def test_load_updates_dict(self):\n new_dict = {\n 'test_new_key': 'test_new_value',\n 'test_key1': 'new_value',\n }\n self.extension.registration.settings = new_dict\n self.settings.load()\n\n # Should have added test_new_key, and modified test_key1\n self.assertEqual(new_dict['test_new_key'],\n self.settings['test_new_key'])\n self.assertEqual(new_dict['test_key1'], self.settings['test_key1'])\n\n # Should have left test_key2 alone\n self.assertEqual(self.test_dict['test_key2'],\n self.settings['test_key2'])", "def value(self, new_value):\n for plug in self.plugs:\n plug.value = new_value", "def modify(self, payload):\r\n\t\tfor key, value in payload.items():\r\n\t\t\tsetattr(self, key, value)\r\n\t\tself.save()", "def update_game_items(self):\n _save_dict_to_file(self.get_game_items(), \"items.json\")", "def _update_entry(self, section, key, value):\n entries = section[key] if key in section else []\n if type(entries) != list:\n entries = [entries]\n if len(entries) < self.dom_id - 2:\n raise ValueError('Cannot set namelist value for domain %d, previous domains not filled out.' 
% self.dom_id)\n if len(entries) <= self.dom_id - 1:\n entries.append(value)\n else:\n entries[self.dom_id-1] = value\n section[key] = entries", "def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split(\"=\", maxsplit=1)\n keylist = keys.split(\".\")\n dic = self\n # print(keylist)\n if len(keylist) == 1:\n assert keylist[0] in dir(dic), \"Unknown config key: {}\".format(\n keylist[0]\n )\n for i, k in enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(k)\n dic = getattr(dic, k)\n key = keylist[-1]\n assert key in dir(dic), \"Unknown config key: {}\".format(key)\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)", "def apply_current_or_orig_values(override, current_version, args):\n keys = args.keys\n if current_version:\n print \"\\tUsing metadata values from {} version {}.\".format(\n current_version[\"name\"], current_version[\"version\"])\n for key in keys:\n current_val = current_version.get(key)\n if current_val:\n override[\"Input\"][\"pkginfo\"][key] = current_val\n else:\n default = override[\"Input_Original\"].get(\n \"pkginfo\", {}).get(key, \"\")\n choice = \"\"\n if not args.no_prompt:\n print \"\\tNo current '%s' value found to apply.\" % key\n print \"\\tRecipe specifies: '%s'\" % default\n choice = raw_input(\"\\tHit enter to use the recipe value, or \"\n \"enter a new value: \")\n override[\"Input\"][\"pkginfo\"][key] = (\n default if choice == \"\" else choice)", "def update(self, values: dict):\n if 'requirement_items' in values:\n # force update requirement items before all other fields\n self.requirement_items = values.pop('requirement_items')\n super().update(values)", "def update_studio_values(self, parent_values):\n raise NotImplementedError(\n \"{} does not have implemented `update_studio_values`\".format(self)\n )", "def update(self, **values):\n\n return self._put(\"\", None, values)", "def refresh(self) -> None:\n self.data = {}\n self.load_settings_file(self.default_settings_path / \"settings.yaml\", file_key=\"internal\")\n self.load_systems(self.default_settings_path / \"systems\")\n self.load_settings_file(self.personal_dir / \"settings.yaml\", file_key=\"user\")\n self.load_systems(self.personal_dir / \"systems\")", "def update_yaml_list(\n orig_lines: FileLines,\n key: OneOrTuple[str],\n new_value: List[str],\n *,\n filename: str,\n keep: Optional[Callable[[str], bool]] = None,\n replacements: Optional[Dict[str, str]] = None,\n) -> FileLines:\n if not isinstance(key, tuple):\n key = (key,)\n\n lines = iter(enumerate(orig_lines))\n current = 0\n indents = [0]\n for n, line in lines:\n stripped = line.lstrip()\n if not stripped or stripped.startswith('#'):\n continue\n indent = len(line) - len(stripped)\n if current >= len(indents):\n indents.append(indent)\n elif indent > indents[current]:\n continue\n else:\n while current > 0 and indent < indents[current]:\n del indents[current]\n current -= 1\n if stripped.startswith(f'{key[current]}:'):\n current += 1\n if current == len(key):\n break\n else:\n warn(f'Did not find {\".\".join(key)}: setting in {filename}')\n return orig_lines\n\n start = n\n end = n + 1\n indent = 2\n list_indent = None\n keep_before: List[str] = []\n keep_after: List[str] = []\n lines_to_keep = keep_before\n kept_last: Optional[bool] = False\n for n, line in lines:\n stripped = line.lstrip()\n line_indent = len(line) - len(stripped)\n if list_indent is None and stripped.startswith('- '):\n list_indent = line_indent\n if stripped.startswith('- ') 
and line_indent == list_indent:\n lines_to_keep = keep_after\n indent = line_indent\n end = n + 1\n value = stripped[2:].strip()\n kept_last = keep and keep(value)\n if kept_last:\n if replacements and value in replacements:\n lines_to_keep.append(\n f\"{' '* indent}- {replacements[value]}\\n\"\n )\n else:\n lines_to_keep.append(line)\n elif stripped.startswith('#'):\n lines_to_keep.append(line)\n end = n + 1\n elif line_indent > indent:\n if kept_last:\n lines_to_keep.append(line)\n end = n + 1\n elif line == '\\n':\n continue\n elif line[0] != ' ':\n break\n elif list_indent is not None and line_indent < list_indent:\n break\n\n new_lines = orig_lines[:start] + [\n f\"{' ' * indents[-1]}{key[-1]}:\\n\"\n ] + keep_before + [\n f\"{' ' * indent}- {value}\\n\"\n for value in new_value\n ] + keep_after + orig_lines[end:]\n return new_lines", "def pupdate(self):\n try:\n tmp = self.path_list[0]\n except IndexError:\n print(\"Empty value for env variable \", self.name)\n return\n\n for p in self.path_list[1:]:\n tmp = tmp + ':' + p\n self.val = tmp", "def test_indirect_parameters_update(renku_cli, project):\n with chdir(project.path):\n Path(\".renku/tmp\").mkdir(exist_ok=True)\n\n Path(\"script.sh\").write_text(\n \"\"\"\n echo param 1: \"forty-two\" >> .renku/tmp/parameters.yml\n echo param-2: 42.42 >> .renku/tmp/parameters.yml\n echo param3: 42 >> .renku/tmp/parameters.yml\n \"\"\"\n )\n\n project.repository.add(all=True)\n project.repository.commit(\"test setup\")\n\n renku_cli(\"run\", \"sh\", \"script.sh\", stdout=\"result\")\n\n with chdir(project.path):\n Path(\".renku/tmp\").mkdir(exist_ok=True)\n\n Path(\"script.sh\").write_text(\n \"\"\"\n echo param 1: \"forty-two-updated\" >> .renku/tmp/parameters.yml\n echo param-2: 42.42 >> .renku/tmp/parameters.yml\n \"\"\"\n )\n\n project.repository.add(all=True)\n project.repository.commit(\"test setup\")\n\n exit_code, activity = renku_cli(\"update\", \"--all\")\n\n assert 0 == exit_code\n assert {\"forty-two-updated\", \"42.42\", \"42\"} == {a.default_value for a in activity.association.plan.parameters}", "def update_plugin_data(self, entry):", "def _auto_save_update(self, plugin_id, change):\n name = change['name']\n value = change['value']\n if plugin_id in self._prefs:\n self._prefs[plugin_id][name] = value\n else:\n self._prefs[plugin_id] = {name: value}\n\n self._prefs.write()", "def save_to_yml_file(self):\n yml_filename = self.get_yml_filename()\n\n if os.path.exists(yml_filename) and not self.force:\n logger.warning(\n f\"[red]File {yml_filename} already exists, not writing. To override add --force.[/red]\"\n )\n else:\n if self.force:\n logger.info(\n f\"[yellow]Force flag is used. 
Overriding {yml_filename} if it exists.[/yellow]\"\n )\n if self.metadata:\n self.metadata.save_dict_as_yaml_integration_file(yml_filename)", "def __update(self):\n if self.__file:\n target_file = open(self.__file)\n for attr in dir(self):\n if not attr.startswith(\"_\") and \\\n (self.__overwrite or (attr not in self.__exclude)) \\\n and not self.__is_attr_callable(attr):\n try:\n delattr(self, attr)\n except AttributeError:\n pass\n pool = yaml.load(target_file)\n target_file.close()\n if pool: # could be None\n for key, val in pool.iteritems():\n if not key.startswith(\"_\") and \\\n (self.__overwrite or (key not in self.__exclude)) \\\n and not self.__is_attr_callable(key):\n setattr(self, key, val)\n if hasattr(self, 'log_config_file_changes')\\\n and self.log_config_file_changes:\n logging.getLogger(__name__).info(\"Config file has updated.\")", "def update_packages(self, config_file):\n entries = yacman.load_yaml(config_file)\n self.update(entries)\n return True", "def patch_config(filename, kms_map):\n try:\n # read and parse the packer configuration file\n with open(filename, \"r\") as fp:\n config = json.load(fp)\n except FileNotFoundError:\n eprint(f\"Packer configuration file not found: {filename}\")\n sys.exit(-1)\n\n # loop through all the packer builders\n for builder in config[\"builders\"]:\n # only modify AWS builders\n if builder.get(\"type\") != \"amazon-ebs\":\n continue\n builder[\"ami_regions\"] = \",\".join(kms_map.keys())\n builder[\"region_kms_key_ids\"] = kms_map\n\n # write the modified configuration back out\n with open(filename, \"w\") as fp:\n json.dump(config, fp, indent=2, sort_keys=True)" ]
[ "0.6856736", "0.6455863", "0.6232132", "0.6048177", "0.5998861", "0.5927065", "0.5708528", "0.56122386", "0.5609265", "0.560229", "0.5582246", "0.55429924", "0.552479", "0.5504513", "0.54904646", "0.54742396", "0.54720205", "0.54367936", "0.54282165", "0.53917795", "0.5384969", "0.5374541", "0.5321715", "0.5317371", "0.5312636", "0.52658564", "0.52367586", "0.52245927", "0.5220419", "0.5178973", "0.5149693", "0.5140179", "0.5134271", "0.5129903", "0.5123759", "0.5122478", "0.50908166", "0.50895214", "0.50690025", "0.5068509", "0.5065831", "0.50589234", "0.5051584", "0.504969", "0.50327116", "0.50286347", "0.5025597", "0.50246036", "0.5021189", "0.50168025", "0.50110614", "0.50057405", "0.5004923", "0.5003129", "0.4994292", "0.49866596", "0.49865162", "0.4986098", "0.49815282", "0.49810696", "0.49720597", "0.49607846", "0.49584252", "0.4951453", "0.4949241", "0.49487492", "0.49468765", "0.49454075", "0.49440694", "0.4940451", "0.4938274", "0.49315554", "0.49265197", "0.4920379", "0.4918462", "0.49152216", "0.4908149", "0.49008727", "0.48961565", "0.48951456", "0.4894873", "0.48930588", "0.48864883", "0.48829797", "0.48826176", "0.48791176", "0.48766974", "0.48732376", "0.48723942", "0.48719704", "0.48699266", "0.48644918", "0.48643535", "0.48594746", "0.48589104", "0.4858403", "0.48568088", "0.48560345", "0.48549864", "0.4854948" ]
0.69657123
0
Update chart with specified version or last-modified commit in path(s)
def build_chart(name, version=None, paths=None, reset=False):
    chart_file = os.path.join(name, 'Chart.yaml')
    with open(chart_file) as f:
        chart = yaml.load(f)

    if version is None:
        if paths is None:
            paths = ['.']
        commit = last_modified_commit(*paths)
        if reset:
            version = chart['version'].split('-')[0]
        else:
            version = chart['version'].split('-')[0] + '-' + commit

    chart['version'] = version

    with open(chart_file, 'w') as f:
        yaml.dump(chart, f)

    return version
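A minimal usage sketch for the function above (assumptions: `build_chart` and `last_modified_commit` are defined in the same module, and a chart directory named `mychart/` containing a `Chart.yaml` exists next to the script; the directory and path names are illustrative only, not taken from this row):

# Usage sketch — assumes build_chart/last_modified_commit above are in scope
# and that "mychart/Chart.yaml" exists in the working directory.
version = build_chart('mychart', paths=['mychart', 'images'])  # e.g. "0.1.0-abc1234"
print('Chart.yaml now carries version', version)

build_chart('mychart', version='0.2.0')          # pin an explicit release version
build_chart('mychart', paths=['.'], reset=True)  # drop the "-<commit>" suffix again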
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, commit, **kwargs):\n self._pkg_changes(commit=self.commit, **kwargs)\n self.commit = commit", "def updateLastCommitFile(self):\n f = open(self.last_released, 'w')\n f.write(self.new_rev)\n f.close()", "def update_chicago_graph(path=\"chicago.xml\"):\n\n\tChicago = download_chicago_graph()\n\tsave_chicago_graph(Chicago, path)", "def update_data(client, dataset_id, dataset_name, updated_dataset):\n view = client.views.lookup(dataset_id)\n revision = view.revisions.create_replace_revision(permission='private')\n upload = revision.create_upload(dataset_name)\n\n # The path of the updated dataset should be a string to the csv, geojson, shapefile zip, etc.\n if type(updated_dataset) == str:\n with open(updated_dataset, 'rb') as f:\n extension = os.path.splitext(updated_dataset)[1]\n if extension == '.csv':\n source = upload.csv(f)\n elif extension == '.xls':\n source = upload.xls(f)\n elif extension == 'xlsx':\n source = upload.xlsx(f)\n elif extension == '.tsv':\n source = upload.tsv(f)\n elif extension == '.zip':\n source = upload.shapefile(f)\n elif extension == '.kml':\n source = upload.kml(f)\n elif extension == '.geojson':\n source = upload.geojson(f)\n else:\n raise Exception('File format not supported')\n elif type(updated_dataset) == pd.DataFrame or type(updated_dataset) == gpd.GeoDataFrame:\n source = upload.df(updated_dataset)\n\n output_schema = source.get_latest_input_schema().get_latest_output_schema()\n\n output_schema = output_schema.wait_for_finish()\n\n # check for errors\n assert output_schema.attributes['error_count'] == 0\n print(output_schema.attributes['error_count'])\n\n # If you want, you can get a csv stream of all the errors\n errors = output_schema.schema_errors_csv()\n for line in errors.iter_lines():\n print(line)\n\n #############################################################################\n # The next few lines of code will update the draft/revision into the asset. #\n # Do not run if you plan on keeping your draft! 
#\n #############################################################################\n job = revision.apply(output_schema=output_schema)\n\n # This code outputs the status from the Job object\n # Track the async process\n def job_progress(job):\n clear_output(wait=True)\n print(job.attributes['log'][0]['stage'])\n print('Job progress:', job.attributes['status'])\n\n job = job.wait_for_finish(progress = job_progress)\n sys.exit(0 if job.attributes['status'] == 'successful' else 1)", "def update_from_repo():\n\treturn", "def updateVersionTag(deploymentPath, revision):\n fluidinfoPath = os.path.join(deploymentPath, revision, 'fluidinfo')\n with cd(fluidinfoPath):\n with open('deployment/api-version.txt') as versionFile:\n version = versionFile.read()\n sudo('bin/python bin/fluidinfo update-version-tag '\n 'postgres:///fluidinfo '\n '%s' % version,\n user='fluidinfo')", "def update(repository, args, **_):\n _log(repository, 'INFO', \"Going to build commit %s\" % args[2][:7])", "def _fetch_from_git(self, chart_name, version):\n\n def fetch_pull(ref):\n \"\"\" Do the fetch, checkout pull for the git ref \"\"\"\n origin.fetch(tags=True)\n repo.git.checkout(\"{}\".format(ref))\n repo.git.pull(\"origin\", \"{}\".format(ref))\n\n repo_path = '{}/{}'.format(\n self.config.archive,\n re.sub(r'\\:\\/\\/|\\/|\\.', '_', self.git)\n )\n\n logging.debug('Chart repository path: {}'.format(repo_path))\n if not os.path.isdir(repo_path):\n os.makedirs(repo_path)\n\n if not os.path.isdir(\"{}/.git\".format(repo_path)):\n repo = git.Repo.init(repo_path)\n else:\n repo = git.Repo(repo_path)\n\n sparse_checkout_file_path = \"{}/.git/info/sparse-checkout\".format(repo_path)\n\n # A path in the list implies that the Chart is at the root of the git repository.\n if self.path not in ['', '/', './', None]:\n\n self._chart_path = \"{}/{}\\n\".format(self.path, chart_name)\n\n repo.git.config('core.sparseCheckout', 'true')\n with open(sparse_checkout_file_path, \"ab+\") as scf:\n if self.path not in scf.readlines():\n scf.write(self._chart_path)\n logging.debug(\"Configuring sparse checkout for path: {}\".format(self.path))\n\n self._chart_path = \"{}/{}/{}\".format(repo_path, self.path, chart_name)\n\n if not self.config.local_development:\n if 'origin' in [remote.name for remote in repo.remotes]:\n origin = repo.remotes['origin']\n else:\n origin = repo.create_remote('origin', (self.git))\n\n try:\n fetch_pull(version)\n except GitCommandError, e:\n if 'Sparse checkout leaves no entry on working directory' in str(e):\n logging.warn(\"Error with path \\\"{}\\\"! Remove path when chart exists at the repository root\".format(self.path))\n logging.warn(\"Skipping chart {}\".format(chart_name))\n return False\n elif 'did not match any file(s) known to git.' 
in str(e):\n logging.warn(\"Branch/tag \\\"{}\\\" does not seem to exist!\".format(version))\n logging.warn(\"Skipping chart {}\".format(chart_name))\n return False\n else:\n logging.error(e)\n raise e\n except Exception, e:\n logging.error(e)\n raise e\n finally:\n # Remove sparse-checkout to prevent path issues from poisoning the cache\n logging.debug(\"Removing sparse checkout config\")\n if os.path.isfile(sparse_checkout_file_path):\n os.remove(sparse_checkout_file_path)\n repo.git.config('core.sparseCheckout', 'false')", "def update_path():\n #TODO update path information\n pass", "def statusupdate(filepath):\n pass", "def update(self, paths):\n raise NotImplementedError", "def update_version():\n version = os.environ.get('TRAVIS_COMMIT', None) or \\\n subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])\n version_file = path.join('slingsby', 'VERSION')\n with open(version_file, 'w') as fh:\n fh.write(version)", "def update(self, *path, **kwargs):\n if self.needs_update(*path):\n self.download(*path, **kwargs)", "def mark_obsolete_in_dataset( dataset_name, engine, table ):\n s = table.select( table.c.dataset_name==dataset_name ) \n result = conn.execute(s) # all rows of replica.files with the specified dataset_name\n\n sr = []\n srf = {}\n for row in result:\n # Note that you can loop through result this way only once.\n sr.append(row)\n fn = filename(row)\n if fn in srf:\n srf[fn].append(row)\n else:\n srf[fn] = [row]\n\n #sr.sort( key=filename )\n\n for fn,rows in srf.items():\n if len(rows)<=1: continue\n rows.sort( key=rowversion )\n print \"jfp will keep abs_path=\",rows[-1]['abs_path'],\"status=\",rows[-1]['status'],\\\n \"dataset_name=\",rows[-1]['dataset_name']\n for row in rows[0:-1]:\n abs_path = row['abs_path']\n dataset_name = \"old_\"+row['dataset_name']\n print \"jfp will do update for abs_path=\",abs_path,\"status from\",row['status'],\"to 50\"\n s = table.update().where( table.c.abs_path==abs_path ).\\\n values( status=50 )\n #if dataset_name.find('old_old_')!=0:\n # s = table.update().where( table.c.abs_path==abs_path ).\\\n # values( dataset_name=dataset_name )\n # ... doesn't work, you first have to create a row in replica.datasets with this name.\n result = conn.execute(s)", "def saveInGit(file_content, file_name, report_date):\n file_path = \"/\".join([crs_reports_dir,file_name])\n existed = os.path.isfile(file_path) \n if existed:\n # TODO Check that this specific version of this file isn't already\n # in the comment history\n pass\n with open(file_path, 'w') as f: \n f.write(file_content)\n f.close()\n gitAdd(file_name, crs_reports_dir)\n if existed:\n # TODO Set the commit date to be the CRS release date\n gitCommit(file_name, crs_reports_dir, '%s was updated' % file_name,\n report_date)\n else:\n gitCommit(file_name, crs_reports_dir, 'Added %s' % file_name,\n report_date)\n \n \n \n # 1.) If file_name exists:\n # 1.)overwrite it, \n # 2.) Commit an update to the file_name\n # else:\n # 1.) Create and save a new file\n # 2.) 
Commit the new file", "def updateStamp(self, depVersions, stamp):\n temp = self.newVersion()\n try:\n self.build(depVersions, temp)\n except:\n self.rollbackVersion(temp)\n raise\n self.commitVersion(temp, stamp)", "def get_and_update_versions ():\n\n try:\n get_comp_versions (\"ACE\")\n get_comp_versions (\"TAO\")\n\n if opts.update:\n files = []\n files += update_version_files (\"ACE\")\n files += update_version_files (\"TAO\")\n files += create_changelog (\"ACE\")\n files += create_changelog (\"TAO\")\n files += update_spec_file ()\n files += update_debianbuild ()\n\n commit (files)\n\n except:\n print (\"Fatal error in get_and_update_versions.\")\n raise", "def update(self, revision, data, user=None, save=True):\n version = super(FigshareFile, self).update(None, data, user=user, save=save)\n\n # Draft files are not renderable\n if data['extra']['status'] == 'drafts':\n return (version, u\"\"\"\n <style>\n .file-download{{display: none;}}\n .file-share{{display: none;}}\n </style>\n <div class=\"alert alert-info\" role=\"alert\">\n The file \"{name}\" is still a draft on figshare. <br>\n To view it on the OSF\n <a href=\"https://support.figshare.com/support/solutions\">publish</a>\n it on figshare.\n </div>\n \"\"\".format(name=markupsafe.escape(self.name)))\n\n return version", "def update_info_when_add(descriptor, rel_path_from_repository,\n mtime, file_sha1_hash, index_dict):\n # If the file is already tracked, update it\n if rel_path_from_repository in index_dict.keys():\n # If the file is already up to date, no need to rewrite.\n if (mtime == index_dict[rel_path_from_repository][0]\n and\n file_sha1_hash == index_dict[rel_path_from_repository][2]):\n return\n # Move the file descriptor to the correct position\n lseek(descriptor, index_dict[rel_path_from_repository][5], 0)\n # Update the timestamp. 
current sha1 hash, add sha1 hash\n update_file_index(descriptor, \" \".join([mtime,\n file_sha1_hash,\n file_sha1_hash]), 0)\n # Else add a new index line.\n else:\n lseek(descriptor, 0, 2)\n add_new_index(descriptor, mtime, file_sha1_hash,\n rel_path_from_repository)", "def updateFileInfo(self, data, pid):\n self.db.updateLinkInfo(data)\n self.evm.dispatchEvent(\"packageUpdated\", pid)", "def finish(c):\n files_to_commit = [os.path.relpath(path, start=os.curdir) for path in [CHANGELOG_ABSPATH, SETTINGS_PATH]]\n version: VersionStructure = VersionStructure.from_settings()\n\n c.run(f\"git add %s\" % \" \".join(files_to_commit))\n c.run(f'git commit -m \"version {version}\" --no-verify')\n c.run(f\"git tag {version}\")", "def update_version(self, version):\n self._metadata['version'] = version\n\n if self._type == '.json':\n with open(self._filename, 'w') as f:\n f.write(json.dumps(self._metadata, indent=2))\n\n dof_filename = os.path.join(self.path, self.name + '.dof')\n if os.path.isfile(dof_filename):\n dof_file = DOFFile(dof_filename)\n dof_file.update_version(version)", "def cmd_update(self):\n self.update_repository()\n results = self.results.getvalue()\n if results:\n print('---')\n print(results, end='')", "def _on_path(self, change):\n if change.new:\n self._git = G.Repo.init(change.new)\n ignore = Path(change.new) / \".gitignore\"\n if not ignore.exists():\n ignore.write_text(\".ipynb_checkpoints/\")\n self.commit(\"initial commit\")\n\n self._initialize_watcher()\n self._update_head_history()", "def run_sql_file(filename, connection, version, lastversion):\n cursor = connection.cursor()\n for line in open(filename):\n cursor.execute(line)\n connection.commit()\n cursor.execute(\n \"update ecs.versionTable SET ecs.versionTable.version='{}' \"\n \"where ecs.versionTable.version ='{}';\".format(version, lastversion))\n connection.commit()\n print(\"VersionTable updated. 
Current version is now: {}\".format(version))", "def do_project_update(cs, args):\n raise NotImplementedError", "def release(c, bump=\"patch\"):\n assert bump in [f.name for f in fields(VersionStructure)], f'\"{bump}\" is not a version part'\n\n old_version = VersionStructure.from_settings()\n new_version = VersionStructure.bump_version(old_version, part=bump)\n\n # collecting changelog\n print(f'Collecting changelog from the last version tag \"{old_version}\"...')\n\n result = c.run(f'git log \"{old_version}\"..HEAD --pretty=format:\"%s\"', hide=\"out\")\n\n commit_messages = filter(bool, result.stdout.splitlines())\n if not commit_messages:\n sys.stderr.write(\"Error: no new commits from last version, sorry\\n\")\n sys.exit(1)\n\n # updating changelog\n with open(CHANGELOG_ABSPATH, \"r+\", encoding=\"utf-8\") as changelog:\n old_changelog = changelog.read().strip()\n\n changelog.seek(0)\n print(\"Inserting this to changelog file:\\n-----\\n\")\n new_version_line = (\n VERSION_TITLE_TMPL.format(version=new_version, day=today().strftime(\"%Y-%m-%d\")) + \"\\n\"\n )\n changelog.write(new_version_line)\n\n print(new_version_line)\n for line in sorted(commit_messages): # sort commit messages for easier edition afterwards\n line: str = line.strip()\n if line.startswith(\"Merge\"):\n continue\n\n message_line = CHANGE_LINE_TMPL.format(line=line) + \"\\n\"\n print(message_line)\n changelog.write(message_line)\n\n changelog.write(\"\\n\")\n changelog.write(old_changelog)\n\n print(\"-----\")\n\n _set_settings_version(c, SETTINGS_PATH, str(new_version))", "def _update_head(self, index_entry, branch, new_id):\r\n index_entry['versions'][branch] = new_id\r\n self.db_connection.update_course_index(index_entry)", "def update_figure(entry_number, entry_date, x_axis, y_axis):\n global first\n \n if first:\n first = False\n return dash.no_update\n \n if isinstance(entry_date, dict):\n entry_date = datetime.datetime.strptime(entry_date['value'], \"%Y-%m-%dT%H:%M:%S\")\n else:\n entry_date = datetime.datetime.strptime(entry_date, \"%Y-%m-%dT%H:%M:%S\")\n \n # Filter Dataset for selected ENTRY_NO and ENTRY_DATE\n filtered_df = df[(df['ENTRY_NO'] == entry_number) & (df['ENTRY_DATE'] == entry_date)]\n filtered_df = filtered_df.drop(columns=['SNO','ENTRY_NO', 'ENTRY_DATE'])\n \n #Calculation for Line Chart\n sorted = filtered_df.sort_values(by=[x_axis]) \n line_chart = {'data': [{'x': sorted[x_axis], 'y': sorted[y_axis], 'type': 'line'},],}\n\n #Calculation for Bar Chart\n bar_chart={\"data\": [{\"x\": filtered_df[x_axis],\"y\": filtered_df[y_axis], \"type\": \"bar\",}],}\n \n \n #Calculation for Pie Chart\n summation = filtered_df.sum(axis = 0, skipna = True, numeric_only= True)\n label, values = [], []\n for s in summation.iteritems():\n label.append(s[0])\n values.append(s[1])\n pie_chart = px.pie(df, values=values, names=label)\n \n return px.scatter(filtered_df, x=x_axis, y=y_axis), line_chart, bar_chart , pie_chart", "def updateToLatest(self):\n # Determine the newest stamp in each dependency\n latest = {}\n for item in self.spec:\n if isinstance(item, CachedResource):\n latest[item] = item.getLatestStamp()\n if not latest:\n return\n\n # Our new stamp is the greatest out of all deps' stamps\n stamp = max(latest.itervalues())\n\n # Update only if we need to\n if self.getLatestStamp() >= stamp:\n return\n self.updateStamp(latest, stamp)\n\n # Clean old versions if that was successful\n self.cleanStamps(lambda s: s < stamp)", "def update_versioned_target(self, vt):\n 
self._cache_manager.update(vt.cache_key)", "def updateMdrizVerHistory(self,build,versions):\n _plist = self.assoc.parlist[0]\n if build == True: _output = _plist['output']\n else: _output = _plist['outdata']\n \n fhdu = pyfits.open(_output,mode='update')\n prihdr = fhdu[0].header\n \n ver_str = \"MultiDrizzle product generated using: \"\n prihdr.add_history(ver_str)\n \n for key in versions:\n if versions[key].find('\\n') < 0:\n prihdr.add_history(key+versions[key])\n else:\n # This will accomodate multi-line comments\n _ver_str = versions[key].split('\\n')\n prihdr.add_history(key)\n for val in _ver_str:\n if val.strip() != '':\n prihdr.add_history(val)\n \n #ver_str = ' MultiDrizzle Version '+str(version)\n #prihdr.add_history(ver_str)\n \n fhdu.close()\n del fhdu", "def update_pkg_metadata(self, pkg, version=None, **kwargs):\n pass", "def update(*args):", "def updateGrafana(self, data):\r\n try:\r\n if \"version\" not in self.versions[\"grafana\"] or self.mission_name + \"_\" + data[\"grafana\"][\"version\"] != self.versions[\"grafana\"][\"version\"]:\r\n downloadAndReplaceFile(self.config.get_conf(\"Client\", \"grafana-database\"), data[\"grafana\"][\"link\"])\r\n self.versions[\"grafana\"][\"version\"] = self.mission_name + \"_\" + data[\"grafana\"][\"version\"]\r\n self._logger.info(\"Grafana updated to version: \" + data[\"grafana\"][\"version\"])\r\n except Exception as e:\r\n self._logger.error(\"Failed to update Grafana configuration due to an exception: \" + str(e))", "def save(self, **kwargs):\n if self.history is not None:\n if self.revision == 0:\n self.update_revision_from_history(self.history)\n\n self.history.last_diff_updated = self.timestamp\n self.history.save()\n\n super(DiffSet, self).save(**kwargs)", "def update_historical_graph(dropdown_historical_symbol, dateselector_historical_start, dateselector_historical_end,\n dropdown_historical_interval):\n\n # Variables to update\n ticker = dropdown_historical_symbol\n start = app_obj.utils.parse_date(dateselector_historical_start).date()\n end = app_obj.utils.parse_date(dateselector_historical_end).date()\n interval = dropdown_historical_interval\n\n df = dl.equities.get_historical(tickers=ticker, start_date=start, end_date=end, interval=interval)\n\n return app_obj.figures.build_ohlcv(df, title=f'{ticker} - Historical OHLCV ({start} to {end})')", "def push_updates(update_alias, config, path_or_url):\n api = get_packit_api(config=config, local_project=path_or_url)\n api.push_updates(update_alias)", "def update_command(arguments: List[str]) -> None:\n if len(arguments) != 3:\n print('Required 2 argument for update command') # noqa: WPS421\n return\n token = token_load.load()\n logic.update(token, gist_id=arguments[1], filename=arguments[2])", "def set_version(self, bundle, ctx, filename, version):", "def project_changed(self, day_idx):\n self.is_modified = True\n self.fire_project_changed(ChartProject.CHANGED)", "def main(args):\n\n try:\n repo = RpmGitRepository(args.gitdir)\n except GitRepositoryError, err:\n raise GbsError(str(err))\n\n packaging_dir = get_packaging_dir(args)\n changes_file_list = glob.glob(\"%s/%s/*.changes\" % (repo.path,\n packaging_dir))\n\n if args.spec or not changes_file_list:\n # Create .changes file with the same name as a spec\n specfile = os.path.basename(guess_spec(repo.path,\n packaging_dir, args.spec)[0])\n fn_changes = os.path.splitext(specfile)[0] + \".changes\"\n fn_changes = os.path.join(repo.path, packaging_dir, fn_changes)\n else:\n fn_changes = changes_file_list[0]\n if 
len(changes_file_list) > 1:\n log.warning(\"Found more than one changes files, %s is taken \"\n % (changes_file_list[0]))\n\n # get the commit start from the args.since\n commitid_since = get_first_commit(repo, fn_changes, args.since)\n\n commits = repo.get_commits(commitid_since, 'HEAD')\n if not commits:\n raise GbsError(\"Nothing found between %s and HEAD\" % commitid_since)\n\n if args.message:\n author = repo.get_author_info()\n lines = [\"- %s\" % line for line in args.message.split(os.linesep) \\\n if line.strip()]\n new_entries = [\"* %s %s <%s> %s\" % \\\n (datetime.datetime.now().strftime(\"%a %b %d %Y\"),\n author.name, author.email,\n get_version(repo, commits[0]))]\n new_entries.extend(lines)\n else:\n new_entries = make_log_entries(commits, repo)\n\n content = get_all_entries(fn_changes, new_entries)\n if edit_file(fn_changes, content):\n log.info(\"Change log has been updated.\")\n else:\n log.info(\"Change log has not been updated\")", "def main():\n smart_commit_msg_filename = SMART_COMMIT_MSG_FILENAME\n paths = get_staged_paths()\n if not len(paths):\n raise Exception(\"did you even add anything to staging\")\n paths += [smart_commit_msg_filename]\n mr_edited_file = max(paths, key=lambda k: os.path.getmtime(k))\n if mr_edited_file == smart_commit_msg_filename:\n print(git_commit())\n else:\n print(\"Update the patch notes!\")", "def update(self, co_dir):\n self.run_task(' '.join(['svn', 'up', co_dir]))\n pass", "def __update(self, filename, mtime, pb, thumbnail_cache, have_thumbnail=True):\n self.__lock_acquire()\n path = 0 \n for i in self.__view.model:\n if i[thumbnail_view.MDL_FILENAME].replace('\\n','') == filename:\n if self.__view.iconview.path_is_selected((path,)):\n self.__view.update_last_selection(pb.copy())\n self.__view.add_rectangle_for_pixbuf(pb)\n i[thumbnail_view.MDL_DISP_THUMB] = pb\n i[thumbnail_view.MDL_PATH] = thumbnail_cache\n i[thumbnail_view.MDL_HAVE_THUMB] = have_thumbnail\n i[thumbnail_view.MDL_MTIME] = mtime\n self.__lock_release()\n return True\n path = path + 1\n self.__lock_release()\n return False", "def _post_update_paths(self, **kwargs):\n\n files_updated = kwargs.get('files_updated', list())\n if not files_updated:\n return\n\n maya_utils.reload_textures(files_updated)\n\n # Dependencies are already reloaded during update paths process\n # maya_utils.reload_dependencies(files_updated)", "def commit(self):\n run('git', 'add', '.')\n run('git', 'commit', '-a', '-m', 'updates')", "def GraphDeltaVsVers(data, args):\n p = data[args]['deltasize']\n vers = sorted(v for v in p if 'S1' not in v)\n #sizes = sorted(p[vers[0]])\n for size in sizeticks:\n deltas = [p[ver][size]/size for ver in vers]\n plt.plot(vers, deltas, label=\"%sM\" % size)\n #plt.xscale('log')\n #plt.yscale('log')\n saveplt('data/file-vers-%s-%s.svg' % (args,'delta'), 'deltasize vs version for %s' % args,\n 'version', 'ratio', vers)", "def svn_fs_paths_changed(*args):\r\n return _fs.svn_fs_paths_changed(*args)", "def update_build_commit_documents(self, build_data):\n\n product, version, prev_build_num = (\n build_data[key] for key\n in ['product', 'version', 'prev_build_num']\n )\n prev_build_data = self.db.get_document(\n f'{product}-{version}-{prev_build_num}'\n )\n\n for project, shas in build_data['manifest'].items():\n new_shas = [sha.replace(f'{project}-', '').encode('utf-8')\n for sha in shas]\n old_shas = [sha.replace(f'{project}-', '').encode('utf-8')\n for sha in prev_build_data['manifest'].get(\n project, []\n )]\n\n diff_walker = 
cbutil_git.DiffWalker(self.repo_base_path / project)\n diff_commits = diff_walker.walk(old_shas, new_shas)\n\n if not diff_commits:\n continue\n\n if old_shas:\n commit_ids = [f'{project}-{commit.id.decode()}'\n for commit in diff_commits]\n else:\n # Only keep most recent commit for new projects\n commit_ids = [f'{project}-{diff_commits[0].id.decode()}']\n\n build_name = build_data['key_']\n logging.debug(f'Updating {build_name} build document for '\n f'the following commits: {\", \".join(commit_ids)}')\n build_document = self.db.get_document(build_name)\n build_document['commits'].extend(commit_ids)\n self.db.upsert_documents({build_name: build_document})\n\n for commit_id in commit_ids:\n commit_document = self.db.get_document(commit_id)\n\n # The check protects from duplicate build document IDs\n # for a commit (potentially due to a loading failure)\n if build_name not in commit_document['in_build']:\n commit_document['in_build'].append(build_name)\n\n self.db.upsert_documents({commit_id: commit_document})", "def update_versions(consumer, resource_versions):\n _get_cached_tracker().update_versions(consumer, resource_versions)", "def update_plot():\n pass", "def updateDoc(self, path):\n self.db.setDb(self.db_file)\n \n if not self.authd:\n self._authorize()\n \n db_row = self.db.getRowFromPath(path)\n if not db_row:\n return False\n \n resource_id = db_row[0]\n etag = db_row[1]\n title = db_row[2]\n \n ms = gdata.data.MediaSource(file_path=path, content_type=MIMETYPES['ODT'])\n doc = self.client.GetDoc(resource_id.replace(':', '%3A'))\n new_version = self.client.Update(doc, media_source=ms)\n print 'Document pushed:', new_version.GetAlternateLink().href\n \n self.db.resetEtag(new_version)", "def update_handler(self, addr, request):\n commits = self.api.get_commits(start_from=self.sessions[addr][\"last_commit\"])\n from engine.commit_container.commit import Commit\n t, e = Commit.Schema(many=True).dump(commits[\"commits\"])\n\n response = {\n \"type\" : \"update\",\n \"commit_range\" : {\n \"commits\" : t\n }\n }\n\n self.sessions[addr][\"last_commit\"] = self.api.get_last_commit_id()\n return Header.RESPONSE, json.dumps(response)", "def update(self, path):\n # pylint: disable=E1101\n # E1101: pylint could not resolve the depth attribute.\n \n self._sharedState.lock.acquire()\n try:\n try:\n self._client.cleanup(self.workingCopyPath)\n self._client.revert(self._workingCopyPath + path, True)\n self._client.update(self._workingCopyPath + path, depth=pysvn.depth.infinity )\n except ClientError, error:\n raise SubversionError(error)\n finally:\n self._sharedState.lock.release()", "def bumpversion(path=\"setup.cfg\"):\n config = ConfigParser()\n config.read(path)\n cfg = open(path, 'w')\n new_version = \"0.0.0\"\n if config.has_option('metadata', 'version'):\n old_version = config.get('metadata', 'version')\n major, minor, patch = old_version.split(\".\")\n new_version = \"%s.%s.%s\" % (major, minor, int(patch) + 1)\n if not config.has_section('metadata'):\n config.add_section('metadata')\n config.set('metadata', 'version', new_version)\n config.write(cfg)\n cfg.close()\n return new_version", "def up_to_date(self, gyp_file, target=None, **kw):\n raise NotImplementedError", "def update_from_vcs(self, vcs, submodule_path = None):\n\n full_path = os.path.abspath(self.filename)\n vcs.add_file(full_path, submodule_path)", "def update_revision_empty(conduit, revision_id):\n\n empty_diff = \"diff --git a/ b/\"\n diff_id = create_raw_diff(conduit, empty_diff).id\n update_revision(conduit, 
revision_id, diff_id, [], 'update')", "def do_update(url,indexHeaders,update_file):\n updateUrl=url.replace(\"buckets\",\"riak\")\n indexHeaders['content-type'] = 'application/json'\n r=requests.post(url, data=json.dumps(update_file), headers=indexHeaders)", "def update_repo_cli(api_client, repo_id, branch, tag, path):\n id_from_param_or_path = (repo_id if repo_id is not None\n else ReposApi(api_client).get_repo_id(path))\n content = ReposApi(api_client).update(id_from_param_or_path, branch, tag)\n click.echo(pretty_format(content))", "def update(self, tree_path, value):\n\t\traise NotImplementedError", "def test_unreleased_version_label_string(self):\n\n gitchangelog.file_put_contents(\n \".gitchangelog.rc\",\n \"unreleased_version_label = 'bar'\")\n changelog = w('$tprog \"HEAD^..HEAD\"')\n self.assertNoDiff(\n textwrap.dedent(\"\"\"\\\n bar\n ---\n\n New\n ~~~\n - Begin. [Bob]\n\n\n \"\"\"),\n changelog)", "def svn_fs_paths_changed2(*args):\r\n return _fs.svn_fs_paths_changed2(*args)", "def commitVersion(self, tempFile, stamp):\n os.rename(tempFile, self.getFile(stamp))", "def update():", "def update():", "def create_work_version(work, data):\n new = copy.deepcopy(data)\n workc = copy.deepcopy(work)\n key = list(workc.keys())[0]\n new['path'] = new['path'].replace(new['params'][key], workc[key])\n new['version_info'] = dict(zip(('name', 'number'), list(workc.items())[0]))\n new['version'].update(workc)\n new['params'].update(workc)\n new['release'] = 'WORK'\n return new", "def update_ext():\n panel_id = request.json['panel_id']\n region_id = request.json['region_id']\n e3 = request.json[\"ext_3\"]\n e5 = request.json[\"ext_5\"]\n\n current_version = get_current_version(s, panel_id)\n version = get_version_row(s, panel_id, region_id, current_version)\n version_id = version[0]\n intro = version[1]\n\n if e3 is not None:\n ext_3 = e3\n else:\n ext_3 = version[3]\n if e5 is not None:\n ext_5 = e5\n else:\n ext_5 = version[4]\n if int(intro) > int(current_version):\n update_ext_query(s, version_id, ext_3=ext_3, ext_5=ext_5)\n else:\n update_ext_query(s, version_id, panel_id=panel_id, ext_3=ext_3, ext_5=ext_5, current_version=current_version,\n region_id=region_id)\n\n return jsonify(\"complete\")", "def last_modified_commit(*paths, **kwargs):\n return check_output([\n 'git',\n 'log',\n '-n', '1',\n '--pretty=format:%h',\n '--',\n *paths\n ], **kwargs).decode('utf-8')", "def git_sync(commit_ish, force, last_tag, reset, url, directory):\n git_sync_(url, directory, commit_ish, force=force, last_tag=last_tag, reset=reset)", "def _do_houdini_post_publish(self, work_template, progress_cb):\n import hou\n \n progress_cb(0, \"Versioning up the script\")\n\n # get the current script path:\n original_path = hou.hipFile.name()\n script_path = os.path.abspath(original_path)\n\n # increment version and construct new name:\n progress_cb(25, \"Finding next version number\")\n fields = work_template.get_fields(script_path)\n next_version = self._get_next_work_file_version(work_template, fields)\n fields[\"version\"] = next_version\n new_path = work_template.apply_fields(fields)\n\n # log info\n self.parent.log_debug(\"Version up work file %s --> %s...\" % (script_path, new_path))\n\n # save the script:\n progress_cb(75, \"Saving the scene file\")\n hou.hipFile.save(new_path)\n\n progress_cb(100)", "def GraphTimeVsVers(data, args, cmd):\n p = data[args][cmd]\n vers = sorted(v for v in p if 'S1' not in v)\n #sizes = sorted(p[vers[0]])\n for size in sizeticks:\n times = [p[ver][size][0] for ver in vers]\n 
plt.plot(vers, times, label=\"%sM\" % size)\n ax = plt.gca()\n if cmd == 'delta':\n ax.yaxis.set_major_locator(MultipleLocator(60))\n ax.set_ylim(bottom=0)\n #plt.xscale('log')\n saveplt('data/time-vers-%s-%s.svg' % (args,cmd), '%s times vs version for %s' % (cmd, args),\n 'version', 'time', vers)", "def update_changelog(package_id: str, base_branch: str, verbose: bool):\n if _update_changelog(package_id, base_branch, verbose, True):\n sys.exit(64)", "def get_diffs(history):\n\n # First get all possible representations\n mgr = plugins_get_mgr() \n keys = mgr.search('representation')['representation']\n representations = [mgr.get_by_key('representation', k) for k in keys]\n\n for i in range(len(history)):\n if i+1 > len(history) - 1:\n continue\n\n prev = history[i]\n curr = history[i+1]\n\n #print(prev['subject'], \"==>\", curr['subject'])\n #print(curr['changes'])\n for c in curr['changes']:\n \n path = c['path']\n\n # Skip the metadata file\n if c['path'].endswith('datapackage.json'): \n continue \n\n # Find a handler for this kind of file...\n handler = None \n for r in representations: \n if r.can_process(path): \n handler = r \n break \n \n if handler is None: \n continue \n\n # print(path, \"being handled by\", handler)\n\n v1_hex = prev['commit']\n v2_hex = curr['commit']\n\n temp1 = tempfile.mkdtemp(prefix=\"dgit-diff-\") \n \n try: \n for h in [v1_hex, v2_hex]: \n filename = '{}/{}/checkout.tar'.format(temp1, h)\n try:\n os.makedirs(os.path.dirname(filename))\n except:\n pass \n extractcmd = ['git', 'archive', '-o', filename, h, path]\n output = run(extractcmd)\n if 'fatal' in output: \n raise Exception(\"File not present in commit\") \n with cd(os.path.dirname(filename)): \n cmd = ['tar', 'xvf', 'checkout.tar']\n output = run(cmd) \n if 'fatal' in output: \n print(\"Cleaning up - fatal 1\", temp1)\n shutil.rmtree(temp1)\n continue \n\n # Check to make sure that \n path1 = os.path.join(temp1, v1_hex, path) \n path2 = os.path.join(temp1, v2_hex, path) \n if not os.path.exists(path1) or not os.path.exists(path2): \n # print(\"One of the two output files is missing\") \n shutil.rmtree(temp1)\n continue \n\n #print(path1, path2) \n\n # Now call the handler\n diff = handler.get_diff(path1, path2)\n\n # print(\"Inserting diff\", diff)\n c['diff'] = diff\n\n except Exception as e: \n #traceback.print_exc() \n #print(\"Cleaning up - Exception \", temp1)\n shutil.rmtree(temp1)", "def svn_notify(self,event):\n # pysvn.wc_notify_action.update_completed\n if event['action'] == pysvn.wc_notify_action.update_completed:\n revision = event['revision']\n self.revision = revision", "async def version_command(self, ctx):\n member = ctx.message.server.get_member(self.bot.user.id)\n current_commit = get_current_commit()\n commit_url = member.game.url + '/commit/' + current_commit\n msg = await self.bot.send_message(ctx.message.channel, 'I am currently running on commit `{}`\\n\\n{}'.format(current_commit, commit_url))", "def _update_modified_data_sources(self):\n new_last_imported = datetime.utcnow()\n self._update_modified_since(self.last_imported)\n self.last_imported = new_last_imported", "def update_which_sde_data(\n current_sde_df,\n latest_esi_df,\n index_key\n):\n pass", "def fedora_update(container_path, update_body={}):\n initial_data = fedora_get(container_path)\n updated_data = {**initial_data, **update_body}\n updated_data['@context'] = {**initial_data['@context'], **FEDORA_CONTEXT}\n updated_data['@type'] = initial_data['@type'] + ['http://purl.org/dc/dcmitype/Dataset']\n request = 
requests.put('{}{}/{}'.format(settings.FEDORA_URL,\n PUBLICATIONS_CONTAINER, container_path),\n auth=(settings.FEDORA_USERNAME,\n settings.FEDORA_PASSWORD),\n headers=FEDORA_HEADERS,\n data=json.dumps(updated_data))\n request.raise_for_status()\n return fedora_get(container_path)", "def test_updateVersion(self):\n project = self.makeProject(Version(\"bar\", 2, 1, 0))\n newVersion = Version(\"bar\", 3, 2, 9)\n project.updateVersion(newVersion)\n self.assertEquals(project.getVersion(), newVersion)\n self.assertEquals(\n project.directory.child(\"topfiles\").child(\"README\").getContent(),\n \"3.2.9\")", "def updateVersions(self):\r\n f = open('../versions.pckl', 'wb')\r\n pickle.dump(self.versions, f)\r\n f.close()", "def update_ref(ref, value):\n subprocess.check_call([\"git\", \"update-ref\", ref, value])", "def update_commitments(commitment=None, external_to_remove=None,\n external_to_add=None):\n c = {}\n if os.path.isfile(PODLE_COMMIT_FILE):\n with open(PODLE_COMMIT_FILE, \"rb\") as f:\n try:\n c = json.loads(f.read())\n except ValueError:\n print \"the file: \" + PODLE_COMMIT_FILE + \" is not valid json.\"\n sys.exit(0)\n\n if 'used' in c:\n commitments = c['used']\n else:\n commitments = []\n if 'external' in c:\n external = c['external']\n else:\n external = {}\n if commitment:\n commitments.append(commitment)\n #remove repeats\n commitments = list(set(commitments))\n if external_to_remove:\n external = {\n k: v for k, v in external.items() if k not in external_to_remove}\n if external_to_add:\n external.update(external_to_add)\n to_write = {}\n to_write['used'] = commitments\n to_write['external'] = external\n with open(PODLE_COMMIT_FILE, \"wb\") as f:\n f.write(json.dumps(to_write, indent=4))", "def _parse_latest_update(self, resp: Dict[str, Any], latest_version: str) -> str:\n latest_release = resp.get(\"releases\", {}).get(latest_version)\n if latest_release is not None and isinstance(latest_release, list):\n release_artifact_dates = []\n for artifact in latest_release:\n try:\n upload_time = artifact.get(\"upload_time_iso_8601\")\n parsed_upload_time = dateutil.parser.isoparse(upload_time)\n release_artifact_dates.append(parsed_upload_time)\n except Exception:\n pass\n latest_artifact_timestamp = max(release_artifact_dates)\n return latest_artifact_timestamp.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n return \"\"", "def rsIncrementOutputVersion(args):\n\n lyr = rsUtility.activeLayer.name()\n versions = rsRenderOutput.getVersions(lyr)\n\n global rsRenderOutput\n rsRenderOutput = renderOutput.RenderOutput()\n\n if not versions:\n print '# Unable to increment version. 
No versions folders exists (yet).'\n return\n\n versions = [int(re.sub('[^0-9]', '', f)) for f in versions]\n if not versions:\n return\n\n incremented_version_string = 'v{0}'.format(str(max(versions) + 1).zfill(3))\n rsRenderOutput.addVersionDir(lyr, incremented_version_string)\n rsRenderOutput.setVersion(incremented_version_string)\n _outputTemplate()\n _updatePathText()", "def svn_client_update(svn_revnum_t_result_rev, char_path, svn_opt_revision_t_revision, svn_boolean_t_recurse, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def update(pkg_name):\n\n vendor_file = os.path.join('vendor', 'vendor.json')\n target = 'golang.org/x/{}'.format(pkg_name)\n\n with open(vendor_file) as content:\n deps = json.load(content)\n packages = [dep['path'] for dep in deps['package'] if dep['path'].startswith(target)]\n revision = '@{revision}'.format(revision=args.revision) if args.revision else ''\n packages = ['{pkg}{revision}'.format(pkg=pkg, revision=revision) for pkg in packages]\n cmd = ['govendor', 'fetch'] + packages\n if args.verbose:\n print(' '.join(cmd))\n subprocess.check_call(cmd)", "def update( self, trans, payload, **kwd ):\n repository_metadata_id = kwd.get( 'id', None )\n try:\n repository_metadata = metadata_util.get_repository_metadata_by_id( trans, repository_metadata_id )\n flush_needed = False\n for key, new_value in payload.items():\n if hasattr( repository_metadata, key ):\n old_value = getattr( repository_metadata, key )\n setattr( repository_metadata, key, new_value )\n if key in [ 'tools_functionally_correct', 'time_last_tested' ]:\n # Automatically update repository_metadata.time_last_tested.\n repository_metadata.time_last_tested = datetime.datetime.utcnow()\n flush_needed = True\n if flush_needed:\n trans.sa_session.add( repository_metadata )\n trans.sa_session.flush()\n except Exception, e:\n message = \"Error in the Tool Shed repository_revisions API in update: %s\" % str( e )\n log.error( message, exc_info=True )\n trans.response.status = 500\n return message\n repository_metadata_dict = repository_metadata.as_dict( value_mapper=default_value_mapper( trans, repository_metadata ) )\n repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',\n action='show',\n id=trans.security.encode_id( repository_metadata.id ) )\n return repository_metadata_dict", "def update_version_files (component):\n\n vprint (\"Updating version files for \" + component)\n\n retval = []\n\n ## Update component/VERSION.txt\n path = get_path(component, \"VERSION.txt\")\n with open (path, \"r+\") as version_file:\n new_version = re.sub (component + \" version .*\",\n \"%s version %s, released %s\" % (component,\n comp_versions[component + \"_version\"],\n release_date),\n version_file.read ())\n if opts.take_action:\n version_file.seek (0)\n version_file.truncate (0)\n version_file.write (new_version)\n else:\n print (\"New version file for \" + component)\n print (new_version)\n\n vprint (\"Updating Version.h for \" + component)\n\n retval.append(path)\n\n ## Update COMPONENT/component/Version.h\n comp_l = len(component + \"_\")\n parts = {k[comp_l:]:v for (k, v) in comp_versions.items() if k.startswith(component)}\n parts[\"comp\"] = component\n version_header = \"\"\"\n// -*- C++ -*-\n// This is file was automatically generated by $ACE_ROOT/bin/make_release.py\n\n#define {comp}_MAJOR_VERSION {major}\n#define {comp}_MINOR_VERSION {minor}\n#define {comp}_MICRO_VERSION {micro}\n#define {comp}_VERSION 
\\\"{version}\\\"\n#define {comp}_VERSION_CODE 0x{code:x}\n#define {comp}_MAKE_VERSION_CODE(a,b,c) (((a) << 16) + ((b) << 8) + (c))\n\"\"\".format(**parts)\n\n path = get_path(component, component.lower (), \"Version.h\")\n if opts.take_action:\n with open (path, 'w+') as version_h:\n version_h.write (version_header)\n else:\n print (\"New Version.h for \" + component)\n print (version_header)\n\n retval.append(path)\n\n # Update component/PROBLEM-REPORT-FORM\n vprint (\"Updating PRF for \" + component)\n\n version_line_re = re.compile (r\"^\\s*(\\w+) +VERSION ?:\")\n path = get_path(component, \"PROBLEM-REPORT-FORM\")\n\n with open (path, 'r+') as prf:\n new_prf = \"\"\n for line in prf.readlines ():\n match = version_line_re.search (line)\n if match is not None:\n vprint (\"Found PRF Version for \" + match.group (1))\n new_version = comp_versions[match.group(1) + \"_version\"]\n line = version_re.sub (new_version, line)\n\n new_prf += line\n\n if opts.take_action:\n prf.seek (0)\n prf.truncate (0)\n prf.writelines (new_prf)\n else:\n print (\"New PRF for \" + component)\n print (\"\".join (new_prf))\n\n retval.append(path)\n\n return retval", "def update_latest_branch (product, which, main_branch):\n\n name = \"Latest_ACE7TAO3_\" + which\n\n vprint ('Fast-forwarding', name, 'to', main_branch)\n ex (\"cd $DOC_ROOT/\" + product + \" && git fetch . \" + main_branch + \":\" + name)", "def _update_modified_since(self, timestamp):\n for data_source in self.data_source_provider.get_data_sources_modified_since(timestamp):\n pillow_logging.info(f'updating modified registry data source: {data_source.domain}: {data_source._id}')\n self._add_or_update_data_source(data_source)", "def update_version(self, new_version):\n if new_version is not None:\n self.version_details = json.loads(new_version)\n\n # Update port file.\n http_port = self.version_details['appscaleExtensions']['httpPort']\n version_key = VERSION_PATH_SEPARATOR.join(\n [self.project_id, self.service_id, self.version_id])\n port_file_location = os.path.join(\n CONFIG_DIR, 'port-{}.txt'.format(version_key))\n with open(port_file_location, 'w') as port_file:\n port_file.write(str(http_port))\n\n logger.info('Updated version details: {}'.format(version_key))\n if self.callback is not None:\n self.callback()", "def update_audit_info(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n progress_controller.maximum = 2\n\n from stalker.db.session import DBSession\n from stalker import LocalSession\n\n with DBSession.no_autoflush:\n local_session = LocalSession()\n logged_in_user = local_session.logged_in_user\n progress_controller.increment()\n\n if logged_in_user:\n # update the version updated_by\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n if v:\n v.updated_by = logged_in_user\n\n from stalker.db.session import DBSession\n\n DBSession.commit()\n progress_controller.increment()\n progress_controller.complete()", "def fix_cc3d_version(version: str, build_number: str, git_hash: str, cc3d_git_path: Path):\n\n init_path = cc3d_git_path.joinpath('cc3d', '__init__.py')\n init_tmp_path = cc3d_git_path.joinpath('cc3d', '__init__.py.tmp')\n if not init_path.exists():\n raise RuntimeError(f'File {init_path} does not exist and cannot be modified for setting the version')\n\n version_set = False\n revision_set = False\n git_hash_set = False\n with init_path.open('r') as init_in, init_tmp_path.open('w') as init_out:\n for line in 
init_in.readlines():\n if line.startswith('__version__ = '):\n init_out.write(f'__version__ = \"{version}\"\\n')\n version_set = True\n elif line.startswith('__revision__ = '):\n init_out.write(f'__revision__ = \"{build_number}\"\\n')\n revision_set = True\n elif line.startswith('__githash__ = '):\n init_out.write(f'__githash__ = \"{git_hash}\"\\n')\n git_hash_set = True\n else:\n init_out.write(f'{line}')\n\n if not version_set:\n raise RuntimeError(f'Did not succeed setting version to {version}. Check formatting in {init_path}')\n\n if not revision_set:\n raise RuntimeError(f'Did not succeed setting revision to {build_number}. Check formatting in {init_path}')\n\n if not git_hash_set:\n raise RuntimeError(f'Did not succeed setting git_hash to {git_hash}. Check formatting in {init_path}')\n\n shutil.copy(init_tmp_path, init_path)\n os.remove(init_tmp_path)", "def update(self, args):\n pass", "def patch_repos(self):", "def DoUpdate(options, args):\n client = GClient.LoadCurrentConfig(options)\n\n if not client:\n raise gclient_utils.Error(\"client not configured; see 'gclient config'\")\n\n if not options.head:\n solutions = client.GetVar('solutions')\n if solutions:\n for s in solutions:\n if s.get('safesync_url', ''):\n # rip through revisions and make sure we're not over-riding\n # something that was explicitly passed\n has_key = False\n for r in options.revisions:\n if r.split('@')[0] == s['name']:\n has_key = True\n break\n\n if not has_key:\n handle = urllib.urlopen(s['safesync_url'])\n rev = handle.read().strip()\n handle.close()\n if len(rev):\n options.revisions.append(s['name']+'@'+rev)\n\n if options.verbose:\n # Print out the .gclient file. This is longer than if we just printed the\n # client dict, but more legible, and it might contain helpful comments.\n print(client.ConfigContent())\n return client.RunOnDeps('update', args)", "def _update_version(self) -> None:\n # Implement in child class.\n raise NotImplementedError", "def disk_update(context, disk_id, values):\n return NotImplemented" ]
[ "0.60562843", "0.5704721", "0.5672184", "0.56603265", "0.5605551", "0.5470727", "0.5463986", "0.53915817", "0.53834933", "0.5340218", "0.5326489", "0.52888274", "0.5209247", "0.5190477", "0.5189303", "0.5181957", "0.5151227", "0.51332885", "0.51260006", "0.5121645", "0.50913423", "0.5084725", "0.5056259", "0.50475633", "0.50436914", "0.5038432", "0.50284415", "0.5021859", "0.50211775", "0.5016708", "0.5015449", "0.5006331", "0.49933553", "0.49686903", "0.49603048", "0.4957341", "0.4949959", "0.4942903", "0.49341795", "0.49240726", "0.49103898", "0.49048737", "0.48970306", "0.4888186", "0.48803765", "0.48786676", "0.48753053", "0.48693493", "0.48670027", "0.48517376", "0.4845783", "0.48391122", "0.4835463", "0.48353836", "0.48337945", "0.48304188", "0.483039", "0.4828615", "0.4826685", "0.48236746", "0.48087412", "0.4802068", "0.4798636", "0.4790504", "0.478915", "0.47873908", "0.47873908", "0.47817233", "0.47755194", "0.47742596", "0.476265", "0.4757086", "0.47555095", "0.47533363", "0.47513184", "0.47510538", "0.47477445", "0.4740906", "0.4739182", "0.47391447", "0.47328246", "0.4727936", "0.4726052", "0.47213224", "0.47168407", "0.47141922", "0.4712153", "0.4711908", "0.47112963", "0.47089678", "0.4708673", "0.47050923", "0.4703767", "0.47036716", "0.47025168", "0.4702094", "0.46964166", "0.4691172", "0.468626", "0.4683482" ]
0.54293627
7
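The row that ends here (its document is only partially visible above) increments a render-output version folder by stripping non-digits from existing 'v###' names and zero-padding the next number. A minimal standalone sketch of just that increment step, assuming the folder list stands in for the real directory lookup and that 'v001' is the starting tag when nothing exists yet:

import re

def next_version_string(version_folders):
    # Mirror the increment logic from the row's document: strip non-digits,
    # take the highest existing version number, and zero-pad its successor.
    versions = [int(re.sub('[^0-9]', '', f)) for f in version_folders if re.search(r'\d', f)]
    if not versions:
        return 'v001'  # assumed starting tag when no version folders exist yet
    return 'v{0}'.format(str(max(versions) + 1).zfill(3))

print(next_version_string(['v001', 'v002', 'v007']))  # -> v008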
Publish helm chart index to github pages
def publish_pages(name, paths, git_repo, published_repo, extra_message=''): version = last_modified_commit(*paths) checkout_dir = '{}-{}'.format(name, version) check_call([ 'git', 'clone', '--no-checkout', git_remote(git_repo), checkout_dir], echo=False, ) check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir) # package the latest version into a temporary directory # and run helm repo index with --merge to update index.yaml # without refreshing all of the timestamps with TemporaryDirectory() as td: check_call([ 'helm', 'package', name, '--destination', td + '/', ]) check_call([ 'helm', 'repo', 'index', td, '--url', published_repo, '--merge', os.path.join(checkout_dir, 'index.yaml'), ]) # equivalent to `cp td/* checkout/` # copies new helm chart and updated index.yaml for f in os.listdir(td): shutil.copy2( os.path.join(td, f), os.path.join(checkout_dir, f) ) check_call(['git', 'add', '.'], cwd=checkout_dir) if extra_message: extra_message = '\n\n%s' % extra_message else: extra_message = '' check_call([ 'git', 'commit', '-m', '[{}] Automatic update for commit {}{}'.format(name, version, extra_message) ], cwd=checkout_dir) check_call( ['git', 'push', 'origin', 'gh-pages'], cwd=checkout_dir, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")", "def index():\n return render_template(\"charts.html\")", "def main():\n \n root = Folder(name=os.getcwd(), file='meta.json',\n collection='.github/jekyll')\n root.update()\n root.export_folders(True)", "def main():\n # Step1: generate htmls\n csv_data_path= \"./frontend/html_template_data/dataset.csv\"\n html_template_path = \"./frontend/html_template_data/template.html\"\n html_save_path = \"./frontend/html_files/\"\n\n generate_htmls(csv_data_path, html_template_path, html_save_path)\n\n # Step2: push htmls to Github\n # push htmls to Github Pages, currently manual.", "def index():\n graphs = [\n message_genre_bar_chart(df),\n category_bar_chart(df),\n top_words_bar_chart(df)\n ]\n \n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n \n # render web page with plotly graphs\n return render_template('master.html', ids=ids, graphJSON=graphJSON)", "def deploy(version):\n toolkit.readmegen(version)", "def index():\n\n # open the README file\n with open(os.path.dirname(app.root_path) + '/README.md',\n 'r', encoding=\"utf-8\") as markdown_file:\n\n # Read the content of the file\n content = markdown_file.read()\n\n # convert to html\n return markdown.markdown(content)", "def write_index_html(self):\n print(\"- writing index.md\")\n index_toc = [f\"### [Table of Contents]({config['github_pages_url']}/toc.html)\"] if self.notebooks else []\n if os.path.isfile(os.path.join(self.dst_dir, \"data_index.html\")):\n index_toc += [f\"### [Data Index]({config['github_pages_url']}/data_index.html)\"]\n if os.path.isfile(os.path.join(self.dst_dir, \"figure_index.html\")):\n index_toc += [f\"### [Figure Index]({config['github_pages_url']}/figure_index.html)\"]\n if os.path.isfile(os.path.join(self.dst_dir, \"python_index.html\")):\n index_toc += [f\"### [Python Module Index]({config['github_pages_url']}/python_index.html)\"]\n if os.path.isfile(os.path.join(self.dst_dir, \"tag_index.html\")):\n index_toc += [f\"### [Tag Index]({config['github_pages_url']}/tag_index.html)\"]\n index_toc += [f\"- {nb.link}\" if type(nb) == Section else f\"\\n### {nb.link}\" for nb in self.notebooks]\n env = Environment(loader=FileSystemLoader(\"templates\"))\n with open(os.path.join(self.dst_dir, \"index.md\"), 'w') as f:\n f.write(env.get_template('index.md.tpl').render(\n readme_toc=index_toc, page_title=config['github_repo_name'], github_url=config['github_repo_url']))", "def home_page():\n\n return render_template('index.html', stories=stories.values())", "def main():\n year = time.strftime(\"%Y\")\n month = time.strftime(\"%m\")\n today = time.strftime(\"%Y%m%d\")\n homedir = \"/home/\" + user + \"/raspi-sump/\"\n webchart.create_folders(year, month, homedir)\n webchart.create_chart(homedir)\n webchart.copy_chart(year, month, today, homedir)", "def index():\n return render_template('home.jinja2')", "def index(request):\n copy = '2018 ' + author\n\n context = dict(author=author, copyright=copy, repo_url=repo_url)\n\n return render(request, 'index.html', context)", "def publish_info_in_pagebrowser():\n env.run('bin/django create_pagebrowser_books')", "def index():\n # create table for original dataset\n table_1 = data_table_low(filepath = \"sparkify_data.csv\", title='Raw Sparkify Data')\n\n table_2 = data_table_low(filepath = \"cleaned_data.csv\", title='Cleaned Sparkify 
Data')\n\n # create and append plotly visuals into an array to be passed later for graphJSON file\n graphs = [table_1, table_2]\n\n # encode plotly graphs in JSON\n ids = [\"graph-{}\".format(i) for i, _ in enumerate(graphs)]\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n\n # render web page with plotly graphs\n return render_template(\"master.html\", ids=ids, graphJSON=graphJSON)", "def index():\n r = requests.get(API_ROUTE, headers={'Auth': _auth()})\n if r.status_code != requests.codes.ok:\n return r.text, r.status_code\n\n articles = sorted(r.json(), key=lambda article: (article['release_at'] or '9', article['updated_at']), reverse=True)\n return render_template('index.html', articles=articles)", "def index():\n return render_template(\"index.html\", page_title=\"Home\")", "def install_helm_plugins():\n plugins = {\n 'https://github.com/technosophos/helm-gpg': '0.1.0',\n }\n for plugin_url, version in plugins.items():\n install_cmd = \"helm plugin install {0} --version={1}\".format(\n plugin_url,\n version)\n logging.info(\"installing helm plugin with command: {0}\".format(install_cmd))\n sp.call(install_cmd, shell=True)", "def home():\n return render_template(\"d3_graph.html\")", "def save(self):\n for page in self.pages.get_published_pages():\n site_path = page.path_to_page.replace('.md', '').replace(\n self.source_path, '').strip('/')\n save_path = self.output_path\n\n # ensure we are not creating a directory for the index file that\n # that lives at the source_path\n if page.full_path() != f'{self.source_path}{os.sep}index.md':\n site_path = slugify_path(site_path)\n save_path = os.path.join('', self.output_path, site_path)\n\n try:\n os.makedirs(save_path, exist_ok=True)\n except Exception as e:\n log((f'unable to create directories: {save_path}'\n f' because: {e}'), True)\n continue\n\n try:\n save_file = os.path.join(save_path, 'index.html')\n log(f'saving {save_file}')\n\n published = self.pages.get_published_pages()\n prev_page = self.pages.get_previous_page(page)\n next_page = self.pages.get_next_page(page)\n content = page.render(published_pages=published,\n previous_page=prev_page, next_page=next_page)\n write(save_file, content)\n except Exception as e:\n log(f'unable to save file: {save_file} -- {e}', True)\n\n unpublished = self.pages.get_unpublished_pages()\n if len(unpublished):\n log('')\n log('these pages were unpublished and not rendered:', True)\n for up in unpublished:\n log(up.path_to_page, True)\n log('')\n\n # build the _tags pages\n for tag, pages in self.tags.pages.items():\n content = self.tags.render(tag, pages)\n tag_index_dir = f'{self.tag_dir}/{slugify(tag)}'\n tag_index = f'{tag_index_dir}/index.html'\n os.makedirs(tag_index_dir, exist_ok=True)\n write(tag_index, content)\n\n log('finished builidng site')", "def home():\n payload = manager.get_payload()\n return render_template('index.html', payload=payload)", "def entry_point():\n return render_template(\"index.html\")", "def index():\n\n return render_template(\"index.html\"), 200", "def main():\n\td = Hugo(\"cmd\")\n\tpass", "def index():\n return render_template(\n 'main/index.html',\n title='Main page'\n )", "def generate():\n\n # Verify if directory exists\n if not os.path.isdir(config.techniques_markdown_path):\n os.mkdir(config.techniques_markdown_path)\n\n #Write the technique index.html page\n with open(os.path.join(config.techniques_markdown_path, \"overview.md\"), \"w\", encoding='utf8') as md_file:\n md_file.write(config.technique_overview_md)\n\n for domain in 
config.domains:\n generate_domain_markdown(domain)", "def index():\n g.data['api_version'] = API_VERSION\n g.data['apilib_version'] = API_VERSION\n g.data['oar_version'] = VERSION\n g.data['links'] = []\n #endpoints = ('resources', 'jobs', 'config', 'admission_rules')\n endpoints = ('resources', 'jobs')\n for endpoint in endpoints:\n g.data['links'].append({\n 'rel': 'collection',\n 'href': url_for('%s.index' % endpoint),\n 'title': endpoint,\n })", "def index():\n return render_template(\"index.html\",\n title='Index')", "def index():\n return render_template('index.html', title='PanOS Bootstrap Utility')", "def index():\n today = datetime.today()\n return render_template(\"index.html.j2\", today=today)", "def index():\n return render_template('index.html'), 200", "def index():\n return render_template('0-index.html')", "def index() -> object:\n return render_template('ue_bootstrap.j2', title='UENERGO TAGS')", "def home():\n return render_template(\n 'index.html',\n title= \"Pi-Lapse\",\n year=datetime.now().year,\n image = Cam.do.GetLastImage(),\n \n )", "def index():\n\n return {\n 'page': 'index',\n }", "def main(owner: str, repository: str, token: str, tag: Optional[str]) -> None:\n if tag is None:\n today = datetime.date.today()\n tag = f\"{today:%Y.%-m.%-d}\"\n\n try:\n publish_release(\n owner=owner,\n repository_name=repository,\n token=token,\n tag=tag,\n )\n except Exception as error:\n click.secho(f\"error: {error}\", fg=\"red\")\n sys.exit(1)", "def deploy_nucleondocs():\n\n # Copy generated docs to docs_webserver on target machine\n rsync_project(\n remote_dir= '/srv/docs_webserver/docs/nucleon/',\n local_dir=join(dirname(__file__), 'docs/_build/html/'),\n delete=True)", "def index_page():\n \n return render_template(\"index.html\")", "def index():\n return render_template('index.html', title='Home')", "def index_site() -> None:\n app = create_web_app()\n app.app_context().push()\n index.create_index()\n\n indexable: List[IndexablePage] = []\n for path in site.list_paths():\n page: Page = site.load_page(path)\n content = bleach.clean(page.markdown, strip=True, tags=[])\n indexable.append(IndexablePage(\n title=page.title,\n path=page.path,\n content=content\n ))\n index.add_documents(*indexable)", "def index():\n return render_template(\n 'index.html',\n nav=nav,\n title='Home Page',\n year=datetime.now().year\n )", "def homepage():\n return render_template('home/index.html', \n title=\"Bem vindo!\")", "def index(path):\n return render_template('index.jinja2')", "def index():\n\treturn render_template('public/index.html', title='Home')", "def publish(self):\n #vprint(\"PUBLISHING \",self.__dict__)\n \n js = self.compute_json()\n name = self.name\n #topicdir = \"/topicd/\" if constants.publishToS3Dev else \"/topic/\"\n s3path = constants.compositeDir+\"/\"+name+\"/main.json\" #the path where the page will finally end up\n s3.s3SetContents(s3path,contents=js,relativeTo=\"\",contentType=\"application/json\")\n self.genPage()", "def index(self):\n\n # 'html/index.html' is the path WITHIN the tarball.\n return self.static('html/index.html')", "def welcome():\n return \"Welcome to API Deployment\"", "def index():\n return render_template(\"index.html\",\n builder=HtmlBuilder(),\n services=LAPPS_SERVICES)", "def list_articles():\n wiki = listdir(\"wiki\")\n return template(\"index\", wiki = wiki)", "def deploy():\n update_treesheets()\n restart_treesheets()", "def index():\n webapp_config = {\n 'cloudmadeApiKey': config.cloudmade_api_key,\n 'peerserverApiKey': 
config.peerserver_api_key,\n }\n return render_template('index.html', config=webapp_config)", "def publish():\n pass", "def preview():\n local('pelican -o {} -s publishconf.py'.format(env.deploy_path))", "def index(path):\n return render_template(\"main.jinja2.html\")", "def index():\n mongo_collection = mongo_database[\"settings\"]\n doc_instructions = mongo_collection.find_one({\"id\": \"instructions\"})\n instructions = markdown.markdown(doc_instructions['text'])\n return render_template(\"index.html\", instructions=instructions)", "def main() -> None:\n\n\t# Retrieving the wiki URL\n\turl = get_api_url()\n\tprint(url)\n\n\t# Creates file if it does not exist\n\topen(TEXT_FILE, \"a\")\n\n\twith open(TEXT_FILE, \"r\") as f:\n\t\tlast_title = f.readline().strip()\n\t\tprint(\"Starting from:\", last_title)\n\n\tmodify_pages(url, last_title)\n\n\tprint(\"\\nNo pages left to be tagged\")", "def main_index():\n\n return render_template(\n \"index.html\",\n groups=[{\"name\": f, \"logs\": log_metadata(f)} for f in FOCUS_GROUPS],\n css=get_style(),\n )", "def index():\r\n return render_template('index.html')", "def index():\r\n return render_template('index.html')", "def repository_create_hosted():\n pass", "def index_page():\n\n return render_template(\"index.html\")", "def index_page():\n\n return render_template(\"index.html\")", "def index_page():\n\n return render_template(\"index.html\")", "def index(request):\n return render(request, 'vaxcharts/home.html')", "def index():\n\treturn render_template(\"index.html\", title=\"Home\")", "def page_home(state):\n\n st.title(\":house: Welcome to Stock Swing Predictor (SSP)\")\n\n image = Image.open(\"ssp.png\") # load logo\n st.image(image, use_column_width=True)\n st.markdown(\"*Note:* This is a conceptual tool and should not be used to make real/serious trading decisions.\")\n st.markdown(\"## Tool Overview:\")\n st.markdown(\n \"The Stock Swing Predictor makes future stock price swing predictions for any stock for the next day. Price swings are simply whether or not a price goes up or down, so with this the tool predicts which way a stocks price will move or swing for the upcoming day.\\nPredictions are made using seven different models and with the user's choice of dataset size. The models are trained using the stock price data of previous days.\"\n )\n\n st.markdown(\"## Using the Tool:\")\n st.markdown(\n \"Using the tool is simple once you are in the Web Interface! To run the tool, go to the `Run Settings` page.\"\n )\n st.markdown(\n \"After filling out the data fields for your chosen option, you can than click the button below to run the tool. After this, wait until the tool prompts you to `Go to the Prediction Dashboard to view your data`. Once prompted, you can then go to the Prediction Dashboard page and view your data.\"\n )\n\n st.markdown(\"## Experimental Results and Optimal Settings:\")\n st.markdown(\n \"Extensive experimentation was completed on the tool, the results of which are detailed in the README.\"\n )\n st.markdown(\"### Settings Recommendations:\")\n st.markdown(\n \"- It is recommended that one runs the tool with as much data as possible, as results are generally more accurate for all models. 1 or 2 years is the optimal amount of training data it seems, any more of that and you will be waiting for your results for a while.\"\n )\n st.markdown(\n \"- With this, the most accurate model seems to be the SVR-POLY model (Support Vector Regression with a Polynomial kernel), especially when trained with 1 year of data. 
Experimental results show future prediction accuracy results of almost 80%. The SVR-RBF model is also quite accurate, when trained with one month of data.\"\n )\n\n st.markdown(\"### Some Experimental Results:\")\n image2 = Image.open(\"results.png\") # load logo\n st.image(image2, use_column_width=True)\n\n st.markdown(\n \"This shows how accurate models are and which amount of training data they are most accuate with. \\n This table displays the predictions on 9 different stocks over 5 different days for each time period of data. This was done from 3/30/2021-4/6/2021. With this, the percentage represents the number of predictions that were correct, out of a total 45 predictions that were made for each time period of data.\"\n )\n st.markdown(\"## Get in Touch & Learn More:\")\n st.markdown(\"- View source code on the [Project GitHub](https://github.com/lussierc/StockSwingPredictor). Consider contributing.\")\n st.markdown(\"- View my personal website and get contact information [here](https://christianlussier.com).\")\n st.markdown(\"## Disclaimer:\")\n st.markdown(\"We are not responsible for any investment losses incurred by users. This tool is by no means, a be-all-end all for stock prediction and while it offers promise it should not be used to make serious trading decisons. It is a conceptual tool that is somewhat accurate and is meant give users insights into the potential uses of ML for stock prediction.\")", "def make_html(depends=(files['image.gif'],),\n targets=(files['index.html'],)):\n\n index_html = open(files['index.html'].rel, 'w')\n index_html.write(pyyaks.context.render(html_template))\n index_html.close()", "def index():\n # Render template\n return render_template('index.html')", "def index():\n return 'Your api is up and running!'", "def deploy():\n build()\n collect()\n commit()\n push()", "def deploy():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('pwd')\n run('git stash')\n run('git pull -f origin master')\n run('fig -f prod.yml stop')\n run('fig -f prod.yml build')\n run('fig -f prod.yml up -d')", "def index():\n return render_template('index.html')", "def index():\n return render_template('index.html')", "def index():\n\n return render_template('index.html')", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template(\"index.html\")", "def index():\n return render_template('index.html')", "def index():\n return render_template(\n 'index_t.html',\n call_counter=str(get_model().call_counter),\n app_version=str(app.config.get('GIT_HASH', None))\n )", "def write_data_index(self):\n content = \"\"\n self.remove(os.path.join(config[\"data_subdir\"], \"*\"))\n if self.data_index:\n 
print(\"- writing data index\")\n content += f\"# [{config['github_repo_name']}]({config['github_pages_url']})\\n\"\n content += \"\\n## Index of Data files in this Repository\\n\"\n for data, links in sorted(self.data_index.items(), key=lambda x: natsort(x[0])):\n if links:\n content += f\"\\n### {data}\\n\"\n content += f\"![{data}]({config['data_subdir']}/{data})\\n\"\n for link in links:\n content += f\"* {link}\\n\"\n data_src = os.path.join(self.src_dir, config[\"data_subdir\"], data)\n data_dst = os.path.join(self.dst_dir, config[\"data_subdir\"], data)\n print(f\"- copying {data_src} to {data_dst}\")\n shutil.copy(data_src, data_dst)\n self.write_md2html(\"data_index\", content)", "def index():\r\n return render_template('index.html')", "def deploy(\n deployment,\n chart,\n environment,\n namespace=None,\n helm_config_overrides=None,\n version=None,\n timeout=None,\n force=False\n):\n if helm_config_overrides is None:\n helm_config_overrides = []\n\n config = get_config(deployment)\n\n name = f'{deployment}-{environment}'\n\n if namespace is None:\n namespace = name\n helm_config_files = [f for f in [\n os.path.join('deployments', deployment, 'config', 'common.yaml'),\n os.path.join('deployments', deployment, 'config', f'{environment}.yaml'),\n os.path.join('deployments', deployment, 'secrets', f'{environment}.yaml'),\n ] if os.path.exists(f)]\n\n for image in config['images']['images']:\n # We can support other charts that wrap z2jh by allowing various\n # config paths where we set image tags and names.\n # We default to one sublevel, but we can do multiple levels.\n # With the PANGEO chart, we this could be set to `pangeo.jupyterhub.singleuser.image`\n helm_config_overrides.append(f'{image.helm_substitution_path}.tag={image.tag}')\n helm_config_overrides.append(f'{image.helm_substitution_path}.name={image.name}')\n\n helm_upgrade(\n name,\n namespace,\n chart,\n helm_config_files,\n helm_config_overrides,\n version,\n timeout,\n force\n )", "def index():\n with open(\"PROJECT.md\", \"r\") as project_file:\n md_template_string = markdown.markdown(\n project_file.read(), extensions=[\"fenced_code\"]\n )\n return md_template_string", "def index(self) -> HTMLBody:\n\t\treturn render_template(\"index.jinja2\")", "def home():\n return render_template(\n 'index.html',\n title='Automation Center',\n year=datetime.now().year,\n message='Welcome to the Automation Center'\n )", "def index():\n return render_template(\"index.html\")", "def index() -> str:\n return render_template(\n Template.INDEX.value,\n spanish=pyjokes.get_joke(language=Language.SPANISH.code()),\n deutsch=pyjokes.get_joke(language=Language.DEUTSCH.code()),\n english=pyjokes.get_joke(language=Language.ENGLISH.code()),\n )" ]
[ "0.6303632", "0.5992799", "0.59895986", "0.56986564", "0.5615526", "0.55472004", "0.5513828", "0.53691167", "0.5364688", "0.5286044", "0.5263749", "0.52587676", "0.5251907", "0.5230299", "0.5216074", "0.52005446", "0.5190611", "0.5182666", "0.5175446", "0.51723653", "0.51648855", "0.5161892", "0.5158165", "0.51531535", "0.5147999", "0.51465327", "0.5144903", "0.5141292", "0.5141177", "0.5139922", "0.5138232", "0.51343805", "0.5133028", "0.5109605", "0.5102408", "0.5092726", "0.508566", "0.50775754", "0.50756896", "0.5075532", "0.5062198", "0.5061978", "0.50607693", "0.50585485", "0.5056421", "0.5055792", "0.5055494", "0.5049459", "0.50442106", "0.50440234", "0.503421", "0.5029216", "0.5026738", "0.5004254", "0.49984187", "0.49923265", "0.49841404", "0.49841404", "0.4982039", "0.4977077", "0.4977077", "0.4977077", "0.49766126", "0.4972237", "0.49709085", "0.49704933", "0.497027", "0.49645975", "0.49619946", "0.49607286", "0.4955249", "0.4955249", "0.49493995", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49487785", "0.49446845", "0.49440083", "0.49430284", "0.49368384", "0.49363074", "0.4935807", "0.4935697", "0.49313882", "0.49256453", "0.4922278" ]
0.6350673
0
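The document in the row above packages a Helm chart, merges it into the existing gh-pages index.yaml via 'helm repo index --merge', and pushes the result. A hypothetical invocation is sketched below, assuming publish_pages and its git helpers (last_modified_commit, git_remote, check_call) are in scope; the chart name, repository slug, and published URL are illustrative placeholders, not values from the dataset:

# Hypothetical call; every argument value here is a made-up placeholder.
publish_pages(
    name='mychart',                                  # chart directory passed to 'helm package'
    paths=['mychart'],                               # paths whose last commit determines the version
    git_repo='example-org/helm-charts',              # repository holding the gh-pages branch
    published_repo='https://example-org.github.io/helm-charts',  # URL written into index.yaml
    extra_message='triggered by CI',
)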
Add the domain restrictions.
def add_domains_restriction(self, domain_restriction): self._domain_restricion = domain_restriction self._size_var = self._get_size_var() self._nr_of_bits = self._get_nr_of_bits()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_domain_restrictions(self):\n for index, restriction in enumerate(self._domain_restrictions):\n self.add_specific_domain_restriction(index+1, restriction)", "async def setjradd(self, ctx, domain):\n allowedDomains = await self.config.guild(ctx.guild).allowedDomains()\n allowedDomains.append(domain)\n await self.config.guild(ctx.guild).allowedDomains.set(allowedDomains)\n await ctx.message.add_reaction(\"✅\")", "def domain(self, domain):", "def _adddomain(self, domain: Domain):\n\n domain = copy.deepcopy(domain)\n if self.model is not None:\n # Check that model and domain are compatible\n self._validate_model_domain(self.model, domain)\n\n # Add in domain\n self.domain = domain\n\n # Setup base namelists\n self._set_base_namelists()\n else:\n self.domain = domain", "def relevant_domains(self):\n pass", "def domains(self, domains):\n\n self._domains = domains", "def create_all(self):\n for name in self.app.config['SIMPLE_DOMAINS']:\n self.connection.create_domain(name)", "def add_function(self, function):\n super(BaseAG, self).add_function(function)\n self._representation.add_domains_restriction(\n function.get_domain_restrictions)\n self._selection.add_function(function)", "def add_new_domain(self):\n\n domain = self.dlg.uComboBoxDomain.currentText()\n\n if domain in self.domains:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: Domains must be unique. \" \"Please edit the domain below\"\n )\n return\n\n if len(self.domains) >= 10:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: You can only store up to . \" \"10 domain entries\"\n )\n return\n\n if domain == \"OTHER\":\n domain = \"\"\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).setText(\n domain\n )\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uTextAPIKey{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(len(self.domains) + 1)).show()\n self.dlg.uWarningSettings.hide()", "def add_variable(self, name, domain):\n self.variables.append(name)\n self.domains[name] = list(domain)\n self.constraints[name] = {}", "def exclude_domain(self) -> None:\n self.exclude_domains.add(current_domain.get())", "def par_domain(self):", "def update_domain():\n\n for e in Expr.search() + User.search(): e.set_tld(config.server_name)", "def domains(self):\n return DomainCollection(self.request)", "def restrict_domain(self, geometry ):\n raise NotImplementedError(\"restrict_domain\")", "def fixDomains(self, domainMin, domainMax, fixToDomain):\n\n return 0", "def set_asset_restrictions(self, restrictions, on_error='fail'):\n control = RestrictedListOrder(on_error, restrictions)\n self.register_trading_control(control)\n self.restrictions |= restrictions", "def add_subdomain_output(self,filename,ll_x,ll_y, ur_x, ur_y,start,stop,step,area_id = 0): \n \n self.number_of_subdomains += 1\n self.subdomains.number_of_subdomains = self.number_of_subdomains #set the 'number_of_subdomains' attribute \n name = 'subdomain' + str(self.number_of_subdomains) \n self.subdomainGroups.append(self.subdomains.createGroup(name) ) #great a new subdomain Group\n \n self.subdomainGroups[self.number_of_subdomains-1].filename = filename #set the bounds attributes for the subdomain\n\n self.subdomainGroups[self.number_of_subdomains-1].ll_x = ll_x #set the bounds attributes for the subdomain\n 
self.subdomainGroups[self.number_of_subdomains-1].ll_y = ll_y\n self.subdomainGroups[self.number_of_subdomains-1].ur_x = ur_x\n self.subdomainGroups[self.number_of_subdomains-1].ur_y = ur_y\n self.subdomainGroups[self.number_of_subdomains-1].start = start\n self.subdomainGroups[self.number_of_subdomains-1].stop = stop\n self.subdomainGroups[self.number_of_subdomains-1].step = step\n self.subdomainGroups[self.number_of_subdomains-1].area_id = area_id", "def allowed_domains(self):\n if self._allowed_domains is None:\n uri = \"/loadbalancers/alloweddomains\"\n resp, body = self.method_get(uri)\n dom_list = body[\"allowedDomains\"]\n self._allowed_domains = [itm[\"allowedDomain\"][\"name\"]\n for itm in dom_list]\n return self._allowed_domains", "def Create(self, domainsList) :\n\t\t...", "def _get_domain(self):\n self.ensure_one()\n domain = ['|', ('active', '=', True), ('active', '=', False)]\n # Check active\n if self.active == 'true':\n domain += [('active', '=', True)]\n elif self.active == 'false':\n domain += [('active', '=', False)]\n # Check partner type\n if self.partner_type == 'customer_or_supplier':\n domain += ['|', ('customer', '=', True), ('supplier', '=', True)]\n elif self.partner_type == 'customer_and_supplier':\n domain += [('customer', '=', True), ('supplier', '=', True)]\n elif self.partner_type == 'customer':\n domain += [('customer', '=', True)]\n elif self.partner_type == 'supplier':\n domain += [('supplier', '=', True)]\n # Check category\n if self.category_ids:\n domain += [('category_id', 'in', self.category_ids.ids)]\n return domain", "def allowed_domains(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"allowed_domains\")", "def SetDomainsList(self, domainsList) :\n\t\t...", "def domain( self ):\n raise NotImplementedError(\"domain\")", "def allowed_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_domains\")", "def allowed_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_domains\")", "def domainparams(self):\n\t\traise Exception(NotImplemented)", "def add(self, newaddress):\n list = newaddress.split(\"@\")\n newdomain = list[-1]\n if not newdomain in self.__domainlist:\n self.__domainlist.append(newdomain)\n else:\n print(\"Domain is already in the database\")", "def test_add_domain_type_assignment_rule(self):\n pass", "def set_all_domains(self, domains_dict) :\n if not set(domains_dict.keys()) <= set(self.variables):\n invalid_vars = filter(lambda v: v not in self.variables, domains_dict.keys())\n raise KeyError(str(invalid_vars) + \" are not variables in this problem.\")\n self.domains = deepcopy(domains_dict)\n return self", "def __init__(self, cfg):\n self.domains = []\n\n # process domains in order\n for i in range(1, len(cfg)+1):\n this_dom = cfg[str(i)]\n par_dom = self.domains[this_dom['parent_id']-1] if 'parent_id' in this_dom else None\n self.domains.append(WPSDomainLCC(i, this_dom, par_dom))", "def _identify_domains(self):\n\n domains = [FEMDomain(TR3, MeshPart(self.mesh, labels=(0,)), self.media, self.labels)]\n return domains", "def AllowedDomains(self)->list:\n return self._allowedDomains", "def set_subdomains(self, f):\n s = \"::: setting 2D subdomains :::\"\n print_text(s, cls=self)\n\n self.ff = MeshFunction('size_t', self.mesh)\n self.cf = MeshFunction('size_t', self.mesh)\n self.ff_acc = MeshFunction('size_t', self.mesh)\n f.read(self.ff, 'ff')\n f.read(self.cf, 'cf')\n f.read(self.ff_acc, 'ff_acc')\n \n 
self.ds = Measure('ds')[self.ff]\n self.dx = Measure('dx')[self.cf]\n \n self.dx_g = self.dx(0) # internal above grounded\n self.dx_f = self.dx(1) # internal above floating\n self.dBed_g = self.ds(3) # grounded bed\n self.dBed_f = self.ds(5) # floating bed\n self.dBed = self.ds(3) + self.ds(5) # bed\n self.dSrf_gu = self.ds(8) # grounded with U observations\n self.dSrf_fu = self.ds(9) # floating with U observations\n self.dSrf_u = self.ds(8) + self.ds(9) # surface with U observations\n self.dSrf_g = self.ds(2) + self.ds(8) # surface of grounded ice\n self.dSrf_f = self.ds(6) + self.ds(9) # surface of floating ice\n self.dSrf = self.ds(6) + self.ds(2) \\\n + self.ds(8) + self.ds(9) # surface\n self.dLat_d = self.ds(7) # lateral divide\n self.dLat_to = self.ds(4) # lateral terminus overwater\n self.dLat_tu = self.ds(10) # lateral terminus underwater\n self.dLat_t = self.ds(4) + self.ds(10) # lateral terminus\n self.dLat = self.ds(4) + self.ds(7) \\\n + self.ds(10) # lateral", "def add_domain():\n\n today = date.today()\n\n if request.method == \"POST\":\n # Check to see if domain already exists because\n # duplicate domain names aren't allowed\n domain = session.query(Domain).filter_by(\n domain_name=request.form[\"domain-name\"]).first()\n if domain:\n message = \"{}Error!{} {}{}{} already exists.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"danger\")\n return redirect(url_for(\"add_domain\", today=today,\n category_names=category_names))\n\n # Find existing Provider otherwise create new Provider object\n provider = session.query(Provider).filter(\n Provider.provider_url == request.form[\"provider-url\"]).first()\n if not provider:\n provider = Provider(provider_url=request.form[\"provider-url\"])\n\n # Get existing category name object from CategoryName table\n category_name = session.query(CategoryName).filter(\n CategoryName.name == request.form[\"category\"]).first()\n\n domain = Domain(\n category=Category(),\n domain_name=request.form[\"domain-name\"],\n ip=request.form[\"ip-address\"],\n provider=provider)\n domain.category.category_name = category_name\n domain.status.append(Status(status_type=\"added\"))\n domain.is_active = request.form.get(\"is-active\", False)\n domain.is_monitored = request.form.get(\"is-monitored\", False)\n\n # Convert date string from form to date object\n exp_date = datetime.strptime(request.form.get(\"exp-date\"),\n \"%Y-%m-%d\").date()\n domain.exp_date = exp_date\n\n session.add(domain)\n\n try:\n session.commit()\n message = \"{}Success!{} Added {}{}{} successfully.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message , \"success\")\n except:\n session.rollback()\n message = \"{}Error!{} Could not add add {}{}{}.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"danger\")\n\n if request.form[\"submit\"] == \"Submit\":\n return redirect(url_for(\"home\"))\n else:\n return redirect(url_for(\"add_domain\", today=today,\n category_names=category_names))\n else:\n return render_template(\"add_domain.html\", today=today,\n category_names=category_names)", "def restrict_objects(self):\n if self.data_local and self.restrict_method_id:\n model = self.inventory_model.model\n global_vars = self.env['gdpr.restrict_method'].get_eval_context(restrict_days=self.restrict_time_days)\n if self.restrict_domain_advanced:\n eval(compile(self.restrict_domain_code, __name__, 'exec'), global_vars)\n domain = 
safe_eval(self.restrict_domain, global_vars)\n object_ids = [o['id'] for o in self.env[model].search_read(domain, ['id'])]\n _logger.debug('restrict_objects object_ids: %s' % object_ids)\n domain = [('restricted', '!=', True), ('gdpr_id', '=', self.id), ('object_res_id', 'in', object_ids)]\n if self.lawsection_id.consent:\n gdpr_o_ids = [o['gdpr_object_id'][0] for o in self.env['gdpr.consent'].search_read([('state', '=', 'withdrawn'), ('record_id', 'in', [('%s,%s' % (model, id)) for id in object_ids]), ('gdpr_id', '=', self.id)], ['gdpr_object_id'])]\n domain.append(('id', 'in', gdpr_o_ids))\n _logger.debug('restrict_objects domain: %s' % domain)\n objects = self.env['gdpr.object'].search(domain)\n if objects:\n self.restrict_method_id.restrict_objects(self, objects)", "def add_domain_always_in_scope(self, value, isregex=None, isenabled=None, apikey=''):\n params = {'value': value, 'apikey': apikey}\n if isregex is not None:\n params['isRegex'] = isregex\n if isenabled is not None:\n params['isEnabled'] = isenabled\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/addDomainAlwaysInScope/', params)))", "def add_restriction(self, subject, predicate, object_):\n if type(object_) != rdflib.URIRef:\n object_ = self.check_thing(object_)\n\n if type(predicate) != rdflib.URIRef:\n predicate = self.check_thing(predicate)\n\n if type(subject) != infixowl.Class:\n if type(subject) != rdflib.URIRef:\n subject = self.check_thing(subject)\n subject = infixowl.Class(subject, graph=self.g)\n\n restriction = infixowl.Restriction(predicate, graph=self.g, someValuesFrom=object_)\n subject.subClassOf = [restriction] + [c for c in subject.subClassOf]", "def whitelist_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"whitelist_domains\")", "def whitelist_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"whitelist_domains\")", "def whitelist_domains(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"whitelist_domains\")", "def _domain(self):\n return [self.args[0] >= 0, self.args[1] >= 0]", "def setup_domains():\n sdb = boto.connect_sdb()\n sdb.create_domain(\"mls_domain\")", "def _supports_domain(cls, domain):\n return domain in (ZZ, QQ)", "def test_dos_create_service_domain_list(self):\n # create a huge list of domain\n self.reset_defaults()\n for k in range(1, 30000):\n self.domain_list.append({\"domain\": \"w.t%s.com\" % k})\n\n # send MAX_ATTEMPTS requests\n for k in range(1, self.MAX_ATTEMPTS):\n self.service_name = str(uuid.uuid1())\n self.check_one_request()", "def domain(self, domain):\n self._domain = domain", "def domain(self, domain):\n self._domain = domain", "def createDomain(self):\n if not self.rank:\n logging.info('Creating domain')\n\n if 'box' in self.pargs:\n self.lmp.command('region domain block ' + ('{} ' * len(self.pargs['box'])).format(*self.pargs['box']) + ' units box volume_limit 1e-20')\n elif 'cylinder' in self.pargs:\n self.lmp.command('region domain cylinder ' + ('{} ' * len(self.pargs['cylinder'])).format(*self.pargs['cylinder']) + ' units box volume_limit 1e-20') \n\n self.lmp.command('create_box {} domain'.format(self.pargs['nSS']))", "def fixDomains(self, energyMin, energyMax, domainToFix):\n\n return 0", "def fixDomains(self, energyMin, energyMax, domainToFix):\n\n return 0", "def __init__(self, *args, **kwargs):\n self._directives = []\n self.domain = kwargs.get(\"domain\", None)\n self._locations = {}", "def 
create_domain(DomainName=None):\n pass", "def add_constraints(self, constraints):\n for const in constraints:\n self.add_constraint(const.type, const.column, const.check_clause)", "def setDomainRange(self, domain, range):\n self.domain = domain.cloneSpace()\n self.range = range.cloneSpace()\n return", "def __parse_domains(self, lines):\n domain_list = []\n for line in lines:\n if len(line) == 0:\n continue\n if line[0] == \"!\":\n continue\n if line[0] == \"|\":\n continue\n if line[0] == \"@\":\n continue\n if line[0] == \"[\":\n continue\n if line.find('zh.wikipedia.org') == 0:\n continue\n line = string.replace(line, \"||\", \"\").lstrip(\".\")\n # strip everything from \"/\" to the end\n if line.find(\"/\") != -1:\n line = line[0:line.find(\"/\")]\n if line.find(\"*\") != -1:\n continue\n if line.find(\".\") == -1:\n continue\n # if line in oklist:\n # \tcontinue\n domain_list.append(line)\n\n return domain_list", "def __init__(__self__, *,\n domain: pulumi.Input[str]):\n pulumi.set(__self__, \"domain\", domain)", "def __init__(__self__, *,\n domain: pulumi.Input[str]):\n pulumi.set(__self__, \"domain\", domain)", "def parse_domain(self, domainfile):\n\n with open(domainfile) as dfile:\n dfile_array = self._get_file_as_array(dfile)\n #Deal with front/end define, problem, :domain\n if dfile_array[0:4] != ['(', 'define', '(', 'domain']:\n print('PARSING ERROR: Expected (define (domain ... at start of domain file')\n sys.exit()\n self.domain = dfile_array[4]\n\n dfile_array = dfile_array[6:-1]\n opencounter = 0\n keyword = ''\n obj_list = []\n is_obj_list = True\n for word in dfile_array:\n if word == '(':\n opencounter += 1\n elif word == ')':\n opencounter -= 1\n elif word.startswith(':'):\n if word[1:] not in DFILE_KEYWORDS:\n pass\n elif keyword != 'requirements':\n keyword = word[1:]\n if opencounter == 0:\n if keyword == 'action':\n self.actions.append(obj_list)\n obj_list = []\n if keyword == 'types':\n for element in obj_list:\n self.types.setdefault('object', []).append(element)\n self.type_list.add('object')\n self.type_list.add(element)\n obj_list = []\n keyword = ''\n\n if keyword == 'requirements': #Requirements list\n if word != ':requirements':\n if not word.startswith(':'):\n print('PARSING ERROR: Expected requirement to start with :')\n sys.exit()\n elif word[1:] not in DFILE_REQ_KEYWORDS:\n print('WARNING: Unknown Rquierement ' + word[1:])\n #print 'Requirements must only be: ' + str(DFILE_REQ_KEYWORDS)\n #sys.exit()\n else:\n self.requirements.add(word[1:])\n elif keyword == 'action':\n obj_list.append(word)\n elif not word.startswith(':'):\n if keyword == 'types': #Typed list of objects\n if is_obj_list:\n if word == '-':\n is_obj_list = False\n else:\n obj_list.append(word)\n else:\n #word is type\n for element in obj_list:\n if not word in self.type_list:\n self.types.setdefault('object', []).append(word)\n self.type_list.add(word)\n self.types.setdefault(word, []).append(element)\n self.type_list.add(element)\n self.type_list.add(word)\n is_obj_list = True\n obj_list = []\n elif keyword == 'constants': #Typed list of objects\n if is_obj_list:\n if word == '-':\n is_obj_list = False\n else:\n obj_list.append(word)\n else:\n #word is type\n for element in obj_list:\n if word in self.type_list:\n self.constants.setdefault(word, []).append(element)\n #self.object_list.add(element)\n else:\n print(self.type_list)\n print(\"ERROR unknown type \" + word)\n sys.exit()\n is_obj_list = True\n obj_list = []\n elif keyword == 'predicates' or keyword == 'private': #Internally 
typed predicates\n if word == ')':\n if keyword == 'private':\n #print \"...skip agent: \" + str(obj_list[:3])\n obj_list = obj_list[3:]\n keyword = 'predicates'\n if len(obj_list) == 0:\n #print \"...skip )\"\n continue\n p_name = obj_list[0]\n #print \"parse predicate: \" + p_name + \" \" + str(obj_list)\n pred_list = self._parse_name_type_pairs(obj_list[1:],self.type_list)\n self.predicates.append(Predicate(p_name, pred_list, True, False))\n obj_list = []\n elif word != '(':\n obj_list.append(word)\n elif keyword == 'functions': #functions\n if word == ')':\n p_name = obj_list[0]\n if obj_list[0] == '-':\n obj_list = obj_list[2:]\n #print \"function: \" + word + \" - \" + str(obj_list)\n self.functions.append(Function(obj_list))\n obj_list = []\n elif word != '(':\n obj_list.append(word)\n\n #Work on the actions\n new_actions = []\n for action in self.actions:\n if action[0] == '-':\n action = action[2:]\n act_name = action[1]\n act = {}\n action = action[2:]\n keyword = ''\n for word in action:\n if word.startswith(':'):\n keyword = word[1:]\n else:\n act.setdefault(keyword, []).append(word)\n self.agent_types.add(act.get('agent')[2])\n agent = self._parse_name_type_pairs(act.get('agent'),self.type_list)\n param_list = agent + self._parse_name_type_pairs(act.get('parameters')[1:-1],self.type_list)\n up_params = Predicate('', param_list, True, False)\n pre_list = self._parse_unground_propositions(act.get('precondition'))\n eff_list = self._parse_unground_propositions(act.get('effect'))\n new_act = Action(act_name, up_params, pre_list, eff_list)\n\n new_actions.append(new_act)\n self.actions = new_actions", "def _get_domain(self):\n self.ensure_one()\n domain = []\n return domain", "def set_domain(self, var, domain) :\n if var not in self.variables :\n raise KeyError(str(var) + \" is not a variable in this problem.\")\n self.domains[var] = sorted(domain[:])\n return self", "def domains(cls):\n return [cls.domain]", "def list_domain_names():\n pass", "def in_second_domain_set(self, var1, var2, var3):\n var4 = [\"ha\", \"hb\", \"ac\", \"sc\", \"gd\", \"sd\", \"he\", \"ah\", \"qh\", \"sh\", \"hi\", \"bj\", \"fj\", \"tj\", \"xj\", \"zj\", \"hk\", \"hl\", \"jl\", \"nm\", \"hn\", \"ln\", \"sn\", \"yn\", \"co\", \"mo\", \"cq\", \"gs\", \"js\", \"tw\", \"gx\", \"jx\", \"nx\", \"sx\", \"gz\", \"xz\"]\n var5 = [\"cat\", \"edu\", \"net\", \"biz\", \"mil\", \"int\", \"com\", \"gov\", \"org\", \"pro\"]\n var6 = [\"name\", \"aero\", \"info\", \"coop\", \"jobs\", \"mobi\", \"arpa\"]\n var7 = [\"travel\", \"museum\"]\n var8 = [None, None, var4, var5, var6, None, var7]\n var9 = [0, 0, len(var4), len(var5), len(var6), 0, len(var7)]\n if var2==2:\n return self.in_domain_set(var1, var2, var8[var2], var9[var2], var3)\n elif var2==3:\n return self.in_domain_set(var1, var2, var8[var2], var9[var2], var3)\n elif var2==4:\n return self.in_domain_set(var1, var2, var8[var2], var9[var2], var3)\n elif var2==6:\n return self.in_domain_set(var1, var2, var8[var2], var9[var2], var3)\n elif var2==5:\n pass\n else:\n return 0", "def global_wildcard_constraints(self, **content):\n self.modifier.wildcard_constraints.update(content)\n # update all rules so far\n for rule in self.modifier.rules:\n rule.update_wildcard_constraints()", "def __init__(self, domain_keys, require_domain=True, datasets=None):\n assert isinstance(domain_keys, list) or isinstance(domain_keys, str)\n if isinstance(domain_keys, list):\n self.domain_keys = domain_keys\n elif isinstance(domain_keys, str):\n self.domain_keys = [x for x in 
domain_keys.split(',')]\n self.require_domain = require_domain\n self.domain_dict = dict(zip(self.domain_keys, range(len(self.domain_keys))))\n\n if datasets is None:\n datasets = []\n for domain_key in self.domain_keys:\n extra_args = {k: v for dic in [self.domain_specific_params(), self.domain_default_params()] for k, v in dic.items()}\n datasets += [self.get_single_dataset(domain_key, **extra_args)]\n super(DomainDatasetBase, self).__init__(datasets)", "def get_domain_whitelist(self):\n whitelist = self.charm_config[\"federation-domain-whitelist\"]\n return list(filter(None, whitelist.split(\",\")))", "def create_domain(self, url_data, service_id, service_version):\n request_dict = {k: v[0] for k, v in url_data}\n domain_name = request_dict['name']\n\n create_domain = {\n 'comment': '',\n 'service_id': service_id,\n 'version': service_version,\n 'name': domain_name}\n\n if 'domain_list' not in self.fastly_cache[service_id]:\n self.fastly_cache[service_id]['domain_list'] = []\n\n self.fastly_cache[service_id]['domain_list'].append(\n [create_domain, 'None', 'False'])\n return create_domain", "def __learn__(self):\n\n return domains # including 3 values, (begin, end, key)", "def exclude_domain(domain, exclusion_pattern, exclusion_regexes):\n\n exclusion_regex = re.compile(exclusion_pattern + domain)\n exclusion_regexes.append(exclusion_regex)\n\n return exclusion_regexes", "def _domain(self):\n if self.__domain is None:\n self.__domain = Domain(\n definition='Need domain definition?',\n updatable='False',\n optional='False',\n )\n self._ident[self._domain_name] = self.__domain\n self._data_record.domain_ids = [self._domain_name,]\n return self.__domain", "def domains(cls):\n return (cls.domain, )", "def append_allowed_hosts(self, hostname):\r\n settings.ALLOWED_HOSTS.append(hostname)\r\n self.addCleanup(settings.ALLOWED_HOSTS.pop)", "def __isOnDomainList(self, rules, domain):\n for rule in rules:\n if rule.startswith(\".\"):\n if domain.endswith(rule):\n return True\n \n withoutDot = rule[1:]\n if domain == withoutDot:\n return True\n else:\n domainEnding = domain[-(len(rule) + 1):]\n if (\n domainEnding and\n domainEnding[0] == \".\" and\n domain.endswith(rule)\n ):\n return True\n \n if rule == domain:\n return True\n \n return False", "def create(self):\n\n if self.call(method='addSubdomain', args=[self.domainname, self.subdomain]):\n return self", "def domains(self) -> pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationRoute53DomainArgs']]]:\n return pulumi.get(self, \"domains\")", "def resolve(self,\n ns_servers: List[Dict[str, str]] = [{'IPv4 address': '8.8.8.8', 'MAC address': '01:23:45:67:89:0a'}],\n domain: str = 'google.com',\n subdomains_list: List[str] = ['www', 'mail', 'ns', 'test'],\n subdomains_file: Union[None, str] = None,\n subdomains_brute: bool = False,\n max_threats_count: int = 10,\n udp_destination_port: int = 53,\n timeout: int = 30) -> List[Dict[str, str]]:\n\n try:\n\n # region Clear results list\n self.index_of_dns_query = 0\n self.results.clear()\n self.uniq_hosts.clear()\n # endregion\n\n # region Set target domain\n assert not (domain == ''), \\\n 'Target domain is empty, please set target domain in this parameter: ' + self.base.info_text('domain')\n self.domain = domain\n # endregion\n\n # region Subdomains list\n if len(subdomains_list) > 0:\n self.subdomains = subdomains_list\n # endregion\n\n # region Subdomains file\n if subdomains_file is not None:\n assert isfile(subdomains_file), \\\n 'File with subdomain list:' + 
self.base.error_text(subdomains_file) + ' not found!'\n with open(subdomains_file) as subdomains_file_descriptor:\n for subdomain in subdomains_file_descriptor.read().splitlines():\n self.subdomains.append(subdomain)\n # endregion\n\n # region Subdomains brute\n if subdomains_brute:\n\n if not self.quiet:\n self.base.print_info('Make subdomains list for brute .... ')\n\n for character1 in RawDnsResolver.available_characters:\n self.subdomains.append(character1)\n for character2 in RawDnsResolver.available_characters:\n self.subdomains.append(character1 + character2)\n for character3 in RawDnsResolver.available_characters:\n self.subdomains.append(character1 + character2 + character3)\n # endregion\n\n # region Check length of subdomains list\n assert len(self.subdomains) != 0, \\\n 'List containing subdomains is empty, please set any of this parameters: ' \\\n + self.base.info_text('subdomain_list') + ' or ' \\\n + self.base.info_text('subdomain_file') + ' or ' \\\n + self.base.info_text('subdomain_brute')\n # endregion\n\n # region Create raw socket\n raw_socket: socket = socket(AF_PACKET, SOCK_RAW)\n raw_socket.bind((self.network_interface, 0))\n # endregion\n\n # region Truncate temporary results file\n temporary_results_file = open(RawDnsResolver.temporary_results_filename, 'r+')\n temporary_results_file.truncate()\n temporary_results_file.close()\n # endregion\n\n # region Sniff DNS answers\n if not self.quiet:\n self.base.print_info('Start DNS answers sniffer for domain: ', self.domain)\n\n threats: ThreadManager = ThreadManager(max_threats_count)\n self._sniff_start(self.your_mac_address, self.your_ipv4_address,\n self.your_ipv6_address, udp_destination_port)\n threats.add_task(self._sniff_check)\n # endregion\n\n # region Send DNS queries\n if not self.quiet:\n self.base.print_info('Start sending DNS queries, time: ', str(datetime.now()))\n\n self._send_queries(send_socket=raw_socket,\n source_mac_address=self.your_mac_address,\n source_ipv4_address=self.your_ipv4_address,\n source_ipv6_address=self.your_ipv6_address,\n domain=domain,\n ns_servers=ns_servers,\n destination_port=udp_destination_port,\n max_threats_count=int(max_threats_count) - 1,\n subdomains=self.subdomains)\n # endregion\n\n # region Timeout\n if not self.quiet:\n self.base.print_info('Wait timeout: ', str(timeout) + ' sec')\n sleep(timeout)\n # endregion\n\n # region Return results\n self._sniff_stop()\n if not self.quiet:\n if len(self.results) > 0:\n self.base.print_success('Found ', str(len(self.results)),\n ' subdomains and addresses for domain: ', self.domain)\n else:\n self.base.print_error('Not found subdomains in domain: ', self.domain)\n return self.results\n # endregion\n\n except AssertionError as Error:\n self.base.print_error(Error.args[0])\n exit(1)", "def domain(self, domain):\n\n self._domain = domain", "def domain(self, domain):\n\n self._domain = domain", "def __init__(self):\n self.excluded_entities = []\n self.excluded_domains = []\n self.included_entities = []\n self.included_domains = []", "def add(self, domain, header, record_type, data, ttl=300, priority=5):\n return request(\n API_LIST.DNS_ADD.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain,\n 'header': header,\n 'type': record_type,\n 'data': data,\n 'ttl': ttl,\n 'priority': priority\n }\n )", "def add_rules(self, rules: List[Rule]):\n self.rules.extend(rules)", "def extract_domains(self, resp):\n return", "def add_domain_routes(app):\n\n @app.route(\"/v1/list_agencies/\", methods=[\"GET\"])\n 
@get_dabs_sub_tier_agencies\n def list_agencies(cgac_sub_tiers, frec_sub_tiers):\n \"\"\" Get all agencies the current user has DABS access to.\n Args:\n cgac_sub_tiers - List of all CGAC SubTierAgencies generated by the get_dabs_sub_tier_agencies decorator,\n required to list only sub_tier_agencies that user has DABS permissions for\n frec_sub_tiers - List of all FREC SubTierAgencies generated by the get_dabs_sub_tier_agencies decorator,\n required to list only sub_tier_agencies that user has DABS permissions for\n \"\"\"\n return JsonResponse.create(StatusCode.OK, get_accessible_agencies(cgac_sub_tiers, frec_sub_tiers))\n\n @app.route(\"/v1/list_all_agencies/\", methods=[\"GET\"])\n def list_all_agencies():\n \"\"\" List all CGAC and FREC Agencies \"\"\"\n return JsonResponse.create(StatusCode.OK, get_all_agencies())\n\n @app.route(\"/v1/list_sub_tier_agencies/\", methods=[\"GET\"])\n @get_fabs_sub_tier_agencies\n def list_sub_tier_agencies(sub_tier_agencies):\n \"\"\" List all Sub-Tier Agencies user has FABS permissions for\n Args:\n sub_tier_agencies - List of all SubTierAgencies generated by the get_fabs_sub_tier_agencies decorator,\n required to list only sub_tier_agencies that user has FABS permissions for\n \"\"\"\n return JsonResponse.create(StatusCode.OK, organize_sub_tier_agencies(sub_tier_agencies))", "def limitDomain(absoluteLinks, domain):\n domainRegex = re.compile(\"%s/\" % domain)\n return set(filter(domainRegex.search, absoluteLinks))", "def show_domains(self):\n show_domains(self.system.cavity_gri)", "def domains_v2():\n # Is this public?\n configs = get_configs()\n if configs['api_requests'] == 'auth':\n # Auth token in headers\n try:\n auth_token = Token.query.filter_by(auth_token=request.headers.get('Authorization')).first()\n except:\n return {\"alternatives\" : \"Database Error with token!\"}\n if not auth_token:\n return {\"alternatives\": \"Unauthorized!\"}\n\n req_data = request.get_json()\n url = req_data['url']\n if not url:\n return {\"alternatives\" : 'None'}\n \n domain_data = check(url)\n alternatives = {\"alternatives\": domain_data['available_alternatives']}\n return alternatives", "def list_domain_names(self) -> Dict:\n pass", "def domainRouterSet(self, domain, body):\n pass", "def report_domain():\n req_data = request.get_json()\n\n # is authentication token correct?\n\n try:\n auth_token = Token.query.filter_by(auth_token=request.headers.get('Authorization')).first()\n except:\n return {\"alternatives\" : \"Database Error with token!\"}\n if not auth_token:\n return {\"alternatives\": \"Unauthorized!\"}\n\n now = datetime.datetime.now()\n\n # Have we seen this domain before?\n try:\n domain = Domain.query.filter_by(domain=req_data['domain']).first()\n except:\n return {\"report\" : \"Database Error with domain query!\"}\n\n if domain: # we've seen it before\n domain_id = domain.id\n # Have we seen the mirror before?\n try:\n mirror = Mirror.query.filter_by(mirror_url=req_data['mirror_url']).first()\n except:\n return {\"report\" : \"Database Error with mirror query!\"}\n if mirror:\n mirror_id = mirror.id\n else:\n mirror = False\n else: # Let's add it\n try:\n domain = Domain(domain=req_data['domain'])\n db.session.add(domain)\n db.session.commit()\n except:\n return {\"report\" : \"Database Error with mirror addition!\"}\n domain_id = domain.id\n mirror = False # No domain, no mirror\n \n # Add mirror\n if not mirror:\n mirror = Mirror(\n mirror_url=req_data['mirror_url'],\n domain_id=domain_id)\n try:\n db.session.add(mirror)\n db.session.commit()\n 
except:\n return {\"report\" : \"Database Error with mirror addition!\"}\n mirror_id = mirror.id\n\n # check values for lat/long/accuracy\n try:\n float(req_data['latitude'])\n except ValueError:\n req_data['latitude'] = None\n \n try:\n float(req_data['longitude'])\n except ValueError:\n req_data['longitude'] = None\n try:\n int(req_data['accuracy'])\n except ValueError:\n req_data['accuracy'] = None\n\n # Make the report\n req_data['auth_token'] = auth_token.auth_token\n req_data['date_reported'] = now\n req_data['domain_id'] = domain_id\n req_data['mirror_id'] = mirror_id\n req_data.pop('domain')\n req_data.pop('mirror_url')\n try:\n report = Report(**req_data)\n db.session.add(report)\n db.session.commit()\n except:\n return {\"report\" : \"Database Error with report!\"}\n\n\n return {\"report\": \"Successfully reported.\"}", "def add_boundary(self, boundary):\n self._bounds.append(boundary)", "def set_keystone_v3_domain(self, **kwargs):\n LOG_OBJ.debug(\"Creating the domain.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains/\" + \\\n str(kwargs['domain_id'])\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _domain_info = {\"domain\": {}}\n for argument in [\"name\", \"description\", \"enabled\", \"disabled\"]:\n try:\n _domain_info['domain'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_domain_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while set the domain\")\n print (\"No response from Server while set the domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Set domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\"Set domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n return True", "def validate_domains(fc, domain_dict, null_ignore=False):\r\n list_of_fields = arcpy.ListFields(fc)\r\n list_of_field_names = [field.name for field in list_of_fields]\r\n # add error field if necessary\r\n if \"ERROR_MESSAGE\" not in list_of_field_names:\r\n arcpy.AddField_management(fc, \"ERROR_MESSAGE\", field_type=\"TEXT\", field_length=1000)\r\n list_of_field_names.append(\"ERROR_MESSAGE\")\r\n\r\n # parse through all records and fields and insert error message if error found\r\n with arcpy.da.UpdateCursor(fc, list_of_field_names) as cur:\r\n for row in cur:\r\n error_message = ''\r\n for idx in range(len(row)-1):\r\n # check if field has domain\r\n if list_of_fields[idx].domain:\r\n # check if field value is in domain value list\r\n if row[idx] not in domain_dict[list_of_fields[idx].domain].keys():\r\n if not null_ignore:\r\n error_message += \"{} contains invalid value - {}, \".format(str(list_of_field_names[idx]), str(row[idx]))\r\n else:\r\n # ignore nulls when creating error message\r\n if row[idx]:\r\n error_message += \"{} contains invalid value - {}, \".format(\r\n str(list_of_field_names[idx]), str(row[idx]))\r\n if error_message:\r\n row[list_of_field_names.index(\"ERROR_MESSAGE\")] = error_message[:-2] # drop last comma\r\n else:\r\n # set error message field to null if there are no errors\r\n row[list_of_field_names.index(\"ERROR_MESSAGE\")] = None\r\n cur.updateRow(row)\r\n del cur # get rid of any remaining locks\r", "def add_rules(self, rules):\n self.name.append(rules)", "def post_domain_create(self, resource_dict):\n pass", "def 
support_pruning(self):\r\n if self.curr_domains is None:\r\n self.curr_domains = {v: list(self.domains[v]) for v in self.variables}", "def restrict_domain(self, sub):\n D = self.domain()\n if hasattr(D, 'coordinate_module'):\n # We only have to do this in case the module supports\n # alternative basis. Some modules do, some modules don't.\n V = D.coordinate_module(sub)\n else:\n V = sub.free_module()\n A = self.matrix().restrict_domain(V)\n H = sub.Hom(self.codomain())\n return H(A)", "def _set_domain_param_attrs(self, domain_param: dict):\n for name in self.supported_domain_param:\n dp = domain_param.get(name, None)\n if dp is not None:\n setattr(self, name, dp)", "def domain_args(domains):\n return ' ' + ' '.join(['-d {0}'.format(domain) for domain in domains])", "def _parse_domain(self, f_domain):\n\n parse_tree = PDDL_Tree.create(f_domain)\n\n assert \"domain\" in parse_tree, \"Domain must have a name\"\n self.domain_name = parse_tree [\"domain\"].named_children ()[0]\n\n # must read types before constants\n if \":types\" in parse_tree:\n if \"-\" in parse_tree[\":types\"].named_children():\n type_hierarchy = PDDL_Utils.read_type(parse_tree[\":types\"])\n self.parent_types = {subtype: parent for subtype, parent in type_hierarchy}\n self.types = set(parse_tree[\":types\"].named_children())\n self.types.discard(\"-\")\n else:\n self.types = set(parse_tree[\":types\"].named_children())\n self.parent_types = {t: None for t in self.types}\n else:\n self.types = set([Predicate.OBJECT])\n self.parent_types = {Predicate.OBJECT: None}\n\n self.agents = [a.name for a in parse_tree[\":agents\"].children]\n self.types.add('agent')\n self.parent_types['agent'] = None\n self._add_objects([(ag, 'agent') for ag in self.agents])\n\n # must read in constants before actions or predicates\n if \":constants\" in parse_tree:\n object_list = PDDL_Utils.read_type(parse_tree[\":constants\"])\n self._add_objects(object_list)\n\n #TODO this may not be correct, depending on the type hierarchy\n const_map = dict()\n for const in self.objects:\n if len(self.obj_to_type[const]) == 0:\n raise RuntimeError(\"No type for constant object %s\" % const)\n else:\n const_map[const] = list(self.obj_to_type[const])[0]\n\n self.predicates = [self.to_predicate(c, map=const_map) for c in parse_tree[\":predicates\"].children]\n\n # some predicates have this property: they are untyped.\n for predicate in self.predicates:\n if Predicate.OBJECT not in self.types and any([arg[1] == Predicate.OBJECT for arg in predicate.args]):\n for t in self.types:\n if self.parent_types[t] is None:\n self.parent_types[t] = Predicate.OBJECT\n\n self.parent_types[Predicate.OBJECT] = None\n self.types.add(Predicate.OBJECT)\n self.type_to_obj[Predicate.OBJECT] = set([])\n for obj, type_list in self.obj_to_type.items():\n type_list.add(Predicate.OBJECT)\n self.type_to_obj[Predicate.OBJECT].add(obj)\n\n # only need to do this once, obviously\n break\n\n self.actions = [self.to_action(c) for c in parse_tree.find_all(\":action\")]", "def run(self, domain):\n\n url = re.findall('(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', domain)\n if len(url) >= 1:\n for r in url:\n self.domain = r\n else:\n print(\"Provide a valid url ie www.example.com \")\n self.blind # calling a method" ]
[ "0.7922115", "0.623785", "0.60880244", "0.5960196", "0.58387566", "0.57663685", "0.5692345", "0.5655885", "0.56333846", "0.55938256", "0.558293", "0.5558514", "0.55429906", "0.5535729", "0.5511199", "0.54556143", "0.5452144", "0.54225695", "0.54151505", "0.54145116", "0.5341205", "0.5337445", "0.5314957", "0.5277656", "0.5272155", "0.5272155", "0.52388835", "0.52074236", "0.5205038", "0.51969105", "0.5194937", "0.51487887", "0.51390934", "0.5112417", "0.5105895", "0.5096016", "0.5092236", "0.5073982", "0.5053633", "0.5053633", "0.5028754", "0.50201875", "0.5019085", "0.5003381", "0.5003055", "0.49900323", "0.49900323", "0.49768206", "0.49716696", "0.49716696", "0.49544048", "0.49376303", "0.4937602", "0.4931789", "0.4906636", "0.49031636", "0.49031636", "0.4903016", "0.49009392", "0.48991865", "0.48915932", "0.48872858", "0.48835862", "0.4882075", "0.48770517", "0.48741066", "0.48650163", "0.48593253", "0.4857695", "0.48503792", "0.48404992", "0.48348984", "0.4833869", "0.4832764", "0.48215762", "0.48188153", "0.4813224", "0.4813224", "0.4809435", "0.48083487", "0.47971365", "0.47896162", "0.47883046", "0.47766596", "0.47712725", "0.47636303", "0.47612593", "0.4756315", "0.47506714", "0.47454393", "0.4739801", "0.47381803", "0.4732675", "0.47322017", "0.47295815", "0.4724404", "0.47241434", "0.47154805", "0.47141182", "0.46980453" ]
0.7894003
1
Check if the Clase is final.
def is_implemented(cls): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Estado_final(self,profundidad:int) -> bool:\n\n\t\tself.Evaluar(profundidad)\n\t\tif self.completo:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def is_cis(self):\n prev_res = self.get_offset_residue(-1)\n if prev_res is None:\n return None\n\n prev_omega = prev_res.calc_torsion_omega()\n if prev_omega is None:\n return None\n\n if abs(prev_omega) <= (math.pi/2.0):\n return True\n\n return False", "def is_faculty():\n return _is_member('uw_faculty')", "def esta_al_final(self):\n\t\treturn self.posicion == (len(self.lista) - 1)", "def fechou(self):\n return self.tot_rodada == self.rodada", "def is_final(self):\n return (self.final_word_out is not None)", "def isFinalizada():\n if 'CIENTE_CHEFIA' in current.session.avaliacao and current.session.avaliacao['CIENTE_CHEFIA'] == 'T':\n if 'CIENTE_SERVIDOR' in current.session.avaliacao and current.session.avaliacao['CIENTE_SERVIDOR'] == 'T':\n return True", "def is_faculty(user):\n return Affil.objects.filter(user=user).exists() or \\\n faculty_courses_for_user(user).exists()", "def IsFinal(self):\n return self.state in FINAL_TEST_RUN_STATES", "def has_dominance(self):\n trait = self.traitDao.get_dominance(self.name)\n if trait is None:\n return False\n else:\n return True", "def isgood(self):\n\t\tanswer = True\n\t\t\n\t\tif self.mes_flux <= 0.0:\n\t\t\tanswer = False\n\n\t\treturn answer", "def complete_level(self):\n if self.ycor() == self.finish_line:\n return True", "def isValid(self):\n\t\treturn ValidadorChaveAcessoNFe.validar(self)", "def test_func(self):\n self.rol_nu, self.functie_nu = rol_get_huidige_functie(self.request)\n return self.rol_nu == Rollen.ROL_RCL", "def test_func(self):\n self.rol_nu, self.functie_nu = rol_get_huidige_functie(self.request)\n return self.rol_nu == Rollen.ROL_RCL", "def test_func(self):\n self.rol_nu = rol_get_huidige(self.request)\n return self.rol_nu in (Rollen.ROL_BB, Rollen.ROL_BKO, Rollen.ROL_RKO, Rollen.ROL_RCL, Rollen.ROL_HWL)", "def test_func(self):\n self.rol_nu = rol_get_huidige(self.request)\n return self.rol_nu in (Rollen.ROL_BB, Rollen.ROL_BKO, Rollen.ROL_RKO, Rollen.ROL_RCL, Rollen.ROL_HWL)", "def test_func(self):\n self.rol_nu = rol_get_huidige(self.request)\n return self.rol_nu in (Rollen.ROL_BB, Rollen.ROL_BKO, Rollen.ROL_RKO, Rollen.ROL_RCL, Rollen.ROL_HWL)", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. 
At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def check(self, full=True):\n CFL_x=self.u_scale*self.dt/self.domain.dx\n print('CFL (u_scale*dt/dx) : {:.2e}'.format(CFL_x))\n print('dx/dz : {:2.1f}\\t\\t{}'.format(self.domain.dx/self.domain.dz,'-- Should be < 5 in practice'))\n print('lx/z_inv : {:2.1f}\\t\\t{}'.format(self.domain.lx/self.inversion_depth,'-- Should be > 6. At *least* 4.'))\n divs = []\n for i in range(2,140):\n if self.domain.nz%i == 0:\n divs.append(i)\n print('Nz = {:03d} and is divisible by : {}'.format(self.domain.nz, divs))\n if full:\n print('Coriolis timescale : {:1.1e} timesteps'.format(int(1./self.freq_coriolis/self.dt)))", "def complete(self):\n return not self.numfalse", "def test_func(self):\n self.rol_nu, self.functie_nu = rol_get_huidige_functie(self.request)\n return self.rol_nu in (Rollen.ROL_BB, Rollen.ROL_BKO, Rollen.ROL_RKO, Rollen.ROL_RCL, Rollen.ROL_HWL)", "def is_valid(self) -> bool:\n return all(\n (\n not self.author,\n self.unit,\n )\n )", "def is_fille(self):\n return self.mere is not None", "def _is_finish(self, pos):\r\n return self.course[pos[0], pos[1]] == 2", "def test_func(self):\n rol_nu = rol_get_huidige(self.request)\n return rol_nu == Rollen.ROL_RCL", "def is_complete(self):\n is_complete = True\n \n if (type(self.N) is not IntType) or self.N < 2:\n warnings.warn('N not set up properly.')\n is_complete = False\n \n if self.m is None or len(self.m) != self.N:\n warnings.warn('m not set up properly.')\n is_complete = False\n \n if self.R is None or len(self.R) != self.N:\n warnings.warn('R not set up properly.')\n is_complete = False\n \n if self.a is None or len(self.a) != self.N - 1:\n warnings.warn('a not set up properly.')\n is_complete = False\n \n if self.force is None or len(self.force) != self.N:\n warnings.warn('force not set up properly.')\n is_complete = False\n \n if self.Delta is None or len(self.Delta) != self.N - 1:\n warnings.warn('Delta not set up properly.')\n is_complete = False\n \n if self.n is None or len(self.n) != self.N - 1:\n warnings.warn('n not set up properly.')\n is_complete = False\n \n if self.beta < 0.0:\n warnings.warn('beta not set up properly.')\n is_complete = False\n \n if self.m0 < 0.0:\n warnings.warn('m0 not set up properly.')\n is_complete = False\n \n if self.mu < 0.0:\n warnings.warn('mu not set up properly.')\n is_complete = False\n \n return is_complete", "def is_infrastructure (self):\n return sum([1 for i in self.infras]) != 0", "def inscricao(self):\n\n return True", "def isCandela(self):\n return _libsbml.Unit_isCandela(self)", "def is_fitted(self):\n return self.__fdata is not None", "def conclusion_echantillon(self, liste_foetus):\n compteur = 0\n for 
lignes in range(1, len(liste_foetus)):\n if liste_foetus[lignes].contamination != 0 and liste_foetus[lignes].taux > self.seuil_taux_conta:\n compteur = compteur + 1\n if compteur > self.seuil_nbre_marqueurs:\n self.conclusion = 1\n else:\n self.conclusion = 0", "def est_nul(self):\n\t\tif self.__valide:\n\t\t\treturn (self.degre() == 0) and (self.valuation().est_nul())\n\t\telse:\n\t\t\treturn False", "def is_female(self):\n\n if self._is_female is None:\n return None\n\n return self._is_female is True", "def validSuccesor(self):\n lines, cols = len(FINAL_Node.state[0]), len(FINAL_Node.state[1])\n finalFrq = FINAL_Node.getFrq()\n def tooLess(node: StateNode) -> bool:\n return len(node.state[0]) < lines or len(node.state[1]) < cols\n def badFrq(node: StateNode) -> bool:\n frq = node.getFrq()\n for i in range(26):\n if frq[i] < finalFrq[i]:\n return True\n return False\n \n return not tooLess(self) and not badFrq(self)", "def __bool__(self):\n return len(self.atoms) >= 1", "def is_concealed(self) -> bool:\n # return not self._exposed\n return sum(self.concealed_part.values()) == 13", "def est_unite(self):\n\t\tif self.__valide:\n\t\t\treturn (self.degre() == 0) and self.valuation().est_unite()\n\t\telse:\n\t\t\treturn False", "def complete(self):\n if self.__hasTABLE and self.__hasGRAPHS and self.__ndoubledollar == 4:\n return True\n else:\n return False", "def test_spires_syntax_detected_fin(self):\n # trac #261\n converter = search_engine_query_parser.SpiresToInvenioSyntaxConverter()\n spi_search = converter.is_applicable(\"fin t p\")\n self.assertEqual(spi_search, True)", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def test_function_continuity(self):\n self.check_function_continuity()", "def verify(self):\n D,S,I,C = False,False,False,False\n if self.geoData and os.path.exists(self.geoData):\n D = True\n if self.scales:\n S = True\n if type(self.idVariable) == int:\n I = True\n if self.cacheFile:\n C = True\n if D and S and I and C:\n return True\n return False", "def is_artificial(self):\n\t\treturn 0", "def __bool__(self):\n return self.fam.c_nonzero(self)", "def isFim(self):\r\n return", "def invariant(self):\n\t\treturn ((self.tenant != \"\") and (self.loginUser != \"\"))", "def is_final_interval(self):\n return self.increasing_cover_relations() == []", "def complete(self):\n if 
bool(self.namespace) and bool(self.kind) and bool(self.id):\n return True\n else:\n return False", "def check(self):\n return True", "def exists(self):\n return len(list(self.measures)) > 0", "def esta_abierta(self):\r\n self.actualizar_estado_actividad()\r\n return self.estado != 'Cerrada'", "def _isDone(self):\n return self.steps >= self.max_steps or len(self.food_ids) <= 0", "def containseverything(self) -> bool:\n return self.isinfinite() and self.complement().isempty()", "def is_eof(self) -> bool:\n ...", "def is_valid(self):\n return (4 * (self.a ** 3) + 27 * (self.b ** 2)) % self.fp != 0", "def test_is_lof(self):\n \n # check that known LOF consensequence return True\n self.var.consequence = \"stop_gained\"\n self.assertTrue(self.var.is_lof())\n \n # check that known non-LOF consensequence returns False\n self.var.consequence = \"missense_variant\"\n self.assertFalse(self.var.is_lof())\n \n # check that null values return False\n self.var.consequence = None\n self.assertFalse(self.var.is_lof())", "def consensus_reached(self):\n pos, com, success = self.perception\n if len(com) > 0 and self.time > 1:\n return all(map(lambda x: x[1][\"consensus\"], com)) and self.consensus\n else:\n return True", "def check(self,):\n self.is_valid_according_policy()", "def checkCashCal(self, usrCals):\n self.check_cal = False\n for calen in usrCals['items']:\n if 'Cash Flow' in calen['summary']:\n self.check_cal = True\n return self.check_cal", "def is_final(self):\n return (\n self.status == self.STATUS_DISCONNECT\n or self.status == self.STATUS_DONE\n or self.status == self.STATUS_PARTNER_DISCONNECT\n or self.status == self.STATUS_PARTNER_DISCONNECT_EARLY\n or self.status == self.STATUS_RETURNED\n or self.status == self.STATUS_EXPIRED\n )", "def is_complete(self):\n return not (self.year is None or\n self.month is None or\n self.day is None)", "def is_finite(self):\n return False", "def testHealthAssessClaudication(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"claudication\")\n\n self.util.boolPropertyTest(self, attr, \"claudication\")", "def is_FSAL(self):\n if np.all(self.A[-1,:]==self.b): return True\n else: return False", "def isTerminalFinished(self):\n return self.f4 is 'F'", "def is_real(self):\n\n return self.purpose == 'real'", "def test_func(self):\n self.rol_nu, self.functie_nu = rol_get_huidige_functie(self.request)\n return self.rol_nu == Rollen.ROL_RKO", "def checkdia(tarea_mensual):\n\n if tarea_mensual == 1:\n return 1\n else:\n return 0", "def has_fcc(self):\n raise NotImplementedError", "def is_valid(self):\n return self.is_signed and not self.is_expired", "def __bool__(self):\n return self.isValid()", "def Complete(self):\n return self.century is not None and self.day is not None", "def isGoal(self):\n for index in range(self.DIM):\n if not self.values('r',index).count(0) is 0:\n return False\n if not self.isValid():\n return False\n return True", "def is_goal(self):\n if 0 in self.final_values: # Check if any zeroes are in the final states\n return False\n return True", "def is_fcc_off(self):\n raise NotImplementedError", "def __bool__(self):\n return bool(self.exp)", "def is_field(self, proof = True):\n return True", "def test_proper(self):\n\n self.assertTrue(self.cs.isProper)\n self.assertFalse(self.cs.isDegenerate)", "def is_full(self):\n return self.name and self.variables and self.assumptions and self.guarantees", "def verify(self):\n if not self.verify_succ_pred_fingers():\n return False\n\n return True", "def isFissile(self):\n 
return self.name in self.fissile", "def check():", "def check_in(self, token: tokenize.TokenInfo) -> bool:\n if self._seen_for:\n if not self._seen_for_in_line:\n if not self._seen_if_in_line:\n self._reported = True\n return False\n return True" ]
[ "0.6189312", "0.60821956", "0.5967552", "0.57333314", "0.568045", "0.56697154", "0.5623153", "0.552748", "0.545972", "0.5450248", "0.5449714", "0.54326916", "0.5420256", "0.5419858", "0.5419858", "0.5409019", "0.5409019", "0.5409019", "0.54056567", "0.54056567", "0.54056567", "0.54056567", "0.53867245", "0.5349228", "0.53487504", "0.53322375", "0.53223807", "0.5310802", "0.5301431", "0.5294556", "0.5276639", "0.5273652", "0.526552", "0.52550685", "0.5251571", "0.523767", "0.52346665", "0.5226417", "0.52189445", "0.5214024", "0.5190628", "0.5185161", "0.5183208", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5183037", "0.5176065", "0.5175105", "0.5171618", "0.51684064", "0.5161213", "0.51579344", "0.513827", "0.51378", "0.51367724", "0.5133191", "0.51297027", "0.512723", "0.51271325", "0.5120791", "0.5118097", "0.5113741", "0.5097999", "0.50913006", "0.50790715", "0.5077807", "0.5066039", "0.5065705", "0.5065113", "0.50577515", "0.50572395", "0.50541615", "0.5051123", "0.5049669", "0.50474805", "0.50473034", "0.50412035", "0.5038871", "0.5036082", "0.5034315", "0.50328887", "0.502872", "0.50284964", "0.50275", "0.50265354", "0.50233054", "0.5014728", "0.50145525" ]
0.0
-1
Get the size of every variable.
def _get_size_var(self): size_var = [] for index in range(self._nr_args): restriction = self._domain_restricion[index] size_var.append(utils.get_nr_bits(restriction, self._precision)) return size_var
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize", "def size(self, varname):\n if self.handle == None: return []\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return []\n \n def dimlen(d):\n dim = self.handle.dimensions[d]\n if dim != None:\n t = type(dim).__name__\n if t == 'int':\n return dim\n return len(dim)\n return 0\n return map(lambda d: dimlen(d), var.dimensions)", "def __len__(self) -> int:\n return len(self.variables)", "def size_of_variables(glob):\n return sorted(\n [\n (k, sys.getsizeof(glob[k]) / 1e6)\n for k in list(glob.keys())\n ],\n key=lambda k_v: k_v[1],\n reverse=True\n )", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def nvar(self):\n return len(self.__vars)", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n self.size.value = tmpsize\n return self.size.value + self.ID.get_size() + self.size.get_size()", "def size_of_variable(self, variable):\n index_structures = variable.index_structures\n if not index_structures:\n return 1\n mapping = [self.mod_index[ind].mapping for ind in index_structures]\n blocking = [self.mod_index[ind].blocking for ind in index_structures]\n size = []\n for i in range(len(mapping)):\n if mapping[i] and blocking[i]:\n length = 0\n for blk in blocking[i]:\n if blk == 0:\n length += 1\n else:\n length += blk\n size.append(length)\n else:\n return None\n return size", "def size(self):\n return self._N", "def nVariables(self):\n return len(self.variables)", "def countVars(self):\n return len(self.initializedVars[\"GF\"]) + len(self.initializedVars[\"LF\"]) + len(self.initializedVars[\"TF\"])", "def get_num_variables(self):\n return len(self.variables)", "def num_vars(self):\n return len(self.bounds.lb)", "def size(self):\n return self.N", "def getSize(self):\n if self.sym != None:\n return self.sym.getSize()\n return self.define.getSize()", "def size(self) -> int:", "def size() -> int:\n ...", "def num_vars(self):\n return self.nvars", "def nvar(self):\n return len(self.v)", "def _get_variable_size(self, variable):\n # If domain is empty then variable has size 1\n if variable.domain == []:\n return 1\n else:\n size = 0\n spatial_method = self.spatial_methods[variable.domain[0]]\n repeats = spatial_method._get_auxiliary_domain_repeats(variable.domains)\n for dom in variable.domain:\n size += spatial_method.mesh[dom].npts_for_broadcast_to_nodes * repeats\n return size", "def __len__(self):\n\n value_length = []\n for v in chain(self.values(), self.metainfo_values()):\n if isinstance(v, LabelData):\n value_length.append(v.label.shape[0])\n elif is_splitable_var(v):\n value_length.append(len(v))\n else:\n continue\n\n # NOTE: If length of values are not same or the current data sample\n # is empty, return length as 1\n if len(list(set(value_length))) != 1:\n return 1\n\n length = value_length[0]\n return length", "def num_vars(self):\n return self._nvars", "def size(self):\n\t\treturn self.dims", "def size(self, level=None):\n level = level or self.local_variables\n names = {}\n while level:\n for name in level.bindings:\n names[name] = 1\n level = level.parent\n return len(names)", "def get_var_nbytes(self, var_name):\n return self.get_value_ref(var_name).nbytes", "def size(self):", "def __len__(self):\n return 
len(self._varvals)", "def __sizeof__(self):\r\n\r\n S = 0 # Full size of the object\r\n if self.loss_list is not None:\r\n for value in self.loss_list:\r\n S += getsizeof(value)\r\n if self.meshsol_list is not None:\r\n for value in self.meshsol_list:\r\n S += getsizeof(value)\r\n if self.loss_index is not None:\r\n for key, value in self.loss_index.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.logger_name)\r\n if self.axes_dict is not None:\r\n for key, value in self.axes_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n S += getsizeof(self.Pstator)\r\n S += getsizeof(self.Protor)\r\n S += getsizeof(self.Pmagnet)\r\n S += getsizeof(self.Pprox)\r\n S += getsizeof(self.Pjoule)\r\n if self.coeff_dict is not None:\r\n for key, value in self.coeff_dict.items():\r\n S += getsizeof(value) + getsizeof(key)\r\n return S", "def count_vars(scope=''):\n v = get_vars(scope)\n return sum([np.prod(var.shape.as_list()) for var in v])", "def size(self):\n pass", "def size(self):\n pass", "def size(self):\n pass", "def size(self, gather=True):\n raise NotImplementedError", "def size(self):\r\n return len(atoms)", "def size(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"size\")", "def get_size(self):\n lines = len(self.coefficients)\n columns = 0 if lines == 0 else len(self.coefficients[0])\n return lines, columns", "def local_symbols_size(self):\n size = 0\n for s in self.symbols[-1]:\n if self.symbols[-1][s].type == 'procedure': continue\n if self.symbols[-1][s].isparam: continue\n #if self.symbols[-1][s].isparam: continue\n size += self.symbols[-1][s].size\n return size", "def size(self):\n return sum(elem.size for elem in self)", "def size(self):\n return self.__nelems", "def numel(self):\n return self.t.size", "def totalsize(self):\n return sum([sz for sz in self.iterate()])", "def _nbytes(self, deep: bool = False) -> int:\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result", "def __len__(self):\n return sum(self.size_freqs.values())", "def size(self):\r\n return self.__length", "def n_items(self) -> int:\n return len(self._data_vars)", "def domain_size(self):\n all_vars = self.all_variables()\n if not all_vars:\n return 0\n return np.prod([v.size for v in all_vars])", "def size(self) -> int:\n size = self.da.length()\n return size", "def getSize(self):\n return sum(m.getSize() for m in self.members)", "def dimensions():", "def n_variables(self):\n return sum([p.n_variables for p in self.parameters])", "def size(self):\n return self.__size", "def size(self):\n return self.__length", "def size(self):\n\t\treturn (len(self.aminos))", "def size(cls):\n return (cls.num_properties()*2 + 2)", "def size_out(self):\n return self.dimensions", "def get_size(self):", "def size(self):\r\n # Anthony stage 2\r\n return number_size(self.n) - 1", "def _get_vector_size(self):\n if len(self):\n return len(self.values()[0])\n else:\n return 0", "def size(self):\n return _libsbml.ListOf_size(self)", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def GetNumberOfVariables(self):\n\n # nvar = 0\n # for i in self.variables_order:\n # # DO NOT COUNT VARIABLES THAT GET CONDENSED OUT\n # if i!=0:\n # 
if mesh.element_type == \"tri\":\n # nvar += (i+1)*(i+2) // 2\n # elif mesh.element_type == \"tet\":\n # nvar += (i+1)*(i+2)*(i+3) // 6\n # elif mesh.element_type == \"quad\":\n # nvar += (i+1)**2\n # elif mesh.element_type == \"hex\":\n # nvar += (i+1)**3\n\n # nvar = sum(self.variables_order)\n if self.nvar == None:\n self.nvar = self.ndim\n return self.nvar", "def __len__(self):\n a = 1\n for size in self.sizes:\n a *= size\n return a", "def var_sizes_check(global_dic):\n import types\n def _print(a, b):\n print(\"|{:>15}|{:>13}|\".format(a, b))\n\n _print(\"Variable\", \"Size\")\n print(\"-\"*31)\n for k, v in global_dic.items():\n if not k.startswith('_') and not isinstance(v, types.ModuleType):\n # print size of variable\n if hasattr(v, 'size'):\n try:\n _print(k, v.size)\n except:\n continue\n # print length of variable\n elif hasattr(v, '__len__'):\n try:\n _print(k, len(v))\n except:\n continue", "def size(self):\n return len(self.array_form)", "def size(self):\n\t\treturn self._count", "def size (self):\n\t\timport struct\n\t\treturn struct.calcsize (self.struct)", "def size(self) -> int:\n\n return self.sizes.sum()", "def dim(self):\n return len(self._n)", "def size(self):\n return self.getattr('size')", "def total_length():\n return", "def size(self):\n return self._length", "def size(self):\n return reduce(lambda x, ins: x + ins.size, self.instructions, 0)", "def size(name):", "def size(self):\n return self._size", "def __len__(self):\n return sum(len(p) for p in self.parts)", "def size(self):\r\n return self.info().size", "def ndims(self, varname):\n if self.handle == None: return None\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return None\n return len(var.dimensions)", "def __len__(self) -> int:\n\n return self.layout.gaDims", "def size(self):\r\n return self._size", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def size(self): \r\n pass", "def get_size(self) -> int:\n total_size = 0\n for entry in self.__entries:\n total_size += entry.get_size()\n return total_size", "def size(self):\n return self.variables.end_of_tree - 1", "def size(self) -> int:\r\n return self.da.length()", "def get_total_trainable_parameter_size():\n total_parameters = 0\n import tensorflow as tf\n for variable in tf.trainable_variables():\n # shape is an array of tf.Dimension\n total_parameters += np.product([x.value for x in variable.get_shape()])\n return total_parameters", "def length(self):\n return int(np.sum([x.length for x in self.parameters]))", "def get_size(self):\n return (\n sys.getsizeof(self.children) +\n sys.getsizeof(self.parent) +\n sys.getsizeof(self.dataset_id) +\n sys.getsizeof(self.k) +\n self.filter.get_size()\n )", "def getNumDimensions(self):\n return len(self.di.keys())", "def __len__(self) -> int:\n return self.disp_size ** 2", "def size(self):\n return tuple(self._size)", "def ndim(self):\n return len(self.nvars)", "def getSize(self):\r\n list = self.getList()\r\n return len(list)", "def size(self) -> int:\n return sum(ob.size for ob in self.objects.ravel())", "def size(self):\n raise NotImplementedError", "def size(self):\n return len(self.__m__)", "def size(self):\n\t\treturn len(self.lables)", "def dim(self) -> int:", "def size(*args):", "def __len__(self):\n n = 1\n for valTuple in self._valListOfLists:\n n *= len(valTuple)\n return n" ]
[ "0.8138758", "0.80240923", "0.7549388", "0.7541823", "0.7528079", "0.7511355", "0.75046957", "0.7494464", "0.7438866", "0.7414112", "0.73386174", "0.7333908", "0.73007977", "0.72860545", "0.72836995", "0.7265444", "0.72609293", "0.7239517", "0.7197402", "0.7189756", "0.7178838", "0.71249104", "0.7109817", "0.7105279", "0.71050686", "0.7101672", "0.70939124", "0.7084729", "0.70645213", "0.7024943", "0.7024943", "0.7024943", "0.698481", "0.6978507", "0.6964449", "0.6953326", "0.6952669", "0.69506", "0.69463784", "0.69408447", "0.6933347", "0.6928504", "0.6895948", "0.6894516", "0.68870544", "0.68858063", "0.6883586", "0.68813", "0.6881131", "0.6874068", "0.68643045", "0.6861543", "0.68419015", "0.6830399", "0.6806685", "0.68037367", "0.68011457", "0.6793807", "0.6790684", "0.67867905", "0.67863256", "0.67799014", "0.67646754", "0.6757251", "0.6757155", "0.6752977", "0.67481613", "0.6733872", "0.67322147", "0.6730838", "0.6724602", "0.67210793", "0.6717246", "0.67150086", "0.67112637", "0.67109656", "0.6710576", "0.66971433", "0.6697066", "0.6694458", "0.6694458", "0.66896594", "0.668965", "0.6688108", "0.6687801", "0.66835964", "0.66821045", "0.6676803", "0.6674568", "0.66727597", "0.6671809", "0.6671162", "0.6669964", "0.66648203", "0.66467845", "0.6638752", "0.6636485", "0.66323274", "0.66322523", "0.6632154" ]
0.7626377
2