Dataset columns:
query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: list (length 30)
negative_scores: list (length 30)
document_score: string (lengths 4 to 10)
document_rank: string (2 classes)
Manual tracking of objects in video clips using the mouse. Allows manual tracking of one or more objects in the video clip between times `t1` and `t2`. This displays the clip frame by frame and you must click on the object(s) in each frame. If ``t2=None`` only the frame at ``t1`` is taken into account. Returns a list ``[(t1, x1, y1), (t2, x2, y2)...]`` if there is one object per frame, else returns a list whose elements are of the form ``(ti, [(xi1, yi1), (xi2, yi2)...])``.
def manual_tracking(clip, t1=None, t2=None, fps=None, n_objects=1, savefile=None):
    import pygame as pg

    screen = pg.display.set_mode(clip.size)
    step = 1.0 / fps
    if (t1 is None) and (t2 is None):
        t1, t2 = 0, clip.duration
    elif t2 is None:
        t2 = t1 + step / 2
    t = t1
    txy_list = []

    def gatherClicks(t):
        imdisplay(clip.get_frame(t), screen)
        objects_to_click = n_objects
        clicks = []
        while objects_to_click:
            for event in pg.event.get():
                if event.type == pg.KEYDOWN:
                    if event.key == pg.K_BACKSLASH:
                        return "return"
                    elif event.key == pg.K_ESCAPE:
                        raise KeyboardInterrupt()
                elif event.type == pg.MOUSEBUTTONDOWN:
                    x, y = pg.mouse.get_pos()
                    clicks.append((x, y))
                    objects_to_click -= 1
        return clicks

    while t < t2:
        clicks = gatherClicks(t)
        if clicks == "return":
            txy_list.pop()
            t -= step
        else:
            txy_list.append((t, clicks))
            t += step

    tt, xylist = zip(*txy_list)
    result = []
    for i in range(n_objects):
        xys = [e[i] for e in xylist]
        xx, yy = zip(*xys)
        result.append(Trajectory(tt, xx, yy))

    if savefile is not None:
        Trajectory.save_list(result, savefile)
    return result
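Below is a short usage sketch for the function above. It assumes this is MoviePy's manual_tracking (MoviePy 1.x, moviepy.video.tools.tracking); the import paths, the clip filename and the save path are assumptions for illustration, not taken from the record itself.

# Hedged usage sketch -- import locations assume the MoviePy 1.x layout;
# "myvideo.mp4" and "trajectories.txt" are hypothetical names.
from moviepy.editor import VideoFileClip
from moviepy.video.tools.tracking import manual_tracking, Trajectory

clip = VideoFileClip("myvideo.mp4")

# Click on two objects in every sampled frame between t=1s and t=3s (5 fps).
# Backslash undoes the previous frame, Escape aborts the session.
trajectories = manual_tracking(
    clip, t1=1, t2=3, fps=5, n_objects=2, savefile="trajectories.txt"
)

# The function above saves with Trajectory.save_list; load_list is assumed to
# be its counterpart for reloading the trajectories later.
loaded = Trajectory.load_list("trajectories.txt")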
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def raytrace(pos1: tuple, pos2: tuple) -> list:\n x0, y0 = pos1\n x1, y1 = pos2\n tiles = []\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n x, y = x0, y0\n n = 1 + dx + dy\n x_inc = 1 if x1 > x0 else -1\n y_inc = 1 if y1 > y0 else -1\n error = dx - dy\n dx *= 2\n dy *= 2\n\n while n > 0:\n tiles.append((x, y))\n if error > 0:\n x += x_inc\n error -= dy\n else:\n y += y_inc\n error += dx\n n -= 1\n return tiles", "def track_video(frames):\n first_frame = next(frames)\n lkt = lktrack.LKTracker(first_frame)\n lkt.detect_points()\n yield lkt.draw()\n\n for im in frames:\n lkt.step(im)\n lkt.track_points()\n yield lkt.draw()", "def getClipData(self, x, y, t0, t1):\n it0 = (numpy.abs(x - t0)).argmin()\n it1 = (numpy.abs(x - t1)).argmin()\n if it0 > it1:\n t = it1\n it1 = it0\n it0 = t\n return (x[it0:it1], y[it0:it1])", "def oneTimepoint(timepoint):\n\tt = []\n\tfor vs in timepoint:\n\t\tt.append((timepoint.attrib.get('CollectionTime'), vs[0].text, vs[1].text))\n\treturn(t)", "def waypoints(t):\n global x\n xx = x + ((2 * PI)/t)\n yy = 2*(math.sin(xx))*(math.sin(xx/2))\n return [xx, yy]", "def list_times(self, start: int = None, end: int = None) -> List:\n return [i.time for i in self.data[start:end]]", "def get_times(self):\n times = []\n for i in range(1, len(self.events)):\n times.append(self.events[i-1].elapsed_time(self.events[i]))\n return times", "def __on_click(self,event, x, y, p1, p2): \r\n \r\n # global variables of the class with mouse click position\r\n global mouse_click_pos, mouse_click_list \r\n \r\n mouse_click_list = []\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n \r\n mouse_click_pos = (x,y)\r\n print(mouse_click_pos)\r\n mouse_click_list.append((x, y))", "def get_other_tracks(input_df: pd.DataFrame, track_id_list:List[int]) -> List[Tuple[np.ndarray, int]]:\n split_time_stamp = np.unique(input_df[\"TIMESTAMP\"].values)[19]\n others_list = []\n for track_id in track_id_list:\n track = input_df[(input_df[\"TRACK_ID\"] == track_id) & (input_df[\"OBJECT_TYPE\"] != \"AGENT\")][[\"TIMESTAMP\", \"X\", \"Y\"]]\n if len(track) > 0:\n obs = track[track[\"TIMESTAMP\"] <= split_time_stamp][[\"X\",\"Y\"]].to_numpy()\n target = track[track[\"TIMESTAMP\"] > split_time_stamp][[\"X\",\"Y\"]].to_numpy()\n if len(obs) > 0:\n others_list.append((np.concatenate((obs, target)), len(obs)))\n # agent_traj = np.column_stack((agent_x, agent_y))\n return others_list", "def track_ball_2(video):\n return track_ball_generic(video)", "def autoTrack(clip, pattern, tt=None, fps=None, radius=20, xy0=None):\n if not autotracking_possible:\n raise IOError(\n \"Sorry, autotrack requires OpenCV for the moment. 
\"\n \"Install OpenCV (aka cv2) to use it.\"\n )\n\n if not xy0:\n xy0 = findAround(clip.get_frame(tt[0]), pattern)\n\n if tt is None:\n tt = np.arange(0, clip.duration, 1.0 / fps)\n\n xys = [xy0]\n for t in tt[1:]:\n xys.append(findAround(clip.get_frame(t), pattern, xy=xys[-1], r=radius))\n\n xx, yy = zip(*xys)\n\n return Trajectory(tt, xx, yy)", "def snapShot(self):\n syncTimeNow = self.syncTimelineClock.ticks\n # convert from pts to wallclock\n wcNow = self.syncTimelineClock.toOtherClockTicks(self.wallClock, syncTimeNow)\n speed = self.syncTimelineClock.speed\n whenSnapshotted = wcNow\n return (whenSnapshotted, (wcNow, syncTimeNow, speed))", "def tracks_test(timestamps):\n tracks_test = (\n Track(1, [1, 2, 3, 10, 11], list(timestamps[0:5]), meta={}),\n Track(2, [4, ], (timestamps[0],), meta={}),\n Track(3, [5, ], (timestamps[0],), meta={}),\n Track(4, [6, 7], timestamps[2:4], meta={}),\n Track(5, [8, 9, 12, 13], timestamps[3:7], meta={}),\n )\n return tracks_test", "def track_motion(self, video=None, set_roi=False):\r\n \r\n if video is None:\r\n video = self.video_buffer\r\n if set_roi:\r\n roi = self.get_roi(video=video)\r\n \r\n video_track = video.copy()\r\n motion_tracker = []\r\n # Generate different colors for tracking display \r\n color = np.random.randint(0,255,(100,3))\r\n \r\n # params for ShiTomasi corner detection\r\n feature_params = dict( maxCorners = 100,\r\n qualityLevel = 0.3,\r\n minDistance = 5,\r\n blockSize = 7 )\r\n # Parameters for lucas kanade optical flow\r\n lk_params = dict( winSize = (15,15),\r\n maxLevel = 8,\r\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\r\n\r\n old_gray = cv2.cvtColor(video[0], cv2.COLOR_BGR2GRAY)\r\n # Create mask for drawing\r\n mask = np.zeros_like(video[0])\r\n # Mask to dectate the features to track\r\n features_mask = np.zeros_like(old_gray)\r\n features_mask[roi['x1']: roi['x2'], roi['y1']: roi['y2']] = old_gray[roi['x1']: roi['x2'], roi['y1']: roi['y2']]\r\n # Find corners in first frame\r\n p0 = cv2.goodFeaturesToTrack(features_mask, mask = None, **feature_params)\r\n \r\n for idx in range(1, video.shape[0]):\r\n new_gray = cv2.cvtColor(video[idx], cv2.COLOR_BGR2GRAY)\r\n # calculate optical flow\r\n p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, new_gray, p0, None, **lk_params)\r\n \r\n # Get good points\r\n good_old = p0[st==1]\r\n good_new = p1[st==1]\r\n motion_tracker.append(good_new)\r\n for i, (old, new) in enumerate(zip(good_old, good_new)):\r\n (ox, oy) = old.reval()\r\n (nx, ny) = new.ravel()\r\n mask = cv2.circle(mask, (nx, ny), 5, color[i].tolist(), -1)\r\n frame = cv2.add(video[idx], mask)\r\n video_track[idx] = frame\r\n \r\n cv2.imshow('frame', frame)\r\n if cv2.waitKey(30) & 0xFF==27:\r\n break\r\n # Updat old frames and points before checking next frame\r\n \r\n old_gray = new_gray.copy()\r\n p0 = p1.resapr(-1,1,2)\r\n cv2.destroyAllWindows()\r\n \r\n return video_track, motion_tracker", "def timepoints(self, session):\n tps = session.query(Timepoint).filter(\n Timepoint.id.between(\n self.start_timepoint_id, self.end_timepoint_id)).all()\n\n return tps", "def task_get_time_slices(\n self, timestamp: datetime = None\n ) -> List[Tuple[datetime, datetime]]:\n total_streams: int = self._config[\"graph_streams\"]\n\n t_now: datetime = (\n timestamp.replace(microsecond=0)\n if timestamp is not None\n else datetime.utcnow().replace(microsecond=0)\n )\n\n t_lag: timedelta = timedelta(seconds=self._config[\"graph_timelag\"])\n t_sec: timedelta = timedelta(seconds=1)\n t_delta: timedelta = 
timedelta(seconds=self._config[\"graph_stream_frame\"])\n\n frame_end: datetime = t_now - t_lag - t_sec\n frame_start: datetime = frame_end + t_sec - t_delta * total_streams\n\n self._logger.info(\n \"Split [%s - %s] into %s slices\",\n frame_start.isoformat(),\n frame_end.isoformat(),\n total_streams,\n )\n\n result: List[Tuple[datetime, datetime]] = []\n\n for i in range(total_streams):\n slice_start: datetime = frame_end + t_sec - t_delta * (i + 1)\n slice_end: datetime = frame_end - t_delta * i\n\n result.append((slice_start, slice_end))\n\n return result", "def getMouseClicks(plotcoords = 0):\n nmax = 1000\n xlist, ylist = [-92171]*nmax,[-92171]*nmax\n nclicks = dislin.csrpts(xlist, ylist, nmax)\n xlist, ylist = xlist[:nclicks], ylist[:nclicks]\n if plotcoords:\n return xlist, ylist\n else:\n x = [dislin.xinvrs(i) for i in xlist]\n y = [dislin.yinvrs(i) for i in ylist]\n return x,y", "def sources_over_time(ant1, ant2, list_sources=None,\n start=0, end=2/3*np.pi, interval=np.pi/72, nu=151e6):\n if list_sources is None:\n # make a copy to ensure write safety\n list_sources = catalog.obj_catalog.copy()\n\n # If the user has entered a single source directly,\n # we can automatically standardize the formatting\n # by placing it by itself in a list\n if type(list_sources) != list:\n list_sources = [list_sources]\n \n list_visibilities = []\n lst = start\n while lst <= end:\n next_vista = np.array([0j, 0j, 0j, 0j])\n for source in list_sources:\n next_vista += visibility(ant1, ant2, source, nu=nu, time=lst)\n\n list_visibilities.append(np.array([lst, next_vista]))\n lst += interval\n # perhaps not necessary. Better safe than sorry:\n return np.array(list_visibilities)", "def get_ts_pixel(self, x, y):\n for i in xrange(self.length):\n self.retrieve_pixel(x, y, i)", "def get_licks(dlc, dlc_t):\r\n lick_times = get_feature_event_times(dlc, dlc_t, ['tongue_end_l_x', 'tongue_end_l_y',\r\n 'tongue_end_r_x', 'tongue_end_r_y'])\r\n return lick_times", "def get_clicks(self, live_buttons, timestamp, relative_to):\n clicked = []\n if timestamp and relative_to is None:\n if self.listen_start is None:\n raise ValueError('I cannot timestamp: relative_to is None and '\n 'you have not yet called listen_clicks.')\n else:\n relative_to = self.listen_start\n clicked = self._retrieve_events(live_buttons)\n return self._correct_clicks(clicked, timestamp, relative_to)", "def video_times():\n p = parse_cmdline(get_parser=get_parser_times)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.DEBUG)\n vis.show_video_abi_glm_times(\n start_date=p.start_time,\n end_date=p.end_time,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir,\n sector=p.sector,\n area=p.area)\n print(\"Files written to:\", p.outdir)", "def get_frame_targets_from_time_targets(time_targets_ms, output_fps=24):\n return [(t * output_fps) // 1000 for t in time_targets_ms]", "def get_spikes(spiketimes=None, t1=None, t2=None):\n indices = np.where((spiketimes[:,1] > t1) & (spiketimes[:,1] < t2))\n timed_spikes = spiketimes[indices]\n return timed_spikes", "def track_ball_1(video):\n return track_ball_generic(video)", "def elapsed_time(self, cond1:str='', cond2:str='', legible:bool=True) -> list:\n q1 = 'x.time; ' + cond1\n q2 = 'x.time; ' + cond2\n\n x1 = self.query(q1, x_idx=0)\n x2 = self.query(q2, x_idx=-1)\n \n diff = (lambda x: str(datetime.timedelta(seconds=x)).split('.')[0]) if legible else lambda x: x\n\n return 
[diff(b - a) for a, b in zip(x1, x2)]", "def getTimes( self ):\n\n pars\t= ( _EVENT_TIME, 0, 0, 0 )\n values = self.adbGetEvent( pars )\n return values[2]", "def track_ball_generic(video):\n result = []\n frames = readFrames(video)\n\n # get background with background estimator method\n background = findBackground(frames)\n # Setup background subtractor object with parameters\n subtractor = cv2.BackgroundSubtractorMOG(30, 10, 0.7, 0)\n # Feed estimated background as first input to subtractor\n subtractor.apply(background)\n\n # Iterate over every frame in video\n i = 0\n while i < len(frames):\n frame = frames[i] # get the new frame\n\n # apply background subtraction to frame\n frame = subtractor.apply(frame)\n\n # find contours in the frame\n contours, _ = cv2.findContours(frame, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # sort the contours in reverse order by width to eliminate small\n # contours caused by noise.\n sort_cnt = sorted(contours, key=lambda x: cv2.boundingRect(x)[2],\n reverse=True)\n\n # get the parameters of the bounding box for the largest width contour\n x, y, w, h = cv2.boundingRect(sort_cnt[0])\n # append to result list\n result.append((x, y, x + w, y + h))\n\n if VISUALIZE:\n orig_frame = frames[i]\n cv2.rectangle(orig_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.imshow('frame', orig_frame)\n cv2.waitKey(30)\n\n i += 1\n\n return result", "def measureTrace2(x, y, t0 = 0, t1 = 10, thisaxis = 0, mode='mean', threshold = 0):\n result = numpy.array([])\n d = y.T # get data for this block\n for j in range(0, numpy.shape(d)[0]):\n if isinstance(threshold, int):\n thr = threshold\n else:\n thr = threshold[j]\n (m1, m2) = measure(mode, x, d[j][:], t0, t1, thresh= thr)\n result = numpy.append(result, m1)\n return(result)", "def lick_times():\n lick_timestamps = read_npy_file('licks.times.npy')\n lick_ts = TimeSeries(\n name='lick_times',\n timestamps=np.ravel(lick_timestamps),\n data=np.full(len(lick_timestamps), True),\n unit='',\n description='Extracted times of licks, from the lickPiezo signal.'\n )\n lick_bev = BehavioralEvents(lick_ts)\n behavior_module.add_data_interface(lick_bev)" ]
[ "0.5310043", "0.5286239", "0.5057076", "0.4953343", "0.49187213", "0.48390925", "0.48240438", "0.4816844", "0.4795924", "0.47676378", "0.47432348", "0.47256297", "0.4705768", "0.46897307", "0.4685795", "0.46163556", "0.46015707", "0.45810923", "0.45771328", "0.45648286", "0.4547574", "0.45385203", "0.4534605", "0.45336717", "0.45332602", "0.45272514", "0.45262045", "0.4516886", "0.45150098", "0.45121357" ]
0.7477079
0
Find an image pattern in a picture, optionally defining bounds to search. The image is found if ``pat`` is inside ``pic[x +/- r, y +/- r]``.
def findAround(pic, pat, xy=None, r=None):
    if xy and r:
        h, w = pat.shape[:2]
        x, y = xy
        pic = pic[y - r : y + h + r, x - r : x + w + r]

    matches = cv2.matchTemplate(pat, pic, cv2.TM_CCOEFF_NORMED)
    yf, xf = np.unravel_index(matches.argmax(), matches.shape)
    return (x - r + xf, y - r + yf) if (xy and r) else (xf, yf)
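A hedged sketch of calling findAround directly. It assumes the function above is in scope with cv2 and numpy available in its module; "frame.png" and "pattern.png" are hypothetical filenames.

# Hedged sketch -- filenames are hypothetical; findAround is the function
# shown above.
import cv2

frame = cv2.imread("frame.png", cv2.IMREAD_GRAYSCALE)
pattern = cv2.imread("pattern.png", cv2.IMREAD_GRAYSCALE)

# First pass: no xy/r given, so the whole picture is searched.
x0, y0 = findAround(frame, pattern)

# Later passes: only search a window of radius 20 px around the last hit.
x1, y1 = findAround(frame, pattern, xy=(int(x0), int(y0)), r=20)
print(x1, y1)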
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_icon_by_pic(name, offset=(10, 10), **kwargs):\r\n rate = kwargs.get(\"rate\", 0.9)\r\n path_demo = get_current_dir('demo.png')\r\n capture_screen(path_demo)\r\n img_name = cv2.imread(name)\r\n t = cv2.matchTemplate(cv2.imread(path_demo), img_name,\r\n cv2.TM_CCOEFF_NORMED)\r\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(t)\r\n log.debug(\"[get pic icon]current match picture: {}, Similarity:{:.3f}/{}\".format(name, max_val, rate))\r\n if max_val > rate:\r\n x = max_loc[0]\r\n y = max_loc[1]\r\n return (x + offset[0], y + offset[1]), img_name.shape\r\n else:\r\n return None", "def image_search(screengrab, image, searchx, searchy, threshold=None, radius=5, great_threshold=None):\n image_width, image_height = image.size\n if threshold is None:\n best_rms = image_width * image_height * 30.0\n else:\n best_rms = threshold\n\n if great_threshold is None:\n # use an automatic value based upon size.\n great_threshold = image_width * image_height * 3\n\n best_x = -1\n best_y = -1\n for x, y in search_offset(radius=radius, offsetx=searchx, offsety=searchy):\n rms = compare_images(screengrab.crop((x, y, x + image_width, y + image_height)), image)\n logging.debug(\"Image Search {0},{1} rms: {2:>10.3f}\".format(x, y, rms))\n if best_rms is None or rms < best_rms:\n best_y = y\n best_x = x\n best_rms = rms\n if rms < great_threshold:\n break\n return best_x, best_y", "def image_search_in_image(base_image, looking_for_img):\n base_image = cv2.imread(base_image)\n looking_for_img = cv2.imread(looking_for_img)\n # result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_SQDIFF_NORMED)\n result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_CCOEFF)\n (_, _, minLoc, maxLoc) = cv2.minMaxLoc(result)\n print(result)\n (waldoHeight, waldoWidth) = looking_for_img.shape[:2]\n topLeft = maxLoc\n botRight = (topLeft[0] + waldoWidth, topLeft[1] + waldoHeight)\n roi = base_image[topLeft[1]:botRight[1], topLeft[0]:botRight[0]]\n mask = np.zeros(base_image.shape, dtype=\"uint8\")\n puzzle = cv2.addWeighted(base_image, 0.25, mask, 0.75, 0)\n puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] = roi\n cv2.imshow(\"Puzzle\", puzzle)\n cv2.imshow(\"Waldo\", looking_for_img)\n cv2.waitKey(0)", "def find(image_to_find, image, search_spec=None, **kwargs):\n if search_spec is not None and search_spec.startswith('template'):\n return _find_using_template(image_to_find, image, **kwargs)\n features = Features(search_spec)\n features.add_target(image_to_find)\n tracked = features.find(image, kwargs.get('k'), kwargs.get('ratio'))\n if not len(tracked):\n raise FindError(1.0, None)\n return tracked[0]", "def locate_template(template, img):\n temp_found = None\n (height, width) = template.shape[:2]\n\n for scale in np.linspace(0.1, 3, 10)[::-1]:\n # resize the image and store the ratio\n resized_img = imutils.resize(img, width=int(img.shape[1] * scale))\n ratio = img.shape[1] / float(resized_img.shape[1])\n if resized_img.shape[0] < height or resized_img.shape[1] < width:\n break\n # Convert to edged image for checking\n e = cv2.Canny(resized_img, 10, 25)\n match = cv2.matchTemplate(e, template, cv2.TM_CCOEFF)\n (_, val_max, _, loc_max) = cv2.minMaxLoc(match)\n if temp_found is None or val_max > temp_found[0]:\n temp_found = (val_max, loc_max, ratio)\n return temp_found", "def get_test_pattern(img_size=(2048, 2048)):\n ny, nx = img_size\n # mask = np.zeros((ny, nx))\n\n # patterns with variable spacing\n periods = range(2, 20, 2)\n # vcounter = 0\n for ii, p in enumerate(periods):\n cell = 
np.zeros((p, nx))\n on_pix = int(np.ceil(p / 2))\n cell[:on_pix, :] = 1\n cell = np.tile(cell, [4, 1])\n\n if ii == 0:\n mask = cell\n else:\n mask = np.concatenate((mask, cell), axis=0)\n\n mask = mask[:, :mask.shape[0]]\n\n mask_block = np.concatenate((mask, np.rot90(mask)), axis=1)\n mask_block2 = np.concatenate((np.rot90(mask), mask), axis=1)\n\n mask_superblock = np.concatenate((mask_block, mask_block2))\n\n ny_reps = int(np.ceil(ny / mask_superblock.shape[0]))\n nx_reps = int(np.ceil(nx / mask_superblock.shape[1]))\n mask = np.tile(mask_superblock, [ny_reps, nx_reps])\n mask = mask[0:ny, 0:nx]\n\n return mask", "def search(self, txt, pat):\n m, n, = len(pat), len(txt)\n bad_char = self.get_bad_char(pat, m)\n s = 0\n\n while s <= n - m:\n j = m - 1 # start at last index of pattern string.\n\n # move left in pattern string when a matched character is found.\n while j >= 0 and pat[j] == txt[s + j]:\n j -= 1\n\n if j < 0:\n print(f\"Pattern occur at shift = {s}\")\n s += (m - bad_char[ord(txt[s + m])] if s + m < n else 1)\n else:\n s += max(1, j - bad_char[ord(txt[s + j])])\n\n return n", "def find_image(screengrab, image, threshold=None):\n matches = []\n image_width, image_height = image.size\n screen_width, screen_height = screengrab.size\n if threshold is None:\n # Use an automatic threshold.\n threshold = image_width * image_height * 10.0\n for x in range(screen_width - image_width):\n for y in range(screen_height - image_height):\n if compare_images(screengrab.crop((x, y, x + image_width, y + image_height)), image) < threshold:\n matches.append((x, y))\n return matches", "def find(image):\n keypoint, description = describe(image)\n # load keypoints, descriptions from mongodb\n\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n best_match_size = float(\"inf\")\n best_match_index = -1\n match_index = 0\n best_matches = 0\n\n for desc in descriptions:\n matches = bf.match(desc,description)\n matches = sorted(matches, key = lambda x:x.distance)\n if len(matches) > 0:\n match_size = sum(x.distance for x in matches[:10])\n\n print \"match size is \", match_size\n if match_size < best_match_size:\n best_match_size = match_size\n best_match_index = match_index\n best_matches = matches\n\n match_index += 1\n\n needle_color = cv2.imread('needle-stripped.png')[:,:,::-1] # needle\n best_match_image = cv2.imread(\"haystack/\"+files[best_match_index])\n print \"best match is \", files[best_match_index]\n\n # Draw first 10 matches.\n outImg = cv2.imread(\"output/outImg.png\")\n match = cv2.drawMatches(needle_color,keypoint,best_match_image[:,:,::-1],keypoints[best_match_index],best_matches[-20:],outImg, flags=6)\n\n plt.imshow(match),plt.show()\n return", "def detect_pattern(array, pattern, ppos=None, dpos=0):\n # Inner parameters\n shape = array.shape\n hits = np.zeros(shape, dtype=np.bool)\n pattern = np.asarray(pattern)\n pshape = pattern.shape\n\n # Check the input parameters\n if pattern.ndim != 1:\n raise ValueError(\"Invalid pattern '{0}'.\".format(pattern))\n\n # Pattern instersection\n nb_of_hits = shape[0] - pshape[0] + 1\n hits = np.ones((nb_of_hits, shape[1]), dtype=np.bool)\n for cnt, pattern_value in enumerate(pattern):\n local_match = (array[cnt: cnt + nb_of_hits, :] == pattern_value)\n hits = np.logical_and(hits, local_match)\n\n return hits", "def find_insideimage(subimg_path, bbox_used):\n assert os.path.isfile(subimg_path)\n\n # Take screenshot.\n # (540+65*i, 610, 610+65*i, 680)\n with ImageGrab.grab(bbox = bbox_used) as rgba, rgba.convert(mode='RGB') as screenshot:\n 
return find_subimage(screenshot, subimg_path)", "def detect_image(screengrab, imageset, searchx, searchy, threshold=None, radius=2, great_threshold=None,\n xradius=None, yradius=None, algorithm=SEARCH_SPIRAL, compare_regions=None):\n image_width, image_height = imageset[0][1].size\n if threshold is None:\n # Use an automatic threshold.\n best_rms = image_width * image_height * 20.0\n else:\n best_rms = threshold\n\n if great_threshold is None:\n # use an automatic value based upon size.\n great_threshold = best_rms / 10.0\n\n if compare_regions is None:\n compare_regions = [(0, 0, image_width, image_height)]\n best_x = -1\n best_y = -1\n best_name = None\n for region in compare_regions:\n for x, y in search_offset(radius=radius, offsetx=searchx, offsety=searchy,\n xradius=xradius, yradius=yradius, algorithm=algorithm):\n for name, image in imageset:\n image_region = image.crop(region)\n screengrab_region = screengrab.crop((x + region[0], y + region[1], x + region[2], y + region[3]))\n rms = compare_images(screengrab_region, image_region)\n logging.debug(\"Image Search {0},{1} rms: {2:>10.3f} name: {3}\".format(x, y, rms, name))\n if best_rms is None or rms < best_rms:\n best_y = y\n best_x = x\n best_name = name\n best_rms = rms\n if rms < great_threshold:\n break\n if best_name is not None:\n break\n if best_name is not None:\n break\n return best_name, best_x, best_y", "def highlight_pattern(self, pad, pattern,\n tag, start=\"1.0\", end=\"end\", regexp=False):\n start = pad.index(start)\n end = pad.index(end)\n pad.mark_set(\"matchStart\", start)\n pad.mark_set(\"matchEnd\", start)\n pad.mark_set(\"searchLimit\", end)\n\n count = GUI.IntVar()\n while True:\n index = pad.search(pattern, \"matchEnd\", \"searchLimit\", count=count,\n regexp=regexp)\n if index == \"\":\n break\n pad.mark_set(\"matchStart\", index)\n pad.mark_set(\"matchEnd\", \"%s+%sc\" % (index, count.get()))\n pad.tag_add(tag, \"matchStart\", \"matchEnd\")", "def findpixel(img, col):\n for x in range(img.width):\n for y in range(img.height):\n if img[y][x][0] == col[0] and img[y][x][1] == col[1] and img[y][x][2] == col[2]:\n return cvPoint(x, y)", "def find_image(self, image, threshold=0.99, cache=False, zone=None, screen=None):\n threshold = float(threshold)\n cache = utils.to_bool(cache)\n\n assert 0 < threshold <= 1, \"Threshold must be in (0, 1)\"\n\n #get the screenshot to search on\n screen_img = self._get_screen(cache, zone, screen)\n #load the template image\n img = self.load_image(image)\n #locate the image with threshold and return result\n return self.find_image_result(img, screen_img, threshold)", "def _locate_finder_in_square(image, transform, size):\n radius = int(round(size/2))\n center = transform.trans\n angle = transform.rot\n\n rotated = image.rotate(angle, center)\n\n sx1, sy1 = center.x-radius, center.y-radius\n sx2, sy2 = center.x+radius, center.y+radius\n thick = int(round(size / 14))\n\n # Top\n x1, y1 = sx1, sy1\n x2, y2 = sx2, sy1 + thick\n top = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Left\n x1, y1 = sx1, sy1\n x2, y2 = sx1 + thick, sy2\n left = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Bottom\n x1, y1 = sx1, sy2 - thick\n x2, y2 = sx2, sy2\n bottom = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Right\n x1, y1 = sx2 - thick, sy1\n x2, y2 = sx2, sy2\n right = np.sum(rotated.img[y1:y2, x1:x2]) / (size * thick)\n\n # Identify finder edges\n if top < bottom and left < right:\n c1 = [sx1, sy1]\n c2 = [sx1, sy2]\n c3 = [sx2, sy1]\n elif top < bottom and right 
< left:\n c1 = [sx2, sy1]\n c2 = [sx1, sy1]\n c3 = [sx2, sy2]\n elif bottom < top and left < right:\n c1 = [sx1, sy2]\n c2 = [sx2, sy2]\n c3 = [sx1, sy1]\n elif bottom < top and right < left:\n c1 = [sx2, sy2]\n c2 = [sx2, sy1]\n c3 = [sx1, sy2]\n else:\n return None\n\n # rotate points around center of square\n c1 = _rotate_around_point(Point.from_array(c1), angle, center)\n c2 = _rotate_around_point(Point.from_array(c2), angle, center)\n c3 = _rotate_around_point(Point.from_array(c3), angle, center)\n\n # Create finder pattern\n c1 = c1.intify()\n side1 = (c2 - c1).intify()\n side2 = (c3 - c1).intify()\n fp = FinderPattern(c1, side1, side2)\n\n return fp", "def pattern_search(search_pattern):\n needle = search_pattern\n\n try:\n if needle.startswith(\"0x\"):\n # Strip off '0x', convert to ASCII and reverse\n needle = needle[2:]\n needle = bytearray.fromhex(needle).decode(\"ascii\")\n needle = needle[::-1]\n except (ValueError, TypeError) as e:\n raise\n\n haystack = \"\"\n for upper in ascii_uppercase:\n for lower in ascii_lowercase:\n for digit in digits:\n haystack += upper + lower + digit\n found_at = haystack.find(needle)\n if found_at > -1:\n return found_at\n\n raise WasNotFoundException(\n \"Couldn`t find {0} ({1}) \"\n \"anywhere in the pattern.\".format(search_pattern, needle)\n )", "def match_pattern(self, pat, word, normalize=True):\n segs = self.word_fts(word, normalize)\n if len(pat) != len(segs):\n return None\n else:\n if all([s >= p for (s, p) in zip(segs, pat)]):\n return segs", "def get_position_by_pic(name, offset=(10, 10), **kwargs):\r\n if isinstance(name, str) and os.path.isdir(name):\r\n pic_list = get_folder_items(name, file_only=True, filter_name=\".png\")\r\n assert pic_list, \"pic is not exist in {}\".format(name)\r\n pic_path_list = list(map(lambda x: name + \"/{}\".format(x), pic_list))\r\n name = pic_path_list\r\n if isinstance(name, list):\r\n time.sleep(0.5)\r\n return get_icon_by_pictures(name, offset, **kwargs)\r\n else:\r\n time.sleep(0.5)\r\n return get_icon_by_pic(name, offset, **kwargs)", "def draw_match(src, dst, src_point, dst_point, number=20):\n src_point = src_point.astype(np.int)\n dst_point = dst_point.astype(np.int)\n src_h, src_w, _ = src.shape\n dst_h, dst_w, _ = dst.shape\n final_height = max(src_h, dst_h)\n final_width = src_w + dst_w\n\n # copy\n pic = np.zeros([final_height, final_width, 3], dtype=np.uint8)\n pic[:src_h, :src_w, :] = src[:, :, :]\n pic[:dst_h, src_w:, :] = dst[:, :, :]\n\n # give destination offset\n dst_point[:, 0] = dst_point[:, 0] + src_w\n\n # matching #\n n = src_point.shape[0]\n if number > n:\n number = n\n\n # matching line draw\n for i in range(number):\n cv2.line(pic, (src_point[i, 0], src_point[i, 1]), (dst_point[i, 0], dst_point[i, 1]),\n (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)), thickness=1, lineType=cv2.LINE_AA)\n return pic", "def templateMatchMulti(img, template):\n\tgray = grayscale(img)\n\ttemp = grayscale(template)\n\tw, h = temp.shape[::-1]\n\tres = cv2.matchTemplate(gray, temp, cv2.TM_CCOEFF_NORMED)\n\tthreshold = 0.8\n\tloc = np.where(res >= threshold)\n\tpts = []\n\tfor pt in zip(*loc[::-1]):\n\t\trect = [pt, (pt[0] + w, pt[1] + h)]\n\t\tpts.append(rect)\n\treturn pts", "def drawMatch(img, template, color=(255,255,0), thickness=2):\n\ttmp = img.copy()\n\ttl, br = templateMatchSingle(tmp, template)\n\tcv2.rectangle(tmp, tl, br, color, thickness)\n\treturn tmp", "def try_template_matching(image,template):\n img2 = image.copy()\n w, h = template.shape[::-1]\n # All the 
6 methods for comparison in a list\n methods = ['cv2.TM_CCOEFF', 'cv2.TM_CCOEFF_NORMED', 'cv2.TM_CCORR',\n 'cv2.TM_CCORR_NORMED', 'cv2.TM_SQDIFF', 'cv2.TM_SQDIFF_NORMED']\n for meth in methods:\n img = img2.copy()\n method = eval(meth)\n # Apply template Matching\n res = cv2.matchTemplate(img,template,method)\n res-=np.min(res)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)\n threshold=0.9*np.max(res)\n \n # If the method is TM_SQDIFF or TM_SQDIFF_NORMED, take minimum\n if method in [cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:\n loc = np.where( res <=0.1*np.max(res))\n else:\n loc = np.where( res >= threshold)\n \n for pt in zip(*loc[::-1]):\n cv2.rectangle(img, pt, (pt[0] + w, pt[1] + h), 255, 2)\n plt.figure()\n plt.subplot(121),plt.imshow(image,cmap = 'gray')\n plt.title('Matching Result'), plt.xticks([]), plt.yticks([])\n plt.subplot(122),plt.imshow(img,cmap = 'gray')\n plt.title('Detected Point'), plt.xticks([]), plt.yticks([])\n plt.suptitle(meth)\n plt.show()", "def encontrar_picos(self, guess, delta_ppm):\r\n # La idea es esta:\r\n # dado el delta en cada direccion, defino una region cuadrada donde\r\n # busco el maximo. Luego, con los indices del maximo encuentro el pico.\r\n # es importante que el guess sea bueno! \r\n ppmDir = self.ppmGridDir[0,:]\r\n ppmInd = self.ppmGridInd[:,0]\r\n \r\n x,y = guess \r\n x_index = find_nearest(ppmDir, x)\r\n y_index = find_nearest(ppmInd, y)\r\n \r\n # cuantos ppm son un paso en cada direccion\r\n stepDir = np.abs(ppmDir[1]-ppmDir[0])\r\n stepInd = np.abs(ppmInd[1]-ppmInd[0]) \r\n nx = int(delta_ppm[0]/stepDir)\r\n ny = int(delta_ppm[1]/stepInd)\r\n \r\n spec_reduced = self.spec[y_index-ny:y_index+ny, x_index-nx:x_index+nx]\r\n ppmDir_reduced = ppmDir[x_index-nx:x_index+nx]\r\n ppmInd_reduced = ppmInd[y_index-ny:y_index+ny]\r\n \r\n maximo = spec_reduced.max()\r\n y, x = np.where(spec_reduced==maximo)\r\n \r\n x = ppmDir_reduced[x[0]]\r\n y = ppmInd_reduced[y[0]]\r\n \r\n x_index = find_nearest(ppmDir, x)\r\n y_index = find_nearest(ppmInd, y)\r\n \r\n #plt.contourf(ppmDir_reduced,ppmInd_reduced,spec_reduced)\r\n return x_index, y_index", "def draw_all_position_patterns(img, found, contours):\n draw_img = img.copy()\n for i in found:\n rect = cv2.minAreaRect(contours[i])\n box = np.int0(cv2.cv.BoxPoints(rect))\n cv2.drawContours(draw_img, [box], 0, (0, 0, 255), 2)\n show(draw_img)", "def _find_using_template(image_to_find, image, threshold=None, **kwargs):\n threshold = 1e-6 if threshold is None else threshold\n result = cv2.matchTemplate(image, image_to_find, cv2.TM_SQDIFF_NORMED)\n idx = np.argmin(result)\n metric = np.ravel(result)[idx]\n x0, y0 = np.unravel_index(idx, result.shape)[-1::-1]\n if metric > threshold:\n raise FindError(metric, (x0, y0))\n x, y = image_to_find.shape[1::-1]\n target = Target(image_to_find, [[0, 0], [x, 0], [x, y], [0, y]], None, None, None)\n x1 = x0 + image_to_find.shape[1]\n y1 = y0 + image_to_find.shape[0]\n quad = [[x0, y0], [x1, y0], [x1, y1], [x0, y1]]\n H = np.array([[0., 0., x0], [0., 0., y0], [0., 0., 1.0]])\n return TrackedTarget(target, image, [(0, 0)], [(x0, y0)], H, quad)", "def search(self, pattern):\n raise NotImplementedError()", "def singleLayerFind(self, method: Finder, layer: int = 0):\n prepared_image = None\n if len(self.image.shape) > 2: # atleast 3 dimesions in array e.g 100,100,5\n if self.image.shape[0] > self.image.shape[2]: # one image array with multi samples e.g 100,100,5\n if self.image.shape[2] > layer: # check sample that exists\n prepared_image = self.image[:, :, layer]\n 
else:\n print('sample:', layer, ' out of bounds:', self.image.shape[2],\n 'from the following multi sample image', self.image.shape)\n return\n elif self.image.shape[0] < self.image.shape[2]: # image with more than one layer e.g 5,100,100\n if self.image.shape[0] > layer: # check layer exisits\n prepared_image = self.image[layer]\n else:\n print('layer:', layer, ' out of bounds:', self.image.shape[0],\n 'from the following multi layer image', self.image.shape)\n return\n else:\n print('Unrecognised dimesnsions:', self.image.shape)\n elif len(self.image.shape) == 2: # basic 2 dimesional array\n prepared_image = self.image\n else:\n print('invalid dimensions in image', self.image.shape)\n return\n\n if prepared_image is None:\n print('something went wrong')\n\n self.locations, self.intermediaryImage = method.findInImage(prepared_image)\n return self.locations, self.intermediaryImage", "def locate(self, gray_img, barcode_size):\n # Clear cache\n self.metric_cache = dict()\n self.count = 0\n\n if self.DEBUG:\n gray_img.rescale(4).popup()\n\n # Threshold the image converting it to a binary image\n binary_image = self._adaptive_threshold(gray_img.img, 99, 0)\n\n # Find the transform that best fits the square to the barcode\n best_transform = self._minimise_integer_grid(binary_image, gray_img.center(), barcode_size)\n\n # Get the finder pattern\n fp = self._locate_finder_in_square(binary_image, best_transform, barcode_size)\n\n if self.DEBUG and fp is not None:\n img = _draw_finder_pattern(binary_image, best_transform, fp)\n img.rescale(4).popup()\n\n best_transform = self._find_best_fp(binary_image, best_transform, barcode_size)\n fp = self._locate_finder_in_square(binary_image, best_transform, barcode_size)\n\n if self.DEBUG and fp is not None:\n img = _draw_finder_pattern(binary_image, best_transform, fp)\n img.rescale(4).popup()\n print(self.count)\n\n return fp", "def draw_mask(img, percentage_x=100, percentage_y=100, offset_x=0, offset_y=0, rotation=0, rectangle=True):\n ydim, xdim = img.shape\n mask = np.zeros((ydim, xdim))\n\n # Convert percentages to fractions\n offset_x = (xdim * offset_x/100)\n offset_y = (ydim * offset_y/100)\n percentage_x = percentage_x/100\n percentage_y = percentage_y/100\n\n if rectangle is False:\n x_rad = np.floor((img.shape[1]/2) * percentage_x)\n y_rad = np.floor((img.shape[0]/2) * percentage_y)\n\n x_center = img.shape[1]//2 + offset_x\n y_center = img.shape[0]//2 - offset_y\n\n\n [x, y] = draw.ellipse(y_center, x_center, y_rad, x_rad, shape = img.shape, rotation=rotation)\n\n else:\n ysub = ydim * (1 - percentage_y)\n y1 = max(ysub/2 - offset_y, 0)\n y2 = min(ydim - ysub/2 - offset_y, ydim)\n r_coords = np.array([y1, y1, y2, y2, y1])\n\n xsub = xdim * (1 - percentage_x)\n x1 = max(xsub/2 + offset_x,0)\n x2 = min(xdim - xsub/2 + offset_x, xdim)\n c_coords = np.array([x1, x2, x2, x1, x1])\n\n x, y = draw.polygon(r_coords, c_coords)\n\n mask[x, y] = 1\n\n return(mask)" ]
[ "0.58755505", "0.5569356", "0.55036914", "0.54863626", "0.54638803", "0.54306555", "0.5426215", "0.5414956", "0.5314991", "0.5228552", "0.521884", "0.52016884", "0.5173406", "0.5113699", "0.50773954", "0.49873438", "0.49566814", "0.49469578", "0.49061322", "0.4893878", "0.48842797", "0.48840442", "0.48478156", "0.48470184", "0.4845947", "0.48365292", "0.48066747", "0.47919208", "0.47846183", "0.47834846" ]
0.7766923
0
Tracks a given pattern (small image array) in a video clip. Returns ``[(x1, y1), (x2, y2)...]`` where ``(xi, yi)`` are the coordinates of the pattern in the clip on frame ``i``. To select the frames you can either specify a list of times with ``tt`` or select a frame rate with ``fps``. This algorithm assumes that the pattern's aspect does not vary much and that the distance between two occurrences of the pattern in two consecutive frames is smaller than ``radius`` (if you set ``radius`` to 0 or ``None`` the pattern will be searched in the whole screen at each frame). You can also provide the original position of the pattern with xy0.
def autoTrack(clip, pattern, tt=None, fps=None, radius=20, xy0=None):
    if not autotracking_possible:
        raise IOError(
            "Sorry, autotrack requires OpenCV for the moment. "
            "Install OpenCV (aka cv2) to use it."
        )

    if not xy0:
        xy0 = findAround(clip.get_frame(tt[0]), pattern)

    if tt is None:
        tt = np.arange(0, clip.duration, 1.0 / fps)

    xys = [xy0]
    for t in tt[1:]:
        xys.append(findAround(clip.get_frame(t), pattern, xy=xys[-1], r=radius))

    xx, yy = zip(*xys)

    return Trajectory(tt, xx, yy)
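A hedged usage sketch for the tracker above. It assumes the MoviePy 1.x layout (autoTrack in moviepy.video.tools.tracking) plus OpenCV and NumPy; the filenames are hypothetical, and the final interpolation call reflects the Trajectory API as commonly documented rather than anything stated in this record.

# Hedged sketch -- import paths assume MoviePy 1.x; filenames are hypothetical.
import cv2
import numpy as np
from moviepy.editor import VideoFileClip
from moviepy.video.tools.tracking import autoTrack

clip = VideoFileClip("myvideo.mp4")
# cv2.imread gives BGR; reverse the channels so the pattern matches the RGB
# frames that clip.get_frame returns.
pattern = cv2.imread("logo.png")[:, :, ::-1]

# Note: in the code above, xy0 is looked up with tt[0] before tt is derived
# from fps, so pass tt explicitly unless you also provide xy0.
tt = np.arange(0, clip.duration, 1.0 / 10)
traj = autoTrack(clip, pattern, tt=tt, radius=25)

# traj is a Trajectory; calling it with a time is assumed to interpolate the
# (x, y) position at that time.
print(traj(2.0))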
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_timing_pattern(img, timing_patterns):\n draw_img = img.copy()\n for timing_pattern in timing_patterns:\n cv2.line(draw_img, timing_pattern[0], timing_pattern[1], (0, 255, 0), 2)\n show(draw_img)", "def draw_separate_position_patterns(img, found, contours):\n for i in found:\n qr_dc = img.copy()\n cv2.drawContours(qr_dc, contours, i, (0, 255, 0), 2)\n show(qr_dc)", "def circle_pattern(pattern_radius,\n circle_radius,\n count,\n center=[0.0, 0.0],\n angle=None):\n from .path import Path2D\n\n if angle is None:\n angles = np.linspace(0.0, np.pi * 2.0, count + 1)[:-1]\n elif isinstance(angle, float) or isinstance(angle, int):\n angles = np.linspace(0.0, angle, count)\n else:\n raise ValueError('angle must be float or int!')\n\n centers = np.column_stack((np.cos(angles),\n np.sin(angles))) * pattern_radius\n\n verts = collections.deque()\n ents = collections.deque()\n for circle_center in centers:\n # (3,3) center points of arc\n three = arc.to_threepoint(angles=[0, np.pi],\n center=circle_center,\n radius=circle_radius)\n ents.append(entities.Arc(points=np.arange(3) + len(verts),\n closed=True))\n # keep flat array by extend instead of append\n verts.extend(three)\n\n # translate vertices to center\n verts = np.array(verts) + center\n pattern = Path2D(entities=ents,\n vertices=verts)\n return pattern", "def detect_chessboard(frame, pattern_shape=(7, 6)):\n corners = None\n canvas = None\n img = frame.copy()\n criteria = (cv2.TERM_CRITERIA_EPS +\n cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n ret, corners = cv2.findChessboardCorners(gray, pattern_shape)\n if ret:\n corners = cv2.cornerSubPix(gray, corners,\n (11, 11), (-1, -1), criteria)\n canvas = cv2.drawChessboardCorners(img, pattern_shape,\n corners, ret)\n\n return corners, canvas", "def draw_all_position_patterns(img, found, contours):\n draw_img = img.copy()\n for i in found:\n rect = cv2.minAreaRect(contours[i])\n box = np.int0(cv2.cv.BoxPoints(rect))\n cv2.drawContours(draw_img, [box], 0, (0, 0, 255), 2)\n show(draw_img)", "def place(self, x, y, pattern: TilePattern):\n pattern_width = pattern.region.width()\n pattern_height = pattern.region.height()\n data = self.tileset.get(pattern.region)\n w = self.size[0]\n\n print(f\"drawing pattern at {x, y}\")\n for py in range(pattern_height):\n for px in range(pattern_width):\n dx = px + x\n dy = py + y\n self.data[dx + w * dy] = data[px + pattern_width * py]\n\n x *= self.tile_size\n y *= self.tile_size\n\n painter = QPainter(self.image)\n painter.setCompositionMode(QPainter.CompositionMode_Source)\n painter.drawImage(QPoint(x, y), pattern.image)\n painter.end()", "def track_ball_generic(video):\n result = []\n frames = readFrames(video)\n\n # get background with background estimator method\n background = findBackground(frames)\n # Setup background subtractor object with parameters\n subtractor = cv2.BackgroundSubtractorMOG(30, 10, 0.7, 0)\n # Feed estimated background as first input to subtractor\n subtractor.apply(background)\n\n # Iterate over every frame in video\n i = 0\n while i < len(frames):\n frame = frames[i] # get the new frame\n\n # apply background subtraction to frame\n frame = subtractor.apply(frame)\n\n # find contours in the frame\n contours, _ = cv2.findContours(frame, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # sort the contours in reverse order by width to eliminate small\n # contours caused by noise.\n sort_cnt = sorted(contours, key=lambda x: cv2.boundingRect(x)[2],\n reverse=True)\n\n # get the parameters 
of the bounding box for the largest width contour\n x, y, w, h = cv2.boundingRect(sort_cnt[0])\n # append to result list\n result.append((x, y, x + w, y + h))\n\n if VISUALIZE:\n orig_frame = frames[i]\n cv2.rectangle(orig_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.imshow('frame', orig_frame)\n cv2.waitKey(30)\n\n i += 1\n\n return result", "def get_pattern_positions(sequence, pattern):\n return [pos.span()[0] for pos in re.finditer(r'(' + pattern + ')', sequence)]", "def findAround(pic, pat, xy=None, r=None):\n if xy and r:\n h, w = pat.shape[:2]\n x, y = xy\n pic = pic[y - r : y + h + r, x - r : x + w + r]\n\n matches = cv2.matchTemplate(pat, pic, cv2.TM_CCOEFF_NORMED)\n yf, xf = np.unravel_index(matches.argmax(), matches.shape)\n return (x - r + xf, y - r + yf) if (xy and r) else (xf, yf)", "def track_ball_4(video):\n\n result = []\n frames = readFrames(video)\n\n i = 0\n while i < len(frames):\n frame = frames[i] # get the new frame\n\n # find edges of the ball in the frame\n # parameters filter out background\n edges = cv2.Canny(frame, 700, 800)\n\n # find contours in the edge frame\n contours, _ = cv2.findContours(edges, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n\n # sort the contours in reverse order by width to eliminate small\n # contours caused by noise.\n sort_cnt = sorted(contours, key=lambda x: cv2.boundingRect(x)[2],\n reverse=True)\n\n # get the parameters of the bounding box for the largest width contour\n x, y, w, h = cv2.boundingRect(sort_cnt[0])\n # append to result list\n result.append((x, y, x + w, y + h))\n\n if VISUALIZE:\n orig_frame = frames[i]\n cv2.rectangle(orig_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.imshow(\"ball\", orig_frame)\n cv2.waitKey(30)\n\n i += 1\n\n return result", "def _calc_circle_pattern(self):\n circle_pattern_dist = 0.0125\n circle_pattern = np.zeros([self.height * self.width, 3])\n for i in range(self.width):\n for j in range(self.height):\n circle_pattern[i + j * self.width, 0] = circle_pattern_dist * i - \\\n circle_pattern_dist * (self.width - 1) / 2\n circle_pattern[i + j * self.width, 1] = circle_pattern_dist * j - \\\n circle_pattern_dist * (self.height - 1) / 2\n return circle_pattern", "def attach_calibration_pattern(ax, **calibration_pattern_kwargs):\r\n \r\n pattern, flow = calibration_pattern(**calibration_pattern_kwargs)\r\n flow_max_radius = calibration_pattern_kwargs.get(\"flow_max_radius\", 1)\r\n extent = (-flow_max_radius, flow_max_radius) * 2\r\n image = ax.imshow(pattern, extent=extent)\r\n ax.spines[\"top\"].set_visible(False)\r\n ax.spines[\"right\"].set_visible(False)\r\n \r\n for spine in (\"bottom\", \"left\"):\r\n ax.spines[spine].set_position(\"zero\")\r\n ax.spines[spine].set_linewidth(1)\r\n ax.xaxis.set_minor_locator(AutoMinorLocator())\r\n ax.yaxis.set_minor_locator(AutoMinorLocator())\r\n attach_coord(ax, flow, extent=extent)\r\n circle = plt.Circle((0, 0), flow_max_radius, fill=False, lw=1)\r\n ax.add_artist(circle)\r\n \r\n return image, circle", "def get_test_pattern(img_size=(2048, 2048)):\n ny, nx = img_size\n # mask = np.zeros((ny, nx))\n\n # patterns with variable spacing\n periods = range(2, 20, 2)\n # vcounter = 0\n for ii, p in enumerate(periods):\n cell = np.zeros((p, nx))\n on_pix = int(np.ceil(p / 2))\n cell[:on_pix, :] = 1\n cell = np.tile(cell, [4, 1])\n\n if ii == 0:\n mask = cell\n else:\n mask = np.concatenate((mask, cell), axis=0)\n\n mask = mask[:, :mask.shape[0]]\n\n mask_block = np.concatenate((mask, np.rot90(mask)), axis=1)\n mask_block2 = 
np.concatenate((np.rot90(mask), mask), axis=1)\n\n mask_superblock = np.concatenate((mask_block, mask_block2))\n\n ny_reps = int(np.ceil(ny / mask_superblock.shape[0]))\n nx_reps = int(np.ceil(nx / mask_superblock.shape[1]))\n mask = np.tile(mask_superblock, [ny_reps, nx_reps])\n mask = mask[0:ny, 0:nx]\n\n return mask", "def _calculate_camera_pose(frame, K, d, corners, pattern_shape=(6, 4), grid_size=30): # noqa: E501\n img = frame.copy()\n axis = np.float32([[grid_size, 0, 0], [0, grid_size, 0],\n [0, 0, -grid_size]]).reshape(-1, 3)*2\n\n objp = np.zeros((np.prod(pattern_shape), 3), np.float32)\n objp[:, :2] = np.mgrid[0:pattern_shape[0],\n 0:pattern_shape[1]].T.reshape(-1, 2) * grid_size\n\n _, rvecs, tvecs = cv2.solvePnP(objp, corners, K, d)\n R, _ = cv2.Rodrigues(rvecs)\n # project 3D points onto image plane\n imgpts, _ = cv2.projectPoints(axis,\n rvecs, tvecs,\n K, d)\n\n canvas = computer_vision.draw_axis(img, corners, imgpts)\n return R, tvecs, canvas", "def plot_motion_patterns(self, patterns, st_0, cl = 'black'):\n\t\tplt.scatter(st_0[0], st_0[1], s = 16)\n\t\tfor i in range(len(patterns)):\n\t\t\tplt.plot(patterns[i][:,0], patterns[i][:,1], cl)\n\t\t\t# print(patterns[i])\n\t\t\t# print('________________________________')", "def create_video_hit(all_obj_locs, fps=30):\n i = 0\n print(len(all_obj_locs[::STEP]))\n for f in all_obj_locs[::STEP]:\n fig = plt.figure(figsize=(SIZE * 2, SIZE), dpi=80)\n ax = fig.add_subplot(projection='3d')\n ax.set_axis_off()\n plt.ylim([-70, 170])\n plt.xlim([1800 - 40, LANE_LENGTH + 50 + 40])\n\n # plt.axis(\"off\")\n x_s = [p[1] for p in f[:-1, :] if p[2] < 40 and 0 < p[0] < LANE_WIDTH and p[1] < LANE_LENGTH + 50]\n y_s = [p[0] for p in f[:-1, :] if p[2] < 40 and 0 < p[0] < LANE_WIDTH and p[1] < LANE_LENGTH + 50]\n z_s = [p[2] for p in f[:-1, :] if p[2] < 40 and 0 < p[0] < LANE_WIDTH and p[1] < LANE_LENGTH + 50]\n s = 300\n ax.set_zlim(-5, 140)\n ax.scatter(x_s, y_s, z_s, s=s)\n ax.scatter([f[-1, 1]], [f[-1, 0]], [f[-1, 2]], s=s / 2, color=\"red\")\n plt.savefig(\"data/frame\" + str(i) + \".png\")\n plt.close()\n plt.show()\n i += 1\n create_video_from_frames_hit(len(all_obj_locs[::STEP]), fps / STEP / DT / 10)", "def capture(self):\n with picamera.PiCamera() as camera:\n # camera setup\n camera.resolution = (frame_width, frame_height)\n camera.framerate = 32\n camera.rotation = 90\n stream = PiRGBArray(camera, size=(frame_width, frame_height))\n\n # let camera warm up\n time.sleep(1)\n avg = None\n\n prev_area = 0\n upload_cnt = 0\n upload_threshold = 75\n motion_frames = []\n frame_cnt = 0\n\n start_time = time.time()\n\n print 'Ready'\n for frame in camera.capture_continuous(stream, 'bgr',\n use_video_port=True):\n\n stream.seek(0)\n image = frame.array\n\n if avg is None:\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, tuple(blur_size), 0)\n avg = gray.copy().astype(\"float\")\n stream.truncate()\n continue\n\n (contours, avg, gray, image) = motion_detect(image, avg)\n\n #print contours\n if isinstance(contours, tuple):\n contours = list(list(contours))\n if len(contours) > 0 and (time.time() - start_time) > 20:\n if upload_cnt < upload_threshold:\n print len(contours)\n print str(datetime.datetime.now())\n sys.stdout.flush()\n time_str = get_time()\n output_filename = path + 'img_' + time_str + '-' + str(frame_cnt) + '.jpg'\n if frame_cnt % 3 == 0:\n motion_frames.append((output_filename, gray, image))\n upload_cnt += 1\n frame_cnt += 1\n else:\n upload_cnt = 0\n if motion_frames:\n if len(motion_frames) > 1:\n 
self.append_frames(motion_frames)\n motion_frames = []\n frame_cnt = 0\n\n stream.seek(0)\n stream.truncate()", "def _fast_sphere_pattern(n, radius):\n phi = (1 + np.sqrt(5)) / 2\n long_incr = 2*np.pi / phi\n dz = 2.0 / float(n)\n bands = np.arange(n)\n z = bands * dz - 1.0 + (dz/2.0)\n r = np.sqrt(1.0 - z*z)\n az = bands * long_incr\n x = r * np.cos(az)\n y = r * np.sin(az)\n points = np.column_stack((x, y, z)) * np.asarray([radius])\n\n return points", "def calibration_pattern(\r\n pixel_size: int=151,\r\n flow_max_radius: float=1,\r\n **flow_to_rgb_args\r\n) -> Tuple[np.ndarray, np.ndarray]:\r\n half_width = pixel_size // 2\r\n y_grid, x_grid = np.mgrid[:pixel_size, :pixel_size]\r\n u = flow_max_radius * (x_grid / half_width - 1)\r\n v = flow_max_radius * (y_grid / half_width - 1)\r\n flow = np.zeros((pixel_size, pixel_size, 2))\r\n flow[..., 0] = u\r\n flow[..., 1] = v\r\n flow_to_rgb_args[\"flow_max_radius\"] = flow_max_radius\r\n img = flow_to_rgb(flow, **flow_to_rgb_args)\r\n return img, flow", "def add_pattern(self, start, stop, pattern):\n self.coord2pattern[start] = []\n self.coord2pattern[start].append(pattern)", "def drawPathToFrame( oVideo, oPathXY, iFrame=1, iFrameSize=(40,40) ):\n oPathXY_t = oPathXY[:iFrame,:]\n showImage( oVideo[...,iFrame], 'Pot do okvirja %d' % iFrame )\n for i in range(1,oPathXY_t.shape[0]):\n plt.plot(oPathXY_t[i-1:i+1,0],oPathXY_t[i-1:i+1,1],'--r')\n if i==1 or (i%5)==0:\n plt.plot( oPathXY_t[i,0],oPathXY_t[i,1],'xr',markersize=3)\n \n dx = iFrameSize[0]/2; dy = iFrameSize[1]/2\n plt.plot( (oPathXY_t[-1,0]-dx,oPathXY_t[-1,0]+dx),(oPathXY_t[-1,1]+dy,oPathXY_t[-1,1]+dy),'-g') \n plt.plot( (oPathXY_t[-1,0]+dx,oPathXY_t[-1,0]+dx),(oPathXY_t[-1,1]-dy,oPathXY_t[-1,1]+dy),'-g') \n plt.plot( (oPathXY_t[-1,0]-dx,oPathXY_t[-1,0]-dx),(oPathXY_t[-1,1]-dy,oPathXY_t[-1,1]+dy),'-g')\n plt.plot( (oPathXY_t[-1,0]-dx,oPathXY_t[-1,0]+dx),(oPathXY_t[-1,1]-dy,oPathXY_t[-1,1]-dy),'-g')", "def extract_fixed_point_locations(fps):\n fixed_point_location = [fp['x'] for fp in fps]\n\n fixed_point_locations = np.vstack(fixed_point_location)\n\n return fixed_point_locations", "def create_video(all_obj_locs, fps=30):\n i = 0\n print(len(all_obj_locs[::STEP]))\n for f in all_obj_locs[::STEP]:\n plt.figure(figsize=(SIZE * 2, SIZE), dpi=80)\n plt.ylim([-LANE_LENGTH / 4 + 25, LANE_LENGTH / 4 + 75])\n plt.xlim([-50, LANE_LENGTH + 50])\n x_s = [p[1] for p in f]\n y_s = [p[0] for p in f]\n s = 10\n plt.plot([0, 0], [0 - MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.plot([LANE_LENGTH + MARGIN, LANE_LENGTH + MARGIN], [0 - MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.plot([0, LANE_LENGTH + MARGIN], [0 - MARGIN, 0 - MARGIN], color=\"red\")\n plt.plot([0, LANE_LENGTH + MARGIN], [LANE_WIDTH + MARGIN, LANE_WIDTH + MARGIN], color=\"red\")\n plt.scatter(x_s, y_s, s=s)\n x_s_pins = init_pins()[:, 0]\n y_s_pins = init_pins()[:, 1]\n plt.scatter(y_s_pins, x_s_pins, s=3, color=\"black\")\n plt.savefig(\"data/frame\" + str(i) + \".png\")\n plt.close()\n # plt.show()\n i += 1\n create_video_from_frames(len(all_obj_locs[::STEP]), fps / STEP / DT)", "def detect_markers_in_video(source_path, source_type=\"auto\", decoder_pipeline=None, pipeline_factory=None,\n tag_pixel_diameter=30.0, timestamps=None,\n start_timestamp=None, fps=3.0, cam_id=0,\n verbose=False, n_frames=None, progress=tqdm.auto.tqdm,\n calculate_confidences=True, confidence_filter=None,\n use_parallel_jobs=False, clahe=False):\n if source_type == \"auto\":\n if isinstance(source_path, str):\n if source_path.endswith(\"jpg\") or 
source_path.endswith(\"png\"):\n source_type = \"image\"\n else:\n source_type = \"video\"\n elif isinstance(source_path, list):\n source_type = \"image\"\n calculate_confidences = calculate_confidences or confidence_filter is not None\n scale = 30.0 / tag_pixel_diameter\n if decoder_pipeline is None and pipeline_factory is None:\n decoder_pipeline = get_default_pipeline()\n\n if timestamps is None:\n def generate_timestamps(start_from):\n def gen():\n i = 0.0\n while True:\n yield start_from + i * 1.0 / fps\n i += 1.0\n yield from gen()\n timestamps = generate_timestamps(start_timestamp or 0.0)\n else:\n timestamps = list(timestamps) # Evaluate generators.\n ts = timestamps[0]\n if not isinstance(ts, (np.floating, float)):\n if not ts.tzinfo or int(ts.utcoffset().total_seconds()) != 0:\n raise ValueError(\"timestamps argument must be iterable of floats or UTC datetime objects with timezones.\")\n timestamps = [t.timestamp() for t in timestamps]\n import skimage.transform\n import pipeline as bb_pipeline\n \n interrupted = False\n def interruptable_frame_generator(gen):\n nonlocal interrupted\n for g in gen:\n yield g\n if interrupted:\n print(\"Stopping early...\")\n break\n \n def preprocess_image(im):\n if clahe:\n im = skimage.exposure.equalize_adapthist(im, kernel_size=3 * tag_pixel_diameter)\n im = (skimage.transform.rescale(im, scale, order=1, multichannel=False, anti_aliasing=False, mode='constant') * 255).astype(np.uint8)\n return im\n\n def get_frames_from_images():\n import skimage.io\n for idx, (path, ts) in enumerate(zip(interruptable_frame_generator(source_path), timestamps)):\n if type(path) is str:\n im = skimage.io.imread(path, as_gray=True) \n else:\n im = path\n im = preprocess_image(im)\n yield idx, ts, im\n\n if n_frames is not None and idx >= n_frames - 1:\n break\n\n def get_frames_from_video():\n frames_generator = bb_pipeline.io.raw_frames_generator(source_path, format=None)\n \n for idx, (im, ts) in enumerate(zip(interruptable_frame_generator(frames_generator), timestamps)):\n assert im is not None\n # Skip broken videos.\n if im.shape[0] <= 0:\n print(\"Warning: Could not read frame {} of file {}. 
The video is corrupt.\".format(idx, source_path))\n break\n # Skip broken frames because clahe would fail on constant input.\n if im.min() == im.max():\n print(\"Warning: Frame {} of file {} is empty.\".format(idx, source_path))\n continue\n \n assert im.shape[0] > 0\n assert im.shape[1] > 0\n\n im = preprocess_image(im)\n\n yield idx, ts, im\n \n if n_frames is not None and idx >= n_frames - 1:\n break\n \n def get_detections_from_frame(idx, ts, im, thread_context=None, **kwargs):\n nonlocal decoder_pipeline\n if decoder_pipeline is not None:\n pipeline_results = decoder_pipeline([im])\n else:\n if \"pipeline\" not in thread_context:\n thread_context[\"pipeline\"] = pipeline_factory()\n pipeline_results = thread_context[\"pipeline\"]([im])\n \n #confident_ids = [r for c, r in zip(confidences, decoded_ids) if c >= threshold]\n #decimal_ids = set([ids.BeesbookID.from_bb_binary(i).as_ferwar() for i in confident_ids])\n\n if verbose:\n import pipeline.objects\n from pipeline.stages.visualization import ResultCrownVisualizer\n import matplotlib.pyplot as plt\n crowns = pipeline_results[pipeline.objects.CrownOverlay]\n frame = ResultCrownVisualizer.add_overlay(im.astype(np.float64) / 255, crowns)\n fig, ax = plt.subplots(figsize=(20, 10))\n plt.imshow(frame)\n plt.axis(\"off\")\n plt.show()\n \n frame_id = bb_pipeline.io.unique_id()\n required_results = pipeline_results[PipelineResult] \n n_detections = required_results.orientations.shape[0]\n decoded_ids = [list(r) for r in list(required_results.ids)]\n\n if n_detections > 0:\n frame_data = {\n \"localizerSaliency\": required_results.tag_saliencies.flatten(),\n \"beeID\": decoded_ids,\n \"xpos\": required_results.tag_positions[:, 1] / scale,\n \"ypos\": required_results.tag_positions[:, 0] / scale,\n \"camID\": [cam_id] * n_detections, \n \"zrotation\": required_results.orientations[:, 0],\n \"timestamp\": [ts] * n_detections,\n \"frameIdx\": [idx] * n_detections,\n \"frameId\": frame_id,\n \"detection_index\": range(n_detections),\n \"detection_type\": \"TaggedBee\"\n }\n \n frame_data = pd.DataFrame(frame_data)\n\n if calculate_confidences:\n confidences = np.array([np.product(np.abs(0.5 - np.array(r)) * 2) for r in decoded_ids])\n frame_data[\"confidence\"] = confidences\n if confidence_filter is not None:\n frame_data = frame_data[frame_data.confidence >= confidence_filter]\n\n else:\n frame_data = None\n\n n_bees = required_results.bee_positions.shape[0]\n\n if n_bees > 0:\n bee_data = {\n \"localizerSaliency\": required_results.bee_saliencies.flatten(),\n \"beeID\": [np.nan] * n_bees,\n \"xpos\": required_results.bee_positions[:, 1] / scale,\n \"ypos\": required_results.bee_positions[:, 0] / scale,\n \"camID\": [cam_id] * n_bees, \n \"zrotation\": [np.nan] * n_bees,\n \"timestamp\": [ts] * n_bees,\n \"frameIdx\": [idx] * n_bees,\n \"frameId\": frame_id,\n \"detection_index\": range(n_bees),\n \"detection_type\": required_results.bee_types\n }\n\n if calculate_confidences:\n bee_data[\"confidence\"] = [np.nan] * n_bees\n\n bee_data = pd.DataFrame(bee_data)\n\n if frame_data is not None:\n frame_data = pd.concat((frame_data, bee_data))\n else:\n frame_data = bee_data\n\n return idx, frame_id, ts, frame_data\n \n \n progress_bar = None\n if progress is not None:\n progress_bar = progress(total=n_frames)\n \n def save_frame_data(idx, frame_id, ts, frame_data, **kwargs):\n nonlocal frame_info\n nonlocal video_dataframe\n frame_info.append((idx, frame_id, ts))\n if frame_data is not None:\n video_dataframe.append(frame_data)\n if progress is 
not None:\n progress_bar.update()\n \n source = get_frames_from_video\n if source_type == \"image\":\n source = get_frames_from_images\n if isinstance(source_path, str):\n source_path = [source_path]\n \n if use_parallel_jobs:\n from ..utils.processing import ParallelPipeline\n\n thread_context_factory = None\n n_pipeline_jobs = 1\n if not decoder_pipeline:\n # The thread context is used to provide each thread with a unique pipeline object.\n class Ctx():\n def __enter__(self):\n return dict()\n def __exit__(self, *args):\n pass\n \n n_pipeline_jobs = 4\n thread_context_factory = Ctx\n\n jobs = ParallelPipeline([source, get_detections_from_frame, save_frame_data],\n n_thread_map={0: 1, 1: n_pipeline_jobs},\n thread_context_factory=thread_context_factory)\n else:\n def sequential_jobs():\n for im in source():\n if im is not None:\n detections = get_detections_from_frame(*im)\n save_frame_data(*detections)\n jobs = sequential_jobs\n\n frame_info = []\n video_dataframe = []\n try:\n jobs()\n except KeyboardInterrupt:\n interrupted = True\n frame_info = list(sorted(frame_info))\n if len(video_dataframe) > 0:\n video_dataframe = pd.concat(video_dataframe)\n video_dataframe.sort_values(\"frameIdx\", inplace=True)\n # Enfore frame ID datatype to be unsigned which may have gotten lost when concatenating the data frames.\n video_dataframe.frameId = video_dataframe.frameId.astype(np.uint64)\n else:\n video_dataframe = None\n \n return frame_info, video_dataframe", "def create_marker_array(\n pts, radius=0.05, color_msg=None, frame_id=\"world\", duration=1000\n):\n global pts_id\n\n markers = MarkerArray()\n for pt in pts:\n marker = Marker(\n type=Marker.SPHERE,\n # ns='velodyne',\n action=Marker.ADD,\n )\n marker.header.frame_id = frame_id\n marker.header.stamp = rospy.Time.now()\n\n marker.id = pts_id\n pts_id += 1\n\n marker.scale.x = marker.scale.y = marker.scale.z = radius\n marker.lifetime = rospy.Duration.from_sec(duration)\n if color_msg is None:\n marker.color.a = 1.0\n marker.color.r = 0.5\n marker.color.g = 0.5\n marker.color.b = 0.5\n else:\n marker.color = color_msg\n\n marker.pose.position.x = pt[0]\n marker.pose.position.y = pt[1]\n marker.pose.position.z = pt[2]\n marker.pose.orientation.w = 1.0\n\n markers.markers.append(marker)\n return markers", "def extract_patch(self, patch_radius, full_x, full_y, full_i, full_j):\n com_ijs = [self.center_of_mass_ij(time) for time in self.times]\n patch_grid = []\n patch_mask = []\n patch_x = []\n patch_y = []\n patch_i = []\n patch_j = []\n for t, time in enumerate(self.times):\n obj_slice_buff = (slice(com_ijs[t][0] - patch_radius, com_ijs[t][0] + patch_radius),\n slice(com_ijs[t][1] - patch_radius, com_ijs[t][1] + patch_radius))\n obj_slice_local = [[com_ijs[t][0] - self.i[t].min() - patch_radius,\n com_ijs[t][0] - self.i[t].min() + patch_radius],\n [com_ijs[t][1] - self.j[t].min() - patch_radius,\n com_ijs[t][1] - self.j[t].min() + patch_radius]]\n patch_i.append(full_i[obj_slice_buff])\n patch_j.append(full_j[obj_slice_buff])\n patch_x.append(full_x[obj_slice_buff])\n patch_y.append(full_y[obj_slice_buff])\n pad_i_l = abs(obj_slice_local[0][0]) if obj_slice_local[0][0] < 0 else 0\n pad_i_u = obj_slice_local[0][1] - self.timesteps[t].shape[0] \\\n if obj_slice_local[0][1] - self.timesteps[t].shape[0] > 0 else 0\n pad_j_l = abs(obj_slice_local[1][0]) if obj_slice_local[1][0] < 0 else 0\n pad_j_u = obj_slice_local[1][1] - self.timesteps[t].shape[1] \\\n if obj_slice_local[1][1] - self.timesteps[t].shape[1] > 0 else 0\n\n if 
obj_slice_local[0][0] < 0:\n obj_slice_local[0][0] = 0\n obj_slice_local[0][1] += pad_i_l\n if obj_slice_local[1][0] < 0:\n obj_slice_local[1][0] = 0\n obj_slice_local[1][1] += pad_j_l\n pad_grid = np.pad(self.timesteps[t], pad_width=[(pad_i_l, pad_i_l + pad_i_u), (pad_j_l, pad_j_l + pad_j_u)])\n pad_mask = np.pad(self.masks[t], pad_width=[(pad_i_l, pad_i_l + pad_i_u), (pad_j_l, pad_j_l + pad_j_u)])\n obj_slice_const = (slice(obj_slice_local[0][0], obj_slice_local[0][1]),\n slice(obj_slice_local[1][0], obj_slice_local[1][1]))\n patch_grid.append(pad_grid[obj_slice_const])\n patch_mask.append(pad_mask[obj_slice_const])\n patch_obj = STObject(patch_grid, patch_mask, patch_x, patch_y, patch_i, patch_j, self.start_time,\n self.end_time, step=self.step, dx=self.dx, u=self.u, v=self.v)\n return patch_obj", "def rotate_pattern(self, st2):\n\t\t# dx = st2[0] - self.x0\n\t\t# dy = st2[1] - self.y0\n\t\tdtheta = st2[2] - self.theta0\n\t\ttransformT = np.array([[cos(dtheta), -sin(dtheta), self.x0],[sin(dtheta), cos(dtheta), self.y0],[0,0,1]])\n\t\tn_p = []\n\t\tp0 = self.motion_primitives\n\t\tfor i in range(len(p0)):\n\t\t\tp0_i = p0[i]\n\t\t\txyp = np.hstack((p0_i[:,0].reshape(-1,1)-self.x0, p0_i[:,1].reshape(-1,1)-self.y0, np.ones(p0_i.shape[0]).reshape(-1,1))).T\n\t\t\t# xyp = np.hstack((p0_i[:,:2], np.ones(p0_i.shape[0]).reshape(-1,1))).T\n\t\t\tn_xyp = np.dot(transformT, xyp)\n\t\t\tn_theta = p0_i[:,-1]+dtheta\n\t\t\tnn_xyp = np.vstack((n_xyp[0,:].reshape(1,-1)+st2[0]-self.x0, n_xyp[1,:].reshape(1,-1)+st2[1]-self.y0))\n\t\t\tp2 = np.hstack((nn_xyp.T, n_theta.reshape(-1,1)))\n\t\t\tn_p.append(p2)\n\t\treturn n_p", "def pattern_generator(self, x: int, y: int) -> float:\n dis_corners = self.distance_from_corners(x, y)\n dis_center = self.distance_from_center(x, y)\n key = (dis_corners - dis_center)\n return [x / key, y / key]", "def locate_tracker(self, debug):\n\n # tmp_image =\n # tmp_image = cv2.GaussianBlur(self.frame, (11, 11), 0) # Experiment with this\n\n hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV) # Convert to HSV Color Space. 
This is temporary for testing using colored objects)\n\n mask = cv2.inRange(hsv, self.hueLower, self.hueUpper)\n\n try:\n mask = cv2.inRange(hsv, self.hueLower2, self.hueUpper2) + mask\n except AttributeError:\n pass\n\n mask = cv2.erode(mask, None, iterations=2)\n mask = cv2.dilate(mask, None, iterations=2)\n\n if debug:\n tmpMask = imutils.resize(mask, width=1000, height=1000)\n cv2.imshow(\"mask\", tmpMask)\n\n\n # find contours in the mask and initialize the current (x, y) center of the object\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n\n # only proceed if at least one contour was found\n if len(cnts) > 0:\n # find the largest contour in the mask, then use\n # it to compute the minimum enclosing circle and\n # centroid\n c = max(cnts, key=cv2.contourArea)\n\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n\n # only proceed if the radius meets a minimum size\n # if radius > 10:\n # # draw the circle and centroid on the frame,\n # # then update the list of tracked points\n # cv2.circle(frame, (int(x), int(y)), int(radius),\n # (0, 255, 255), 2)\n # cv2.circle(frame, center, 5, (0, 0, 255), -1)\n if debug:\n cv2.drawContours(self.frame, c, -1, (0, 255, 0), 20)\n return center, radius\n # update the points queue\n cv2.imshow(\"mask\", imutils.resize(mask, width=1000, height=1000))\n cv2.imshow(\"frame\", imutils.resize(self.frame, width=1000, height=1000))\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n raise OpenCVError(\"Could not find tracker!\")\n\n # return (1, 1), 1", "def calculate_camera_pose(frame, K, d, pattern_shape=(6, 4), grid_size=30): # noqa: E501\n corners, canvas = computer_vision.detect_chessboard(frame, pattern_shape) # noqa: E501\n\n if corners is None:\n canvas = frame\n text = \"No checkerboard\"\n return text, corners, canvas, None, None\n else:\n canvas = cv2.undistort(canvas, K, d)\n R, t, canvas = computer_vision._calculate_camera_pose(frame,\n K, d,\n corners,\n pattern_shape, # noqa: E501\n grid_size)\n text = \" \".join(np.round(t, 2).ravel().astype(str))\n return text, corners, canvas, R, t" ]
[ "0.53953344", "0.51301557", "0.50669867", "0.5048978", "0.50434595", "0.49598178", "0.49458113", "0.49102822", "0.49101192", "0.48921007", "0.48678896", "0.4867333", "0.4805416", "0.47268882", "0.4714568", "0.4698213", "0.46616584", "0.46566698", "0.46362138", "0.46190938", "0.4612223", "0.4606432", "0.4553725", "0.4548873", "0.45404333", "0.4535626", "0.45251444", "0.451994", "0.45107573", "0.45043436" ]
0.66206294
0
Swarm function: go through each agent and compare positions with other quads; find the difference between two locations; find the norm (Euclidean distance) between two agents
def swarm_next_location(node_locations, agents_locations, agents_velocities, agents_angles):
    # Potential Field Gradient Calculation
    # Gradient of potential field
    dv = numpy.zeros((AGENT_COUNT, DIMENSION_COUNT))  # create an array of values
    for agent_it_1 in range(0, AGENT_COUNT):
        # Inter-Agent Forces
        for agent_it_2 in range(0, AGENT_COUNT):
            n_x = int(numpy.linalg.norm(numpy.subtract(agents_locations[agent_it_1], agents_locations[agent_it_2])))
            for dimension_it in range(0, DIMENSION_COUNT):
                delta_x = agents_locations[agent_it_1][dimension_it] - agents_locations[agent_it_2][dimension_it]
                dv[agent_it_1][dimension_it] = dv[agent_it_1][dimension_it] - long_range_repulsive * (delta_x / numpy.sqrt((SMOOTHNESS_COEFFICIENT ^ 2) + n_x ^ 2)) - 2 * (repulsive_gain / repulsive_aoe) * delta_x * numpy.exp((-n_x ^ 2) / repulsive_aoe)
        # Formation Attraction Forces
        if NODE_COUNT > 0:
            for node_it in range(0, NODE_COUNT):
                n_x = int(numpy.linalg.norm(numpy.subtract(agents_locations[agent_it_1], node_locations[node_it])))  # norm of the vector between two bots
                for dimension_it in range(0, DIMENSION_COUNT):
                    delta_x = agents_locations[agent_it_1][dimension_it] - node_locations[node_it][dimension_it]
                    dv[agent_it_1][dimension_it] = dv[agent_it_1][dimension_it] + ATTRACTIVE_GAIN * (delta_x / numpy.sqrt((SMOOTHNESS_COEFFICIENT ^ 2) + n_x ^ 2)) + (short_range_attractive / attractive_aoe) * delta_x * numpy.exp((-n_x ^ 2) / attractive_aoe)
    sliding_surface = numpy.add(agents_velocities, dv)
    # Saturation Block [sat(s)]
    sx = numpy.zeros(numpy.size(sliding_surface[0]))
    for agent_it_1 in range(0, AGENT_COUNT):
        for dimension_it in range(0, DIMENSION_COUNT):
            if abs(sliding_surface[agent_it_1][dimension_it]) > SATURATION_LEVEL:
                # FIXME: not sure if this fix was correct but I changed Sx(ip, di) -> sx[ip+di] based on values found
                # in MATLAB code sample
                sx[agent_it_1 + dimension_it] = numpy.sign(sliding_surface[agent_it_1][dimension_it]) * SATURATION_LEVEL
            else:
                sx[agent_it_1 + dimension_it] = sliding_surface[agent_it_1][dimension_it]
    # Gains
    c = numpy.zeros((AGENT_COUNT, DIMENSION_COUNT))
    k = numpy.zeros((AGENT_COUNT, DIMENSION_COUNT))
    # TODO: should be able to make the loop faster somehow
    # row by row multiplication
    for agent_it_1 in range(0, AGENT_COUNT):
        c[agent_it_1] = numpy.multiply(agents_velocities[agent_it_1], REACHING_GAINS)
        k[agent_it_1] = numpy.multiply(sx[agent_it_1], SLIDING_GAINS)
    u0 = k + c
    print(u0)
    return u0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_agent_distances_vector(self):\n count = 0\n for agent in self.agents:\n agent_loc = agent.getz()\n\n for i, each_task in enumerate(self.tasks):\n dist = euclid_dist(agent_loc, each_task.getloc())\n self.agent_distances[count][i] = dist\n count += 1\n if self.DEBUG:\n print(self.agent_distances)", "def distances(self):", "def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))", "def calc_dist_to_poi(self,agent):\n mini_dist = 100000 \n for poi in self.poi_pos_list:\n mini_dist = np.linalg.norm(agent.get_pos() - poi)\n\n return mini_dist", "def distance_between(agents_row_a, agents_row_b):\n return (((agents_row_a._y - agents_row_b._y)**2) + ((agents_row_a._x - agents_row_b._x)**2))**0.5", "def test_distances(self):\n\t\tm1 = models.vgg11()\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2)\n\t\n\t\tprint(avg_dW,avg_db)\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 46.485\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)\n\t\t\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.67622\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)\n\t\t\n\t\tprint(distances)", "def test_same_distances(self):\n \n\t\tm1 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2)\n\t\t\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tprint(distances)", "def agent_distance(self, agent):\n if self.type == 'Line':\n return self._line_intersection(self.params, (agent._position.x, agent._position.y))\n elif self.type == 'Circle':\n return self._circle_intersection(self.params, (agent._position.x, agent._position.y))\n elif self.type == 'Rect':\n x, y, w, h = self.params\n candidates = dict()\n d1, p1 = self._line_intersection((x, y, x+w, y), (agent._position.x, agent._position.y))\n d2, p2 = self._line_intersection((x, y, x, y+h), (agent._position.x, agent._position.y))\n d3, p3 = self._line_intersection((x+w, y, x+w, y+h), (agent._position.x, agent._position.y))\n d4, p4 = self._line_intersection((x, y+h, x+w, y+h), (agent._position.x, agent._position.y))\n candidates[d1] = p1\n candidates[d2] = p2\n candidates[d3] = p3\n candidates[d4] = p4\n\n keylist = candidates.keys()\n keylist.sort()\n\n return keylist[0], candidates[keylist[0]]", "def two_user_route_statistics(i,j, source_data, destination_data, source_destination_data, delta=1.2):\n\toccupancy_ratio = 0.0\n\tminimum_distance_so_far = 0.0\n\tcommon_travel_distance = 0.0\n\n\ttry:\n\t\tif source_destination_data[j][i] + source_data[i][j] <= 1.2*source_destination_data[i][i] and source_destination_data[j][i] + destination_data[i][j] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = ((source_destination_data[j][i] + source_data[i][j])/(source_destination_data[j][i] + source_data[i][j]+destination_data[i][j]))\n\t\t\tsecond = ((source_destination_data[j][i] + destination_data[i][j])/(source_destination_data[j][i] + source_data[i][j]+destination_data[i][j]))\n\t\t\toccupancy_ratio = (first+second)/2\n\t\t\tcommon_travel_distance = source_destination_data[j][i]\n\t\t\tminimum_distance_so_far = 
source_data[i][j] + source_destination_data[j][i] + destination_data[i][j]\n\n\t\tif source_destination_data[i][j] + destination_data[j][i] <= 1.2*source_destination_data[i][i] and source_destination_data[i][j] + source_data[j][i] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = ((source_destination_data[i][j] + destination_data[j][i])/(source_destination_data[i][j] + destination_data[j][i]+source_data[j][i]))\t\t\n\t\t\tsecond = ((source_destination_data[i][j] + source_data[j][i])/(source_destination_data[i][j] + destination_data[j][i]+source_data[j][i]))\n\t\t\ttotal_distance = source_data[j][i] + source_destination_data[i][j] + destination_data[j][i]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[i][j]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\t\tif source_data[i][j]+source_destination_data[j][j]+destination_data[j][i] <= 1.2*source_destination_data[i][i]:\n\t\t\tfirst = (1)\n\t\t\tsecond = (source_destination_data[j][j]/(source_data[i][j]+source_destination_data[j][j]+destination_data[j][i]))\n\n\t\t\ttotal_distance = source_data[i][j] + source_destination_data[j][j] + destination_data[j][i]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[j][j]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\t\tif source_data[j][i]+source_destination_data[i][i]+destination_data[i][j] <= 1.2*source_destination_data[j][j]:\n\t\t\tfirst = (source_destination_data[i][i]/(source_data[j][i]+source_destination_data[i][i]+destination_data[i][j]))\n\t\t\tsecond = (1)\n\n\t\t\ttotal_distance = source_data[j][i]+source_destination_data[i][i]+destination_data[i][j]\n\n\t\t\tif total_distance < minimum_distance_so_far:\n\t\t\t\tminimum_distance_so_far = total_distance\n\t\t\t\tcommon_travel_distance = source_destination_data[i][i]\n\t\t\t\toccupancy_ratio = (first+second)/2\n\n\texcept Exception as e:\n\t\toccupancy_ratio = 1.0\n\t\tminimum_distance_so_far = 0.0\n\t\tcommon_travel_distance = 0.0\n\n\n\treturn occupancy_ratio, common_travel_distance, minimum_distance_so_far", "def distanceBet(a, b, name=\"temp_distBet\", keep=False):\r\n if cmds.objExists(a) and cmds.objExists(b):\r\n if keep:\r\n # create nulls:\r\n nullA = cmds.group(empty=True, name=a+\"_distBetNull\")\r\n nullB = cmds.group(empty=True, name=b+\"_distBetNull\")\r\n nullC = cmds.group(empty=True, name=b+\"_distBetNull_origRef\")\r\n cmds.pointConstraint(a, nullA, maintainOffset=False, name=nullA+\"_pointConstraint\")\r\n cmds.pointConstraint(b, nullB, maintainOffset=False, name=nullB+\"_pointConstraint\")\r\n tempToDel = cmds.pointConstraint(b, nullC, maintainOffset=False)\r\n cmds.delete(tempToDel)\r\n pointConst = cmds.pointConstraint(b, nullC, nullB, maintainOffset=False, name=nullB+\"_pointConstraint\")[0]\r\n # create distanceBetween node:\r\n distBet = cmds.shadingNode(\"distanceBetween\", n=name, asUtility=True)\r\n # connect aPos to the distance between point1:\r\n cmds.connectAttr(nullA+\".tx\", distBet+\".point1X\")\r\n cmds.connectAttr(nullA+\".ty\", distBet+\".point1Y\")\r\n cmds.connectAttr(nullA+\".tz\", distBet+\".point1Z\")\r\n # connect bPos to the distance between point2:\r\n cmds.connectAttr(nullB+\".tx\", distBet+\".point2X\")\r\n cmds.connectAttr(nullB+\".ty\", distBet+\".point2Y\")\r\n cmds.connectAttr(nullB+\".tz\", distBet+\".point2Z\")\r\n dist = cmds.getAttr(distBet+\".distance\")\r\n 
return [dist, distBet, nullA, nullB, nullC, pointConst]\r\n else:\r\n # get xform datas:\r\n aPos = cmds.xform(a, query=True, worldSpace=True, translation=True)\r\n bPos = cmds.xform(b, query=True, worldSpace=True, translation=True)\r\n # create distanceBetween node:\r\n distBet = cmds.shadingNode(\"distanceBetween\", n=name, asUtility=True)\r\n # set aPos to the distance between point1:\r\n cmds.setAttr(distBet+\".point1X\", aPos[0])\r\n cmds.setAttr(distBet+\".point1Y\", aPos[1])\r\n cmds.setAttr(distBet+\".point1Z\", aPos[2])\r\n # set bPos to the distance between point2:\r\n cmds.setAttr(distBet+\".point2X\", bPos[0])\r\n cmds.setAttr(distBet+\".point2Y\", bPos[1])\r\n cmds.setAttr(distBet+\".point2Z\", bPos[2])\r\n dist = cmds.getAttr(distBet+\".distance\")\r\n cmds.delete(distBet)\r\n return [dist, None, None, None, None, None]", "def approach_gps(g_lat,g_lon,emily_lat_start, emily_lon_start, pose_rad, Parameters): #approach a gps position using potential fields\r\n\tx_goal,y_goal = latlongtoxy(g_lat,g_lon,g_lat)\r\n\tx_e_start,y_e_start = latlongtoxy(emily_lat_start,emily_lon_start,g_lat)\r\n\r\n\tprint (\"\\n HERE I AM\\n\\n\")\r\n\r\n\tdist = haver_distance(g_lat, g_lon, emily_lat_start, emily_lon_start)\r\n\tinitial_dist = dist\r\n\r\n\tprint ('Distance: ',dist)\r\n\theading = get_heading(emily_lat_start, emily_lon_start, g_lat, g_lon)\r\n print ('After get heading')\r\n\t# Eric: I'm not sure if turn_towards is necessary for a successful run.\r\n\t#turn_towards(heading)\r\n\tprint ('After Turn towards')\r\n\t#turn towards the goal initially\r\n\r\n\tstart_time = time.time()\r\n\tcurrent_time = 0\r\n\tdstore = []\r\n\thstore = []\r\n\twhile(dist >= goal_radius):\r\n\r\n\t\t#------------ code for reading gps location of emily and its orientation ------\r\n\t\te_lat = vehicle.location.global_frame.lat\r\n\t\te_lon = vehicle.location.global_frame.lon\r\n\t\te_heading = vehicle.heading * pi/180\t\t# convert heading to radians\r\n\t\t#------------------ get e_lat,e_lon, e_orient ---------------------\r\n\r\n\r\n\t\tx_e,y_e = latlongtoxy(e_lat,e_lon,g_lat)\t\t\t#change latitude and longitude to xy\r\n\r\n\t\t#x,y are given to approach victim function as y,x to algin the north heading and direction in x,y\r\n\r\n\t\tdx,dy = approach_victim_behaviour(y_goal,x_goal, y_e,x_e, pose_rad, Parameters)\t#get potential field vector\r\n\t\trc1, rc3 = dxdytorc(dx,dy, e_heading,g_lon)\t\t\t\t\t#get rc parameters\r\n\t\tdist = haver_distance(g_lat, g_lon, e_lat, e_lon)\t\t\t\t#haversine distance\r\n\r\n\t\tcurrent_time = time.time() - start_time\r\n\t\tprint (\"Time, Heading, Distance\")\r\n\t\tprint (current_time, e_heading*180/pi, dist)\r\n\t\tdstore.append(dist)\r\n\t\thstore.append(e_heading*180/pi)\r\n\t\t#code for sending the writing the rc commands\r\n\t\t# 3 is the thrust control\r\n\t\t#vehicle.channels.overrides = {'3':rc3}\r\n\t\tsendThrottleCommand(rc3, enableThrottle)\r\n\t\ttime.sleep(0.5)\r\n\t\tvehicle.channels.overrides = {'1':rc1}\r\n\t\tprint (\"Rudder: \",rc1)\r\n\t\tprint (\"Throttle: \",rc3)\r\n\t\tsaveToLog(e_lat, e_lon,dist,rc1,rc3)\r\n\t\ttime.sleep(0.5)\r\n\tprint(initial_dist)\r\n\tprint(\"intial \", emily_lat_start,emily_lon_start)\r\n\tprint(\"final \",e_lat,e_lon)\r\n\tplt.plot(dstore)\r\n\t#plt.title('Distance form home vs time')\r\n\tplt.xlabel(\"Time\")\r\n\tplt.ylabel('Distance')\r\n\tplt.show()\r\n\tplt.plot(hstore)\r\n\tplt.show()", "def distance_score(vertex1, board, player_id): #implement preference for closer settlements\n num_buildings = 0\n total_dist = 0\n 
player_buildings = board.get_player_settlements(player_id) + board.get_player_cities(player_id)\n\n if len(player_buildings) == 0: #if it is our first turn\n return 0\n\n player_roads = board.get_player_roads(player_id)\n accessible_vertices = list(set(player_buildings+ [vertex for pair in player_roads for vertex in pair]))\n get_distance = lambda v: manhattan_distance(v, vertex1, board)\n min_distance = min(map(get_distance, accessible_vertices))\n\n enemy_buildings = [v for v in board.settlements if board.settlements[v] != player_id]\n enemy_roads = [r for r in board.roads if board.roads[r] != player_id]\n\n\n \"\"\"\n for s in board.settlements:\n if board.settlements[s] != player_id:\n vertex2 = s\n total_dist_enemies += manhattan_distance(vertex1, vertex2, board)\n num_buildings+=1\n\n for c in board.cities:\n if board.cities[c] != player_id:\n vertex2 = c\n total_dist_enemies += manhattan_distance(vertex1, vertex2, board)\n num_buildings+=1\n\n \"\"\"\n return min_distance", "def hausdorffDistance(self,fiber1,fiber2):\n polyA = fiber1.GetPolyData()\n polyB = fiber2.GetPolyData()\n\n locA = vtk.vtkMergePoints()\n locB = vtk.vtkMergePoints()\n\n locA.SetDataSet(polyA)\n locB.SetDataSet(polyB)\n\n locs = (locA,locB)\n for loc in locs:\n loc.AutomaticOn()\n loc.BuildLocator()\n\n ptsA = polyA.GetPoints()\n ptsB = polyB.GetPoints()\n\n rangeA = ptsA.GetNumberOfPoints()\n rangeB = ptsB.GetNumberOfPoints()\n\n maxd = 0.0\n maxd1 = 0.0\n avgd = 0.0\n avgd1 = 0.0\n\n distanceA = vtk.vtkFloatArray()\n distanceA.SetName(\"Distance\")\n for i in range(rangeA):\n pt = ptsA.GetPoint(i)\n bid = locB.FindClosestPoint(pt)\n ptb = ptsB.GetPoint(bid)\n d = self.pointDistance(pt,ptb)\n distanceA.InsertNextValue(d)\n avgd += d\n if d > maxd:\n maxd = d\n avgd = avgd / rangeA\n\n distanceB = vtk.vtkFloatArray()\n distanceB.SetName(\"Distance\")\n for i in range(rangeB):\n pt = ptsB.GetPoint(i)\n bid = locA.FindClosestPoint(pt)\n ptb = ptsA.GetPoint(bid)\n d = self.pointDistance(pt,ptb)\n distanceB.InsertNextValue(d)\n avgd1 += d\n if d > maxd1:\n maxd1 = d\n avgd1 = avgd1 / rangeB\n\n polyA.GetPointData().SetScalars(distanceA)\n polyB.GetPointData().SetScalars(distanceB)\n\n return max(maxd,maxd1)", "def cluster_testing_dist(agg1, agg2, partDiameter):\n agg2_temp = translate_aggregate(agg2, random_point_generator(calculate_LD(agg1), calculate_LD(agg2), calculate_COM(agg1), calculate_COM(agg2), partDiameter))\n agg2_temp = random_rotate_aggregate(agg2_temp)\n\n check = 1\n while check == 1:\n agg2_temp = translate_aggregate(agg2_temp, numpy.array((calculate_COM(agg1)-calculate_COM(agg2_temp))*0.01))\n check, index = test_collision(agg1, agg2_temp, partDiameter)\n \"\"\" Index from this part is not valid! 
Function returns '99' before collision happens.\n \"\"\"\n if (check == 2):\n # print(index)\n return numpy.linalg.norm(calculate_COM(agg1) - calculate_COM(agg2_temp)), numpy.linalg.norm(calculate_COM(agg1) - agg2_temp[:,index])\n # return numpy.linalg.norm(calculate_COM(agg1) - agg2_temp[0:3,index])\n break", "def test_equals_distance_buildings():\n for i in range(building_count):\n for j in range(building_count):\n if i == j:\n continue\n rust_result = rust_force.calculate_distance_between_two_buildings(\n rust_buildings[i], rust_buildings[j])\n python_result = calculate_distance_between_two_buildings(\n python_figures[i], python_figures[j], python_positions[i], python_positions[j])\n assert rust_result == python_result", "def simulation(nepisodes):\n # Initialize robots\n # print('I am inside the simulation')\n agents = [] # List containing all robots\n a1 = Agent(start = [0, 0], end = [grid_size-1, grid_size-1], nr = 1) # Create agent 1\n a2 = Agent(start = [0, grid_size-1], end = [grid_size-1, 0], nr = 2) # Create agent 2\n a3 = Agent(start = [grid_size-1, 0], end = [0, grid_size-1], nr = 3) # Create agent 3\n a4 = Agent(start = [grid_size-1, grid_size-1], end = [0, 0], nr = 4) # Create agent 4\n agents.append(a1)\n agents.append(a2)\n agents.append(a3)\n agents.append(a4)\n\n # for agent in agents:\n # agent.load_target('target_weights_{}.h5'.format(agent.nr))\n # agent.load_policy('policy_weights_{}.h5'.format(agent.nr))\n # print('loaded')\n\n steps_list = [[] for i in range(len(agents))]\n reward_list = [[] for i in range(len(agents))]\n cumulative_rewards = [[] for i in range(len(agents))]\n collisions_list = [[] for i in range(len(agents))]\n\n t = 0 # Set time to zero\n for i in range(nepisodes):\n t = episode(agents, t, i+1) # Run one episode\n\n print('End of episode ', i+1)\n agent_index = 0\n for agent in agents:\n steps_list[agent_index].append(agent.steps)\n reward_list[agent_index].append(agent.reward)\n collisions_list[agent_index].append(agent.collisions)\n if i == 0:\n cumulative_rewards[agent_index].append(agent.reward)\n else:\n cumulative_rewards[agent_index].append(agent.reward + cumulative_rewards[agent_index][i-1])\n agent_index += 1\n\n if i % 1000 == 0:\n with open('reward_4_agents_{}'.format(i),'wb') as f:\n pickle.dump(reward_list,f)\n\n with open('steps_4_agents_{}'.format(i), 'wb') as f:\n pickle.dump(steps_list, f)\n\n with open('cols_4_agents_{}'.format(i), 'wb') as f:\n pickle.dump(collisions_list, f)\n\n\n return steps_list, reward_list, collisions_list, cumulative_rewards", "def computeForces(self, neighbors=[]): #computing forces to drive the agents and avoid collisions \n if not self.atGoal:\n if self.entry_state % 2 == 0 and len(self.entrancex) > 0 and self.id != 4 : #checks if assigned curve is entry and switches to state 1 to follow entry bezier curve\n time2=0.5 # time used to calculate driving force \n self.local_goal = [self.entrancex[0], self.entrancey[0]] #assigning waypoint as goal\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2))) #calculating direction vector\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez) #driving force\n self.entrancex = np.delete(self.entrancex,0) #eliminating the used waypoints from the list \n self.entrancey = np.delete(self.entrancey,0) #eliminating the used waypoints from the list \n \n elif self.force_state == 1 and (abs(self.pos[0] - self.goal[0]) >400 or 
abs(self.pos[1] - self.goal[1]) >400): #checks if force-based navigation is assigned, switches to state 2\n self.F = (self.gvel-self.vel)/self.ksi #driving force\n for neighbor in neighbors:\n if neighbor.id != self.id: #and not neighbor.atGoal: \n distSq = (neighbor.pos-self.pos).dot(neighbor.pos-self.pos)\n #print(distSq, self.dhorSq)\n if distSq < self.dhorSq: # neighbor is inside the sensing radius\n tau = self.ttc(neighbor)\n #print(tau, self.timehor)\n if tau < self.timehor: # will the two agents collide in less than timehor?\n dir = self.pos + self.vel*tau - neighbor.pos - neighbor.vel*tau \n length = sqrt(dir.dot(dir))\n if length > 0:\n dir = dir/length # the direction of the force\n mag = (self.timehor - tau)/(tau + 1e-6) # the magnitude of the force\n self.F += mag*dir # add the force\n \n else: #state 3 - following the exit bezier curve\n time2=0.5 # time used to calculate driving force\n self.local_goal = [self.exitx[0], self.exity[0]]\n if abs(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos)))) >10: #to reach first point of exit curve from agents previous state position\n self.F = ((self.local_goal - self.pos)/(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos) )))*self.prefspeed)/self.ksi\n else:\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2)))\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez)\n #print(self.pos, self.local_goal)\n if len(self.exitx) > 1 :\n self.exitx = np.delete(self.exitx,0)\n self.exity = np.delete(self.exity,0)", "def agent_overlap(t_drs, h_drs, replacements):\n t_agents = get_agent(t_drs) \n h_agents = get_agent(h_drs)\n length = len(t_agents) + len(h_agents)\n if len(t_agents) is 0:\n return 0\n common = 0\n for agent in t_agents:\n if agent in h_agents:\n h_agents.pop(h_agents.index(agent))\n common =+ 1\n if common > 1:\n print(common)\n \n return len(h_agents)/len(t_agents) #seems to work better then real comparison\n '''\n else:\n for replacement in replacements:\n if get_agent(replacement[15]) == get_agent(replacement[16]):\n return 1\n '''", "def drive_distance_all(distances, motors):\n return null", "def registerInitialState(self, gameState):\n\n '''\n Make sure you do not delete the following line. 
If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n self.start = gameState.getAgentPosition(self.index)\n CaptureAgent.registerInitialState(self, gameState)\n\n \"G A M E K E Y L O C A T I O N S D E T E R M I N A T I O N\"\n if self.red:\n leftEdge = gameState.data.layout.width / 2\n rightEdge = gameState.data.layout.width - 2 #don't need the last wall\n self.safeColumn = leftEdge - 2 # -1 doesn't always seem to work\n else:\n leftEdge = 1\n rightEdge = gameState.data.layout.width / 2\n self.safeColumn = rightEdge + 2\n\n self.safeSpaces = []\n for h in xrange(1,gameState.data.layout.height-1):\n if not gameState.data.layout.isWall((self.safeColumn, h)):\n self.safeSpaces += [(self.safeColumn, h)]\n\n\n \"S T A T E A S S I G N M E N T\"\n pos = gameState.getAgentState(self.index).getPosition()\n self.friend = min(2 + int(not self.red), 2 - self.index + 2 * int(not self.red))\n friendPos = gameState.getAgentState(self.friend).getPosition()\n opps = [gameState.getAgentState(el).getPosition() for el in [1 - int(not self.red), 3 - int(not self.red)] ]\n\n print \"I am agent\", self.index, \"at position \", pos\n #print \"agent 0:\", gameState.getAgentState(0).getPosition()\n print \"My friend agent\", self.friend, \"is at position \", friendPos\n print \"My first enemy agent is at position \", opps[0]\n print \"My second enemy agent is at position \", opps[1]\n\n self.top = False\n self.undecided = False\n\n if pos[1] > friendPos[1]:\n print \"My friend is lower on the map, and I will take top Quad\"\n self.top = True\n elif pos[1] < friendPos[1]:\n print \"My friend is higher on the map, and I will take bottom Quad\"\n else:\n self.undecided = True\n\n \"F O O D A S S I G N M E N T\"\n self.initFood = self.getFood(gameState).asList()\n self.myFood = self.initFood[:] #this is will be updated during our A* Search for theoretical consumption\n print self.myFood\n\n \"I N I T I A L F O O D A S S I G N M E N T S \"\n\n start = time.time()\n print 'eval time for moves: %.4f' % (time.time() - start)\n\n\n \"D E B U G G I N G\"\n print \"Coloring my safe column white\"\n self.debugDraw([(self.safeColumn, el) for el in xrange(0, gameState.data.layout.height)], [1,1,1], clear=False)\n\n print \"Coloring my safe spaces\", self.safeSpaces, \"blue\"\n self.debugDraw(self.safeSpaces, [0,0,1], clear=False)\n\n self.counter = 0\n self.moves = []\n self.intendedCoords =[]\n self.best = None\n\n #new\n print \"Using my sweet time to find next moves during init as agent\", self.index\n self.best = self.ActionLoop(gameState, 140)\n self.moves = self.best.getDir()[1]\n self.counter = len(self.moves)\n self.cacheSize = len(self.moves)\n #new", "def sense(self, agents, agent_index, top_down_map=None):\n host_agent = agents[agent_index]\n other_agent_dists = {}\n sorted_pairs = sorted(other_agent_dists.items(),\n key=operator.itemgetter(1))\n\n sorting_criteria = []\n for i, other_agent in enumerate(agents):\n if other_agent.id == host_agent.id:\n continue\n # project other elements onto the new reference frame\n rel_pos_to_other_global_frame = other_agent.pos_global_frame - \\\n host_agent.pos_global_frame\n p_parallel_ego_frame = np.dot(rel_pos_to_other_global_frame, host_agent.ref_prll)\n p_orthog_ego_frame = np.dot(rel_pos_to_other_global_frame, host_agent.ref_orth)\n dist_between_agent_centers = vec2_l2_norm(rel_pos_to_other_global_frame)\n dist_2_other = 
dist_between_agent_centers - host_agent.radius - other_agent.radius\n combined_radius = host_agent.radius + other_agent.radius\n\n if dist_between_agent_centers > Config.SENSING_HORIZON:\n # print(\"Agent too far away\")\n continue\n\n if self.agent_sorting_method != \"time_to_impact\":\n time_to_impact = None\n else:\n time_to_impact = compute_time_to_impact(host_agent.pos_global_frame,\n other_agent.pos_global_frame,\n host_agent.vel_global_frame,\n other_agent.vel_global_frame,\n combined_radius)\n\n sorting_criteria.append([i, round(dist_2_other,2), p_orthog_ego_frame, time_to_impact])\n\n clipped_sorted_inds = self.get_clipped_sorted_inds(sorting_criteria)\n clipped_sorted_agents = [agents[i] for i in clipped_sorted_inds]\n\n other_agents_states = np.zeros((Config.MAX_NUM_OTHER_AGENTS_OBSERVED, 7))\n other_agent_count = 0\n for other_agent in clipped_sorted_agents:\n if other_agent.id == host_agent.id:\n continue\n # project other elements onto the new reference frame\n rel_pos_to_other_global_frame = other_agent.pos_global_frame - \\\n host_agent.pos_global_frame\n p_parallel_ego_frame = np.dot(rel_pos_to_other_global_frame,\n host_agent.ref_prll)\n p_orthog_ego_frame = np.dot(rel_pos_to_other_global_frame,\n host_agent.ref_orth)\n v_parallel_ego_frame = np.dot(other_agent.vel_global_frame,\n host_agent.ref_prll)\n v_orthog_ego_frame = np.dot(other_agent.vel_global_frame,\n host_agent.ref_orth)\n dist_2_other = np.linalg.norm(rel_pos_to_other_global_frame) - \\\n host_agent.radius - other_agent.radius\n combined_radius = host_agent.radius + other_agent.radius\n\n other_obs = np.array([p_parallel_ego_frame,\n p_orthog_ego_frame,\n v_parallel_ego_frame,\n v_orthog_ego_frame,\n other_agent.radius,\n combined_radius,\n dist_2_other])\n \n if other_agent_count == 0:\n host_agent.other_agent_states[:] = other_obs\n\n other_agents_states[other_agent_count,:] = other_obs\n other_agent_count += 1\n\n host_agent.num_other_agents_observed = other_agent_count\n\n return other_agents_states", "def get_nodes_distance(dbpath,node1,node2,inst,stepname,nframe=-1):\n odb = openOdb(path=dbpath)\n _inst = odb.rootAssembly.instances[inst]\n ic = odb.rootAssembly.instances[inst].nodes\n us = odb.steps[stepname].frames[nframe].fieldOutputs['U'].getSubset(region=_inst).values\n xx1 = ic[node1-1].coordinates[0]+us[node1-1].data[0]\n yy1 = ic[node1-1].coordinates[1]+us[node1-1].data[1]\n xx2 = ic[node2-1].coordinates[0]+us[node2-1].data[0]\n yy2 = ic[node2-1].coordinates[1]+us[node2-1].data[1]\n if _inst.embeddedSpace == THREE_D:\n zz1 = ic[node1-1].coordinates[2]+us[node1-1].data[2]\n zz2 = ic[node2-1].coordinates[2]+us[node2-1].data[2]\n d = np.sqrt((xx2-xx1)**2 + (yy2-yy1)**2 + (zz2-zz1)**2)\n else:\n d = np.sqrt((xx2-xx1)**2+(yy2-yy1)**2)\n return d", "def distance_between_actor(vehicle, ped):\n\n return distance_to_vehicle(vehicle, ped.get_location())", "def test_distances(self):\n\n cent_1 = np.array([0.5, 0.5])\n verts_1 = np.array([[0., 1.], [0., 0.], [1., 0.], [1., 1.]])\n cent_2 = cent_1 - 0.5\n verts_2 = verts_1 - np.array([0.5, 0.5])\n\n # Compare the center-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.cvdist(verts_1, cent_1) == po.cvdist(verts_2, cent_2)))\n # Compare the vertex-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.vvdist(verts_1) == po.vvdist(verts_2)))", "def measure_distance():\n # Get the active object\n obj = bpy.context.active_object\n \n # Switch in object mode\n 
bpy.ops.object.mode_set(mode='OBJECT')\n\n # Get the two selected vertices\n twoVerts = [None, None]\n index = 0\n for vertex in obj.data.vertices:\n if vertex.select:\n twoVerts[index] = (obj.matrix_world @ vertex.co)\n index = index + 1\n if index == 2:\n break \n \n print(twoVerts)\n \n # Calculate the distance between the two points\n if twoVerts[0] != None and twoVerts[1] != None:\n bpy.context.scene.distance[0] = abs(twoVerts[0].x - twoVerts[1].x)\n bpy.context.scene.distance[1] = abs(twoVerts[0].y - twoVerts[1].y)\n bpy.context.scene.distance[2] = abs(twoVerts[0].z - twoVerts[1].z)\n bpy.context.scene.distance[3] = sqrt(bpy.context.scene.distance[0]**2 + bpy.context.scene.distance[1]**2 + bpy.context.scene.distance[2]**2)\n else:\n bpy.context.scene.distance[0] = 0\n bpy.context.scene.distance[1] = 0\n bpy.context.scene.distance[2] = 0\n bpy.context.scene.distance[3] = 0 \n \n # Switch in edit mode\n bpy.ops.object.mode_set(mode='EDIT')", "def test_equals_distance_clusters_rust():\n rust_result = rust_force.calculate_distance_between_two_clusters(\n rust_buildings[:n_first_cluster], rust_buildings[n_first_cluster:], rust_first_cluster_position,\n rust_second_cluster_position)\n rust_result_parallel = rust_force.calculate_distance_between_two_clusters_parallel(\n rust_buildings[:n_first_cluster], rust_buildings[n_first_cluster:], rust_first_cluster_position,\n rust_second_cluster_position)\n assert rust_result == rust_result_parallel", "def calc_dist(self, neighboring_pos):\n vec = np.array([i[1] - i[0] for i in zip(self.pos, neighboring_pos)])\n dist = np.linalg.norm(vec)\n return vec, dist", "def distance(brd1,brd2):\n\n step=brd1[1,0]-brd1[0,0]\n return np.sum(np.abs(brd1[:,1]-brd2[:,1]))*step", "def hausdorffDistance(self,id1,id2):\n #productive #math\n profprint()\n node1 = slicer.mrmlScene.GetNodeByID(id1)\n polydata1=node1.GetPolyData()\n node2 = slicer.mrmlScene.GetNodeByID(id2)\n polydata2=node2.GetPolyData()\n nb1 = polydata1.GetNumberOfPoints()\n nb2 = polydata2.GetNumberOfPoints()\n minimum=None\n maximum=None\n JJ,jj=None,None\n II,ii=None,None\n pt1=[0,0,0]\n pt2=[0,0,0]\n polydata1.GetPoint(1,pt1)\n polydata1.GetPoint(nb1-1,pt2)\n minVal1=min(pt1[2],pt2[2])\n maxVal1=max(pt1[2],pt2[2])\n pt1=[0,0,0]\n pt2=[0,0,0]\n pt1b,pt2b=None,None\n polydata2.GetPoint(1,pt1)\n polydata2.GetPoint(nb2-1,pt2)\n minVal2 = min(pt1[2],pt2[2])\n maxVal2 = max(pt1[2],pt2[2])\n valueBase=max(minVal1,minVal2)\n valueTip=min(maxVal1,maxVal2)\n\n # truncate polydatas\n truncatedPolydata1 = self.clipPolyData(node1,valueBase)\n truncatedPolydata2 = self.clipPolyData(node2,valueBase)\n\n cellId=vtk.mutable(1)\n subid=vtk.mutable(1)\n dist=vtk.mutable(1)\n cl2=vtk.vtkCellLocator()\n cl2.SetDataSet(truncatedPolydata2)\n cl2.BuildLocator()\n # Hausforff 1 -> 2\n minima=[]\n for i in range(int(nb1/float(10))):\n pt=[0,0,0]\n polydata1.GetPoint(10*i,pt)\n closest=[0,0,0]\n cl2.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff12 = max(minima)\n \n # Hausforff 2 -> 1\n minima=[]\n cl1=vtk.vtkCellLocator()\n cl1.SetDataSet(truncatedPolydata1)\n cl1.BuildLocator()\n for i in range(int(nb2/float(10))):\n pt=[0,0,0]\n polydata2.GetPoint(10*i,pt)\n closest=[0,0,0]\n cl1.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff21 = max(minima)\n return max(hausdorff12,hausdorff21)", "def 
hausdorffDistance13(self,id1,id2):\n #productive #math\n profprint()\n node1 = slicer.mrmlScene.GetNodeByID(id1)\n polydata1=node1.GetPolyData()\n node2 = slicer.mrmlScene.GetNodeByID(id2)\n polydata2=node2.GetPolyData()\n nb1 = polydata1.GetNumberOfPoints()\n nb2 = polydata2.GetNumberOfPoints()\n minimum=None\n maximum=None\n JJ,jj=None,None\n II,ii=None,None\n pt1=[0,0,0]\n pt2=[0,0,0]\n polydata1.GetPoint(1,pt1)\n polydata1.GetPoint(nb1-1,pt2)\n minVal1=min(pt1[2],pt2[2])\n maxVal1=max(pt1[2],pt2[2])\n pt1=[0,0,0]\n pt2=[0,0,0]\n pt1b,pt2b=None,None\n polydata2.GetPoint(1,pt1)\n polydata2.GetPoint(nb2-1,pt2)\n minVal2 = min(pt1[2],pt2[2])\n maxVal2 = max(pt1[2],pt2[2])\n valueBase=max(minVal1,minVal2)\n valueTip=min(maxVal1,maxVal2)\n cellId=vtk.mutable(1)\n subid=vtk.mutable(1)\n dist=vtk.mutable(1)\n cl2=vtk.vtkCellLocator()\n cl2.SetDataSet(polydata2)\n cl2.BuildLocator()\n # Hausforff 1 -> 2\n minima=[]\n for i in range(int(nb1/float(100))):\n pt=[0,0,0]\n polydata1.GetPoint(100*i,pt)\n closest=[0,0,0]\n cl2.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff12 = max(minima)\n \n # Hausforff 2 -> 1\n minima=[]\n cl1=vtk.vtkCellLocator()\n cl1.SetDataSet(polydata1)\n cl1.BuildLocator()\n for i in range(int(nb2/float(10))):\n pt=[0,0,0]\n polydata2.GetPoint(10*i,pt)\n closest=[0,0,0]\n cl1.FindClosestPoint(pt,closest,cellId,subid,dist)\n if abs(closest[2]-pt[2])<=1:\n minima.append(self.distance(pt,closest))\n else:\n minima.append(0)\n hausdorff21 = max(minima)\n return max(hausdorff12,hausdorff21)" ]
[ "0.6117487", "0.59835637", "0.59294254", "0.5841786", "0.5818312", "0.5792214", "0.5784532", "0.5768335", "0.56439126", "0.56345", "0.5620634", "0.56057656", "0.5601702", "0.55617213", "0.55402714", "0.5538578", "0.5528003", "0.5515387", "0.5493555", "0.5465111", "0.54553723", "0.5437392", "0.54275215", "0.5413407", "0.5412416", "0.5411462", "0.54043293", "0.54040694", "0.5403368", "0.5359742" ]
0.6788852
0
Upload 10 files at a time!
def upload_all(all_file_names):
    with ThreadPool(processes=int(10)) as pool:
        return pool.map(upload_file, all_file_names)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload(server):\n for i in range(10):\n start_time = time.time()\n logging.debug('Start uploading: %d' % i)\n os.system(\"scp uploads/18UPLOAD %s:\" % server)\n end_time = time.time()\n logging.debug('End uploading: ')\n logging.debug('Time taken by uploader: %s' % (end_time - start_time))", "def upload_files(self, files):\n\n for f in files:\n self.scp.put(f, recursive=True)", "def upload(all_files, session):\n remote_directory = unique_path('cli-import')\n log.info('uploading files to %s' % remote_directory)\n\n for filename in all_files:\n callback = _progress_callback\n log.info(\"Uploading %s\" % filename)\n session.uploadWrapper(filename, remote_directory, callback=callback)\n if callback:\n print('')\n return remote_directory", "def run(n):\n base_files = os.listdir(DATA_DIR)\n\n # Keep track of files we have uploaded\n uploaded = []\n\n # Upload files\n for _ in range(n):\n files = random.choices(base_files, k=random.randint(1, 15))\n name, checksum = assemble_file(files)\n upload_file(name)\n os.remove(name)\n uploaded.append((name, checksum))\n\n # Download files\n for name, checksum in uploaded:\n download_and_validate_checksum(name, checksum)\n\n # Validation checks\n check_pack_sizes()\n check_pack_checksums()\n\n print(server_stats())\n\n # Delete all files except for the last two. This should force the vacuum to rebalance\n # some packfiles.\n to_delete = uploaded[:-2]\n remaining = uploaded[-2:]\n for name, _ in to_delete:\n delete_file(name)\n\n # Run a vacuum and wait for it to complete\n vacuum_id = vacuum()\n status = None\n for _ in range(10):\n status = vacuum_status(vacuum_id)\n if status != \"RUNNING\":\n break\n time.sleep(1)\n if status != \"SUCCEEDED\":\n raise ValueError(f\"vacuum failed {status}\")\n\n # Check that the remaining files can still be downloaded after the vacuum\n for name, checksum in remaining:\n download_and_validate_checksum(name, checksum)", "def add_files(self,count=None):\n message_buffer =[]\n if count is None:\n count = len(self.files)\n while count:\n count -= 1\n message_buffer.append((count,base64.b64encode(self.files.pop()),0)) # required to maintain compatibility with\n if len(message_buffer) > 9:\n self.queue.write_batch(message_buffer)\n message_buffer = []\n self.queue.write_batch(message_buffer)", "def upload_file(log_filename_list, index):\n initlog(\"begin to upload files to server!!!!!!!\") \n for filename in log_filename_list:\n ftp_server = '10.10.3.25'\n ftp_port = '21'\n remotepath = '.'\n \n ftp = FTP() \n ftp.set_debuglevel(2)\n ftp.connect(ftp_server, ftp_port)\n ftp.login('', '')\n ftp.cwd(remotepath)\n bufsize = 1024\n \n try:\n file_handler = open(filename, 'rb') \n ftp.storbinary('STOR %s' % (str(index) + '_' + os.path.basename(filename)), file_handler, bufsize)\n ftp.set_debuglevel(0)\n except Exception, e:\n initlog('failed to upload files; %s' % str(e))\n else:\n file_handler.close()\n finally:\n ftp.quit()", "def push(self):\n\n self.start = time.time()\n self.log.info('Uploading {} files to database...'\n ''.format(len(self.filenames)))\n i = 0\n\n # Loop over a portion of files and upload them\n if self.n_files != -1:\n files = self.filenames[0:self.n_files]\n else:\n files = self.filenames\n\n for i, f in enumerate(files):\n\n # If were not debugging script allow exceptions and report them\n # later\n if not self.debug:\n try:\n self._push_one(f, **self.meta)\n\n except Exception as e:\n self.log.error('Error with {}'.format(f))\n self.log.error(e)\n self.errors.append((f, e))\n\n else:\n 
self._push_one(f, **self.meta)\n\n self.session.close()\n\n # Log the ending errors\n self.report(i + 1)", "def upload(self, folder_list, files):\n current_folder_id = self.top_folder_id\n for fname in folder_list:\n current_folder_id = self._fetch_or_create_folder(fname, current_folder_id)\n for file in files:\n self._upload_detail(file, current_folder_id)", "def upload_files_s3(files, bucket):\n \n print('************************************')\n print('Uploading files to s3 bucket...')\n print('************************************')\n \n for i in range(len(files)):\n upload_file_s3(files[i], bucket)\n \n print('************************************')\n print('Upload complete')\n print('************************************')", "def test_upload_20_products(self):\n for index in range(0, 20):\n with open('./client_assets/dog.jpg', 'rb') as file:\n coll = json.dumps({\n \"collectionName\": \"Apple %d\" % randint(0, 9),\n \"collectionDesc\": \"Apple desc %s\" % (randint(0, 9))\n })\n response = self.client.post('http://localhost:8000/webapp/api/products/', {\n 'title': 'test post 1', 'imageSource': file, 'collection': coll}, format='multipart')\n self.assertEqual(response.status_code, 201)", "def test_batch_upload(\n large_upload_collection: UploadCollection,\n fake_session: HexpySession,\n caplog: CaptureFixture,\n) -> None:\n responses.add(\n responses.POST, HexpySession.ROOT + \"content/upload\", json={}, status=200\n )\n\n client = ContentUploadAPI(fake_session)\n\n with caplog.at_level(logging.INFO):\n response = client.upload(\n document_type=123456789, items=large_upload_collection, request_usage=True\n )\n\n assert (\n caplog.records[0].msg\n == \"More than 1000 items found. Uploading in batches of 1000.\"\n )\n\n assert response == {\"Batch 0\": {}, \"Batch 1\": {}, \"Batch 2\": {}, \"Batch 3\": {}}", "def upload(self):\n while not self._upload_queue.empty():\n logger.info('%d files left to upload', self._upload_queue.qsize())\n self._sia_condition_waiter.wait_for_available_upload_slot()\n job = self._upload_queue.get()\n if (not self._process_upload_job_async(job)) and (job.failure_count\n < 3):\n self._upload_queue.put(job)\n self._sia_condition_waiter.wait_for_all_uploads_to_complete()\n self._exit_event.set()", "def _make_files(self, dir, num_files=10):\n for i in range(num_files):\n self._make_random_file(dir)", "def upload_progress(self, cloud_file, size, uploaded):", "async def create_upload_files(files: List[UploadFile] = File(...)):\n\n if len(files) > 3:\n return {\" \": {\"mode\": \"File Limit Exceeded\"}}\n \n filename = \"_temp_files_one/myfilem.wav\"\n res_json = {}\n file_counter = 0\n for upload_file in files:\n \n with open(filename, \"wb\") as file_object:\n \n file_object.write(upload_file.file.read())\n \n res_json[upload_file.filename + str(file_counter)] = predict_many(filename)\n \n os.remove(filename)\n \n return res_json", "def upload(filename):\n client = connect()\n for _ in range(RETRIES):\n description = str(time.asctime()).replace(' ', '_')\n data = client.upload_archive(\n vaultName=VAULT_NAME,\n archiveDescription=description,\n body=open(filename))\n print(\"Success at \" + time.asctime() + \": ID \" + data['archiveId'])\n return", "async def create_upload_files(background_tasks: BackgroundTasks, files: List[UploadFile] = File(...), db: Session = Depends(get_db)):\n background_tasks.add_task(process_wrist, files)\n return {\"status\": \"success\"}", "def _upload_samples(self, samples):\n # Iterate over the full set of provided samples, uploading them 
in chunks.\n for offset in range(0, len(samples), self.upload_chunk_size):\n chunk = samples[offset:offset + self.upload_chunk_size]\n self.api.upload_samples(offset, chunk)", "def upload_media_to_bandwidth(media_files):\n for filename in media_files:\n with open(filename, \"rb\") as f:\n file_content = f.read()\n try:\n ##Note: The filename is doubling as the media id##\n response = messaging_client.upload_media(MESSAGING_ACCOUNT_ID, filename, str(len(file_content)), body=file_content)\n except Exception as e:\n print(e)", "def upload_file(url, filename, metadata={}):\n while True:\n files = {'dataFile': open(filename, 'rb')}\n r = requests.post(url, files=files, data=metadata)\n\n # Check if everything went good\n if r.status_code is 200:\n res = r.json()\n\n if res['success'] and md5_checksum(filename) == res['md5']:\n print('[ OK ] File uploaded successfully!')\n return True\n else:\n print('[ WARNING ] Something went werong. Trying again...')\n time.sleep(10)\n else:\n print('[ ERROR ] Connection error. Status code: {}'.format(r.status_code))", "def push_backup(args: Arguments) -> None:\n\n files = get_files_from_previous_backup(args.site)\n bucket = get_bucket(args)\n\n for path in files:\n upload_file(\n path=path,\n site_name=args.site,\n bucket=bucket,\n bucket_directory=args.bucket_directory,\n )\n\n print(\"Done!\")", "def upload(files, session, samples_resource, server_url, threads=DEFAULT_UPLOAD_THREADS,\n validate=True, log_to=None, metadata=None, tags=None):\n if threads is None:\n threads = 1\n\n filenames = []\n file_sizes = []\n for file_path in files:\n normalized_filename, file_size = _file_stats(file_path, validate=validate)\n filenames.append(normalized_filename)\n file_sizes.append(file_size)\n\n # set up the logging\n bar_length = 20\n if log_to is not None:\n log_to.write('Uploading: Preparing upload(s)... ')\n log_to.flush()\n\n overall_size = sum(file_sizes)\n validated_sizes = {filename: 0 for filename in filenames}\n transferred_sizes = {filename: 0 for filename in filenames}\n\n # TODO: we should use click.progressbar?\n def progress_bar_display(file_id, bytes_transferred, validation=False):\n validation_in_progress = sum(validated_sizes.values()) != overall_size\n if validation and validation_in_progress:\n # Validating mode\n prev_progress = sum(validated_sizes.values()) / overall_size\n validated_sizes[file_id] = bytes_transferred\n progress = sum(validated_sizes.values()) / overall_size\n else:\n # Uploading mode\n prev_progress = sum(transferred_sizes.values()) / overall_size\n transferred_sizes[file_id] = bytes_transferred\n progress = sum(transferred_sizes.values()) / overall_size\n\n if floor(100 * prev_progress) == floor(100 * progress):\n return\n\n block = int(round(bar_length * progress))\n bar = '#' * block + '-' * (bar_length - block)\n if validation and validation_in_progress:\n log_to.write('\\rValidating: [{}] {:.0f}% '.format(bar, progress * 100))\n elif progress != 1:\n log_to.write('\\rUploading: [{}] {:.0f}% '.format(bar, progress * 100))\n else:\n log_to.write('\\rUploading: Finalizing upload... 
')\n log_to.flush()\n\n progress_bar = None if log_to is None else progress_bar_display\n\n # first, upload all the smaller files in parallel (if multiple threads are requested)\n uploading_uuids = []\n if threads > 1:\n import ctypes\n thread_error = Value(ctypes.c_wchar_p, '')\n semaphore = BoundedSemaphore(threads)\n upload_threads = []\n\n def threaded_upload(*args):\n def _wrapped(*wrapped_args):\n semaphore.acquire()\n try:\n file_uuid = upload_file(*wrapped_args[:-1])\n if file_uuid:\n uploading_uuids.append(file_uuid)\n except Exception as e:\n # handle inside the thread to prevent the exception message from leaking out\n wrapped_args[-1].value = '{}'.format(e)\n raise SystemExit\n semaphore.release()\n\n # the thread error message must be the last parameter\n thread = Thread(target=_wrapped, args=args + (thread_error, ))\n thread.daemon = True\n thread.start()\n upload_threads.append(thread)\n else:\n threaded_upload = upload_file\n\n upload_threads = []\n uploading_files = []\n for file_path, filename, file_size in zip(files, filenames, file_sizes):\n if file_size < MULTIPART_SIZE:\n file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)\n file_uuid = threaded_upload(file_obj, filename, session, samples_resource, log_to,\n metadata, tags)\n if file_uuid:\n uploading_uuids.append(file_uuid)\n uploading_files.append(file_obj)\n\n if threads > 1:\n # we need to do this funky wait loop to ensure threads get killed by ctrl-c\n while True:\n for thread in upload_threads:\n # hopefully no one has a <5Gb file that takes longer than a week to upload\n thread.join(604800)\n if all(not thread.is_alive() for thread in upload_threads):\n break\n if thread_error.value != '':\n raise UploadException(thread_error.value)\n\n # lastly, upload all the very big files sequentially\n for file_path, filename, file_size in zip(files, filenames, file_sizes):\n if file_size >= MULTIPART_SIZE:\n file_obj = _wrap_files(file_path, logger=progress_bar, validate=validate)\n upload_large_file(file_obj, filename, session, samples_resource, server_url,\n threads=threads, log_to=log_to)\n file_obj.close()\n\n if log_to is not None:\n log_to.write('\\rUploading: All complete.' 
+ (bar_length - 3) * ' ' + '\\n')\n log_to.flush()\n\n return uploading_uuids", "def upload_handler(self):\n \n for root, dirs, files in os.walk(self.path):\n\n current_dir = os.path.basename(root)\n \n if root == self.path:\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True)\n else:\n parents_id = self.filesystem[os.path.dirname(root)][\"id\"]\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True, parents_id=[parents_id])\n print(f\"\\033[94m The directory {current_dir} was uploaded \\033[0m\")\n\n self.filesystem[root.rstrip(\"/\")] = { \"id\": root_id, \"files\": [] }\n \n if files:\n for f in files:\n if f not in IGNORE_FILES and os.path.getsize(root+\"/\"+f) > 0:\n file_id = self.gapy.create_file(f, path=root, parents_id=[root_id])\n self.filesystem[root][\"files\"].append({ \"name\": f, \"id\": file_id})\n print(f\"\\033[94m The file {f} was uploaded \\033[0m\")\n \n self.update_fs()", "def upload():\n form = request.form\n\n # Create a unique \"session ID\" for this particular batch of uploads.\n upload_key = str(uuid4())\n\n # Is the upload using Ajax, or a direct POST by the form?\n is_ajax = False\n if form.get(\"__ajax\", None) == \"true\":\n is_ajax = True\n\n # Target folder for these uploads.\n target = app.config['UPLOAD_FOLDER'] + \"/{}\".format(upload_key)\n try:\n os.mkdir(target)\n except:\n if is_ajax:\n return ajax_response(False, \"Couldn't create upload directory: {}\".format(target))\n else:\n return \"Couldn't create upload directory: {}\".format(target)\n\n for image_upload in request.files.getlist(\"file\"):\n filename = secure_filename(image_upload.filename)\n destination = \"/\".join([target, filename])\n print(\"Accept incoming file:\", filename)\n print(\"Save it to:\", destination)\n image_upload.save(destination)\n upload_image.delay(destination)\n\n if is_ajax:\n return ajax_response(True, upload_key)\n else:\n return redirect(\"/\")", "def import_queued_submissions(conn, limit=50):\n query = schema.submission.select(schema.submission.c.handled == False).limit(limit)\n count = 0\n for submission in conn.execute(query):\n import_submission(conn, submission)\n count += 1\n logger.debug(\"Imported %d submissions\", count)", "def upload_all_parts(self):\n if not self.upload_id:\n raise RuntimeError(\"Attempting to use a multipart upload that has not been initiated.\")\n\n if self.file.name != \"<stdin>\":\n size_left = file_size = os.stat(self.file.name)[ST_SIZE]\n nr_parts = file_size / self.chunk_size + (file_size % self.chunk_size and 1)\n debug(\"MultiPart: Uploading %s in %d parts\" % (self.file.name, nr_parts))\n else:\n debug(\"MultiPart: Uploading from %s\" % (self.file.name))\n\n\tself.chunk_size = self.s3.config.multipart_chunk_size_mb * 1024 * 1024\n\n seq = 1\n\tif self.file.name != \"<stdin>\":\n while size_left > 0:\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = min(file_size - offset, self.chunk_size)\n size_left -= current_chunk_size\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d of %d, %s]\" % (seq, nr_parts, \"%d%sB\" % formatSize(current_chunk_size, human_readable = True))\n }\n try:\n self.upload_part(seq, offset, current_chunk_size, labels)\n except:\n error(u\"Upload of '%s' part %d failed. 
Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n else:\n while True:\n buffer = self.file.read(self.chunk_size)\n offset = self.chunk_size * (seq - 1)\n current_chunk_size = len(buffer)\n labels = {\n 'source' : unicodise(self.file.name),\n 'destination' : unicodise(self.uri.uri()),\n 'extra' : \"[part %d, %s]\" % (seq, \"%d%sB\" % formatSize(current_chunk_size, human_readable = True))\n }\n if len(buffer) == 0: # EOF\n break\n try:\n self.upload_part(seq, offset, current_chunk_size, labels, buffer)\n except:\n error(u\"Upload of '%s' part %d failed. Aborting multipart upload.\" % (self.file.name, seq))\n self.abort_upload()\n raise\n seq += 1\n\n debug(\"MultiPart: Upload finished: %d parts\", seq - 1)", "def upload_image():\n url = \"http://uploads.im/api?upload=\" + CHAT['discord']['img_upload_url']\n for i in range(5):\n response = requests.get(url)\n if response.ok:\n break\n print(\"Upload not successful. Retrying...\")\n try:\n return response.json()['data']['img_url']\n except Exception as exc:\n raise UploadError(str(exc), extra_data=response)", "def upload_start(self, local_path, cloud_file, size):\n\t\telog(\"uploading {1} ({2})\".format(local_path, cloud_file.path, bytes_scaled(size)))", "def ingest(self, files):\n for file in files:\n self.files.add(file)", "def upload():\n form = request.form\n\n # Create a unique \"session ID\" for this particular batch of uploads.\n uuid = str(request.args.get('collection'))\n # uuid = str(uuid4())\n upload_key = str(current_user.id) + \"/\" + uuid\n\n # Is the upload using Ajax, or a direct POST by the form?\n is_ajax = False\n if form.get(\"__ajax\", None) == \"true\":\n is_ajax = True\n\n # Target folder for these uploads.\n target = \"uploads/{}\".format(current_user.id)\n\n if not os.path.exists(target):\n try:\n os.mkdir(target)\n except:\n if is_ajax:\n return ajax_response(False, \"Couldn't create user directory: {}\".format(target))\n else:\n return \"Couldn't create user directory: {}\".format(target)\n\n # Target folder for these uploads.\n target = \"uploads/{}\".format(upload_key)\n try:\n os.mkdir(target)\n except:\n if is_ajax:\n return ajax_response(False, \"Couldn't create upload directory: {}\".format(target))\n else:\n return \"Couldn't create upload directory: {}\".format(target)\n\n for upload in request.files.getlist(\"file\"):\n filename = upload.filename.rsplit(\"/\")[0]\n destination = \"/\".join([target, filename])\n print \"Accept incoming file:\", filename\n print \"Save it to:\", destination\n upload.save(destination)\n\n if is_ajax:\n return ajax_response(True, uuid)\n else:\n return redirect(url_for(\"core.upload_success\", uuid=uuid))" ]
[ "0.75896925", "0.7002882", "0.69199824", "0.6625899", "0.6493321", "0.6487956", "0.63541657", "0.6290902", "0.627746", "0.6256918", "0.62544245", "0.6240064", "0.6217582", "0.6194342", "0.6168501", "0.6125494", "0.6114586", "0.6112695", "0.6050361", "0.60037833", "0.5988256", "0.5973389", "0.59679955", "0.5954634", "0.59424907", "0.59119004", "0.5858085", "0.585104", "0.5848329", "0.5830447" ]
0.77975726
0
The total width of the widget, in pixels. Includes the border and margin.
def widget_width(self) -> int: return self.widget_size[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def margin_width(self):\n return self.border_width() + self.margin_left + self.margin_right", "def width(self) :\n return self.m_width", "def width (self):\n return self._w", "def getWidth(self):\n return DEFAULT_WIDTH", "def getWidth(self):\n return constants.DEFAULT_WIDTH", "def width(self) -> int:\n self.tk_ref.update()\n return self.tk_ref.winfo_width()", "def fill_width(self) -> int:\n\n return self.widget_width - self._get_fill_border_size()", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width", "def width(self):\n return self._width" ]
[ "0.7723997", "0.76160294", "0.75883275", "0.7581459", "0.75692683", "0.75485593", "0.75204754", "0.7487707", "0.7487707", "0.7487707", "0.7487707", "0.7487707", "0.7487707", "0.7487707", "0.7487707", "0.7487707", "0.7487707", "0.7487707", "0.7487707", "0.7464676", "0.7464676", "0.7464676", "0.7464676", "0.7464676", "0.7464676", "0.7464676", "0.7464676", "0.7464676", "0.7464676", "0.7464676" ]
0.81201506
0
Gets the currently configured thickness of the border (in pixels)
def border_thickness(self) -> int: return self._border_thickness
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def border_thickness(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"border_thickness\")", "def border_thickness(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"border_thickness\")", "def border_thickness(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"border_thickness\")", "def thickness(self):\n return self._thickness", "def border_width(self):\n if self.has_border:\n return self._border_actor.GetProperty().GetLineWidth()\n return 0", "def _get_fill_border_size(self) -> int:\n\n return (2 * self.border_thickness) + (2 * self.margin_size)", "def border_width(self):\n return self.padding_width() + self.border_left_width + \\\n self.border_right_width", "def GetBorderPen(self):\r\n\r\n return self._borderPen", "def GetBorder(self):\r\n\r\n if wx.Platform == \"__WXMAC__\":\r\n return 6\r\n elif wx.Platform == \"__WXGTK__\":\r\n return 3\r\n elif wx.Platform == \"__WXMSW__\":\r\n return self._pButton.ConvertDialogSizeToPixels(wx.Size(2, 0)).x\r\n else:\r\n return 5", "def fl_get_border_width():\n _fl_get_border_width = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_border_width\",\\\n cty.c_int, [],\\\n \"\"\"int fl_get_border_width()\"\"\")\n library.check_if_flinitialized()\n retval = _fl_get_border_width()\n return retval", "def getBorder(self):\n return self.__border", "def get_border(self):\n return self._border", "def border_height(self):\n return self.padding_height() + self.border_top_width + \\\n self.border_bottom_width", "def _get_border(self, border, size):\n k = 2 * border / size\n i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))\n return border // i", "def thickness(self) -> ErrorValue:\n return ErrorValue(self._data['Thickness'], self._data.setdefault('ThicknessError',0.0))", "def getBorder(self, borderWidth=2):\r\n\r\n patchMap = np.array(self.array, dtype=np.float32)\r\n\r\n smallPatch = ni.binary_erosion(patchMap, iterations=borderWidth).astype(np.float32)\r\n\r\n border = patchMap - smallPatch\r\n\r\n border[border == 0] = np.nan\r\n\r\n return border", "def border(self):\r\n\t\treturn self._border", "def border(self):\n return self._border", "def border(self):\n return self._border", "def GetToolBorderPadding(self):\r\n\r\n return self._tool_border_padding", "def _scrollbar_thickness(self, orientation):\n style = self.widget.style()\n options = QtGui.QStyleOptionSlider()\n options.orientation = orientation\n return style.pixelMetric(style.PM_ScrollBarExtent, options)", "def margin_width(self):\n return self.border_width() + self.margin_left + self.margin_right", "def calc_thickness(self):\n s = \"::: calculating z-varying thickness :::\"\n print_text(s, cls=self)\n #H = project(self.S - self.x[2], self.Q, annotate=False)\n H = self.vert_integrate(Constant(1.0), d='down')\n Hv = H.vector()\n Hv[Hv < 0] = 0.0\n print_min_max(H, 'H', cls=self)\n return H", "def getWidth(self):\n return constants.DEFAULT_WIDTH", "def thickness(self, axis):\n self_body = _union_entities(self.bodies)\n\n axis = axis.copy()\n axis.normalize()\n other_axis = _get_arbitrary_perpendicular_unit_vector(axis)\n\n return app().measureManager.getOrientedBoundingBox(self_body, axis, other_axis).length", "def get_height(self):\n height = 0\n for layer, ldata in self.conf['Layers'].items():\n layer_t = ldata['params']['thickness']\n height += layer_t\n return height", "def current_width(self, factor: Number=1) -> float:\n return self.width + self.spaces_width*factor", "def horizontal_scrollbar_thickness(self):\n return 
self._scrollbar_thickness(QtCore.Qt.Vertical)", "def getWidth(self) -> int:\n ...", "def getWidth(self):\n return DEFAULT_WIDTH" ]
[ "0.81657225", "0.81657225", "0.81657225", "0.8057173", "0.79038316", "0.7642933", "0.73819387", "0.72433066", "0.71979153", "0.7078818", "0.70385265", "0.7023323", "0.68920004", "0.6884572", "0.6832354", "0.66562974", "0.66272616", "0.65804005", "0.65804005", "0.65035385", "0.64316463", "0.6427723", "0.6353788", "0.63497674", "0.63426256", "0.63216233", "0.63121945", "0.62772965", "0.6239668", "0.6210616" ]
0.8602707
0
The total height of the widget, in pixels. Includes the border and margin.
def widget_height(self) -> int: return self.widget_size[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def margin_height(self):\n return self.border_height() + self.margin_top + self.margin_bottom", "def get_height(self):\r\n return self._height", "def get_height(self):\r\n return self._height", "def get_height(self):\r\n return self._height", "def border_height(self):\n return self.padding_height() + self.border_top_width + \\\n self.border_bottom_width", "def fill_height(self) -> int:\n\n return self.widget_height - self._get_fill_border_size()", "def get_height(self):\n return self.__height", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def get_height(self):\n return self._height", "def getHeight(self):\n return self.height", "def getHeight(self):\n return self.height", "def height(self):\n _, ymin, _, ymax = self.viewport\n return self.parent.window_size[1] * (ymax - ymin)", "def height(self) :\n return self.m_height", "def get_height(self):\n return self.calc_height(self.root)", "def height(self) -> int:\n if self.props.max_height:\n max_height = UIMetric.parse(self.props.max_height).to_pixels(self.parent.height)\n return min(self.isize[1].to_pixels(self.parent.height), max_height)\n else:\n return self.isize[1].to_pixels(self.parent.height)", "def height(self) -> int:\n self.tk_ref.update()\n return self.tk_ref.winfo_height()", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height", "def height(self):\n return self.__height" ]
[ "0.7872301", "0.7724713", "0.7724713", "0.7724713", "0.77205145", "0.77104795", "0.76858354", "0.7669082", "0.7669082", "0.7669082", "0.7669082", "0.76284504", "0.76284504", "0.7616629", "0.7573606", "0.7535248", "0.75344294", "0.75181544", "0.7510058", "0.7510058", "0.7510058", "0.7510058", "0.7510058", "0.7510058", "0.7510058", "0.7510058", "0.7510058", "0.7510058", "0.7510058", "0.7510058" ]
0.86296713
0
Returns the currently configured value for the color of the outline (border) of the widget.
def border_color(self) -> int: return self._border_color
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def border_color(self):\n return self._border_rgba", "def border_color(self):\n if self.has_border:\n return Color(self._border_actor.GetProperty().GetColor())\n return None", "def borderColor( self ):\n return self._borderColor", "def bordercolor(self):\n return self[\"bordercolor\"]", "def get_color(self):\n return self.color", "def get_border(self):\n return self._border", "def get_color(self):\n\n return self.color", "def get_color(self) -> str:\n return self.color", "def color(self):\n return rgba(self.value_of_css_property('color'))", "def getFgColor(self):\n return self.fgColor", "def get_color(self) -> str:\r\n return self.color", "def get_color(self):\r\n return self.__color", "def color(self):\n return self.container['color']", "def get_color(self):\n return self._color", "def get_color(self):\n return self._color", "def get_color(self):\r\n return self._color", "def getBorder(self):\n return self.__border", "def color(self):\n return self.settings['color']", "def get_color(self):\n return self._io.last_state['color']['front-center']", "def getColor(self):\n return self._l[2]", "def border(self):\n return self._border", "def border(self):\n return self._border", "def bordercolorsrc(self):\n return self[\"bordercolorsrc\"]", "def get_color(self) -> Optional[str]:\n return self.color", "def getColor(self):\r\n return self.color", "def get_colour(self):\n return self.colour", "def get_color(self):\n\n return self._color", "def get_color(self):\n return COLOR_DICT[self.element]", "def getColor(self):\n return self.color", "def border(self):\r\n\t\treturn self._border" ]
[ "0.76036835", "0.73927736", "0.73853207", "0.7094367", "0.6450684", "0.6438329", "0.6391727", "0.6375041", "0.6371977", "0.63530296", "0.63462377", "0.6346167", "0.63437575", "0.63370436", "0.63370436", "0.6334489", "0.6313817", "0.6308941", "0.6305914", "0.6291787", "0.6287082", "0.6287082", "0.6286284", "0.6277729", "0.62708545", "0.6269231", "0.6250928", "0.62472445", "0.6245203", "0.62325615" ]
0.7435684
1
The fill of the progress bar. Can be a hex value for a color or ``None`` for transparent.
def fill(self) -> int: return self._fill_color
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fillcolor(self, *args):\n if args:\n color = self._colorstr(args)\n if color == self._fillcolor:\n return\n self.pen(fillcolor=color)\n else:\n return self._color(self._fillcolor)", "def fill(self, colour: int, /) -> None:", "def fillcolor(self):\n return self._fillcolor", "def fill(self, color):", "def fill_color(self) -> String:\r\n from apysc.type import value_util\r\n self._initialize_fill_color_if_not_initialized()\r\n fill_color: String = value_util.get_copy(value=self._fill_color)\r\n return fill_color", "def fill(self, value):\n self.fill_color = value", "def fill_color(self, fill_color=None):\n\n if fill_color is None:\n return self._fill_color\n else:\n self._fill_color = process_color(fill_color)", "def fill(self, rgb, alpha=100):\n self.call('fill', rgb, alpha)", "def fill(self, color):\n self.format.fill(self, color)", "def GetFillAlpha(self):\n return self._attalpha[\"fill\"]", "def fill(self):\n return self[\"fill\"]", "def fill(self):\n return self[\"fill\"]", "def setFill(self, color):\n self._reconfig(\"fill\", color)", "def getFillColor(self):\n return getColor() if (fillColor == None) else fillColor", "def setFill(self, fill):\n self.area_show = fill", "def set_fill_color(self, color: tuple) -> Rectangle:\n self.fill.color = color\n return self", "def begin_fill(\r\n self, color: StrOrString,\r\n alpha: Union[float, Number] = 1.0) -> None:\r\n from apysc.color import color_util\r\n from apysc.converter import cast\r\n from apysc.validation import color_validation\r\n from apysc.validation import number_validation\r\n self._initialize_fill_color_if_not_initialized()\r\n self._initialize_fill_alpha_if_not_initialized()\r\n if color != '':\r\n color = color_util.complement_hex_color(\r\n hex_color_code=color)\r\n self._fill_color.value = color\r\n number_validation.validate_num(num=alpha)\r\n if not isinstance(alpha, Number):\r\n alpha = cast.to_float_from_int(int_or_float=alpha)\r\n color_validation.validate_alpha_range(alpha=alpha)\r\n if isinstance(alpha, Number):\r\n self._fill_alpha.value = alpha.value\r\n else:\r\n self._fill_alpha.value = alpha", "def fill(self):\n return self._turtle.fill()", "def fill(self, color: Union[int, Tuple[int, int, int]]) -> None:\n self._fill_color = color\n if color is None:\n self._palette[0] = 0x00\n self._palette.make_transparent(0)\n else:\n self._palette[0] = color\n self._palette.make_opaque(0)", "def fill_style(self, color=None):\n self._impl.fill_style(color)", "def fill(self, color):\n self.fill_rect(0, 0, self.width, self.height, color)", "def setFillColor(self, color):\n fillColor = color\n repaint()", "def fill(self, x, y, color):\n raise NotImplementedError # Override this function in the Solution classes", "def _initialize_fill_color_if_not_initialized(self) -> None:\r\n if hasattr(self, '_fill_color'):\r\n return\r\n self._fill_color = String('')", "def setFilled(self, fill):\n isFilled = fill\n repaint()", "def fill_px(self, fill_px):\n\n self._fill_px = fill_px", "def write_fill(self, fill: FillFormat):\n if self.fill_type is not None:\n self._write_fill_type(fill)", "def fill(framebuf, color):\n if color:\n fill = 0xFF\n else:\n fill = 0x00\n for i in range(len(framebuf.buf)): # pylint: disable=consider-using-enumerate\n framebuf.buf[i] = fill", "def fill(framebuf, color):\n if color:\n fill = 0xFF\n else:\n fill = 0x00\n for i in range(len(framebuf.buf)): # pylint: disable=consider-using-enumerate\n framebuf.buf[i] = fill", "def sparkline_fill_color(self, sparkline_fill_color):\n\n self._sparkline_fill_color = 
sparkline_fill_color" ]
[ "0.7329175", "0.73097813", "0.7186909", "0.718688", "0.7166474", "0.7053421", "0.66755503", "0.6654663", "0.6640797", "0.66284734", "0.631836", "0.631836", "0.6185718", "0.61346", "0.6117504", "0.6089102", "0.6018615", "0.60022604", "0.5995704", "0.59930784", "0.596605", "0.5869576", "0.584694", "0.58016723", "0.57798296", "0.5750639", "0.57268226", "0.5725804", "0.5725804", "0.5725083" ]
0.7374778
0
The color of the bar's fill
def bar_color(self) -> int: return self._bar_color
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fill(self, color):", "def fillcolor(self):\n return self._fillcolor", "def fill(self, colour: int, /) -> None:", "def fillcolor(self, *args):\n if args:\n color = self._colorstr(args)\n if color == self._fillcolor:\n return\n self.pen(fillcolor=color)\n else:\n return self._color(self._fillcolor)", "def fill_color(self) -> String:\r\n from apysc.type import value_util\r\n self._initialize_fill_color_if_not_initialized()\r\n fill_color: String = value_util.get_copy(value=self._fill_color)\r\n return fill_color", "def fill(self) -> int:\n return self._fill_color", "def fill(self, value):\n self.fill_color = value", "def setBarColor(front=-1,side=-1,top=-1):\n dislin.barclr(front,side,top)", "def bar_color(self, color: Union[int, Tuple[int, int, int]]) -> None:\n\n self._bar_color = color\n\n if color is None:\n self._palette[2] = 0x00\n self._palette.make_transparent(2)\n else:\n self._palette[2] = color\n self._palette.make_opaque(2)", "def color(self):\n assert False, 'Pen does not have a color; use pencolor or fillcolor'", "def percentageColor(self):\n tup = None\n if self.percent >= 0.7:\n tup = Livebar.colors['green']\n elif self.percent >= 0.3 and self.percent < 0.7:\n tup = Livebar.colors['orange']\n elif self.percent >= 0.0 and self.percent < 0.3:\n tup = Livebar.colors['red']\n return tup", "def GetFillAlpha(self):\n return self._attalpha[\"fill\"]", "def color(self, data):\n\n red = np.interp(data, self.range, self.r)\n blue = np.interp(data, self.range, self.b)\n green = np.interp(data, self.range, self.g)\n # Style plot to return a grey color when value is 'nan'\n red[np.isnan(red)] = 240\n blue[np.isnan(blue)] = 240\n green[np.isnan(green)] = 240\n colors = np.dstack([red.astype(np.uint8),\n green.astype(np.uint8),\n blue.astype(np.uint8),\n np.full_like(data, 255, dtype=np.uint8)])\n #return colors.view(dtype=np.uint32).reshape(data.shape)\n c=[]\n for i in range(len(data)):\n c.append([red[i],green[i],blue[i]])\n return c", "def getFillColor(self):\n return getColor() if (fillColor == None) else fillColor", "def get_hp_bar_color(total, value):\n\t# 256 values per color, so red-to-yellow and yellow-to-green make 512 - but yellow gets counted twice, so it's really 511\n\tnum_colors = 511\n\tc = scale(value, total, num_colors)\n\treturn (min(num_colors - c, 255), min(c, 255), 0)", "def fill(self, rgb, alpha=100):\n self.call('fill', rgb, alpha)", "def color(self):\n return self['color']", "def getColor(self):\n return self._l[2]", "def fill_color(self, fill_color=None):\n\n if fill_color is None:\n return self._fill_color\n else:\n self._fill_color = process_color(fill_color)", "def get_color(self):\n return \"yellow\"", "def set_fill_color(self, color: tuple) -> Rectangle:\n self.fill.color = color\n return self", "def get_cell_colour(self, normalized_daily_amount):\n rgb = \"255,0,0\" if normalized_daily_amount < 0 else \"0, 255, 0\"\n return f\"rgba({rgb},{abs(normalized_daily_amount)})\"", "def getColor(self):\r\n return self.color", "def color(self):\n return self.COLOR", "def color(self):\n return self.container['color']", "def colors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetStackedBarColorArgs']]]]:\n return pulumi.get(self, \"colors\")", "def fill(self, color):\n self.format.fill(self, color)", "def getColor(self):\n return self.color", "def fill(self):\n return self[\"fill\"]", "def fill(self):\n return self[\"fill\"]" ]
[ "0.73191744", "0.710414", "0.6960717", "0.68388325", "0.6779252", "0.6712766", "0.6560369", "0.6522603", "0.65224266", "0.6330059", "0.62342465", "0.61958367", "0.6157005", "0.61489546", "0.6054532", "0.60152364", "0.59889066", "0.59789014", "0.590285", "0.58719695", "0.58598876", "0.5852091", "0.58430165", "0.583988", "0.58197707", "0.5782323", "0.5771086", "0.5756989", "0.5737575", "0.5737575" ]
0.73930514
0
Sets the color of the bar
def bar_color(self, color: Union[int, Tuple[int, int, int]]) -> None:\n    self._bar_color = color\n\n    if color is None:\n        self._palette[2] = 0x00\n        self._palette.make_transparent(2)\n    else:\n        self._palette[2] = color\n        self._palette.make_opaque(2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setBarColor(front=-1,side=-1,top=-1):\n dislin.barclr(front,side,top)", "def bar_color(self) -> int:\n\n return self._bar_color", "def setColors(self, background, barR, barG, barB, barA):\n self.volumeBar.override_background_color(\n Gtk.StateFlags.NORMAL, Gdk.RGBA(barR, barG, barB, barA))\n self.modify_bg(Gtk.StateType.NORMAL, Gdk.color_parse(background))", "def set_color(self, color):\n\t\tpass", "def set_color(self, color):\n pass", "def setColor(self,value):\n\t\tself.politics = value if(type(value) is int)else int(value[1:],16)\n\t\tself.canvas.itemconfig('node_'+self.identifier,fill=self.toRGB())", "def on_bar(self, bar: BarData):\n self.bg5.update_bar(bar)\n self.bg15.update_bar(bar)", "def setBarBorderColor(clr=-1):\n dislin.barbor(clr)", "def color(self, color_value):\n self.app.color = color_value", "def set_color(self, color):\n self.color = color", "def set_color(self, new_color):\n self.color = new_color", "def color(self, color):\n\n self.container['color'] = color", "def set_color(self):\n self.background_color = 'white' if self.book.is_completed else 'aqua'", "def setColour(self, col):\n\t\tself.colour = col", "def change_color(self, color):\n self.color = color", "def setBarLabelStyle(mode='none',position='auto', digits=1,color=-1):\n mdict = {'none':'NONE','second':'SECOND','first':'FIRST','delta':'DELTA'}\n pdict = {'inside':'INSIDE','outside':'OUTSIDE','left':'LEFT','right':'RIGHT',\n 'center':'CENTER','auto':'AUTO'}\n dislin.labels(mdict[mode],'BARS')\n dislin.labpos(pdict[position],'BARS')\n dislin.labdig(digits,'BARS')\n dislin.labclr(color,'BARS')", "def set_fig_color(self,curr_value):\n if np.isnan(curr_value):\n for spine in self.ax.spines.values():\n spine.set_edgecolor('orange')\n self.h.set_color(\"orange\")\n elif(curr_value >= self.config[\"danger_value\"]):\n self.h.set_color(\"red\")\n for spine in self.ax.spines.values():\n spine.set_edgecolor('red')\n else:\n self.h.set_color(\"blue\")\n for spine in self.ax.spines.values():\n spine.set_edgecolor('blue')", "def set_color(self, color: str):\n self.color = color", "def set_color(self, color):\n self._color = color", "def _colorBar(self):\n self.gc.show_colorscale(cmap=self.colormap, vmin=self.datamin, vmax=self.datamax, stretch=self.scale) \n axcb = self.f.add_axes([0.1, 0.70, 0.9, 0.025])\n if self.scale == 'linear': \n normcb = mpl.colors.Normalize(vmin=self.datamin/self.scl, vmax=self.datamax/self.scl)\n elif self.scale == 'log':\n normcb = mpl.colors.LogNorm(vmin=self.datamin/self.scl, vmax=self.datamax/self.scl)\n else:\n print(\"\\t=== Normalization not implemented for '{}' ===\".format(self.scale))\n\n cbar = mpl.colorbar.ColorbarBase(axcb, cmap=self.colormap, norm=normcb, orientation='horizontal')\n cbar.solids.set_edgecolor('face')\n cbar.set_label(self.cbarlabel, fontsize=self.ftsize1, horizontalalignment='center')\n cbar.ax.tick_params(labelsize=self.ftsize2)", "def setColorBarRange(start=1,end=254):\n dislin.colran(start,end)", "def on_bar(self, bar: BarData):\n self.bg.update_bar(bar)\n self.bg_large.update_bar(bar)", "def on_bar(self, bar: BarData):\n self.handle_bar(bar)\n self.bg5.update_bar(bar)\n # self.bg15.update_bar(bar)", "def _update_color(self, color):\n self.color = color", "def setColor(self, color):\n self.__color = color", "def _setColor(self, index):\n\n self.colorLabel.setStyleSheet(\"border: 1px solid black; background-color:rgb(%s, %s, %s);\" % (\n cControlColors[index][0] * 255, cControlColors[index][1] * 255,\n cControlColors[index][2] * 255))\n 
self.rgbColorDlg.setCurrentColor(QColor.fromRgb(\n cControlColors[index][0] * 255, cControlColors[index][1] * 255,\n cControlColors[index][2] * 255))\n self.colorSlider.setValue(index)", "def fill(self, value):\n self.fill_color = value", "def _set_color(self, r):\n c = COLORS[self.color]\n r.setLineColor(c[0], c[1], c[2])\n r.setColor(c[0], c[1], c[2])", "def on_bar(self, bar: BarData):\n self.bg_xhour.update_bar(bar)", "def setColorBarWidth(width):\n dislin.widbar(width)" ]
[ "0.7708162", "0.71365523", "0.68602777", "0.6804917", "0.67231643", "0.6674027", "0.66058344", "0.65714437", "0.6562546", "0.65551317", "0.6480851", "0.647629", "0.64459276", "0.64273405", "0.63670033", "0.63262546", "0.6308758", "0.6307943", "0.6272581", "0.6260553", "0.6255988", "0.625509", "0.62349", "0.62257165", "0.62124735", "0.6207625", "0.62037104", "0.6198189", "0.6193864", "0.6191731" ]
0.7670063
1
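The bar_color setter in the record above changes the bar's appearance by rewriting a single palette slot (index 2) and toggling its transparency, rather than redrawing the bitmap. A minimal, self-contained sketch of that palette-swap pattern follows; the StandInPalette class and the three-slot layout are assumptions for illustration.

# Stand-in for a displayio-style palette: slot 0 holds the fill/background color,
# slot 1 the border, slot 2 the bar. Transparency flags let "None" hide the bar
# without touching any pixels that were already drawn with index 2.
class StandInPalette:
    def __init__(self, size: int) -> None:
        self.colors = [0x000000] * size
        self.transparent = [False] * size

    def __setitem__(self, index: int, color: int) -> None:
        self.colors[index] = color

    def make_transparent(self, index: int) -> None:
        self.transparent[index] = True

    def make_opaque(self, index: int) -> None:
        self.transparent[index] = False


palette = StandInPalette(3)

# Equivalent of setting bar_color = 0x00FF00: store the color and make slot 2 visible.
palette[2] = 0x00FF00
palette.make_opaque(2)

# Equivalent of setting bar_color = None: zero the slot and hide it.
palette[2] = 0x00
palette.make_transparent(2)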
Sets the value for the underlying variable _progress, then calls self._render() with the appropriate values.
def _set_progress(self, value: float) -> None:\n    self._progress = round(value, 4)\n    self._render(self._old_value, self._value, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_progress_value(self, value):\r\n\r\n pass", "def set_progress(self, progress: float):", "def progress(self, value):\n self.step = float(value)\n self._draw()", "def _setProgress(self, progress):\n # print \"Progress set %.2f --------------------------------\" % progress\n\n if progress > 100.0:\n progress = 100.0\n\n self._progress = progress\n self._progressChangedNotifier.notify(self)", "def set_progress(self, value):\n self.gauge.SetValue(value)", "def setProgress(self, prog):\n\t\tself.progress = prog", "def _render(\n self,\n _old_value: Union[int, float],\n _new_value: Union[int, float],\n _progress_value: float,\n ) -> None:\n\n _prev_ratio, _new_ratio = self._get_ratios(_old_value, _new_value)\n _old_value_size, _new_value_size = self._get_value_sizes(\n _prev_ratio, _new_ratio\n )\n\n # Adjusts for edge cases, such as 0-width non-zero value, or 100% width\n # non-maximum values\n _new_value_size = self._adjust_size_for_range_limits(\n _new_value_size, _new_value\n )\n\n # Default values for increasing value\n _color = 2\n _incr = 1\n _start = max(_old_value_size, 0)\n _end = max(_new_value_size, 0)\n\n if _old_value_size >= _new_value_size:\n # Override defaults to be decreasing\n _color = 0 # Clear\n _incr = -1 # Iterate range downward\n _start = max(_old_value_size, 0) - 1\n _end = max(_new_value_size, 0) - 1\n # If we're setting to minimum, make sure we're clearing by\n # starting one \"bar\" further\n if _new_value == self.minimum:\n _start += 1\n\n _render_offset = self.margin_size + self.border_thickness\n\n vert_start, vert_end, vert_incr = self._get_vertical_fill(_start, _end, _incr)\n horiz_start, horiz_end, horiz_incr = self._get_horizontal_fill(\n _start, _end, _incr\n )\n\n vert_start += _render_offset\n vert_end += _render_offset\n horiz_start += _render_offset\n horiz_end += _render_offset\n\n for vertical_position in range(vert_start, vert_end, vert_incr):\n for horizontal_position in range(horiz_start, horiz_end, horiz_incr):\n self._bitmap[horizontal_position, vertical_position] = _color", "def progress(self, progress):\n\n self._progress = progress", "def progress(self, progress):\n\n self._progress = progress", "def _setProgress(self):\n\n self.progress = (self.iteration, self.iterationCount)", "def set_value(self, value):\n\n self._progress.setValue(value)", "def set_progress(self, step):\n if self._max and step > self._max:\n self._max = step\n elif step < 0:\n step = 0\n\n prev_period = int(self._step / self.redraw_freq)\n curr_period = int(step / self.redraw_freq)\n\n self._step = step\n\n if self._max:\n self._percent = self._step / self._max\n else:\n self._percent = 0.0\n\n if prev_period != curr_period or self._max == step:\n self.display()", "def setProgress(self, value: int):\n self.ui.progress.setValue(value)", "def start_progress_bar(self):\r\n self.progress[\"value\"] = self.progress_step", "def update_progressbar(self, count, value):\n self.status(\"Progress %s/%s\" % (value, count))", "def progress(self, value: float) -> None:\n\n if not isinstance(value, (float, int)):\n raise TypeError(\"'progress' must be an int or a float\")\n\n if not 0.0 <= value <= 100.0:\n raise ValueError(\"'progress' must be between 0 and 100\")\n\n self.value = (self.minimum + (self.maximum - self.minimum)) * (value * 0.01)", "def set_progress(self, progress: int) -> None:\n self.update(progress % len(self.frames)) # prevent IndexError if progress >= len(frames)", "def setProgress(self, val):\n if val is not None:\n self._progressBar.show()\n 
self._progressBar.setTextVisible(True)\n self.progress = val\n try:\n self._progressBar.setValue(self.progress)\n except:\n pass\n else:\n self._progressBar.setTextVisible(False)\n self._progressBar.hide()\n self._progressBar.reset()\n\n if self.isHidden is True:\n self.isHidden = False\n self.show_()", "def SetProgress(self, frac):\n self.script.append(\"set_progress(%f);\" % (frac,))", "def increase_progress(self, value):\r\n\r\n pass", "def update_progress(self, value=None):\n if self.main_app is not None:\n if value is not None:\n self.main_app.update_progress(value)\n else:\n if self.total_files != 0:\n self.main_app.update_progress((self.current_file / self.total_files) * 100)", "def render(self, progress):\n assert progress >= 0, \"Progress must be positive\"\n\n pct = progress / self.end\n n_bars = int(pct * self.length)\n bars = \"=\" * n_bars\n if n_bars < self.length:\n bars += \">\"\n\n return f\"[{bars:70s}] {pct:4.0%} {progress:8d} / {self.end:8d}\"", "def set_progress(self, current):\n self._current = current\n if self._last_time is None or (datetime.datetime.now() - self._last_time).seconds > 1:\n self._update_time()\n\n self._draw()\n if self._current == self._total:\n self.reset(0)", "def value(self, value: Union[int, float]) -> None:\n\n if not isinstance(value, (int, float)):\n raise TypeError(\"The value to set must be either an integer or a float\")\n\n if not self.minimum <= value <= self.maximum:\n raise ValueError(\n f\"The value must be between minimum ({self.minimum}) and maximum ({self.maximum})\"\n )\n\n # Save off the previous value, so we can pass it in the\n # call to \"Render\"\n self._old_value = self._value\n self._value = value\n # Convert value to float since we may be dealing with\n # integer types, and we can't work with integer division\n # to get a ratio (position) of \"value\" within range.\n self._set_progress(self.get_value_ratio(value))", "def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)", "def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)", "def _bar_progress(self, count, done=False):\n if self.blank:\n return\n self.current_count = count\n count = min(count, self.total)\n if self.total == count or not self.total:\n complete = 100\n else:\n complete = int(floor(100.0*count/self.total))\n if complete <= self.last_percent:\n return\n self.last_percent = complete\n if self.view_type is self.PERCENT:\n self.f.write('\b\b\b\b%3d%%' % complete)\n elif self.view_type is self.BAR:\n blockcount = int(complete//2)\n if blockcount <= self.blockcount:\n return\n for i in range(self.blockcount, blockcount):\n self.f.write(self.bar_char)\n self.blockcount = blockcount\n else:\n raise Exception('unknown value for view_type: %r' % self.view_type)\n if complete == 100:\n self.f.write('\\n')\n self.f.flush()", "def set_Progress(self,func):\n self.__obj.set_Progress(func)", "def setProgress(self, progress):\n\t\tself.config.PROGRESS = [progress]", "def current_progress(self, current_progress):\n\n self._current_progress = current_progress" ]
[ "0.7885928", "0.772645", "0.7424771", "0.7295443", "0.72013587", "0.71065104", "0.70746356", "0.70708585", "0.70708585", "0.701697", "0.70077187", "0.70045197", "0.6987702", "0.6974532", "0.69179356", "0.68095684", "0.6750767", "0.6744838", "0.6734447", "0.66794497", "0.66031605", "0.65641457", "0.6560354", "0.65372336", "0.6495226", "0.6495226", "0.6406498", "0.6378105", "0.63753635", "0.6295241" ]
0.86373645
0
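_set_progress in the record above stores a rounded 0.0 to 1.0 fraction and hands the old and new values to _render. A hedged sketch of how such a fraction is commonly mapped to a pixel fill; all sizes and names here are assumed for illustration.

inner_width = 94          # assumed: widget width minus borders and margins
old_progress = 0.25       # fraction previously rendered
new_progress = 0.4167     # new fraction, e.g. round(value_ratio, 4)

old_fill = int(inner_width * old_progress)   # 23 columns already filled
new_fill = int(inner_width * new_progress)   # 39 columns should be filled

# Only the difference needs redrawing: paint forward when growing,
# clear backward when shrinking.
if new_fill >= old_fill:
    columns_to_touch = range(old_fill, new_fill)
else:
    columns_to_touch = range(old_fill - 1, new_fill - 1, -1)

print(len(list(columns_to_touch)))   # 16 columns change in this example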
Draws the outline (border) of the progressbar, with a thickness value from self.border_thickness.
def _draw_outline(self) -> None:\n    stroke = self.border_thickness\n\n    # draw outline rectangle\n    for _w in range(self.widget_width):\n        for line in range(stroke):\n            self._bitmap[_w, line] = 1\n            self._bitmap[_w, self.widget_height - 1 - line] = 1\n\n    for _h in range(self.widget_height):\n        for line in range(stroke):\n            self._bitmap[line, _h] = 1\n            self._bitmap[self.widget_width - 1 - line, _h] = 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drawBorder(self):\n\t\t# horizontal lines\n\t\tself.wts(0, 0, '╭' + '─' * (self.width - 2) + '╮', self._borderColor)\t\t\t\t\t\t# Top\n\t\tself.wts(self.height - 2, 0, '└' + '─' * (self.width - 2) + '╯', self._borderColor)\t\t\t# Bottom\n\t\t# vertical lines\n\t\tfor yPos in range(1, self.height - 2):\n\t\t\tself.wts(yPos, 0, '│', self._borderColor)\n\t\t\tself.wts(yPos, self.width - 1, '│', self._borderColor)", "def drawBorder(self,color,x1,y1,x2,y2,thick):\n self.drawRect(color,x1,y1,x2,y1+thick)\n self.drawRect(color,x1,y1,x1+thick,y2)\n self.drawRect(color,x2-thick,y1,x2,y2)\n self.drawRect(color,x1,y2-thick,x2,y2)", "def DrawBorder(self, dc, window, rect, pane): \r\n\r\n drect = wx.Rect(*rect)\r\n \r\n dc.SetPen(self._border_pen)\r\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\r\n\r\n border_width = self.GetMetric(AUI_DOCKART_PANE_BORDER_SIZE)\r\n\r\n if pane.IsToolbar():\r\n \r\n for ii in xrange(0, border_width):\r\n \r\n dc.SetPen(wx.WHITE_PEN)\r\n dc.DrawLine(drect.x, drect.y, drect.x+drect.width, drect.y)\r\n dc.DrawLine(drect.x, drect.y, drect.x, drect.y+drect.height)\r\n dc.SetPen(self._border_pen) \r\n dc.DrawLine(drect.x, drect.y+drect.height-1,\r\n drect.x+drect.width, drect.y+drect.height-1)\r\n dc.DrawLine(drect.x+drect.width-1, drect.y,\r\n drect.x+drect.width-1, drect.y+drect.height)\r\n drect.Deflate(1, 1)\r\n \r\n else:\r\n \r\n for ii in xrange(0, border_width):\r\n \r\n dc.DrawRectangle(drect.x, drect.y, drect.width, drect.height)\r\n drect.Deflate(1, 1)", "def drawBorder(self):\n if self._focused:\n self._window.attron(curses.A_BOLD)\n else:\n self._window.attroff(curses.A_BOLD)\n self._window.border()\n self._window.addstr(0, 1, self.__title)\n self._window.attroff(curses.A_BOLD)", "def setBorder(self, b):\n self.border = fn.mkPen(b)\n self.update()", "def draw(self):\n if self.master != None :\n outline = Cell.FILLED_COLOR_BORDER if self.fill else Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = self.fill, outline = outline)", "def border_thickness(self) -> int:\n return self._border_thickness", "def make_border(self):\n\n scaled_inside = self.inside_border * self.SCALE\n scaled_outside = self.outside_border * self.SCALE\n scaled_width = self.width * self.SCALE\n\n horizontal_line = 'M {x0} {y0} h {length} v {width} h -{length} z'\n vertical_line = 'M {x0} {y0} v {length} h {width} v -{length} z'\n\n subpaths = list()\n # top line\n subpaths.append(horizontal_line.format(\n x0=scaled_outside,\n y0=scaled_outside,\n length=scaled_inside * 2 + scaled_width + 2 * self.line_size,\n width=self.line_size\n ))\n\n # bottom line - first segment\n subpaths.append(horizontal_line.format(\n x0=scaled_outside,\n y0=scaled_outside + self.line_size + 2 * scaled_inside + scaled_width,\n length=self.SCALE * 2,\n width=self.line_size\n ))\n\n # bottom line - second segment\n subpaths.append(horizontal_line.format(\n x0=scaled_outside + 22 * self.SCALE,\n y0=scaled_outside + self.line_size + 2 * scaled_inside + scaled_width,\n length=scaled_width + 2 * scaled_inside + 2 * self.line_size - 22 * self.SCALE, # 22 = 2 + 2 + 16 + 2\n width=self.line_size\n ))\n\n # left line\n subpaths.append(vertical_line.format(\n x0=scaled_outside,\n y0=scaled_outside + self.line_size,\n length=scaled_width + 2 * scaled_inside,\n width=self.line_size\n ))\n\n # right line\n subpaths.append(vertical_line.format(\n x0=scaled_outside + self.line_size + 2 * 
scaled_inside + scaled_width,\n y0=scaled_outside + self.line_size,\n length=scaled_width + 2 * scaled_inside,\n width=self.line_size\n ))\n\n return ET.Element(ET.QName(\"path\"), style=self.QR_PATH_STYLE, d=' '.join(subpaths), id=\"qrplatba-border\")", "def _draw_history_border(self, painter):\n\t\tx = self._history_left\n\t\ty = self._history_top\n\t\tw = self._history_width\n\t\th = self._history_bottom - self._history_top\n\n\t\tpainter.setBrush(QtCore.Qt.NoBrush)\n\t\tpainter.setPen(QtCore.Qt.black)\n\t\tpainter.drawRect(x, y, w, h)\n\t\tpainter.setBrush(self._default_brush)\n\t\tpainter.setPen(self._default_pen)", "def draw_border():\n \n length = len(BORDER_COORDS)\n \n # Constants for sine wave\n b = 2 * math.pi / length\n speed = 2\n \n # Draw sinusoid red/green design\n for i in range(length):\n # Sine function\n t = perf_counter()\n sine = math.sin(b*i + speed*t) # Wave with period 28\n \n # Map sine value from [-1, 1] to [0, 4)\n red = min(math.floor(2 * sine + 2), 3)\n \n # Fade red and green colors\n lp.led_ctrl_xy(*BORDER_COORDS[i], red, 3 - red)", "def set_bottom_border(self, val):\n self.bborder = val", "def set_border(self, color: tuple = (0, 0, 0, 255), width: int = 1):\n self.border_color = color\n self.border = width", "def drawRectWithBorder(screen, bColor, fColor, posX, posY, height, width, bWidth):\n \n #draw outline rect \n pygame.draw.rect(screen, bColor, (posX, posY, height, width))\n #draw fill rect\n pygame.draw.rect(screen, fColor, (posX + bWidth, posY + bWidth, height - bWidth * 2, width - bWidth * 2))", "def GetBorderPen(self):\r\n\r\n return self._borderPen", "def update_border(self):\n # side borders\n for row_position in range(NUM_ROWS+BORDER_WIDTH*2):\n self.stdscr.addstr(row_position, 0, '|', curses.color_pair(7))\n self.stdscr.addstr(row_position, NUM_COLUMNS*BLOCK_WIDTH+1, '|', curses.color_pair(7))\n # top and bottom borders\n for column_position in range(NUM_COLUMNS*BLOCK_WIDTH+BORDER_WIDTH*2):\n self.stdscr.addstr(0, column_position, '-', curses.color_pair(7))\n self.stdscr.addstr(NUM_ROWS+1, column_position, '-', curses.color_pair(7))", "def DrawSplitterBorder(*args, **kwargs):\n return _gdi_.RendererNative_DrawSplitterBorder(*args, **kwargs)", "def set_line_thickness(thickness):\r\n global _current_line_thickness\r\n _current_line_thickness = thickness", "def _get_fill_border_size(self) -> int:\n\n return (2 * self.border_thickness) + (2 * self.margin_size)", "def set_borders(self, val):\n self.rborder = val\n self.lborder = val\n self.tborder = val\n self.bborder = val", "def border(self):\n ...", "def paint_event(self, event):\n\n painter = QtGui.QPainter(self.overlay)\n\n # paint \"max-length\" vertical bar\n painter.setPen(QtGui.QColor(207, 228, 255, 20))\n x = self.get_line_length_width()\n painter.drawLine(x, 0, x, kk.INF_HEIGHT)\n\n painter.setPen(QtCore.Qt.NoPen)\n\n if len(self.cursors) > 1:\n for cursor in self.cursors:\n painter.setBrush(QtGui.QColor(*self.cursor_colors[self.cursor_state]))\n try:\n rect = self.txt_edit.cursorRect(cursor)\n # self.padding must be False on Maya Script Editor but may\n # have to be applied if used on another QTextEdit\n x_pos = rect.x() +kk.LEFT_PADDING if self.apply_padding else rect.x()\n\n painter.drawRect(\n x_pos,\n rect.y(),\n rect.width(),\n rect.height()\n )\n\n except:\n pass\n\n rect = self.txt_edit.cursorRect(self.cursors[-1])\n painter.setBrush(QtGui.QColor(207, 228, 255, 10))\n painter.drawRect(\n 0,\n rect.y(),\n kk.INF_WIDTH,\n rect.height()\n )", "def drawProgressBar(self, percent, 
barLen = 20):\n sys.stdout.write(\"\\r\")\n progress = \"\"\n for i in range(barLen):\n if i < int(barLen * percent):\n progress += \"=\"\n else:\n progress += \" \"\n sys.stdout.write(\"[ %s ] %.2f%%\" % (progress, percent * 100))\n sys.stdout.flush()", "def paintEvent(self, event):\n\n painter = QPainter(self)\n\n painter.setPen(QPen(Qt.cyan, 3, Qt.DotLine))\n\n x = round(self.geometry.width()/2 - 150)\n y = round(self.geometry.height()/6)\n height = round(4*self.geometry.height()/6)\n \n painter.drawRect(x, y, 175, height)\n painter.end()", "def DrawSeparator(self, dc, wnd, _rect):\r\n \r\n horizontal = True\r\n if self._agwFlags & AUI_TB_VERTICAL:\r\n horizontal = False\r\n\r\n rect = wx.Rect(*_rect)\r\n\r\n if horizontal:\r\n \r\n rect.x += (rect.width/2)\r\n rect.width = 1\r\n new_height = (rect.height*3)/4\r\n rect.y += (rect.height/2) - (new_height/2)\r\n rect.height = new_height\r\n \r\n else:\r\n \r\n rect.y += (rect.height/2)\r\n rect.height = 1\r\n new_width = (rect.width*3)/4\r\n rect.x += (rect.width/2) - (new_width/2)\r\n rect.width = new_width\r\n \r\n start_colour = StepColour(self._base_colour, 80)\r\n end_colour = StepColour(self._base_colour, 80)\r\n dc.GradientFillLinear(rect, start_colour, end_colour, (horizontal and [wx.SOUTH] or [wx.EAST])[0])", "def border_color(self) -> int:\n return self._border_color", "def border_thickness(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"border_thickness\")", "def border_thickness(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"border_thickness\")", "def border_thickness(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"border_thickness\")", "def setBarBorderColor(clr=-1):\n dislin.barbor(clr)", "def outline(self, x, y, width=None, height=None, char=None,\n fg=(255, 255, 255), bg=None):\n self.console.draw_frame(x, y, width, height, char, fg, bg)" ]
[ "0.6980046", "0.6372926", "0.6327431", "0.6217453", "0.5957238", "0.58896434", "0.58797497", "0.5851642", "0.58446896", "0.58434147", "0.58277005", "0.5813987", "0.5771594", "0.57411146", "0.57224476", "0.56495273", "0.55723834", "0.55633676", "0.5514165", "0.5503805", "0.548805", "0.54446906", "0.54340106", "0.53964716", "0.538282", "0.5376009", "0.5376009", "0.5376009", "0.5364318", "0.53437674" ]
0.7037233
0
Determines the visual space reserved for the widget based on the defined border thickness, and whether a margin should be placed between the border and the bar. The value is calculated as (2 x border_thickness) plus (2 x margin_size). The value for margin_size is either 0 (zero) or 1 (one), depending on the margin_size requested when the widget was created.
def _get_fill_border_size(self) -> int: return (2 * self.border_thickness) + (2 * self.margin_size)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def margin_size(self, value: int) -> None:\n\n if not isinstance(value, int):\n raise TypeError(\"The margin size must be an integer\")\n\n margin_spacing = (2 * value) + (2 * self._border_thickness)\n\n if margin_spacing >= self.widget_width:\n raise ValueError(\n \"The size of the borders and margins combined can total the same or more\"\n \"than the widget's width.\"\n )\n\n if margin_spacing >= self.widget_height:\n raise ValueError(\n \"The size of the borders and margins combined can total the same or more\"\n \"than the widget's height.\"\n )\n\n self._margin_size = value\n self._set_progress(self._progress) # For a render pass", "def GetBorder(self):\r\n\r\n if wx.Platform == \"__WXMAC__\":\r\n return 6\r\n elif wx.Platform == \"__WXGTK__\":\r\n return 3\r\n elif wx.Platform == \"__WXMSW__\":\r\n return self._pButton.ConvertDialogSizeToPixels(wx.Size(2, 0)).x\r\n else:\r\n return 5", "def border_thickness(self) -> int:\n return self._border_thickness", "def DrawBorder(self, dc, window, rect, pane): \r\n\r\n drect = wx.Rect(*rect)\r\n \r\n dc.SetPen(self._border_pen)\r\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\r\n\r\n border_width = self.GetMetric(AUI_DOCKART_PANE_BORDER_SIZE)\r\n\r\n if pane.IsToolbar():\r\n \r\n for ii in xrange(0, border_width):\r\n \r\n dc.SetPen(wx.WHITE_PEN)\r\n dc.DrawLine(drect.x, drect.y, drect.x+drect.width, drect.y)\r\n dc.DrawLine(drect.x, drect.y, drect.x, drect.y+drect.height)\r\n dc.SetPen(self._border_pen) \r\n dc.DrawLine(drect.x, drect.y+drect.height-1,\r\n drect.x+drect.width, drect.y+drect.height-1)\r\n dc.DrawLine(drect.x+drect.width-1, drect.y,\r\n drect.x+drect.width-1, drect.y+drect.height)\r\n drect.Deflate(1, 1)\r\n \r\n else:\r\n \r\n for ii in xrange(0, border_width):\r\n \r\n dc.DrawRectangle(drect.x, drect.y, drect.width, drect.height)\r\n drect.Deflate(1, 1)", "def margin_height(self):\n return self.border_height() + self.margin_top + self.margin_bottom", "def margin_width(self):\n return self.border_width() + self.margin_left + self.margin_right", "def border_thickness(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"border_thickness\")", "def border_thickness(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"border_thickness\")", "def border_thickness(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"border_thickness\")", "def margin(self):\r\n return self._generate_spacing_info(self.config['margin'])", "def DrawSplitterBorder(*args, **kwargs):\n return _gdi_.RendererNative_DrawSplitterBorder(*args, **kwargs)", "def margins(self) -> tuple[int, int, int, int]:\n return self._widget._mgui_get_margins()", "def getMargin(self):\n assert False", "def margin_size(self) -> int:\n return self._margin_size", "def vertical_spacing(self):\r\n return self.padding[0] + self.padding[2] + self.margin[0] + self.margin[2]", "def _create_margin_shape(self):\n\n a, b = gc((self.size/2 - self.margin),\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.margin_points = zip(a, b)\n self.margin_polygon = gdspy.Polygon(self.margin_points, self.MARGIN_LAYER)\n self.cell.add(self.margin_polygon)", "def GetToolBorderPadding(self):\r\n\r\n return self._tool_border_padding", "def get_margin(self):\n _pal.lib.geometry_get_margin.restype = c.c_float\n return _pal.lib.geometry_get_margin(self._geometry)", "def border_width(self):\n return self.padding_width() + self.border_left_width + \\\n self.border_right_width", "def drawBorder(self):\n\t\t# horizontal lines\n\t\tself.wts(0, 0, '╭' + '─' * 
(self.width - 2) + '╮', self._borderColor)\t\t\t\t\t\t# Top\n\t\tself.wts(self.height - 2, 0, '└' + '─' * (self.width - 2) + '╯', self._borderColor)\t\t\t# Bottom\n\t\t# vertical lines\n\t\tfor yPos in range(1, self.height - 2):\n\t\t\tself.wts(yPos, 0, '│', self._borderColor)\n\t\t\tself.wts(yPos, self.width - 1, '│', self._borderColor)", "def horizontal_spacing(self):\r\n return self.padding[1] + self.padding[3] + self.margin[1] + self.margin[3]", "def border_width(self):\n if self.has_border:\n return self._border_actor.GetProperty().GetLineWidth()\n return 0", "def getBorder(self):\n return self.__border", "def border_height(self):\n return self.padding_height() + self.border_top_width + \\\n self.border_bottom_width", "def _get_border(self, border, size):\n k = 2 * border / size\n i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))\n return border // i", "def change_margin(self, margin):\n self.margin = margin * self._MM_IN_MICRONS\n\n self._create_drawing_area()\n self.partition(self.rows, self.cols)", "def available_margin(self) -> float:\n return self.position.exchange.available_margin", "def set_borders(self, val):\n self.rborder = val\n self.lborder = val\n self.tborder = val\n self.bborder = val", "def GetDefaultBorder(self):\r\n\r\n return wx.BORDER_NONE", "def GetDefaultBorder(self):\r\n\r\n return wx.BORDER_NONE" ]
[ "0.69769204", "0.6564969", "0.6504523", "0.6388232", "0.6185595", "0.61238915", "0.6095111", "0.6095111", "0.6095111", "0.6057181", "0.60567766", "0.5963709", "0.5865675", "0.58175457", "0.57987976", "0.575753", "0.5754858", "0.5664051", "0.5650303", "0.5649332", "0.5642728", "0.564079", "0.55628985", "0.5510461", "0.55030113", "0.54975915", "0.54894024", "0.5478878", "0.54671186", "0.54671186" ]
0.6629648
1
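A quick worked check of the reserved-space computation in the record above; the concrete border and margin values are assumed for illustration.

border_thickness = 3   # assumed example values
margin_size = 1
reserved = (2 * border_thickness) + (2 * margin_size)
assert reserved == 8   # 8 pixels along each axis are unavailable to the fill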
Returns the size of the margin on a single side of the display
def margin_size(self) -> int: return self._margin_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def margin_width(self):\n return self.border_width() + self.margin_left + self.margin_right", "def margin(self):\r\n return self._generate_spacing_info(self.config['margin'])", "def getMargin(self):\n assert False", "def get_margin(self):\n _pal.lib.geometry_get_margin.restype = c.c_float\n return _pal.lib.geometry_get_margin(self._geometry)", "def get_margin(self):\n return unicode(self._visual_indent * 20)", "def margin(self) -> Tuple[int, int, int, int]:\n return (self.imargin[0].to_pixels(self.parent.width),\n self.imargin[1].to_pixels(self.parent.width),\n self.imargin[2].to_pixels(self.parent.height),\n self.imargin[3].to_pixels(self.parent.height))", "def _get_margin_width(self, margin: Margin) -> int:\n\n # Margin.get_width, needs to have a UIContent instance.\n def get_ui_content() -> UIContent:\n return self._get_ui_content(width=0, height=0)\n\n def get_width() -> int:\n return margin.get_width(get_ui_content)\n\n key = (margin, get_app().render_counter)\n return self._margin_width_cache.get(key, get_width)", "def margins(self) -> tuple[int, int, int, int]:\n return self._widget._mgui_get_margins()", "def available_margin(self) -> float:\n return self.position.exchange.available_margin", "def horizontal_spacing(self):\r\n return self.padding[1] + self.padding[3] + self.margin[1] + self.margin[3]", "def getSpacing(self):\n\t\tif not self.spacing:\n\t\t\ta, b, c = self.getVoxelSize()\n\t\t\tself.spacing = [1, b / a, c / a]\n\t\treturn self.spacing", "def margin(self):\n sp = self.sale_price or zero\n if u.isempty(sp):\n return zero\n cp = self.cost_price or zero\n return u.decimal((um-(cp/sp))*cem, True)", "def Margin(self):\n s = self.margin\n assert s in range(1,6), \"Margin score out of bounds.\"\n if s == 1: return 'Poor'\n elif s == 2: return 'Near Poor'\n elif s == 3: return 'Medium'\n elif s == 4: return 'Near Sharp'\n elif s == 5: return 'Sharp'", "def get_margin(self, selling_price):\n selling_price = ( selling_price - self.product_price ) / 2\n return selling_price", "def width(self) -> int:", "def width(self) -> int:", "def vertical_spacing(self):\r\n return self.padding[0] + self.padding[2] + self.margin[0] + self.margin[2]", "def gap_width(self):\n return self.container['gap_width']", "def GetSpacing(self):\r\n\r\n return self._spacing", "def _get_total_margin_width(self) -> int:\n return sum(self._get_margin_width(m) for m in self.left_margins) + sum(\n self._get_margin_width(m) for m in self.right_margins\n )", "def size(self):\n return (self.width)", "def margin_height(self):\n return self.border_height() + self.margin_top + self.margin_bottom", "def margin(x):\n s = 0.0\n for i in range(len(axes)):\n s = s + (x[i]-center[i])**2/axes[i]**2\n return s - 1.0", "def margin_size(self, value: int) -> None:\n\n if not isinstance(value, int):\n raise TypeError(\"The margin size must be an integer\")\n\n margin_spacing = (2 * value) + (2 * self._border_thickness)\n\n if margin_spacing >= self.widget_width:\n raise ValueError(\n \"The size of the borders and margins combined can total the same or more\"\n \"than the widget's width.\"\n )\n\n if margin_spacing >= self.widget_height:\n raise ValueError(\n \"The size of the borders and margins combined can total the same or more\"\n \"than the widget's height.\"\n )\n\n self._margin_size = value\n self._set_progress(self._progress) # For a render pass", "def GetSpacerPixels(self):\r\n\r\n return self.spacer_pixels", "def calc_draw_margin(draw_probability, size, env=None):\n if env is None:\n env = global_env()\n return 
env.ppf((draw_probability + 1) / 2.) * math.sqrt(size) * env.beta", "def get_offset_value():\n # TODO rename it 'get_margin_value'\n # should be greater than 2 (maybe 1 is enough)\n return 5", "def size(self):\n return self.width", "def size(self):\n return self.width", "def size(self):\n return self.width" ]
[ "0.76753235", "0.7494543", "0.74747205", "0.7453971", "0.73529184", "0.7100232", "0.6973377", "0.69361866", "0.6888335", "0.67849207", "0.6593433", "0.6588636", "0.6546149", "0.64597106", "0.6458608", "0.6458608", "0.6456268", "0.6439624", "0.6420539", "0.6403202", "0.63916403", "0.6367906", "0.63212675", "0.6318012", "0.630173", "0.62256986", "0.6197546", "0.61832905", "0.61832905", "0.61832905" ]
0.7870077
0
Sets the new size of the margin to be used between the border (if displayed) and the value bar.
def margin_size(self, value: int) -> None:\n    if not isinstance(value, int):\n        raise TypeError("The margin size must be an integer")\n\n    margin_spacing = (2 * value) + (2 * self._border_thickness)\n\n    if margin_spacing >= self.widget_width:\n        raise ValueError(\n            "The size of the borders and margins combined can total the same or more "\n            "than the widget's width."\n        )\n\n    if margin_spacing >= self.widget_height:\n        raise ValueError(\n            "The size of the borders and margins combined can total the same or more "\n            "than the widget's height."\n        )\n\n    self._margin_size = value\n    self._set_progress(self._progress)  # For a render pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_margin(self, margin):\n _pal.lib.geometry_set_margin(self._geometry, c.c_float(margin))", "def SetMarginsSize(self, size):\r\n \r\n self.SetMargins(size.x, size.x, size.y, size.y)", "def change_margin(self, margin):\n self.margin = margin * self._MM_IN_MICRONS\n\n self._create_drawing_area()\n self.partition(self.rows, self.cols)", "def set_margin(self, value):\n value = u.decimal(value)\n if u.isempty(value):\n self.sale_price = self.cost_price\n else:\n cp = self.cost_price or zero\n self.sale_price = u.decimal(cp/((cem-value)/cem), True)", "def margin_size(self) -> int:\n return self._margin_size", "def setMarginPercentage(self, perc):\n\t\tself.marginPercentage = perc", "def margin(self):\r\n return self._generate_spacing_info(self.config['margin'])", "def SetMarginsXY(self, x, y):\r\n \r\n self.SetMargins(x, x, y, y)", "def setBarWidth(w):\n dislin.barwth(w)", "def getMargin(self):\n assert False", "def size(self, value):\n self.width = value", "def changeSize(self, value):\n self.layer.brush_size = value", "def margin_height(self):\n return self.border_height() + self.margin_top + self.margin_bottom", "def margins(self) -> tuple[int, int, int, int]:\n return self._widget._mgui_get_margins()", "def set_borders(self, val):\n self.rborder = val\n self.lborder = val\n self.tborder = val\n self.bborder = val", "def setBorder():\n dislin.pagera()", "def margin_top(self, value):\n self._margin_top = value", "def set_margin(self, margin, margin_text=None):\n self._margin = margin\n if margin_text is None:\n self._margin_text = margin\n else:\n self._margin_text = margin_text\n self._final = None # Force rebuild", "def set_size(self, value='S'):\n upper = value.upper()\n\n if upper == 'M': # Medium: double height\n # size = 0x01\n # charHeight = 48\n # maxColumn = 32\n self.double_height_on()\n self.double_width_off()\n elif upper == 'L': # Large: double width and height\n # size = 0x11\n # charHeight = 48\n # maxColumn = 16\n self.double_height_on()\n self.double_width_on()\n else: # Small: standard width and height\n # size = 0x00\n # charHeight = 24\n # maxColumn = 32\n self.double_width_off()\n self.double_height_off()\n # writeBytes(ASCII_GS, '!', size)\n # prevByte = '\\n' # Setting the size adds a linefeed", "def set_margins(self, margins='2.5cm', top=None, bottom=None, left=None, right=None):\n top = top or margins\n bottom = bottom or margins\n left = left or margins\n right = right or margins\n\n self.add_package('geometry', top=top, bottom=bottom, left=left, right=right)", "def margin(self, parameter, margin):\n #Use array type to multipy values by margin\n array = numpy.array(self.dataframe[parameter])\n array = array * (1+margin)\n self.dataframe[parameter] = array\n logging.info('Margin of {} successfully applied to the parameter values'.format(margin))", "def set_margins_and_spacing(self):\n\n #margin_list\n margin_list = [0,0,0,0]\n\n #lyt_classes_list\n lyt_classes_list = [QtGui.QStackedLayout, QtGui.QGridLayout, QtGui.QFormLayout, \n QtGui.QBoxLayout, QtGui.QVBoxLayout, QtGui.QHBoxLayout, QtGui.QBoxLayout]\n\n #lyt_list\n lyt_list = []\n for lyt_class in lyt_classes_list:\n lyt_list += [wdgt for wdgt in self.findChildren(lyt_class)]\n\n\n \n #set margin and spacing\n for lyt in lyt_list:\n\n #check type\n if(type(lyt) in lyt_classes_list):\n\n #set\n lyt.setContentsMargins(*margin_list)\n lyt.setSpacing(0)", "def setContentsMargins( self, left, top, right, bottom ):\n self._margins = (left, top, right, bottom)\n self._titleFont = None", "def margin_width(self):\n 
return self.border_width() + self.margin_left + self.margin_right", "def _get_fill_border_size(self) -> int:\n\n return (2 * self.border_thickness) + (2 * self.margin_size)", "def setBarWidth(w=0.75):\n dislin.barwth(w)", "def _change_height(self, ax, new_value):\n for patch in ax.patches:\n current_height = patch.get_height()\n diff = current_height - new_value\n\n # we change the bar height\n patch.set_height(new_value)\n\n # we recenter the bar\n patch.set_y(patch.get_y() + diff * .5)", "def get_margin(self):\n _pal.lib.geometry_get_margin.restype = c.c_float\n return _pal.lib.geometry_get_margin(self._geometry)", "def change_width(self, value):\n self.layer.edge_width = value\n self.widthSpinBox.clearFocus()\n self.setFocus()", "def _update_dimensions(self):\n _, self.width = self.window.getmaxyx()\n self.spacing = self.width // self.total_columns" ]
[ "0.7088271", "0.6982609", "0.69273084", "0.6687851", "0.6584829", "0.65272593", "0.651799", "0.6422928", "0.61395323", "0.6133535", "0.60705185", "0.60655385", "0.6013266", "0.5968473", "0.5962315", "0.59580904", "0.5943472", "0.58832717", "0.58786684", "0.58772904", "0.5874312", "0.5859741", "0.5855486", "0.58512557", "0.5835179", "0.58264476", "0.5818489", "0.5812841", "0.58110684", "0.57994294" ]
0.79206103
0
Gets the ratio (percentage) of a given value within the range of self.minimum and self.maximum.
def get_value_ratio(self, value: Union[int, float]) -> float:
    if self.maximum == self.minimum:
        return 0.0
    return (float(value) - self.minimum) / (self.maximum - self.minimum)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_rating() / self.field.range)", "def getPercent(self):\n if isinstance(self.score,numbers.Number) and self.getMaximum():\n return (1.0*self.score/self.getMaximum())\n return None", "def get_real_percent(self):\n if not (self.votes and self.score):\n return 0\n return 100 * (self.get_real_rating() / self.field.range)", "def health_percentage(self) -> Union[int, float]:\n if not self.proto.health_max:\n return 0\n return self.proto.health / self.proto.health_max", "def health_percentage(self) -> Union[int, float]:\n if not self.proto.health_max:\n return 0\n return self.proto.health / self.proto.health_max", "def percent(values, p=0.5):\n m = min(values)\n interval = max(values) - m\n return m + p*interval", "def __normalize(self, value, lower_bound, upper_bound):\n\n min_max_diff = self.max - self.min\n bound_diff = upper_bound - lower_bound\n return (value - self.min) / min_max_diff * bound_diff + lower_bound", "def __get_value(self, values):\n label_range = np.abs(self.max_range - self.min_range)\n range_min = np.minimum(self.min_range, self.max_range)\n range_max = np.maximum(self.min_range, self.max_range)\n values_clipped = np.minimum(np.maximum(values, range_min), range_max)\n proportion = np.abs(values_clipped - self.min_range) / label_range\n vertices = (proportion * 100)\n\n return vertices", "def get_percentage(self):\n return self.percentage", "def get_percentage(self):\n return self.percentage", "def percentage(self) -> str:\n return ranged_value_to_percentage(\n self._device.fan_speed_limits, self._device.fan_speed\n )", "def percentage(a, b):\n return (a * 100.0) / b", "def percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"percentage\")", "def ratio(self):\n return float(self.max_width) / self.max_height", "def exceeded_ratio(self) -> float:\n return self.amount_spent / self.total_amount", "def percentage(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"percentage\")", "def ratio_func(a, b):\n return a / b", "def max_percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"max_percentage\")", "def get_count(self, min_value, max_value):\n index = self.get_bin_index(min_value)\n current_start_value = self.values[index]\n current_stop_value = self.values[index + 1]\n count = 0\n # Add total in this area:\n count += self.counts[index]\n if current_start_value != -float(\"inf\"):\n # Remove proportion before min_value:\n current_total_range = current_stop_value - current_start_value\n percent = (min_value - current_start_value) / current_total_range\n count -= self.counts[index] * percent\n if max_value < current_stop_value:\n # stop is inside this area too, so remove after max\n if current_start_value != -float(\"inf\"):\n percent = (current_stop_value - max_value) / current_total_range\n count -= self.counts[index] * percent\n return count\n # max_value is beyond this area, so loop until last area:\n index += 1\n while max_value > self.values[index + 1]:\n # add the whole count\n count += self.counts[index]\n index += 1\n # finally, add the proportion in last area before max_value:\n current_start_value = self.values[index]\n current_stop_value = self.values[index + 1]\n if current_stop_value != float(\"inf\"):\n current_total_range = current_stop_value - current_start_value\n percent = (max_value - current_start_value) / current_total_range\n count += self.counts[index] * percent\n else:\n count += 
self.counts[index]\n return count", "def calculatePercentChange(self, oldValue, newValue):\n return (((newValue - oldValue)/oldValue)*100)", "def contains_percentage_of(self, other: 'Interval') -> float:\n if other.length == 0:\n return other.a in self\n intersection = Interval.intersection([self, other])\n return intersection.length / other.length if intersection else 0.0", "def scale(self, value):\n return (float(value) - float(self.minimum)) / \\\n float(self.maximum - self.minimum) * 2.0 - 1.0", "def allowedPercent(self, lower, upper=None, msg=None):\n return allowed_percent(lower, upper, msg)", "def relative_range(self):\n self.calculate_relative_mags()\n string = '{:.0f}-{:.0f}Hz: {:.5f}'\n s_ind = self.get_bin(self.s_freq)\n e_ind = self.get_bin(self.e_freq)\n lst = self.rel_mags[s_ind:e_ind+1]\n return sum(lst)/len(lst)", "def min_percentage(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"min_percentage\")", "def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0", "def cover_ratio(self, other):\n VERIFICATION.verify_type(other, Rect, \"cover_ratio target must be Rect\")\n\n intersection_rect = self.intersection(other)\n if intersection_rect is None:\n return 0.0\n\n smaller_rect = self if self.area() < other.area() else other\n cover_score = float(intersection_rect.area())/float(smaller_rect.area())\n return cover_score", "def percent(self, value) -> 'Size':\n raise_not_number(value)\n self.maximum = '{}%'.format(value)\n return self", "def get_score_percent(self, value):\n qs_related = RoundData.objects.prefetch_related(\n 'shotdata').select_related('shotdata')\n\n round_holes = int(self.round_type)\n\n if value == 'par':\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'birdie_better':\n return round((qs_related.filter(shotdata__nr_strokes__lt=F('shotdata__hole__par')).count()/round_holes), 2)\n if value == 'tbogey_worse':\n return round((qs_related.filter(shotdata__nr_strokes__gte=F('shotdata__hole__par')+3).count()/round_holes), 2)\n if isinstance(value, int):\n return round((qs_related.filter(shotdata__nr_strokes=F('shotdata__hole__par') + value).count()/round_holes), 2)", "def span_rbw_ratio(self):\r\n res = self._visa.query(f\"SENSE{self._screen()}:BANDWIDTH:RESOLUTION:RATIO?\")\r\n return 1 / float(res)" ]
[ "0.6833876", "0.67152834", "0.6532714", "0.64532036", "0.64532036", "0.64504224", "0.6440304", "0.64198923", "0.6418154", "0.6418154", "0.6403832", "0.63265127", "0.6325848", "0.632183", "0.63187045", "0.6256281", "0.6231153", "0.6217156", "0.6195825", "0.6180276", "0.61256045", "0.611453", "0.6103795", "0.6071618", "0.6049993", "0.6008061", "0.6004081", "0.599192", "0.59723", "0.5969998" ]
0.8040569
0
Does the work of actually creating the graphical representation of the value (percentage, aka "progress") to be displayed.
def _render(
    self,
    _old_value: Union[int, float],
    _new_value: Union[int, float],
    _progress_value: float,
) -> None:
    _prev_ratio, _new_ratio = self._get_ratios(_old_value, _new_value)
    _old_value_size, _new_value_size = self._get_value_sizes(
        _prev_ratio, _new_ratio
    )

    # Adjusts for edge cases, such as 0-width non-zero value, or 100% width
    # non-maximum values
    _new_value_size = self._adjust_size_for_range_limits(
        _new_value_size, _new_value
    )

    # Default values for increasing value
    _color = 2
    _incr = 1
    _start = max(_old_value_size, 0)
    _end = max(_new_value_size, 0)

    if _old_value_size >= _new_value_size:
        # Override defaults to be decreasing
        _color = 0  # Clear
        _incr = -1  # Iterate range downward
        _start = max(_old_value_size, 0) - 1
        _end = max(_new_value_size, 0) - 1
        # If we're setting to minimum, make sure we're clearing by
        # starting one "bar" further
        if _new_value == self.minimum:
            _start += 1

    _render_offset = self.margin_size + self.border_thickness

    vert_start, vert_end, vert_incr = self._get_vertical_fill(_start, _end, _incr)
    horiz_start, horiz_end, horiz_incr = self._get_horizontal_fill(
        _start, _end, _incr
    )

    vert_start += _render_offset
    vert_end += _render_offset
    horiz_start += _render_offset
    horiz_end += _render_offset

    for vertical_position in range(vert_start, vert_end, vert_incr):
        for horizontal_position in range(horiz_start, horiz_end, horiz_incr):
            self._bitmap[horizontal_position, vertical_position] = _color
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_progress(self, value: float) -> None:\n\n self._progress = round(value, 4)\n self._render(self._old_value, self._value, value)", "def prograssBar(val, final):\n end = \"\"\n maxlen = 50\n step = final // maxlen\n\n print(\"\\r[ \" + \"#\" * (val // step) + \" ] \" +\n str(int(val * 100.0 / final)) + \"% \", end=end)", "def render(self, progress):\n assert progress >= 0, \"Progress must be positive\"\n\n pct = progress / self.end\n n_bars = int(pct * self.length)\n bars = \"=\" * n_bars\n if n_bars < self.length:\n bars += \">\"\n\n return f\"[{bars:70s}] {pct:4.0%} {progress:8d} / {self.end:8d}\"", "def progress(self, value):\n self.step = float(value)\n self._draw()", "def update_percent(self):", "def __str__(self):\n width = self.width\n if self.length == 0:\n percent = 1\n else:\n percent = max(self.value, 0) / self.length\n pg_char = self.pg_char\n ending = ' ' + (self.str_time_remaining()\n if self.timer else '{0} of {1} complete'.format(\n self.value, self.length))\n if width - len(ending) < 10 or self.has_output:\n self.width = 0\n if self.timer:\n return \"{0:.0%} complete: {1}\".format(\n percent, self.str_time_remaining())\n return \"{0:.0%} complete\".format(percent)\n num_of_chars = int(percent * self.width)\n pbar = '[' + pg_char*num_of_chars + \\\n ' '*(self.width-num_of_chars) + ']' + ending\n\n str_percent = ' {0:.0%} '.format(percent)\n\n return pbar[:self.width//2 - 2] \\\n + str_percent + pbar[self.width//2+len(str_percent) - 2:]", "def _set_percentage(self):\n\n step = float(self.step)\n end = float(self.end)\n self.percentage = format((100 * step / end), '.1f')", "def update_amount(self, newAmount=0, suffix=''):\n if newAmount < self.min:\n newAmount = self.min\n if newAmount > self.max:\n newAmount = self.max\n self.amount = newAmount\n\n # Figure out the new percent done, round to an integer\n diffFromMin = np.float(self.amount - self.min)\n percentDone = (diffFromMin / np.float(self.span)) * 100.0\n percentDone = np.int(np.round(percentDone))\n\n # Figure out how many hash bars the percentage should be\n allFull = self.width - 2 - 18\n numHashes = (percentDone / 100.0) * allFull\n numHashes = np.int(np.round(numHashes))\n\n # Build a progress bar with an arrow of equal signs; special cases for\n # empty and full\n if numHashes == 0:\n self.prog_bar = '%s[>%s]' % (self.prefix, ' '*(allFull-1))\n elif numHashes == allFull:\n self.prog_bar = '%s[%s]' % (self.prefix, '='*allFull)\n if suffix:\n self.prog_bar += ' %s' % (suffix)\n else:\n self.prog_bar = '[%s>%s]' % ('='*(numHashes-1), ' '*(allFull-numHashes))\n # figure out where to put the percentage, roughly centered\n percentPlace = int(len(self.prog_bar)/2 - len(str(percentDone)))\n percentString = ' ' + str(percentDone) + '% '\n # slice the percentage into the bar\n self.prog_bar = ''.join([self.prog_bar[0:percentPlace],\n percentString,\n self.prog_bar[percentPlace+len(percentString):]])\n # prefix and suffix\n self.prog_bar = self.prefix + self.prog_bar\n if suffix:\n self.prog_bar += ' %s' % (suffix)\n # time info - elapsed time and estimated remaining time\n if percentDone > 0:\n elapsed_time = time.time() - self.start_time\n self.prog_bar += '%5ds / %5ds' % (int(elapsed_time),\n int(elapsed_time * (100./percentDone-1)))", "def updateAmount(self, newAmount = 0):\n if newAmount and self.starting_amount is None:\n self.starting_amount = newAmount\n self.starting_time = time.time()\n if newAmount < self.min: newAmount = self.min\n if newAmount > self.max: newAmount = self.max\n self.prev_amount = 
self.amount\n self.amount = newAmount\n\n # Figure out the new percent done, round to an integer\n diffFromMin = float(self.amount - self.min)\n percentDone = (diffFromMin / float(self.span)) * 100.0\n percentDone = int(round(percentDone))\n\n # Figure out how many hash bars the percentage should be\n allFull = self.width - 2\n numHashes = (percentDone / 100.0) * allFull\n numHashes = int(round(numHashes))\n\n # Build a progress bar with an arrow of equal signs; special cases for\n # empty and full\n\n if numHashes == 0:\n self.progBar = \"[>%s]\" % (' '*(allFull-1))\n elif numHashes == allFull:\n self.progBar = \"[%s]\" % ('='*allFull)\n else:\n self.progBar = \"[%s>%s]\" % ('='*(numHashes-1),\n ' '*(allFull-numHashes))\n \n if self.show_percentage:\n # figure out where to put the percentage, roughly centered\n percentPlace = (len(self.progBar) / 2) - len(str(percentDone))\n percentString = str(percentDone) + \"%\"\n else:\n percentPlace = (len(self.progBar) / 2) - len(str(percentDone))\n percentString = '%s/%s' % (self.amount, self.span)\n # slice the percentage into the bar\n self.progBar = ''.join([self.progBar[0:percentPlace], percentString,\n self.progBar[percentPlace+len(percentString):]\n ])\n if self.starting_amount is not None:\n amount_diff = self.amount - self.starting_amount\n if amount_diff:\n self.prev_time = self.current_time\n self.current_time = time.time()\n elapsed = self.current_time - self.starting_time\n eta = elapsed * (self.max - self.amount)/float(amount_diff)\n self.progBar += ' ETA:'+time_to_str(eta)", "def set_progress_value(self, value):\r\n\r\n pass", "def update_progressbar(self, count, value):\n self.status(\"Progress %s/%s\" % (value, count))", "def _set_eel_display(self, value):\n self.eel_display = \"\"\"<div class=\"progress\">\n <div class=\"progress-bar\" role=\"progressbar\" aria-valuenow=\"{0}\" \n aria-valuemin=\"0\" aria-valuemax=\"100\"\n style=\"min-width: 2em; width: {0}%;\"> {0}% </div>\n </div>\n \"\"\".format((value/self.MAX_EELS)*100)", "def _draw(self):\n if self._total == 0:\n return\n self._clear()\n\n print('{0:-<{1}}{2:3d}% ({3:{5}d}/{4:{5}d}) ETA {6}'.format(\n \"=\" * round(self._bar_width * self._current / self._total),\n self._bar_width,\n round(math.floor(self._current / self._total * 100)),\n self._current,\n self._total,\n self._digits,\n self._remaining_time\n ), end='\\r')", "def display_float( \\\n r : RenderingManager, \\\n value : float, \\\n name : str = 'display name', \\\n x : int = 20, \\\n y : int = 20, \\\n width : int = 200, \\\n height : int = 60):\n # Calculations:\n pad = width / 20\n bar_width = width/2 - pad\n bar_height = 3*height/5 - 2*pad\n value_width = bar_width*abs(value)\n\n # Rendering:\n r.begin_rendering(f'display {name} in box')\n\n #Box\n colour_box = r.create_color(150, 0, 0, 0)\n r.draw_rect_2d(x, y, int(width*0.95), height, True, colour_box)\n # BUG Rects draw too wide. Temporary workaround.\n\n #Title\n colour_name = r.white()\n font_size = int(height / 150) + 1\n r.draw_string_2d(int(x+pad), int(y+pad), font_size, font_size, f'{name}', colour_name)\n\n #Bar\n pos = r.create_color(255, 0, 200, 50)\n neg = r.create_color(255, 150, 20, 20)\n colour_bar = pos if sign(value) == 1 else neg\n\n if sign(value) == 1:\n r.draw_rect_2d(int(x+width/2), int(y+pad + 2*height/5), int(value_width*0.95), int(bar_height), True, colour_bar)\n # BUG Rects draw too wide. 
Temporary workaround.\n else:\n r.draw_rect_2d(int(x+width/2-value_width), (y+pad + 2*height/5), int(value_width*0.95), int(bar_height), True, colour_bar)\n # BUG Rects draw too wide. Temporary workaround.\n\n #Zero Line\n colour_zero = r.white()\n r.draw_rect_2d(int(x+width/2)-1, int(y+pad + 2*height/5)-2, 2, int(bar_height)+4, True, colour_zero)\n\n r.end_rendering()", "def _draw(self):\n\n self._set_percentage()\n spaces = \"\".join([' ' for _ in range(len(str(self.percentage)), 5)])\n porc = \"\\r\" + str(self.text) + spaces + str(self.percentage) + \"%[\"\n pos = (((self.step / (self.end - self.start) * 100) * (self.width - len(porc))) / 100)\n self._write(porc)\n for i in range(int(pos)):\n self._write(self.bar)\n self._write(next(self.pacman))\n for i in range(int(pos), len(self.candybar) - 18):\n self._write(self.candybar[i])\n self._write(\"] > \" if self.follower and self.step < self.len else \"]\")\n\n sys.stdout.flush()\n\n if self.step == self.len:\n self._write(\"\\n\")", "def _progress_bar(free_key, capacity_key, result_key, unit):\n if free_key not in info or capacity_key not in info:\n return\n free = info[free_key]\n del info[free_key]\n capacity = info[capacity_key]\n del info[capacity_key]\n\n simple_stats = (\n 'Current: {} {unit}\\n'\n 'Free: {} {unit}\\n'\n 'Max: {} {unit}'.format(\n capacity - free, free, capacity, unit=unit))\n\n if not 0 <= free <= capacity > 0:\n log.warning(\n '{} ({}) and {} ({}) have weird ratio, skipping progress '\n 'calculation'.format(\n free_key, free, capacity_key, capacity)\n )\n info[result_key] = red(simple_stats)\n return\n\n assert 0 <= free <= capacity\n ratio = 1 - float(free) / float(capacity)\n if ratio >= 0.9:\n color = red\n elif ratio >= 0.8:\n color = yellow\n else:\n color = green\n\n max_bars = 20\n num_bars = int(round(ratio * max_bars))\n info[result_key] = (\n '[{}{}] {}%\\n{}'.format(\n color('#' * num_bars), ' ' * (max_bars - num_bars),\n int(round(ratio * 100)),\n simple_stats,\n )\n )", "def set_progress(self, progress: float):", "def get_text(self):\n # If percentage is zero, round it\n if self.percentage == 0:\n self.percentage = str(\"< 0.01\")\n text = str(self.percentage) + \"% on line \" + self.line\n return text", "def drawProgressBar(self, percent, barLen = 20):\n sys.stdout.write(\"\\r\")\n progress = \"\"\n for i in range(barLen):\n if i < int(barLen * percent):\n progress += \"=\"\n else:\n progress += \" \"\n sys.stdout.write(\"[ %s ] %.2f%%\" % (progress, percent * 100))\n sys.stdout.flush()", "def __call__(self, param):\n count = param.nbatch\n filled_len = int(round(self.bar_len * count / float(self.total)))\n percents = math.ceil(100.0 * count / float(self.total))\n prog_bar = '=' * filled_len + '-' * (self.bar_len - filled_len)\n logging.info('[%s] %s%s\\r', prog_bar, percents, '%')", "def updateAmount(self, newAmount=0):\n\n if newAmount <= self.min:\n newAmount = self.min\n if newAmount >= self.max:\n newAmount = self.max\n\n self.amount = newAmount\n\n # Figure out the new percent done, round to an integer\n diffFromMin = float(self.amount - self.min)\n percentDone = (diffFromMin / float(self.span)) * 100.0\n percentDone = int(round(percentDone))\n\n # Figure out how many hash bars the percentage should be\n allFull = self.width - 2\n numHashes = (percentDone / 100.0) * allFull\n numHashes = int(round(numHashes))\n\n # Build a progress bar with an arrow of equal signs; special\n # cases for empty and full\n if numHashes == 0:\n self.progBar = \"[%s%s]\" % (self.edge, ' ' * (allFull - 1))\n elif 
numHashes == allFull:\n self.progBar = \"[%s]\" % (self.marker * allFull)\n else:\n self.progBar = \"[%s%s%s]\" % (self.marker * (numHashes - 1),\n self.edge,\n ' ' * (allFull - numHashes))\n\n # figure out where to put the percentage, roughly centered\n percentPlace = (len(self.progBar) / 2) - len(str(percentDone))\n percentString = str(percentDone) + \"%\"\n\n # slice the percentage into the bar\n self.progBar = ''.join([self.progBar[0:percentPlace], percentString,\n self.progBar[percentPlace+len(percentString):]\n ])", "def _dl_progress_bar(self):\n if not self.show_progress:\n return\n\n if self.file_size:\n ratio = float(self.bytes_read) / self.file_size\n else:\n ratio = 1\n percent = int(ratio * 100)\n\n bar_len = 60\n done = int(bar_len * ratio)\n bar = ('=' * done) + (' ' * (bar_len - done))\n\n progress = '{percent: >3}%: [{bar}]'.format(percent=percent, bar=bar)\n backspace = '\\b' * len(progress)\n print(backspace + '\\r', end='')\n print(progress, end='')", "def getProgress(self):", "def _progress(self, num_completed_batches, data_loader):\n return '[{}/{} ({:.0f}%)]'.format(num_completed_batches, len(data_loader),\n 100.0 * num_completed_batches / len(data_loader))", "def _printProgressBar(self, fractionComplete):\n import sys\n nInc = 50\n count = int(nInc * fractionComplete)\n proBar = \"|\"\n for i in range(nInc):\n if i < count:\n proBar += \"-\"\n else:\n proBar += \" \"\n proBar += \"|\"\n print((proBar, int(fractionComplete * 100), \"%\\r\",))\n sys.stdout.flush()\n\n return", "def start_progress_bar(self):\r\n self.progress[\"value\"] = self.progress_step", "def progress(self, value: float) -> None:\n\n if not isinstance(value, (float, int)):\n raise TypeError(\"'progress' must be an int or a float\")\n\n if not 0.0 <= value <= 100.0:\n raise ValueError(\"'progress' must be between 0 and 100\")\n\n self.value = (self.minimum + (self.maximum - self.minimum)) * (value * 0.01)", "def _progressBar(self, percent, printEvery=10):\n floor = int(percent)\n sys.stdout.write('\\r' * (floor + 9))\n sys.stdout.write('[')\n sys.stdout.write('=' * (floor/printEvery))\n sys.stdout.write('>] {:02.2f}%'.format(percent))\n sys.stdout.flush()", "def _bar_progress(self, count, done=False):\n if self.blank:\n return\n self.current_count = count\n count = min(count, self.total)\n if self.total == count or not self.total:\n complete = 100\n else:\n complete = int(floor(100.0*count/self.total))\n if complete <= self.last_percent:\n return\n self.last_percent = complete\n if self.view_type is self.PERCENT:\n self.f.write('\b\b\b\b%3d%%' % complete)\n elif self.view_type is self.BAR:\n blockcount = int(complete//2)\n if blockcount <= self.blockcount:\n return\n for i in range(self.blockcount, blockcount):\n self.f.write(self.bar_char)\n self.blockcount = blockcount\n else:\n raise Exception('unknown value for view_type: %r' % self.view_type)\n if complete == 100:\n self.f.write('\\n')\n self.f.flush()", "def printProgress(self, percentage):\n #print '%s\\r' % ' '*20, # clean up row\n #print '%3d%% ' % percentage, # ending with comma prevents newline from being appended\n sys.stdout.flush()" ]
[ "0.72470385", "0.71253514", "0.70749027", "0.6908125", "0.6892346", "0.6851142", "0.6813287", "0.6738935", "0.6731503", "0.66507053", "0.66301835", "0.6625872", "0.65764683", "0.6552549", "0.6533988", "0.65089285", "0.64166343", "0.6414557", "0.6399664", "0.6396538", "0.6395756", "0.6395568", "0.63889253", "0.6381841", "0.63538396", "0.6348493", "0.63395625", "0.6328625", "0.63245887", "0.63159263" ]
0.725467
0
Get a cube with the faces out of order on construction.
def unoriented_cube():
    faces = get_oriented_cube_faces()
    for face in faces:
        np.random.shuffle(face)
    poly = Polyhedron(get_cube_points(), faces, faces_are_convex=True)
    poly.sort_faces()
    return poly
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cube_array(self):\n cube_sides = {}\n\n for side in SIDES:\n cube_sides[side] = []\n \n # Todo Break this loop into helper functions for clarity and simplicity\n for coord in COORDS_3:\n for cubie in self.cubies:\n # Making sure that the cubes cubies are processed in the correct order\n if np.array_equal(cubie.coordinates, coord): \n \n \n for side in SIDES:\n if cubie.in_side(side):\n for face in cubie.faces:\n \n # Checking that the face of the cubie has the same norm as the side we are processing\n if np.array_equal(face.norm, NORMS[side]):\n cube_sides[side].append(face.colour)\n\n new_list = [cube_sides[\"U\"], cube_sides[\"F\"], reversal(cube_sides[\"R\"]), reversal(cube_sides[\"B\"]),\n cube_sides[\"L\"], reversal(cube_sides[\"D\"])]\n \n final_list = [nine_to_3x3(side) for side in new_list]\n return final_list", "def cube_faces(xmin, xmax, ymin, ymax, zmin, zmax):\n faces = []\n\n x, y = np.mgrid[xmin:xmax:3j, ymin:ymax:3j]\n z = np.ones(y.shape) * zmin\n faces.append((x, y, z))\n\n x, y = np.mgrid[xmin:xmax:3j, ymin:ymax:3j]\n z = np.ones(y.shape) * zmax\n faces.append((x, y, z))\n\n x, z = np.mgrid[xmin:xmax:3j, zmin:zmax:3j]\n y = np.ones(z.shape) * ymin\n faces.append((x, y, z))\n\n x, z = np.mgrid[xmin:xmax:3j, zmin:zmax:3j]\n y = np.ones(z.shape) * ymax\n faces.append((x, y, z))\n\n y, z = np.mgrid[ymin:ymax:3j, zmin:zmax:3j]\n x = np.ones(z.shape) * xmin\n faces.append((x, y, z))\n\n y, z = np.mgrid[ymin:ymax:3j, zmin:zmax:3j]\n x = np.ones(z.shape) * xmax\n faces.append((x, y, z))\n\n return faces", "def cube_vertices(x, y, z, n):\n #def cube_vertices(self):\n # \"\"\" Return the vertices of the cube at position x, y, z with size 2*n.\n #\n # \"\"\"\n # return [\n # x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n # x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n # x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n # x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n # x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n # x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n # ]\n return [\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n ]", "def cube_vertices(x, y, z, nx, ny=None, nz=None):\n if ny == None: ny = nx\n if nz == None: nz = nx\n return [\n x - nx, y + ny, z - nz, x - nx, y + ny, z + nz, x + nx, y + ny, z + nz, x + nx, y + ny, z - nz, # top\n x - nx, y - ny, z - nz, x + nx, y - ny, z - nz, x + nx, y - ny, z + nz, x - nx, y - ny, z + nz, # bottom\n x - nx, y - ny, z - nz, x - nx, y - ny, z + nz, x - nx, y + ny, z + nz, x - nx, y + ny, z - nz, # left\n x + nx, y - ny, z + nz, x + nx, y - ny, z - nz, x + nx, y + ny, z - nz, x + nx, y + ny, z + nz, # right\n x - nx, y - ny, z + nz, x + nx, y - ny, z + nz, x + nx, y + ny, z + nz, x - nx, y + ny, z + nz, # front\n x + nx, y - ny, z - nz, x - nx, y - ny, z - nz, x - nx, y + ny, z - nz, x + nx, y + ny, z - nz, # back\n ]", "def cube_from_bbox(bbox):\n cube = pm.polyCube(\n width=bbox.width(),\n height=bbox.height(),\n depth=bbox.depth(),\n ch=False\n )\n cube[0].setAttr('t', bbox.center())\n return cube[0]", "def n_cube(self, dim_n):\n if dim_n == 1:\n return Polyhedron(vertices = [[1],[-1]])\n\n pre_cube = 
polytopes.n_cube(dim_n-1)\n vertices = [];\n for pre_v in pre_cube.vertex_generator():\n vertices.append( [ 1] + [v for v in pre_v] );\n vertices.append( [-1] + [v for v in pre_v] );\n return Polyhedron(vertices = vertices)", "def cube_vertices(x, y, z, n):\r\n return [\r\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\r\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\r\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\r\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\r\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\r\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\r\n ]", "def getCube(unique_name):", "def test_cube(self):\n\n # No isosurface\n cube_zero = numpy.zeros((2, 2, 2), dtype=numpy.float32)\n\n result = marchingcubes.MarchingCubes(cube_zero, 1.)\n self.assertEqual(result.shape, cube_zero.shape)\n self.assertEqual(result.isolevel, 1.)\n self.assertEqual(result.invert_normals, True)\n\n vertices, normals, indices = result\n self.assertEqual(len(vertices), 0)\n self.assertEqual(len(normals), 0)\n self.assertEqual(len(indices), 0)\n\n # Cube array dimensions: shape = (dim 0, dim 1, dim2)\n #\n # dim 0 (Z)\n # ^\n # |\n # 4 +------+ 5\n # /| /|\n # / | / |\n # 6 +------+ 7|\n # | | | |\n # |0 +---|--+ 1 -> dim 2 (X)\n # | / | /\n # |/ |/\n # 2 +------+ 3\n # /\n # dim 1 (Y)\n\n # isosurface perpendicular to dim 0 (Z)\n cube = numpy.array(\n (((0., 0.), (0., 0.)),\n ((1., 1.), (1., 1.))), dtype=numpy.float32)\n level = 0.5\n vertices, normals, indices = marchingcubes.MarchingCubes(\n cube, level, invert_normals=False)\n self.assertAllClose(vertices[:, 0], level)\n self.assertAllClose(normals, (1., 0., 0.))\n self.assertEqual(len(indices), 2)\n\n # isosurface perpendicular to dim 1 (Y)\n cube = numpy.array(\n (((0., 0.), (1., 1.)),\n ((0., 0.), (1., 1.))), dtype=numpy.float32)\n level = 0.2\n vertices, normals, indices = marchingcubes.MarchingCubes(cube, level)\n self.assertAllClose(vertices[:, 1], level)\n self.assertAllClose(normals, (0., -1., 0.))\n self.assertEqual(len(indices), 2)\n\n # isosurface perpendicular to dim 2 (X)\n cube = numpy.array(\n (((0., 1.), (0., 1.)),\n ((0., 1.), (0., 1.))), dtype=numpy.float32)\n level = 0.9\n vertices, normals, indices = marchingcubes.MarchingCubes(\n cube, level, invert_normals=False)\n self.assertAllClose(vertices[:, 2], level)\n self.assertAllClose(normals, (0., 0., 1.))\n self.assertEqual(len(indices), 2)\n\n # isosurface normal in dim1, dim 0 (Y, Z) plane\n cube = numpy.array(\n (((0., 0.), (0., 0.)),\n ((0., 0.), (1., 1.))), dtype=numpy.float32)\n level = 0.5\n vertices, normals, indices = marchingcubes.MarchingCubes(cube, level)\n self.assertAllClose(normals[:, 2], 0.)\n self.assertEqual(len(indices), 2)", "def create_cube():\n new_cube = RubicsCube2x2()\n show_cube_console(new_cube)\n\n seed = [10, 9, 17, 14, 11, 8, 3, 2, 17, 3, 9, 7, 15, 4, 14, 14, 3, 3, \\\n 13, 7, 15, 9, 14, 13, 11, 17, 7, 10, 5, 16, 11, 5, 7, 10, 14, \\\n 7, 17, 7, 8, 6, 12, 3, 6, 1, 16, 12, 5, 13, 3, 4]\n for move in seed:\n new_cube.do_move(move)\n return new_cube", "def copy(self):\n new_cubies = [cubie.copy() for cubie in self.cubies]\n new_cube = Cube(new_cubies)\n return new_cube", "def test_2_1_3D_cube_init(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0), (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5)]\n\n nn_checks = {\n (1, 1, 1): [(1, 1, 0), (0, 1, 1), (1, 0, 0), (0, 0, 1), (1, 0, 1),\n (0.5, 0.5, 0.5), (0, 1, 0)],\n (1, 0, 1): [(1, 0, 0), (0, 0, 1), 
(0, 0, 0), (0.5, 0.5, 0.5),\n (1, 1, 1)],\n (0.5, 0.5, 0.5): [(1, 1, 0), (0, 1, 1), (0, 1, 0), (1, 0, 0),\n (0, 0, 1), (1, 0, 1), (0, 0, 0), (1, 1, 1)]}\n\n init_triangulation(3, 0, check, nn_checks)", "def cube(self):\n\n dims = self.voxels.shape\n max_dim = max(dims)\n \n x_target = (max_dim - dims[0]) / 2\n y_target = (max_dim - dims[1]) / 2\n z_target = (max_dim - dims[2]) / 2\n\n self.voxels = np.pad(self.voxels,\n ((int(np.ceil(x_target)), int(np.floor(x_target))),\n (int(np.ceil(y_target)), int(np.floor(y_target))),\n (int(np.ceil(z_target)), int(np.floor(z_target)))),\n 'constant',\n constant_values=(0))\n\n self.point_position = self.point_position + [np.ceil(z_target),\n np.ceil(y_target),\n np.ceil(x_target)]\n\n return(self)", "def to_pycuber(self) -> pycuber.Cube:\n self.soft_align_faces()\n qpos_copy = self.sim.data.qpos.copy()\n\n cubies = []\n\n for i in range(27):\n cubelet_meta = self.cubelet_meta_info[i]\n\n if cubelet_meta[\"type\"] == \"cubelet\":\n mtx = self._cubelet_rotation_matrix(cubelet_meta, qpos_copy)\n\n original_coords = cubelet_meta[\"coords\"]\n # current_coords = (mtx @ cubelet_meta['coords'].astype(float)).round().astype(int)\n\n cubie_desc = {}\n\n for prev_axis, sign in enumerate(original_coords):\n if sign != 0:\n vec = mtx[:, prev_axis] * sign\n new_axis = np.abs(vec).argmax()\n new_sign = vec[new_axis]\n\n color = PYCUBER_REVERSE_COLORS[prev_axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[new_axis, new_sign]\n\n cubie_desc[loc] = pycuber.Square(color)\n\n if len(cubie_desc) == 3:\n cubies.append(pycuber.Corner(**cubie_desc))\n elif len(cubie_desc) == 2:\n cubies.append(pycuber.Edge(**cubie_desc))\n if cubelet_meta[\"type\"] == \"driver\":\n original_coords = cubelet_meta[\"coords\"]\n axis = np.abs(original_coords).argmax()\n sign = original_coords[axis]\n\n color = PYCUBER_REVERSE_COLORS[axis, sign]\n loc = PYCUBER_REVERSE_LOCATIONS[axis, sign]\n\n cubie_desc = {loc: pycuber.Square(color)}\n cubies.append(pycuber.Centre(**cubie_desc))\n\n return pycuber.Cube(cubies=cubies)", "def copy(self):\n new_cubie = Cubie(self.coordinates[:])\n new_cubie.faces = [face.copy() for face in self.faces]\n return new_cubie", "def index_as_cube(self):\n return _IndexAsCubeSlicer(self)", "def create_cube(color=COLOR_WHITE):\n a = Point3(-1.0, -1.0, -1.0)\n b = Point3(1.0, -1.0, -1.0)\n c = Point3(1.0, -1.0, 1.0)\n d = Point3(-1.0, -1.0, 1.0)\n e = Point3(-1.0, 1.0, -1.0)\n f = Point3(1.0, 1.0, -1.0)\n g = Point3(1.0, 1.0, 1.0)\n h = Point3(-1.0, 1.0, 1.0)\n\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glBegin(GL_QUADS)\n glColor4fv(color)\n drawVertexListCreateNormal([a, b, c, d])\n drawVertexListCreateNormal([b, f, g, c])\n drawVertexListCreateNormal([f, e, h, g])\n drawVertexListCreateNormal([e, a, d, h])\n drawVertexListCreateNormal([d, c, g, h])\n drawVertexListCreateNormal([a, e, f, b])\n glEnd()\n glPopMatrix()\n glEndList()\n return obj", "def __init__(self, cube_file):\n\t\timport pyfits, pywcs\n\t\t# Put the cube in RA - DEC - RM order and save it\n\t\tCube.__init__(self, np.transpose(pyfits.getdata(cube_file), (2, 1, 0)))\n\t\tself.wcs = pywcs.WCS(pyfits.getheader(cube_file))\n\n\t\tsky0 = self.pix2sky([0,0,0])\n\t \tskyN = self.pix2sky([self.x_max,self.y_max,self.z_max])\n\t \tself.ra_min = min(sky0[0],skyN[0])\n\t\tself.ra_max = max(sky0[0],skyN[0])\n\t\tself.ra_step = (self.ra_max-self.ra_min)/self.x_max\n\t \tself.dec_min = min(sky0[1],skyN[1])\n\t self.dec_max = max(sky0[1],skyN[1])\n\t\tself.dec_step = 
(self.dec_max-self.dec_min)/self.y_max\n\t\tself.fd_min = min(sky0[2],skyN[2])\n\t\tself.fd_max = max(sky0[2],skyN[2])\n\t\tself.fd_step = (self.fd_max-self.fd_min)/self.z_max", "def map_face(self):\n #Array Order: U0,D1,R2,L3,F4,B5,\n \n cube_list = []\n cube_list = self.cube.definition()\n \n for index, cubit in enumerate(self.faces['Up']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index]])\n for index, cubit in enumerate(self.faces['Ri']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+9]])\n for index, cubit in enumerate(self.faces['Ft']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+18]])\n for index, cubit in enumerate(self.faces['Dn']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+27]])\n for index, cubit in enumerate(self.faces['Le']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+36]])\n for index, cubit in enumerate(self.faces['Bk']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+45]])", "def flatten_cube(cube):\n nb=cube.shape[0]\n cube_flattened = np.zeros_like(cube)\n for i in range(nb):\n cube_flattened[i,:,:] = flatten_image(cube[i,:,:])\n return cube_flattened", "def cube_data(self):\n cube_data = copy.deepcopy(self.data)\n cube_data.shape = [self.nints * self.ngroups, self.rows, self.columns]\n return cube_data", "def create_cube_solid(color=COLOR_WHITE):\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glColor4fv(color)\n try:\n glutSolidCube(1.0)\n except:\n if not _ERRS[3]:\n printGLError(\n \"la version actual de OpenGL no posee la funcion glutSolidCube\")\n _ERRS[3] = True\n glPopMatrix()\n glEndList()\n return obj", "def faces(self) -> Polygon:\n return Polygon(self.array, copy=False)", "def polyCube(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True,\n constructionHistory: bool=True, createUVs: Union[int, bool]=3, depth: Union[float,\n bool]=1.0, height: Union[float, bool]=1.0, name: AnyStr=\"\", nodeState: Union[int,\n bool]=0, object: bool=True, subdivisionsDepth: Union[int, bool]=1,\n subdivisionsHeight: Union[int, bool]=1, subdivisionsWidth: Union[int, bool]=1,\n subdivisionsX: Union[int, bool]=1, subdivisionsY: Union[int, bool]=1,\n subdivisionsZ: Union[int, bool]=1, texture: Union[int, bool]=1, width: Union[float,\n bool]=1.0, q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr],\n Any]:\n pass", "def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 
0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 
0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)", "def genCubes():\n offset = vpy.vector(.5, .5, .5)\n size = vpy.vector(.2, .2, .2)\n B1 = vpy.box(pos=vpy.vector(0, 0, 0)-offset,\n color=vpy.vector(0, 0, 0), size=size, make_trail=True)\n B2 = vpy.box(pos=vpy.vector(0, 0, 1)-offset,\n color=vpy.vector(0, 0, 1), size=size, make_trail=True)\n B3 = vpy.box(pos=vpy.vector(0, 1, 1)-offset,\n color=vpy.vector(0, 1, 1), size=size, make_trail=True)\n B4 = vpy.box(pos=vpy.vector(0, 1, 0)-offset,\n color=vpy.vector(0, 1, 0), size=size, make_trail=True)\n\n B5 = vpy.box(pos=vpy.vector(1, 0, 0)-offset,\n color=vpy.vector(1, 0, 0), size=size, make_trail=True)\n B6 = vpy.box(pos=vpy.vector(1, 0, 1)-offset,\n color=vpy.vector(1, 0, 1), size=size, make_trail=True)\n B7 = vpy.box(pos=vpy.vector(1, 1, 0)-offset,\n color=vpy.vector(1, 1, 0), size=size, make_trail=True)\n B8 = vpy.box(pos=vpy.vector(1, 1, 1)-offset,\n color=vpy.vector(1, 1, 1), size=size, make_trail=True)\n\n return [B1, B2, B3, B4, B5, B6, B7, B8]", "def faces(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._faces", "def _getFacesAndMaterials_bpy(self):\r\n obj = self.obj\r\n mesh = obj.data\r\n polygonDict = {} # a dict that holds faces (dict), their vertices (dict: positions and materials)\r\n # self._checkForUndoMess()\r\n\r\n for n in range (0, len(mesh.polygons)):\r\n f = mesh.polygons[n] # current face\r\n\r\n # create local dict\r\n d = {}\r\n\r\n # get face material\r\n slot = obj.material_slots[f.material_index]\r\n mat = slot.material\r\n d['material'] = mat.name\r\n\r\n # get face vertices\r\n v_list = []\r\n for v in f.vertices: # browse through vertice index\r\n vect = obj.matrix_world * mesh.vertices[v].co\r\n v_list.append(vect)\r\n \r\n # add third twice for triangle face (expected by evertims raytracing client)\r\n if( len(f.vertices) == 3 ): \r\n vect = obj.matrix_world * mesh.vertices[ f.vertices[2] ].co\r\n v_list.append(vect)\r\n\r\n d['vertices'] = v_list\r\n\r\n # store local dict\r\n polygonDict[n] = d\r\n return polygonDict", "def create_cube(scale=(1.0,1.0,1.0), st=False, rgba=False, dtype='float32', type='triangles'):\n\n shape = [24, 3]\n rgba_offset = 3\n\n width, height, depth = scale\n # half the dimensions\n width /= 2.0\n height /= 2.0\n depth /= 2.0\n\n vertices = np.array([\n # front\n # top right\n ( width, height, depth,),\n # top left\n (-width, height, depth,),\n # bottom left\n (-width,-height, depth,),\n # bottom right\n ( width,-height, depth,),\n\n # right\n # top right\n ( width, height,-depth),\n # top left\n ( width, height, depth),\n # bottom left\n ( width,-height, depth),\n # bottom right\n ( width,-height,-depth),\n\n # back\n # top right\n (-width, height,-depth),\n # top left\n ( width, height,-depth),\n # bottom left\n ( width,-height,-depth),\n # bottom right\n (-width,-height,-depth),\n\n # left\n # top right\n (-width, height, depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n (-width,-height, depth),\n\n # top\n # top right\n ( width, height,-depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width, height, depth),\n # bottom right\n ( width, height, depth),\n\n # bottom\n # top right\n ( width,-height, depth),\n # top left\n (-width,-height, depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n ( width,-height,-depth),\n ], dtype=dtype)\n\n st_values = None\n rgba_values = None\n\n if st:\n # 
default st values\n st_values = np.tile(\n np.array([\n (1.0, 1.0,),\n (0.0, 1.0,),\n (0.0, 0.0,),\n (1.0, 0.0,),\n ], dtype=dtype),\n (6,1,)\n )\n\n if isinstance(st, bool):\n pass\n elif isinstance(st, (int, float)):\n st_values *= st\n elif isinstance(st, (list, tuple, np.ndarray)):\n st = np.array(st, dtype=dtype)\n if st.shape == (2,2,):\n # min / max\n st_values *= st[1] - st[0]\n st_values += st[0]\n elif st.shape == (4,2,):\n # per face st values specified manually\n st_values[:] = np.tile(st, (6,1,))\n elif st.shape == (6,2,):\n # st values specified manually\n st_values[:] = st\n else:\n raise ValueError('Invalid shape for st')\n else:\n raise ValueError('Invalid value for st')\n\n shape[-1] += st_values.shape[-1]\n rgba_offset += st_values.shape[-1]\n\n if rgba:\n # default rgba values\n rgba_values = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=dtype), (24,1,))\n\n if isinstance(rgba, bool):\n pass\n elif isinstance(rgba, (int, float)):\n # int / float expands to RGBA with all values == value\n rgba_values *= rgba \n elif isinstance(rgba, (list, tuple, np.ndarray)):\n rgba = np.array(rgba, dtype=dtype)\n\n if rgba.shape == (3,):\n rgba_values = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,):\n rgba_values[:] = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,3,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (4,4,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (6,3,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (6,4,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (24,3,):\n rgba_values = rgba\n elif rgba.shape == (24,4,):\n rgba_values = rgba\n else:\n raise ValueError('Invalid shape for rgba')\n else:\n raise ValueError('Invalid value for rgba')\n\n shape[-1] += rgba_values.shape[-1]\n\n data = np.empty(shape, dtype=dtype)\n data[:,:3] = vertices\n if st_values is not None:\n data[:,3:5] = st_values\n if rgba_values is not None:\n data[:,rgba_offset:] = rgba_values\n\n if type == 'triangles':\n # counter clockwise\n # top right -> top left -> bottom left\n # top right -> bottom left -> bottom right\n indices = np.tile(np.array([0, 1, 2, 0, 2, 3], dtype='int'), (6,1))\n for face in range(6):\n indices[face] += (face * 4)\n indices.shape = (-1,)\n elif type == 'triangle_strip':\n raise NotImplementedError\n elif type == 'triangle_fan':\n raise NotImplementedError\n elif type == 'quads':\n raise NotImplementedError\n elif type == 'quad_strip':\n raise NotImplementedError\n else:\n raise ValueError('Unknown type')\n\n return data, indices", "def cube_colors(self, cubes):\n n = cubes.shape[0]\n col = np.zeros((n ** 3, 3))\n terrain_col = (66, 244, 72)\n empty_col = self.background\n for i in range(n):\n for j in range(n):\n for k in range(n):\n c = cubes[i, j, k]\n col[i * n ** 2 + j * n + k] = empty_col if c.state == 'empty' else terrain_col\n self.wireframe_col = col" ]
[ "0.682039", "0.67192125", "0.6679414", "0.6615704", "0.6559546", "0.654458", "0.6534792", "0.64524424", "0.637708", "0.63354915", "0.62427455", "0.6239464", "0.62222666", "0.6202463", "0.61873555", "0.612637", "0.6067909", "0.60508484", "0.6008664", "0.600427", "0.598841", "0.5915782", "0.5873456", "0.5867144", "0.5844306", "0.5835761", "0.5826171", "0.5819677", "0.58174014", "0.5814598" ]
0.801218
0
Check correctness of 2d shape distance implementations.
def assert_distance_to_surface_2d(shape, angles, computed_distance):
    xy = np.array(
        [computed_distance * np.cos(angles), computed_distance * np.sin(angles)]
    )
    xy = np.transpose(xy)
    hull = ConvexHull(xy)

    # Test the area
    assert np.isclose(shape.area, hull.volume)

    # Test the circumference
    assert np.isclose(shape.perimeter, hull.area)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shapeCompare(*args, **kwargs)->int:\n pass", "def distance_checker(xyz1, xyz2):\n return math.sqrt((xyz1[0] - xyz2[0])**2 + (xyz1[1] - xyz2[1])**2 +\n (xyz1[2] - xyz2[2])**2)", "def check_shape(layer1, layer2, attr):\n attr1 = getattr(layer1, attr, None)\n attr2 = getattr(layer2, attr, None)\n if not attr1:\n return not attr2\n return all(attr1.shape.eval() == attr2.shape.eval())", "def _shape_compare(shape1, shape2):\n if len(shape1) != len(shape2):\n return False\n for s1, s2 in zip(shape1, shape2):\n if s1 != s2:\n return False\n return True", "def nd_shape_checking(x, y, mvaxis, traxis):\n assert x.ndim == y.ndim\n dims = np.delete(np.arange(x.ndim), -2)\n assert all([x.shape[k] == y.shape[k] for k in dims])", "def testIsotropicDistance(self):\n (w,h) = self.im1_1.getSize()\n \n self.im1_1.reset()\n drawSquare(self.im1_1, (w//2-1, h//2-1, w//2+1, h//2+1), 1)\n \n self.im8_3.reset()\n drawSquare(self.im8_3, (w//2-1, h//2-1, w//2+1, h//2+1), 1)\n self.im8_3.setPixel(2, (w//2, h//2))\n isotropicDistance(self.im1_1, self.im8_1)\n (x,y) = compare(self.im8_1, self.im8_3, self.im8_2)\n self.assertTrue(x<0)", "def test_euclidean_distance_2dimension(self):\n dist_from_origin = lambda v: euclidean_distance([0, 0], v)\n\n # Do it first with known whole numbers\n self.assertEqual(5, dist_from_origin([3, 4]))\n self.assertEqual(5, dist_from_origin([-3, -4]))\n\n self.assertEqual(13, dist_from_origin([5, 12]))\n self.assertEqual(13, dist_from_origin([-5, -12]))\n\n self.assertEqual(15, dist_from_origin([9, 12]))\n self.assertEqual(15, dist_from_origin([-9, -12]))\n\n self.assertEqual(52, dist_from_origin([20, 48]))\n self.assertEqual(52, dist_from_origin([-20, -48]))\n\n # Next we will do it with a couple doubles, but we need\n # to round\n self.assertEqual(8.6023, round(dist_from_origin([5, 7]), 4))\n self.assertEqual(8.6023, round(dist_from_origin([-5, -7]), 4))\n\n self.assertEqual(13.0384, round(dist_from_origin([7, 11]), 4))\n self.assertEqual(13.0384, round(dist_from_origin([-7, -11]), 4))\n\n self.assertEqual(17.0294, round(dist_from_origin([11, 13]), 4))\n self.assertEqual(17.0294, round(dist_from_origin([-11, -13]), 4))\n\n dist_from_initial = lambda v: euclidean_distance([7, 23], v)\n\n # Do it first with known whole numbers\n self.assertEqual(5, dist_from_initial([7 + 3, 23 + 4]))\n self.assertEqual(5, dist_from_initial([7 - 3, 23 - 4]))\n\n self.assertEqual(13, dist_from_initial([7 + 5, 23 + 12]))\n self.assertEqual(13, dist_from_initial([7 - 5, 23 - 12]))\n\n self.assertEqual(15, dist_from_initial([7 + 9, 23 + 12]))\n self.assertEqual(15, dist_from_initial([7 - 9, 23 - 12]))\n\n self.assertEqual(52, dist_from_initial([7 + 20, 23 + 48]))\n self.assertEqual(52, dist_from_initial([7 + 20, 23 + 48]))\n\n # Next we do it with some doubles which require rounding\n self.assertEqual(8.6023, round(dist_from_initial([7 + 5, 23 + 7]), 4))\n self.assertEqual(8.6023, round(dist_from_initial([7 - 5, 23 - 7]), 4))\n\n self.assertEqual(13.0384, round(dist_from_initial([7 + 7, 23 + 11]), 4))\n self.assertEqual(13.0384, round(dist_from_initial([7 - 7, 23 - 11]), 4))\n\n self.assertEqual(17.0294, round(dist_from_initial([7 + 11, 23 + 13]), 4))\n self.assertEqual(17.0294, round(dist_from_initial([7 - 11, 23 - 13]), 4))", "def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def 
test_shape_fail():\n lons, lats = np.arange(10), np.arange(10).reshape(5, 2)\n emsg = \"Require longitudes and latitudes with same shape\"\n with pytest.raises(ValueError, match=emsg):\n _ = to_cartesian(lons, lats)", "def test_distance(self):\n self.assertTrue(np.allclose(self.vectors.distance('dog.n.01', 'mammal.n.01'), 4.5278745))\n self.assertEqual(self.vectors.distance('dog.n.01', 'dog.n.01'), 0)", "def validate_distance_input(X):\n\n \"\"\"The distance array should be square and rank 2\"\"\"\n msg='Distance Array should be 2-dimensional, got {} dimensions'\n assert(np.ndim(X) == 2), msg.format(np.ndim(X))\n msg='Distance Array should be square, got {} shape'\n assert(X.shape[0] == X.shape[1]), msg.format(X.shape)\n\n \"\"\"The array should be uppper triangular with zeros in the bottom\n diagonal AND along the diagonal\"\"\"\n lower_indicies = np.tril_indices(X.shape[0], 0)\n if not np.all(X[lower_indicies] == 0):\n msg=('All Lower Triangular elements of the distance array should be' +\n 'Zero. got {} Non-Zero Indicies')\n non_zero = np.where(X[lower_indicies] != 0)\n raise ValueError(msg.format(X[non_zero]))\n\n return None", "def test_distances(self):\n\n cent_1 = np.array([0.5, 0.5])\n verts_1 = np.array([[0., 1.], [0., 0.], [1., 0.], [1., 1.]])\n cent_2 = cent_1 - 0.5\n verts_2 = verts_1 - np.array([0.5, 0.5])\n\n # Compare the center-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.cvdist(verts_1, cent_1) == po.cvdist(verts_2, cent_2)))\n # Compare the vertex-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.vvdist(verts_1) == po.vvdist(verts_2)))", "def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be ({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err", "def testIsotropicDistanceDepthAcceptance(self):\n self.assertRaises(MambaError, isotropicDistance, self.im8_1, self.im8_2)\n self.assertRaises(MambaError, isotropicDistance, self.im32_1, self.im8_2)", "def is_valid_size(self, dot_width, dot_height, distance, screen_width, screen_height):\n if dot_width * distance > screen_width or dot_height * distance > screen_height:\n return False\n return True", "def is_valid_overlap_xy(dir_id, p1, p2, pattern_catalog, pattern_width, adjacency_directions):\n #dir_corrected = (0 - adjacency_directions[dir_id].x, 0 - adjacency_directions[dir_id].y)\n dir_corrected = (0 + adjacency_directions[dir_id].x, 0 + 
adjacency_directions[dir_id].y)\n dimensions = (1,0)\n not_a_number = -1\n #TODO: can probably speed this up by using the right slices, rather than rolling the whole pattern...\n #print(d, p2, p1)\n shifted = np.roll(np.pad(pattern_catalog[p2], pattern_width, mode='constant', constant_values = not_a_number), dir_corrected, dimensions)\n compare = shifted[pattern_width:pattern_width+pattern_width, pattern_width:pattern_width+pattern_width]\n left = max(0,0 + dir_corrected[0])\n right = min(pattern_width, pattern_width + dir_corrected[0])\n top = max(0,0 + dir_corrected[1])\n bottom = min(pattern_width, pattern_width + dir_corrected[1])\n a = pattern_catalog[p1][top:bottom,left:right]\n b = compare[top:bottom,left:right]\n res = np.array_equal(a,b)\n #print(f\"res: {res}\")\n return res", "def is_distance(x, y):\n assert (x.dtype == np.float64 and y.dtype == np.float64) or (\n x.dtype == np.float32 and y.dtype == np.float32)\n\n # TODO\n raise NotImplementedError", "def test_point_within_dimensions_border():\n point = np.array([100, 20])\n image_dimensions = np.array([100, 100])\n assert not point_within_dimensions(point, image_dimensions)", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def test_point_within_dimensions_true():\n point = np.array([10, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def find_dimensions_not_attentive_imgs(y1, x2, y2, x1, shape):\n if y1 - 0.20 * y1 > 0:\n y1 = int(y1 - 0.20 * y1)\n elif y1 - 0.1 * y1 > 0:\n y1 = int(y1 - 0.1 * y1)\n\n if x1 - 0.1 * x1 > 0:\n x1 = int(x1 - 0.1 * x1)\n\n if y2 + 0.1 * y2 < shape[0]:\n y2 = int(y2 + 0.1 * y2)\n\n if x2 + 0.1 * x2 < shape[1]:\n x2 = int(x2 + 0.1 * x2)\n\n return y1, x2, y2, x1", "def test_euclidean_distance_Ndimension(self):\n\n self.assertEqual(15, euclidean_distance([0, 0, 0], [10, 10, 5]))\n self.assertEqual(15, euclidean_distance([0, 0, 0], [-10, -10, -5]))\n\n self.assertEqual(17, euclidean_distance([0, 0, 0, 0], [10, 10, 8, 5]))\n self.assertEqual(17, euclidean_distance([0, 0, 0, 0], [-10, -10, -8, -5]))\n\n self.assertEqual(8, euclidean_distance([0, 0, 0, 0, 0], [5, 1, 1, 1, 6]))\n self.assertEqual(8, euclidean_distance([0, 0, 0, 0, 0], [-5, -1, -1, -1, -6]))", "def test_distance(self):\n for emb_vals, point, dist_gt in self.DISTANCE_EXAMPLES:\n print(emb_vals, point, dist_gt)\n emb = to_emb(emb_vals)\n dist = emb.distance(point)\n assert np.allclose(dist, dist_gt), \\\n (\"Wrong distance for point {}: expected {} but was {};\"\n \"\\nembedding:\\n{}\").format(point, dist_gt, dist, str(emb))", "def test_point_within_dimensions_invalid_sizes():\n point = np.array([20, 20, 20])\n image_dimensions = np.array([100, 100])\n\n with pytest.raises(AssertionError):\n assert not point_within_dimensions(point, image_dimensions)\n\n point = np.array([20, 20])\n image_dimensions = np.array([100, 100, 100])\n\n with pytest.raises(AssertionError):\n assert not point_within_dimensions(point, image_dimensions)", "def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)", "def test_distance(layout, gain_calc):\n def direct_gains(size, distance):\n block_format = AudioBlockFormatObjects(position=dict(azimuth=0, elevation=0, distance=distance),\n width=size, height=size)\n return gain_calc.render(ObjectTypeMetadata(block_format)).direct\n\n for size in [0, 30]:\n distances = np.linspace(0, 1, 10)\n pvs = 
np.array([direct_gains(size, distance) for distance in distances])\n ev_len = np.linalg.norm(np.square(pvs).dot(layout.norm_positions), axis=1)\n assert np.all(np.diff(ev_len) > 0)", "def check_2x2_solved(self):\n return self._grid[0][0] == 0 and self._grid[0][1] == 1 \\\n and self._grid[1][0] == self._width*1 and self._grid[1][1] == (1 + self._width * 1)", "def test_euclidean_distance(self):\n knn = Knn(n_neighbors=3)\n knn.fit(np.array(little_X), little_Y)\n d = knn._euclidean_distance(np.array([5, 6]))\n assert (d == [5,5]).all(), \"Euclidean Distance is not correct\"", "def Distance(item0, item1):\n \n SumOfDims = len(item0) + len(item1)\n \n if SumOfDims == 4:\n # 2D points/indices.\n distance = ((item0[0] - item1[0])**2 \\\n + (item0[1] - item1[1])**2)**0.5\n \n return distance\n \n elif SumOfDims == 6:\n # 3D points/indices.\n distance = ((item0[0] - item1[0])**2 \\\n + (item0[1] - item1[1])**2 \\\n + (item0[2] - item1[2])**2)**0.5\n \n return distance\n \n \n else:\n msg = \"The inputs must both be 2D or 3D lists of points/indices.\"\n \n raise Exception(msg)", "def find_dimensions_attentive_imgs(y1, x2, y2, x1, shape):\n if y1 - 0.20 * y1 > 0:\n y1 = int(y1 - 0.20 * y1)\n elif y1 - 0.15 * y1 > 0:\n y1 = int(y1 - 0.15 * y1)\n elif y1 - 0.1 * y1 > 0:\n y1 = int(y1 - 0.1 * y1)\n\n # manipulate x1\n if x1 - 0.1 * x1 > 0:\n x1 = int(x1 - 0.1 * x1)\n\n # manipulate y2\n if y2 + 0.1 * y2 < shape[0]:\n y2 = int(y2 + 0.1 * y2)\n\n # manipulate x2\n if x2 + 0.1 * x2 < shape[1]:\n x2 = int(x2 + 0.1 * x2)\n\n return y1, x2, y2, x1" ]
[ "0.66555154", "0.63205105", "0.63129497", "0.63082397", "0.628651", "0.62430817", "0.6223579", "0.61524224", "0.6145492", "0.6106977", "0.6094515", "0.60916805", "0.60875034", "0.6085718", "0.6083386", "0.6075494", "0.6069088", "0.60206515", "0.6012316", "0.60007405", "0.59624594", "0.59494096", "0.59465927", "0.59185886", "0.5892585", "0.58613694", "0.5847765", "0.58468086", "0.5830464", "0.5826646" ]
0.6402657
1
Generate a quaternion from axis [x, y, z] and angle theta.
def quaternion_from_axis_angle(x, y, z, theta): if x == y == z == 0: return np.array([1, 0, 0, 0]) axis = np.array([x, y, z]) axis /= np.linalg.norm(axis) return rowan.from_axis_angle(axis, theta)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def angle_axis_to_quaternion(axis, theta):\n if isinstance(axis, string_types):\n axis = axis.lower()\n if axis == 'x':\n axis = np.array([1., 0., 0.])\n elif axis == 'y':\n axis = np.array([0., 1., 0.])\n elif axis == 'z':\n axis = np.array([0., 0., 1.])\n else:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n elif len(axis) != 3:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n axis /= np.linalg.norm(axis)\n quat = np.zeros(4)\n angle = theta/2\n quat[0] = np.cos(angle)\n quat[1:] = np.sin(angle) * axis\n\n return quat", "def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:\n if not torch.is_tensor(angle_axis):\n raise TypeError(\"Input type is not a torch.Tensor. Got {}\".format(\n type(angle_axis)))\n\n if not angle_axis.shape[-1] == 3:\n raise ValueError(\"Input must be a tensor of shape Nx3 or 3. Got {}\"\n .format(angle_axis.shape))\n # unpack input and compute conversion\n a0: torch.Tensor = angle_axis[..., 0:1]\n a1: torch.Tensor = angle_axis[..., 1:2]\n a2: torch.Tensor = angle_axis[..., 2:3]\n theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2\n\n theta: torch.Tensor = torch.sqrt(theta_squared)\n half_theta: torch.Tensor = theta * 0.5\n\n mask: torch.Tensor = theta_squared > 0.0\n ones: torch.Tensor = torch.ones_like(half_theta)\n\n k_neg: torch.Tensor = 0.5 * ones\n k_pos: torch.Tensor = torch.sin(half_theta) / theta\n k: torch.Tensor = torch.where(mask, k_pos, k_neg)\n w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)\n\n quaternion: torch.Tensor = torch.zeros_like(angle_axis)\n quaternion[..., 0:1] += a0 * k\n quaternion[..., 1:2] += a1 * k\n quaternion[..., 2:3] += a2 * k\n return torch.cat([w, quaternion], dim=-1)", "def angle_to_quaternion(angle):\n\treturn Quaternion(*tf.transformations.quaternion_from_euler(0, 0, angle))", "def angle_to_quaternion(angle):\n return Quaternion(*tf.transformations.quaternion_from_euler(0, 0, angle))", "def axang2quat(ax_ang):\n\n if ax_ang.ndim == 1:\n if np.size(ax_ang) == 5:\n ax_ang = np.reshape(ax_ang, (5, 1))\n msz = 1\n elif np.size(ax_ang) == 4:\n ax_ang = np.reshape(np.hstack((ax_ang, np.array([1]))), (5, 1))\n msz = 1\n else:\n raise Exception('Wrong Input Type')\n elif ax_ang.ndim == 2:\n if np.shape(ax_ang)[0] == 5:\n msz = np.shape(ax_ang)[1]\n elif np.shape(ax_ang)[1] == 5:\n ax_ang = ax_ang.transpose()\n msz = np.shape(ax_ang)[1]\n else:\n raise Exception('Wrong Input Type')\n else:\n raise Exception('Wrong Input Type')\n\n direction = ax_ang[0:3, :]\n angle = ax_ang[3, :]\n\n d = np.array(direction, dtype=np.float64)\n d /= np.linalg.norm(d, axis=0)\n x = d[0, :]\n y = d[1, :]\n z = d[2, :]\n q0 = np.cos(angle/2)\n s = np.sin(angle/2)\n\n q1 = x*s\n q2 = y*s\n q3 = z*s\n\n qtype = 0*q3;\n inds1 = np.where(ax_ang[4, :] == -1); qtype[inds1] = -1;\n inds2 = np.where(ax_ang[4, :] == 1); qtype[inds2] = 1;\n\n return quat.Quaternion(q0, q1, q2, q3, qtype)", "def quaternion_about_axis(angle, axis):\r\n q = numpy.array([0.0, axis[0], axis[1], axis[2]])\r\n qlen = vector_norm(q)\r\n if qlen > _EPS:\r\n q *= math.sin(angle/2.0) / qlen\r\n q[0] = math.cos(angle/2.0)\r\n return q", "def v_theta_to_quaternion(v, theta):\r\n v_x, v_y, v_z = v[0], v[1], v[2]\r\n w = math.cos(theta / 2)\r\n x = v_x * math.sin(theta / 2)\r\n y = v_y * math.sin(theta / 2)\r\n z = v_z * math.sin(theta / 2)\r\n return w, x, y, z", "def euler_to_quaternion(psi, theta, phi):\n # Abbreviations for the various angular functions\n cy = np.cos(psi * 0.5)\n sy = np.sin(psi * 0.5)\n 
cp = np.cos(theta * 0.5)\n sp = np.sin(theta * 0.5)\n cr = np.cos(phi * 0.5)\n sr = np.sin(phi * 0.5)\n\n q = np.zeros(4)\n q[0] = cy * cp * cr + sy * sp * sr\n q[1] = cy * cp * sr - sy * sp * cr\n q[2] = sy * cp * sr + cy * sp * cr\n q[3] = sy * cp * cr - cy * sp * sr\n return q", "def _create_quaternion(direction, up) -> Tuple[float, float, float, float]:\n direction = direction / spy.vnorm(direction)\n up = up / spy.vnorm(up)\n\n x = spy.vcrss(up, direction)\n x = x / spy.vnorm(x)\n y = spy.vcrss(direction, x)\n y = y / spy.vnorm(y)\n z = direction\n\n r = sqrt(1.0 + x[0] + y[1] + z[2]) * 0.5\n i = (y[2] - z[1]) / (4 * r)\n j = (z[0] - x[2]) / (4 * r)\n k = (x[1] - y[0]) / (4 * r)\n\n return r, i, j, k", "def qrotate(points, axis, theta):\n q = Quaternion.rotator(axis, theta)\n return q.rotate(points)", "def euler_to_quaternion(yaw, pitch, roll):\r\n cy = math.cos(yaw * 0.5)\r\n sy = math.sin(yaw * 0.5)\r\n cp = math.cos(pitch * 0.5)\r\n sp = math.sin(pitch * 0.5)\r\n cr = math.cos(roll * 0.5)\r\n sr = math.sin(roll * 0.5)\r\n w = cy * cp * cr + sy * sp * sr\r\n x = cy * cp * sr - sy * sp * cr\r\n y = sy * cp * sr + cy * sp * cr\r\n z = sy * cp * cr - cy * sp * sr\r\n return w, x, y, z", "def _quaternions(self, R):\n # Simple Wikipedia version\n # en.wikipedia.org/wiki/Rotation_matrix#Quaternion\n # For other options see math.stackexchange.com/questions/2074316/calculating-rotation-axis-from-rotation-matrix\n diag = torch.diagonal(R, dim1=-2, dim2=-1)\n Rxx, Ryy, Rzz = diag.unbind(-1)\n magnitudes = 0.5 * torch.sqrt(torch.abs(1 + torch.stack([\n Rxx - Ryy - Rzz, \n - Rxx + Ryy - Rzz, \n - Rxx - Ryy + Rzz\n ], -1)))\n _R = lambda i,j: R[:,:,:,i,j]\n signs = torch.sign(torch.stack([\n _R(2,1) - _R(1,2),\n _R(0,2) - _R(2,0),\n _R(1,0) - _R(0,1)\n ], -1))\n xyz = signs * magnitudes\n # The relu enforces a non-negative trace\n w = torch.sqrt(F.relu(1 + diag.sum(-1, keepdim=True))) / 2.\n Q = torch.cat((xyz, w), -1)\n Q = F.normalize(Q, dim=-1)\n return Q", "def to_quaternion(self, roll=0.0, pitch=0.0, yaw=0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]", "def random_quaternion():\n\n import numpy as np\n \n while True: # Loop until within unit disk\n zeta = 2.0*np.random.rand(2) - 1.0 # Two uniform random numbers between -1 and 1\n norm1 = np.sum ( zeta**2 ) # Squared magnitude\n if norm1 < 1.0: # Test for within unit disk\n break\n\n while True: # Loop until within unit disk\n beta = 2.0*np.random.rand(2) - 1.0 # Two uniform random numbers between -1 and 1\n norm2 = np.sum ( beta**2 ) # Squared magnitude\n if norm2 < 1.0: # Test for within unit disk\n break\n\n f = np.sqrt ( ( 1.0 - norm1 ) / norm2 )\n return np.array ( ( zeta[0], zeta[1], beta[0]*f, beta[1]*f ), dtype=np.float_ ) # Random quaternion", "def RPY_to_quaternion(phi: float, theta: float, psi: float):\n\n quaternion = zeros(4)\n quaternion[0] = cos(phi / 2) * cos(theta / 2) * cos(psi / 2) + sin(phi / 2) * sin(theta / 2) * sin(psi / 2)\n quaternion[1] = sin(phi / 2) * cos(theta / 2) * cos(psi / 2) - cos(phi / 2) * sin(theta / 2) * sin(psi / 2)\n quaternion[2] = cos(phi / 2) * sin(theta / 2) * cos(psi / 2) + sin(phi / 2) * cos(theta / 2) * sin(psi / 2)\n 
quaternion[3] = cos(phi / 2) * cos(theta / 2) * sin(psi / 2) - sin(phi / 2) * sin(theta / 2) * cos(psi / 2)\n\n return quaternion", "def rotation3Dz(theta):\n rmat = np.zeros((3,3))\n rmat[0,0] = rmat[1,1] = np.cos(theta)\n rmat[0,1] = np.sin(theta)\n rmat[1,0] = -rmat[0,1]\n rmat[2,2] = 1\n return rmat", "def quaternion_to_angle(q):\n x, y, z, w = q.x, q.y, q.z, q.w\n roll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n return yaw", "def euler_to_quat(self, yaw):\n quat_array = t.quaternion_from_euler(0.0, 0.0, yaw)\n return Quaternion(quat_array[0], quat_array[1], quat_array[2], quat_array[3])", "def quaternion_to_angle(self, q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def sample_quat(angle3):\r\n roll = np.random.uniform(*angle3[0]) * np.pi / 180\r\n pitch = np.random.uniform(*angle3[1]) * np.pi / 180\r\n yaw = np.random.uniform(*angle3[2]) * np.pi / 180\r\n\r\n quat = quaternion.from_euler_angles(roll, pitch, yaw)\r\n return quat.normalized().components", "def get_random_quaternion(self):\n random_angles = self.get_random_vector([0,0,0], [2*np.pi, 2*np.pi, 1])\n return tf.transformations.quaternion_from_euler(random_angles[0],\n random_angles[1],\n 0)", "def quaternion_to_angle(q):\n\tx, y, z, w = q.x, q.y, q.z, q.w\n\troll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))\n\treturn yaw", "def _rotate_quaternion(self, q):\n self._normalise()\n return self * q * self.conjugate", "def angle_axis_to_rot3d(axis, theta):\n if isinstance(axis, string_types):\n axis = axis.lower()\n if axis == 'x':\n axis = np.array([1., 0., 0.])\n elif axis == 'y':\n axis = np.array([0., 1., 0.])\n elif axis == 'z':\n axis = np.array([0., 0., 1.])\n else:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n elif len(axis) != 3:\n raise ValueError(\"Axis should be 'x', 'y', 'z' or a 3D vector.\")\n axis = axis.astype(float)\n axis /= np.linalg.norm(axis)\n a = axis[0]\n b = axis[1]\n c = axis[2]\n cos_theta = np.cos(theta)\n bracket = 1 - cos_theta\n a_bracket = a * bracket\n b_bracket = b * bracket\n c_bracket = c * bracket\n sin_theta = np.sin(theta)\n a_sin_theta = a * sin_theta\n b_sin_theta = b * sin_theta\n c_sin_theta = c * sin_theta\n rot3d = np.array(\n [[a * a_bracket + cos_theta, a * b_bracket - c_sin_theta, a * c_bracket + b_sin_theta],\n [b * a_bracket + c_sin_theta, b * b_bracket + cos_theta, b * c_bracket - a_sin_theta],\n [c * a_bracket - b_sin_theta, c * b_bracket + a_sin_theta, c * c_bracket + cos_theta]])\n return rot3d", "def to_quaternion(self,roll=0.0, pitch=0.0, yaw=0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]", "def euler2quaternion(psi, theta, phi):\n if abs(psi) == 0 and abs(theta) == 0 and abs(phi) == 0:\n quaternion = np.array([1., 0., 0., 0.])\n else:\n R = euler2rot3D(psi, theta, phi)\n W = np.array([R[1, 2]-R[2, 1], R[2, 0]-R[0, 2], R[0, 1]-R[1, 0]])\n if W[0] >= 0:\n W /= np.linalg.norm(W)\n else:\n W /= np.linalg.norm(W) * -1\n theta = np.arccos(0.5 * (np.trace(R) - 1))\n CCisTheta = corrCoeff(R, angleAxis2rot3D(W, theta))\n CCisNegTheta = corrCoeff(R, 
angleAxis2rot3D(W, -theta))\n if CCisNegTheta > CCisTheta:\n theta = -theta\n quaternion = np.array([np.cos(theta/2.), np.sin(theta/2.)*W[0], np.sin(theta/2.)*W[1], np.sin(theta/2.)*W[2]])\n if quaternion[0] < 0:\n quaternion *= -1\n return quaternion", "def to_quaternion(roll = 0.0, pitch = 0.0, yaw = 0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]", "def to_quaternion(roll = 0.0, pitch = 0.0, yaw = 0.0):\n t0 = math.cos(math.radians(yaw * 0.5))\n t1 = math.sin(math.radians(yaw * 0.5))\n t2 = math.cos(math.radians(roll * 0.5))\n t3 = math.sin(math.radians(roll * 0.5))\n t4 = math.cos(math.radians(pitch * 0.5))\n t5 = math.sin(math.radians(pitch * 0.5))\n\n w = t0 * t2 * t4 + t1 * t3 * t5\n x = t0 * t3 * t4 - t1 * t2 * t5\n y = t0 * t2 * t5 + t1 * t3 * t4\n z = t1 * t2 * t4 - t0 * t3 * t5\n\n return [w, x, y, z]", "def rotation3D_z(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])", "def quaternion(self, name, q):\n R = self.R(name=name, q=q)\n quat = transformations.unit_vector(\n transformations.quaternion_from_matrix(matrix=R))\n return quat" ]
[ "0.76926434", "0.7200943", "0.7178451", "0.71368057", "0.69507265", "0.6892011", "0.6861867", "0.68281937", "0.6820917", "0.68190604", "0.6812797", "0.68040746", "0.6796988", "0.67912024", "0.67655146", "0.675524", "0.6728748", "0.670903", "0.6706233", "0.6704836", "0.66967773", "0.6657148", "0.6654745", "0.6650908", "0.6632212", "0.66159284", "0.65813965", "0.65813965", "0.6577543", "0.65772337" ]
0.849171
0
Check if two spheres are almost equal. Works for both circles and spheres. All args and kwargs are forwarded to np.isclose and np.allclose.
def sphere_isclose(c1, c2, *args, **kwargs): return np.isclose(c1.radius, c2.radius, *args, **kwargs) and np.allclose( c1.center, c2.center, *args, **kwargs )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _point_almost_equal(a,b, rtol=RTOL, atol=ATOL):\n return np.allclose(a._Point__loc, b._Point__loc,\n rtol=rtol, atol=atol)", "def almost_equal(self, other, rtol=1e-05, atol=1e-08):\n\n # float attributes defining the instance\n fkeys = ['x0', 'y0', 'dx', 'dy']\n # unambiguous attributes\n ckeys = ['nx', 'ny', 'origin']\n\n ok = True\n for k in fkeys:\n ok = ok and np.isclose(getattr(self.corner_grid, k),\n getattr(other.corner_grid, k),\n rtol=rtol, atol=atol)\n for k in ckeys:\n _ok = getattr(self.corner_grid, k) == getattr(other.corner_grid, k)\n ok = ok and _ok\n p1 = self.corner_grid.proj\n p2 = other.corner_grid.proj\n return ok and proj_is_same(p1, p2)", "def test_equality(self):\n\n s3 = space(curvature=1/5)\n for k in (0, -1, 1, 1.75, 0.325, 1/7, -1.75, -0.325, -1/7):\n s1 = space(fake_curvature=k)\n s2 = space(fake_curvature=k)\n self.assertTrue(s1 == s2)\n self.assertTrue(hash(s1) == hash(s2))\n self.assertTrue(str(s1) == str(s2))\n self.assertTrue(repr(s1) == repr(s2))\n self.assertTrue(s1 != s3)", "def almost_equals(self, other):\n import math\n ox, oy = other\n dx = self[0] - ox\n dy = self[1] - oy\n return (dx*dx + dy*dy) < pygonal.EPSILON2", "def test_particle_mass_equivalent_args(arg1, kwargs1, arg2, kwargs2, expected):\n\n result1 = particle_mass(arg1, **kwargs1)\n result2 = particle_mass(arg2, **kwargs2)\n\n assert u.isclose(result1, result2), (\n f\"particle_mass({arg1!r}, **{kwargs1}) = {result1!r}, whereas \"\n f\"particle_mass({arg2!r}, **{kwargs2}) = {result2!r}. \"\n f\"These results are not equivalent as expected.\"\n )\n\n if expected is not None:\n assert u.isclose(result1, result2) and u.isclose( # noqa: PT018\n result2, expected\n ), (\n f\"particle_mass({arg1!r}, **{kwargs1}) = {result1!r} and \"\n f\"particle_mass({arg2!r}, **{kwargs2}) = {result2!r}, but \"\n f\"these results are not equal to {expected!r} as expected.\"\n )", "def _chain_almost_equal(a,b, rtol=RTOL, atol=ATOL):\n for a_part, b_part in zip(a.parts, b.parts):\n for a_seg, b_seg in zip(a_part, b_part):\n if not np.allclose(a_seg, b_seg,\n rtol=RTOL, atol=ATOL):\n return False\n return True", "def _almost_equal(x, y):\n pass", "def equals_exact(self, other, tolerance): # -> bool:\n ...", "def test_euclidean_unit_spheres(self):\n \n s1_ref = 6.28318530717958647692528676655867\n v2_ref = 3.14159265358979323846264338327933\n s2_ref = 12.5663706143591729538505735331173\n v3_ref = 4.18879020478639098461685784437218\n\n s = space(curvature=0)\n\n self.assertTrue(isclose(\n s.sphere_s1(1),\n s1_ref\n ))\n self.assertTrue(isclose(\n s.inv_sphere_s1(s1_ref),\n 1\n ))\n self.assertTrue(isclose(\n s.sphere_v2(1),\n v2_ref\n ))\n self.assertTrue(isclose(\n s.inv_sphere_v2(v2_ref),\n 1\n ))\n self.assertTrue(isclose(\n s.sphere_s2(1),\n s2_ref\n ))\n self.assertTrue(isclose(\n s.inv_sphere_s2(s2_ref),\n 1\n ))\n self.assertTrue(isclose(\n s.sphere_v3(1),\n v3_ref\n ))\n self.assertTrue(isclose(\n s.inv_sphere_v3(v3_ref),\n 1\n ))", "def equals(a, b, tol=1e-10):\n return np.abs(a-b) <= tol", "def __eq__(self, other):\n if isinstance(other, self.__class__):\n return math.isclose(self.x, other.x, rel_tol=1e-12, abs_tol=1e-12) and\\\n math.isclose(self.y, other.y, rel_tol=1e-12, abs_tol=1e-12)\n else:\n return False", "def test_sphere2(self):\n fun = get_problem('sphere2', dimension=2, lower=-1, upper=1)\n self.assertAlmostEqual(fun(np.zeros(2)), 0.0)", "def test_eq():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.sin(x) + fwd.cos(y)\n g = fwd.sin(x) + fwd.cos(y)\n h = fwd.sin(y) + fwd.cos(x)\n assert 
f == g\n assert f != h", "def check_complex_vector_approx_eq(vec1, vec2, error=0.00001,\n error_message=\"Error: vectors are not approximately equal.\"):\n\n assert(len(vec1) == len(vec2)), 'Length of v1 = %d, Length of v2 = %d' % (len(vec1), len(vec2))\n for i in range(len(vec1)):\n if (abs(vec1[i].real - vec2[i].real) > error) or (abs(vec1[i].imag - vec2[i].imag) > error):\n print(\"-------- VALUES DO NOT MATCH AT INDEX %d --------\" % (i))\n print(str(vec1[i]) + \" != \" + str(vec2[i]))\n print(\"v1: \" + str(vec1[:10]))\n print(\"v2: \" + str(vec2[:10]))\n raise ValueError(error_message)", "def test_hash_equality(self):\n origin = np.random.randn(3)\n normal = np.random.randn(3)\n up_vector = np.random.randn(3)\n up_vector2 = np.random.randn(3)\n p1 = shapes_3d.CoordinatePlane(origin, normal, up_vector)\n p2 = shapes_3d.CoordinatePlane(origin, normal, up_vector)\n p3 = shapes_3d.CoordinatePlane(origin, normal, up_vector2)\n \n self.assertEqual(p1, p2)\n self.assertNotEqual(p1, p3)", "def _allclose(x, y, rtol=1e-7, atol=1e-14):\n for a, b in zip(x, y):\n if np.abs(a - b) > (atol + rtol * np.abs(b)):\n return False\n return True", "def equals_exact(a, b, tolerance=0.0, normalize=False, **kwargs):\n if normalize:\n a = lib.normalize(a)\n b = lib.normalize(b)\n\n return lib.equals_exact(a, b, tolerance, **kwargs)", "def sphbear (lat1, lon1, lat2, lon2, tol=1e-15):\n # cross product on outer axis:\n ocross = lambda a, b: np.cross (a, b, axisa=0, axisb=0, axisc=0)\n\n # if args have shape S, this has shape (3, S)\n v1 = np.asarray ([np.cos (lat1) * np.cos (lon1),\n np.cos (lat1) * np.sin (lon1),\n np.sin (lat1)])\n\n v2 = np.asarray ([np.cos (lat2) * np.cos (lon2),\n np.cos (lat2) * np.sin (lon2),\n np.sin (lat2)])\n\n is_bad = (v1[0]**2 + v1[1]**2) < tol\n\n p12 = ocross (v1, v2) # ~\"perpendicular to great circle containing points\"\n p1z = np.asarray ([v1[1], -v1[0], np.zeros_like (lat1)]) # ~\"perp to base and Z axis\"\n cm = np.sqrt ((ocross (p12, p1z)**2).sum (axis=0)) # ~\"angle between the vectors\"\n bearing = np.arctan2 (cm, np.sum (p12 * p1z, axis=0))\n bearing = np.where (p12[2] < 0, -bearing, bearing) # convert to [-pi/2, pi/2]\n bearing = np.where (np.abs (bearing) < tol, 0, bearing) # clamp\n bearing[np.where (is_bad)] = np.nan\n return bearing", "def are_close(coord1, coord2, tolerance=10):\n return vincenty(coord1, coord2).meters < tolerance", "def almosteq(a, b, rel_eps=1e-6, abs_eps=1e-8):\n if type(a) in float_int and type(b) in float_int:\n return math.isclose(a, b, rel_tol=rel_eps, abs_tol=abs_eps)\n else:\n return np.isclose(a, b, rtol=rel_eps, atol=abs_eps)", "def test_non_euclidean_scale_curvature(self):\n\n magic = 77773.333773777773733\n for kdir in (1, -1):\n for mul in (2, 5, 1/3, 1/11, magic, 1/magic):\n for name, dim in (\n ('sphere_s1', 1),\n ('sphere_v2', 2),\n ('sphere_s2', 2),\n ('sphere_v3', 3)\n ):\n s1 = space(fake_curvature=kdir)\n s2 = space(fake_curvature=kdir / mul)\n self.assertTrue(isclose(\n getattr(s1, name)(1) * mul**dim,\n getattr(s2, name)(mul)\n ))", "def test_almost_equal(self):\n x = Point(\n lat=23.4,\n lng=23.1,\n author=self.u\n )\n self.assertTrue(self.a == x)\n self.assertFalse(self.a != x)", "def eq(m1, m2, tol):\n if m1.ndim == 2 and m2.ndim == 2:\n m = abs(m1 - m2)\n\n if np.amax(m) < tol:\n return True\n else:\n return False\n elif m1.ndim == 2:\n msz = np.shape(m2)[0]\n tmat1 = m1.reshape((1, 9)) \n tmat2 = np.tile(tmat1, (msz, 1))\n tmat3 = tmat2.reshape(msz, 3, 3)\n\n m = abs(tmat3 - m2)\n max1 = np.amax(np.amax(m, axis=1), 
axis=1) < tol\n if np.any(max1):\n return True\n else:\n return False\n\n elif m2.ndim == 2:\n msz = np.shape(m1)[0]\n tmat1 = m2.reshape(msz, (1, 9))\n tmat2 = np.tile(tmat1, (msz, 1))\n tmat3 = tmat2.reshape(msz, 3, 3)\n\n m = abs(m1 - tmat3)\n max1 = np.amax(np.amax(m, axis=1), axis=1) < tol\n if np.any(max1):\n return True\n else:\n return False\n else:\n if np.shape(m1)[0] == np.shape(m2)[0]:\n m = abs(m1 - m2)\n max1 = np.amax(np.amax(m, axis=1), axis=1) < tol\n return np.where(max1)\n else:\n raise Exception('Wrong Input Types')", "def assert_allclose(a, b, rtol=1e-5, atol=1e-8):\r\n assert a.shape == b.shape, f\"shape mismatch a={a.shape} b={b.shape}\"\r\n assert np.allclose(a, b, rtol=rtol, atol=atol)", "def __eq__(self, other):\n if self.get_dimensions() == other.get_dimensions():\n is_equal = (np.allclose(self.lon_arr, other.lon_arr) and\n np.allclose(self.lat_arr, other.lat_arr))\n else:\n is_equal = False\n return is_equal", "def _vertices_are_equal(\n vertices1: List[np.ndarray], vertices2: List[np.ndarray]\n) -> bool:\n if len(vertices1) != len(vertices2):\n return False\n diff = vertices1 - vertices2\n if np.abs(np.max(diff)) < ways_are_equal_tolerance:\n return True\n return False", "def __eq__(self, other):\n if self.__class__ != other.__class__:\n return False\n return self.mesh == other.mesh and \\\n npw.equal(self.shape, other.shape).all() and \\\n self.domain == other.domain", "def realEqual(x,y,eps=10e-10):\n return abs(x-y) < eps", "def test_equal6():\n x = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def identical_grid(self, other) -> bool:\n return (\n (\n self.crs is None\n or other.raster.crs is None\n or self.crs == other.raster.crs\n )\n and np.allclose(self.transform, other.raster.transform, atol=1e-06)\n and np.allclose(self.shape, other.raster.shape)\n )" ]
[ "0.6594077", "0.65919226", "0.65481305", "0.6344976", "0.6266819", "0.6196258", "0.61764526", "0.6156307", "0.61361444", "0.61240405", "0.6084039", "0.60762227", "0.60162324", "0.60157394", "0.5998651", "0.59830666", "0.5966321", "0.59443253", "0.5931793", "0.5924427", "0.59239686", "0.5920258", "0.5916937", "0.58921534", "0.589105", "0.5883095", "0.5882835", "0.58827966", "0.58767474", "0.5873687" ]
0.7489281
0
Test getting and setting the minimal bounding circle radius. This function will work for any shape in two or three dimensions based on the generic base class APIs, so it can be called in other pytest tests.
def _test_get_set_minimal_bounding_sphere_radius(shape, centered=False): base_attr = "minimal" + ("_centered_" if centered else "_") sphere_type = "circle" if isinstance(shape, Shape2D) else "sphere" attr = base_attr + "bounding_" + sphere_type bounding_sphere = getattr(shape, attr) bounding_sphere_radius = getattr(shape, attr + "_radius") assert np.isclose(bounding_sphere_radius, bounding_sphere.radius) setattr(shape, attr + "_radius", bounding_sphere_radius * 2) assert np.isclose(getattr(shape, attr).radius, bounding_sphere_radius * 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_radius():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_rad = get_radius(center, radius, 30)\n\n assert returned_rad == radius\n assert returned_rad != center.get_x()\n assert returned_rad != center.get_y()", "def test_update_radius():\n center = Coordinates(1, 1)\n rad1 = 20.3\n speed = 30\n\n i = Intersection(center, rad1, speed)\n\n assert i.get_radius() == 20.3\n\n i.update_radius(56.5)\n\n assert i.get_radius() == 56.5", "def test_center_point_changes_bounding_box(self):\n\n default_shape_bb = ((-(10 + 2) / 2, -(10 + 2) / 2, -(10 + 2) / 2), ((10 + 2) / 2, (10 + 2) / 2, (10 + 2) / 2))\n assert self.test_shape.bounding_box == default_shape_bb\n\n self.test_shape.center_coordinate = (1, 1, 1)\n\n assert self.test_shape.bounding_box == (\n (default_shape_bb[0][0] + 1, default_shape_bb[0][1] + 1, default_shape_bb[0][2] + 1),\n (default_shape_bb[1][0] + 1, default_shape_bb[1][1] + 1, default_shape_bb[1][2] + 1),\n )\n\n self.test_shape.center_coordinate = (-2, 3, 14)\n\n assert self.test_shape.bounding_box == (\n (default_shape_bb[0][0] - 2, default_shape_bb[0][1] + 3, default_shape_bb[0][2] + 14),\n (default_shape_bb[1][0] - 2, default_shape_bb[1][1] + 3, default_shape_bb[1][2] + 14),\n )", "def get_radius(size):\n return (size * 10) - 5", "def __init__(self, name: str, radius: float):\r\n\r\n Shape.__init__(self, name)\r\n self.__radius = radius\r\n self.validation()", "def objects_radius(self, centre, radius):", "def get_radius(self):", "def get_radius(self):\r\n return 1", "def circle_radius(self):\n return min([self.container.width, self.container.height]) / 4", "def filled_circle(shape, radius, center=None):\n\tr2 = radius*radius\n\tif center is None:\n\t\t### set to center of array\n\t\tcenter = (shape[0]-1)/2.0,(shape[1]-1)/2.0\n\tdef func(i0, i1):\n\t\tii0 = i0 - center[0]\n\t\tii1 = i1 - center[1]\n\t\trr2 = ii0**2 + ii1**2\n\t\tc = numpy.where(rr2<r2, 0.0, 1.0)\n\t\treturn c\n\treturn numpy.fromfunction(func, shape)", "def test_get_center():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_center = get_center(center, radius, 25)\n\n assert returned_center.get_x() == center.get_x()\n assert returned_center.get_y() == center.get_y()", "def test_minimal_bounding_box(self):\n\n big = (95.06, -11.0, 141.0, 5.9)\n mid = [103.28, -8.46, 109.67, -4.68]\n sml = (106.818998, -6.18585170, 106.82264510, -6.1810)\n\n min_res = 0.008333333333000\n eps = 1.0e-4\n\n # Check that sml box is actually too small\n assert sml[2] - sml[0] < min_res\n assert sml[3] - sml[1] < min_res\n\n for bbox in [big, mid, sml]:\n # Calculate minimal bounding box\n adjusted_bbox = minimal_bounding_box(bbox, min_res, eps=eps)\n\n # Check that adjusted box exceeds minimal resolution\n assert adjusted_bbox[2] - adjusted_bbox[0] > min_res\n assert adjusted_bbox[3] - adjusted_bbox[1] > min_res\n\n # Check that if box was adjusted eps was applied\n if bbox[2] - bbox[0] <= min_res:\n assert numpy.allclose(adjusted_bbox[2] - adjusted_bbox[0],\n min_res + (2 * eps))\n\n if bbox[3] - bbox[1] <= min_res:\n assert numpy.allclose(adjusted_bbox[3] - adjusted_bbox[1],\n min_res + (2 * eps))\n\n # Check that input box was not changed\n assert adjusted_bbox is not bbox", "def fit_circle_func():\n pass", "def boundingCircle(self):\n\n try:\n import cv2\n except:\n logger.warning(\"Unable to import cv2\")\n return None\n\n # contour of the blob in image\n contour = self.contour()\n\n points = []\n # list of contour points converted to suitable format to pass into cv2.minEnclosingCircle()\n for pair in 
contour:\n points.append([[pair[0], pair[1]]])\n\n points = np.array(points)\n\n (cen, rad) = cv2.minEnclosingCircle(points);\n\n return (cen[0], cen[1], rad)", "def _get_rounded_bounding_box(\n geom: BasePolygon, width: Numeric\n ) -> Tuple[int, int, int, int]:\n return (\n geom.bounds[0] - (geom.bounds[0] % width),\n geom.bounds[1] - (geom.bounds[1] % width),\n geom.bounds[2] + (-geom.bounds[2] % width),\n geom.bounds[3] + (-geom.bounds[3] % width),\n )", "def circular_levelset(shape, center, sqradius, scalerow=1.0):\n grid = np.mgrid[list(map(slice, shape))].T - center\n phi = sqradius - np.sqrt(np.sum((grid.T)**2, 0))\n u = np.float_(phi > 0)\n return u", "def circle(center, radius, *args, **kwargs):\n return patch.Circle(center, radius, *args, **kwargs)", "def test_calc_circle(self):\n t = AioBaseTurtle()\n steps, step_len, rot_step = t._calc_circle(100, extent=180)\n self.assertEqual(steps, 14)\n self.assertAlmostEqual(rot_step, 180.0 / 14.0)\n self.assertAlmostEqual(step_len, 22.3928952207)", "def initialize_radius(src):\n\n x_length = (np.amax(src[:, 0]) - np.amin(src[:, 0]))\n y_length = (np.amax(src[:, 1]) - np.amin(src[:, 1]))\n z_length = (np.amax(src[:, 2]) - np.amin(src[:, 2]))\n\n max_length = max(x_length, y_length, z_length)\n\n if max_length > 50:\n radius = 10\n elif max_length > 1:\n radius = 1\n else:\n radius = 0.01\n\n return radius", "def GetCircle(circle):\r\n pass", "def test_calculate_crow_bounds_shape_parameter_type1(self):\n\n _bounds = calculate_crow_bounds(22, 620.0, 0.4239, 0.6142, 0.9, 1, 1)\n self.assertAlmostEqual(_bounds[0], 0.4356064)\n self.assertAlmostEqual(_bounds[1], 0.8844610)", "def get_gaussian_radius(box_size, min_overlap):\n box_tensor = box_size\n width, height = box_tensor[..., 0], box_tensor[..., 1]\n\n a1 = 1\n b1 = (height + width)\n c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n sq1 = torch.sqrt(b1 ** 2 - 4 * a1 * c1)\n # r1 = (b1 + sq1) / 2\n r1 = (b1 - sq1) / (2 * a1)\n\n a2 = 4\n b2 = 2 * (height + width)\n c2 = (1 - min_overlap) * width * height\n sq2 =torch.sqrt(b2 ** 2 - 4 * a2 * c2)\n # r2 = (b2 + sq2) / 2\n r2 = (b2 - sq2) / (2 * a2)\n\n a3 = 4 * min_overlap\n b3 = -2 * min_overlap * (height + width)\n c3 = (min_overlap - 1) * width * height\n sq3 = torch.sqrt(b3 ** 2 - 4 * a3 * c3)\n # r3 = (b3 + sq3) / 2\n r3 = (b3 + sq3) / (2 * a3)\n\n return torch.min(r1, torch.min(r2, r3))\n # return np.min(box_size,axis=1)/3*2", "def test_circumference_area(self):\n self.assertEqual(9.425, circumference_area(self.values['radius']))", "def __init__(self, shape: Tuple[int, int], spacing: float, asymmetric_grid: bool):\n cols, rows = shape\n super().__init__(\n CalibrationTargetType.CircleGrid,\n rows,\n cols,\n spacing=spacing,\n asymmetric_grid=asymmetric_grid,\n )", "def shape_type(self):\n return \"circle\"", "def test_cut_volume(self):\n\n shape_with_cut = ExtrudeCircleShape(points=[(30, 0)], radius=20, distance=40, cut=self.test_shape)\n\n assert shape_with_cut.volume() == pytest.approx((math.pi * (20**2) * 40) - (math.pi * (10**2) * 30))", "def test_get_neighborhood_radius_consistent():\r\n grid_spacing = random.uniform(1e-6, 10.0)\r\n center = numpy.random.random(random.randint(1, 3))\r\n\r\n # Find points with radius neighborhood\r\n radius = random.uniform(_distance_to_nearest(grid_spacing, center), grid_spacing*5)\r\n points = ill.get_neighborhood_radius(grid_spacing, center, radius)\r\n\r\n # Every points found within this radius, should be in the points of a larger radius\r\n outer_points = 
ill.get_neighborhood_radius(grid_spacing, center,\r\n radius+random.uniform(0.0, grid_spacing*5))\r\n\r\n for point in points:\r\n assert point in outer_points", "def __init__(self, radius):\n self.radius = radius", "def test_set_diameter():\n radius = 10\n c = Circle(radius) \n expected_diameter = 10 \n c.diameter = expected_diameter \n assert c.diameter == expected_diameter\n assert c.radius == expected_diameter / 2", "def test_init():\n radius = 10\n c = Circle(radius)\n assert isinstance(c, Circle)\n assert c.radius == radius" ]
[ "0.654615", "0.61484224", "0.61027306", "0.60067105", "0.59871185", "0.5964421", "0.5933814", "0.58623713", "0.58558685", "0.582508", "0.58149123", "0.5797314", "0.5770679", "0.576661", "0.5736985", "0.57300043", "0.5726174", "0.571676", "0.5708322", "0.5696453", "0.5671404", "0.56700414", "0.56692463", "0.56628716", "0.56594986", "0.56538916", "0.56319386", "0.56282485", "0.5617655", "0.5616519" ]
0.73728734
0
Draws the background comprising of the lightblue sky and green grass
def background(): sky_color = (66, 170, 255) # color of the sky grass_color = (0, 128, 0) # color of the grass rect(screen, sky_color, (0, 0, 500, 250), 0) # sky rect(screen, grass_color, (0, 250, 500, 250), 0) # grass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_background(self):\n backgrounds = {\n \"forest\": (38, 106, 46),\n \"desert\": (194, 178, 128)\n }\n self.background_surface.fill(backgrounds[self.geography])", "def main_background():\n surface.fill(COLOR_GRAY)", "def draw_bg(self):\n self.screen.fill(self.bg)", "def main_background():\n surface.fill(COLOR_BACKGROUND)", "def draw_bg (self):\n self.health = max(0.0, min(1.0, (self.healthsteps + self.mud.value) / self.healthsteps))\n healthycolor = (0x11, 0x22, 0x44)\n pollutedcolor = (0x66, 0x66, 0)\n self.watercolor = [int((a - b) * self.health + b)\n for a,b in zip(healthycolor, pollutedcolor)]\n colorname = \"rgb({},{},{})\".format(*self.watercolor)\n w, h = self.width, self.height\n self.draw.rectangle((0,0,w-1,self.level_px-1), \"#000000\")\n self.draw.rectangle((0,self.level_px,w-1,h-1), colorname)", "def build_background():\n layer_1 = GRect(800, 550)\n layer_1.filled = True\n layer_1.color = 'silver'\n layer_1.fill_color = 'silver'\n window.add(layer_1)\n layer_2 = GRect(800, 90)\n layer_2.filled = True\n layer_2.color = 'whitesmoke'\n layer_2.fill_color = 'whitesmoke'\n window.add(layer_2)\n layer_3 = GRect(800, 40, x=0, y=510)\n layer_3.filled = True\n layer_3.color = 'whitesmoke'\n layer_3.fill_color = 'whitesmoke'\n window.add(layer_3)", "def draw_background(self):\n back = pygame.Surface(self.size)\n width, height = self.size\n self.shapes['gradient'] = shapes.gen_gradient(\n (width, height / 2),\n self.colors[3],\n self.colors[4]\n )\n back.blit(self.shapes['gradient'], (0, height - self.sh('gradient')))\n\n # TODO: Don't use static path/icon\n image = '/usr/share/icons/Tango/scalable/mimetypes/audio-x-generic.svg'\n self.shapes['musicimg'] = load_svg(image, [height/2]*2)\n back.blit(\n self.shapes['musicimg'],\n (width / 10, (height - self.sh('musicimg')) / 2)\n )\n return back", "def drawBackground(self,screen):\n pygame.draw.rect(screen,(240,240,240),(self.basepos[0],self.basepos[1],204,504))\n pygame.draw.rect(screen,(0,0,0),(self.basepos[0]+2,self.basepos[1]+2,200,500))", "def draw_bg(self):\n for y in range(WIN_HEIGHT/32): #TODO: make sure this process is correct and efficient.\n for x in range(WIN_WIDTH/32):\n self.screen_image.blit(self.bg, (x * 32, y * 32))", "def mainmenu_background():\n surface.fill((40, 0, 40))", "def draw_background(self, t):\n pass", "def background(self):\n sun = graphics.Circle(graphics.Point(200, 310), 50)\n sun.setFill('yellow')\n sun.draw(self.win)\n \n earth = graphics.Circle(graphics.Point(40, 250), 30)\n earth.setFill('blue')\n earth.draw(self.win)\n continent = graphics.Circle(graphics.Point(30, 265), 10)\n continent.setFill('green')\n continent.draw(self.win)\n cont_2 = graphics.Circle(graphics.Point(30, 235), 10)\n cont_2.setFill('green')\n cont_2.draw(self.win)\n cont_3 = graphics.Circle(graphics.Point(55, 245), 10)\n cont_3.setFill('green')\n cont_3.draw(self.win)\n \n stars = graphics.Circle(graphics.Point(250, 250), 5)\n stars.setFill('white')\n stars.draw(self.win)\n star1 = graphics.Circle(graphics.Point(100, 250), 5)\n star1.setFill('white')\n star1.draw(self.win)\n star2 = graphics.Circle(graphics.Point(150, 150), 5)\n star2.setFill('white')\n star2.draw(self.win)\n star3 = graphics.Circle(graphics.Point(50, 100), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star3 = graphics.Circle(graphics.Point(100, 50), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star4 = graphics.Circle(graphics.Point(250, 80), 5)\n star4.setFill('white')\n star4.draw(self.win)\n star4 = graphics.Circle(graphics.Point(200, 60), 
5)\n star4.setFill('white')\n star4.draw(self.win)", "def background_maker():\n background = GRect(window.width, window.height)\n background.filled = True\n background.fill_color = '0xFFFCEC'\n background.color = '0xFFFCEC'\n return background", "def draw_environment():\n rect(screen, LIGHT_GRAY, (0, 0, 800, 450)) # grey sky\n rect(screen, WHITE, (0, 450, 800, 1000)) # white ground", "def mainmenu_background():\n gameDisplay.fill((40, 0, 40))", "def mainmenu_background():\n gameDisplay.fill((40, 0, 40))", "def draw_background(self, verbosity=0):\n log.debug(\"Drawing background\")\n\n log.debug(\"Drawing 'rock' background\")\n pygame.draw.rect(self.surface, (127, 127, 127), self.surface.get_rect())\n\n log.debug(\"Drawing Region contents\")\n for region in self.dungeon_map.regions:\n coords = self.map_to_screen(region.coords)\n if verbosity > 0:\n # Color-code regions for convenience\n if region.kind == Region.ROOM:\n color = (255, 255, 240)\n elif region.kind == Region.CHAMBER:\n color = (255, 240, 255)\n elif region.kind == Region.PASSAGE:\n color = (240, 255, 255)\n else:\n raise LookupError(\"Unknown Region kind '{0}'\"\n .format(region.kind))\n else:\n color = (255, 255, 255)\n pygame.draw.polygon(self.surface, color, coords)\n\n if verbosity == 0:\n return\n log.debug(\"Drawing Connection contents\")\n for conn in self.dungeon_map.connections:\n coords = self.map_to_screen(conn.get_poly_coords())\n if (conn.kind == Connection.DOOR or\n conn.kind == Connection.SECRET or\n conn.kind == Connection.ONEWAY):\n if conn.is_incomplete():\n color = (127, 140, 127)\n else:\n color = (240, 255, 240)\n elif conn.kind == Connection.ARCH:\n if conn.is_incomplete():\n color = (127, 127, 140)\n else:\n color = (240, 240, 255)\n elif conn.kind == Connection.OPEN:\n if conn.is_incomplete():\n color = (140, 127, 127)\n else:\n color = (255, 240, 240)\n else:\n continue\n pygame.draw.polygon(self.surface, color, coords)", "def blit_background(self):\n self.screen.fill([67, 67, 67])\n self.screen.blit(self.background, (0,0))\n pygame.draw.rect(self.screen, (0, 0, 0), self.seperate_line)", "def paintScreen(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BACKGROUND_LEFT)\n self.imgBackgroundLeft = guiobjects.OcempImageMapTransparent(imgPath)\n self.window.add_child(self.imgBackgroundLeft)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BACKGROUND_RIGHT)\n imgBackgroundRight = guiobjects.OcempImageMapTransparent(imgPath)\n imgBackgroundRight.topleft = 297, 0\n self.window.add_child(imgBackgroundRight)", "def draw_ground(self):\r\n win.blit(self.ground, (0, 400))", "def setBackground(self, r, g, b): \n\t\tpass", "def make_background(self):\n for x in range(self.env_list[0].size):\n for y in range(self.env_list[0].size):\n img = load_image(\"dirt.png\")[0]\n self.background.blit(img, (x*50, y*50))", "def draw_background(stdscr, title=\"2048 Rogue-Agile Edition\", color_scheme=2):\n console_height, console_width = stdscr.getmaxyx()\n brd.color_border(stdscr, (0, 0), (console_width, console_height), color_scheme)\n brd.color_hash(stdscr, (0, 0), (console_width, console_height), color_scheme)\n brd.add_title(stdscr, title, underline=False, color_scheme=color_scheme)", "def draw_housing():\r\n green.pensize(3)\r\n green.color(\"black\", \"darkgrey\")\r\n green.begin_fill()\r\n green.forward(80)\r\n green.left(90)\r\n green.forward(200)\r\n green.circle(40, 180)\r\n green.forward(200)\r\n green.left(90)\r\n green.end_fill()", "def do_green_screen(x, bg):\n\tassert isinstance(x, 
np.ndarray) and isinstance(bg, np.ndarray), 'inputs must be numpy arrays'\n\tassert x.dtype == np.uint8 and bg.dtype == np.uint8, 'inputs must be uint8 arrays'\n\t\n\t# Get image sizes\n\tx_h, x_w = x.shape[1:]\n\n\t# Convert to RGBA images\n\tim = TF.to_pil_image(torch.ByteTensor(x))\n\tim = im.convert('RGBA')\n\tpix = im.load()\n\tbg = TF.to_pil_image(torch.ByteTensor(bg))\n\tbg = bg.convert('RGBA')\n\tbg = bg.load()\n\n\t# Replace pixels\n\tfor x in range(x_w):\n\t\tfor y in range(x_h):\n\t\t\tr, g, b, a = pix[x, y]\n\t\t\th_ratio, s_ratio, v_ratio = rgb_to_hsv(r / 255., g / 255., b / 255.)\n\t\t\th, s, v = (h_ratio * 360, s_ratio * 255, v_ratio * 255)\n\n\t\t\tmin_h, min_s, min_v = (100, 80, 70)\n\t\t\tmax_h, max_s, max_v = (185, 255, 255)\n\t\t\tif min_h <= h <= max_h and min_s <= s <= max_s and min_v <= v <= max_v:\n\t\t\t\tpix[x, y] = bg[x, y]\n\n\treturn np.moveaxis(np.array(im).astype(np.uint8), -1, 0)[:3]", "def _bg_update(self):\n self.screen.fill(colour.BLACK)\n for star in self._stars:\n if star[2] + star[1] > self.s_height:\n star[1] = 0\n else:\n star[1] += star[2]\n self.screen.set_at((star[0], star[1]), colour.WHITE)", "def main_background(self):\n self.screen.blit(self.background, (0, 0))", "def drawBackground(self):\n if self.newFrameArrived and not self.reshaping:\n imgHeight, imgwidth, _ = self.currentFrame.shape\n if imgHeight == self.height and imgwidth == self.width:\n glDisable(GL_DEPTH_TEST)\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glMatrixMode(GL_PROJECTION)\n glPushMatrix()\n #print \"Happy printings1\"\n #glMatrixMode(GL_MODELVIEW)\n #glLoadIdentity()\n\n #print \"Happy printings\"\n glLoadIdentity()\n #print \"Happy printings\"\n glOrtho(0, self.width, 0, self.height, -1.0, 1.0)\n #print \"Happy printings\"\n glViewport(0, 0, self.width, self.height)\n #print \"Happy printings\"\n glDisable(GL_TEXTURE_2D)\n glPixelZoom(1, -1)\n glRasterPos3f(0, self.height-0.5, -1)\n #print \"Happy printings5\"\n glDrawPixels(self.width, self.height, GL_RGB, GL_UNSIGNED_BYTE, self.currentFrame)\n #print \"Happy printings6\"\n # glBegin(GL_QUADS)\n # glTexCoord2f(0.0,0.0); glVertex3f(-4.0,-3.0,-10.0)\n # glTexCoord2f(1.0,0.0); glVertex3f( 4.0,-3.0,-10.0)\n # glTexCoord2f(1.0,1.0); glVertex3f( 4.0, 3.0,-10.0)\n # glTexCoord2f(0.0,1.0); glVertex3f(-4.0, 3.0,-10.0)\n # glEnd()\n glPopMatrix()\n glMatrixMode(GL_MODELVIEW)\n glPopMatrix()\n glEnable(GL_DEPTH_TEST)\n #self.newFrameArrived = False", "def background(self, Background):\n SCREEN.blit(Background, (0, 0))\n #SCREEN.fill((0,0,0))", "def DrawPlainBackground(self, dc, wnd, _rect):\r\n \r\n rect = wx.Rect(*_rect)\r\n rect.height += 1\r\n\r\n dc.SetBrush(wx.Brush(wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DFACE)))\r\n dc.DrawRectangle(rect.x - 1, rect.y - 1, rect.width + 2, rect.height + 1)" ]
[ "0.78102857", "0.7737531", "0.73148644", "0.7291126", "0.7223079", "0.71805614", "0.6887953", "0.6861217", "0.670932", "0.67014545", "0.6694754", "0.6650995", "0.6435646", "0.64259964", "0.64249414", "0.64249414", "0.6345578", "0.6331869", "0.6303537", "0.6301052", "0.6217507", "0.6162414", "0.6118676", "0.607212", "0.6055738", "0.6048268", "0.60400736", "0.6038477", "0.6000863", "0.5995138" ]
0.812715
0
The function draws a tree by forming its trunk as a rectangle and all its leaves as six circles.
def tree(xt, yt, t): green = (1, 50, 32) # leaf color rect(screen, (150, 75, 0), (xt, yt, 15 * t, 60 * t), 0) # tree's trunk circle(screen, green, (xt + 15 * t / 2, yt - 30 * t), 30 * t) # leaves circle(screen, green, (xt + 15 * t / 2 + 30 * t, yt - 30 * t + 15 * t), 30 * t) # leaves circle(screen, green, (xt + 15 * t / 2 - 30 * t, yt - 30 * t + 15 * t), 30 * t) # leaves circle(screen, green, (xt + 15 * t / 2 + 30 * t, yt - 30 * t - 20 * t), 30 * t) # leaves circle(screen, green, (xt + 15 * t / 2 - 30 * t, yt - 30 * t - 20 * t), 30 * t) # leaves circle(screen, green, (xt + 15 * t / 2, yt - 30 * t - 50 * t), 30 * t) # leaves
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_tree(self) -> None:\n import turtle\n\n def height(head):\n return 1 + max(height(head.left), height(head.right)) if head else -1\n\n def jump_to(x, y):\n t.penup()\n t.goto(x, y)\n t.pendown()\n\n def draw(node, x, y, dx):\n if node:\n t.goto(x, y)\n jump_to(x, y - 20)\n t.write(node.val, align=\"center\", font=(\"Arial\", 15, \"normal\"))\n t.circle(10)\n draw(node.left, x - dx, y - 50, dx / 2)\n jump_to(x, y - 20)\n draw(node.right, x + dx, y - 50, dx / 2)\n\n t = turtle.Turtle()\n t.speed(0)\n turtle.delay(0)\n h = height(self)\n jump_to(0, 100 + 10 * h)\n draw(self, 0, 100 + 10 * h, 20 * h)\n t.hideturtle()\n turtle.mainloop()", "def drawtree(self):\r\n\r\n Phylo.draw(self.tree)", "def draw_trees(*trees):\n TreeView(*trees).mainloop()\n return", "def binary_tree(length, depth):\n if depth == 0:\n return # base case\n posx = turtle.xcor()\n posy = turtle.ycor()\n left(length, depth)\n turtle.up()\n turtle.goto(posx, posy)\n #turtle.dot()\n turtle.down()\n right(length, depth)", "def Draw_Tree( self, rooted_tree, menuoptions = 0, editor = 0 ):\r\n #Clear the previous information\r\n self.Reset_Selection()\r\n self.canvas_one.delete( ALL )\r\n self.canvas_two.delete( ALL )\r\n self.handle_list = []\r\n \r\n if editor:\r\n self.Adjust_Menu( menuoptions )\r\n #if no node\r\n if( rooted_tree == 0 ):\r\n self.canvas_one.create_text( cb.xorigin, 5, text=\"There is no tree to display\", anchor = NW )\r\n ys = 0\r\n #one node\r\n elif( rooted_tree.sub == [] ):\r\n #if there is only one node, make its length one because a zero length will not show up\r\n store = rooted_tree.data.length\r\n rooted_tree.data.length = 1\r\n xlong = rooted_tree.Longest_Branch( )\r\n cb.New_XLong( xlong )\r\n ys = self.Draw_Node( rooted_tree, cb.xorigin, cb.yorigin)\r\n rooted_tree.data.length = store\r\n else:\r\n #recursively draw the tree, temporarily store the root's length and make it zero\r\n #If the root is long(Isolated), it does not squish the rest of the data\r\n store = rooted_tree.data.length\r\n rooted_tree.data.length = 0\r\n #Get the longest distance from root to leaf\r\n xlong = rooted_tree.Longest_Branch( )\r\n cb.New_XLong( xlong ) #Change the scale\r\n ys, ypos1 = self.Rec_Draw_Tree( rooted_tree, cb.xorigin, cb.yorigin )\r\n #Extend the root node so that it is visible\r\n ls = self.Find_Line_By_Node( rooted_tree )\r\n self.canvas_one.coords( ls.line_handle, cb.xorigin-5, ypos1, cb.xorigin, ypos1 )\r\n rooted_tree.data.length = store #restore the root node's length\r\n ys = ys + cb.ytick\r\n self.canvas_one.create_text(20,ys,text=\"_____\")\r\n self.canvas_two.create_text(20,ys,text=\"_____\") #end markers\r\n #Set the scrollregions of the canvases\r\n ys = ys + cb.ytick\r\n self.ys = ys + 0*cb.ytick\r\n self.canvas_one.config( scrollregion = ( 0, 0, 300, self.ys ) )\r\n self.canvas_two.config( scrollregion = ( 0, 0, 300, self.ys ) )\r\n self.Draw_Scale()", "def tree():\n nobv.visual_tree()", "def repr_tree(tree, viz, current_node, rec_depth, color_map, parameters):\r\n for child in tree.children:\r\n if child.operator is None:\r\n viz.attr('node', shape='box', fixedsize='true', width=\"2.5\",\r\n fontsize=\"8\")\r\n this_trans_id = str(uuid.uuid4())\r\n if child.label is None:\r\n viz.node(this_trans_id, \"tau\", style='filled', fillcolor='black')\r\n else:\r\n node_color = get_color(child, color_map)\r\n viz.node(this_trans_id, str(child), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, this_trans_id)\r\n else:\r\n condition_wo_operator = child.operator == 
pt_operator.Operator.XOR and len(\r\n child.children) == 1 and child.children[0].operator is None\r\n if condition_wo_operator:\r\n childchild = child.children[0]\r\n viz.attr('node', shape='box', fixedsize='true', width=\"2.5\",\r\n fontsize=\"8\")\r\n this_trans_id = str(uuid.uuid4())\r\n if childchild.label is None:\r\n viz.node(this_trans_id, str(childchild), style='filled', fillcolor='black')\r\n else:\r\n node_color = get_color(childchild, color_map)\r\n viz.node(this_trans_id, str(childchild), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, this_trans_id)\r\n else:\r\n viz.attr('node', shape='circle', fixedsize='true', width=\"0.6\",\r\n fontsize=\"14\")\r\n op_node_identifier = str(uuid.uuid4())\r\n node_color = get_color(child, color_map)\r\n viz.node(op_node_identifier, str(child.operator), color=node_color, fontcolor=node_color)\r\n viz.edge(current_node, op_node_identifier)\r\n viz = repr_tree(child, viz, op_node_identifier, rec_depth + 1, color_map, parameters)\r\n return viz", "def makeTree(size):\n # Probability that a character will be green.\n prob_gr = 0.6\n # Colour codes.\n colours = [31, 33, 34, 35, 36, 37]\n # Characters to use for decorations. Experiment with these.\n # The chr(169) and chr(174) characters may not work in all terminals\n # (extended ASCII, c and r in a circle).\n decs = ['@', '&', '*', chr(169), chr(174)]\n\n # Format string for printing blinking characters.\n blink_col = \"\\033[5;{0}m{1}\\033[0m\"\n # String to print a green octothorpe ('#').\n leaf = \"\\033[32m#\\033[0m\"\n\n # Width of the tree, will grow by 2 each time.\n width = 1\n # Initialise the tree string, with a star at the top.\n tree = \"\\n{}*\\n\".format(' ' * (size))\n\n \"\"\" Main Loop starts now.\"\"\"\n\n \"\"\" We can't use the normal \"format\" centering approach:\n (\"{:^nn}\".format(string) where \"nn\" is the width of the line), \n with these ansi codes. 
This is because Python sees the strings as being\n more than one character long (15 & 10 for baubles and leaves).\"\"\"\n\n # Loop from (size - 1) down to 0, using the counter as the padding size.\n for pad in range(size - 1, -1, -1):\n # Increase the width of the tree by 2.\n width += 2\n\n # Put the characters for the line in \"temp\".\n temp = \"\"\n for j in range(width):\n # Make some leaves.\n if random() < prob_gr:\n temp += leaf\n # And also some baubles.\n else:\n temp += blink_col.format(choice(colours), choice(decs))\n\n # Add that string to the line, with padding.\n tree += \"{0}{1}\\n\".format(' ' * pad, temp)\n\n # Add a \"trunk\" of 2 lines and return.\n return tree + \"{0}{1}\\n\".format(' ' * (size - 1), \"000\") * 2", "def plotTree(self):\n t = self.make(self.tree)\n t.draw()", "def draw_tree(t, df, size=10, ratio=0.6, precision=0):\n s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,\n special_characters=True, rotate=True, precision=precision)\n IPython.display.display(graphviz.Source(re.sub('Tree {',\n f'Tree {{ size={size}; ratio={ratio}', s)))", "def draw_tree(t, df, size=10, ratio=0.6, precision=0):\n s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,\n special_characters=True, rotate=True, precision=precision)\n IPython.display.display(graphviz.Source(re.sub('Tree {',\n f'Tree {{ size={size}; ratio={ratio}', s)))", "def draw_tree(self, ax: plt.Axes = None, tree_depth: int = None, exclude_empty: bool = False,\n line_width: int = 1, edge_color='red', plot_nodes: bool = False, plot_points: bool = False):\n\n manager = self.root.manager\n manager._finalize_data()\n\n root_quad = self.root\n norm = matplotlib.colors.Normalize(vmin=root_quad.settings['min_depth'], vmax=root_quad.settings['max_depth'])\n cmap = matplotlib.cm.rainbow\n\n if ax is None:\n ax = plt.subplots(figsize=[11, 7], dpi=150)[1]\n\n if tree_depth is None or tree_depth == 0:\n if exclude_empty and not self.index:\n pass\n else:\n sizes = [self.maxs[0] - self.mins[0], self.maxs[1] - self.mins[1]]\n if self.quad_index != -1:\n try:\n idx = self.quad_index[0], self.quad_index[1]\n except:\n idx = self.quad_index\n quad_z = manager.node_data['z'][idx].compute()\n rect = matplotlib.patches.Rectangle(self.mins, *sizes, zorder=2, alpha=0.5, lw=line_width, ec=edge_color, fc=cmap(norm(quad_z)))\n if plot_nodes:\n quad_x = manager.node_data['x'][idx].compute()\n quad_y = manager.node_data['y'][idx].compute()\n ax.scatter(quad_x, quad_y, s=5)\n if plot_points:\n ax.scatter(manager.data['x'][self.index].compute(),\n manager.data['y'][self.index].compute(), s=2)\n else: # no depth for the quad\n rect = matplotlib.patches.Rectangle(self.mins, *sizes, zorder=2, alpha=1, lw=line_width, ec=edge_color, fc='None')\n ax.add_patch(rect)\n\n if tree_depth is None:\n for child in self.children:\n child.draw_tree(ax, tree_depth=None, exclude_empty=exclude_empty, line_width=line_width, edge_color=edge_color, plot_points=plot_points, plot_nodes=plot_nodes)\n elif tree_depth > 0:\n for child in self.children:\n child.draw_tree(ax, tree_depth=tree_depth - 1, exclude_empty=exclude_empty, line_width=line_width, edge_color=edge_color, plot_points=plot_points, plot_nodes=plot_nodes)\n\n if (self.tree_depth == 0) or (tree_depth is None and self.tree_depth == 0):\n xsize = self.maxs[0] - self.mins[0]\n ysize = self.maxs[1] - self.mins[1]\n ax.set_ylim(self.mins[1] - ysize / 10, self.maxs[1] + ysize / 10)\n ax.set_xlim(self.mins[0] - xsize / 10, self.maxs[0] + xsize / 10)\n divider = 
make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n plt.gcf().colorbar(matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap), cax=cax, orientation='vertical', label='Depth (+down, meters)')\n\n return ax", "def tree(branchLen):\n if branchLen > 5:\n t.backward(branchLen)\n t.right(20)\n tree(branchLen-16,t)\n t.left(40)\n tree(branchLen-16,t)\n t.right(20)\n t.forward(branchLen)", "def Rec_Draw_Tree( self, cur_node, xs, ys ):\r\n yhold = [] #holds the y values of the children\r\n ypos1 = 0 #the yvalue of the current node\r\n ypos = 0\r\n new_xstart = cur_node.data.length * cb.xtick + xs\r\n #for each child of the current node\r\n for i in range( len( cur_node.sub ) ):\r\n #current node is to be drawn before the (cb.order)-th child\r\n if ( i == cb.order ):\r\n ypos1 = self.Draw_Node( cur_node, xs, ys )\r\n if( cb.order == 1 ):\r\n ypos1 = ys\r\n ys = ypos1 + cb.ytick\r\n if( len( cur_node.sub[i].sub ) == 0 ):#Draw a leaf\r\n ypos = self.Draw_Node( cur_node.sub[i], new_xstart, ys )\r\n yhold.append( int(ypos) )\r\n else: #Draw an internal node\r\n ys, ypos = self.Rec_Draw_Tree( cur_node.sub[i], new_xstart, ys )\r\n yhold.append( ypos )\r\n if( i < len( cur_node.sub ) - 1 ):\r\n ys = ys + cb.ytick\r\n if ( cb.order != 1 and cb.order == len( cur_node.sub ) ):\r\n ypos1 = self.Draw_Node( cur_node, xs, ys )\r\n elif( cb.order == 1 and cb.order == len( cur_node.sub) ):\r\n ypos1 = self.Draw_Node( cur_node, xs , ys+cb.ytick )\r\n ypos1 = ypos1 - cb.ytick\r\n\r\n #draw the vertical lines to the children\r\n for item in yhold:\r\n self.canvas_one.create_line( new_xstart, item, new_xstart, ypos1, width = 3, fill=self.branch_color )\r\n #return the farthest vertical position drawn and the position of the line of the current segment\r\n return ys, ypos1", "def draw_tree(self):\n\n print \"--- \" + str(self.name)\n \n def draw_child_tree(current, depth):\n \n for c in current.children:\n print depth * \" \" + \"|-- \" + str(c.name)\n if hasattr(c, 'children'):\n draw_child_tree(c, depth + 1)\n \n draw_child_tree(self, 1)\n \n return", "def draw_tree(self):\n nx.draw(self.diffusion_tree, with_labels=True)", "def savage(t):\r\n a=t.GetListOfBranches()\r\n n=0\r\n for i in range(len(a)):\r\n if a[i]: n= n+1\r\n else: break\r\n size0=sqrt(n)\r\n size1=int(size0)\r\n if size0==size1: size=size1\r\n else: size=size1+1\r\n print size,n\r\n c=TCanvas()\r\n c.Divide(size,size)\r\n for i in range(n):\r\n c.cd(i+1)\r\n t.Draw(a[i].GetName())\r\n return c", "def draw(self):\n for tree_idx, tree in enumerate(self.trees):\n print(\"==========================================\\nTree\",\n tree_idx)\n self._print_tree(tree)", "def plot_tree(tree, min_x, max_x, min_y, max_y, prev_node, branch, depth=0):\r\n \r\n cur_node = tree.location # current tree's node\r\n left_branch = tree.left_child # its left branch\r\n right_branch = tree.right_child # its right branch\r\n \r\n # set line's width depending on tree's depth\r\n if depth > len(line_width)-1:\r\n ln_width = line_width[len(line_width)-1]\r\n else:\r\n ln_width = line_width[depth]\r\n \r\n k = len(cur_node.position) - 1 # k = 2\r\n axis = depth % k\r\n \r\n # draw a vertical splitting line\r\n if axis == 0:\r\n \r\n if branch is not None and prev_node is not None:\r\n \r\n if branch:\r\n max_y = prev_node[1]\r\n else:\r\n min_y = prev_node[1]\r\n \r\n plt.plot([cur_node.position[0],cur_node.position[0]], [min_y,max_y], linestyle='-', color='red', linewidth=ln_width)\r\n \r\n # draw a horizontal splitting line\r\n elif axis == 1:\r\n \r\n 
if branch is not None and prev_node is not None:\r\n \r\n if branch:\r\n max_x = prev_node[0]\r\n else:\r\n min_x = prev_node[0]\r\n \r\n plt.plot([min_x,max_x], [cur_node.position[1],cur_node.position[1]], linestyle='-', color='blue', linewidth=ln_width)\r\n \r\n # draw the current node\r\n plt.plot(cur_node.position[0], cur_node.position[1], 'ko')\r\n \r\n # draw left and right branches of the current node\r\n if left_branch is not None:\r\n plot_tree(left_branch, min_x, max_x, min_y, max_y, cur_node.position, True, depth+1)\r\n \r\n if right_branch is not None:\r\n plot_tree(right_branch, min_x, max_x, min_y, max_y, cur_node.position, False, depth+1)", "def buildTree(self,newick):\n\t\tfor i in range(len(newick)):\n\t\t\tif newick[i] == \"(\":\n\t\t\t\tself.currNode.children.append(node.node(self.currNode))\n\t\t\t\tself.currNode=self.currNode.children[0]\n\t\t\t#polytomy support enabled\n\t\t\telif newick[i] == \",\":\n\t\t\t\tself.currNode=self.currNode.parent\n\t\t\t\tself.currNode.children.append(node.node(self.currNode))\n\t\t\t\tself.currNode=self.currNode.children[-1]\n\t\t\telif newick[i] == \")\":\n\t\t\t\tself.currNode=self.currNode.parent\n\t\t\telse:\n\t\t\t\tself.currNode.info+=newick[i]", "def draw_states(self):\n drawing = self.tree.draw(\n width=400,\n height=300,\n layout='d',\n node_labels=(\"idx\", 1, 1),\n node_sizes=15,\n node_style={\"stroke\": \"black\", \"stroke-width\": 2},\n node_colors=[\n toytree.colors[int(round(i[1]))] if isinstance(i, (list, np.ndarray))\n else \"white\" \n for i in self.tree.get_node_values(\"likelihood\", True, True)\n ],\n )\n return drawing", "def visualize_tree(root):\n _visualize_tree(root, [], 0, '-')", "def visualise_binary_tree(self):\n tree_elements = [i for i in self.breadthfirst()] # saves the nodes of the tree in an array after the breadthfirst transversal is executed\n height = self.height(self.root())\n n = sum([2 ** i for i in range(0, height + 1)]) # total number of possible nodes of a tree\n array_tree = n * [\" \"] # array-based representation of a binary tree implemented by using level-numbering of positions(chapter 8.3.2 of Goodrich book)\n array_tree[0] = tree_elements[0] # assigning the root\n for i in range(0, len(tree_elements)):\n index1 = i\n if tree_elements[i] in array_tree:\n index1 = array_tree.index(tree_elements[i])\n for j in range(i, len(tree_elements)):\n if tree_elements[j] == self.left(tree_elements[i]):\n array_tree[2 * index1 + 1] = tree_elements[j]\n if tree_elements[j] == self.right(tree_elements[i]):\n array_tree[2 * index1 + 2] = tree_elements[j]\n break\n for i in range(0, len(array_tree)):\n if array_tree[i] != \" \": # the empty nodes are represented by \" \"\n array_tree[i] = array_tree[i].element() # changing the array from nodes to elements of the nodes\n height1 = height\n spaces = 2 ** (height + 1) - 2 # initialises the number of spaces that have to be added when displaying the nodes\n height -= 1\n pos = 0 # index of the node that is displayed\n print(spaces * \" \" + array_tree[pos])\n for i in range(0, height1 + 1): #iterates through all the levels of the binary tree\n spaces = 2 ** (height + 1) - 2\n level = spaces * \" \" # initialises each level of the binary tree with the appropiate number of spaces\n height += 1\n spaces = 2 ** (height + 1) - 1\n if 2 * pos + 3 > len(array_tree): # exit the loop if the tree was traversed\n break\n for j in range(0, 2 ** i):\n level += array_tree[2 * pos + 1] + \" \" * spaces + array_tree[2 * pos + 2] + \" \" * spaces # adds the nodes from that 
level\n pos += 1\n height -= 2\n print(level)", "def draw(self):\n pos = Point([2,2])\n\n if self.classes == None:\n classes = self.lumpy.get_class_list()\n else:\n classes = [make_thing(self.lumpy, cls) for cls in self.classes]\n\n # find the classes that have no parents, and find the\n # height of each tree\n roots = [c for c in classes if c.parents == []]\n for root in roots:\n root.set_height()\n\n # for all the leaf nodes, compute the distance to\n # the parent\n leafs = [c for c in classes if c.childs == []]\n for leaf in leafs:\n leaf.set_depth()\n\n # if we're drawing all the classes, start with the roots;\n # otherwise draw the classes we were given.\n if self.classes == None:\n drawn = self.draw_classes(roots, pos)\n else:\n drawn = self.draw_classes(classes, pos)\n \n self.draw_arrows()\n\n # configure the scroll region\n bbox = Canvas.bbox(self.canvas, ALL)\n self.canvas.configure(scrollregion=bbox)", "def _decorate_tree(t, series):\n for i, n in enumerate(t.postorder()):\n n.size = 30\n if n.is_root():\n n.size = 50\n elif n.name == n.parent.children[0].name:\n n.color = '#00FF00' # left child is green\n else:\n n.color = '#FF0000' # right child is red\n if not n.is_tip():\n t.length = series.loc[n.name]\n return t", "def makeTTree():\n \n tree = TTree(\"tree\",\"tree\")\n px = array('d',[0])\n py = array('d',[0])\n pz = array('d',[0])\n pi = array('i',[0])\n tree.Branch(\"x\",px,\"x/D\")\n tree.Branch(\"y\",py,\"y/D\")\n tree.Branch(\"z\",pz,\"y/D\")\n tree.Branch(\"i\",pi,\"y/I\")\n for i in range(500):\n px[0] = gRandom.Gaus(0,3)\n py[0] = gRandom.Uniform()*30 - 15\n pz[0] = gRandom.Gaus(0,5)\n pi[0] = i%3\n tree.Fill()\n return tree", "def test_render_tree(self) -> None:\n\n def get_children(node):\n return node.children\n\n node, expect, withtags = self.tree_case_1()\n actual = render_tree(node, get_children)\n assert expect == actual, (expect, actual)\n\n node, expect, withtags = self.tree_case_2()\n actual = render_tree(node, get_children, 1)\n assert expect == actual, (expect, actual)\n\n # Ensure that we can call render_tree on the same Node\n # again. This wasn't possible in version 2.4.1 and earlier\n # due to a bug in render_tree (visited was set to {} as default\n # parameter)\n actual = render_tree(node, get_children, 1)\n assert expect == actual, (expect, actual)", "def recursifTreePrinter(tree,indent):\n listOfBranches = tree.GetListOfBranches()\n if len(listOfBranches) > 0: # Width informations\n maxCharName = max([len(branch.GetName()) \\\n for branch in listOfBranches])\n maxCharTitle = max([len(branch.GetTitle()) \\\n for branch in listOfBranches])\n dic = { \\\n \"nameWidth\":maxCharName+2, \\\n \"titleWidth\":maxCharTitle+4, \\\n \"memoryWidth\":1}\n for branch in listOfBranches: # Print loop\n rec = \\\n [branch.GetName(), \\\n \"\\\"\"+branch.GetTitle()+\"\\\"\", \\\n str(branch.GetTotBytes())]\n write(TREE_TEMPLATE.format(*rec,**dic),indent,end=\"\\n\")\n recursifTreePrinter(branch,indent+2)", "def make_trees(self):\n self.trees = build_recursively_from_cells(self.cells, container=self)\n# self.trees = []\n# for cell in self.cells:\n# if cell.bpointer is None: # test whether cell is root\n# tree = Colony(container=self)\n# tree.add_cell_recursive(cell)\n# self.trees.append(tree)\n return", "def construct_tree():\n root = TreeNode(5)\n root.left = TreeNode(3)\n root.right = TreeNode(8)\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(7)\n return root" ]
[ "0.7460558", "0.72682446", "0.6904891", "0.68636274", "0.6776809", "0.6700191", "0.6676679", "0.6673319", "0.66718125", "0.66088754", "0.66088754", "0.65637016", "0.65432936", "0.65039027", "0.64526457", "0.64284134", "0.6277493", "0.6261355", "0.6227239", "0.61798245", "0.613065", "0.61291933", "0.6117561", "0.60788774", "0.5994715", "0.5971515", "0.59233963", "0.590908", "0.5885545", "0.5870496" ]
0.77194875
0
The function draws a sun using a circle and triangles to imitate its rays.
def sun(xs, ys, s, n): yellow = (255, 255, 0) # sun color circle(screen, yellow, (xs, ys), 30 * s) # sun body for k in range(n + 1): # sun rays on the upper side of the sun polygon(screen, yellow, [(xs + 45 * s * np.cos(np.pi / n * (k - 1 / 2)), ys - 45 * s * np.sin(np.pi / n * (k - 1 / 2))), (xs + 30 * s * np.cos(np.pi * (k - 1) / n), ys - 30 * s * np.sin(np.pi * (k - 1) / n)), (xs + 30 * s * np.cos(np.pi * k / n), ys - 30 * s * np.sin(np.pi * k / n))], 0) for k in range(n + 1): # sun rays on the lower side of the sun polygon(screen, yellow, [(xs + 45 * s * np.cos(np.pi / n * (k - 1 / 2)), ys + 45 * s * np.sin(np.pi / n * (k - 1 / 2))), (xs + 30 * s * np.cos(np.pi * (k - 1) / n), ys + 30 * s * np.sin(np.pi * (k - 1) / n)), (xs + 30 * s * np.cos(np.pi * k / n), ys + 30 * s * np.sin(np.pi * k / n))], 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_sun():\n lisandro.penup()\n lisandro.goto(40, 90)\n lisandro.begin_fill()\n lisandro.circle(150) # draws out a circle with a radius of 150 for the sun.\n lisandro.end_fill()\n lisandro.hideturtle()", "def shapes():\r\n turtle.up()\r\n turtle.forward(500)\r\n turtle.down()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()", "def circle(t, r):\n circumference = math.pi * 2 * r\n n = 60\n length = circumference / n\n polygon(t, length, n)", "def direct(sun_pos, grid):\n\n # for each pixel at top of grid pass sun rays in\n for i in xrange(grid.gr.shape[0]):\n \"\"\"\n Make an array starting at loc\n \"\"\"\n xpos = i * grid.xres\n ypos = grid.zres * grid.zsize\n pos = np.array(xpos, ypos)\n direction = pos - sun_pos / np.norm(pos - sun_pos) # this location minus \n r = ray(pos, direction)\n \"\"\"\n The ray now travels down through the canopy being\n altered by transmission and reflectance\n\n amount of scattering vs absorption is determined by leaf area density\n\n \"\"\"", "def drawCircle(r):\r\n # create a turtle-painter instance using turtle library\r\n painter = turtle.Turtle()\r\n\r\n # turtle properties (we want the turtle to look nicer)\r\n painter.shape(\"turtle\") # setting painter shape to turtle\r\n painter.shapesize(3,3,1) # making turtle-painter 3 times bigger\r\n painter.color(\"limegreen\") # setting painting color to limegreen\r\n\r\n # move the turtle-painter to ready position\r\n painter.pu() # we just move without drawing anything\r\n x0 = coordX(r, 0) # compute initial coordinate x0\r\n y0 = coordY(r, 0) # compute initial coordinate y0\r\n\r\n painter.goto(x0,y0) # move the turtle to the ready position\r\n \r\n # tell the turtle to put pencil down on the paper\r\n painter.pd()\r\n\r\n # draw a circle\r\n for theta in range(0, 361, 1):\r\n x = coordX(r, theta, useradians = False)\r\n y = coordY(r, theta, useradians = False)\r\n\r\n painter.goto(x,y)\r\n\r\n # tell the turtle to put pencil up from the paper\r\n painter.pu()\r\n # hide the painter after he finished to draw\r\n painter.ht()\r\n print(\"Draw a circle of r = \", r )", "def draw_circle(c):\n turtle.circle(c.radius)", "def draw_circle(c):\n turtle.circle(c.radius)", "def draw_ball():\n\n draw_circle(ball, 'yellow')", "def triangulate_analytic_sun_at_center(self,r1,x2,y2,r2,x3,y3,r3):\n gamma=(r1**2+x2**2+y2**2-r2**2)/(2.0*x2)\n\ta=(y2**2)/(float(x2**2))\n\tb=-2.0*gamma*y2/x2\n\tc=gamma**2-r1**2\n\ty_plus=(-b+np.sqrt((b**2)-4*a*c))/(2.0*a)\n\ty_minus=(-b-np.sqrt((b**2)-4*a*c))/(2.0*a)\n x_plus=gamma-y_plus*y2/float(x2)\n x_minus=gamma-y_minus*y2/float(x2)\n difference_plus=(x_plus-x3)**2+(y_plus-y3)**2-r3**2\n difference_minus=(x_minus-x3)**2+(y_minus-y3)**2-r3**2\n if abs(difference_minus) < abs(difference_plus):\n print \"Difference minus\", difference_minus\n print x_minus, y_minus\n return x_minus, x_plus, difference_minus\n else:\n print \"Difference plus\", difference_plus\n print x_plus, y_plus\n return x_plus, y_plus, difference_plus", "def draw_circle(t, circle):\n t.pu()\n t.goto(circle.center.x, circle.center.y)\n t.pd()\n polygon.circle(t, circle.radius)", "def background(self):\n sun = graphics.Circle(graphics.Point(200, 310), 50)\n sun.setFill('yellow')\n sun.draw(self.win)\n \n earth = graphics.Circle(graphics.Point(40, 250), 30)\n earth.setFill('blue')\n earth.draw(self.win)\n continent = graphics.Circle(graphics.Point(30, 265), 10)\n continent.setFill('green')\n continent.draw(self.win)\n cont_2 = graphics.Circle(graphics.Point(30, 235), 10)\n cont_2.setFill('green')\n 
cont_2.draw(self.win)\n cont_3 = graphics.Circle(graphics.Point(55, 245), 10)\n cont_3.setFill('green')\n cont_3.draw(self.win)\n \n stars = graphics.Circle(graphics.Point(250, 250), 5)\n stars.setFill('white')\n stars.draw(self.win)\n star1 = graphics.Circle(graphics.Point(100, 250), 5)\n star1.setFill('white')\n star1.draw(self.win)\n star2 = graphics.Circle(graphics.Point(150, 150), 5)\n star2.setFill('white')\n star2.draw(self.win)\n star3 = graphics.Circle(graphics.Point(50, 100), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star3 = graphics.Circle(graphics.Point(100, 50), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star4 = graphics.Circle(graphics.Point(250, 80), 5)\n star4.setFill('white')\n star4.draw(self.win)\n star4 = graphics.Circle(graphics.Point(200, 60), 5)\n star4.setFill('white')\n star4.draw(self.win)", "def draw_hexagon(): \r\n turtle.forward(100)\r\n turtle.left(60)\r\n turtle.forward(100)\r\n turtle.left(60)\r\n turtle.forward(100)\r\n turtle.left(60)\r\n turtle.forward(100)\r\n turtle.left(60)\r\n turtle.forward(100)\r\n turtle.left(60)\r\n turtle.forward(100)\r\n turtle.left(60)", "def main():\r\n x = int(input(\"Enter the x coordinate of the center point: \"))\r\n y = int(input(\"Enter the y coordinate of the center point: \"))\r\n radius = int(input(\"Enter the radius: \"))\r\n drawCircle(Turtle(), x, y, radius)\r\n sleep(5)", "def draw_star(x=0,y=0,radius=10):\n cx = x\n cy = y+radius\n bx = cx * math.cos(2*math.pi/3) - ( cy * math.sin(2*math.pi/3) )\n by = cx * math.sin(2*math.pi/3) + ( cy * math.cos(2*math.pi/3) )\n ax = cx * math.cos(4*math.pi/3) - ( cy * math.sin(4*math.pi/3) )\n ay = cx * math.sin(4*math.pi/3) + ( cy * math.cos(4*math.pi/3) )\n my_turtle.penup()\n my_turtle.goto(cx, cy)\n my_turtle.pendown()\n my_turtle.goto(bx, by)\n my_turtle.goto(ax, ay)\n my_turtle.goto(cx, cy)\n my_turtle.penup()\n cy = y-radius\n bx = cx * math.cos(2*math.pi/3) - ( cy * math.sin(2*math.pi/3) )\n by = cx * math.sin(2*math.pi/3) + ( cy * math.cos(2*math.pi/3) )\n ax = cx * math.cos(4*math.pi/3) - ( cy * math.sin(4*math.pi/3) )\n ay = cx * math.sin(4*math.pi/3) + ( cy * math.cos(4*math.pi/3) )\n my_turtle.penup()\n my_turtle.goto(cx, cy)\n my_turtle.pendown()\n my_turtle.goto(bx, by)\n my_turtle.goto(ax, ay)\n my_turtle.goto(cx, cy)\n my_turtle.penup()", "def circle():\n xmin=0\n xmax=6.5\n ymin=0.\n ymax=6.5\n\n x = arange(xmin, xmax, 0.005)\n y = x*1.\n [xx, yy] = meshgrid(x, y)\n\n zz=sqrt((xx-3.2475)**2.+(yy-3.2475)**2.)\n zz2=zz*1.\n zz2[(zz <= 3.25)]=1.\n zz2[(zz <= 3.25*0.2)]=0.\n zz2[(zz > 3.25)]=0.\n zz3=zeros(numpy.array(numpy.shape(zz2))/10)\n for i in arange(len(xx)/10):\n for j in arange(len(yy)/10):\n zz3[i,j]=numpy.sum(zz2[(i*10):(i*10+10),(j*10):(j*10+10)])/100.\n\n return zz3", "def drawCircle(t, x, y, radius):\r\n t.up()\r\n t.goto(x + radius, y)\r\n t.setheading(90)\r\n t.down()\r\n for count in range(120):\r\n t.left(3)\r\n t.forward(2.0 * math.pi * radius / 120.0)", "def make_circle_fill():\n num_points = 40\n batch = pyglet.graphics.Batch()\n rad = math.pi * 2 / num_points # getting 360 / n in radians\n index = list(itertools.chain.from_iterable( (0, x-1, x) for x in range(2, num_points+1) ))\n index += [0, 1, num_points] # end of fan\n vertices = [0, 0] # adding center of fan\n for i in range(1, num_points + 1):\n angle = rad * i\n vertices += [math.cos(angle), math.sin(angle)]\n vertices += [1, 0] # adding end of fan\n circle = pyglet.graphics.vertex_list_indexed(num_points+2, index, ('v2f', vertices))\n return circle", "def 
draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, self.radius, TARGET_COLOR)", "def DrawSolidCircle(self, center, radius, axis, color):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, (color/2).bytes+[127],\r\n center, radius, 0)\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, 1)\r\n pygame.draw.aaline(self.surface, (255, 0, 0), center,\r\n (center[0] - radius*axis[0], center[1] +\r\n radius*axis[1]))", "def draw_flower():\n # This draws the center hexagon\n length_hex = get_length_hexagon()\n petal_color = get_petal_color()\n draw_hex(length_hex, get_center_color())\n # Move to the correct location and draw a hexagon for petal\n turtle.penup()\n turtle.left(120)\n turtle.forward(length_hex)\n turtle.right(60)\n turtle.forward(length_hex)\n turtle.right(60)\n turtle.pendown()\n draw_hex(length_hex, petal_color)\n turtle.penup()\n turtle.forward(length_hex)\n turtle.right(60)\n turtle.forward(length_hex)\n turtle.left(60)\n turtle.pendown()\n draw_hex(length_hex, petal_color)\n turtle.penup()\n turtle.left(240)\n turtle.forward(length_hex)\n turtle.left(60)\n turtle.forward(length_hex)\n turtle.left(60)\n turtle.pendown()\n draw_hex(length_hex, petal_color)\n turtle.penup()\n turtle.left(240)\n turtle.forward(length_hex)\n turtle.right(60)\n turtle.forward(length_hex)\n turtle.left(180)\n turtle.pendown()\n draw_hex(length_hex, petal_color)\n turtle.penup()\n turtle.left(120)\n turtle.forward(length_hex)\n turtle.left(60)\n turtle.forward(length_hex)\n turtle.left(180)\n turtle.pendown()\n draw_hex(length_hex, petal_color)\n turtle.penup()\n turtle.left(120)\n turtle.forward(length_hex)\n turtle.right(60)\n turtle.forward(length_hex)\n turtle.right(60)\n turtle.pendown()\n draw_hex(length_hex, petal_color)", "def fermat_spiral(a, t):\n global scale\n t.ht()\n t.pd()\n tt = Turtle()\n tt.ht()\n tt.pd()\n tt.color('red')\n tt.seth(180)\n for i in range(300):\n theata = i * get_radian()\n r = sqrt(a**2*theata)\n x = r * cos(theata)\n y = r * sin(theata)\n t.goto(x, y)\n tt.goto(-x, -y)\n t.up()\n t.home()\n t.fd(a + 10)\n t.seth(90)\n t.pensize(2)\n t.color('purple')\n t.pd()\n t.circle(30)", "def draw_full_circle(x, y, radius):\n iterations = int(2 * radius * pi)\n s = sin(2 * pi / iterations)\n c = cos(2 * pi / iterations)\n\n dx, dy = radius, 0.\n\n glBegin(GL_TRIANGLE_FAN)\n glVertex2f(x, y)\n for _ in range(iterations + 1):\n glVertex2f(x + dx, y + dy)\n dx, dy = (dx * c + dy * s), (dy * c - dx * s)\n glEnd()", "def __drawCircle(self, center, radius, color, drawwidth=1):\n radius *= self.viewZoom\n if radius < 1: radius = 1\n else: radius = int(radius)\n\n pygame.draw.circle(self.screen, color, center, radius, drawwidth)", "def circle(self, center, rad):\n self.gc.show_circles(center[0], center[1], rad, facecolor='none', edgecolor=self.color, linewidth=0.5)", "def draw_spiral(t):\r\n\r\n colors = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\r\n\r\n for x in range(400):\r\n c = colors[x % 6]\r\n t.pencolor(c)\r\n\r\n t.forward(x)\r\n t.left(59)", "def draw_arc_filled(center_x, center_y,\n width, height,\n color,\n start_angle, end_angle,\n tilt_angle=0):\n num_segments = 128\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n GL.glEnable(GL.GL_LINE_SMOOTH)\n GL.glHint(GL.GL_LINE_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, 
center_y, 0)\n GL.glRotatef(tilt_angle, 0, 0, 1)\n\n # Set color\n if len(color) == 4:\n GL.glColor4ub(color[0], color[1], color[2], color[3])\n elif len(color) == 3:\n GL.glColor4ub(color[0], color[1], color[2], 255)\n\n GL.glBegin(GL.GL_TRIANGLE_FAN)\n\n start_segment = int(start_angle / 360 * num_segments)\n end_segment = int(end_angle / 360 * num_segments)\n GL.glVertex3f(0, 0, 0.5)\n\n for segment in range(start_segment, end_segment + 1):\n theta = 2.0 * 3.1415926 * segment / num_segments\n\n x = width * math.cos(theta)\n y = height * math.sin(theta)\n\n GL.glVertex3f(x, y, 0.5)\n\n GL.glEnd()\n GL.glLoadIdentity()", "def circle(r, mv_direction):\n vert_amount = 80\n edge = 2 * r * math.sin(math.radians(360 / (2 * vert_amount))) \n polygon_angle = (vert_amount - 2) / vert_amount * 180\n angle = 180 - polygon_angle\n \n for i in range(vert_amount):\n if i == 0: \n rotate_turtle(polygon_angle / 2, not mv_direction)\n else:\n rotate_turtle(angle, mv_direction)\n turtle.forward(edge)", "def triangle_sphere(t, s):\n olderr = seterr(all='ignore')\n\n # Calcula si el plano del triangulo cruza la esfera, sino devuelve una lista vacia.\n C = plane_sphere(t.plane(),s)\n if not C: return []\n\n # Calcula el mapa de los vertices del triangulo que están dentro de la esfera.\n # Distancias negativas están dentro de la esfera.\n ds = bitarray(map(lambda p: s.dist(p) <= 0, t.p))\n\n # Calcula los puntos de intersección de cada uno de los segmentos del triángulo\n # a la esfera.\n segs = [ segment(t.p[i], t.p[(i+1)%3]) for i in range(3) ]\n ssis = [ segment_sphere(seg, s) for seg in segs ]\n ssif = [ p for pts in ssis for p in pts ]\n\n # Calcula el número de intersecciones que ocurrieron.\n c = len(ssif)\n\n # Split small circle in origin and radious.\n o,r = C\n\n seterr(**olderr)\n\n # Devuelve los arcos según la disposición de los vértives y la\n # cantidad de cortes.\n\n if c == 0 and ds in [bitarray('111')]:\n return []\n\n elif c == 0 and ds in [bitarray('000')] and o not in t:\n return []\n\n elif c == 0 and ds in [bitarray('000')] and o in t:\n return [(2*pi,)+C+(None,None)]\n \n elif c in [2, 4, 6]:\n T = _tim_['%i%i%i' % tuple(map(len, ssis))]\n return [ _arc_(ssis[aa][bb], ssis[cc][dd], C[0], C[1]) for aa,bb,cc,dd in T ]\n\n raise RuntimeError", "def draw_ball(self):\n circle(screen, self.color, (self.x, self.y), self.r)", "def drawHexagon(size, centerColor, petalColor):\n turtle.color(centerColor)\n turtle.begin_fill()\n turtle.forward(size)\n turtle.right(60)\n drawPetal(size, petalColor)\n turtle.left(120) # Turned left 120 so that the turtle faces the correct direction\n drawPetal(size, petalColor)\n turtle.left(120)\n turtle.forward(size)\n turtle.right(60)\n drawPetal(size, petalColor)\n turtle.left(120)\n turtle.forward(size)\n turtle.right(60)\n drawPetal(size, petalColor)\n turtle.left(120)\n turtle.forward(size)\n turtle.right(60)\n drawPetal(size, petalColor)\n turtle.left(120)\n turtle.forward(size)\n turtle.right(60)\n drawPetal(size, petalColor)\n turtle.left(120)\n drawPetal(size, centerColor)\n\n turtle.end_fill()" ]
[ "0.76036316", "0.60727096", "0.5996167", "0.59707737", "0.5860674", "0.5790599", "0.5790599", "0.57553256", "0.5712482", "0.57060146", "0.5607589", "0.55747604", "0.55495393", "0.55164635", "0.55006", "0.5494377", "0.54593194", "0.5439375", "0.5421091", "0.5418831", "0.5415348", "0.53940207", "0.5392267", "0.5337805", "0.5332018", "0.5330638", "0.5317944", "0.5308844", "0.5301097", "0.52944386" ]
0.81417817
0
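A minimal, self-contained sketch of the ray geometry used by the `sun` document in the record above: each upper ray is a triangle whose tip lies on a circle of radius `45*s` and whose base corners lie on the radius-`30*s` sun body. It assumes pygame and numpy are installed and a display is available; the center, scale, ray count and ray index below are illustrative values, not taken from the dataset.

```python
import numpy as np
import pygame

pygame.init()
screen = pygame.display.set_mode((400, 300))
yellow = (255, 255, 0)

xs, ys, s, n, k = 200, 150, 1.0, 12, 3   # center, scale, number of rays, ray index (illustrative)

# Tip of the k-th upper ray, halfway between two body points, at radius 45*s.
tip = (xs + 45 * s * np.cos(np.pi / n * (k - 1 / 2)),
       ys - 45 * s * np.sin(np.pi / n * (k - 1 / 2)))
# Base corners of the ray on the radius-30*s sun body.
base_a = (xs + 30 * s * np.cos(np.pi * (k - 1) / n),
          ys - 30 * s * np.sin(np.pi * (k - 1) / n))
base_b = (xs + 30 * s * np.cos(np.pi * k / n),
          ys - 30 * s * np.sin(np.pi * k / n))

pygame.draw.circle(screen, yellow, (xs, ys), int(30 * s))      # sun body
pygame.draw.polygon(screen, yellow, [tip, base_a, base_b], 0)  # one ray
pygame.display.flip()
```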
Create a mask that tells the model where the padding positions are, so that the model does not treat padding as input
def create_padding_mask(seq): seq = tf.cast(tf.math.equal(seq, 0), tf.float32) return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_attention_mask_3d_padding(source_mask, target_mask):\n mask = make_attention_mask_3d(source_mask, target_mask)\n # invert mask for Megatron\n return mask < 0.5", "def get_padding_mask(inputs, padding_value=0):\n mask = tf.cast(tf.equal(inputs, padding_value), 'float32') \n mask = mask[:, tf.newaxis, tf.newaxis, :]\n return mask", "def padding_mask(lens):\n bs, max_len = len(lens), max(lens)\n mask = torch.zeros(bs, 1, max_len)\n for i, l in enumerate(lens):\n mask[i, :, :l] = 1\n mask = mask > 0\n return mask", "def generate_padding_masks(data, pad_value=0):\n with torch.no_grad():\n mask = (data == pad_value).to(data.device).t().unsqueeze(1)\n return mask", "def make_padding_mask(input_ids, padding_idx=1):\r\n padding_mask = input_ids.eq(padding_idx)\r\n if not padding_mask.any():\r\n padding_mask = None\r\n return padding_mask", "def make_mask(data, pad):\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask", "def make_mask(data, pad):\n\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask", "def generate_visual_features_padding_masks(data, pad_value=0):\n with torch.no_grad():\n return (data == pad_value).all(dim=-1).t().to(data.device).unsqueeze(1)", "def Mask(self) -> int:", "def attention_mask(model, x):\n config = model.config\n input_mask = model.inputs[\"input_mask\"]\n final_mask = model.builder.customOp(opName=\"AttentionMask\",\n opVersion=1,\n domain=\"ai.graphcore\",\n inputs=[input_mask, x],\n attributes={\"dataType\": model.config.popart_dtype})[0]\n final_mask = model.detach(final_mask)\n return final_mask", "def apply_mask(data, mask_func, seed=None, padding=None):\n shape = np.array(data.shape)\n shape[:-3] = 1\n mask = mask_func(shape, seed)\n if padding is not None:\n mask[:, :, :padding[0]] = 0\n mask[:, :, padding[1]:] = 0 # padding value inclusive on right of zeros\n\n masked_data = data * mask + 0.0 # The + 0.0 removes the sign of the zeros\n return masked_data, mask", "def make_inference_attention_mask_3d(source_block, target_block, pad_id):\n # mask = (target_block[:, None, :] != pad_id) * (source_block[:, :, None] != pad_id)\n return make_attention_mask_3d(source_block != pad_id, target_block != pad_id)", "def compute_mask(self, inputs, mask=None):\n if self.padding != \"same\":\n raise ValueError(\"Padding mode '%s' not yet supported\" % (\n self.padding,))\n return mask", "def compute_mask(t, padding_idx=0):\n mask = torch.ne(t, padding_idx).float()\n return mask", "def get_attn_key_pad_mask(seq_k, seq_q):\n\n\t# Expand to fit the shape of key query attention matrix.\n\tlen_q = seq_q.size(1)\n\tpadding_mask = seq_k.eq(0) # padding的部分置为1\n\tpadding_mask = padding_mask.unsqueeze(1).expand(-1, len_q, -1) # batch x len_seq_q x len_seq_k\n\n\treturn padding_mask", "def add_mask_layer(self):\n return Masking(mask_value=self.mask_value, input_shape=(self.max_sequence_size, 1))", "def make_attn_mask(inp, inp_len, dtype=tf.float32):\n with 
tf.name_scope(\"encoder_mask\"):\n mask = tf.sequence_mask(inp_len, dtype=dtype, maxlen=tf.shape(inp)[1])\n return mask[:, None, None, :]", "def testMask4D(self):\n\n # This mask, applied on an image filled with 1, should result in an image\n # filled with 17, as there are 18 weights but we zero out one of them.\n mask = np.ones([3, 3, 2, 1], dtype=np.float32)\n mask[0, 0, 0, :] = 0\n inputs = tf.constant(1.0, shape=(1, 5, 5, 2))\n conv1 = snt.Conv2D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = np.array([[17] * 3] * 3)\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)", "def mask(self):", "def resize_mask(mask, scale, padding):\n h, w = mask.shape[:2]\n mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)\n mask = np.pad(mask, padding, mode='constant', constant_values=0)\n return mask", "def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2", "def fixed_padding(inputs, kernel_size, data_format='channels_last'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n return _padding(inputs, (pad_beg, pad_end), data_format)", "def get_mask(tensor, padding_idx=0):\n mask = torch.ones(size=list(tensor.size()), dtype=torch.bool)\n mask[tensor == padding_idx] = False \n\n return mask", "def make_initial_mask(model):\n mask = Mask()\n prunable_layers = utils.prunable_layers(model)\n for module in prunable_layers:\n mask[module] = torch.ones(list(model.state_dict()[module].shape))\n return mask", "def __generate_mask(self):\n mask = np.concatenate([np.ones(len(self.fixed[0])),\n np.zeros(self.num_points),\n np.ones(len(self.fixed[1]))])\n return mask", "def attention_bias_ignore_padding(memory_padding):\n\tret = tf.multiply(memory_padding, -1e18)\n\treturn tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1)", "def make_pad_mask(seq_lens, device_id=-1):\n bs = seq_lens.size(0)\n max_time = max(seq_lens)\n\n seq_range = torch.arange(0, max_time, dtype=torch.int32)\n seq_range_expand = seq_range.unsqueeze(0).expand(bs, max_time)\n seq_length_expand = seq_range_expand.new(seq_lens).unsqueeze(-1)\n mask = seq_range_expand < seq_length_expand\n\n if device_id >= 0:\n mask = mask.cuda(device_id)\n\n return mask", "def make_pad_mask(seq_lens):\n bs = seq_lens.size(0)\n max_time = seq_lens.max()\n seq_range = torch.arange(0, max_time, dtype=torch.int32, device=seq_lens.device)\n seq_range = seq_range.unsqueeze(0).expand(bs, max_time)\n mask = seq_range < seq_lens.unsqueeze(-1)\n return mask", "def lstm_mask_layer(proj, mask):\n\n return proj * mask[:, :, None]", "def offset_mask(mask):\n def axis_data(axis):\n \"\"\"Gets the bounds of a masked area along a certain axis\"\"\"\n x = mask.sum(axis)\n trimmed_front = N.trim_zeros(x,\"f\")\n offset = len(x)-len(trimmed_front)\n size = len(N.trim_zeros(trimmed_front,\"b\"))\n return offset,size\n\n xo,xs = axis_data(0)\n yo,ys = axis_data(1)\n\n array = mask[yo:yo+ys,xo:xo+xs]\n offset = (yo,xo)\n return offset, array" ]
[ "0.7194738", "0.71909803", "0.69525963", "0.68565017", "0.68536884", "0.67958707", "0.6788473", "0.67264396", "0.663091", "0.66175294", "0.6586988", "0.65755576", "0.6422467", "0.6411352", "0.63775486", "0.6350122", "0.63143253", "0.62728643", "0.62658393", "0.6217786", "0.6207882", "0.6206947", "0.61920965", "0.6183879", "0.6183711", "0.6149185", "0.6144442", "0.6130343", "0.6097966", "0.60941124" ]
0.7422589
0
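A minimal usage sketch for the `create_padding_mask` document in the record above, assuming TensorFlow 2.x; the function is restated verbatim so the snippet runs on its own, and the token IDs are illustrative, with 0 as the padding ID.

```python
import tensorflow as tf

def create_padding_mask(seq):                     # restated from the record above
    seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
    return seq[:, tf.newaxis, tf.newaxis, :]      # (batch_size, 1, 1, seq_len)

batch = tf.constant([[7, 6, 0, 0, 1],
                     [1, 2, 3, 0, 0]])            # 0 marks padded positions
mask = create_padding_mask(batch)
print(mask.shape)        # (2, 1, 1, 5)
print(mask[0, 0, 0])     # tf.Tensor([0. 0. 1. 1. 0.], ...) -- 1.0 where the input is padding
```

The two singleton axes let the same mask broadcast across attention heads and query positions when it is added to scaled dot-product attention logits.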
Turn a linear function into a linear layer.
def create_linear_layer(lin_fn, num_inputs): inputs = torch.randn(num_inputs, dtype=torch.float64).requires_grad_(True) bias = torch.ones((), dtype=torch.float64).requires_grad_(True) out = lin_fn(inputs, bias) layer = nn.Linear(num_inputs, len(out)) for i, x in enumerate(out): inputs.grad = None bias.grad = None x.backward(retain_graph=True) if inputs.grad is not None: layer.weight.detach()[i] = inputs.grad.float() else: layer.weight.detach()[i].zero_() if bias.grad is not None: layer.bias.detach()[i] = bias.grad.float() else: layer.bias.detach()[i] = 0 return layer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def linear_layer(self, W, b, x, use_relu=True):\n\n if use_relu is True:\n return np.maximum(W @ x + b, 0)\n return W @ x + b", "def linear(*args, **kwargs):\n return nn.Linear(*args, **kwargs)", "def linear(x, n_output, name=None, activation=None, reuse=None):\n\tprint('##Start linear##')\n\tif len(x.get_shape()) != 2:\n\t\tx = flatten(x, reuse=reuse)\n\tn_input = x.get_shape().as_list()[1]\n\twith tf.variable_scope(name or \"fc\", reuse=reuse):\n\t\tW = tf.get_variable(\n name='W',\n shape=[n_input, n_output],\n dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n\t\tb = tf.get_variable(\n name='b',\n shape=[n_output],\n dtype=tf.float32,\n initializer=tf.constant_initializer(0.0))\n\t\th = tf.nn.bias_add(\n name='h',\n value=tf.matmul(x, W),\n bias=b)\n\tif activation:\n h = activation(h)\n\tprint('h=', h,'W= ',W)\n\treturn h, W", "def linear(x,output_dim):\n w=tf.get_variable(\"w\", [x.get_shape()[1], output_dim])\n b=tf.get_variable(\"b\", [output_dim], initializer=tf.constant_initializer(0.0))\n return tf.nn.xw_plus_b(x,w,b)", "def snlinear(x, output_size, bias_start=0.0, training=True, name='snlinear'):\n with tf.variable_scope(\n name,\n custom_getter=sn_gettr(training=training, equality_constrained=False)):\n return tf.layers.dense(\n x,\n output_size,\n activation=None,\n use_bias=True,\n kernel_initializer=tf.keras.initializers.VarianceScaling(\n scale=1.0, mode='fan_avg', distribution='uniform'),\n bias_initializer=tf.initializers.constant(bias_start))", "def linear_function(x, y):\n\n return x + y / 2.", "def create_linear_model(x, N, outputs=1):\n with C.layers.default_options(initial_state = 0.1):\n return linear_layer(x, outputs)", "def linear(self, X):\n return X", "def linear(x, n_output, name=None, activation=None, reuse=None):\n if len(x.get_shape()) != 2:\n x = flatten(x, reuse=reuse)\n n_input = x.get_shape().as_list()[1]\n with tf.variable_scope(name or \"fc\", reuse=reuse):\n W = tf.get_variable(name='W',\n shape=[n_input, n_output],\n dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.get_variable(name='b',\n shape=[n_output],\n dtype=tf.float32,\n initializer=tf.constant_initializer(0.0))\n h = tf.nn.bias_add(name='h',\n value=tf.matmul(x, W),\n bias=b)\n if activation:\n h = activation(h)\n return h, W", "def linear(x, n_output, name=None, activation=None, reuse=None):\n if len(x.get_shape()) != 2:\n x = flatten(x, reuse=reuse)\n n_input = x.get_shape().as_list()[1]\n with tf.variable_scope(name or \"fc\", reuse=reuse):\n W = tf.get_variable(name='W',\n shape=[n_input, n_output],\n dtype=tf.float32,\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.get_variable(name='b',\n shape=[n_output],\n dtype=tf.float32,\n initializer=tf.constant_initializer(0.0))\n h = tf.nn.bias_add(name='h',\n value=tf.matmul(x, W),\n bias=b)\n if activation:\n h = activation(h)\n return h, W", "def linear(w_in, w_out, *, bias=False):\n return nn.Linear(w_in, w_out, bias=bias)", "def linear(input, hidden_size, name, with_bias=True, init_type='gcn'):\n \n if init_type == 'gcn':\n fc_w_attr = F.ParamAttr(initializer=F.initializer.XavierInitializer())\n fc_bias_attr = F.ParamAttr(initializer=F.initializer.ConstantInitializer(0.0))\n else:\n fan_in = input.shape[-1]\n bias_bound = 1.0 / math.sqrt(fan_in)\n fc_bias_attr = F.ParamAttr(initializer=F.initializer.UniformInitializer(low=-bias_bound, high=bias_bound))\n\n negative_slope = math.sqrt(5)\n gain = math.sqrt(2.0 / (1 + negative_slope ** 2))\n std = gain / 
math.sqrt(fan_in)\n weight_bound = math.sqrt(3.0) * std\n fc_w_attr = F.ParamAttr(initializer=F.initializer.UniformInitializer(low=-weight_bound, high=weight_bound))\n \n if not with_bias:\n fc_bias_attr = False\n \n output = L.fc(input,\n hidden_size,\n param_attr=fc_w_attr,\n name=name,\n bias_attr=fc_bias_attr)\n return output", "def Linear(in_features, out_features, bias=True, device=None, dtype=None):\n layer = nn.Linear(in_features, out_features, bias, device, dtype)\n # @see: https://msdn.microsoft.com/en-us/magazine/mt833293.aspx for example\n torch.nn.init.xavier_uniform_(layer.weight)\n if bias:\n torch.nn.init.zeros_(layer.bias)\n return layer", "def add_linear_transform(self):\n \n with tf.variable_scope(\"linear_transform\"):\n \n # feature scales/weights\n self.w = tf.get_variable(\"weights\", shape=[self.dim_input], \n initializer= tf.contrib.layers.xavier_initializer())\n #self.B = tf.get_variable(\"biases\", shape=[self.dim_input], \n # initializer= tf.contrib.layers.xavier_initializer())\n \n # diagonalize and matmul\n self.W = tf.diag(self.w)\n #self.W = tf.get_variable(\"weights\", shape=[self.dim_input, self.dim_input], \n # initializer= tf.contrib.layers.xavier_initializer())\n \n #self.X_transformed = tf.add(tf.matmul(self.X_input, self.W), self.B) \n self.X_transformed = tf.matmul(self.X_input, self.W)", "def linear(input: tf.Tensor,\n n_output: int,\n use_bias: bool = True) -> tf.Tensor:\n weight = tf.get_variable(\n \"weight\",\n dtype=input.dtype,\n shape=(input.shape[-1], n_output),\n initializer=tf.glorot_normal_initializer(),\n )\n output = input @ weight\n if use_bias:\n bias = tf.get_variable(\n \"bias\",\n dtype=input.dtype,\n shape=(n_output, ),\n initializer=tf.zeros_initializer(),\n )\n output += bias\n return output", "def get_linear_layer(rows, columns, init_method):\n layer = torch.nn.Linear(rows, columns)\n init_method(layer.weight)\n with torch.no_grad():\n layer.bias.zero_()\n return layer", "def tensor_network_linear(inputs, states, output_size, bias, bias_start=0.0):\n # each coordinate of hidden state is independent- parallel\n states_tensor = nest.flatten(states)\n total_inputs = [inputs]\n total_inputs.extend(states)\n output = _linear(total_inputs, output_size, True)\n return output", "def linear(X, hidden_size, name, with_bias=True, init_type=None):\n \n if init_type == 'gcn':\n fc_w_attr = F.ParamAttr(initializer=F.initializer.XavierInitializer(),\n name=\"%s_w\" % name)\n fc_bias_attr = F.ParamAttr(initializer=F.initializer.ConstantInitializer(0.0), \n name=\"%s_b\" % name)\n else:\n fan_in = X.shape[-1]\n bias_bound = 1.0 / math.sqrt(fan_in)\n init_b = F.initializer.UniformInitializer(low=-bias_bound, high=bias_bound)\n fc_bias_attr = F.ParamAttr(initializer=init_b, name=\"%s_b\" % name)\n\n negative_slope = math.sqrt(5)\n gain = math.sqrt(2.0 / (1 + negative_slope ** 2))\n std = gain / math.sqrt(fan_in)\n weight_bound = math.sqrt(3.0) * std\n init_w = F.initializer.UniformInitializer(low=-weight_bound, high=weight_bound)\n fc_w_attr = F.ParamAttr(initializer=init_w, name=\"%s_w\" % name)\n \n if not with_bias:\n fc_bias_attr = False\n \n output = L.fc(X,\n hidden_size,\n param_attr=fc_w_attr,\n name=name,\n bias_attr=fc_bias_attr)\n return output", "def linear(input_, output_size, scope=None, stddev=0.02, bias_start=0.0, with_w=False):\n \n shape = input_.get_shape().as_list()\n\n # NOTE: The variable scope is for sharing variables\n # 
https://stackoverflow.com/questions/35919020/whats-the-difference-of-name-scope-and-a-variable-scope-in-tensorflow\n with tf.variable_scope(scope or \"Linear\"):\n matrix = tf.get_variable(\"Matrix\", [shape[1], output_size], tf.float32,\n tf.random_normal_initializer(stddev=stddev))\n bias = tf.get_variable(\"bias\", [output_size],\n initializer=tf.constant_initializer(bias_start))\n if with_w:\n return tf.matmul(input_, matrix) + bias, matrix, bias\n else:\n return tf.matmul(input_, matrix) + bias", "def _add_linear_layer(model_1, model_2):\n data_1 = model_1.weight.data\n data_2 = model_2.weight.data \n \n new_weight_top = torch.cat((data_1, torch.zeros((data_1.shape[0], data_2.shape[1])).cuda()), dim=1)\n new_weight_bottom = torch.cat((torch.zeros((data_2.shape[0], data_1.shape[1])).cuda(), data_2), dim=1)\n new_weight = torch.cat((new_weight_top, new_weight_bottom), dim=0)\n \n new_bias = torch.cat((model_1.bias, model_2.bias), dim=0)\n \n result_model = torch.nn.Linear(model_1.in_features + model_2.in_features, model_1.out_features + model_2.out_features)\n result_model.weight = torch.nn.Parameter(new_weight)\n result_model.bias = torch.nn.Parameter(new_bias)\n\n return result_model", "def linear(point, dependentParams = (NO_TRANSFORMATION, NO_TRANSLATION), externalparams = (0,0,0,0)):\n\n LinearMatrix = dependentParams[0]\n Translation = dependentParams[1]\n return np.matmul(LinearMatrix, point) + Translation", "def _add_final_linear_layer(model_1, model_2):\n data_1 = model_1.weight.data\n data_2 = model_2.weight.data\n\n new_weight = torch.cat((data_1, data_2), dim=1)\n new_bias = model_1.bias + model_2.bias\n\n result_model = torch.nn.Linear(\n model_1.in_features + model_2.in_features, model_1.out_features\n )\n result_model.weight = torch.nn.Parameter(new_weight)\n result_model.bias = torch.nn.Parameter(new_bias)\n\n return result_model", "def linear_activation_calculation(A, W, b, activation_function):\n\n # Your code here\n return activation_function(linear_forward_calculation(A, W, b))\n # raise NotImplementedError", "def SNLinear(*args, **kwargs):\n return spectral_norm(nn.Linear(*args, **kwargs))", "def linear(self, x):\n with tf.name_scope(\"presoftmax_linear\"):\n batch_size = tf.shape(x)[0]\n length = tf.shape(x)[1]\n\n x = tf.reshape(x, [-1, self.hidden_size])\n logits = tf.matmul(x, self.shared_weights, transpose_b=True)\n\n return tf.reshape(logits, [batch_size, length, self.vocab_size])", "def _fc_linear(prev_layer, layer_name, weights, reuse_scope=False):\n with tf.name_scope(layer_name):\n if reuse_scope is False:\n w_np, b_np = _get_weights(layer_name, weights)\n\n with tf.variable_scope(layer_name):\n w = tf.get_variable('W', shape=tuple(w_np.shape),\n dtype=w_np.dtype, trainable=False,\n initializer=tf.constant_initializer(w_np))\n\n b = tf.get_variable('b', shape=tuple(b_np.shape),\n dtype=b_np.dtype, trainable=False,\n initializer=tf.constant_initializer(b_np))\n\n else:\n with tf.variable_scope(layer_name, reuse=True):\n w = tf.get_variable('W')\n b = tf.get_variable('b')\n return tf.nn.bias_add(tf.matmul(prev_layer, w), b)", "def forward(self, state):\r\n x = F.relu(self.linear1(state))\r\n x = F.relu(self.linear2(x))\r\n #x = torch.tanh(self.linear3(x))\r\n #x = F.relu(self.linear3(x))\r\n #x = nn.LeakyReLU(self.linear3(x), negative_slope=0.1)# .negativ_slope nur für leakyReLU relevant\r\n x = F.leaky_relu(self.linear3(x), 0.1)\r\n #x = F.softmax(self.linear3(x), dim=0)\r\n \r\n return x#.negativ_slope", "def tf_linear_model(input_shape, output_shape):\n 
return keras.Sequential([\n tf.keras.layers.InputLayer(input_shape=input_shape),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(output_shape),\n tf.keras.layers.Softmax()\n ])", "def forward(self, state):\n x = F.relu(self.linear1(state))\n x = F.relu(self.linear2(x))\n x = self.linear3(x)\n return x", "def linear(m, b, x, xx):\n y = m*(x - xx) + b\n return y" ]
[ "0.7015644", "0.69151753", "0.66974515", "0.667202", "0.66359043", "0.66043425", "0.65615773", "0.6542365", "0.6536673", "0.6536673", "0.6518618", "0.64601105", "0.6458006", "0.6448246", "0.6381619", "0.63695693", "0.63089275", "0.62843263", "0.62195283", "0.61954653", "0.6170122", "0.6163915", "0.6130873", "0.6100831", "0.607761", "0.60452104", "0.6029942", "0.6025166", "0.6020593", "0.6017976" ]
0.75638497
0
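A usage sketch for the `create_linear_layer` document in the record above, assuming PyTorch. The helper is restated verbatim so the snippet runs standalone; `toy_fn` is an illustrative linear function whose extracted `nn.Linear` should reproduce its outputs at `bias == 1`.

```python
import torch
from torch import nn

def create_linear_layer(lin_fn, num_inputs):      # restated from the record above
    inputs = torch.randn(num_inputs, dtype=torch.float64).requires_grad_(True)
    bias = torch.ones((), dtype=torch.float64).requires_grad_(True)
    out = lin_fn(inputs, bias)
    layer = nn.Linear(num_inputs, len(out))
    for i, x in enumerate(out):
        inputs.grad = None
        bias.grad = None
        x.backward(retain_graph=True)
        if inputs.grad is not None:
            layer.weight.detach()[i] = inputs.grad.float()
        else:
            layer.weight.detach()[i].zero_()
        if bias.grad is not None:
            layer.bias.detach()[i] = bias.grad.float()
        else:
            layer.bias.detach()[i] = 0
    return layer

def toy_fn(x, bias):                              # out0 = 2*x0 - x2, out1 = x1 + 3*bias
    return torch.stack([2.0 * x[0] - x[2], x[1] + 3.0 * bias])

layer = create_linear_layer(toy_fn, num_inputs=3)
print(layer.weight)                               # ~[[2, 0, -1], [0, 1, 0]]
print(layer.bias)                                 # ~[0, 3]

x = torch.randn(3, dtype=torch.float64)
expected = toy_fn(x, torch.ones((), dtype=torch.float64))
assert torch.allclose(layer(x.float()), expected.float(), atol=1e-5)
```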
Generate model evaluations for a set of samples.
def _run_model(self, samples: Union[NumpyFloatArray, NumpyIntArray]): self.runmodel_object.run(samples=samples, append_samples=False) model_evals = copy.deepcopy(np.array(self.runmodel_object.qoi_list)) return model_evals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_samples(self, samples, eval_key=None, missing=None):\n pass", "def evaluate_samples(self, samples, eval_key=None, mask_targets=None):\n pass", "def __call__(self, samples):\n vals = run_model_samples_in_parallel(\n self.pool_function, self.max_eval_concurrency, samples,\n pool=None, assert_omp=self.assert_omp)\n return vals", "def evaluate(self, epoch, exploration_paths):\n logger.log(\"Collecting samples for evaluation\")\n paths = self._sample_eval_paths(epoch)\n statistics = OrderedDict()\n\n statistics.update(self._statistics_from_paths(paths, \"Test\"))\n statistics.update(self._get_other_statistics())\n statistics.update(self._statistics_from_paths(exploration_paths,\n \"Exploration\"))\n\n statistics['AverageReturn'] = get_average_returns(paths)\n statistics['Epoch'] = epoch\n\n for key, value in statistics.items():\n logger.record_tabular(key, value)\n\n self.log_diagnostics(paths)", "def evaluate():\n\tmodel.eval()\n\tstddev = 1 # And mean=0\n\tfor batch_idx, (data, _) in enumerate(syn_test_loader):\n\t\tdata = data.cuda()\n\t\tif batch_idx == 0:\n\t\t\tnoise = torch.autograd.Variable(torch.randn(batch_size, bottleneck).cuda() * stddev)\n\t\t\tsample_representation(\"orig_nat\", data, noise)\n\t\t\tsample_representation(\"natural\", data, noise)\n\t\t\tsample_representation(\"orig_syn\", data, noise)\n\t\t\tsample_representation(\"synth\", data, noise)", "def evaluate_model_random(self, model_eval_samples): \n neval = len(model_eval_samples)\n task_horizon = len(model_eval_samples[0]['ac'])\n # construct evaluation samples \n\n true_obs = [[] for _ in range(neval)]\n inputs = []\n \n for j in range(neval):\n for k in range(task_horizon ):\n obs = model_eval_samples[j]['obs'][k]\n ac = model_eval_samples[j]['ac'][k]\n inputs.append( np.concatenate(( obs,ac)) ) \n true_obs[j].append( model_eval_samples[j]['obs'][k] )\n true_obs[j].append( model_eval_samples[j]['obs'][task_horizon] )\n \n \n \n inputs = np.array([ inputs for _ in range(5)])\n\n \n # Evaluation [ensemble_size, batch_size, obs + acs]\n pred = self.policy.model.predict( inputs)\n error = [0 for _ in range(neval)]\n for j in range(neval):\n for k in range(task_horizon):\n for i in range(5):\n error[j] += np.linalg.norm( true_obs[j][k+1] - true_obs[j][k] - pred[0][i][j*task_horizon+k])\n error = np.array(error) / (5*task_horizon)\n print(\"model error: \", np.mean(error), np.std(error))\n return np.mean(error), np.std(error)", "def _evaluate_examples(self):\n\n getLogger(\"problog_lfi\").debug(\"Evaluating examples:\")\n if self._log:\n evaluator = ExampleEvaluatorLog(self._weights)\n else:\n evaluator = ExampleEvaluator(self._weights)\n\n results = []\n for i, example in enumerate(self._compiled_examples):\n try:\n result = evaluator(example)\n results.append(result)\n getLogger(\"problog_lfi\").debug(\n \"Example \"\n + str(i + 1)\n + \":\\tFrequency = \"\n + str(result[0][0])\n + \"\\tp_evidence = \"\n + str(result[0][1])\n + \"\\tp_queries = \"\n + str(result[0][2])\n )\n except InconsistentEvidenceError:\n # print(\"Ignoring example {}/{}\".format(i + 1, len(self._compiled_examples)))\n getLogger(\"problog_lfi\").warning(\n \"Ignoring example {}/{}\".format(i + 1, len(self._compiled_examples))\n )\n\n return list(chain.from_iterable(results))", "def generate(self, models, sample, **kwargs):\n net_input = sample['net_input']\n\n def batch_for_softmax(dec_out, target):\n # assumes decoder_out[0] is the only thing needed (may not be correct for future models!)\n first, rest = dec_out[0], dec_out[1:]\n 
bsz, tsz, dim = first.shape\n if bsz * tsz < self.softmax_batch:\n yield dec_out, target, True\n else:\n flat = first.contiguous().view(1, -1, dim)\n flat_tgt = target.contiguous().view(flat.shape[:-1])\n s = 0\n while s < flat.size(1):\n e = s + self.softmax_batch\n yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False\n s = e\n\n def gather_target_probs(probs, target):\n probs = probs.gather(\n dim=2,\n index=target.unsqueeze(-1),\n )\n return probs\n\n orig_target = sample['target']\n\n # compute scores for each model in the ensemble\n avg_probs = None\n avg_attn = None\n for model in models:\n model.eval()\n decoder_out = model(**net_input)\n attn = decoder_out[1]\n if type(attn) is dict:\n attn = attn.get('attn', None)\n\n batched = batch_for_softmax(decoder_out, orig_target)\n probs, idx = None, 0\n for bd, tgt, is_single in batched:\n sample['target'] = tgt\n curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data\n if is_single:\n probs = gather_target_probs(curr_prob, orig_target)\n else:\n if probs is None:\n probs = curr_prob.new(orig_target.numel())\n step = curr_prob.size(0) * curr_prob.size(1)\n end = step + idx\n tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt)\n probs[idx:end] = tgt_probs.view(-1)\n idx = end\n sample['target'] = orig_target\n\n probs = probs.view(sample['target'].shape)\n\n if avg_probs is None:\n avg_probs = probs\n else:\n avg_probs.add_(probs)\n if attn is not None and torch.is_tensor(attn):\n attn = attn.data\n if avg_attn is None:\n avg_attn = attn\n else:\n avg_attn.add_(attn)\n if len(models) > 1:\n avg_probs.div_(len(models))\n avg_probs.log_()\n if avg_attn is not None:\n avg_attn.div_(len(models))\n\n bsz = avg_probs.size(0)\n hypos = []\n start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz\n for i in range(bsz):\n # remove padding from ref\n ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \\\n if sample['target'] is not None else None\n tgt_len = ref.numel()\n avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len]\n score_i = avg_probs_i.sum() / tgt_len\n if avg_attn is not None:\n avg_attn_i = avg_attn[i]\n alignment = utils.extract_hard_alignment(\n avg_attn_i,\n sample['net_input']['src_tokens'][i],\n sample['target'][i],\n self.pad,\n self.eos,\n )\n else:\n avg_attn_i = alignment = None\n hypos.append([{\n 'tokens': ref,\n 'score': score_i,\n 'attention': avg_attn_i,\n 'alignment': alignment,\n 'positional_scores': avg_probs_i,\n }])\n return hypos", "def eval(self):\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"Get test generator\"\"\"\r\n test_data = Dataset({'files': test, 'mode': 'eval', 'metadata_help': metadata_help})\r\n test_gen = data.DataLoader(test_data, batch_size=1,\r\n shuffle=True, collate_fn=test_data.collate_eval, drop_last=True)\r\n for batch_number, features in tqdm(enumerate(test_gen)):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n batch_metadata = features['metadata'][0]\r\n self.G = self.G.eval()\r\n\r\n outputs = self.G(spectrograms)\r\n outputs = 
np.squeeze(outputs.detach().cpu().numpy())\r\n phones = np.squeeze(phones.detach().cpu().numpy())\r\n phones = phones.astype(dtype=int)\r\n phones = [c2p[x] for x in phones]\r\n\r\n output_classes = np.argmax(outputs, axis=1)\r\n\r\n \"\"\"Decode the output predictions into a phone sequence\"\"\"\r\n # https://stackoverflow.com/questions/38065898/how-to-remove-the-adjacent-duplicate-value-in-a-numpy-array\r\n duplicates_eliminated = np.asarray([k for k, g in groupby(output_classes)])\r\n blanks_eliminated = duplicates_eliminated[duplicates_eliminated != 0]\r\n predicted_phones_ = [c2p[x] for x in blanks_eliminated]\r\n \"\"\"remove SOS and EOS\"\"\"\r\n predicted_phones = []\r\n for x in predicted_phones_:\r\n if x != 'SOS' and x != 'EOS':\r\n predicted_phones.append(x)\r\n\r\n data_to_save = {'speaker': batch_metadata['speaker'],\r\n 'word': batch_metadata['word'],\r\n 'true_phones': batch_metadata['phones'],\r\n 'predicted_phones': predicted_phones}\r\n dump_path = os.path.join(self.predict_dir, batch_metadata['utterance'] + '.pkl')\r\n joblib.dump(data_to_save, dump_path)", "def evaluate_model(args, eval_runs, warm_runs, metrics=['psnr', 'ssim', 'fps']):\n upsampler = Upsampler(args)\n if warm_runs > 0:\n print(\"Warming up for evaluation\")\n for i in range(warm_runs):\n print(\"Performing warm-up run\", str(i+1))\n for sequence in ['foliage', 'walk', 'calendar', 'city']:\n bix_dir = os.path.join(VID4_DIR, 'BIx4', sequence)\n upsampler.run_dir(bix_dir, reset=False)\n \n time = 0.\n psnrs = []\n ssims = []\n for i in range(eval_runs):\n run_psnrs = []\n run_ssims = []\n print(\"Performing evaluation run\", str(i+1))\n for sequence in ['foliage', 'walk', 'calendar', 'city']:\n bix_dir = os.path.join(VID4_DIR, 'BIx4', sequence)\n gt_dir = os.path.join(VID4_DIR, 'GT', sequence)\n print(\"Evaluating on\", bix_dir)\n time += upsampler.run_dir(bix_dir, reset=False)\n vid_psnrs, vid_ssims = _eval_sr_perf(os.path.join(bix_dir, 'up'), gt_dir)\n run_psnrs += vid_psnrs\n run_ssims += vid_ssims\n if i == eval_runs-1:\n with open(os.path.join(upsampler.get_model_dir(), \"psnr.txt\"), \"w\") as f:\n f.writelines(str(psnr) + '\\n' for psnr in run_psnrs)\n with open(os.path.join(upsampler.get_model_dir(), \"ssim.txt\"), \"w\") as f:\n f.writelines(str(ssim) + '\\n' for ssim in run_ssims)\n psnrs += run_psnrs\n ssims += run_ssims\n\n fps = VID4_LENGTH/ (time/eval_runs)\n return Performance(psnr=psnrs, ssim=ssims, fps=fps)", "def _generate_evaluaters(self):\n evaluators = []\n for para_key in self.parameter[1]:\n for value in self.parameter[1][para_key]:\n evaluators.append(evaluaterSearch.evaluaterSearch(self.parameter[2], [para_key, value]))\n self.evaluators = evaluators", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def generate(self):\n self.training_data.gen_x(self.x_func)\n self.training_data.gen_a(self.a_func)\n 
self.training_data.gen_y(self.y_func)\n \n self.testing_data.gen_x(self.x_func)\n self.testing_data.gen_ys(self.y_func)\n self.testing_data.gen_azero(self.ytotal_func)", "def evaluate(self,**kwargs):\n # setup model\n self.optimizer = SGD(lr = 0,momentum=0,decay = 0)\n self.createModel()\n self.setGenerators()\n self.printParameters()\n output = {}\n\n if kwargs['validationOnly'] != None:\n if kwargs['validationOnly'] == True:\n valOnly = True\n else:\n valOnly = False\n else:\n valOnly = False\n\n if valOnly == False:\n trainOutput = self.model.evaluate_generator(\n generator = self.trainGen,\n steps=self.steps_per_epoch,\n use_multiprocessing=True,\n verbose=1\n )\n output['loss'] = trainOutput[0]\n for i in range(len(self.metricsAsString)):\n output[self.metricsAsString[i]] = trainOutput[i+1]\n\n print(\"loss : \" + str(output['loss']))\n for i in range(len(self.metricsAsString)):\n tmp = self.metricsAsString[i] \n print(tmp + \" : \" + str(output[tmp])) \n\n validationOutput = self.model.evaluate_generator(\n generator = self.validateGen,\n steps=self.validation_steps, \n use_multiprocessing=True, \n verbose=1)\n \n output['val_loss'] = validationOutput[0]\n for i in range(len(self.metricsAsString)):\n output[\"val_\" + self.metricsAsString[i]] = validationOutput[i+1]\n \n\n print(\"val_loss : \" + str(output['val_loss']))\n for i in range(len(self.metricsAsString)):\n tmp = \"val_\" + self.metricsAsString[i] \n print(tmp + \" : \" + str(output[tmp]))", "def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)", "def eval_batch(self, outputs, target):\n raise NotImplementedError", "def test_eval(model, test_set):\n num_test_batch = len(test_set)\n test_loss = np.zeros((num_test_batch, 1), dtype=float)\n test_acc = np.zeros((num_test_batch, 1), dtype=float)\n for ibatch, batch in enumerate(test_set):\n result = model.test_on_batch({'input':batch[0]}, {'fp1':batch[1], 'fp2':batch[1], 'fp3':batch[1], 'ave':batch[1]})\n test_loss[ibatch] = result[0]\n test_acc[ibatch] = result[-1]\n return np.mean(test_loss), np.mean(test_acc)", "def evaluate(model, datagen, X_test, Y_test, batch_size, save_folder_path=None):\n\n print(\"[INFO] Evaluating model...\")\n\n scores = model.evaluate_generator(\n datagen.flow(X_test, Y_test, batch_size=batch_size),\n verbose=1)\n \n print(\"[INFO] Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n \n if save_folder_path is not None:\n # Write results to path\n assert os.path.isdir(save_folder_path) == True, \"Unable to save evaluation results, save_folder_path is not a folder\"\n eval_results_path = save_folder_path + \"/eval_results.txt\"\n eval_handle = open(eval_results_path, 'w')\n eval_handle.write(\"Model name: {}\\n\\n\".format(MODEL_NAME))\n eval_handle.write(\"Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n eval_handle.close()", "def evaluate(hparams, summary_dir, num_gpus, model_type, eval_set, eval_size,\n eval_shard, data_dir, num_targets, dataset, validate, seed,\n shuffled, shift, pad, batch_size=100, checkpoint=None):\n output_dir = summary_dir\n load_dir = summary_dir + '/train/'\n summary_dir += '/eval/' + 
FLAGS.dataset + '/' + eval_set\n with tf.Graph().as_default():\n features = get_features(eval_set, batch_size, num_gpus, data_dir,\n num_targets, dataset, validate, evaluate=True,\n seed=seed, shuffled=shuffled, shift=shift,\n pad=pad, eval_shard=eval_shard)\n model = models[model_type](hparams)\n result, _ = model.multi_gpu(features, num_gpus)\n test_writer = tf.summary.FileWriter(summary_dir)\n seen_step = -1\n paused = 0\n while paused < 360:\n print('start evaluation, model defined')\n if checkpoint:\n step = extract_step(checkpoint)\n last_checkpoint = checkpoint\n else:\n step, last_checkpoint = find_checkpoint(load_dir, seen_step)\n if step == -1:\n time.sleep(60)\n paused += 1\n else:\n paused = 0\n seen_step = step\n run_experiment(load_eval, last_checkpoint, test_writer,\n eval_experiment, model, result,\n eval_size // batch_size, features=features,\n eval_set=eval_set, output_dir=output_dir,\n unsupervised=hparams.unsupervised,\n num_gpus=num_gpus)\n if checkpoint:\n break\n\n test_writer.close()", "def evaluate(\n self,\n num_samples: int = 10,\n report_every: int = 100,\n train_or_test: str = \"test\",\n ) -> None:\n # asset train or test valid\n assert train_or_test in [\"train\", \"test\"]\n\n # set device and num_gpus\n num_gpus = num_devices()\n device = torch_device()\n torch.backends.cudnn.benchmark = True if cuda.is_available() else False\n\n # init model with gpu (or not)\n self.model.to(device)\n if num_gpus > 1:\n self.model = nn.DataParallel(model)\n self.model.eval()\n\n # set train or test\n ds = (\n self.dataset.test_ds\n if train_or_test == \"test\"\n else self.dataset.train_ds\n )\n\n # set num_samples\n ds.dataset.num_samples = num_samples\n print(\n f\"{len(self.dataset.test_ds)} samples of {self.dataset.test_ds[0][0][0].shape}\"\n )\n\n # Loop over all examples in the test set and compute accuracies\n ret = dict(\n infer_times=[],\n video_preds=[],\n video_trues=[],\n clip_preds=[],\n clip_trues=[],\n )\n report_every = 100\n\n # inference\n with torch.no_grad():\n for i in range(\n 1, len(ds)\n ): # [::10]: # Skip some examples to speed up accuracy computation\n if i % report_every == 0:\n print(\n f\"Processsing {i} of {len(self.dataset.test_ds)} samples..\"\n )\n\n # Get model inputs\n inputs, label = ds[i]\n inputs = inputs.to(device, non_blocking=True)\n\n # Run inference\n start_time = time()\n outputs = self.model(inputs)\n outputs = outputs.cpu().numpy()\n infer_time = time() - start_time\n ret[\"infer_times\"].append(infer_time)\n\n # Store results\n ret[\"video_preds\"].append(outputs.sum(axis=0).argmax())\n ret[\"video_trues\"].append(label)\n ret[\"clip_preds\"].extend(outputs.argmax(axis=1))\n ret[\"clip_trues\"].extend([label] * num_samples)\n\n print(\n f\"Avg. 
inference time per video ({len(ds)} clips) =\",\n round(np.array(ret[\"infer_times\"]).mean() * 1000, 2),\n \"ms\",\n )\n print(\n \"Video prediction accuracy =\",\n round(accuracy_score(ret[\"video_trues\"], ret[\"video_preds\"]), 2),\n )\n print(\n \"Clip prediction accuracy =\",\n round(accuracy_score(ret[\"clip_trues\"], ret[\"clip_preds\"]), 2),\n )\n return ret", "def evaluate_samples(lang_samples: Dict[str, List[Tuple[List[str], List[str], List[str]]]]) -> Dict[str, Any]:\n\n evaluation_result = dict()\n lang_phon_err, lang_phon_count, lang_word_err = dict(), dict(), dict()\n languages = sorted(lang_samples.keys())\n for lang in languages:\n for word, generated, target in lang_samples[lang]:\n word = ''.join(word)\n phon_err, phon_count = phoneme_error(generated, target)\n word_err = word_error(generated, target)\n phon_err_dict = lang_phon_err.setdefault(lang, dict())\n phon_count_dict = lang_phon_count.setdefault(lang, dict())\n word_err_dict = lang_word_err.setdefault(lang, dict())\n best_phon_err, best_phon_count = phon_err_dict.get(word, None), phon_count_dict.get(word, None)\n if best_phon_err is None or phon_err / phon_count < best_phon_err / best_phon_count:\n phon_err_dict[word] = phon_err\n phon_count_dict[word] = phon_count\n word_err_dict[word] = word_err\n\n phon_errors, phon_counts, word_errors, word_counts = [], [], [], []\n for lang in languages:\n phon_err = sum(lang_phon_err[lang].values())\n phon_errors.append(phon_err)\n phon_count = sum(lang_phon_count[lang].values())\n phon_counts.append(phon_count)\n word_err = sum(lang_word_err[lang].values())\n word_errors.append(word_err)\n word_count = len(lang_word_err[lang])\n word_counts.append(word_count)\n per = phon_err / phon_count\n wer = word_err / word_count\n evaluation_result.setdefault(lang, {}).update({'per': per})\n evaluation_result.setdefault(lang, {}).update({'wer': wer})\n mean_per = sum(phon_errors) / sum(phon_counts)\n mean_wer = sum(word_errors) / sum(word_counts)\n evaluation_result['mean_per'] = mean_per\n evaluation_result['mean_wer'] = mean_wer\n\n return evaluation_result", "def set_eval(self):\n for m in self.models.values():\n m.eval()", "def evaluate(self, training_samples, validation_samples, epochs=20, iterations=None, callbacks=[]):\n # Initialize a Keras Data Generator for generating Training data\n dataGen_training = DataGenerator(training_samples, self.preprocessor,\n training=True, validation=False,\n shuffle=self.shuffle_batches,\n iterations=iterations)\n # Initialize a Keras Data Generator for generating Validation data\n dataGen_validation = DataGenerator(validation_samples,\n self.preprocessor,\n training=True, validation=True,\n shuffle=self.shuffle_batches)\n # Run training & validation process with the Keras fit\n history = self.model.fit(dataGen_training,\n validation_data=dataGen_validation,\n callbacks=callbacks,\n epochs=epochs,\n workers=self.workers,\n max_queue_size=self.batch_queue_size)\n # Clean up temporary files if necessary\n if self.preprocessor.prepare_batches or self.preprocessor.prepare_subfunctions:\n self.preprocessor.data_io.batch_cleanup()\n # Return the training & validation history\n return history", "def print_eval(trainset, testset, exptypes=EXPTYPES, semantic=False, savemodels=False, loadmodels=False, deprep=False, externals=True, predict=True):\n system_pairs = []\n print \"== cleaning lsts ==\"\n cleanupnonespanexpressions(testset)\n cleanholdercandidates(testset)\n cleanholders(testset)\n cleanupnonespanexpressions(trainset)\n 
cleanholdercandidates(trainset)\n cleanholders(trainset)\n \n print \"== train ==\"\n ev = evaluate()\n features, labels, stats = getfeaturesandlabels(trainset, semantic=semantic, predict=False)\n print counters, '\\n'\n\n print \"== test ==\"\n counters.clear()\n ftest, ltest, stest = getfeaturesandlabels(testset, semantic=semantic, predict=predict)\n print counters\n for exp in exptypes:\n vec, X, y = create_matrix(features[exp], labels[exp])\n if externals:\n vecw, Xw, yw = create_matrix(features[exp + 'w'], labels[exp + 'w'])\n vecimp, Ximp, yimp = create_matrix(features[exp + 'w'], labels[exp + 'implicit'])\n if loadmodels:\n clf = read_model(loadmodels + exp)\n else:\n clf = create_model(X, y)\n if externals:\n clfw = create_model(Xw, yw)\n clfimp = create_model(Ximp, yimp)\n if savemodels:\n write_model(clf, savemodels + exp)\n print \"== eval ==\"\n if deprep:\n print \"== {} ==\".format(deprep)\n Xt, yt = transform_to_matrix(ftest[exp], ltest[exp], vec)\n if externals:\n Xtw, ytw = transform_to_matrix(ftest[exp + 'w'], ltest[exp + 'w'], vecw)\n Xtimp, ytimp = transform_to_matrix(ftest[exp + 'w'], ltest[exp + 'implicit'], vecimp)\n results = clf.predict_proba(Xt)\n s_p_w = False\n s_p_imp = False\n gold_p1 = ev.get_unique_exp(copy.deepcopy(stest['positions'][exp + 'w']), exp, count=False)\n gold_p2 = copy.deepcopy(gold_p1)\n gold_p3 = copy.deepcopy(gold_p1)\n if clfw:\n resultsw = clfw.predict_proba(Xtw)\n s_p_w=ev.get_system_pairs_prob(stest['positions'][exp + 'w'], resultsw, gold_p1)\n counters['s_p_w' + exp] = len(s_p_w)\n if DEBUG:\n print \"RESULTSW\"\n print resultsw\n if clfimp:\n resultsimp = clfimp.predict_proba(Xtimp)\n s_p_imp=ev.get_system_pairs_prob(stest['positions'][exp + 'implicit'], resultsimp, gold_p2)\n counters['s_p_imp' + exp] = len(s_p_imp)\n if DEBUG:\n print \"RESULTSIMP\"\n print resultsimp\n s_p_int=ev.get_system_pairs_prob(stest['positions'][exp], results, gold_p3)\n counters['s_p_int' + exp] = len(s_p_int)\n system_pairs_exp = ev.merge_system_pairs(s_p_int, s_p_imp=s_p_imp, s_p_w=s_p_w)\n counters['system_pairs_all' + exp] = len(system_pairs_exp)\n for pair in system_pairs_exp:\n if 'confidence' in pair and pair['confidence'] > 0:\n counters['system_pairs' + exp] += 1\n if predict:\n ssc_exp = ev.spansetcoverage_o_p(system_pairs_exp, exptype=exp)\n print \"system exp - {}:\\n{}\".format(exp, prf_prettystring(ssc_exp))\n else:\n ssc_exp = ev.spansetcoverage_o_p(system_pairs_exp, exptype=exp)\n print \"gold exp - {}:\\n{}\".format(exp, prf_prettystring(ssc_exp))\n system_pairs.extend(system_pairs_exp)\n if predict:\n ssc = ev.spansetcoverage_o_p(system_pairs)\n print \"system exp - all:\\n\", prf_prettystring(ssc)\n else:\n ssc = ev.spansetcoverage_o_p(system_pairs)\n print \"gold exp - all: \\n\", prf_prettystring(ssc)\n \n for k,v in sorted(counters.items(), key=lambda x: x[0]):\n print k, v\n if isinstance(deprep, basestring):\n dump_jsonfile(system_pairs, 'system_pairs-' + deprep + '.json')\n return {'stats': stest, 'system_pairs': system_pairs}", "def sampling(X_train, y_train, X_test, y_test, sampling_instances, model_instances, func):\n\n metrics = []\n # go through all sampling methods\n for sampling_instance in sampling_instances:\n if sampling_instance is not None:\n print('fitting sampling '+ str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = sampling_instance.fit_resample(X=X_train, y=y_train)\n else:\n print('fitting sampling '+ 
str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = X_train, y_train\n\n # Go through all models\n for model_instance in model_instances:\n print('fitting model ' + str(model_instances.index(model_instance) + 1) + ' on ' +\n str(len(model_instances)), \" : \", type(model_instance).__name__)\n model_instance.fit(X_train1, y_train1)\n metrics.append(func(y_test, model_instance.predict(X_test)))\n\n models = [type(model).__name__ for model in model_instances]\n methods = [type(sampling).__name__ for sampling in sampling_instances]\n index = [model + '_' + method for model in models for method in methods]\n\n #Dry run of compute metrics with return_index=True to get indexes\n columns = func(y_test, y_test, average='weighted', return_index=True)\n metrics = pd.DataFrame(metrics, columns=columns, index=index)\n\n return metrics", "def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)", "def evaluate(cfg: DictConfig):\n\n experiments = cfg.get('experiment_type', f'{cfg.model.name}_only')\n fixed_t0 = cfg.get('fixed_t0', False)\n ext = '_fixedT0' if fixed_t0 else ''\n\n base_dir = cfg.device.root\n datasource = cfg.datasource.name\n\n if experiments == 'ablations':\n models = {\n 'FluxRGNN': ['final',\n 'final_without_encoder',\n 'final_without_boundary'],\n 'LocalLSTM': ['final']\n }\n elif experiments == 'final':\n models = {\n 'FluxRGNN': ['final'],\n 'GAM': ['final'],\n 'HA': ['final'],\n 'GBT': ['final']\n }\n else:\n m = cfg.model.name\n year = cfg.datasource.test_year\n\n # find all experiments available for this model, datasource and test year\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{year}')\n models = {\n m : [ f.name for f in os.scandir(result_dir) if f.is_dir() ]\n }\n\n\n # thresholds for binary classification metrics\n if cfg.datasource.name == 'abm':\n thresholds = [0.0019, 0.0207]\n else:\n thresholds = [0, 10, 20]\n\n rmse_per_hour = []\n mae_per_hour = []\n pcc_per_hour = []\n bin_per_hour = []\n\n rmse_per_night = []\n mae_per_night = []\n\n output_dir = osp.join(base_dir, 'results', datasource, f'performance_evaluation{ext}', experiments)\n os.makedirs(output_dir, exist_ok=True)\n\n counter = 0\n\n for m, dirs in models.items():\n print(f'evaluate {m}')\n\n for d in dirs:\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{cfg.datasource.test_year}', d)\n\n # check if directory exists\n if os.path.isdir(result_dir):\n results, model_cfg = load_cv_results(result_dir, trials=cfg.task.repeats, ext=ext)\n\n df_prep = pd.read_csv(osp.join(base_dir, 'data', 'preprocessed',\n f'{model_cfg[\"t_unit\"]}_{model_cfg[\"model\"][\"edge_type\"]}_ndummy={model_cfg[\"datasource\"][\"n_dummy_radars\"]}',\n datasource, cfg.season, str(cfg.datasource.test_year), 'dynamic_features.csv'))\n tidx2night = dict(zip(df_prep.tidx, df_prep.nightID))\n\n rmse_per_hour.append(compute_rmse(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n mae_per_hour.append(compute_mae(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n pcc_per_hour.append(compute_pcc(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n\n if fixed_t0:\n 
rmse_per_night.append(compute_rmse_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n mae_per_night.append(compute_mae_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n\n # compute binary classification measures\n for thr in thresholds:\n bin_per_hour.append(compute_bin(m, d, results, groupby=['horizon', 'trial'], threshold=thr, km2=True))\n\n counter += 1\n\n else:\n print(f'Experiment \"{d}\" for model \"{m}\" and datasource \"{datasource}\" is not available. '\n f'Use \"run_experiments.py model={m} datasource={datasource} +experiment={d}\" to run this experiment.')\n\n if counter > 0:\n rmse_per_hour = pd.concat(rmse_per_hour)\n rmse_per_hour.to_csv(osp.join(output_dir, f'rmse_per_hour.csv'))\n\n mae_per_hour = pd.concat(mae_per_hour)\n mae_per_hour.to_csv(osp.join(output_dir, f'mae_per_hour.csv'))\n\n pcc_per_hour = pd.concat(pcc_per_hour)\n pcc_per_hour.to_csv(osp.join(output_dir, f'pcc_per_hour.csv'))\n\n bin_per_hour = pd.concat(bin_per_hour)\n bin_per_hour.to_csv(osp.join(output_dir, f'bin_per_hour.csv'))\n\n if fixed_t0:\n rmse_per_night = pd.concat(rmse_per_night)\n rmse_per_night.to_csv(osp.join(output_dir, f'rmse_per_night.csv'))\n\n mae_per_night = pd.concat(mae_per_night)\n mae_per_night.to_csv(osp.join(output_dir, f'mae_per_night.csv'))", "def evaluate(self,\n model,\n x=None,\n y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None,\n callbacks=None,\n **kwargs):\n raise NotImplementedError()", "def _evaluate(self, train_x, train_y, test_x, test_y, n_targets, name):\n r_temp = {}\n for metric_name in self.metrics:\n r_temp.update({f\"{metric_name}_Model\": name, f\"{metric_name}_Sum\": 0,\n f\"{metric_name}_Min\": 1000000, f\"{metric_name}_Max\": 0})\n\n for i in range(self.repetitions):\n is_nan = True\n while (is_nan):\n model = self.get_model(train_x.shape[1], n_targets)\n model.fit(train_x, train_y, **self.fit_kwargs)\n result = model.predict(test_x)\n is_nan = np.any(np.isnan(result))\n del model\n\n for metric_name in self.metrics:\n metric = self.get_metrics(metric_name)\n value = metric(result, test_y)\n r_temp[f\"{metric_name}_Sum\"] += value\n if r_temp[f\"{metric_name}_Min\"] > value:\n r_temp[f\"{metric_name}_Min\"] = value\n if r_temp[f\"{metric_name}_Max\"] < value:\n r_temp[f\"{metric_name}_Max\"] = value\n keras.backend.clear_session()\n for metric_name in self.metrics:\n r_temp[f\"{metric_name}_Mean\"] = r_temp[f\"{metric_name}_Sum\"] / self.repetitions\n return r_temp", "def evaluate(self, dataset, *args, **kwargs):\n\n losses = []\n for sample in dataset:\n output = self.predict(sample, *args, **kwargs)\n losses.append(self.metric_loss(output, sample, *args, **kwargs))\n\n return losses" ]
[ "0.6946277", "0.6890121", "0.66393656", "0.6486331", "0.64624524", "0.6429546", "0.63909847", "0.6262663", "0.6214519", "0.6213952", "0.6138569", "0.60926986", "0.60802746", "0.60679173", "0.605645", "0.6050133", "0.6041548", "0.6036979", "0.60147506", "0.6002384", "0.59805185", "0.5978238", "0.5964558", "0.59589976", "0.5951642", "0.5941843", "0.5923971", "0.5921825", "0.59130543", "0.5908304" ]
0.70462584
0
Test if has_page_title returns True if fake_article has a title.
def test_article_has_page_title(self, fake_article): fake_analysis = PageTitleAnalyzer(title=fake_article.title) assert fake_analysis.has_page_title()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_article_has_no_page_title(self, fake_article_missing_elements):\n\n fake_analysis = PageTitleAnalyzer(title=fake_article_missing_elements.title)\n assert not fake_analysis.has_page_title()", "def verifyPageTitle(self, titleToVerify):\n try:\n actualTitle = self.getTitle()\n return self.util.verifyTextContains(actualTitle, titleToVerify)\n except:\n self.log.error(\"Failed to get page title\")\n print_stack()\n return False", "def test_home_page_title(self):\n response = self.client.get('/')\n self.assertIn(site_main_title, response.content.title().decode())", "def _is_title(self):\n ph = _child(self.__nvXxPr.nvPr, 'p:ph')\n if ph is None:\n return False\n # idx defaults to 0 when idx attr is absent\n ph_idx = ph.get('idx', '0')\n # title placeholder is identified by idx of 0\n return ph_idx == '0'", "def test_article_page_title_length(self, fake_article):\n\n fake_analysis = PageTitleAnalyzer(title=fake_article.title)\n assert fake_analysis.page_title_length == len(fake_article.title)", "def page_title(step, title):\r\n\r\n with AssertContextManager(step):\r\n assert_equals(world.browser.title, title)", "def check_title(self):\n currenttitle = self.driver.title\n assert self.TITLE in currenttitle, 'Title not expected. Actual: ' + currenttitle + ', Expected: ' + self.TITLE", "def page_title_contains(self, title: str, timeout: TimeoutType = DEFAULT_TIMEOUT):\n return self._wait_until(condition=ec.title_contains(title), timeout=timeout)", "def istitle(self) -> bool:\n pass", "def has_story_title(self, title, stories):\n\tfor story in stories:\n\t break\n\telse:\n\t self.fail(\"Story with title '%s' not found\" % title)", "def has_story_title(self, title, stories):\n\tfor story in stories:\n\t break\n\telse:\n\t self.fail(\"Story with title '%s' not found\" % title)", "def is_title_displayed(self):\n return self.driver.wait_for_title_contains(InboxLocators.TITLE, 30)", "def test_title(self):\n key = api.portal.get_registry_record(\n 'plone.site_title'\n )\n self.assertEqual(u'Briefy CMS', key)", "def test_on_homepage_published_only(self):\n\tpublished_titles = (\n\t \"The Power of Play: Playground Locations in the Children's Corridor\",\n\t \"Birth Trends in the Children's Corridor: Focus on Foreign-born Mothers\")\n\tunpublished_titles = (\n\t \"Shattered Dreams: Revitalizing Hope in Original Aurora\",\n\t \"A School Fight Rallies Hinkley High School Mothers to Organize\",\n\t \"Transportation Challenges Limit Education Choices for Denver Parents\")\n\tfor title in published_titles + unpublished_titles:\n\t status = 'draft'\n\t if title in published_titles:\n\t\tstatus = 'published'\n\t create_story(title=title, on_homepage=True, status=status)\n\t\t\t \n\thomepage_stories = Story.objects.on_homepage()\n\tself.assertEqual(homepage_stories.count(), len(published_titles))\n\tfor title in published_titles:\n\t self.has_story_title(title, homepage_stories)", "def test_on_homepage_published_only(self):\n\tpublished_titles = (\n\t \"The Power of Play: Playground Locations in the Children's Corridor\",\n\t \"Birth Trends in the Children's Corridor: Focus on Foreign-born Mothers\")\n\tunpublished_titles = (\n\t \"Shattered Dreams: Revitalizing Hope in Original Aurora\",\n\t \"A School Fight Rallies Hinkley High School Mothers to Organize\",\n\t \"Transportation Challenges Limit Education Choices for Denver Parents\")\n\tfor title in published_titles + unpublished_titles:\n\t status = 'draft'\n\t if title in published_titles:\n\t\tstatus = 'published'\n\t create_story(title=title, 
on_homepage=True, status=status)\n\t\t\t \n\thomepage_stories = Story.objects.on_homepage()\n\tself.assertEqual(homepage_stories.count(), len(published_titles))\n\tfor title in published_titles:\n\t self.has_story_title(title, homepage_stories)", "def check_story_exists(self) -> bool:\n title_check = self._soup.find(\"title\").string\n if title_check == u'FicWad: fresh-picked original and fan fiction':\n return False\n return True", "def is_news_article(self, page):\n title = page.find(self.tag_prefix + self.title_tag).text\n for meta in self.titles_to_exclude:\n if title.startswith(meta):\n logging.info(\"{} No es un articulo. Se ignora\".format(title.encode('utf8')))\n return False\n logging.info(\"{} Es un articulo. Se procesa\".format(title.encode('utf8')))\n return True", "def test_get_title(double_title, single_title, empty_title):\n assert get_title(double_title) == \"Parton distributions with LHC data\"\n assert get_title(single_title) == \"The Large Hadron Collider\"\n assert get_title(empty_title) == \"\"\n\n no_title_key = {\n \"not_titles\": []\n }\n assert get_title(no_title_key) == \"\"", "def test_on_homepage(self):\n\thomepage_titles = (\n\t \"The Power of Play: Playground Locations in the Children's Corridor\",\n\t \"Birth Trends in the Children's Corridor: Focus on Foreign-born Mothers\",\n\t \"Shattered Dreams: Revitalizing Hope in Original Aurora\",\n\t \"A School Fight Rallies Hinkley High School Mothers to Organize\",\n\t \"Transportation Challenges Limit Education Choices for Denver Parents\")\n\tother_titles = (\"Story 1\", \"Story 2\")\n\tfor title in homepage_titles + other_titles:\n\t on_homepage = title in homepage_titles\n\t create_story(title=title, on_homepage=on_homepage,\n\t\t\t status='published')\n\thomepage_stories = Story.objects.on_homepage()\n\tself.assertEqual(homepage_stories.count(), len(homepage_titles))\n\tfor title in homepage_titles:\n\t self.has_story_title(title, homepage_stories)", "def test_on_homepage(self):\n\thomepage_titles = (\n\t \"The Power of Play: Playground Locations in the Children's Corridor\",\n\t \"Birth Trends in the Children's Corridor: Focus on Foreign-born Mothers\",\n\t \"Shattered Dreams: Revitalizing Hope in Original Aurora\",\n\t \"A School Fight Rallies Hinkley High School Mothers to Organize\",\n\t \"Transportation Challenges Limit Education Choices for Denver Parents\")\n\tother_titles = (\"Story 1\", \"Story 2\")\n\tfor title in homepage_titles + other_titles:\n\t on_homepage = title in homepage_titles\n\t create_story(title=title, on_homepage=on_homepage,\n\t\t\t status='published')\n\thomepage_stories = Story.objects.on_homepage()\n\tself.assertEqual(homepage_stories.count(), len(homepage_titles))\n\tfor title in homepage_titles:\n\t self.has_story_title(title, homepage_stories)", "def test_title_handling(self, site_setup):\n templates, content, output, s_gen = site_setup\n\n templates.join(\"t.html\").write(\"{{ title }}\")\n templates.join(\"b.html\").write(\n \"{{ breadcrumbs|map(attribute='title')|join(', ') }}\"\n )\n\n with_title = content.mkdir(\"with-title\")\n with_title.join(\"index.md\").write(\"\\n\".join([\n \"template: t.html\",\n \"title: custom title\",\n \"---\"\n ]))\n with_title.join(\"child.md\").write(\"\\n\".join([\n \"template: b.html\",\n \"title: woohoo\",\n \"---\"\n ]))\n content.join(\"without-a-title.md\").write(\"\\n\".join([\n \"template: t.html\",\n \"---\"\n ]))\n\n s_gen.config[\"default_template\"] = \"t.html\"\n s_gen.gen_site(str(output))\n\n with_title_output = 
output.join(\"with-title\", \"index.html\")\n without_title_output = output.join(\"without-a-title\", \"index.html\")\n child_output = output.join(\"with-title\", \"child\", \"index.html\")\n # check the output files exist in the correct place\n assert with_title_output.check()\n assert without_title_output.check()\n assert child_output.check()\n\n assert with_title_output.read() == \"custom title\"\n assert without_title_output.read() == \"Without a title\"\n\n # Check that custom title is used in breadcrumbs\n exp_bc = \", \".join([HomePage.title, \"custom title\",\n \"woohoo\"])\n assert child_output.read() == exp_bc", "def test_Entry_title(self):\n test_entry = self.create_Entry()\n self.assertTrue(test_entry.title == str(test_entry))", "def test_title(self):\n self.driver.get(\"https://demo.testchameleon.com/\")\n assert \"Gentellela Alela!\" in self.driver.title", "def set_title(self, title):\n if check_data_exist(title) is True:\n self.title = title.text", "def test_list_views_check_main_title_descriptin(self):\n url = reverse('blogs:list')\n response = self.client.get(url)\n # TODO you need to check that the tiles are present in the list Dilshad. You are only looking for the http200\n self.assertEqual(response.status_code, 200)\n self.assertIn(self.main_title, str(response.content))\n self.assertIn(self.description1, str(response.content))\n self.assertIn(self.description2, str(response.content))", "def test_index_title(self):\n response = self.app.get('/')\n assert \"<title>Welcome to TurboGears</title>\" in response.body", "def valid_title(self, title):\n if title in self.timers.keys() and isinstance(title, str) and self.timers[title]['count']>0:\n return True\n else:\n return False", "def title_exists(form, field):\n if Entry.select().where(Entry.title ** field.data).exists():\n raise ValidationError('That title is already in use.')", "def get_title(self, article: BeautifulSoup):\n return self.get_text(article, self.parsing_template.title)", "def test_sites_has_sites_in_title(self):\n self.browser.get(self.warno_url)\n self.browser.find_element_by_link_text(\"Sites\").click()\n self.assertTrue('Site' in self.browser.title, 'Sites did not have \"Site\" in title.')" ]
[ "0.74226934", "0.7201128", "0.7072147", "0.70092344", "0.68888247", "0.67413706", "0.6726934", "0.66687554", "0.6647014", "0.65997624", "0.65997624", "0.65551233", "0.6530293", "0.6367897", "0.6367897", "0.63619655", "0.6342983", "0.6315898", "0.6308692", "0.6308692", "0.6280457", "0.6145463", "0.60994744", "0.6001693", "0.59856105", "0.59378153", "0.59342265", "0.5874012", "0.5868169", "0.58670956" ]
0.8438234
0
Test if has_page_title returns False if fake_article has no title.
def test_article_has_no_page_title(self, fake_article_missing_elements): fake_analysis = PageTitleAnalyzer(title=fake_article_missing_elements.title) assert not fake_analysis.has_page_title()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_article_has_page_title(self, fake_article):\n\n fake_analysis = PageTitleAnalyzer(title=fake_article.title)\n assert fake_analysis.has_page_title()", "def _is_title(self):\n ph = _child(self.__nvXxPr.nvPr, 'p:ph')\n if ph is None:\n return False\n # idx defaults to 0 when idx attr is absent\n ph_idx = ph.get('idx', '0')\n # title placeholder is identified by idx of 0\n return ph_idx == '0'", "def verifyPageTitle(self, titleToVerify):\n try:\n actualTitle = self.getTitle()\n return self.util.verifyTextContains(actualTitle, titleToVerify)\n except:\n self.log.error(\"Failed to get page title\")\n print_stack()\n return False", "def test_home_page_title(self):\n response = self.client.get('/')\n self.assertIn(site_main_title, response.content.title().decode())", "def is_news_article(self, page):\n title = page.find(self.tag_prefix + self.title_tag).text\n for meta in self.titles_to_exclude:\n if title.startswith(meta):\n logging.info(\"{} No es un articulo. Se ignora\".format(title.encode('utf8')))\n return False\n logging.info(\"{} Es un articulo. Se procesa\".format(title.encode('utf8')))\n return True", "def check_story_exists(self) -> bool:\n title_check = self._soup.find(\"title\").string\n if title_check == u'FicWad: fresh-picked original and fan fiction':\n return False\n return True", "def check_title(self):\n currenttitle = self.driver.title\n assert self.TITLE in currenttitle, 'Title not expected. Actual: ' + currenttitle + ', Expected: ' + self.TITLE", "def has_story_title(self, title, stories):\n\tfor story in stories:\n\t break\n\telse:\n\t self.fail(\"Story with title '%s' not found\" % title)", "def has_story_title(self, title, stories):\n\tfor story in stories:\n\t break\n\telse:\n\t self.fail(\"Story with title '%s' not found\" % title)", "def test_article_page_title_length(self, fake_article):\n\n fake_analysis = PageTitleAnalyzer(title=fake_article.title)\n assert fake_analysis.page_title_length == len(fake_article.title)", "def istitle(self) -> bool:\n pass", "def is_title_displayed(self):\n return self.driver.wait_for_title_contains(InboxLocators.TITLE, 30)", "def page_title_contains(self, title: str, timeout: TimeoutType = DEFAULT_TIMEOUT):\n return self._wait_until(condition=ec.title_contains(title), timeout=timeout)", "def page_title(step, title):\r\n\r\n with AssertContextManager(step):\r\n assert_equals(world.browser.title, title)", "def test_on_homepage_published_only(self):\n\tpublished_titles = (\n\t \"The Power of Play: Playground Locations in the Children's Corridor\",\n\t \"Birth Trends in the Children's Corridor: Focus on Foreign-born Mothers\")\n\tunpublished_titles = (\n\t \"Shattered Dreams: Revitalizing Hope in Original Aurora\",\n\t \"A School Fight Rallies Hinkley High School Mothers to Organize\",\n\t \"Transportation Challenges Limit Education Choices for Denver Parents\")\n\tfor title in published_titles + unpublished_titles:\n\t status = 'draft'\n\t if title in published_titles:\n\t\tstatus = 'published'\n\t create_story(title=title, on_homepage=True, status=status)\n\t\t\t \n\thomepage_stories = Story.objects.on_homepage()\n\tself.assertEqual(homepage_stories.count(), len(published_titles))\n\tfor title in published_titles:\n\t self.has_story_title(title, homepage_stories)", "def test_on_homepage_published_only(self):\n\tpublished_titles = (\n\t \"The Power of Play: Playground Locations in the Children's Corridor\",\n\t \"Birth Trends in the Children's Corridor: Focus on Foreign-born Mothers\")\n\tunpublished_titles = (\n\t \"Shattered 
Dreams: Revitalizing Hope in Original Aurora\",\n\t \"A School Fight Rallies Hinkley High School Mothers to Organize\",\n\t \"Transportation Challenges Limit Education Choices for Denver Parents\")\n\tfor title in published_titles + unpublished_titles:\n\t status = 'draft'\n\t if title in published_titles:\n\t\tstatus = 'published'\n\t create_story(title=title, on_homepage=True, status=status)\n\t\t\t \n\thomepage_stories = Story.objects.on_homepage()\n\tself.assertEqual(homepage_stories.count(), len(published_titles))\n\tfor title in published_titles:\n\t self.has_story_title(title, homepage_stories)", "def is_home_page(self):\n return not self.title and self.category is None", "def test_get_title(double_title, single_title, empty_title):\n assert get_title(double_title) == \"Parton distributions with LHC data\"\n assert get_title(single_title) == \"The Large Hadron Collider\"\n assert get_title(empty_title) == \"\"\n\n no_title_key = {\n \"not_titles\": []\n }\n assert get_title(no_title_key) == \"\"", "def test_title(self):\n key = api.portal.get_registry_record(\n 'plone.site_title'\n )\n self.assertEqual(u'Briefy CMS', key)", "def test_on_homepage(self):\n\thomepage_titles = (\n\t \"The Power of Play: Playground Locations in the Children's Corridor\",\n\t \"Birth Trends in the Children's Corridor: Focus on Foreign-born Mothers\",\n\t \"Shattered Dreams: Revitalizing Hope in Original Aurora\",\n\t \"A School Fight Rallies Hinkley High School Mothers to Organize\",\n\t \"Transportation Challenges Limit Education Choices for Denver Parents\")\n\tother_titles = (\"Story 1\", \"Story 2\")\n\tfor title in homepage_titles + other_titles:\n\t on_homepage = title in homepage_titles\n\t create_story(title=title, on_homepage=on_homepage,\n\t\t\t status='published')\n\thomepage_stories = Story.objects.on_homepage()\n\tself.assertEqual(homepage_stories.count(), len(homepage_titles))\n\tfor title in homepage_titles:\n\t self.has_story_title(title, homepage_stories)", "def test_on_homepage(self):\n\thomepage_titles = (\n\t \"The Power of Play: Playground Locations in the Children's Corridor\",\n\t \"Birth Trends in the Children's Corridor: Focus on Foreign-born Mothers\",\n\t \"Shattered Dreams: Revitalizing Hope in Original Aurora\",\n\t \"A School Fight Rallies Hinkley High School Mothers to Organize\",\n\t \"Transportation Challenges Limit Education Choices for Denver Parents\")\n\tother_titles = (\"Story 1\", \"Story 2\")\n\tfor title in homepage_titles + other_titles:\n\t on_homepage = title in homepage_titles\n\t create_story(title=title, on_homepage=on_homepage,\n\t\t\t status='published')\n\thomepage_stories = Story.objects.on_homepage()\n\tself.assertEqual(homepage_stories.count(), len(homepage_titles))\n\tfor title in homepage_titles:\n\t self.has_story_title(title, homepage_stories)", "def test_list_views_check_main_title_descriptin(self):\n url = reverse('blogs:list')\n response = self.client.get(url)\n # TODO you need to check that the tiles are present in the list Dilshad. 
You are only looking for the http200\n self.assertEqual(response.status_code, 200)\n self.assertIn(self.main_title, str(response.content))\n self.assertIn(self.description1, str(response.content))\n self.assertIn(self.description2, str(response.content))", "def valid_title(self, title):\n if title in self.timers.keys() and isinstance(title, str) and self.timers[title]['count']>0:\n return True\n else:\n return False", "def is_bad_title(title):\n bad_examples = [\"under construction\", \"test page\", \"redirect\", \"index of\", \"none \", \"expired\", \"coming soon\",\n \"error \", \"domain pending\", \"at directnic\", \"pending validation\", \"website disabled\",\n \"US Zip Code Information\", # verified we need this, urls like 00000.us, 00001.us end up at zipcode.com\n \"domain default page\", \"non-existent domain\", \"v-webs hosting services\",\n \"be back soon\", \"something went wrong\", \"Lunarpages Web Hosting Placeholder Page\",\n \"Félicitations ! Votre domaine a bien été créé chez OVH !\", \"Domaine r&eacute;serv&eacute;\",\n \" - For Sale | Undeveloped\", \"Yahoo&#39;s Aabaco Small Business: Websites, Ecommerce, Email &amp; Local Listings\",\n \"service unavailable\", \"website disabled\", \"404 Not Found\", \"Not Found\", \"Page cannot be found\"\n ]\n for bad_title in bad_examples:\n if bad_title.lower() in title.lower():\n debug(bad_title)\n return hit(bad_title)\n\n exact_matches = [\"web hosting\", \"webhosting\"]\n for ma in exact_matches:\n if title.replace(\" \", \"\").replace(\"\\t\", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\").lower() == ma:\n debug(ma)\n return hit(ma)\n return False", "def test_title_handling(self, site_setup):\n templates, content, output, s_gen = site_setup\n\n templates.join(\"t.html\").write(\"{{ title }}\")\n templates.join(\"b.html\").write(\n \"{{ breadcrumbs|map(attribute='title')|join(', ') }}\"\n )\n\n with_title = content.mkdir(\"with-title\")\n with_title.join(\"index.md\").write(\"\\n\".join([\n \"template: t.html\",\n \"title: custom title\",\n \"---\"\n ]))\n with_title.join(\"child.md\").write(\"\\n\".join([\n \"template: b.html\",\n \"title: woohoo\",\n \"---\"\n ]))\n content.join(\"without-a-title.md\").write(\"\\n\".join([\n \"template: t.html\",\n \"---\"\n ]))\n\n s_gen.config[\"default_template\"] = \"t.html\"\n s_gen.gen_site(str(output))\n\n with_title_output = output.join(\"with-title\", \"index.html\")\n without_title_output = output.join(\"without-a-title\", \"index.html\")\n child_output = output.join(\"with-title\", \"child\", \"index.html\")\n # check the output files exist in the correct place\n assert with_title_output.check()\n assert without_title_output.check()\n assert child_output.check()\n\n assert with_title_output.read() == \"custom title\"\n assert without_title_output.read() == \"Without a title\"\n\n # Check that custom title is used in breadcrumbs\n exp_bc = \", \".join([HomePage.title, \"custom title\",\n \"woohoo\"])\n assert child_output.read() == exp_bc", "def test_title(self):\n self.driver.get(\"https://demo.testchameleon.com/\")\n assert \"Gentellela Alela!\" in self.driver.title", "def set_title(self, title):\n if check_data_exist(title) is True:\n self.title = title.text", "def testFalseCapTitle(self):\n val = capTitles(\"victor Ifezue\") \n self.assertNotEqual(val, \"victor Ifezue\")", "def test_address__home_page_address_title__1(zcmlS):\n assert u'none' == ITitle(HomePageAddress())", "def title_exists(form, field):\n if Entry.select().where(Entry.title ** field.data).exists():\n raise 
ValidationError('That title is already in use.')" ]
[ "0.81478935", "0.71469104", "0.7134996", "0.7023476", "0.6681592", "0.66118556", "0.6604462", "0.659955", "0.659955", "0.65993476", "0.65619737", "0.6494986", "0.64904606", "0.6416079", "0.63737994", "0.63737994", "0.6358897", "0.6193415", "0.6174787", "0.6101239", "0.6101239", "0.6086549", "0.6081076", "0.60695505", "0.6007879", "0.59145284", "0.59131074", "0.59076774", "0.58878785", "0.58689827" ]
0.7949928
1
Test if page_title_length returns the good title length.
def test_article_page_title_length(self, fake_article): fake_analysis = PageTitleAnalyzer(title=fake_article.title) assert fake_analysis.page_title_length == len(fake_article.title)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def title_len(self) -> int:\n return self.__title_len", "def verifyPageTitle(self, titleToVerify):\n try:\n actualTitle = self.getTitle()\n return self.util.verifyTextContains(actualTitle, titleToVerify)\n except:\n self.log.error(\"Failed to get page title\")\n print_stack()\n return False", "def check_title(self):\n currenttitle = self.driver.title\n assert self.TITLE in currenttitle, 'Title not expected. Actual: ' + currenttitle + ', Expected: ' + self.TITLE", "def calculate_longest_title(self):\n longest_title_length = 0\n for movie in self.movies:\n title_length = len(movie.title)\n if title_length > longest_title_length:\n longest_title_length = title_length\n return longest_title_length", "def test_article_has_page_title(self, fake_article):\n\n fake_analysis = PageTitleAnalyzer(title=fake_article.title)\n assert fake_analysis.has_page_title()", "def get_valid_title(title):\n if len(title) >= 254:\n title = title[:254]\n return title", "def check_longest_name(item, title):\n if len(item) > longest_names[title]:\n longest_names[title] = len(item)", "def titleValidator(self, title):\n if type(title) != str:\n API.abort(400, error_messages[11]['Int_title'])\n\n # check if the contents of title have characters between a-z and A-Z\n elif not re.match(r\"(^[a-zA-Z_]+$)\", title) or title.isspace():\n API.abort(\n 400, error_messages[12]['wrong_format_title'])\n\n return True", "def test_home_page_title(self):\n response = self.client.get('/')\n self.assertIn(site_main_title, response.content.title().decode())", "def test_get_title(double_title, single_title, empty_title):\n assert get_title(double_title) == \"Parton distributions with LHC data\"\n assert get_title(single_title) == \"The Large Hadron Collider\"\n assert get_title(empty_title) == \"\"\n\n no_title_key = {\n \"not_titles\": []\n }\n assert get_title(no_title_key) == \"\"", "def page_title(step, title):\r\n\r\n with AssertContextManager(step):\r\n assert_equals(world.browser.title, title)", "def test_blogpost_title_length(self):\r\n self.configure_fixtures()\r\n valid_title = 'a' * 255\r\n invalid_title = 'a' * 256\r\n blogpost = Blogpost(title=valid_title, body=\"body\", app=self.app)\r\n db.session.add(blogpost)\r\n\r\n assert_not_raises(DataError, db.session.commit)\r\n\r\n blogpost.title = invalid_title\r\n assert_raises(DataError, db.session.commit)", "def valid_title(self, title):\n if title in self.timers.keys() and isinstance(title, str) and self.timers[title]['count']>0:\n return True\n else:\n return False", "def test_slugs_max_length(self):\n self.assertLessEqual(len(str(self.art_short.slug)), 50)\n self.assertLessEqual(len(str(self.art_long.slug)), 50)", "def validateTitle(title):\n \n if not(title) or not(title.strip()):\n return \"You must supply a title.\"\n else:\n return None", "def test_post_oversized_title(self):\n title = 'a' * (constants.TITLE_MAX_LENGTH + 1)\n response = self.post(content='foo', title=title)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_get_book_title(self):\n\t\t\n\t\tself.assertTrue(data.get_book_title(46) == '1 Corinthians')", "def is_title_displayed(self):\n return self.driver.wait_for_title_contains(InboxLocators.TITLE, 30)", "def test_slugs_max_length(self):\n self.assertLessEqual(len(str(self.cat_short.slug)), 50)\n self.assertLessEqual(len(str(self.cat_long.slug)), 50)", "def page_title_contains(self, title: str, timeout: TimeoutType = DEFAULT_TIMEOUT):\n return self._wait_until(condition=ec.title_contains(title), timeout=timeout)", "def 
test_very_long_title( self ):\n driver = self.driver\n driver.get(self.base_url + \"/record=b5713050~S6\")\n driver.find_element_by_link_text(\"Request\").click()\n url_obj = urlparse( driver.current_url )\n q_dct = parse_qs( driver.current_url )\n # print( 'q_dct, ```%s```' % pprint.pformat(q_dct) )\n self.assertEqual(\n 'jcbl.aeon.atlas-sys.com',\n url_obj.netloc )\n self.assertEqual(\n ['b5713050'],\n q_dct['ReferenceNumber'] )\n self.assertEqual(\n [\"The English-American his travail by sea and land: or, A new survey of the West-India's [sic], : containing a journall of three thousand and three hundred miles within the main land of America. Wher...\"],\n q_dct['ItemTitle'] )\n self.assertEqual(\n ['Gage, Thomas, 1603?-1656'],\n q_dct['ItemAuthor'] )\n self.assertEqual(\n ['London : printed by R. Cotes, and are to be sold by Humphrey Blunden at the Castle in Cornhill, and Thomas Williams at the Bible in Little Britain, 1648'],\n q_dct['ItemPublisher'] )\n self.assertEqual(\n ['1-SIZE D648 .G133e'],\n q_dct['CallNumber'] )\n self.assertEqual(\n ['http://www.archive.org/details/englishamericanh00gage'],\n q_dct['ItemInfo2'] )", "def _is_title(self):\n ph = _child(self.__nvXxPr.nvPr, 'p:ph')\n if ph is None:\n return False\n # idx defaults to 0 when idx attr is absent\n ph_idx = ph.get('idx', '0')\n # title placeholder is identified by idx of 0\n return ph_idx == '0'", "def is_bad_title(title):\n bad_examples = [\"under construction\", \"test page\", \"redirect\", \"index of\", \"none \", \"expired\", \"coming soon\",\n \"error \", \"domain pending\", \"at directnic\", \"pending validation\", \"website disabled\",\n \"US Zip Code Information\", # verified we need this, urls like 00000.us, 00001.us end up at zipcode.com\n \"domain default page\", \"non-existent domain\", \"v-webs hosting services\",\n \"be back soon\", \"something went wrong\", \"Lunarpages Web Hosting Placeholder Page\",\n \"Félicitations ! Votre domaine a bien été créé chez OVH !\", \"Domaine r&eacute;serv&eacute;\",\n \" - For Sale | Undeveloped\", \"Yahoo&#39;s Aabaco Small Business: Websites, Ecommerce, Email &amp; Local Listings\",\n \"service unavailable\", \"website disabled\", \"404 Not Found\", \"Not Found\", \"Page cannot be found\"\n ]\n for bad_title in bad_examples:\n if bad_title.lower() in title.lower():\n debug(bad_title)\n return hit(bad_title)\n\n exact_matches = [\"web hosting\", \"webhosting\"]\n for ma in exact_matches:\n if title.replace(\" \", \"\").replace(\"\\t\", \"\").replace(\"\\n\", \"\").replace(\"\\r\", \"\").lower() == ma:\n debug(ma)\n return hit(ma)\n return False", "def testFalseCapTitle(self):\n val = capTitles(\"victor Ifezue\") \n self.assertNotEqual(val, \"victor Ifezue\")", "def test_slugs_truncate(self):\n self.assertFalse(str(self.art_long.slug).endswith('a' * 8))\n self.assertTrue(str(self.art_short.slug).endswith('-title'))", "def testCapTitle(self):\n val = capTitles(\"false true\") \n self.assertEqual(val, \"False True\")", "def truncate_title(title):\n return title if len(title) <= 70 else title[:70]+\"...\"", "def check_valid_title(title):\n title_issues = TitleIssues(title_contains_nsfw=title_contains_nsfw(title))\n return title_issues", "def test__validate_title__2():\n for input_value in (\n 'a' * (TITLE_LENGTH_MAX + 1),\n ):\n with vampytest.assert_raises(ValueError):\n validate_title(input_value)", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length" ]
[ "0.7266168", "0.6975329", "0.64746845", "0.6418382", "0.6313484", "0.6305625", "0.6272739", "0.62464887", "0.6177433", "0.6164515", "0.61345303", "0.6125975", "0.61245835", "0.6124123", "0.6077936", "0.6068465", "0.6045622", "0.6019685", "0.5975678", "0.5965192", "0.5961571", "0.5937254", "0.5910052", "0.5893197", "0.5884553", "0.58374137", "0.5834494", "0.58153474", "0.58064044", "0.57950616" ]
0.77252203
0
The caching processes takes users input data and computation output, then cache them
def cache(self, end_user, input_data, output_data0, c_EC, Name_offloaded_data): print("Caching at EC server is done at %d%% " % (random.randint(50, 99))) c_km = c_EC/(1+((end_user-1) * (input_data + output_data0))) cache_capacity_allocation_EC.append(c_km) cached_content.insert(Name_offloaded_data, output_data0) return cached_content, cache_capacity_allocation_EC
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cache_end_user(self, end_user, input_data, output_data0, c_k, computation_requirement_user,\n Name_offloaded_data):\n print(\"computation at end-user device is done at %d%% of %s's task.\" %\n (random.randint(50, 99), end_user))\n print(\"Caching at end-user device is done at %d%% of %s's task.\" % (random.randint(50, 99), end_user))\n c_ki = c_k * (input_data + output_data0) / (number_user_application[end_user] * computation_requirement_user)\n cached_content.insert(Name_offloaded_data, output_data0)\n cache_capacity_allocation_user.append(c_ki)\n return cached_content, cache_capacity_allocation_user", "def _cache_data(self):\n while self._run:\n try:\n values = self._data_streamer.get_data_current_state()\n for parameter, mapping_method in self._mapping.items():\n value = values[parameter]\n mapped_notes = self._data_streamer.get_mapper_for_param(parameter, mapping_method[0]).map(value)\n self._value_queues[parameter].put((value,mapped_notes))\n except Exception, e:\n print e.message", "def cache(self, *args, **kwargs):\n\n default_fn = kwargs.pop('default_fn', None)\n\n def _run(*args, **kwargs):\n \"\"\"\n :param: *args\n :param: **kwargs (fname, force, verbose)\n \"\"\"\n\n fname = kwargs.pop('fname', None)\n force = kwargs.pop('force', False)\n verbose = kwargs.pop('verbose', True)\n copy = kwargs.get('copy', False)\n\n callback = None\n if len(args) > 1:\n callback, *args = args\n\n if len(args) > 0:\n adata = args[0] if isinstance(args[0], anndata.AnnData) else kwargs.get('adata')\n else:\n adata = kwargs.get('adata')\n\n assert isinstance(adata, anndata.AnnData), f'Expected `{adata}` to be of type `anndata.AnnData`.'\n\n if callback is None:\n callback = (lambda *_x, **_y: None) if default_fn is None else default_fn\n\n assert callable(callback), f'`{callblack}` is not callable.'\n\n if force:\n if verbose:\n print('Recomputing values.')\n res = callback(*args, **kwargs)\n cache_fn(res if copy else adata, fname, True, verbose, *args, **kwargs)\n return res\n\n # when loading to cache and copy is true, modify the copy\n if copy:\n adata = adata.copy()\n\n # we need to pass the *args and **kwargs in order to\n # get the right field when using regexes\n if not cache_fn(adata, fname, False, verbose, *args, **kwargs):\n if verbose:\n print('Computing values.')\n res = callback(*args, **kwargs)\n ret = cache_fn(res if copy else adata, fname, True, False, *args, **kwargs)\n\n assert ret, 'Caching failed.'\n\n return res\n\n # if cache was found and not modifying inplace\n return adata if copy else None\n\n cache_fn = self._create_cache_fn(*args, **kwargs)\n\n return _run", "def cache_produce () :\n\n \"\"\"\n list for movie\n each entry is the sum and count of all ratings received by each movie, which will be used later to calculate the user offset\n \"\"\"\n mcache = movie_read(open('/u/downing/cs/netflix/movie_titles.txt', 'r', encoding = \"ISO-8859-1\"))\n\n \"\"\"\n dictionaries for user caches\n each entry contain the sum and count of all ratings given by each user, which will be used later to calculate the user offset\n mean is the average of all ratings from all movies\n \"\"\"\n ucache, mean = user_read(mcache, \"/u/downing/cs/netflix/training_set\")\n\n cal_offset(mcache, mean)\n cal_offset(ucache, mean)\n\n mcache.append(mean)\n\n output_cache(ucache, open('/u/wc6892/Documents/cs373-netflix/wc6892-ucacheoff.txt', 'w'))\n output_cache(mcache, open('/u/wc6892/Documents/cs373-netflix/wc6892-mcacheoff.txt', 'w'))", "def cache(self):\n self.cached_mu = self.mu.eval()\n 
self.cached_var = self.var.eval()\n self.cached_count = self.count.eval()", "def recache(self, phys):\r\n self.myOutputCache.initialize(phys.app)\r\n\r\n for output in self.myOutputs:\r\n output.initialize(phys.app)\r\n output.run(1)", "def cache_dc(self, end_user, input_data, output_data0, c_d, Name_offloaded_data):\n print(\"Caching at Data center is done at %d%%\" % (random.randint(50, 99)))\n c_kd = end_user * (input_data + output_data0)\n cache_capacity_allocation_dc.append(c_kd)\n cached_content.insert(Name_offloaded_data, output_data0)\n DC_caching_decision_variable.append(1)\n return cached_content, DC_caching_decision_variable, cache_capacity_allocation_dc", "def test_cache_results(self):\n env = pike.Environment()\n value = [1]\n with pike.Graph('g') as graph:\n n = ParrotNode(value)\n env.add(graph)\n ret = env.run('g')\n self.assertEqual(ret, {'default': [1]})\n n.value = [1, 2]\n\n # We mutated value, but the return value should be cached\n ret = env.run('g')\n self.assertEqual(ret, {'default': [1]})\n\n # Busting cache should return new value\n ret = env.run('g', True)\n self.assertEqual(ret, {'default': [1, 2]})", "def _cached_call(self, args, kwargs):\r\n # Compare the function code with the previous to see if the\r\n # function code has changed\r\n output_dir, argument_hash = self._get_output_dir(*args, **kwargs)\r\n metadata = None\r\n # FIXME: The statements below should be try/excepted\r\n if not (self._check_previous_func_code(stacklevel=4) and\r\n os.path.exists(output_dir)):\r\n if self._verbose > 10:\r\n _, name = get_func_name(self.func)\r\n self.warn('Computing func %s, argument hash %s in '\r\n 'directory %s'\r\n % (name, argument_hash, output_dir))\r\n out, metadata = self.call(*args, **kwargs)\r\n if self.mmap_mode is not None:\r\n # Memmap the output at the first call to be consistent with\r\n # later calls\r\n out = _load_output(output_dir, self.func,\r\n timestamp=self.timestamp,\r\n mmap_mode=self.mmap_mode,\r\n verbose=self._verbose)\r\n else:\r\n try:\r\n t0 = time.time()\r\n out = _load_output(output_dir, _get_func_fullname(self.func),\r\n timestamp=self.timestamp,\r\n metadata=metadata, mmap_mode=self.mmap_mode,\r\n verbose=self._verbose)\r\n if self._verbose > 4:\r\n t = time.time() - t0\r\n _, name = get_func_name(self.func)\r\n msg = '%s cache loaded - %s' % (name, format_time(t))\r\n print(max(0, (80 - len(msg))) * '_' + msg)\r\n except Exception:\r\n # XXX: Should use an exception logger\r\n self.warn('Exception while loading results for '\r\n '(args=%s, kwargs=%s)\\n %s' %\r\n (args, kwargs, traceback.format_exc()))\r\n\r\n shutil.rmtree(output_dir, ignore_errors=True)\r\n out, metadata = self.call(*args, **kwargs)\r\n argument_hash = None\r\n return (out, argument_hash, metadata)", "def _process(self, data, cache):\n stop = False\n try:\n super(PickleCache, self).process(data)\n except StopIteration:\n stop = True\n\n data_to_save = data\n\n cache = dict() if cache is None else cache\n cache[self.chain_info['chain_hash']] = {\"data\": data_to_save,\n \"stopped\": stop,\n 'chain_repr': self.chain_info[\n 'chain_repr'],\n 'chain_mtime': self.chain_info[\n 'chain_mtime']}\n return cache, stop", "def cache_model(self, **inputs):\n self.shared_vars = self._create_shared_vars(**inputs)\n self.cached_model = self.create_model(**self.shared_vars)", "def process_cache_arguments(self, args):\r\n pass", "def _place_cached_acts_data(inp_data: torch.Tensor, out_data: torch.Tensor, device: torch.device) \\\n -> Tuple[torch.Tensor, torch.Tensor]:\n 
torch.cuda.empty_cache()\n\n # Available GPU memory in GB\n threshold_mem = torch.cuda.get_device_properties(device).total_memory - torch.cuda.memory_allocated(device)\n threshold_mem = threshold_mem / (1024 * 1024 * 1024)\n threshold_mem = threshold_mem * EMPIRICAL_THRESHOLD\n\n # required GPU memory in GB\n req_mem = 0\n req_mem += reduce(lambda x, y: x * y, inp_data.size()) * DATA_SIZE_IN_BITS / (1024 * 1024 * 1024 * 8)\n req_mem += reduce(lambda x, y: x * y, out_data.size()) * DATA_SIZE_IN_BITS / (1024 * 1024 * 1024 * 8)\n\n if req_mem < threshold_mem:\n inp_data = inp_data.to(device)\n out_data = out_data.to(device)\n logger.debug(\"Placing cached activations data on GPU.\")\n\n return inp_data, out_data", "def do_api_calls_update_cache(self):\n self.get_nodes()\n self.write_to_cache(self.inventory, self.cache_path_cache)\n self.write_to_cache(self.index, self.cache_path_index)", "def cache(func):\n results = {}\n\n @functools.wraps(func)\n def __cache(*args): # changed function\n nonlocal results # if this function call with parameters that already used\n if args in results.keys(): # then answer gets from dictionary\n # print(\"{} - got from cache\".format(args))\n rez = results[args]\n else:\n rez = func(*args)\n results[args] = rez\n return rez\n\n return __cache", "def _retrieveCachedData(self):", "def Steering(cache, generations, input_queue, result_queue):\n\n # Generations that have pending tasks to be executed. Pending tasks are those\n # whose results are not ready. The tasks that have their results ready are\n # referenced to as ready tasks. Once there is no pending generation, the\n # algorithm terminates.\n waiting = generations\n\n # Record how many initial tasks there are. If there is no task at all, the\n # algorithm can terminate right away.\n num_tasks = 0\n\n # Submit all the tasks in the initial generations to the next stage of the\n # framework. The next stage can be the build/compilation stage.\n for generation in generations:\n # Only send the task that has not been performed before to the next stage.\n for task in [task for task in generation.Pool() if task not in cache]:\n result_queue.put(task)\n cache.add(task)\n num_tasks += 1\n\n # If there is no task to be executed at all, the algorithm returns right away.\n if not num_tasks:\n # Inform the next stage that there will be no more task.\n result_queue.put(pipeline_process.POISONPILL)\n return\n\n # The algorithm is done if there is no pending generation. A generation is\n # pending if it has pending task.\n while waiting:\n # Busy-waiting for the next task.\n if input_queue.empty():\n continue\n\n # If there is a task whose result is ready from the last stage of the\n # feedback loop, there will be one less pending task.\n\n task = input_queue.get()\n\n # Store the result of this ready task. Intermediate results can be used to\n # generate report for final result or be used to reboot from a crash from\n # the failure of any module of the framework.\n task.LogSteeringCost()\n\n # Find out which pending generation this ready task belongs to. This pending\n # generation will have one less pending task. The \"next\" expression iterates\n # the generations in waiting until the first generation whose UpdateTask\n # method returns true.\n generation = next(gen for gen in waiting if gen.UpdateTask(task))\n\n # If there is still any pending task, do nothing.\n if not generation.Done():\n continue\n\n # All the tasks in the generation are finished. 
The generation is ready to\n # produce the next generation.\n waiting.remove(generation)\n\n # Check whether a generation should generate the next generation.\n # A generation may not generate the next generation, e.g., because a\n # fixpoint has been reached, there has not been any improvement for a few\n # generations or a local maxima is reached.\n if not generation.IsImproved():\n continue\n\n for new_generation in generation.Next(cache):\n # Make sure that each generation should contain at least one task.\n assert new_generation.Pool()\n waiting.append(new_generation)\n\n # Send the tasks of the new generations to the next stage for execution.\n for new_task in new_generation.Pool():\n result_queue.put(new_task)\n cache.add(new_task)\n\n # Steering algorithm is finished and it informs the next stage that there will\n # be no more task.\n result_queue.put(pipeline_process.POISONPILL)", "def cached_or_run(self, job, run_func, *args):\n to_cache = self.workflow.is_cached_rule(job.rule)\n try:\n if to_cache:\n self.workflow.output_file_cache.fetch(job)\n return\n except CacheMissException:\n pass\n run_func(*args)\n if to_cache:\n self.workflow.output_file_cache.store(job)", "def dynCache(*args, **kwargs)->None:\n pass", "def cache_function(self, func):\n\n @wraps(func)\n def wrapper(*args):\n if self.__log:\n self.__logger.info(f\"Called {func.__name__} with {args}\")\n fileName = self.__build_file_name(func, args)\n\n if os.path.isfile(fileName):\n # Result is already stored in cache\n # Retrieve return value from cache\n return self.__read_cache(fileName)\n else:\n # Result is not stored in cache\n # Run function\n if len(args) > 0:\n returnVal = func(args)\n else:\n returnVal = func()\n\n # Store value in cache\n self.__write_cache(fileName, returnVal)\n\n # Give return value\n return returnVal\n\n return wrapper", "def test_custom_cache_multiple(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n\n def cost(a, b, cache):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n res = execute(\n [tape1, tape2],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )\n return res[0]\n\n custom_cache = {}\n jax.grad(cost)(a, b, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def __call__(self, *args):\n if args not in self.memo:\n self.memo[args] = self.f(*args)\n return self.memo[args]", "def cache_results(self):\n self.cache_manager.cache_results(\n self.parser,\n self.query,\n self.search_engine_name,\n self.scrape_method,\n self.page_number,\n db_lock=self.db_lock\n )", "def __call__(self, *args, **kwargs):\n key = None\n value = None\n memoization_key = None\n\n if self._memoize:\n memoization_key = self._get_memoization_key(*args, **kwargs)\n if memoization_key in self._cached_results:\n return self._cached_results[memoization_key]\n\n if self._cache:\n key = self.get_cache_key(*args, **kwargs)\n value = cache_backend.get(key)\n\n if value is None:\n value = self._fn(*self._inject_obj(args), **kwargs)\n\n if self._cache:\n cache_backend.set(key, value, timeout=self._timeout)\n\n if self._memoize:\n self._cached_results[memoization_key] = value\n\n return 
value", "def test_custom_cache(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )[0]\n\n custom_cache = {}\n params = jax.numpy.array([0.1, 0.2])\n jax.grad(cost)(params, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def query(self):\r\n records = self.input()\r\n if self.to_investigate:\r\n records = self.investigate(records)\r\n post.log.info(\"Caching {} records for {}\".format(len(records), self.name))\r\n self.cache_records(records)", "def cache(self, treename = \"superNt\") :\n\n output_directory = self.outdir\n if not output_directory.endswith(\"/\") :\n output_directory += \"/\"\n full_filename = output_directory + \"%s\" % self.name\n\n if not os.path.isfile(full_filename) :\n with h5py.File(full_filename, 'w', libver='latest') as selection_file :\n name = self.selection\n selection_group = selection_file.create_group(name)\n selection_group.attrs['cut_string'] = self.selectionstr\n\n varlist_str = ','.join(self.fields)\n selection_group.attrs['variable_list'] = varlist_str\n\n for sample in self.samples :\n print \"Caching %s\" % sample.name\n process_group = selection_group.create_group(sample.name)\n self.add_process_to_cache(process_group, sample, treename)\n sample.selection_file = full_filename\n sample.selection_group = \"/%s/%s/\" % ( str(selection_group.name), sample.name )\n return full_filename\n\n elif os.path.isfile(full_filename) :\n print \"Found pre-cached selection file %s\" % full_filename\n # check the current file for\n # 1) make sure that the selection definition is the same\n # 2) make sure that each of our samples is in there\n # if (1) fails, don't try to be smart, just exit\n # if (2) fails (and (1) succeeds), just add the dataset\n # TODO add check for all the relevant variables\n with h5py.File(full_filename, 'a', libver='latest') as selection_file :\n name = self.selection\n print \" > Looking for top level -> %s \" % name\n found_top_level = False\n for top_level in selection_file :\n if str(top_level) == name :\n found_top_level = True\n print\" %s in file!\" % name\n selection_group = selection_file[\"%s\" % name]\n cut_definition = selection_group.attrs['cut_string']\n included_vars = selection_group.attrs['variable_list'].split(',')\n not_included_vars = []\n for field in self.fields :\n if field not in included_vars :\n not_included_vars.append(field)\n\n if cut_definition != self.selectionstr :\n print \"ERROR Cut definition for %s in selection file %s does not match!\" % ( name, full_filename )\n print \"ERROR Expected selection : %s\" % self.selectionstr\n print \"ERROR Selection in file : %s\" % cut_definition\n sys.exit()\n\n if len(not_included_vars) > 0 :\n print \"ERROR Stored fields (variables) in selection file %s does not contain some currently expected fields\" % ( full_filename)\n print \"ERROR Fields in file : %s\" % included_vars\n print \"ERROR Expected : %s\" % self.fields\n print \"ERROR > not included : %s\" % not_included_vars\n sys.exit()\n\n if not found_top_level :\n print \"ERROR Did not find top level group %s for selection %s in file %s\" % (self.selectionstr, full_filename)\n sys.exit()\n\n print \"Top level and selection seem ok! (n.b. 
did not check variable content)\"\n\n selection_group = selection_file[\"%s\" % name]\n print \"Looking for selection group %s\" % str(selection_group.name)\n group_keys = [str(g) for g in selection_group.keys()]\n for sample in self.samples :\n process_group_name = sample.name\n if process_group_name in group_keys :\n print \"Loading > %s (from pre-existing group)\" % sample.name\n sample.selection_file = full_filename\n sample.selection_group = \"/%s/%s/\" % ( str(selection_group.name), sample.name )\n else :\n print \"Loading > %s did not find in pre-existing group, adding it now\" % sample.name\n process_group = selection_group.create_group(sample.name)\n self.add_process_to_cache(process_group, sample, treename)\n sample.selection_file = full_filename\n sample.selection_group = \"/%s/%s/\" % ( str(selection_group.name), sample.name )\n return full_filename", "def _memorize(func):\n\n def _wrapper(self, *args, **kwargs):\n \"\"\"Wrapper to cache the function's output.\n \"\"\"\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, **kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val\n return _wrapper", "def cached(\n inputs: INPUTS = None,\n params: PARAMETERS = None,\n outputs: OUTPUTS = None,\n) -> Callable[..., Callable[..., Optional[T]]]:\n\n def wrapper_builder(f: Callable[..., Optional[T]]) -> Callable[..., Optional[T]]:\n @functools.wraps(f)\n def wrapper(*args, **kw) -> Optional[T]:\n resolved_parameters = resolve_cache_parameters(params, args, kw)\n hash_key = current_cache.get_hash_key(f, inputs, resolved_parameters)\n if current_cache.use_cached(hash_key):\n return None\n\n result = f(*args, **kw)\n outputs_names = get_output_names(outputs, args, kw)\n current_cache.cache_outputs(hash_key, outputs_names)\n\n return result\n\n return wrapper\n\n return wrapper_builder", "def setup_cache(self):\n train_cache_path = self.cache.get_cache_path_and_check(TRAIN_STR, self.task_name)\n dev_cache_path = self.cache.get_cache_path_and_check(DEV_STR, self.task_name)\n test_cache_path = self.cache.get_cache_path_and_check(TEST_STR, self.task_name)\n\n self.train_cache_writer = None\n self.dev_cache_writer = None\n self.test_cache_writer = None\n\n if os.path.exists(train_cache_path):\n f = h5py.File(train_cache_path, 'r')\n self.train_cache = (torch.tensor(f[str(i)][()]) for i in range(len(f.keys())))\n else:\n self.train_cache_writer = h5py.File(train_cache_path, 'w')\n if os.path.exists(dev_cache_path):\n f2 = h5py.File(dev_cache_path, 'r')\n self.dev_cache = (torch.tensor(f2[str(i)][()]) for i in range(len(f2.keys())))\n else:\n self.dev_cache_writer = h5py.File(dev_cache_path, 'w')\n if os.path.exists(test_cache_path):\n f3 = h5py.File(test_cache_path, 'r')\n self.test_cache = (torch.tensor(f3[str(i)][()]) for i in range(len(f3.keys())))\n else:\n self.test_cache_writer = h5py.File(test_cache_path, 'w')" ]
[ "0.69241387", "0.6817171", "0.6684514", "0.66622245", "0.6657395", "0.66510534", "0.64325535", "0.6382823", "0.6365466", "0.6267725", "0.6214952", "0.620603", "0.6172402", "0.61452436", "0.61371493", "0.6086325", "0.6070391", "0.60115355", "0.600396", "0.5997731", "0.59631586", "0.59455794", "0.5923815", "0.59170324", "0.5893532", "0.5885981", "0.58544034", "0.58519346", "0.584071", "0.5822332" ]
0.69128287
1
The caching process takes the user's input data and computation output, then caches them
def cache_end_user(self, end_user, input_data, output_data0, c_k, computation_requirement_user, Name_offloaded_data): print("computation at end-user device is done at %d%% of %s's task." % (random.randint(50, 99), end_user)) print("Caching at end-user device is done at %d%% of %s's task." % (random.randint(50, 99), end_user)) c_ki = c_k * (input_data + output_data0) / (number_user_application[end_user] * computation_requirement_user) cached_content.insert(Name_offloaded_data, output_data0) cache_capacity_allocation_user.append(c_ki) return cached_content, cache_capacity_allocation_user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cache(self, end_user, input_data, output_data0, c_EC, Name_offloaded_data):\n\n print(\"Caching at EC server is done at %d%% \" % (random.randint(50, 99)))\n c_km = c_EC/(1+((end_user-1) * (input_data + output_data0)))\n cache_capacity_allocation_EC.append(c_km)\n cached_content.insert(Name_offloaded_data, output_data0)\n return cached_content, cache_capacity_allocation_EC", "def _cache_data(self):\n while self._run:\n try:\n values = self._data_streamer.get_data_current_state()\n for parameter, mapping_method in self._mapping.items():\n value = values[parameter]\n mapped_notes = self._data_streamer.get_mapper_for_param(parameter, mapping_method[0]).map(value)\n self._value_queues[parameter].put((value,mapped_notes))\n except Exception, e:\n print e.message", "def cache(self, *args, **kwargs):\n\n default_fn = kwargs.pop('default_fn', None)\n\n def _run(*args, **kwargs):\n \"\"\"\n :param: *args\n :param: **kwargs (fname, force, verbose)\n \"\"\"\n\n fname = kwargs.pop('fname', None)\n force = kwargs.pop('force', False)\n verbose = kwargs.pop('verbose', True)\n copy = kwargs.get('copy', False)\n\n callback = None\n if len(args) > 1:\n callback, *args = args\n\n if len(args) > 0:\n adata = args[0] if isinstance(args[0], anndata.AnnData) else kwargs.get('adata')\n else:\n adata = kwargs.get('adata')\n\n assert isinstance(adata, anndata.AnnData), f'Expected `{adata}` to be of type `anndata.AnnData`.'\n\n if callback is None:\n callback = (lambda *_x, **_y: None) if default_fn is None else default_fn\n\n assert callable(callback), f'`{callblack}` is not callable.'\n\n if force:\n if verbose:\n print('Recomputing values.')\n res = callback(*args, **kwargs)\n cache_fn(res if copy else adata, fname, True, verbose, *args, **kwargs)\n return res\n\n # when loading to cache and copy is true, modify the copy\n if copy:\n adata = adata.copy()\n\n # we need to pass the *args and **kwargs in order to\n # get the right field when using regexes\n if not cache_fn(adata, fname, False, verbose, *args, **kwargs):\n if verbose:\n print('Computing values.')\n res = callback(*args, **kwargs)\n ret = cache_fn(res if copy else adata, fname, True, False, *args, **kwargs)\n\n assert ret, 'Caching failed.'\n\n return res\n\n # if cache was found and not modifying inplace\n return adata if copy else None\n\n cache_fn = self._create_cache_fn(*args, **kwargs)\n\n return _run", "def cache_produce () :\n\n \"\"\"\n list for movie\n each entry is the sum and count of all ratings received by each movie, which will be used later to calculate the user offset\n \"\"\"\n mcache = movie_read(open('/u/downing/cs/netflix/movie_titles.txt', 'r', encoding = \"ISO-8859-1\"))\n\n \"\"\"\n dictionaries for user caches\n each entry contain the sum and count of all ratings given by each user, which will be used later to calculate the user offset\n mean is the average of all ratings from all movies\n \"\"\"\n ucache, mean = user_read(mcache, \"/u/downing/cs/netflix/training_set\")\n\n cal_offset(mcache, mean)\n cal_offset(ucache, mean)\n\n mcache.append(mean)\n\n output_cache(ucache, open('/u/wc6892/Documents/cs373-netflix/wc6892-ucacheoff.txt', 'w'))\n output_cache(mcache, open('/u/wc6892/Documents/cs373-netflix/wc6892-mcacheoff.txt', 'w'))", "def cache(self):\n self.cached_mu = self.mu.eval()\n self.cached_var = self.var.eval()\n self.cached_count = self.count.eval()", "def recache(self, phys):\r\n self.myOutputCache.initialize(phys.app)\r\n\r\n for output in self.myOutputs:\r\n output.initialize(phys.app)\r\n output.run(1)", 
"def cache_dc(self, end_user, input_data, output_data0, c_d, Name_offloaded_data):\n print(\"Caching at Data center is done at %d%%\" % (random.randint(50, 99)))\n c_kd = end_user * (input_data + output_data0)\n cache_capacity_allocation_dc.append(c_kd)\n cached_content.insert(Name_offloaded_data, output_data0)\n DC_caching_decision_variable.append(1)\n return cached_content, DC_caching_decision_variable, cache_capacity_allocation_dc", "def test_cache_results(self):\n env = pike.Environment()\n value = [1]\n with pike.Graph('g') as graph:\n n = ParrotNode(value)\n env.add(graph)\n ret = env.run('g')\n self.assertEqual(ret, {'default': [1]})\n n.value = [1, 2]\n\n # We mutated value, but the return value should be cached\n ret = env.run('g')\n self.assertEqual(ret, {'default': [1]})\n\n # Busting cache should return new value\n ret = env.run('g', True)\n self.assertEqual(ret, {'default': [1, 2]})", "def _cached_call(self, args, kwargs):\r\n # Compare the function code with the previous to see if the\r\n # function code has changed\r\n output_dir, argument_hash = self._get_output_dir(*args, **kwargs)\r\n metadata = None\r\n # FIXME: The statements below should be try/excepted\r\n if not (self._check_previous_func_code(stacklevel=4) and\r\n os.path.exists(output_dir)):\r\n if self._verbose > 10:\r\n _, name = get_func_name(self.func)\r\n self.warn('Computing func %s, argument hash %s in '\r\n 'directory %s'\r\n % (name, argument_hash, output_dir))\r\n out, metadata = self.call(*args, **kwargs)\r\n if self.mmap_mode is not None:\r\n # Memmap the output at the first call to be consistent with\r\n # later calls\r\n out = _load_output(output_dir, self.func,\r\n timestamp=self.timestamp,\r\n mmap_mode=self.mmap_mode,\r\n verbose=self._verbose)\r\n else:\r\n try:\r\n t0 = time.time()\r\n out = _load_output(output_dir, _get_func_fullname(self.func),\r\n timestamp=self.timestamp,\r\n metadata=metadata, mmap_mode=self.mmap_mode,\r\n verbose=self._verbose)\r\n if self._verbose > 4:\r\n t = time.time() - t0\r\n _, name = get_func_name(self.func)\r\n msg = '%s cache loaded - %s' % (name, format_time(t))\r\n print(max(0, (80 - len(msg))) * '_' + msg)\r\n except Exception:\r\n # XXX: Should use an exception logger\r\n self.warn('Exception while loading results for '\r\n '(args=%s, kwargs=%s)\\n %s' %\r\n (args, kwargs, traceback.format_exc()))\r\n\r\n shutil.rmtree(output_dir, ignore_errors=True)\r\n out, metadata = self.call(*args, **kwargs)\r\n argument_hash = None\r\n return (out, argument_hash, metadata)", "def _process(self, data, cache):\n stop = False\n try:\n super(PickleCache, self).process(data)\n except StopIteration:\n stop = True\n\n data_to_save = data\n\n cache = dict() if cache is None else cache\n cache[self.chain_info['chain_hash']] = {\"data\": data_to_save,\n \"stopped\": stop,\n 'chain_repr': self.chain_info[\n 'chain_repr'],\n 'chain_mtime': self.chain_info[\n 'chain_mtime']}\n return cache, stop", "def cache_model(self, **inputs):\n self.shared_vars = self._create_shared_vars(**inputs)\n self.cached_model = self.create_model(**self.shared_vars)", "def process_cache_arguments(self, args):\r\n pass", "def _place_cached_acts_data(inp_data: torch.Tensor, out_data: torch.Tensor, device: torch.device) \\\n -> Tuple[torch.Tensor, torch.Tensor]:\n torch.cuda.empty_cache()\n\n # Available GPU memory in GB\n threshold_mem = torch.cuda.get_device_properties(device).total_memory - torch.cuda.memory_allocated(device)\n threshold_mem = threshold_mem / (1024 * 1024 * 1024)\n threshold_mem = 
threshold_mem * EMPIRICAL_THRESHOLD\n\n # required GPU memory in GB\n req_mem = 0\n req_mem += reduce(lambda x, y: x * y, inp_data.size()) * DATA_SIZE_IN_BITS / (1024 * 1024 * 1024 * 8)\n req_mem += reduce(lambda x, y: x * y, out_data.size()) * DATA_SIZE_IN_BITS / (1024 * 1024 * 1024 * 8)\n\n if req_mem < threshold_mem:\n inp_data = inp_data.to(device)\n out_data = out_data.to(device)\n logger.debug(\"Placing cached activations data on GPU.\")\n\n return inp_data, out_data", "def do_api_calls_update_cache(self):\n self.get_nodes()\n self.write_to_cache(self.inventory, self.cache_path_cache)\n self.write_to_cache(self.index, self.cache_path_index)", "def cache(func):\n results = {}\n\n @functools.wraps(func)\n def __cache(*args): # changed function\n nonlocal results # if this function call with parameters that already used\n if args in results.keys(): # then answer gets from dictionary\n # print(\"{} - got from cache\".format(args))\n rez = results[args]\n else:\n rez = func(*args)\n results[args] = rez\n return rez\n\n return __cache", "def _retrieveCachedData(self):", "def Steering(cache, generations, input_queue, result_queue):\n\n # Generations that have pending tasks to be executed. Pending tasks are those\n # whose results are not ready. The tasks that have their results ready are\n # referenced to as ready tasks. Once there is no pending generation, the\n # algorithm terminates.\n waiting = generations\n\n # Record how many initial tasks there are. If there is no task at all, the\n # algorithm can terminate right away.\n num_tasks = 0\n\n # Submit all the tasks in the initial generations to the next stage of the\n # framework. The next stage can be the build/compilation stage.\n for generation in generations:\n # Only send the task that has not been performed before to the next stage.\n for task in [task for task in generation.Pool() if task not in cache]:\n result_queue.put(task)\n cache.add(task)\n num_tasks += 1\n\n # If there is no task to be executed at all, the algorithm returns right away.\n if not num_tasks:\n # Inform the next stage that there will be no more task.\n result_queue.put(pipeline_process.POISONPILL)\n return\n\n # The algorithm is done if there is no pending generation. A generation is\n # pending if it has pending task.\n while waiting:\n # Busy-waiting for the next task.\n if input_queue.empty():\n continue\n\n # If there is a task whose result is ready from the last stage of the\n # feedback loop, there will be one less pending task.\n\n task = input_queue.get()\n\n # Store the result of this ready task. Intermediate results can be used to\n # generate report for final result or be used to reboot from a crash from\n # the failure of any module of the framework.\n task.LogSteeringCost()\n\n # Find out which pending generation this ready task belongs to. This pending\n # generation will have one less pending task. The \"next\" expression iterates\n # the generations in waiting until the first generation whose UpdateTask\n # method returns true.\n generation = next(gen for gen in waiting if gen.UpdateTask(task))\n\n # If there is still any pending task, do nothing.\n if not generation.Done():\n continue\n\n # All the tasks in the generation are finished. 
The generation is ready to\n # produce the next generation.\n waiting.remove(generation)\n\n # Check whether a generation should generate the next generation.\n # A generation may not generate the next generation, e.g., because a\n # fixpoint has been reached, there has not been any improvement for a few\n # generations or a local maxima is reached.\n if not generation.IsImproved():\n continue\n\n for new_generation in generation.Next(cache):\n # Make sure that each generation should contain at least one task.\n assert new_generation.Pool()\n waiting.append(new_generation)\n\n # Send the tasks of the new generations to the next stage for execution.\n for new_task in new_generation.Pool():\n result_queue.put(new_task)\n cache.add(new_task)\n\n # Steering algorithm is finished and it informs the next stage that there will\n # be no more task.\n result_queue.put(pipeline_process.POISONPILL)", "def cached_or_run(self, job, run_func, *args):\n to_cache = self.workflow.is_cached_rule(job.rule)\n try:\n if to_cache:\n self.workflow.output_file_cache.fetch(job)\n return\n except CacheMissException:\n pass\n run_func(*args)\n if to_cache:\n self.workflow.output_file_cache.store(job)", "def dynCache(*args, **kwargs)->None:\n pass", "def cache_function(self, func):\n\n @wraps(func)\n def wrapper(*args):\n if self.__log:\n self.__logger.info(f\"Called {func.__name__} with {args}\")\n fileName = self.__build_file_name(func, args)\n\n if os.path.isfile(fileName):\n # Result is already stored in cache\n # Retrieve return value from cache\n return self.__read_cache(fileName)\n else:\n # Result is not stored in cache\n # Run function\n if len(args) > 0:\n returnVal = func(args)\n else:\n returnVal = func()\n\n # Store value in cache\n self.__write_cache(fileName, returnVal)\n\n # Give return value\n return returnVal\n\n return wrapper", "def test_custom_cache_multiple(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n a = jax.numpy.array(0.1)\n b = jax.numpy.array(0.2)\n\n def cost(a, b, cache):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.RY(a, wires=0)\n qml.RX(b, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n res = execute(\n [tape1, tape2],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )\n return res[0]\n\n custom_cache = {}\n jax.grad(cost)(a, b, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def __call__(self, *args):\n if args not in self.memo:\n self.memo[args] = self.f(*args)\n return self.memo[args]", "def cache_results(self):\n self.cache_manager.cache_results(\n self.parser,\n self.query,\n self.search_engine_name,\n self.scrape_method,\n self.page_number,\n db_lock=self.db_lock\n )", "def __call__(self, *args, **kwargs):\n key = None\n value = None\n memoization_key = None\n\n if self._memoize:\n memoization_key = self._get_memoization_key(*args, **kwargs)\n if memoization_key in self._cached_results:\n return self._cached_results[memoization_key]\n\n if self._cache:\n key = self.get_cache_key(*args, **kwargs)\n value = cache_backend.get(key)\n\n if value is None:\n value = self._fn(*self._inject_obj(args), **kwargs)\n\n if self._cache:\n cache_backend.set(key, value, timeout=self._timeout)\n\n if self._memoize:\n self._cached_results[memoization_key] = value\n\n return 
value", "def test_custom_cache(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )[0]\n\n custom_cache = {}\n params = jax.numpy.array([0.1, 0.2])\n jax.grad(cost)(params, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache", "def query(self):\r\n records = self.input()\r\n if self.to_investigate:\r\n records = self.investigate(records)\r\n post.log.info(\"Caching {} records for {}\".format(len(records), self.name))\r\n self.cache_records(records)", "def cache(self, treename = \"superNt\") :\n\n output_directory = self.outdir\n if not output_directory.endswith(\"/\") :\n output_directory += \"/\"\n full_filename = output_directory + \"%s\" % self.name\n\n if not os.path.isfile(full_filename) :\n with h5py.File(full_filename, 'w', libver='latest') as selection_file :\n name = self.selection\n selection_group = selection_file.create_group(name)\n selection_group.attrs['cut_string'] = self.selectionstr\n\n varlist_str = ','.join(self.fields)\n selection_group.attrs['variable_list'] = varlist_str\n\n for sample in self.samples :\n print \"Caching %s\" % sample.name\n process_group = selection_group.create_group(sample.name)\n self.add_process_to_cache(process_group, sample, treename)\n sample.selection_file = full_filename\n sample.selection_group = \"/%s/%s/\" % ( str(selection_group.name), sample.name )\n return full_filename\n\n elif os.path.isfile(full_filename) :\n print \"Found pre-cached selection file %s\" % full_filename\n # check the current file for\n # 1) make sure that the selection definition is the same\n # 2) make sure that each of our samples is in there\n # if (1) fails, don't try to be smart, just exit\n # if (2) fails (and (1) succeeds), just add the dataset\n # TODO add check for all the relevant variables\n with h5py.File(full_filename, 'a', libver='latest') as selection_file :\n name = self.selection\n print \" > Looking for top level -> %s \" % name\n found_top_level = False\n for top_level in selection_file :\n if str(top_level) == name :\n found_top_level = True\n print\" %s in file!\" % name\n selection_group = selection_file[\"%s\" % name]\n cut_definition = selection_group.attrs['cut_string']\n included_vars = selection_group.attrs['variable_list'].split(',')\n not_included_vars = []\n for field in self.fields :\n if field not in included_vars :\n not_included_vars.append(field)\n\n if cut_definition != self.selectionstr :\n print \"ERROR Cut definition for %s in selection file %s does not match!\" % ( name, full_filename )\n print \"ERROR Expected selection : %s\" % self.selectionstr\n print \"ERROR Selection in file : %s\" % cut_definition\n sys.exit()\n\n if len(not_included_vars) > 0 :\n print \"ERROR Stored fields (variables) in selection file %s does not contain some currently expected fields\" % ( full_filename)\n print \"ERROR Fields in file : %s\" % included_vars\n print \"ERROR Expected : %s\" % self.fields\n print \"ERROR > not included : %s\" % not_included_vars\n sys.exit()\n\n if not found_top_level :\n print \"ERROR Did not find top level group %s for selection %s in file %s\" % (self.selectionstr, full_filename)\n sys.exit()\n\n print \"Top level and selection seem ok! (n.b. 
did not check variable content)\"\n\n selection_group = selection_file[\"%s\" % name]\n print \"Looking for selection group %s\" % str(selection_group.name)\n group_keys = [str(g) for g in selection_group.keys()]\n for sample in self.samples :\n process_group_name = sample.name\n if process_group_name in group_keys :\n print \"Loading > %s (from pre-existing group)\" % sample.name\n sample.selection_file = full_filename\n sample.selection_group = \"/%s/%s/\" % ( str(selection_group.name), sample.name )\n else :\n print \"Loading > %s did not find in pre-existing group, adding it now\" % sample.name\n process_group = selection_group.create_group(sample.name)\n self.add_process_to_cache(process_group, sample, treename)\n sample.selection_file = full_filename\n sample.selection_group = \"/%s/%s/\" % ( str(selection_group.name), sample.name )\n return full_filename", "def _memorize(func):\n\n def _wrapper(self, *args, **kwargs):\n \"\"\"Wrapper to cache the function's output.\n \"\"\"\n if self.use_cache:\n cache = load_cache(self.cache_filename)\n original_key = generate_hash(\n self.__class__.__name__, func.__name__, args, kwargs)\n cache_key = hashlib.md5(original_key.encode('utf-8')).hexdigest()\n cached_val = cache.get(cache_key)\n if cached_val:\n return cached_val\n val = func(self, *args, **kwargs)\n if self.use_cache:\n cache.set(cache_key, val)\n return val\n return _wrapper", "def cached(\n inputs: INPUTS = None,\n params: PARAMETERS = None,\n outputs: OUTPUTS = None,\n) -> Callable[..., Callable[..., Optional[T]]]:\n\n def wrapper_builder(f: Callable[..., Optional[T]]) -> Callable[..., Optional[T]]:\n @functools.wraps(f)\n def wrapper(*args, **kw) -> Optional[T]:\n resolved_parameters = resolve_cache_parameters(params, args, kw)\n hash_key = current_cache.get_hash_key(f, inputs, resolved_parameters)\n if current_cache.use_cached(hash_key):\n return None\n\n result = f(*args, **kw)\n outputs_names = get_output_names(outputs, args, kw)\n current_cache.cache_outputs(hash_key, outputs_names)\n\n return result\n\n return wrapper\n\n return wrapper_builder", "def setup_cache(self):\n train_cache_path = self.cache.get_cache_path_and_check(TRAIN_STR, self.task_name)\n dev_cache_path = self.cache.get_cache_path_and_check(DEV_STR, self.task_name)\n test_cache_path = self.cache.get_cache_path_and_check(TEST_STR, self.task_name)\n\n self.train_cache_writer = None\n self.dev_cache_writer = None\n self.test_cache_writer = None\n\n if os.path.exists(train_cache_path):\n f = h5py.File(train_cache_path, 'r')\n self.train_cache = (torch.tensor(f[str(i)][()]) for i in range(len(f.keys())))\n else:\n self.train_cache_writer = h5py.File(train_cache_path, 'w')\n if os.path.exists(dev_cache_path):\n f2 = h5py.File(dev_cache_path, 'r')\n self.dev_cache = (torch.tensor(f2[str(i)][()]) for i in range(len(f2.keys())))\n else:\n self.dev_cache_writer = h5py.File(dev_cache_path, 'w')\n if os.path.exists(test_cache_path):\n f3 = h5py.File(test_cache_path, 'r')\n self.test_cache = (torch.tensor(f3[str(i)][()]) for i in range(len(f3.keys())))\n else:\n self.test_cache_writer = h5py.File(test_cache_path, 'w')" ]
[ "0.6912924", "0.6817877", "0.6684545", "0.66632384", "0.6657902", "0.6651216", "0.6432238", "0.6383507", "0.6365778", "0.62689775", "0.621549", "0.62075645", "0.6172295", "0.61462307", "0.6138449", "0.6087849", "0.60696554", "0.6011156", "0.60051346", "0.5998601", "0.5963868", "0.59460986", "0.5924679", "0.5916921", "0.58943707", "0.5886656", "0.5855116", "0.58521444", "0.58404255", "0.58227766" ]
0.69236094
0
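The cache_end_user document in the record above allocates a per-task share of the device cache as c_ki = c_k * (input_data + output_data) / (n_applications * computation_requirement). Below is a minimal, self-contained sketch of that allocation step only; the class name, parameter names, and the example values are illustrative assumptions, and only the allocation formula itself comes from the record above.

# Minimal sketch of the end-user cache allocation (assumed names and values;
# only the formula c_ki = c_k * (in + out) / (n_apps * req) is taken from the record above).
class EndUserCache:
    def __init__(self, capacity_c_k, n_applications):
        self.capacity_c_k = capacity_c_k        # total cache capacity at the device (assumed unit)
        self.n_applications = n_applications    # number of applications sharing the device cache
        self.cached_content = {}                # content name -> cached output data
        self.allocations = []                   # per-task cache capacity allocations

    def cache(self, name, input_data, output_data, computation_requirement):
        # Allocate a cache share proportional to the task's data footprint.
        c_ki = self.capacity_c_k * (input_data + output_data) / (
            self.n_applications * computation_requirement)
        self.cached_content[name] = output_data
        self.allocations.append(c_ki)
        return c_ki

if __name__ == "__main__":
    cache = EndUserCache(capacity_c_k=512.0, n_applications=4)      # assumed capacity and app count
    allocated = cache.cache("task_42", input_data=20.0, output_data=14.0,
                            computation_requirement=8.0)            # assumed task sizes
    print("allocated cache capacity: %.2f" % allocated)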
Each end-user demand arrives at the edge cloud and requests resources, where each end-user has an identification (end_user_id). It starts the computation process and waits for it to finish
def compute_end_user(self, end_user, input_data, computation_deadline, p_k, computation_requirement_user, c_k, p_ec, c_ec, RC_EC_capacity, Name_offloaded_data): global total_executing_ec_m_array global total_executing_ec_n_array global EC_m_cloud_offloading_variable global EC_m_computation_allocation_variable global EC_n_computation_allocation_variable global output_data global transm_delay_between_ru global cache_capacity_allocation_user # Convert # cycle_per_second = cycle_per_byte * byte_per_second # https: // crypto.stackexchange.com / questions / 8405 / how - to - calculate - cycles - per - byte # each end-user device has a CPU peak bandwidth of $16$-bit values per cycle computation_requirement_user * 16 pki = p_k * (computation_requirement_user / (end_user * computation_requirement_user)) execution_latency_user = (input_data * computation_requirement_user) / (1 + pki) energy_consumption = cpu_arc_parameter * input_data * computation_requirement_user * pki ** 2 active_user_list.append(end_user) if execution_latency_user >= computation_deadline or energy_consumption >= cpu_energy[ end_user] or computation_requirement_user >= pki: print("No available resources at end-user device, task is offloaded to EC server") user_cache_allocation_variable.append(0) user_offloading_variable.append(1) # Radio resource revenue percentage_radio_spectrum = random.random() spectrum_efficiency_user = spectrum_efficiency # End user needs communication resource for offloading instantaneous_data = np.multiply(1, (percentage_radio_spectrum * spectrum_efficiency_user * wireless_bandwidth)) transm_delay_user_ru = np.multiply(user_offloading_variable, (input_data / 1 + instantaneous_data)) instantaneous_data_vector.append(instantaneous_data) input_data0 = [] input_data0.append(input_data) percentage_radio_spectrum_vector.append(percentage_radio_spectrum) EC_m_cloud_offloading_variable.append(0) # Offload to EC network ec = EC(count_EdgeCloud, end_user, input_data, computation_deadline, computation_requirement_user, p_ec, c_ec, RC_EC_capacity) transm_delay_between_ru, cached_content, EC_m_cloud_offloading_variable, \ EC_m_computation_allocation_variable, EC_n_computation_allocation_variable, \ total_executing_ec_m_array, total_executing_ec_n_array, \ computation_capacity_allocation_EC, cache_capacity_allocation_EC, \ EC_m_cache_allocation_variable, EC_n_cache_allocation_variable, \ EC_m_EC_n_offloading_variable, output_data, execution_latency_dc_vector, \ DC_computation_decision_variable, computation_capacity_allocation_dc, DC_caching_decision_variable, \ cache_capacity_allocation_dc = ec.compute(end_user, input_data, computation_deadline, p_ec, c_ec, RC_EC_capacity, computation_requirement_user, transm_delay_user_ru, Name_offloaded_data) transm_delay.append(transm_delay_user_ru) computation_capacity_allocation_user.append(pki) local_computation_cost.append(0) user_execution_latency_array.append(0) input_data_vector.append(Name_offloaded_data) cache_capacity_allocation_user.append(0) return transm_delay, percentage_radio_spectrum_vector, instantaneous_data_vector, \ user_execution_latency_array, computation_capacity_allocation_user, cache_capacity_allocation_user, \ local_computation_cost, user_cache_allocation_variable, user_offloading_variable, \ input_data_vector, active_user_list, transm_delay_between_ru, cached_content, \ EC_m_cloud_offloading_variable, EC_m_computation_allocation_variable, \ EC_n_computation_allocation_variable, total_executing_ec_m_array, total_executing_ec_n_array, \ 
computation_capacity_allocation_EC, cache_capacity_allocation_EC, EC_m_cache_allocation_variable, \ EC_n_cache_allocation_variable, EC_m_EC_n_offloading_variable, output_data, \ execution_latency_dc_vector, DC_computation_decision_variable, computation_capacity_allocation_dc, \ DC_caching_decision_variable, cache_capacity_allocation_dc else: end_user_device_stutus.append(1) user_execution_latency_array.append(execution_latency_user) user_cache_allocation_variable.append(1) total_executing_ec_m_array.append(0) total_executing_ec_n_array.append(0) EC_m_cloud_offloading_variable.append(0) EC_m_computation_allocation_variable.append(0) EC_n_computation_allocation_variable.append(0) percentage_radio_spectrum_vector.append(0) computation_capacity_allocation_EC = [] computation_capacity_allocation_EC.append(0) user_offloading_variable.append(0) # Local computation execution_latency_dc_vector = [] execution_latency_dc_vector.append(0) instantaneous_data_vector.append(0) computation_capacity_allocation_user.append(pki) local_computation_cost.append(execution_latency_user) cache_capacity_allocation_EC = [] cache_capacity_allocation_EC.append(0) EC_m_EC_n_offloading_variable = [] EC_m_EC_n_offloading_variable.append(0) output_data0 = self.input_data * (70 / 100) output_data.append(output_data0) transm_delay_between_ru.append(0) cached_content, cache_capacity_allocation_user = \ EndUser.cache_end_user(self, end_user, input_data, output_data0, c_k, computation_requirement_user, Name_offloaded_data) computation_capacity_allocation_EC.append(0) EC_m_cache_allocation_variable = [] EC_n_cache_allocation_variable = [] DC_computation_decision_variable = [] computation_capacity_allocation_dc = [] DC_caching_decision_variable = [] cache_capacity_allocation_dc = [] EC_m_cache_allocation_variable.append(0) EC_n_cache_allocation_variable.append(0) DC_computation_decision_variable.append(0) computation_capacity_allocation_dc.append(0) DC_caching_decision_variable.append(0) cache_capacity_allocation_dc.append(0) input_data_vector.append(Name_offloaded_data) transm_delay.append(0) return transm_delay, percentage_radio_spectrum_vector, instantaneous_data_vector, \ user_execution_latency_array, computation_capacity_allocation_user, cache_capacity_allocation_user, \ local_computation_cost, user_cache_allocation_variable, user_offloading_variable, \ input_data_vector, active_user_list, transm_delay_between_ru, cached_content, \ EC_m_cloud_offloading_variable, EC_m_computation_allocation_variable, \ EC_n_computation_allocation_variable, total_executing_ec_m_array, total_executing_ec_n_array, \ computation_capacity_allocation_EC, cache_capacity_allocation_EC, EC_m_cache_allocation_variable, \ EC_n_cache_allocation_variable, EC_m_EC_n_offloading_variable, output_data, \ execution_latency_dc_vector, DC_computation_decision_variable, computation_capacity_allocation_dc, \ DC_caching_decision_variable, cache_capacity_allocation_dc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute(self, end_user, input_data, computation_deadline, p_ec, c_ec, RC_EC_capacity,\n computation_requirement_user, transm_delay_user_ru, Name_offloaded_data):\n global execution_latency_dc_vector # Global need to be declared in the beginning of the function\n global EC_m_cache_allocation_variable\n global cache_capacity_allocation_EC\n global DC_caching_decision_variable\n global DC_computation_decision_variable\n global output_data\n\n pkm = p_ec * (computation_requirement_user / ((end_user - 1) * computation_requirement_user))\n p_EC_n = max(computation_capacity_EC)\n pkn = p_EC_n * (computation_requirement_user / ((end_user - 1) * computation_requirement_user))\n execution_latency_ec_n = (input_data * computation_requirement_user) / ((end_user - 1) * p_ec)\n execution_latency_ec_m = (input_data * computation_requirement_user) / ((end_user - 1) * p_ec)\n if computation_requirement_user <= pkm and execution_latency_ec_m <= computation_deadline:\n execution_latency_mc = (input_data * computation_requirement_user) / ((end_user - 1) * p_ec)\n total_executing_ec_m = execution_latency_mc\n total_executing_ec_m_array.append(total_executing_ec_m)\n total_executing_ec_n_array.append(0)\n EC_m_EC_n_offloading_variable.append(0)\n EC_n_cache_allocation_variable.append(0)\n print(\" Computation at EC is done at %d%%\" % (random.randint(50, 99))) # We just print to see where the\n # task is execute\n EC_m_computation_allocation_variable.append(1)\n EC_n_computation_allocation_variable.append(0)\n output_data0 = self.input_data * (70 / 100) # We assume that after computation, the input can be reduced\n # 30%\n EC_m_cache_allocation_variable.append(1)\n cached_content, cache_capacity_allocation_EC = \\\n EC.cache(self, end_user, input_data, output_data0, c_ec, Name_offloaded_data)\n computation_capacity_allocation_EC.append(pkm)\n execution_latency_dc_vector.append(0)\n cache_capacity_allocation_dc = []\n computation_capacity_allocation_dc = []\n DC_computation_decision_variable.append(0)\n computation_capacity_allocation_dc.append(0)\n DC_caching_decision_variable.append(0)\n cache_capacity_allocation_dc.append(0)\n output_data.append(output_data0)\n transm_delay_BS_m= input_data / Fiber_Fronthaul_RU_EC[end_user]\n transm_delay_between_ru.append(transm_delay_BS_m)\n return transm_delay_between_ru, cached_content, EC_m_cloud_offloading_variable, \\\n EC_m_computation_allocation_variable, EC_n_computation_allocation_variable, \\\n total_executing_ec_m_array, total_executing_ec_n_array, \\\n computation_capacity_allocation_EC, cache_capacity_allocation_EC, \\\n EC_m_cache_allocation_variable, EC_n_cache_allocation_variable, \\\n EC_m_EC_n_offloading_variable, output_data, execution_latency_dc_vector, \\\n DC_computation_decision_variable, computation_capacity_allocation_dc, DC_caching_decision_variable, \\\n cache_capacity_allocation_dc\n elif computation_requirement_user <= pkn and execution_latency_ec_n <= computation_deadline:\n EC_n_cache_allocation_variable.append(1)\n EC_m_EC_n_offloading_variable.append(1)\n EC_m_cache_allocation_variable.append(0)\n EC_n_computation_allocation_variable.append(1)\n EC_m_computation_allocation_variable.append(0)\n # Offloading delay between EC m and EC n\n transm_delay_ECm_ECn = input_data / Bandwidth_EC_m_EC_n\n total_executing_ec_n = execution_latency_ec_n\n print(\" Computation at EC n is done at %d%%\" % (random.randint(50, 99)))\n output_data0 = input_data * (40 / 1000)\n cached_content, cache_capacity_allocation_EC = EC.cache(\n self, end_user, 
input_data, output_data0, c_ec, Name_offloaded_data)\n computation_capacity_allocation_EC.append(0)\n execution_latency_dc_vector.append(0)\n cache_capacity_allocation_dc = []\n output_data.append(output_data0)\n computation_capacity_allocation_dc = []\n DC_computation_decision_variable = []\n DC_computation_decision_variable.append(0)\n computation_capacity_allocation_dc.append(0)\n DC_caching_decision_variable.append(0)\n cache_capacity_allocation_dc.append(0)\n EC_m_cloud_offloading_variable.append(0)\n total_executing_ec_n_array.append(total_executing_ec_n)\n total_executing_ec_m_array.append(0)\n transm_delay_between_ru.append(transm_delay_ECm_ECn)\n return transm_delay_between_ru, cached_content, EC_m_cloud_offloading_variable, \\\n EC_m_computation_allocation_variable, EC_n_computation_allocation_variable, \\\n total_executing_ec_m_array, total_executing_ec_n_array, \\\n computation_capacity_allocation_EC, cache_capacity_allocation_EC, \\\n EC_m_cache_allocation_variable, EC_n_cache_allocation_variable, \\\n EC_m_EC_n_offloading_variable, output_data, execution_latency_dc_vector, \\\n DC_computation_decision_variable, computation_capacity_allocation_dc, DC_caching_decision_variable, \\\n cache_capacity_allocation_dc\n else:\n print(\"No available resources at EC server, task is offloaded to Data center\")\n EC_n_computation_allocation_variable.append(0)\n EC_m_computation_allocation_variable.append(0)\n EC_m_cloud_offloading_variable.append(1)\n EC_m_cache_allocation_variable.append(0)\n cache_capacity_allocation_EC.append(0)\n EC_n_cache_allocation_variable.append(0)\n EC_m_EC_n_offloading_variable.append(0)\n transm_delay_EC_dc = input_data / RC_EC_capacity\n # Request Resource at Data center\n datacenter = DataCenter(total_number_regional_cloud, computation_deadline, input_data, computation_deadline,\n computation_requirement_user, p_d, c_d)\n output_data, execution_latency_dc_vector, DC_computation_decision_variable, \\\n computation_capacity_allocation_dc, cached_content, DC_caching_decision_variable, \\\n cache_capacity_allocation_dc = datacenter.compute_dc(end_user, input_data, computation_requirement_user,\n transm_delay_EC_dc, Name_offloaded_data)\n\n computation_capacity_allocation_EC.append(0)\n total_executing_ec_m_array.append(0)\n total_executing_ec_n_array.append(0)\n transm_delay_bsm_dc = input_data / RC_EC_capacity\n transm_delay_between_ru.append(transm_delay_bsm_dc)\n\n return transm_delay_between_ru, cached_content, EC_m_cloud_offloading_variable, \\\n EC_m_computation_allocation_variable, EC_n_computation_allocation_variable, \\\n total_executing_ec_m_array, total_executing_ec_n_array, \\\n computation_capacity_allocation_EC, cache_capacity_allocation_EC, \\\n EC_m_cache_allocation_variable, EC_n_cache_allocation_variable, \\\n EC_m_EC_n_offloading_variable, output_data, execution_latency_dc_vector, \\\n DC_computation_decision_variable, computation_capacity_allocation_dc, DC_caching_decision_variable, \\\n cache_capacity_allocation_dc", "def cache_end_user(self, end_user, input_data, output_data0, c_k, computation_requirement_user,\n Name_offloaded_data):\n print(\"computation at end-user device is done at %d%% of %s's task.\" %\n (random.randint(50, 99), end_user))\n print(\"Caching at end-user device is done at %d%% of %s's task.\" % (random.randint(50, 99), end_user))\n c_ki = c_k * (input_data + output_data0) / (number_user_application[end_user] * computation_requirement_user)\n cached_content.insert(Name_offloaded_data, output_data0)\n 
cache_capacity_allocation_user.append(c_ki)\n return cached_content, cache_capacity_allocation_user", "async def populate_weather(user_request: schemas.UserRequest, background_tasks: BackgroundTasks):\n\n user_id = user_request.user_id\n query = select(UserRequest).where(UserRequest.user_id == user_id)\n exist = await database.fetch_one(query)\n\n if not exist:\n task = GetWeatherBackgroundTask(\n cities=cities,\n database=database,\n weather_client=weather_client,\n user_id=user_id,\n )\n background_tasks.add_task(task.run)\n task_pool[task.user_id] = task\n return {\"message\": \"Background task started succesfully.\"}\n\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail=f\"The 'user_id' {user_id} already exists.\",\n )", "def compute_dc(self, end_user, input_data, computation_requirement_user, transm_delay_EC_dc, Name_offloaded_data):\n pkd = p_d * (computation_requirement_user / (end_user * computation_requirement_user))\n execution_latency = (input_data * computation_requirement_user) / (end_user * pkd)\n print(\"Computation at Data center is done at %d%%\" % (random.randint(50, 99)))\n output_data0 = self.input_data * (70/100) # We assume that the output data is small than input data\n total_exucution_dc = transm_delay_EC_dc + execution_latency\n execution_latency_dc_vector.append(total_exucution_dc)\n computation_capacity_allocation_dc.append(pkd)\n output_data.append(output_data0)\n cached_content, DC_caching_decision_variable, cache_capacity_allocation_dc = \\\n DataCenter.cache_dc(self, end_user, input_data, output_data0, c_d, Name_offloaded_data)\n DC_computation_decision_variable.append(1)\n\n return output_data, execution_latency_dc_vector, DC_computation_decision_variable, \\\n computation_capacity_allocation_dc, cached_content, DC_caching_decision_variable, cache_capacity_allocation_dc", "def _run_computation(self):\n with self.swap(stats_jobs_continuous.StatisticsAggregator,\n 'get_statistics', self._mock_get_statistics):\n ModifiedUserImpactAggregator.start_computation()\n self.process_and_flush_pending_tasks()", "def run(self):\n\n if self.nproc > 0:\n # get resources\n nodes = self.RM.get_allocation(self, self.nproc, self.mem_pproc, self.disk_pproc)\n\n # did we actually get nodes?????\n if nodes >= 0:\n #--------------------------------\n # update resource usage\n #--------------------------------\n self.using.nodes = nodes\n self.using.procs = self.nproc\n if self.start_waiting_time >= 0:\n self.total_waiting_time += self.fwk.fwk_global_time - self.start_waiting_time\n self.start_waiting_time = -1\n\n #--------------------------------\n # set curr_exec_time, start_exec_time, and state\n #--------------------------------\n self.get_curr_exec_time()\n\n #--------------------------------\n # log event\n #--------------------------------\n if self.retry == True:\n if self.sim.retry_limit > 0 and self.curr_retries < self.sim.retry_limit:\n self.num_retries += 1\n self.curr_retries += 1\n self.fwk.logEvent(self.sim.name, self.name, \"relaunch_task\", \"relaunched attempt %d on %d processes on %d nodes\" %(self.retry, self.using.procs, self.using.nodes))\n else:\n #print \"exceeded retry limit\"\n if self.fwk.debug:\n print('exceeded retry limit, killing sim from component.')\n self.sim.kill()\n else:\n self.fwk.logEvent(self.sim.name, self.name, \"start_task\", \"started running on %d processes on %d nodes\" % (self.using.procs, self.using.nodes))\n else:\n #-------------------------------------------\n # we did not get the resources we wanted\n 
#-------------------------------------------\n self.state = \"waiting_on_resources\"\n if self.start_waiting_time == -1:\n self.start_waiting_time = self.fwk.fwk_global_time\n self.num_waiting += 1\n #--------------------------------\n # log event\n #--------------------------------\n self.fwk.logEvent(self.sim.name, self.name, \"waiting_on_procs\", \"needs %d procs %d memory pproc %d disk pproc\" % (self.nproc, self.mem_pproc, self.disk_pproc))\n else:\n # non-resource consuming component\n self.get_curr_exec_time()\n if self.retry == True:\n self.fwk.logEvent(self.sim.name, self.name, \"relaunch_task\", \"relaunched, attempt %d\" %(self.num_retries))\n else:\n self.fwk.logEvent(self.sim.name, self.name, \"start_task\", \"started\")", "def create_worker_join_task(self, id, user_password='Tester', user_org='Musketeer'):\r\n created = False\r\n while not created:\r\n try:\r\n\r\n self.task_name = self.get_current_task_name()\r\n print(self.task_name)\r\n config = 'cloud'\r\n\r\n version = self.task_name.split('_')[1]\r\n worker_name = 'worker_' + str(id) + '_' + version\r\n user_password += version\r\n\r\n ffl.Factory.register(config, fflapi.Context, fflapi.User, fflapi.Aggregator, fflapi.Participant)\r\n fflapi.create_user(worker_name, user_password, user_org, self.credentials_filename)\r\n\r\n\r\n #context_w = ffl.Factory.context(config, self.credentials_filename)\r\n context_w = ffl.Factory.context(config, self.credentials_filename, worker_name, user_password, encoder=serializer.Base64Serializer)\r\n\r\n\r\n '''\r\n ffl_user_worker = ffl.Factory.user(context_w)\r\n with ffl_user_worker:\r\n try:\r\n ffl_user_worker.create_user(worker_name, user_password, user_org)\r\n except Exception as err:\r\n print(str(err).split(':')[1])\r\n '''\r\n #context_w = ffl.Factory.context('cloud', self.credentials_filename, worker_name, user_password, encoder = serializer.Base64Serializer)\r\n #user_worker0 = ffl.Factory.user(context_w)\r\n \r\n user_worker = ffl.Factory.user(context_w)\r\n with user_worker:\r\n try:\r\n\r\n\r\n\r\n result = user_worker.join_task(self.task_name)\r\n print('Worker %s has joined task %s' % (worker_name, self.task_name))\r\n created = True\r\n except Exception as err:\r\n print(str(err).split(':')[1])\r\n except:\r\n print('waiting for Master...')\r\n time.sleep(1)\r\n pass\r\n\r\n participant = ffl.Factory.participant(context_w, task_name=self.task_name)\r\n\r\n return participant", "def run(self):\n while self._num_workers > 0:\n self.server.handle_request()\n self._graph = None", "def collect_inv(spc, num_threads):\n global remaining\n devices = spc.device_management.devices.get(\n filter_={'managedStatus': 'In Sync'},\n paging={'start': 0, 'limit': 200},\n sortby=['name', 'platform'])\n\n print(\"There are %d devices to process\" % len(devices))\n remaining = len(devices)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=int(num_threads)) as executor:\n for device in devices:\n f = executor.submit(process_device, spc, device)\n f.add_done_callback(finished)\n\n print(\"\\nAll Over!!!\")", "async def run_requests(self):\n loop = asyncio.get_event_loop()\n tasks = []\n async with aiohttp.ClientSession(connector=self.connector) as session:\n\n for index, id in enumerate(self.ids):\n if id not in self.processed_ids:\n url = self.base_url + id\n auth_token = base64.b64encode(id.encode('ascii'))\n header = {\"Authorization\": auth_token.decode('UTF-8')}\n tasks.append(asyncio.ensure_future(self._request_one(url=url, header=header, id=id, index = index, session = 
session)))\n\n _ = await asyncio.gather(*tasks)", "async def run():\n sem = asyncio.Semaphore(DEFAULT_SEMAPHORE_LIMIT)\n tasks = []\n\n async with ClientSession() as session:\n for u in [ROOT_URL.format(jid) for jid in DEFAULT_RANGE_IDS]:\n task = asyncio.ensure_future(bound_fetch(sem, u, session))\n tasks.append(task)\n responses = asyncio.gather(*tasks)\n await responses", "def cli():\n while True:\n try:\n # Get the whole information on each edge.\n l_edge = list()\n s_rsc = '{}/edge'.format(etcdc.prefix)\n \n try:\n r = etcdc.read(s_rsc, recursive=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n for child in r.children:\n l_app = list()\n d = ast.literal_eval(child.value)\n # get hosts\n print(PROJECT_ROOT + '/' + d['endpoint'])\n l_hosts = kube_list_node(PROJECT_ROOT + '/' + d['endpoint'])\n d['hosts'] = len(l_hosts)\n d_nodes = dict() # {'name': 'ip', ...}\n for item in l_hosts:\n d_nodes[item.metadata.name] = item.status.addresses[0].address\n # log.debug(d_nodes)\n # get # of tenants and apps\n l_tenants = get_tenant(d['name'])\n d['tenants'] = len(l_tenants)\n d['apps'] = 0\n for e in l_tenants:\n if 'app' in e:\n d['apps'] += len(e['app'])\n \n d['cpu'] = 0\n d['memory'] = 0\n i_total_cores = 0\n i_total_memory = 0\n i_total_storage = 0\n for h in l_hosts:\n i_total_cores += int(h.status.capacity['cpu'])\n i_total_memory += int(h.status.capacity['memory'].\n replace('Ki', ''))\n d['tot_cpu'] = i_total_cores\n d['tot_mem'] = int(i_total_memory / (1024*1024))\n \n # Get loadavg and free mem\n if d['name'] == 'edge1':\n ssh_server = 'harden.iorchard.co.kr'\n elif d['name'] == 'edge2':\n ssh_server = 'durant.iorchard.co.kr'\n RSC = 'ssh -p42544 {} get_rsc.sh'.format(ssh_server)\n (b_res, s_out) = cmd(RSC, 3, False)\n l = s_out.split(\"\\n\")\n d['used_cpu'] = (float(l[0]) + float(l[1]) + float(l[2]))\n avail_mem = (int(l[3]) + int(l[4]) + int(l[5])) / (1024*1024)\n d['used_mem'] = d['tot_mem'] - avail_mem\n d['cpu'] = int(d['used_cpu'] / d['tot_cpu'] * 100)\n d['memory'] = int(d['used_mem'] / d['tot_mem'] * 100)\n # ceph storage\n CEPH = \"kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + \" -n rook-ceph exec -it \" \\\n + \"$(kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + \" -n rook-ceph get po \" \\\n + \"-l app=rook-ceph-tools \" \\\n + \"-o jsonpath='{.items[0].metadata.name}') -- \" \\\n + \"ceph df --format json\"\n (b_res, s_out) = cmd(CEPH, 3, False)\n print(s_out)\n d['status'] = 'Healthy' if b_res else 'Unhealthy'\n d_stor = ast.literal_eval(s_out)\n d['tot_stor'] = int(d_stor['stats']['total_bytes'] / pow(1024, 3))\n d['used_stor'] = int(d_stor['stats']['total_used_bytes'] / pow(1024, 3))\n d['storage'] = int(d['used_stor'] / d['tot_stor'] * 100)\n # Update etcd status\n try:\n s = '{}/edge/{}'.format(etcdc.prefix,\n d['name'])\n # log.debug(d)\n etcdc.write(s, d, prevExist=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n \n # Update app status\n s_app = '{}/app'.format(etcdc.prefix)\n try:\n r_app = etcdc.read(s_app, recursive=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n for app_child in r_app.children:\n if app_child.value is not None:\n d_app = dict()\n app = ast.literal_eval(app_child.value)\n if app['edge'] == d['name']:\n d_app['name'] = app['name']\n d_app['username'] = GUAC_USER\n d_app['password'] = GUAC_PASS\n # Get catalog info.\n s_cat = '{}/catalog/{}'.format(etcdc.prefix,\n app['catalog'])\n try:\n r_cat = etcdc.read(s_cat)\n except etcd.EtcdKeyNotFound as e:\n 
log.error(e)\n else:\n cat = ast.literal_eval(r_cat.value)\n app['cat_type'] = cat['type']\n app['cat_name'] = cat['name']\n app['cat_logo'] = cat['logo']\n # Get app status\n if app['cat_type'] == 'vm':\n # first, look at DataVolume status of app.\n CMD = \"kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get dv ' \\\n + app['name'] \\\n + \" -o jsonpath='{range .status}{.phase},{.progress}{end}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n l_out = s_out.split(',')\n if l_out[0] == 'Succeeded':\n # Get vm status of app\n CMD = \"kubectl --kubeconfig \" + PROJECT_ROOT \\\n + '/' \\\n + d['endpoint'] + ' get vm ' \\\n + app['name'] \\\n + \" -o jsonpath='{.status.ready}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res and s_out == 'true':\n # update app status 'running'.\n app.update({'status': 'running'})\n \n if app['edge'] == d['name']:\n # Get where app is running.\n CMD = \"kubectl --kubeconfig \" \\\n + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get vmi ' \\\n + app['name'] \\\n + \" -o jsonpath='{.status.nodeName}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['hostname'] = d_nodes[s_out]\n # Get nodeport for app.\n CMD = \"kubectl --kubeconfig \" \\\n + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get svc ' \\\n + app['name'] \\\n + \" -o jsonpath='{.spec.ports[0].nodePort}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['port'] = s_out\n else:\n # update app status 'stopped'\n app.update({'status': 'stopped'})\n elif l_out[0] == 'ImportInProgress':\n # update app status 'building' and \n app.update({'status': 'building ({})'.format(l_out[1])})\n elif app['cat_type'] == 'container':\n app.update({'status': 'running'})\n \n try:\n s = '{}/app/{}'.format(etcdc.prefix,\n app['name'])\n # log.debug(app)\n etcdc.write(s, app, prevExist=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n \n if 'port' in d_app:\n l_app.append(d_app)\n # render guac-config.j2 and copy it to guac broker server\n log.debug(l_app)\n template = env.get_template('broker.j2')\n s_out = template.render(l_app=l_app)\n s_tmp = '/tmp/{}.broker'.format(d['name'])\n try:\n with open(s_tmp, 'w') as f:\n f.write(s_out)\n except Exception as e:\n log.error(e)\n else:\n CMD = \"scp \" \\\n + \"-P42544 {} {}\".format(s_tmp, d['broker_ip']) \\\n + \":/etc/guacamole/noauth-config.xml\"\n log.debug(CMD)\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['port'] = s_out\n \n l_edge.append(d)\n \n # log.debug(l_edge)\n log.debug(l_app)\n \n time.sleep(1)\n except:\n log.error('unknown error')", "async def run(users):\n async with aiohttp.ClientSession() as session:\n tasks = []\n for user in users:\n tasks.append(\n TornAPI.fetch_torn_user_data(\n session,\n user.params,\n user.id\n )\n )\n\n responses = await asyncio.gather(*tasks, return_exceptions=True)\n return responses", "def ProcessRequests(self, manager):\n self._CreateSpool()\n metrics_set = self._MetricsSet(\n *(constructor(self._METRIC_PREFIX + name)\n for name, constructor in self._METRICS_CONSTRUCTORS))\n pending_requests = []\n timestamps = {}\n tick_count = 0\n next_heartbeat = time.time()\n while True:\n tick_count += 1\n if time.time() >= next_heartbeat:\n next_heartbeat = time.time() + self._HEARTBEAT_INTERVAL\n logging.debug('Starting tick number %d', tick_count)\n manager.StartTick()\n\n num_completed = 0\n for request_id, result in manager.Reap():\n num_completed += 1\n metrics_set.total_completed.increment(fields={'status': 'normal'})\n time_running = time.time() - timestamps.pop(request_id)\n 
metrics_set.time_running.add(time_running)\n self._CompleteRequest(request_id, result)\n\n num_added = 0\n for request_id in self._GetNewRequests():\n num_added += 1\n metrics_set.total_received.increment()\n timestamps[request_id] = time.time()\n pending_requests.append(request_id)\n\n num_aborted = 0\n for abort_id in self._GetAbortRequests():\n num_aborted += 1\n metrics_set.total_completed.increment(fields={'status': 'abort'})\n if abort_id in timestamps:\n time_to_abort = time.time() - timestamps.pop(abort_id)\n metrics_set.time_to_abort.add(time_to_abort)\n self._ProcessAbort(abort_id, pending_requests, manager)\n\n num_started = 0\n while pending_requests and manager.HasCapacity():\n num_started += 1\n request_id = pending_requests.pop(0)\n time_now = time.time()\n time_waiting = time_now - timestamps[request_id]\n metrics_set.time_waiting.add(time_waiting)\n timestamps[request_id] = time_now\n self._StartRequest(request_id, manager)\n\n if num_completed or num_added or num_aborted or num_started:\n logging.info('new: %d, started: %d, aborted: %d, completed: %d',\n num_added, num_started, num_aborted, num_completed)\n num_pending = len(pending_requests)\n num_running = len(manager)\n logging.info('pending: %d, running: %d', num_pending, num_running)\n metrics_set.task_count.set(num_pending,\n fields={'state': 'pending'})\n metrics_set.task_count.set(num_running,\n fields={'state': 'running'})\n metrics_set.ticks.increment()\n time.sleep(manager.sample_interval)", "def run(self):\n operation_manager = self._core.get_operation_manager()\n while True:\n while operation_manager.process_next():\n pass\n sleep(2)", "def __launch_task(self, current_time):\r\n assert self.queued_tasks > 0\r\n assert self.running_tasks < self.num_cores\r\n\r\n self.queued_tasks -= 1\r\n if not len(get_param(\"relative_weights\")) > self.current_user:\r\n print get_param(\"relative_weights\"), self.current_user\r\n assert False\r\n tasks_per_round = get_param(\"relative_weights\")[self.current_user]\r\n if self.task_count >= tasks_per_round:\r\n # Move on to the next user.\r\n self.task_count = 0\r\n self.current_user = (self.current_user + 1) % self.num_users\r\n\r\n while len(self.queues[self.current_user]) == 0:\r\n self.current_user = (self.current_user + 1) % self.num_users\r\n self.task_count = 0\r\n # Get the first task from the queue\r\n job, task_id = self.queues[self.current_user][0]\r\n # Remove the task from the user's queue.\r\n self.queues[self.current_user] = self.queues[self.current_user][1:]\r\n self.task_count += 1\r\n assert job.user_id == self.current_user\r\n task_length = job.get_task_length(task_id)\r\n event = (current_time + task_length, TaskCompletion(job, self))\r\n self.stats_manager.task_started(self.current_user, current_time)\r\n self.time_started = current_time\r\n if get_param(\"record_task_info\"):\r\n job.record_wait_time(task_id, current_time)\r\n self.running_tasks += 1\r\n return event", "def tasks_recv():\n recv_json = request.get_json()\n build_taskgraph(recv_json)\n return 'ok'", "def runTasks(self):\n\n self.logger.INFO(\n f\"STARTING TASKS FOR TRADER {self.user['Name']} - ACCOUNT ID: {self.account_id}\\n\")\n\n def selectSleep():\n \"\"\"\n PRE-MARKET(0400 - 0930 ET): 5 SECONDS\n MARKET OPEN(0930 - 1600 ET): 5 SECONDS\n AFTER MARKET(1600 - 2000 ET): 5 SECONDS\n\n WEEKENDS: 60 SECONDS\n WEEKDAYS(2000 - 0400 ET): 60 SECONDS\n\n EVERYTHING WILL BE BASED OFF CENTRAL TIME\n\n OBJECTIVE IS TO FREE UP UNNECESSARY SERVER USAGE\n \"\"\"\n\n dt = 
datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # IF CURRENT TIME GREATER THAN 8PM AND LESS THAN 4AM, OR DAY IS WEEKEND, THEN RETURN 60 SECONDS\n if tm > \"20:00\" or tm < \"04:00\" or day in weekdays:\n\n return 60\n\n # ELSE RETURN 5 SECONDS\n return 5\n\n while self.isAlive:\n\n try:\n\n self.user = self.users.find_one({\"Name\": self.user[\"Name\"]})\n\n self.asset_type = self.user[\"Accounts\"][self.account_id][\"Asset_Type\"]\n\n self.updateAccountBalance()\n\n self.updateLastPrice()\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n # IF MIDNIGHT, ADD BALANCE, OPEN POSITIONS PROFIT/LOSS, CLOSED POSITIONS PROFIT/LOSS.\n midnight = dt_central.time().strftime(\"%H:%M\")\n\n if midnight == \"23:55\":\n\n if not self.midnight:\n\n self.balanceHistory()\n\n self.openPositionHistory()\n\n self.closedPositionHistory()\n\n self.midnight = True\n\n else:\n\n self.midnight = False\n\n # RUN TASKS ####################################################\n\n if self.asset_type == \"OPTION\":\n\n self.sellOutOptions()\n\n self.checkTrailingStop()\n\n self.killQueueOrder()\n\n self.sellAtMarketOpen()\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n # SELL ALL SECONDARY_AGG, SEC_AGG_V2 POSITIONS AT END OF DAY\n if dt_central.strftime(\"%H:%M\") == \"14:55\" and self.asset_type == \"EQUITY\":\n\n if not self.market_close_check:\n\n self.sellOutStrategies([{\"Strategy\": \"Secondary_Agg\"},\n {\"Strategy\": \"Sec_Agg_v2\"}])\n\n self.market_close_check = True\n\n else:\n\n self.market_close_check = False\n\n # SELL ALL Sec_Agg_Daytrade AT 14:30\n if dt_central.strftime(\"%H:%M\") == \"14:30\" and self.asset_type == \"EQUITY\":\n\n if not self.eleven_check:\n\n self.sellOutStrategies([{\"Strategy\": \"Sec_Aggressive\"}])\n\n self.eleven_check = True\n\n else:\n\n self.eleven_check = False\n\n except KeyError:\n\n self.isAlive = False\n\n except Exception:\n\n self.logger.ERROR(\n f\"ACCOUNT ID: {self.account_id} - TRADER: {self.user['Name']}\")\n\n finally:\n\n time.sleep(selectSleep())\n\n self.logger.INFO(f\"TASK STOPPED FOR ACCOUNT ID {self.account_id}\")", "def output_thread():\n global gRunning\n\n try:\n while gRunning:\n try:\n inference_result, user_data = gGraph.GetResult()\n \n print (postprocess(inference_result))\n #print(user_data)\n # gUpdateq.put((postprocess(inference_result), user_data))\n\n \n except KeyError:\n # This error occurs when GetResult can't access the user param from the graph, we're just ignoring it for now\n print(\"KeyError\")\n pass\n except Exception as e:\n print(e)\n pass\n print(\"Output thread terminating\")", "def workflow_complete():\n\n if request.method == \"POST\":\n \"\"\"\n request looks like:\n {\n \"workflow_name\": \"test-workflow\",\n \"dataset_id\": \"HRI107\",\n \"operation\": \"std-dev\",\n \"PID\": 1\n \"other_cardinals\": [(2, \"23.45.67.89\"), (3, \"34.56.78.90\")],\n \"jiff_server\": \"45.67.89.01\"\n }\n \"\"\"\n\n req = request.get_json(force=True)\n\n pods = get_pod_by_workflow_and_pid(req[\"workflow_name\"], req[\"PID\"])\n if pods is not None:\n for pod in pods:\n delete_entry(pod)\n\n jiff_server = get_jiff_server_by_workflow(req[\"workflow_name\"])\n if jiff_server is not None:\n delete_entry(jiff_server)\n\n orch = 
Orchestrator(req, app, len(get_running_workflows()))\n\n orch.stop_workflow()\n\n app.logger.info(f\"Workflow {req['workflow_name']} complete, removed from running jobs.\")\n\n event_timestamps = get_pod_event_timestamp_by_workflow_and_pid(req['workflow_name'],req['PID'])\n if event_timestamps is not None:\n delete_entry(event_timestamps)\n\n event_timestamps_dict = {x.name: str(getattr(event_timestamps, x.name)) for x in event_timestamps.__table__.columns}\n\n pod_resource_usage = get_pod_resource_consumption_by_workflow_and_pid(req['workflow_name'],req['PID'])\n usage = {'cpu': {'avg': None, 'max': None}, 'memory': {'avg': None, 'max': None}}\n if pod_resource_usage is not None:\n cpu_consumptions = [obj.cpu_usage for obj in pod_resource_usage]\n memory_consumptions = [obj.memory_usage for obj in pod_resource_usage]\n\n if len(cpu_consumptions) > 0:\n usage['cpu'] = {\n 'avg': sum(cpu_consumptions) / len(cpu_consumptions),\n 'max': max(cpu_consumptions)\n }\n\n if len(memory_consumptions) > 0:\n usage['memory'] = {\n 'avg': sum(memory_consumptions) / len(memory_consumptions),\n 'max': max(memory_consumptions)\n }\n\n for obj in pod_resource_usage:\n delete_entry(obj)\n\n app.logger.info(\"ABOUT TO send pod stats\")\n orch.send_pod_stats(usage, event_timestamps_dict)\n response = {\n \"MSG\": \"OK\",\n \"timestamps\": event_timestamps_dict,\n \"resource_consumption\": usage\n }\n else:\n\n app.logger.error(\n f\"Received request indicating the workflow {req['workflow_name']} \"\n f\"completed, but this workflow is not present in running jobs\"\n f\"record. Nothing to do.\")\n response = {\n \"MSG\": f\"ERR: {req['workflow_name']} not in running jobs record.\"\n }\n\n return jsonify(response)", "def master(client, data, column_name):\n\n # Info messages can help you when an algorithm crashes. These info\n # messages are stored in a log file which is send to the server when\n # either a task finished or crashes.\n info('Collecting participating organizations')\n\n # Collect all organization that participate in this collaboration.\n # These organizations will receive the task to compute the partial.\n organizations = client.get_organizations_in_my_collaboration()\n ids = [organization.get(\"id\") for organization in organizations]\n\n # Request all participating parties to compute their partial. This\n # will create a new task at the central server for them to pick up.\n # We've used a kwarg but is is also possible to use `args`. Although\n # we prefer kwargs as it is clearer.\n info('Requesting partial computation')\n task = client.create_new_task(\n input_={\n 'method': 'average_partial',\n 'kwargs': {\n 'column_name': column_name\n }\n },\n organization_ids=ids\n )\n\n # Now we need to wait untill all organizations(/nodes) finished\n # their partial. We do this by polling the server for results. 
It is\n # also possible to subscribe to a websocket channel to get status\n # updates.\n info(\"Waiting for resuls\")\n task_id = task.get(\"id\")\n task = client.get_task(task_id)\n while not task.get(\"complete\"):\n task = client.get_task(task_id)\n info(\"Waiting for results\")\n time.sleep(1)\n\n # Once we now the partials are complete, we can collect them.\n info(\"Obtaining results\")\n results = client.get_results(task_id=task.get(\"id\"))\n\n # Now we can combine the partials to a global average.\n global_sum = 0\n global_count = 0\n for output in results:\n global_sum += output[\"sum\"]\n global_count += output[\"count\"]\n\n return {\"average\": global_sum / global_count}", "def create_jobs(self, total_time):\r\n task_distribution = get_param('task_distribution')\r\n num_tasks = get_param('num_tasks')\r\n task_length = get_param('task_length')\r\n avg_arrival_delay = get_param('job_arrival_delay')\r\n job_arrival_distribution = get_param('job_arrival_distribution')\r\n for front_end in self.front_ends:\r\n last_job_arrival = 0\r\n count = 0\r\n while True:\r\n if job_arrival_distribution == \"constant\":\r\n new_last = last_job_arrival + avg_arrival_delay\r\n else:\r\n # If the job arrivals are a Poisson process, the time\r\n # between jobs follows an exponential distribution. \r\n new_last = last_job_arrival + \\\r\n random.expovariate(1.0/avg_arrival_delay)\r\n\r\n # See if we've passed the end of the experiment\r\n if new_last > total_time:\r\n break\r\n else: \r\n last_job_arrival = new_last\r\n \r\n if task_distribution == \"bimodal\":\r\n if random.random() > (1.0 / 6):\r\n # 5/6 of the jobs have 10 tasks.\r\n num_tasks = 10\r\n else:\r\n num_tasks = 200\r\n relative_demands = get_param(\"relative_demands\")\r\n if relative_demands == []:\r\n user_id = random.randrange(self.num_users)\r\n else:\r\n r = random.random()\r\n user_id = -1\r\n for current_user in range(self.num_users):\r\n if r < get_param(\"relative_demands\")[current_user]:\r\n user_id = current_user\r\n break\r\n assert user_id != -1\r\n job = Job(user_id, last_job_arrival, num_tasks, task_length,\r\n self.stats_manager, \r\n front_end.id_str + \":\" + str(count), self.servers)\r\n job_arrival_event = JobArrival(job, front_end)\r\n self.event_queue.put((last_job_arrival, job_arrival_event))\r\n self.total_jobs += 1\r\n count = count + 1", "def process(self):\n if not self._requests:\n return\n\n self._processing = True\n Engine.instance().start()", "def async_infer(self, data, req_id):\n\n input_data = {self.input_tensor_name: data}\n self.infer_queue.start_async(input_data, req_id)", "async def proccess_message(self, *args):\n\n await self.used_middlewares[0].compute(*args)", "async def run(self):\n print(\"[*] Starting %d coroutines to throttle requests to each search engine.\" % (len(self.data)))\n futures = [\n self.loop.run_in_executor(\n None, self.http_req, se\n ) for se in self.data.keys()\n ]\n\n for data in asyncio.as_completed(futures):\n names = await data\n self.employees.update(names)", "async def run_service(loop):\n curr = time.time()\n results = []\n\n while True:\n # First get the list of pending tasks, if there exists any\n results = get_pending(results)\n\n # Now poll the endpoints\n for url in API_URLS:\n future = loop.create_task(poll_endpoint(url))\n results.append(future)\n \n await asyncio.gather(*results)\n \n delta = time.time() - curr\n diff = max(0, POLL_INTERVAL - delta)\n await asyncio.sleep(diff)\n curr = time.time()", "def process(self, user_id: str, all_days: List[str]):\n 
if self.CC is not None:\n # Office Time Calculation from GPS\n self.CC.logging.log(\"Processing Working Days\")\n self.listing_all_work_days(user_id, all_days)\n\n arrival_data_feature = ArrivalTimes(self.CC)\n arrival_data_feature.process(user_id, all_days)\n\n expected_arrival_data_feature = ExpectedArrivalTimes(self.CC)\n expected_arrival_data_feature.process(user_id, all_days)\n\n staying_time_data_feature = StayingTimes(self.CC)\n staying_time_data_feature.process(user_id, all_days)\n\n expected_staying_time_data_feature = ExpectedStayingTimes(self.CC)\n expected_staying_time_data_feature.process(user_id, all_days)\n\n # Office Time Calculation from Beacon\n working_days_from_beacon_feature = WorkingDaysFromBeacon(self.CC)\n working_days_from_beacon_feature.process(user_id, all_days)\n\n arrival_data_from_beacon_feature = ArrivalTimesFromBeacon(self.CC)\n arrival_data_from_beacon_feature.process(user_id, all_days)\n\n expected_arrival_data_from_beacon_feature = ExpectedArrivalTimesFromBeacon(self.CC)\n expected_arrival_data_from_beacon_feature.process(user_id, all_days)\n\n staying_time_data_from_beacon_feature = StayingTimesFromBeacon(self.CC)\n staying_time_data_from_beacon_feature.process(user_id, all_days)\n\n expected_staying_time_data_from_beacon_feature = ExpectedStayingTimesFromBeacon(self.CC)\n expected_staying_time_data_from_beacon_feature.process(user_id, all_days)", "def startUserScrape(user): \n print '*** STARTED SCRAPING: USER: '+user+' ***'\n cache.set('user_scrape','true')\n cache.set('scrape_mode','user')\n cache.set('scrape_user',user)\n\n for key in ['scrape_friends','scrape_followers','scrape_tweets']:\n cache.set(key,'')\n \n for job in ['friends','followers','tweets']:\n cacheKey = '_'.join(['nextnearest',job,user])\n cache.set(cacheKey,False) \n \n doUserScrape.delay()", "def train(self, train_set):\n\n class TrainJob(mp.Process):\n def __init__(self, func, result_list, *args):\n super().__init__()\n self.func = func\n self.args = args\n self.res = result_list\n\n def run(self):\n self.res.append(self.func(*self.args))\n\n self._user_log = pd.DataFrame(train_set)\n self._user_log.columns = ['user_id', 'item_id']\n self._user_log.drop_duplicates(inplace=True)\n '''Calculate user model'''\n manager = mp.Manager()\n res_list = manager.list()\n user_ids = self._user_log['user_id'].drop_duplicates().values.tolist()\n part = 2\n cpus = cpu_count()\n job_list = []\n jobs = int(cpus / part) # Use 1/2 of the cpus\n if jobs <= 0:\n jobs = 1\n part_ids_num = int((len(user_ids) + jobs - 1) / jobs)\n for i in range(jobs):\n part_ids = user_ids[i * part_ids_num:i * part_ids_num + part_ids_num]\n j = TrainJob(self._build_user_model, res_list, part_ids)\n job_list.append(j)\n j.start()\n for job in job_list:\n job.join()\n for ids_dict in res_list:\n for key in ids_dict.keys():\n self._user_vector[key] = ids_dict[key]\n return self" ]
[ "0.6130756", "0.5836668", "0.54324245", "0.5392235", "0.53178537", "0.5296981", "0.52855223", "0.5277251", "0.52644676", "0.52298564", "0.522731", "0.5215356", "0.51988107", "0.5180719", "0.51591676", "0.51551324", "0.5139207", "0.51250726", "0.51058996", "0.51030153", "0.5102613", "0.50992", "0.5094567", "0.507644", "0.5070548", "0.5064263", "0.50541246", "0.5030821", "0.50238544", "0.5015608" ]
0.6660759
0
Read yaml file with label definitions.
def read_label_definitions(filename: str) -> dict: with open(filename, 'r') as f: translate = yaml.load(f, Loader=yaml.SafeLoader) return translate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_labels(labels_file):\n if not labels_file:\n print 'WARNING: No labels file provided. Results will be difficult to interpret.'\n return None\n\n labels = []\n with open(labels_file) as infile:\n for line in infile:\n label = line.strip()\n if label:\n labels.append(label)\n assert len(labels), 'No labels found'\n return labels", "def load_labels(path):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r\"[:\\s]+\", content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n # print(labels)\n return labels", "def read_yaml(yfile):\n try:\n f0 = get_fileobj(yfile)\n context_items = [ConTextItem(literal=d[\"Lex\"],\n category=d[\"Type\"],\n re=r\"%s\"%d[\"Regex\"],\n rule=d[\"Direction\"],\n comments=d[\"Comments\"]) for d in yaml.load_all(f0)]\n except FileNotFoundError:\n context_items = []\n finally:\n f0.close()\n return context_items", "def read_labels_from_file(label_identifier) -> list:\n global _LABELS\n if _LABELS is None:\n _LABELS = read_label_definitions(NORM_LABEL_FILE)\n filename = _LABELS['label_files'][label_identifier]\n print(f\"Reading {filename}\")\n with open(filename, 'r') as f:\n labels = [l.strip() for l in f.readlines() if l.strip() != '']\n return labels", "def _reflow_labels(self, filename=\"Dockerfile\"):\n\n dfp = DockerfileParser(path=filename)\n labels = dict(dfp.labels) # Make a copy of the labels we need to add back\n\n # Delete any labels from the modeled content\n for key in dfp.labels:\n del dfp.labels[key]\n\n # Capture content without labels\n df_content = dfp.content.strip()\n\n # Write the file back out and append the labels to the end\n with open(filename, 'w') as df:\n df.write(\"%s\\n\\n\" % df_content)\n if labels:\n df.write(\"LABEL\")\n for k, v in labels.iteritems():\n df.write(\" \\\\\\n\") # All but the last line should have line extension backslash \"\\\"\n escaped_v = v.replace('\"', '\\\\\"') # Escape any \" with \\\"\n df.write(\" %s=\\\"%s\\\"\" % (k, escaped_v))\n df.write(\"\\n\\n\")", "def load_data_from_config(self):\n\n config_file_name = \"cicada/config/config.yaml\"\n config_dict = None\n self.labels = []\n self.to_add_labels = []\n if os.path.isfile(config_file_name):\n with open(config_file_name, 'r') as stream:\n config_dict = yaml.safe_load(stream)\n print(f\"config_dict {config_dict}\")\n if (config_dict is not None) and config_dict.get(\"dir_name\"):\n self.load_data_from_dir(dir_name=config_dict[\"dir_name\"], method='clear')", "def readyml(filename):\n\n with open(filename, 'r') as f:\n return yaml.load(f.read())", "def load_label_columns(self):\n with open(self.config.labels_local_path, 'r') as f:\n label_columns = yaml.safe_load(f)\n return label_columns", "def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]", "def _load_file(self, f):\n if not os.path.exists(f):\n msg = '%s is a non-existant definition file' % f\n raise ValueError(msg)\n\n with open(f, 'r') as fh:\n return yaml.load(fh.read())", "def load_yaml():\n yamlfullpath = os.path.join(THISDIR, 'ff_data.yaml')\n\n with open(yamlfullpath, 'r') as stream:\n ff_data = yaml.safe_load(stream)\n\n FF_DATA_SCHEMA(ff_data)\n return ff_data", "def load_yaml_file(self, path):\n with path.open('r') as handle:\n data = 
load_yaml(handle)\n\n self.set_all(**self.SCHEMA.load(data).data)", "def get_labels() -> list[Label]:\n\n labels_file = deepcopy(get_data(\"labels.yml\"))\n standard_labels = []\n for group_info in labels_file[\"groups\"]:\n labels = group_info.pop(\"labels\", [])\n group = LabelGroup(**group_info)\n for label_info in labels:\n label = Label(**label_info, group=group)\n standard_labels.append(label)\n for label_info in labels_file[\"standalone\"]:\n label = Label(**label_info)\n standard_labels.append(label)\n return standard_labels", "def readGraphFromYAMLFile(self, filename):\n self.G = nx.read_yaml(filename)\n # TODO: buiild up the indexes !!!", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n return labels", "def _load_data_yaml(self, pathname): \n pathname = self._yaml_extension(pathname)\n\n with open(pathname) as file:\n traj_data = yaml.load(file, Loader=yaml.FullLoader)\n \n return traj_data", "def input_data(self):\n return read_yaml(self.file_path)", "def load_yaml(filename):\n try:\n f = file(filename, 'r')\n data = yaml.load(f)\n return data\n except (IOError, OSError) as e:\n err = e[0]\n reason = e[1]\n error = 'load_yaml: Failed to open {filename}: {reason} {err}'.format(filename=filename, reason=reason, err=err)\n raise IOError(error)", "def load_yaml(file: Text):\n with open(file) as fp:\n return yaml.load(fp, yaml.FullLoader)", "def load_yaml(filename):\n with open(filename) as file:\n yaml = YAML()\n data = yaml.load(file)\n return data, yaml", "def _load_labels(self, label_path: str) -> List[str]:\n with open(label_path, 'r') as f:\n return [line.strip() for _, line in enumerate(f.readlines())]", "def load_labels(filename):\n\n file_path = os.path.join(DATA_DIR, filename)\n with open(file_path, 'rb') as f:\n b = f.read()\n\n magic, n_labels = (struct.unpack('>i', b[i*4:(i+1)*4]) for i in range(2))\n\n assert magic[0] == 2049, \"bad magic number, what do?\"\n\n label_stream = array.array('B', b[8:])\n \n assert len(label_stream) == n_labels[0], \"mismatch in label length\"\n \n # label_stream is actually type array.array, which is iterable surely.\n # i'll convert it anyway...\n return tuple(label_stream)", "def load_yaml(yaml_name):\n print('training network configuration file is {0}'.format(yaml_name))\n util.check_file_exist(yaml_name)\n config = util.load_yaml_file(yaml_name)\n return config", "def loadFromFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n if os.path.exists(path) and os.path.isfile(path):\n self.load(yaml.load(open(path, 'r')))", "def load_yaml_file(i):\n\n import yaml\n\n fn = i['yaml_file']\n\n try:\n if sys.version_info[0] > 2:\n f = open(fn, 'r', encoding='utf8')\n else:\n f = open(fn, 'r')\n except Exception as e:\n return {'return': 16, 'error': 'problem opening YAML file='+fn+' ('+format(e)+')'}\n\n try:\n s = f.read()\n except Exception as e:\n f.close()\n return {'return': 1, 'error': 'problem reading YAML file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n try:\n d = yaml.load(s, Loader=yaml.FullLoader)\n except Exception as e:\n return {'return': 1, 'error': 'problem parsing YAML from file='+fn+' ('+format(e)+')'}\n\n return {'return': 0, 'dict': d}", "def load_file(filename):\n\tlabels = []\n\tdocs = []\n\n\twith 
open(filename) as f:\n\t\tfor line in f:\n\t\t\tcontent = line.split('\\t')\n\n\t\t\tif len(content) > 2:\n\t\t\t\tprint('incorrect read')\n\t\t\t\texit()\n\n\t\t\tif len(content[1]) == 0: continue\n\n\t\t\tdocs.append(str(content[1]).strip('\\r').strip('\\n').strip('\\r\\n'))\n\t\t\tlabels.append(content[0])\n\n\treturn docs, labels", "def read_data(feature_file, label_file):", "def _read_labels(test_data=False):\n if not test_data:\n filename = os.path.join(FOLDER_PATH, 'train-labels.idx1-ubyte')\n else:\n filename = os.path.join(FOLDER_PATH, 't10k-labels.idx1-ubyte')\n if not os.path.exists(filename):\n raise ValueError('The file dose not exist.')\n \n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer([filename])\n \n # The first 8 bytes contain file information:\n # [offset] [type] [value] [description]\n # 0000 32 bit integer 0x00000801(2049) magic number\n # 0004 32 bit integer 60000/10000 number of items \n # ...(label value)\n header_bytes = 8\n # Every record consists of a label, with a fixed number of bytes for each.\n record_bytes = 1\n \n # Create a FixedLengthRecordReader to read record.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes,\n header_bytes=header_bytes)\n _, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8, then cast to int32.\n record = tf.cast(tf.decode_raw(value, tf.uint8), tf.int32)\n \n # Reshape from [1] to a scalar shape [].\n label = tf.reshape(record, [])\n\n return label", "def read_file(filename):\n contents, labels = [], []\n with open_file(filename) as f:\n for line in f:\n try:\n label,content = line.strip().split('\\t')\n contents.append(list(content))\n labels.append(label)\n except:\n pass\n return contents,labels", "def read_label_file(self, label_file_name = None): #completed\n if label_file_name is None:\n label_file_name = self.label_file_name\n try:\n label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)\n return label_data#[:,1], label_data[:,0]#in MATLAB format\n except IOError:\n print \"Unable to open \", label_file_name, \"... Exiting now\"\n sys.exit()" ]
[ "0.6454239", "0.6269596", "0.6258598", "0.62443876", "0.62082666", "0.6189674", "0.6186524", "0.6186325", "0.6183618", "0.61517555", "0.6094851", "0.6079179", "0.6069072", "0.60689235", "0.6063659", "0.60388273", "0.6031207", "0.6030064", "0.60240155", "0.60190016", "0.6007717", "0.6005971", "0.5999894", "0.59960437", "0.59902316", "0.5956467", "0.5953075", "0.59418994", "0.59391683", "0.59298193" ]
0.80432695
0
Normalize label to a standard set. Return a list
def normalize_label(label: str) -> list: global _LABELS if _LABELS is None: _LABELS = read_label_definitions(NORM_LABEL_FILE) if label in _LABELS['translate'].keys(): return [_LABELS['translate'][label]] elif label in _LABELS['label_to_group'].keys(): # Originally: return all (new) labels that came from splitting an old one #return _LABELS['label_to_group'][label] # Now: ignore labels that have been split, so as not to learn false positives return [] else: print("Untranslated: ", label) return [label]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def norm(self, label):\n label_vec = []\n label_value = self.to_int(label)\n for i in range(10):\n if i == label_value:\n label_vec.append(0.9)\n else:\n label_vec.append(0.1)\n return label_vec", "def normalize_labels(labels):\n number_of_labels = len(labels)\n number_of_species = get_number_of_species()\n labels_norm = np.zeros(shape=(number_of_labels, number_of_species))\n for i in range(number_of_labels):\n for label in labels[i]:\n labels_norm[i][label] = 1\n return labels_norm", "def get_labels_decomposed(self) -> List[List[str]]:\n return [list(label) for label in self.labels]", "def get_labels(self) -> Set[str]:", "def normalize_labels(labels):\n new_labels = np.array([-1] * len(labels))\n labels = np.array(labels)\n label_dict = dict()\n for i, label in enumerate(set(labels)):\n new_labels[np.where(labels == label)] = i\n label_dict[i] = label\n return label_dict, new_labels", "def normalize_label(labels):\n max_val = torch.max(labels)\n min_val = torch.min(labels)\n norm_labels = (labels - min_val)/(max_val - min_val)\n return norm_labels", "def get_labels(self):\n return set(category.label for category in\n self.get_categories(LABELS_SCHEME))", "def get_fashion_mnist_labels(labels): #@save\n text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',\n 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']\n return [text_labels[int(i)] for i in labels]", "def consolidate_labels(labels):\n return list(map(RNNOIE_model.consolidate_label , labels))", "def generate_labels():\n label_set = set([])\n for data in load_data():\n label = data.split(' ', 1)[0]\n label_set.add(label)\n labels = list(label_set)\n labels.sort()\n return labels", "def fix_label(self, label):\n if label is None:\n return None\n assert len(label) == len(self.data)\n fixed_labels = []\n for y, x in zip(label, self.data):\n assert len(y) == len(x)\n encode = self.transform_function(' '.join(x))\n fixed_label = list(chain(*[\n [label] + [PAD_TOKEN_LABEL_ID] * (len(self.transform_function.tokenize(word)) - 1)\n for label, word in zip(y, x)]))\n if encode['input_ids'][0] in self.transform_function.all_special_ids:\n fixed_label = [PAD_TOKEN_LABEL_ID] + fixed_label\n fixed_label += [PAD_TOKEN_LABEL_ID] * (len(encode['input_ids']) - len(fixed_label))\n fixed_label = fixed_label[:self.transform_function.max_seq_length]\n fixed_labels.append(fixed_label)\n return fixed_labels", "def format_for_nltk(labels, dataset):\n if len(labels) != len(dataset):\n return []\n return [(v, labels[i]) for i,v in enumerate(dataset)]", "def list_of_labels(self):\n L = np.unique(self.box_label)\n return np.union1d(L, self.geom.list_of_elements_labels())", "def get_labels(self):\n return set(k.label for k in self)", "def build_label_vocab(labels: Iterable[str]):\n labels_set = set()\n for l in labels:\n labels_set.add(l)\n label_list = sorted(list(labels_set))\n return label_list", "def convertLabels(self, labels):\n counter = 0\n numericLabels = []\n for label in labels:\n if label not in self.labelDict:\n self.labelDict[label] = counter\n self.backwards_conversion[counter] = label\n counter += 1\n numericLabels += [self.labelDict[label]]\n return np.array(numericLabels)", "def list(self):\n a0 = map(self._alphabet.unrank, self._labels[0])\n a1 = map(self._alphabet.unrank, self._labels[1])\n return [a0, a1]", "def consolidate_labels(labels):\n return map(RNN_model.consolidate_label , labels)", "def get_possible_labels(Y):\n \n return list(set(itertools.chain(*Y)))", "def normalize_labels(self):\n self.y_mean, self.y_std = 
du.get_mean_std(self.y_train)\n self.y_train = du.normalize(self.y_train, self.y_mean, self.y_std)\n if self.x_test is not None and self.y_test is not None:\n self.y_test = du.normalize(self.y_test, self.y_mean, self.y_std)", "def provide_label(self):\n return [(k, v.shape) for k, v in self.label]", "def format_labels(_labels):\n _ret = []\n if isinstance(_labels, str):\n # put in a list if the label is a string.\n _ret = [_labels]\n elif isinstance(_labels, dict):\n for _key, _item in _labels.items():\n _ret.append(_key.strip().replace(\" \", \"-\").replace(\"_\", \"-\"))\n elif isinstance(_labels, list) or isinstance(_labels, tuple):\n for _item in _labels:\n _ret.append(_item.strip().replace(\" \", \"-\").replace(\"_\", \"-\"))\n return _ret", "def label_data(data):\n if data == 'cat': return [1, 0]\n elif data == 'dog': return [0, 1]", "def format_for_scikit(labels, dataset):\n nd = []\n l = [int(lab) for lab in labels]\n for i in dataset:\n tmp = [int(v) for v in i.values()]\n nd.append(tmp)\n return l,nd", "def get_labels(self):\n return []", "def verbalize_labels(label_dictionary: Dictionary) -> List[Sentence]:\n verbalized_labels = []\n for byte_label, idx in label_dictionary.item2idx.items():\n str_label = byte_label.decode(\"utf-8\")\n if label_dictionary.span_labels:\n # verbalize BIOES labels\n if str_label == \"O\":\n verbalized_labels.append(\"outside\")\n elif str_label.startswith(\"B-\"):\n verbalized_labels.append(\"begin \" + str_label.split(\"-\")[1])\n elif str_label.startswith(\"I-\"):\n verbalized_labels.append(\"inside \" + str_label.split(\"-\")[1])\n elif str_label.startswith(\"E-\"):\n verbalized_labels.append(\"ending \" + str_label.split(\"-\")[1])\n elif str_label.startswith(\"S-\"):\n verbalized_labels.append(\"single \" + str_label.split(\"-\")[1])\n # if label is not BIOES, use label itself\n else:\n verbalized_labels.append(str_label)\n else:\n verbalized_labels.append(str_label)\n return list(map(Sentence, verbalized_labels))", "def standard_labels(self):\n return self._standard_labels", "def get_labels(self):\n return self.labels[1:]", "def get_unique_semantic_labels() -> Set[int]:\n idxs = set()\n data = SUNRGBDTrainDataset(True)\n for i in range(len(data)):\n idxs.update([x.item() for x in t.unique(data[i][1])])\n return idxs", "def transform_labels(self, labels):\n # Fallback:\n # return self.encoder.transform(labels)\n classes = list(self.classes_())\n return [classes.index(label) for label in labels]" ]
[ "0.70936733", "0.67618287", "0.6497166", "0.64950365", "0.62931526", "0.6270191", "0.6227273", "0.6212816", "0.61797905", "0.6102589", "0.60916936", "0.608626", "0.60579926", "0.6032182", "0.59783614", "0.59651196", "0.5954803", "0.5883055", "0.58621407", "0.58589786", "0.5820652", "0.57948774", "0.57514125", "0.5744008", "0.5740265", "0.5734793", "0.5719961", "0.5710242", "0.56770194", "0.56663924" ]
0.7416236
0
Convert a label to start and end of a special symbol to use as input or output for encoder/decoder
def label_to_symbol(label: str, all_labels: list) -> str: index = all_labels.index(label) in_symbol = f"[i-{index}]" out_symbol = f"[o-{index}]" return in_symbol, out_symbol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def symbol_to_label(symbol: str, all_labels: list) -> str:\n m = re.search(\"[i-(\\d+)]\", symbol)\n n = re.search(\"[o-(\\d+)]\", symbol)\n if m is None and n is None:\n raise ValueError(f\"Symbol {symbol} fails to match symbol regex\")\n elif m is not None:\n return all_labels[m.group(1)]\n else:\n return all_labels[n.group(1)]", "def _in_out_label_(self):\n return \"%s|%s\" % (FSMWordSymbol(self.word_in),\n FSMWordSymbol(self.word_out))", "def normalisesym(self, label):\n return label", "def read_label(self):\r\n # label = str(self.parse_binary())#!!BAD\r\n label = ''\r\n while True:\r\n c = self.eat_char()\r\n if c=='n':\r\n #terminal char\r\n break\r\n else:\r\n label += c\r\n\r\n self.log += \"'\" + label + \"'\"\r\n return label", "def encode_label(label: str) -> int:\n\tif not label:\n\t\treturn 0\n\t# part after letter if it has a number, otherwise 1\n\tindex = int(label[1:]) if len(label) > 1 else 1\n\t# A = 1, B = 2, ... E = 5\n\toffset = ord(label[0]) - ord(\"A\") + 1\n\t# compute label number\n\treturn (index - 1) * 5 + offset", "def letter_for(label):\n return \"ABCDEFGHIJ\"[label]", "def _symbols_of_input(label: str) -> List[str]:\n if label == common.EPSILON:\n return [label]\n\n # We add a state transition arc for each digit of a multi-digit number.\n if \"[\" not in label:\n return list(label)\n\n # We add a state transition arc for each inflectional or derivational\n # morpheme, inflectional group boundary, and proper noun analysis tag.\n return _SYMBOLS_REGEX.findall(label)", "def labelify(op):\n return re.sub(r'[\\/\\.]', '_', op)", "def fromLabel(self, label):\n indices = grids.ringPosFromRingLabel(label)\n\n if indices is None:\n # arbitrary label. excore.\n # set it up as if it's a freshly initialized one\n self.axial = None\n self.i1 = None\n self.i2 = None\n self.label = label\n else:\n self.i1, self.i2, self.axial = indices\n\n self.getFirstChar()\n self.makeLabel()", "def enlabel(mi_, ma_):\n\treturn \"Unicode characters from {} to {} codepoints\".format(mi_, ma_)", "def character_to_label(character):\n \n character_label = all_characters.find(character)\n \n return character_label", "def _createLabel(element, a, state):\n # len(e.symbol) is 1 or 2 => a % (either 1000 or 100)\n # => gives exact a, or last two digits.\n # the division by 10 removes the last digit.\n firstTwoDigits = (a % (10 ** (4 - len(element.symbol)))) // 10\n # the last digit is either 0-9 if state=0, or A-J if state=1, or K-T if state=2, or U-d if state=3\n lastDigit = (\n \"0123456789\" \"ABCDEFGHIJ\" \"KLMNOPQRST\" \"UVWXYZabcd\"[(a % 10) + state * 10]\n )\n return \"{}{}{}\".format(element.symbol, firstTwoDigits, lastDigit)", "def _symbols_of_output(label: str) -> List[str]:\n if label == common.EPSILON:\n return [label]\n\n # We add a new state transition arc for each character of the output token.\n return list(label)", "def _make_transition(token_namespace, reserved_syntax_strings, label):\n if label[0].isalpha():\n # A named token (e.g. NAME, NUMBER, STRING)\n return getattr(token_namespace, label)\n else:\n # Either a keyword or an operator\n assert label[0] in ('\"', \"'\"), label\n assert not label.startswith('\"\"\"') and not label.startswith(\"'''\")\n value = literal_eval(label)\n try:\n return reserved_syntax_strings[value]\n except KeyError:\n r = reserved_syntax_strings[value] = ReservedString(value)\n return r", "def makeLabel(self):\n\n self.setIndexNames()\n\n if self.isInCore():\n self.getFirstChar()\n else:\n # stick with what we have. 
(default:ExCore)\n return\n self.label = self.firstChar + \"{0:03d}\".format(self.i2)\n if self.axial is not None:\n # add axial letter\n self.label = self.label + AXIAL_CHARS[self.axial]", "def parse(cls, label) -> Any:\n return label", "def build_label_transform():\n\n return NALabelEncoder()", "def make_label(self, node):\n\t\tcurstring = str(node.__class__)[13:-2]\n\t\tif isinstance(node, ast.Name):\n\t\t\tcurstring = node.id\n\t\telif isinstance(node, ast.Num):\n\t\t\tcurstring = str(node.n)\n\t\telif isinstance(node, ast.Str):\n\t\t\tcurstring = node.s\n\n\t\tif isinstance(node, ast.Load) or isinstance(node, ast.Store) or \\\n\t\t\tisinstance(node, ast.Param) or isinstance(node, ast.Add) or \\\n\t\t\tisinstance(node, ast.Sub) or isinstance(node, ast.Mult):\n\t\t\treturn None\n\n\t\ttry:\n\t\t\tself.labels[str(node)] = curstring\n\t\t\treturn str(node)\n\t\texcept AttributeError:\n\t\t\treturn None", "def label_repr(label):\n return label.replace(',', r'\\,').replace(' ', r'\\ ').replace('=', r'\\=')", "def getLabel2(*args):", "def getLabel2(*args):", "def handle_labels(ls):\r\n\r\n # assign each line a number\r\n line_num = {}\r\n counter = 0\r\n for i in ls:\r\n if not i.startswith('('):\r\n line_num[i] = counter\r\n counter += 1\r\n else:\r\n sb = i[1:-1]\r\n line_num[sb] = counter\r\n\r\n # replace @XXX with number\r\n var_address = 16\r\n mem = {}\r\n for i in range(len(ls)):\r\n if ls[i].startswith('@'):\r\n # if @XXX is already in numeral form, do nothing\r\n if ls[i][1:].isdigit():\r\n pass\r\n\r\n # replace with pre-defined symbols if found\r\n elif pre_defined_sb.get(ls[i][1:]) is not None:\r\n ls[i] = '@' + pre_defined_sb[ls[i][1:]]\r\n\r\n # replace by (XXX) line number if search failed\r\n elif line_num.get(ls[i][1:]) is not None:\r\n ls[i] = '@' + str(line_num[ls[i][1:]])\r\n\r\n # else must be user defined variable\r\n # assign same address for same variable\r\n else:\r\n if ls[i] not in mem:\r\n mem[ls[i]] = '@' + str(var_address)\r\n ls[i] = '@' + str(var_address)\r\n var_address += 1\r\n else:\r\n ls[i] = mem[ls[i]]\r\n\r\n # remove (XXX)'s\r\n ls = list(filter(lambda x: not x.startswith('('), ls))\r\n\r\n return ls", "def parse_symbol():\n nonlocal idx\n ident = \"\"\n while idx < len(source) and not terminal(source[idx]):\n ident += source[idx]\n idx += 1\n idx -= 1 # Backtrack, bc last character is *invalid* and loop assumes we stop on a valid token character\n return ident", "def parse(sentence,label_sentence,sign):\n span = []\n start = None\n for index, word in enumerate(sentence):\n if word==B_token:\n start = index\n elif word==S_token:\n # if ''.join(label_sentence[index:index+1]) in kb_set: ## 在数据库中发现实体名\n # span.append((index, index+1))\n # start = None\n # else:\n # start = None\n span.append((index, index + 1))\n start = None\n elif word==E_token and start is not None:\n end = index\n # if ''.join(label_sentence[start:end + 1]) in kb_set:\n # span.append((start, end+1))\n # start = None\n # else:\n # start = None\n span.append((start, end + 1))\n start = None\n # 相邻两entity可以合并则合并\n if len(span) <= 1 or sign == 'label':\n return span\n new_span = []\n for i in range(len(span)-1):\n if span[i][1]==span[i+1][0] and ''.join(label_sentence[span[i][0]:span[i+1][1]]) in kb_set:\n new_span.append((span[i][0], span[i+1][1]))\n if i == len(span)-2:\n return new_span\n else:\n new_span.append((span[i][0], span[i][1]))\n new_span.append((span[-1][0], span[-1][1]))\n return new_span", "def _get_axis_label(\n self,\n label: float | str | Mobject,\n axis: Mobject,\n 
edge: Sequence[float],\n direction: Sequence[float],\n buff: float = SMALL_BUFF,\n ) -> Mobject:\n\n label = self.x_axis._create_label_tex(label)\n label.next_to(axis.get_edge_center(edge), direction=direction, buff=buff)\n label.shift_onto_screen(buff=MED_SMALL_BUFF)\n return label", "def literal_symbol(literal):\n if literal.op == '~':\n return literal.args[0]\n else:\n return literal", "def cleanLabel(label):\n if label.startswith('^'):\n label = label[1:] + '_prexisting'\n label = label.replace('?', 'Q') # happens with mRnaCompare filter labels\n return label", "def decodeName(self, last=-1):\n label = []\n done = False\n while not done:\n (length,) = self.unpack(\"!B\")\n if getBits(length, 6, 2) == 3:\n # Pointer\n self.offset -= 1\n pointer = getBits(self.unpack(\"!H\")[0], 0, 14)\n save = self.offset\n if last == save:\n raise BufferError(\n \"Recursive pointer [offset=%d,pointer=%d,length=%d]\" %\n (self.offset, pointer, len(self.data))\n )\n if pointer < self.offset:\n self.offset = pointer\n else:\n # Pointer can't point forwards\n raise BufferError(\n \"Invalid pointer [offset=%d,pointer=%d,length=%d]\" %\n (self.offset, pointer, len(self.data))\n )\n label.extend(self.decodeName(save).label)\n self.offset = save\n done = True\n else:\n if length > 0:\n l = self.get(length)\n try:\n l.decode()\n except UnicodeDecodeError:\n raise BufferError(\"Invalid label <%s>\" % l)\n label.append(l)\n else:\n done = True\n return \".\".join(str(label))", "def parse(sentence,label_sentence,sign):\n span = []\n start = None\n for index, word in enumerate(sentence):\n if word==B_token:\n start = index\n elif word==S_token:\n # if ''.join(label_sentence[index:index+1]) in kb_set: ## 在数据库中发现实体名\n # span.append((index, index+1))\n # start = None\n # else:\n # start = None\n span.append((index, index + 1))\n start = None\n elif word == E_token and start is not None:\n end = index\n # if ''.join(label_sentence[start:end + 1]) in kb_set:\n # span.append((start, end+1))\n # start = None\n # else:\n # start = None\n span.append((start, end + 1))\n start = None\n # elif word==E_token and start is not None:\n # end = index\n # if ''.join(label_sentence[start:end + 1]) in kb_set:\n # span.append((start, end+1))\n # start = None\n # else:\n # start = None\n # 相邻两entity可以合并则合并\n if len(span) <= 1 or sign == 'label':\n return span\n new_span = []\n for i in range(len(span)-1):\n if span[i][1]==span[i+1][0] and ''.join(label_sentence[span[i][0]:span[i+1][1]]) in kb_set:\n new_span.append((span[i][0], span[i+1][1]))\n if i == len(span)-2:\n return new_span\n else:\n new_span.append((span[i][0], span[i][1]))\n new_span.append((span[-1][0], span[-1][1]))\n return new_span", "def get_label(self, offset):\n self.ret = idc.GetDisasm(offset).replace(\"extrn \", \"\").split(\":\")[0]\n return self.ret" ]
[ "0.6479082", "0.61380243", "0.60909307", "0.59870726", "0.594325", "0.5936012", "0.5928955", "0.5867715", "0.58144397", "0.5803089", "0.5756134", "0.5675025", "0.56665194", "0.5643463", "0.56145877", "0.55713457", "0.5540174", "0.5500176", "0.54963714", "0.5480792", "0.5480792", "0.54697925", "0.5409293", "0.539701", "0.5392312", "0.53787315", "0.53673035", "0.5345983", "0.53233594", "0.5322323" ]
0.71865237
0
Test case for create_container
def test_create_container(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create(set_env, container: Container):\n # pylint: disable=unused-argument\n assert container\n assert isinstance(container, Container)", "def test001_create_containers(self):\n self.log('%s STARTED' % self._testID)\n\n self.log('Create a two container on that node, should succeed.')\n self.cont1_name = self.random_string()\n self.containers = {self.cont1_name: {'hostname': self.cont1_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}}\n\n self.cont2_name = self.random_string()\n self.containers.update({self.cont2_name: {'hostname': self.cont2_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}})\n\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(type(res), type(dict()))\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['install'])\n self.wait_for_service_action_status(self.cont2_name, res[self.cont2_name]['install'])\n\n self.log('Check that the container have been created.')\n conts = self.zos_client.container.list()\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont2_name])\n cont1 = [c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name][0]\n self.assertTrue(cont1['container']['arguments']['storage'], self.cont_storage)\n self.assertTrue(cont1['container']['arguments']['root'], self.cont_flist)\n self.assertTrue(cont1['container']['arguments']['hostname'], self.cont_flist)\n\n self.log('%s ENDED' % self._testID)", "def test_create_container_privilege(self):\n pass", "def test_get_container(self):\n pass", "def ddtest_create_generic_container_w_name(self, name=None):\n container_resp = self.behaviors.create_container(name, 'generic', [])\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name=name, type='generic')", "def _create_container(self, container_name):\n try:\n container = self.swift.head_container(container_name)\n except client.ClientException:\n self.swift.put_container(container_name)\n else:\n return container", "def test_create(self):\n\n cont_num = len(CLIENT.containers_list)\n\n message = {\"method\": \"create\",\n \"params\": {\"elem\": self.tag_image}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"create\")\n self.assertIsInstance(response[\"result\"], list)\n self.assertEqual(len(response[\"result\"]), cont_num + 1)", "def create_container(ContainerName=None, Tags=None):\n pass", "def test_destroy_container(self):\n pass", "def _process_createContainer(self, data):\r\n try:\r\n self._avatar.createContainer(data['containerTag'],\r\n data.get('containerData', {}))\r\n except KeyError as e:\r\n raise InvalidRequest(\"Can not process 'CreateContainer' request. 
\"\r\n 'Missing key: {0}'.format(e))", "def ddtest_create_generic_container_w_empty_or_null_name(self, name=None):\n if name is None:\n self._skip_on_issue('launchpad', '1354767')\n\n container_resp = self.behaviors.create_container(name, 'generic', [])\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name=container_resp.id, type='generic')", "def post(self, run=False, **container_dict):\n context = pecan.request.context\n compute_api = pecan.request.compute_api\n policy.enforce(context, \"container:create\",\n action=\"container:create\")\n\n try:\n run = strutils.bool_from_string(run, strict=True)\n except ValueError:\n msg = _('Valid run values are true, false, 0, 1, yes and no')\n raise exception.InvalidValue(msg)\n try:\n container_dict['tty'] = strutils.bool_from_string(\n container_dict.get('tty', False), strict=True)\n container_dict['stdin_open'] = strutils.bool_from_string(\n container_dict.get('stdin_open', False), strict=True)\n except ValueError:\n msg = _('Valid tty and stdin_open values are ''true'', '\n '\"false\", True, False, \"True\" and \"False\"')\n raise exception.InvalidValue(msg)\n\n # NOTE(mkrai): Intent here is to check the existence of image\n # before proceeding to create container. If image is not found,\n # container create will fail with 400 status.\n images = compute_api.image_search(context, container_dict['image'],\n True)\n if not images:\n raise exception.ImageNotFound(container_dict['image'])\n container_dict['project_id'] = context.project_id\n container_dict['user_id'] = context.user_id\n name = container_dict.get('name') or \\\n self._generate_name_for_container()\n container_dict['name'] = name\n if container_dict.get('memory'):\n container_dict['memory'] = \\\n str(container_dict['memory']) + 'M'\n if container_dict.get('restart_policy'):\n self._check_for_restart_policy(container_dict)\n container_dict['status'] = fields.ContainerStatus.CREATING\n new_container = objects.Container(context, **container_dict)\n new_container.create(context)\n\n if run:\n compute_api.container_run(context, new_container)\n else:\n compute_api.container_create(context, new_container)\n # Set the HTTP Location Header\n pecan.response.location = link.build_url('containers',\n new_container.uuid)\n pecan.response.status = 202\n return view.format_container(pecan.request.host_url, new_container)", "def test_show_container(self):\n pass", "def createContainer(tag, data={}): #@NoSelf", "def __create_cont(self, path, filesystem, cont_stat, component_number):\n try:\n self.logger.debug('Create container interface called')\n status_obj = Status()\n cont_id = \"container\"\n #cont_id = get_container_id()\n tmp_path = '%s/%s/%s/%s/%s' % (self.__fs_base, \\\n filesystem, TMPDIR, cont_id,component_number)\n self.asyn_helper.call(\"create_container\", \\\n tmp_path, path, cont_stat, status_obj)\n return status_obj\n except Exception as err:\n self.logger.error(('create_container for %(con_dir)s failed ',\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path, \n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def ensure_container():\n return exec_fn(_init_container)", "def test003_start_stop_container(self):\n\n self.log('%s STARTED' % self._testID)\n\n self.log('Create a container (C1)')\n self.cont1_name = self.random_string()\n self.containers = {self.cont1_name: {'hostname': self.cont1_name,\n 'flist': 
self.cont_flist,\n 'storage': self.cont_storage}}\n\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(type(res), type(dict()))\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['install'])\n\n self.log('Stop container C1, should succeed.')\n temp_actions = {'container': {'actions': ['stop'], 'service': self.cont1_name}}\n res = self.create_container(containers=self.containers, temp_actions=temp_actions)\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['stop'])\n\n self.log('Check if the container has been terminated, should succeed')\n conts = self.zos_client.container.list()\n self.assertFalse([c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n\n self.log('Start container C1, should succeed.')\n temp_actions = {'container': {'actions': ['start'], 'service': self.cont1_name}}\n res = self.create_container(containers=self.containers, temp_actions=temp_actions)\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['start'])\n\n self.log('Check if the container has been started, should succeed')\n conts = self.zos_client.container.list()\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n\n self.log('%s ENDED' % self._testID)", "def test002_create_container_with_all_possible_params(self):\n self.log('%s STARTED' % self._testID)\n\n self.log('Create a container without providing flist parameter, should fail.')\n self.cont1_name = self.random_string()\n self.containers = {self.cont1_name: {}}\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(res, \"parameter 'flist' not valid: \")\n\n self.log('Create a container with all possible parameters.')\n self.cont1_name = self.random_string()\n bridge_name = self.random_string()\n env_name = self.random_string()\n env_value = self.random_string()\n self.containers = {self.cont1_name: {'hostname': self.cont1_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage,\n 'env': {'name': env_name, 'value': env_value},\n 'ports': ['8080:80'],\n 'privileged': True,\n 'nics': [{'type': 'default'},\n {'type': 'bridge', 'id': bridge_name}],\n 'hostNetworking': True}}\n\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(type(res), type(dict()))\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['install'])\n\n self.log('Check if the parameters have been reflected correctly')\n conts = self.zos_client.container.list()\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n\n (cont1_id, cont1) = [c for c in conts.items() if c[1]['container']['arguments']['name'] == self.cont1_name][0]\n cont1_cl = self.zos_client.container.client(cont1_id)\n self.assertTrue(cont1_cl.bash('echo $%s' % env_name).get().stdout.strip(), env_value)\n self.assertTrue(cont1['container']['arguments']['host_network'], True)\n self.assertTrue(cont1['container']['arguments']['port'], {'8080': 80})\n\n nics = cont1['container']['arguments']['nics']\n nic = [nic for nic in nics if nic['type'] == 'bridge'][0]\n self.assertTrue(len(nics), 2)\n self.assertEqual(nic['id'], bridge_name)\n\n self.log('%s ENDED' % self._testID)", "def test_add_container(self):\n with DockerHost('host', dind=False) as host:\n # Create a container with --net=none, add a calico interface to\n # it then 
check felix programs a route.\n node = host.create_workload(\"node\", network=NET_NONE)\n host.calicoctl(\"container add %s 192.168.1.1\" % node)\n\n # Create the profile, get the endpoint IDs for the containers and\n # add the profile to the endpoint so felix will pick it up.\n host.calicoctl(\"profile add TEST_GROUP\")\n ep = host.calicoctl(\"container %s endpoint-id show\" % node)\n host.calicoctl(\"endpoint %s profile set TEST_GROUP\" % ep)\n\n # Wait for felix to program down the route.\n check_route = partial(host.execute,\n \"ip route | grep '192\\.168\\.1\\.1'\")\n retry_until_success(check_route, ex_class=CalledProcessError)", "def test_container_cycle(self):\n # Before Create\n print(\"Create\")\n rep = post(self.url + \"/search\", data={'name': name})\n self.errorCatch(rep)\n\n # Create\n rep = post(self.url + \"/create\", data={\n 'image': default_image,\n 'homepath': \"/nashome/guest/test\",\n 'naspath': \"/home/nas/test\",\n 'command': \"tail -f /dev/null\",\n 'name': name})\n self.checkRunning()\n\n # Double create\n rep = post(self.url + \"/create\", data={\n 'image': default_image,\n 'homepath': \"/nashome/guest/test\",\n 'naspath': \"/home/nas/test\",\n 'command': \"tail -f /dev/null\",\n 'name': name})\n self.errorCatch(rep)\n\n # Check by api\n con = client.containers.get(name)\n self.assertIn(\"tmp0\", con.exec_run(\"ls /home/nas\").output.decode())\n self.assertIn(\"tmp1\", con.exec_run(\"ls /home/ubuntu\").output.decode())\n self.assertEqual(con.status, \"running\")\n\n # Stop\n con.exec_run(\"touch /opt/tmp2\").output.decode()\n print(\"Stop\")\n rep = post(self.url + \"/stop\", data={'name': name})\n self.checkOK(rep)\n\n # check stop\n rep = post(self.url + \"/search\", data={'name': name})\n self.checkOK(rep)\n rep = rep.json()\n self.assertIsInstance(rep[\"data\"], dict)\n self.assertEqual(rep['data']['status'], \"exited\")\n\n # start\n print(\"Resume\")\n rep = post(self.url + \"/start\", data={'name': name})\n self.checkOK(rep)\n self.checkRunning()\n con = client.containers.get(name)\n self.assertIn(\"tmp2\", con.exec_run(\"ls /opt\").output.decode())\n\n # change pw\n print(\"Change Password\")\n con.exec_run(\"adduser ubuntu\")\n rep = post(self.url + \"/passwd\", data={'name': name,\n 'pw': \"tmpPW\"})\n self.checkOK(rep)\n self.assertIn(\"tmpPW\", con.exec_run(\"cat /etc/shadow\").output.decode())\n\n # commit\n print(\"Commit\")\n rep = post(self.url + \"/commit\", data={'name': name,\n 'newname': name})\n self.checkOK(rep)\n\n # search image\n rep = post(self.url + \"/search/image\", data={'name': name})\n rep = rep.json()\n self.assertIsInstance(rep['data'], dict)\n\n # delete\n print(\"Delete\")\n rep = post(self.url + \"/delete\", data={'name': name})\n self.checkOK(rep)\n\n # check delete\n rep = post(self.url + \"/search\", data={'name': name})\n self.errorCatch(rep)\n\n # Delete Image\n print(\"Delete Image\")\n rep = post(self.url + \"/delete/image\", data={'name': name})\n self.checkOK(rep)\n\n # Check if delete it\n rep = post(self.url + \"/search/image\", data={'name': name})\n self.errorCatch(rep)", "def test_rackspace_uploader_creates_container(self, mock, mock2):\r\n with patch('pybossa.uploader.rackspace.pyrax.cloudfiles') as mycf:\r\n mycf.get_container.side_effect = NoSuchContainer\r\n mycf.create_container.return_value = True\r\n mycf.make_container_public.return_value = True\r\n u = RackspaceUploader()\r\n res = u.init_app(self.flask_app)\r\n err_msg = \"Init app should return the container.\"\r\n assert res is True, err_msg", "def 
test_containers(self):\n\n message = {\"method\": \"containers\"}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"containers\")\n self.assertIsInstance(response[\"result\"], list)\n self.assertNotEqual(len(response[\"result\"]), 0)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")", "def container(name, ostemplate, **kwargs):\n if not openvz.exists(name):\n ctid = openvz.get_available_ctid()\n openvz.create(ctid, ostemplate=ostemplate, **kwargs)\n openvz.set(ctid, name=name)\n return Container(name)", "def test_create_container_w_invalid_type(self):\n container_resp = self.behaviors.create_container(\n 'name', 'bad_type', [])\n self.assertEqual(container_resp.status_code, 400)", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def create_containers(self, containers, script, arguments):\n try:\n self.verify_execution_status()\n except Exception:\n self.log_stack_trace(traceback.format_exc())\n self.log_message(\"ERROR: Could not verify execution mode status.\")\n return\n\n more_than_one = len(containers) > 1\n created_containers = []\n for container in containers:\n my_script = os.path.join(container.directory, script)\n try:\n container.create(my_script, arguments, more_than_one)\n created_containers.append(container)\n except Exception:\n self.log_message(\n f\"ERROR: Could not create container {container.name}\"\n f\" with image {container.image}\"\n )\n self.log_stack_trace(traceback.format_exc())\n\n # Failing to create a container is a critical error.\n # Try to clean up any containers we successfully created, then raise.\n for c in created_containers:\n try:\n c.cleanup_container()\n except Exception:\n pass\n raise" ]
[ "0.8292587", "0.81556135", "0.78325146", "0.7654203", "0.7513342", "0.7463807", "0.72975534", "0.7197016", "0.71869355", "0.7110519", "0.70905244", "0.7059516", "0.70520085", "0.7025099", "0.6996306", "0.69933844", "0.6968166", "0.6955092", "0.69229496", "0.6905665", "0.6898184", "0.6825606", "0.68175447", "0.6813769", "0.679635", "0.679635", "0.679635", "0.679635", "0.679635", "0.67336637" ]
0.9447695
0
Test case for create_container_privilege
def test_create_container_privilege(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_show_container_privilege(self):\n pass", "def test_update_container_privilege(self):\n pass", "def test_destroy_container_privilege(self):\n pass", "def test_create_container(self):\n pass", "def test_index_container_privileges(self):\n pass", "def test_create(set_env, container: Container):\n # pylint: disable=unused-argument\n assert container\n assert isinstance(container, Container)", "def test001_create_containers(self):\n self.log('%s STARTED' % self._testID)\n\n self.log('Create a two container on that node, should succeed.')\n self.cont1_name = self.random_string()\n self.containers = {self.cont1_name: {'hostname': self.cont1_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}}\n\n self.cont2_name = self.random_string()\n self.containers.update({self.cont2_name: {'hostname': self.cont2_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}})\n\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(type(res), type(dict()))\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['install'])\n self.wait_for_service_action_status(self.cont2_name, res[self.cont2_name]['install'])\n\n self.log('Check that the container have been created.')\n conts = self.zos_client.container.list()\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont2_name])\n cont1 = [c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name][0]\n self.assertTrue(cont1['container']['arguments']['storage'], self.cont_storage)\n self.assertTrue(cont1['container']['arguments']['root'], self.cont_flist)\n self.assertTrue(cont1['container']['arguments']['hostname'], self.cont_flist)\n\n self.log('%s ENDED' % self._testID)", "def test_create_namespaced_role(self):\n pass", "def test_vault_create_authorization_for_vault_section(self):\n pass", "def test_create_cluster_policy(self):\n pass", "def test_create_cluster_role(self):\n pass", "def test_create_event_route_with_correct_privileges(self):\n resp = self.request_with_role('/admin/events/create', role='editor')\n self.assertEqual(resp.status_code, 200)", "def test_add_and_remove_privilege(self):\n\n self.create_common_users_and_groups()\n\n sgp = SetGroupPrivilegesAPI(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n sgp.add_privilege(\n groups=[\"Group 1\", \"Group 2\"], privilege=Privileges.CAN_USE_SPOTIQ\n )", "def test_create_namespaced_pod_security_policy_review(self):\n pass", "def ddtest_create_container_w_secret_name(self, name=None):\n # create a container with a particular secret name\n responses = self.behaviors.create_container_with_secret(\n name='name', secret_name=name)\n secret_resp, container_resp = responses\n self._check_container_create_response(container_resp)\n\n # verify the container exists with the expected data\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name='name', type='generic',\n num_secrets=1)\n\n # verify the secret's name is returned correctly\n secret_ref = get_resp.entity.secret_refs[0]\n self.assertEqual(secret_ref.name, name)", "def test_create_hyperflex_cluster_storage_policy(self):\n pass", "def test_create_rsa_container_w_no_passphrase(self):\n secret_urls = self.secret_behaviors.create_n_secrets(2)\n container_resp = 
self.behaviors.create_rsa_container(\n 'name', secret_urls[0], secret_urls[1])\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name='name', type='rsa')", "def test_create_container_w_null_secret_name(self):\n responses = self.behaviors.create_container_with_secret(\n name='name', secret_name=None)\n secret_resp, container_resp = responses\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name='name', type='generic',\n num_secrets=1)\n\n # verify the secret's name is returned correctly\n secret_ref = get_resp.entity.secret_refs[0]\n self.assertEqual(secret_ref.name, None)", "def test_create_hyperflex_vcenter_config_policy(self):\n pass", "def test_createsu_command(self):\n # TODO: Write tests for when there is no superuser.\n # This seemed to not work when using this command on PythonAnywhere the first time\n pass", "def ddtest_create_generic_container_w_name(self, name=None):\n container_resp = self.behaviors.create_container(name, 'generic', [])\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name=name, type='generic')", "def test_destroy_container(self):\n pass", "def post(self, run=False, **container_dict):\n context = pecan.request.context\n compute_api = pecan.request.compute_api\n policy.enforce(context, \"container:create\",\n action=\"container:create\")\n\n try:\n run = strutils.bool_from_string(run, strict=True)\n except ValueError:\n msg = _('Valid run values are true, false, 0, 1, yes and no')\n raise exception.InvalidValue(msg)\n try:\n container_dict['tty'] = strutils.bool_from_string(\n container_dict.get('tty', False), strict=True)\n container_dict['stdin_open'] = strutils.bool_from_string(\n container_dict.get('stdin_open', False), strict=True)\n except ValueError:\n msg = _('Valid tty and stdin_open values are ''true'', '\n '\"false\", True, False, \"True\" and \"False\"')\n raise exception.InvalidValue(msg)\n\n # NOTE(mkrai): Intent here is to check the existence of image\n # before proceeding to create container. 
If image is not found,\n # container create will fail with 400 status.\n images = compute_api.image_search(context, container_dict['image'],\n True)\n if not images:\n raise exception.ImageNotFound(container_dict['image'])\n container_dict['project_id'] = context.project_id\n container_dict['user_id'] = context.user_id\n name = container_dict.get('name') or \\\n self._generate_name_for_container()\n container_dict['name'] = name\n if container_dict.get('memory'):\n container_dict['memory'] = \\\n str(container_dict['memory']) + 'M'\n if container_dict.get('restart_policy'):\n self._check_for_restart_policy(container_dict)\n container_dict['status'] = fields.ContainerStatus.CREATING\n new_container = objects.Container(context, **container_dict)\n new_container.create(context)\n\n if run:\n compute_api.container_run(context, new_container)\n else:\n compute_api.container_create(context, new_container)\n # Set the HTTP Location Header\n pecan.response.location = link.build_url('containers',\n new_container.uuid)\n pecan.response.status = 202\n return view.format_container(pecan.request.host_url, new_container)", "def test_get_container(self):\n pass", "def ensure_container():\n return exec_fn(_init_container)", "def _process_createContainer(self, data):\r\n try:\r\n self._avatar.createContainer(data['containerTag'],\r\n data.get('containerData', {}))\r\n except KeyError as e:\r\n raise InvalidRequest(\"Can not process 'CreateContainer' request. \"\r\n 'Missing key: {0}'.format(e))", "def test_create_rsa_container_w_no_private_key(self):\n secret_urls = self.secret_behaviors.create_n_secrets(1)\n secret_refs = [SecretRef(name='private_key', ref=secret_urls[0])]\n\n container_resp = self.behaviors.create_rsa_container(\n 'name', 'rsa', secret_refs)\n self.assertEqual(container_resp.status_code, 400)", "def test_create_owner(self):\n url = reverse(\n 'projectroles:api_role_create',\n kwargs={'project': self.project.sodar_uuid},\n )\n post_data = {\n 'role': PROJECT_ROLE_OWNER,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_create_resource_group(self):\n pass", "def test002_create_container_with_all_possible_params(self):\n self.log('%s STARTED' % self._testID)\n\n self.log('Create a container without providing flist parameter, should fail.')\n self.cont1_name = self.random_string()\n self.containers = {self.cont1_name: {}}\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(res, \"parameter 'flist' not valid: \")\n\n self.log('Create a container with all possible parameters.')\n self.cont1_name = self.random_string()\n bridge_name = self.random_string()\n env_name = self.random_string()\n env_value = self.random_string()\n self.containers = {self.cont1_name: {'hostname': self.cont1_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage,\n 'env': {'name': env_name, 'value': env_value},\n 'ports': ['8080:80'],\n 'privileged': True,\n 'nics': [{'type': 'default'},\n {'type': 'bridge', 'id': bridge_name}],\n 'hostNetworking': True}}\n\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(type(res), type(dict()))\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['install'])\n\n self.log('Check if the parameters have been reflected correctly')\n conts = self.zos_client.container.list()\n self.assertTrue([c for 
c in conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n\n (cont1_id, cont1) = [c for c in conts.items() if c[1]['container']['arguments']['name'] == self.cont1_name][0]\n cont1_cl = self.zos_client.container.client(cont1_id)\n self.assertTrue(cont1_cl.bash('echo $%s' % env_name).get().stdout.strip(), env_value)\n self.assertTrue(cont1['container']['arguments']['host_network'], True)\n self.assertTrue(cont1['container']['arguments']['port'], {'8080': 80})\n\n nics = cont1['container']['arguments']['nics']\n nic = [nic for nic in nics if nic['type'] == 'bridge'][0]\n self.assertTrue(len(nics), 2)\n self.assertEqual(nic['id'], bridge_name)\n\n self.log('%s ENDED' % self._testID)" ]
[ "0.819603", "0.80356145", "0.78542966", "0.74272037", "0.6985518", "0.64949703", "0.6320619", "0.610004", "0.6066635", "0.603656", "0.60300285", "0.5930474", "0.59184194", "0.59108776", "0.5893975", "0.5884014", "0.5834234", "0.58311236", "0.57651585", "0.57612944", "0.5736942", "0.57049096", "0.5686702", "0.5681124", "0.5676748", "0.56749666", "0.56742895", "0.56622744", "0.5651561", "0.56367457" ]
0.9525998
0
Test case for destroy_container
def test_destroy_container(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_destroy_container_privilege(self):\n pass", "def delete_container(self, container: Container):", "def test_destroy(set_env, container: Container, docker_client: DockerClient):\n # pylint: disable=unused-argument\n assert container\n\n from dockerdb.commands.destroy import destroy\n\n destroy()\n\n with pytest.raises(NotFound):\n docker_client.containers.get(container_id=container.name)", "def destroyContainer(tag): #@NoSelf", "def test_delete_generic_container(self):\n container_resp = self.behaviors.create_container('name', 'generic', [])\n self._check_container_create_response(container_resp)\n\n # delete container and check the response\n del_resp = self.behaviors.delete_container(container_resp.ref)\n self.assertEqual(del_resp.status_code, 204)\n\n # check the container is actually deleted\n get_resp = self.container_client.get_container(container_resp.ref)\n self.assertEqual(get_resp.status_code, 404)", "def test_create_container(self):\n pass", "def _process_destroyContainer(self, data):\r\n try:\r\n self._avatar.destroyContainer(data['containerTag'])\r\n except KeyError as e:\r\n raise InvalidRequest(\"Can not process 'DestroyContainer' request. \"\r\n 'Missing key: {0}'.format(e))", "def destroy_lxd_container(container):\n\n if type(container) is bool:\n return\n\n name = container.name\n debug(\"Destroying container {}\".format(name))\n\n client = get_lxd_client()\n\n def wait_for_stop(timeout=30):\n \"\"\"Wait for eth0 to have an ipv4 address.\"\"\"\n starttime = time.time()\n while(time.time() < starttime + timeout):\n time.sleep(1)\n if container.state == \"Stopped\":\n return\n\n def wait_for_delete(timeout=30):\n starttime = time.time()\n while(time.time() < starttime + timeout):\n time.sleep(1)\n if client.containers.exists(name) is False:\n return\n\n try:\n container.stop(wait=False)\n wait_for_stop()\n except Exception as ex:\n debug(\n \"Error stopping container {}: {}\".format(\n name,\n ex,\n )\n )\n\n try:\n container.delete(wait=False)\n wait_for_delete()\n except Exception as ex:\n debug(\n \"Error deleting container {}: {}\".format(\n name,\n ex,\n )\n )\n\n try:\n # Delete the profile created for this container\n profile = client.profiles.get(name)\n if profile:\n profile.delete()\n except Exception as ex:\n debug(\n \"Error deleting profile {}: {}\".format(\n name,\n ex,\n )\n )", "def test_delete_rsa_container(self):\n secret_urls = self.secret_behaviors.create_n_secrets(3)\n container_resp = self.behaviors.create_rsa_container(\n 'name', secret_urls[0], secret_urls[1], secret_urls[2])\n self._check_container_create_response(container_resp)\n\n # delete container and check the response\n del_resp = self.behaviors.delete_container(container_resp.ref)\n self.assertEqual(del_resp.status_code, 204)\n\n # check the container is actually deleted\n get_resp = self.container_client.get_container(container_resp.ref)\n self.assertEqual(get_resp.status_code, 404)", "def delete_container(self, account, container):\n \n pass", "def view_destroyContainer(self, user, tag):\r\n try:\r\n container = user.containers.pop(tag)\r\n except KeyError:\r\n raise InvalidRequest('Can not destroy non existent container.')\r\n\r\n container.dontNotifyOnDeath(user.containerDied)\r\n container.destroy()\r\n\r\n # TODO: Return some info about success/failure of request\r", "def delete_container(ContainerName=None):\n pass", "def test_remove(self):\n\n message = {\"method\": \"remove\",\n \"params\": {\"elem\": self.container_to_remove}}\n response = yield 
self._get_response(message)\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"remove\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_remove\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertNotIn(container_name, containers.keys(),\n \"Container has found\")", "def stop_container(self, container):\n pass", "def test_delete_nonexistant_container(self):\n ref = self.container_client._get_base_url() + '/invalid_uuid'\n del_resp = self.behaviors.delete_container(ref)\n self.assertEqual(del_resp.status_code, 404)", "def test003_start_stop_container(self):\n\n self.log('%s STARTED' % self._testID)\n\n self.log('Create a container (C1)')\n self.cont1_name = self.random_string()\n self.containers = {self.cont1_name: {'hostname': self.cont1_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}}\n\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(type(res), type(dict()))\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['install'])\n\n self.log('Stop container C1, should succeed.')\n temp_actions = {'container': {'actions': ['stop'], 'service': self.cont1_name}}\n res = self.create_container(containers=self.containers, temp_actions=temp_actions)\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['stop'])\n\n self.log('Check if the container has been terminated, should succeed')\n conts = self.zos_client.container.list()\n self.assertFalse([c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n\n self.log('Start container C1, should succeed.')\n temp_actions = {'container': {'actions': ['start'], 'service': self.cont1_name}}\n res = self.create_container(containers=self.containers, temp_actions=temp_actions)\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['start'])\n\n self.log('Check if the container has been started, should succeed')\n conts = self.zos_client.container.list()\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n\n self.log('%s ENDED' % self._testID)", "def _destroy(self):\r\n if self._client:\r\n self._client.returnNr(self._nr)\r\n self._client.unregisterContainer(self)\r\n self._client = None\r\n\r\n if self._confDir:\r\n shutil.rmtree(self._confDir, True)\r\n self._confDir = None\r\n\r\n if self._dataDir:\r\n shutil.rmtree(self._dataDir, True)\r\n self._dataDir = None", "def destroy():\n pass", "def destroy():\n pass", "def test001_create_containers(self):\n self.log('%s STARTED' % self._testID)\n\n self.log('Create a two container on that node, should succeed.')\n self.cont1_name = self.random_string()\n self.containers = {self.cont1_name: {'hostname': self.cont1_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}}\n\n self.cont2_name = self.random_string()\n self.containers.update({self.cont2_name: {'hostname': self.cont2_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}})\n\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(type(res), type(dict()))\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['install'])\n self.wait_for_service_action_status(self.cont2_name, res[self.cont2_name]['install'])\n\n self.log('Check that the container have been created.')\n conts = self.zos_client.container.list()\n self.assertTrue([c for c in 
conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont2_name])\n cont1 = [c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name][0]\n self.assertTrue(cont1['container']['arguments']['storage'], self.cont_storage)\n self.assertTrue(cont1['container']['arguments']['root'], self.cont_flist)\n self.assertTrue(cont1['container']['arguments']['hostname'], self.cont_flist)\n\n self.log('%s ENDED' % self._testID)", "def delete_container(self, filesystem, acc_dir, cont_dir, account, container):\n try:\n # create path\n path = self.create_path(filesystem, acc_dir, cont_dir, account, container)\n self.logger.debug(('DELETE container called for path: %(path)s'),\n {'path' : path})\n # call container library to confirm if container is empty or not\n self.logger.debug('Called list container interface of library')\n list_obj = ListObjectWithStatus()\n self.asyn_helper.call(\"list_container\", \\\n path, list_obj, CONTAINER_LISTING_LIMIT, '', '', '', '')\n status = list_obj.get_return_status()\n self.logger.info(('Status from container library comes '\n 'out to be: %(status)s'), {'status' : status})\n if status != OsdExceptionCode.OSD_OPERATION_SUCCESS:\n return status\n container_list = list_obj.object_record\n self.logger.debug('Got container list')\n if container_list:\n self.logger.debug('object list found in container!')\n raise HTTPConflict()\n # call container library to delete container\n self.logger.debug('Called delete container interface of library')\n status_obj = Status()\n self.asyn_helper.call(\"delete_container\", path, status_obj)\n status = status_obj.get_return_status()\n self.logger.info(('Status from container library comes '\n 'out to be: %(status)s'),\n {'status' : status})\n return status\n except Exception as err:\n self.logger.error(('container DELETE failed for account/container:'\n ' %(account)s/%(container)s '\n 'close failure: %(exc)s : %(stack)s'),\n {'account' : account, 'container' : container,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def kill_remove_docker_container(container):\n container.kill()\n container.remove()\n return 0, 'Container {} removed.'.format(container.id[:12])", "def test_09_cleanup(self, mock_remove, mock_config):\n udocker.Config = mock_config\n udocker.Config.tmpdir = \"/tmp\"\n udocker.FileUtil.tmptrash = {'file1.txt': None, 'file2.txt': None}\n udocker.FileUtil(\"\").cleanup()\n self.assertEqual(mock_remove.call_count, 2)", "def test_cleanup(self, delete_mock, network_delete_mock, create_mock, libvirt_mock):\n resources = lxc.LXCResources('foo', {'domain': 'bar'})\n resources._domain = mock.Mock()\n resources._network = mock.Mock()\n resources._hypervisor = mock.Mock()\n resources.cleanup()\n delete_mock.assert_called_with(resources.domain, mock.ANY, None)\n network_delete_mock.assert_called_with(resources.network)\n self.assertTrue(resources._hypervisor.close.called)", "def test_update_container(self):\n pass", "def teardown(self):\n response = self.client.stop(container=self.container['Id'])\n if response:\n self.logger.warning(response)\n\n response = self.client.remove_container(container=self.container['Id'])\n if response:\n self.logger.warning(response)\n\n try:\n timed(lambda: self.running, time_out=self.time_out, exit_on=False)\n except TimeOutError:\n self.logger.warning(\n 'Container teardown timed out, may still be running {}'\n .format(self.container)\n )\n 
print 'Timeout'", "def _destroy(self):", "def test_stop(set_env, container: Container, docker_client: DockerClient):\n # pylint: disable=unused-argument\n assert container\n\n from dockerdb.commands.stop import stop\n\n stop()\n\n # getting the container again in order to assert their status after we stopped it\n _container = docker_client.containers.get(container_id=container.name)\n assert _container.status == \"exited\"", "def test_get_container(self):\n pass", "def tearDownClass(cls):\n cls.container.stop()\n cls.container.remove()\n cls.client.close()" ]
[ "0.8245798", "0.7704431", "0.7700081", "0.7595063", "0.73785865", "0.7206148", "0.7133335", "0.70388615", "0.70206547", "0.6987417", "0.67901903", "0.67895716", "0.6761828", "0.67412347", "0.66842693", "0.6674814", "0.66085625", "0.6554082", "0.6554082", "0.6501235", "0.6475468", "0.645517", "0.6439865", "0.6423451", "0.6415591", "0.6388217", "0.6387794", "0.6378997", "0.6335168", "0.63006365" ]
0.9445566
0
Test case for destroy_container_privilege
def test_destroy_container_privilege(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_destroy_container(self):\n pass", "def test_create_container_privilege(self):\n pass", "def test_update_container_privilege(self):\n pass", "def test_show_container_privilege(self):\n pass", "def test_destroy(set_env, container: Container, docker_client: DockerClient):\n # pylint: disable=unused-argument\n assert container\n\n from dockerdb.commands.destroy import destroy\n\n destroy()\n\n with pytest.raises(NotFound):\n docker_client.containers.get(container_id=container.name)", "def _process_destroyContainer(self, data):\r\n try:\r\n self._avatar.destroyContainer(data['containerTag'])\r\n except KeyError as e:\r\n raise InvalidRequest(\"Can not process 'DestroyContainer' request. \"\r\n 'Missing key: {0}'.format(e))", "def delete_container_policy(ContainerName=None):\n pass", "def test_delete_hyperflex_vcenter_config_policy(self):\n pass", "def delete_container(self, container: Container):", "def test_deallocate_virt_realm(self):\n pass", "def test_delete_generic_container(self):\n container_resp = self.behaviors.create_container('name', 'generic', [])\n self._check_container_create_response(container_resp)\n\n # delete container and check the response\n del_resp = self.behaviors.delete_container(container_resp.ref)\n self.assertEqual(del_resp.status_code, 204)\n\n # check the container is actually deleted\n get_resp = self.container_client.get_container(container_resp.ref)\n self.assertEqual(get_resp.status_code, 404)", "def test_delete_rsa_container(self):\n secret_urls = self.secret_behaviors.create_n_secrets(3)\n container_resp = self.behaviors.create_rsa_container(\n 'name', secret_urls[0], secret_urls[1], secret_urls[2])\n self._check_container_create_response(container_resp)\n\n # delete container and check the response\n del_resp = self.behaviors.delete_container(container_resp.ref)\n self.assertEqual(del_resp.status_code, 204)\n\n # check the container is actually deleted\n get_resp = self.container_client.get_container(container_resp.ref)\n self.assertEqual(get_resp.status_code, 404)", "def test_delete_hyperflex_cluster_storage_policy(self):\n pass", "def test_aws_service_api_vm_security_group_delete(self):\n pass", "def destroy_lxd_container(container):\n\n if type(container) is bool:\n return\n\n name = container.name\n debug(\"Destroying container {}\".format(name))\n\n client = get_lxd_client()\n\n def wait_for_stop(timeout=30):\n \"\"\"Wait for eth0 to have an ipv4 address.\"\"\"\n starttime = time.time()\n while(time.time() < starttime + timeout):\n time.sleep(1)\n if container.state == \"Stopped\":\n return\n\n def wait_for_delete(timeout=30):\n starttime = time.time()\n while(time.time() < starttime + timeout):\n time.sleep(1)\n if client.containers.exists(name) is False:\n return\n\n try:\n container.stop(wait=False)\n wait_for_stop()\n except Exception as ex:\n debug(\n \"Error stopping container {}: {}\".format(\n name,\n ex,\n )\n )\n\n try:\n container.delete(wait=False)\n wait_for_delete()\n except Exception as ex:\n debug(\n \"Error deleting container {}: {}\".format(\n name,\n ex,\n )\n )\n\n try:\n # Delete the profile created for this container\n profile = client.profiles.get(name)\n if profile:\n profile.delete()\n except Exception as ex:\n debug(\n \"Error deleting profile {}: {}\".format(\n name,\n ex,\n )\n )", "def test_delete_namespaced_role(self):\n pass", "def test_delete_cluster_role(self):\n pass", "def view_destroyContainer(self, user, tag):\r\n try:\r\n container = user.containers.pop(tag)\r\n except KeyError:\r\n raise 
InvalidRequest('Can not destroy non existent container.')\r\n\r\n container.dontNotifyOnDeath(user.containerDied)\r\n container.destroy()\r\n\r\n # TODO: Return some info about success/failure of request\r", "def test_delete_cluster_policy(self):\n pass", "def delete_container(self, account, container):\n \n pass", "def destroyContainer(tag): #@NoSelf", "def __exit__(self, type, value, traceback):\n log.Print('Shutting down metadata credentials')\n subprocess.check_call(['docker', 'rm', '-f', self._name],\n stdin=None, stdout=None, stderr=None)", "def delete_container(ContainerName=None):\n pass", "def delete_lifecycle_policy(ContainerName=None):\n pass", "def test_delete_hyperflex_sys_config_policy(self):\n pass", "def test_vault_delete_authorization_for_vault_section(self):\n pass", "def test_delete_bios_policy(self):\n pass", "def test_delete_hyperflex_ucsm_config_policy(self):\n pass", "def test_create_container(self):\n pass", "def test_destroy_exit_code(destroy_result: Result) -> None:\n assert destroy_result.exit_code == 0" ]
[ "0.799227", "0.75960445", "0.73963785", "0.7044312", "0.64787596", "0.6368871", "0.63494855", "0.62470484", "0.62427765", "0.62360406", "0.6196816", "0.61744213", "0.6163879", "0.61557615", "0.6136813", "0.6112972", "0.6096761", "0.60735035", "0.6069854", "0.6051791", "0.60278994", "0.6004123", "0.59912616", "0.5965809", "0.596298", "0.59218204", "0.590701", "0.58814186", "0.58674586", "0.5836725" ]
0.95208555
0
Test case for index_container_privileges
def test_index_container_privileges(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_show_container_privilege(self):\n pass", "def test_update_container_privilege(self):\n pass", "def test_create_container_privilege(self):\n pass", "def test_index_containers(self):\n pass", "def test_destroy_container_privilege(self):\n pass", "def test_news_index_has_perm(self):\n self.assertStatusCode(self.url, 200)", "def test_read_cluster_role(self):\n pass", "def test_news_index_no_perm(self):\n self.assertStatusCode(self.url, 403)", "def test_read_cluster_policy(self):\n pass", "def test_read_cluster_resource_quota(self):\n pass", "def test_list_cluster_role(self):\n pass", "def test_indexable(self):\n # verify ----------------------\n try:\n self.collection[0]\n except TypeError:\n msg = \"'Collection' object does not support indexing\"\n self.fail(msg)\n except IndexError:\n pass", "def test_read_cluster_resource_quota_status(self):\n pass", "def test_list_cluster_policy(self):\n pass", "def test_01_admin_index_authenticated(self):\r\n self.register()\r\n self.signout()\r\n self.register(name=\"tester2\", email=\"[email protected]\",\r\n password=\"tester\")\r\n res = self.app.get(\"/admin\", follow_redirects=True)\r\n err_msg = (\"The user should not be able to access this page\"\r\n \" but the returned status is %s\" % res.status)\r\n assert \"403 FORBIDDEN\" in res.status, err_msg", "def test_list_containers_with_non_authorized_user(self):\n\n test_auth_provider = self.os_operator.auth_provider\n # Get auth for the test user\n test_auth_provider.auth_data\n\n # Get fresh auth for test user and set it to next auth request for\n # account_client\n delattr(test_auth_provider, 'auth_data')\n test_auth_new_data = test_auth_provider.auth_data\n self.account_client.auth_provider.set_alt_auth_data(\n request_part='headers',\n auth_data=test_auth_new_data\n )\n\n params = {'format': 'json'}\n # list containers with non-authorized user token\n self.assertRaises(lib_exc.Forbidden,\n self.account_client.list_account_containers,\n params=params)", "def reindex_licence_permissions(container, event):\n if IUrbanEvent.providedBy(container):\n licence = container.aq_parent\n licence.reindexObject(idxs=['allowedRolesAndUsers'])", "def test_read_namespaced_applied_cluster_resource_quota(self):\n pass", "def test_vault_create_authorization_for_vault_section(self):\n pass", "def test_create_container(self):\n pass", "def testGetAccessAllowed(self):\n for user in (self.guest, self.contributor, self.delegate, self.owner, self.root):\n response = self.runGet(user, sequencer=self.hiseq2000.sodar_uuid)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"sodar_uuid\"], str(self.hiseq2000.sodar_uuid))", "def test_basic(self):\n self.check_4_way('container', 'pod')", "def ft_syndicate_access():\n \n fake_user = FakeObject()\n fake_user.email = \"[email protected]\"\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_user_exists(%s)\\n\" % fake_user.email\n ensure_user_exists( fake_user.email, is_admin=False, max_UGs=1100, max_RGs=1 )\n\n fake_volume = FakeObject()\n fake_volume.name = \"fakevolume\"\n fake_volume.description = \"This is a fake volume, created for funtional testing\"\n fake_volume.blocksize = 1024\n fake_volume.cap_read_data = True \n fake_volume.cap_write_data = True \n fake_volume.cap_host_data = False\n fake_volume.archive = False\n fake_volume.private = True\n \n # test idempotency\n print 
\"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_exists(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_exists( fake_user.email, fake_volume.name, 31 )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_access_right_absent(%s,%s)\\n\" % (fake_user.email, fake_volume.name)\n ensure_volume_access_right_absent( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n\n print \"\\nensure_user_absent(%s)\\n\" % fake_user.email\n ensure_user_absent( fake_user.email )\n \n \n \n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n \n print \"\\nensure_principal_exists(%s)\\n\" % fake_user.email\n ensure_principal_exists( fake_user.email, \"asdf\", is_admin=False, max_UGs=1100, max_RGs=1 )\n\n print \"\\nensure_volume_exists(%s)\\n\" % fake_volume.name\n ensure_volume_exists( fake_user.email, fake_volume )\n\n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nsetup_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name)\n setup_volume_access( fake_user.email, fake_volume.name, 31, 38800, \"abcdef\" )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nteardown_volume_access(%s, %s)\\n\" % (fake_user.email, fake_volume.name )\n teardown_volume_access( fake_user.email, fake_volume.name )\n \n print \"\\nensure_volume_absent(%s)\\n\" % fake_volume.name\n ensure_volume_absent( fake_volume.name )\n\n print \"\\nensure_principal_absent(%s)\\n\" % fake_user.email\n ensure_principal_absent( fake_user.email )", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def test_create_cluster_resource_quota(self):\n pass", "def test_create_cluster_role(self):\n pass", "def test_list_cluster_resource_quota(self):\n pass", "def test_access_levels(self):\n resource_id = 'protected-data'\n arn_prefix = \"arn:dcp:fus:us-east-1:dev:\"\n user = 'user_test_access_levels'\n group = 'group_test_access_levels'\n\n # create the resource to control\n resp = self.app.post(\n f'/v1/resource/{self.test_resource}/id/{resource_id}',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n\n with self.subTest(\"Check that no one has access by listing who has access\"):\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 
200)\n self.assertJSONEqual(resp.body, {'members': []})\n\n with self.subTest(\"Toggle user access.\"):\n # create a user\n resp = self.app.post(\n f'/v1/user',\n data=json.dumps({'user_id': user}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n\n # give a user access\n request_body = [\n {'member': user,\n 'member_type': 'user',\n 'access_level': 'read'}\n ]\n resp = self.app.put(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n data=json.dumps(request_body),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n\n # Check that the user has access by listing who has access\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n self.assertJSONEqual(resp.body, {'members': request_body})\n\n # Remove access for the user\n request_body = [\n {'member': user,\n 'member_type': 'user'}\n ]\n resp = self.app.put(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n data=json.dumps(request_body),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n\n # Check that the user does not have access by listing who has access\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n self.assertJSONEqual(resp.body, {'members': []})\n\n with self.subTest(\"Toggle group access.\"):\n # create a group\n resp = self.app.post(\n f'/v1/group',\n data=json.dumps({'group_id': group}),\n headers=admin_headers)\n self.assertEqual(resp.status_code, 201)\n\n # give a group access\n request_body = [\n {'member': group,\n 'member_type': 'group',\n 'access_level': 'read'}\n ]\n resp = self.app.put(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n data=json.dumps(request_body),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n\n # Check that the group has access by listing who has access\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n self.assertJSONEqual(resp.body, {'members': request_body})\n\n # Remove access for the group\n request_body = [\n {'member': group,\n 'member_type': 'group'}\n ]\n resp = self.app.put(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n data=json.dumps(request_body),\n headers=admin_headers\n )\n self.assertEqual(resp.status_code, 200)\n\n # Check that the group does not have access by listing who has access\n resp = self.app.get(\n f'/v1/resource/{self.test_resource}/id/{resource_id}/members',\n headers=admin_headers)\n self.assertEqual(resp.status_code, 200)\n self.assertJSONEqual(resp.body, {'members': []})", "def test_create_cluster_policy(self):\n pass", "def test_get_container(self):\n pass" ]
[ "0.7390241", "0.72225755", "0.7213204", "0.6697649", "0.61364084", "0.6098532", "0.57128835", "0.57016516", "0.56166", "0.55914325", "0.5578832", "0.5546629", "0.55442214", "0.5516774", "0.5504094", "0.5342963", "0.5326308", "0.5305716", "0.53056926", "0.5256991", "0.5253735", "0.52501565", "0.5226465", "0.5202995", "0.5184172", "0.51790863", "0.5171123", "0.5170295", "0.5169487", "0.5168543" ]
0.95182186
0
Test case for index_containers
def test_index_containers(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_index_container_privileges(self):\n pass", "def test_index(self):", "def test_creating_index_type(self):", "def test_export_index(self):", "def test_indexable(self):\n # verify ----------------------\n try:\n self.collection[0]\n except TypeError:\n msg = \"'Collection' object does not support indexing\"\n self.fail(msg)\n except IndexError:\n pass", "def test_index(client):\n ind = client.get('/')\n ind2 = client.get('/index/')\n assert ind.status_code == 200\n assert ind2.status_code == 200", "async def index(self, container, datas):", "def setUp(self):\n body = {\n \"settings\": {\n \"number_of_shards\": 1,\n \"number_of_replicas\": 0\n }\n }\n self.assertTrue(self.es.create_index('contacts_esclient_test', body))\n self.assertFalse(self.es.create_index('contacts_esclient_test', body))\n\n self.assertTrue(self.es.create_index('contacts_esclient_test2', body))\n self.assertFalse(self.es.create_index('contacts_esclient_test2', body))\n\n\n \"\"\" Index some test data \"\"\"\n data = {\"name\": \"Joe Tester\",\"age\": 21, \"sex\": \"male\"}\n self.assertTrue(self.es.index(\"contacts_esclient_test\", \"person\", body=data,\n docid=1))\n data = {\"name\": \"Joe Schmoe\",\"age\": 17, \"sex\": \"male\"}\n self.assertTrue(self.es.index(\"contacts_esclient_test\", \"person\", body=data,\n docid=2))\n\n self.assertTrue(self.es.refresh('contacts_esclient_test'))", "def testGetAttributeContainerByIndex(self):\n redis_client = self._CreateRedisClient()\n\n event_data_stream = events.EventDataStream()\n\n test_store = redis_store.RedisStore(\n storage_type=definitions.STORAGE_TYPE_TASK)\n test_store.Open(redis_client=redis_client)\n\n container = test_store.GetAttributeContainerByIndex(\n event_data_stream.CONTAINER_TYPE, 0)\n self.assertIsNone(container)\n\n test_store.AddAttributeContainer(event_data_stream)\n\n container = test_store.GetAttributeContainerByIndex(\n event_data_stream.CONTAINER_TYPE, 0)\n self.assertIsNotNone(container)\n\n with self.assertRaises(IOError):\n test_store.GetAttributeContainerByIndex('bogus', 0)\n\n test_store.Close()", "def test_removing_index(self):", "def build_index():\n pass", "def test_get_container(self):\n pass", "def test_create_index_fail_test(tcex: TcEx, monkeypatch: MonkeyPatch):\n\n # monkeypatch method\n def mp_post(*args, **kwargs): # pylint: disable=unused-argument\n return MockPost({}, ok=False)\n\n monkeypatch.setattr(tcex.session.tc, 'post', mp_post)\n\n # create index\n key = str(uuid.uuid4())\n try:\n tcex.api.tc.v2.datastore('local', key)\n assert False, 'Failed to catch error on ok=False'\n except RuntimeError:\n assert True", "def test_update_container(self):\n pass", "def test_create_container(self):\n pass", "def test_create_index(self, collection):\n collection.create_index(\"hello\")\n assert collection._indexes == {\"_id_\": ((\"_id\",), {(1,)})}\n\n collection.create_index(\"hello\", unique=True)\n assert collection._indexes == {\n \"_id_\": ((\"_id\",), {(1,)}),\n \"hello_1\": ((\"hello\",), {(\"there\",)}),\n }", "def test_index_loads_properly(self):\n response = self.client.get('localhost:8000')\n self.assertEqual(response.status_code, 200)", "def test_show_container(self):\n pass", "def test_index_stats(self):\n #Create Index\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False)\n #Check Index Stats\n self.sleep(30)\n index_map = self.get_index_stats()\n self.log.info(index_map)\n for query_definition in self.query_definitions:\n 
index_name = query_definition.index_name\n for bucket in self.buckets:\n bucket_name = bucket.name\n check_keys = ['items_count', 'total_scan_duration', 'num_docs_queued',\n 'num_requests', 'num_rows_returned', 'num_docs_queued',\n 'num_docs_pending','delete_bytes' ]\n map = self._create_stats_map(items_count=2016)\n self._verify_index_stats(index_map, index_name, bucket_name, map, check_keys)", "def test_integer_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_simple_index, 12345)\n\t)", "def test_containers(self):\n\n message = {\"method\": \"containers\"}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"containers\")\n self.assertIsInstance(response[\"result\"], list)\n self.assertNotEqual(len(response[\"result\"]), 0)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")", "def test_index_database(self, db_reader_cls):\n database = Mock()\n db_reader = db_reader_cls()\n db_reader.tables.return_value = ('calls', 'messages', 'pictures')\n documents_indexed_per_table = (1, 2, 3)\n self.client._index_table = Mock(\n side_effect=documents_indexed_per_table)\n\n documents_indexed = self.client._index_database(database)\n db_reader_cls.assert_called_with(database)\n self.assertEqual(documents_indexed, sum(documents_indexed_per_table))", "def test_track_index(self, collection):\n collection.create_index(\"hello\", unique=True)\n collection.insert_many([{\"hello\": \"here\"}, {\"hello\": 2}])\n assert collection._indexes == {\n \"_id_\": ((\"_id\",), {(1,), (2,), (3,)}),\n \"hello_1\": ((\"hello\",), {(\"there\",), (\"here\",), (2,)}),\n }", "def create_index():", "def test_index(self):\n es = elasticsearch.ElasticSearch(server='8.8.8.8',\n user='alice',\n password='iLoveDogs',\n doc_type='someLogCategory')\n\n index = es.index\n expected = time.strftime('logs-%Y.%m.%d')\n\n self.assertEqual(index, expected)", "def test_index_nas_shares(self):\n pass", "def test_index():\n app = create_ctfd()\n with app.app_context():\n with app.test_client() as client:\n r = client.get('/')\n assert r.status_code == 200\n destroy_ctfd(app)", "def test_load_index(self):\n\n c = Client()\n response = c.get('/taric_books/')\n\n self.assertEqual(response.status_code, 200)", "def test_index_instructions(self):\n\n response = self.get_response('/')\n self.assertEqual(200, response.getcode())\n\n # We're just testing if the word \"add\" is present in the index\n self.assertIn(\"add\".encode(), response.read())", "def test_integer_map_key_index():\n\tlib.backup_and_restore(\n\t\tlambda context: create_indexes(lib.create_integer_map_key_index),\n\t\tNone,\n\t\tlambda context: check_indexes(lib.check_map_key_index, 12345)\n\t)" ]
[ "0.74689764", "0.7236028", "0.7231017", "0.6694666", "0.659971", "0.65104294", "0.64708704", "0.645695", "0.6350804", "0.6346495", "0.63435423", "0.6316221", "0.6306986", "0.62914807", "0.6266515", "0.6252186", "0.62408215", "0.6236435", "0.62193215", "0.6215455", "0.6201725", "0.61808914", "0.6131154", "0.61226493", "0.60659194", "0.6037242", "0.602223", "0.6004035", "0.5994148", "0.59933674" ]
0.9465906
0
Test case for show_container
def test_show_container(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_container(self):\n pass", "def test_create_container(self):\n pass", "def do_show(cs, args):\n opts = {}\n opts['id'] = args.container\n opts['all_projects'] = args.all_projects\n opts = zun_utils.remove_null_parms(**opts)\n container = cs.containers.get(**opts)\n if args.format == 'json':\n print(jsonutils.dumps(container._info, indent=4, sort_keys=True))\n elif args.format == 'yaml':\n print(yaml.safe_dump(container._info, default_flow_style=False))\n elif args.format == 'table':\n _show_container(container)", "def test_containers(self):\n\n message = {\"method\": \"containers\"}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"containers\")\n self.assertIsInstance(response[\"result\"], list)\n self.assertNotEqual(len(response[\"result\"]), 0)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")", "def test_public_container_preview_html(self):\n published_unit = self.store.publish(self.vertical.location, self.user.id)\n published_child_container = self.store.get_item(self.child_container.location)\n published_child_vertical = self.store.get_item(self.child_vertical.location)\n self.validate_preview_html(published_unit, self.container_view)\n self.validate_preview_html(published_child_container, self.container_view)\n self.validate_preview_html(published_child_vertical, self.reorderable_child_view)", "def test_container_no_assets(self):\n context = {}\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def test_get_container_nested_container_fragment(self):\r\n # Add a wrapper with child beneath a child vertical\r\n root_usage_key = self._create_vertical()\r\n\r\n resp = self.create_xblock(parent_usage_key=root_usage_key, category=\"wrapper\")\r\n self.assertEqual(resp.status_code, 200)\r\n wrapper_usage_key = self.response_usage_key(resp)\r\n\r\n resp = self.create_xblock(parent_usage_key=wrapper_usage_key, category='problem', boilerplate='multiplechoice.yaml')\r\n self.assertEqual(resp.status_code, 200)\r\n\r\n # Get the preview HTML and verify the View -> link is present.\r\n html, __ = self._get_container_preview(root_usage_key)\r\n self.assertIn('wrapper-xblock', html)\r\n self.assertRegexpMatches(\r\n html,\r\n # The instance of the wrapper class will have an auto-generated ID. 
Allow any\r\n # characters after wrapper.\r\n (r'\"/container/location:MITx\\+999\\+Robot_Super_Course\\+wrapper\\+\\w+\" class=\"action-button\">\\s*'\r\n '<span class=\"action-button-text\">View</span>')\r\n )", "def test_show_container_privilege(self):\n pass", "def _get_container_preview(self, usage_key):\r\n preview_url = reverse_usage_url(\"xblock_view_handler\", usage_key, {'view_name': 'container_preview'})\r\n resp = self.client.get(preview_url, HTTP_ACCEPT='application/json')\r\n self.assertEqual(resp.status_code, 200)\r\n resp_content = json.loads(resp.content)\r\n html = resp_content['html']\r\n self.assertTrue(html)\r\n resources = resp_content['resources']\r\n self.assertIsNotNone(resources)\r\n return html, resources", "def test_docker_api(proc):\n assert len(proc.docker_container_id)\n assert proc.docker_inspect()['Id'].startswith(proc.docker_container_id)\n assert proc.docker_stats()['Container'] == proc.docker_container_id", "def test_public_container_preview_html(self):\r\n self.validate_preview_html(self.vertical, self.container_view,\r\n can_edit=False, can_reorder=False, can_add=False)\r\n self.validate_preview_html(self.child_container, self.container_view,\r\n can_edit=False, can_reorder=False, can_add=False)\r\n self.validate_preview_html(self.child_vertical, self.reorderable_child_view,\r\n can_edit=False, can_reorder=False, can_add=False)", "def container(app, container=None):\n if container is None:\n # Print containers\n table = Table([\n (\"NAME\", 30),\n ])\n table.print_header()\n for container in sorted(app.containers, key=lambda c: c.name):\n table.print_row([\n container.name,\n ])\n else:\n # Container name\n click.echo(CYAN(\"Name: \") + container.name)\n # Build parent\n click.echo(\n CYAN(\"Build ancestry: \") +\n \", \".join(other.name for other in app.containers.build_ancestry(container))\n )\n # Runtime dependencies\n dependencies = app.containers.dependencies(container)\n if dependencies:\n click.echo(CYAN(\"Depends on: \") + \", \".join(sorted(other.name for other in dependencies)))\n else:\n click.echo(CYAN(\"Depends on: \") + \"(nothing)\")\n # Dependents\n dependents = app.containers.dependents(container)\n if dependents:\n click.echo(CYAN(\"Depended on by: \") + \", \".join(sorted(other.name for other in dependents)))\n else:\n click.echo(CYAN(\"Depended on by: \") + \"(nothing)\")\n # Volumes\n click.echo(CYAN(\"Named volumes:\"))\n for mount_point, volume in container.named_volumes.items():\n click.echo(\" {}: {}\".format(mount_point, volume.source))\n click.echo(CYAN(\"Bind-mounted volumes:\"))\n for mount_point, volume in container.bound_volumes.items():\n click.echo(\" {}: {}\".format(mount_point, volume.source))\n # Devmodes\n click.echo(CYAN(\"Mounts (devmodes):\"))\n for name, mounts in container.devmodes.items():\n click.echo(\" {}:\".format(name))\n for mount_point, volume in mounts.items():\n click.echo(\" {}: {}\".format(mount_point, volume.source))", "def test_container_on_container_html(self):\r\n published_container = ItemFactory.create(\r\n parent_location=self.child_container.location,\r\n category=\"wrapper\", display_name=\"Wrapper\"\r\n )\r\n ItemFactory.create(\r\n parent_location=published_container.location,\r\n category=\"html\", display_name=\"Child HTML\"\r\n )\r\n\r\n def test_container_html(xblock):\r\n self._test_html_content(\r\n xblock,\r\n expected_section_tag=(\r\n '<section class=\"wrapper-xblock level-page is-hidden studio-xblock-wrapper\" '\r\n 'data-locator=\"{0}\" 
data-course-key=\"{0.course_key}\">'.format(published_container.location)\r\n ),\r\n expected_breadcrumbs=(\r\n r'<a href=\"/unit/{unit}\"\\s*'\r\n r'class=\"navigation-link navigation-parent\">Unit</a>\\s*'\r\n r'<a href=\"/container/{split_test}\"\\s*'\r\n r'class=\"navigation-link navigation-parent\">Split Test</a>\\s*'\r\n r'<a href=\"#\" class=\"navigation-link navigation-current\">Wrapper</a>'\r\n ).format(\r\n unit=re.escape(unicode(self.vertical.location)),\r\n split_test=re.escape(unicode(self.child_container.location))\r\n )\r\n )\r\n\r\n # Test the published version of the container\r\n test_container_html(published_container)\r\n\r\n # Now make the unit and its children into a draft and validate the container again\r\n modulestore('draft').convert_to_draft(self.vertical.location)\r\n modulestore('draft').convert_to_draft(self.child_vertical.location)\r\n draft_container = modulestore('draft').convert_to_draft(published_container.location)\r\n test_container_html(draft_container)", "def testGetAllContainers(self):\n containers_list = self.explorer_object.GetAllContainers()\n containers_list = sorted(containers_list, key=lambda ci: ci.name)\n self.assertEqual(7, len(containers_list))\n\n container_obj = containers_list[1]\n\n self.assertEqual('/dreamy_snyder', container_obj.name)\n self.assertEqual(\n '2017-02-13T16:45:05.629904159Z', container_obj.creation_timestamp)\n self.assertEqual('busybox', container_obj.config_image_name)\n self.assertTrue(container_obj.running)\n\n self.assertEqual(\n '7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966',\n container_obj.container_id)", "def test_index_containers(self):\n pass", "def test_update_container(self):\n pass", "def test_list_containers(self):\n self.assertEqual(self.client.containers.list(), [])\n \n container = self.client.containers.run(\"bfirsh/reticulate-splines\", detach=\"True\")\n time.sleep(5)\n self.assertNotEqual(self.client.containers.list(), [])\n\n container.stop()\n time.sleep(5)\n self.assertEqual(self.client.containers.list(), [])", "def describe_container(ContainerName=None):\n pass", "def test_container_on_container_html(self):\n draft_container = self._create_item(self.child_container.location, \"wrapper\", \"Wrapper\")\n self._create_item(draft_container.location, \"html\", \"Child HTML\")\n\n def test_container_html(xblock):\n self._test_html_content(\n xblock,\n expected_section_tag=(\n '<section class=\"wrapper-xblock level-page is-hidden studio-xblock-wrapper\" '\n 'data-locator=\"{0}\" data-course-key=\"{0.course_key}\">'.format(draft_container.location)\n ),\n expected_breadcrumbs=(\n '<a href=\"/course/{course}{subsection_parameters}\">Lesson 1</a>.*'\n '<a href=\"/container/{unit_parameters}\">Unit</a>.*'\n ).format(\n course=re.escape(str(self.course.id)),\n unit_parameters=re.escape(str(self.vertical.location)),\n subsection_parameters=re.escape('?show={}'.format(http.urlquote(\n str(self.sequential.location).encode()\n ))),\n ),\n )\n\n # Test the draft version of the container\n test_container_html(draft_container)\n\n # Now publish the unit and validate again\n self.store.publish(self.vertical.location, self.user.id)\n draft_container = self.store.get_item(draft_container.location)\n test_container_html(draft_container)", "def test_container_display_name(self):\n containers = test_ps_data.ps_containers\n container = containers[0]\n container_display = dockerprettyps.container_display_name(container, CliArgs())\n assert container_display == container[\"color\"] + container[\"name\"] 
+ dockerprettyps.ENDC\n\n # Test that we bold the portion of a container name that matches a search if we have one.\n args = CliArgs()\n args.search = [\"post\"]\n for container in containers:\n if container[\"name\"] == \"some-postgres\":\n assert dockerprettyps.container_display_name(container, args) == \\\n \"\\x1b[91msome-\\x1b[1m\\x1b[91mpost\\x1b[0m\\x1b[91mgres\\x1b[0m\"", "def start(self, container: Container):", "def testGetAllContainers(self):\n containers_list = self.explorer_object.GetAllContainers()\n containers_list = sorted(containers_list, key=lambda ci: ci.name)\n self.assertEqual(3, len(containers_list))\n\n container_obj = containers_list[0]\n\n self.assertEqual('/angry_rosalind', container_obj.name)\n self.assertEqual(\n '2018-12-27T10:53:17.096746609Z', container_obj.creation_timestamp)\n self.assertEqual('busybox', container_obj.config_image_name)\n self.assertTrue(container_obj.running)\n\n self.assertEqual(\n 'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c',\n container_obj.container_id)", "def test_container_no_asset_for_container(self):\n assets = Asset.objects.select_subclasses()\n right = Container.objects.get(name='right')\n SectionAsset.objects.create(section=self.section, asset=assets[0],\n container=right)\n # Refresh the section object to get new relations\n self.section = Section.objects.get(pk=self.section.pk)\n context = {\n 'assets': self.section.sectionasset_set.order_by('weight')\n }\n container_name = \"left\"\n html = container(context, container_name)\n self.assertIn(\"storybase-container-placeholder\", html)\n self.assertIn(container_name, html)", "def test_public_child_container_preview_html(self):\n empty_child_container = self._create_item(self.vertical.location, 'split_test', 'Split Test')\n published_empty_child_container = self.store.publish(empty_child_container.location, self.user.id)\n self.validate_preview_html(published_empty_child_container, self.reorderable_child_view, can_add=False)", "def testGetAllContainers(self):\n containers_list = self.explorer_object.GetAllContainers()\n containers_list = sorted(containers_list, key=lambda ci: ci.name)\n self.assertEqual(6, len(containers_list))\n\n container_obj = containers_list[0]\n\n self.assertEqual('/elastic_booth', container_obj.name)\n self.assertEqual(\n '2018-01-26T14:55:56.280943771Z', container_obj.creation_timestamp)\n self.assertEqual('busybox:latest', container_obj.config_image_name)\n self.assertTrue(container_obj.running)\n\n self.assertEqual(\n '5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a',\n container_obj.container_id)", "def containerView(*args, itemInfo: Union[AnyStr, bool]=\"\", itemList: bool=True,\n viewDescription: bool=True, viewLabel: bool=True, viewList: bool=True,\n viewName: Union[AnyStr, bool]=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass", "def testGetAllContainers(self):\n containers_list = self.explorer_object.GetAllContainers()\n containers_list = sorted(containers_list, key=lambda ci: ci.name)\n self.assertEqual(5, len(containers_list))\n\n container_obj = containers_list[0]\n\n self.assertEqual('/festive_perlman', container_obj.name)\n self.assertEqual(\n '2018-05-16T10:51:39.271019533Z', container_obj.creation_timestamp)\n self.assertEqual('busybox', container_obj.config_image_name)\n self.assertTrue(container_obj.running)\n self.assertEqual(\n '8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206',\n container_obj.container_id)\n\n container_obj = containers_list[3]\n 
self.assertEqual('/reverent_wing', container_obj.name)\n self.assertEqual(\n '2018-05-16T10:51:28.695738065Z', container_obj.creation_timestamp)\n self.assertEqual('busybox', container_obj.config_image_name)\n self.assertFalse(container_obj.running)\n self.assertEqual(\n '10acac0b3466813c9e1f85e2aa7d06298e51fbfe86bbcb6b7a19dd33d3798f6a',\n container_obj.container_id)\n self.assertEqual(\n {'12345/tcp': {}, '27017/tcp': {}}, container_obj.exposed_ports)", "def test_is_container(self):\n # verify ----------------------\n try:\n 1 in self.collection\n except TypeError:\n msg = \"'Collection' object is not container\"\n self.fail(msg)", "def test001_create_containers(self):\n self.log('%s STARTED' % self._testID)\n\n self.log('Create a two container on that node, should succeed.')\n self.cont1_name = self.random_string()\n self.containers = {self.cont1_name: {'hostname': self.cont1_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}}\n\n self.cont2_name = self.random_string()\n self.containers.update({self.cont2_name: {'hostname': self.cont2_name,\n 'flist': self.cont_flist,\n 'storage': self.cont_storage}})\n\n res = self.create_container(containers=self.containers, temp_actions=self.temp_actions)\n self.assertEqual(type(res), type(dict()))\n self.wait_for_service_action_status(self.cont1_name, res[self.cont1_name]['install'])\n self.wait_for_service_action_status(self.cont2_name, res[self.cont2_name]['install'])\n\n self.log('Check that the container have been created.')\n conts = self.zos_client.container.list()\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name])\n self.assertTrue([c for c in conts.values() if c['container']['arguments']['name'] == self.cont2_name])\n cont1 = [c for c in conts.values() if c['container']['arguments']['name'] == self.cont1_name][0]\n self.assertTrue(cont1['container']['arguments']['storage'], self.cont_storage)\n self.assertTrue(cont1['container']['arguments']['root'], self.cont_flist)\n self.assertTrue(cont1['container']['arguments']['hostname'], self.cont_flist)\n\n self.log('%s ENDED' % self._testID)", "def test_draft_container_preview_html(self):\r\n draft_unit = modulestore('draft').convert_to_draft(self.vertical.location)\r\n draft_child_container = modulestore('draft').convert_to_draft(self.child_container.location)\r\n draft_child_vertical = modulestore('draft').convert_to_draft(self.child_vertical.location)\r\n self.validate_preview_html(draft_unit, self.container_view,\r\n can_edit=True, can_reorder=True, can_add=True)\r\n self.validate_preview_html(draft_child_container, self.container_view,\r\n can_edit=True, can_reorder=True, can_add=True)\r\n self.validate_preview_html(draft_child_vertical, self.reorderable_child_view,\r\n can_edit=True, can_reorder=True, can_add=True)" ]
[ "0.7574978", "0.7064828", "0.7024498", "0.6931409", "0.67337406", "0.6730391", "0.66607016", "0.665341", "0.6561181", "0.64965963", "0.64463973", "0.64361846", "0.64278543", "0.6419025", "0.6405328", "0.6402743", "0.6387417", "0.6372569", "0.6355929", "0.633814", "0.6278366", "0.62752646", "0.62657154", "0.6255116", "0.6219792", "0.6188313", "0.6180428", "0.61683863", "0.61290586", "0.6108973" ]
0.9311129
0
Test case for show_container_privilege
def test_show_container_privilege(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_container_privilege(self):\n pass", "def test_update_container_privilege(self):\n pass", "def test_destroy_container_privilege(self):\n pass", "def test_index_container_privileges(self):\n pass", "def test_show_container(self):\n pass", "def show_privileges(self):\n print(\"\\nPrivilegios:\")\n for privilege in self.privileges:\n print(\"- \" + privilege)", "def show_privileges(self):\n print(\"\\nAdministrator privileges: \")\n for privilege in self.privileges:\n print(\"- \" + privilege)", "def show_privileges(self):\n print(\"This admin user has the following privileges:\")\n for item in self.privileges:\n print(f\"- {item}\")", "def show_privileges(self):\n\t\tprint(\"An administrator has following privileges: \")\n\t\tfor item in self.privileges:\n\t\t\tprint(item)", "def show_privileges(self):\n print(\"The set of privileges for this administrator are as follows: \")\n if self.privileges:\n \"\"\"I forgot if and else statment\"\"\"\n for privilege in self.privileges:\n print(\"-\" + str(privilege.title()))\n else:\n print(\"This user has no privileges.\")", "def show_privileges(self):\n print(\"\\nPrivileges:\")\n if self.privileges:\n for privilege in self.privileges:\n print(\"- \" + privilege)\n else:\n print(\"- This user has no privileges.\")", "def test_get_container(self):\n pass", "def show_privileges(self):\n print(f\"List of privileges for user:\")\n for privilege in self.privileges:\n print(privilege.title())", "def test_create_container(self):\n pass", "def privilege(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"privilege\")", "def describe_container(ContainerName=None):\n pass", "def test_status_code_for_privileged_user(self):\n self.grant_permission()\n self.client.login(username=\"john\", password=\"pass\")\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, self.status_has_permission)", "def test_auth_private_owned(self):\n self.do_visible(True, 'pattieblack', False, tenant='pattieblack')", "def dictGetHierarchy_check(self, privilege, on, grant_target_name, user_name, node=None):\n if node is None:\n node = self.context.node\n\n dict_name = f\"dict_{getuid()}\"\n table_name = f\"table_{getuid()}\"\n\n on = on.replace(\"dict\", f\"{dict_name}\")\n\n exitcode, message = errors.not_enough_privileges(name=f\"{user_name}\")\n\n with Scenario(\"user without privilege\"):\n\n with dict_setup(node, table_name, dict_name):\n\n with When(\"I grant the user NONE privilege\"):\n node.query(f\"GRANT NONE TO {grant_target_name}\")\n\n with And(\"I grant the user USAGE privilege\"):\n node.query(f\"GRANT USAGE ON *.* TO {grant_target_name}\")\n\n with Then(\"I attempt to dictGetHierarchy without privilege\"):\n node.query(f\"SELECT dictGetHierarchy({dict_name},toUInt64(1))\", settings = [(\"user\", user_name)], exitcode=exitcode, message=message)\n\n with Scenario(\"user with privilege\"):\n\n with dict_setup(node, table_name, dict_name):\n\n with When(\"I grant privilege\"):\n node.query(f\"GRANT {privilege} ON {on} TO {grant_target_name}\")\n\n with Then(\"I attempt to dictGetHierarchy with privilege\"):\n node.query(f\"SELECT dictGetHierarchy({dict_name},toUInt64(1))\", settings = [(\"user\", user_name)])\n\n with Scenario(\"user with revoked privilege\"):\n\n with dict_setup(node, table_name, dict_name):\n\n with When(\"I grant privilege\"):\n node.query(f\"GRANT {privilege} ON {on} TO {grant_target_name}\")\n\n with And(\"I revoke privilege\"):\n node.query(f\"REVOKE {privilege} ON {on} FROM 
{grant_target_name}\")\n\n with When(\"I attempt to dictGetHierarchy without privilege\"):\n node.query(f\"SELECT dictGetHierarchy({dict_name},toUInt64(1))\", settings = [(\"user\", user_name)], exitcode=exitcode, message=message)", "def dictGetHierarchy_granted_directly(self, node=None):\n\n user_name = f\"user_{getuid()}\"\n\n if node is None:\n node = self.context.node\n\n with user(node, f\"{user_name}\"):\n Suite(run=dictGetHierarchy_check,\n examples=Examples(\"privilege on grant_target_name user_name\", [\n tuple(list(row)+[user_name,user_name]) for row in dictGetHierarchy_check.examples\n ], args=Args(name=\"check privilege={privilege}\", format_name=True)))", "def test_empty_private_owned(self):\n self.do_visible(True, 'pattieblack', False, is_admin=True)", "def test_list_containers_with_non_authorized_user(self):\n\n test_auth_provider = self.os_operator.auth_provider\n # Get auth for the test user\n test_auth_provider.auth_data\n\n # Get fresh auth for test user and set it to next auth request for\n # account_client\n delattr(test_auth_provider, 'auth_data')\n test_auth_new_data = test_auth_provider.auth_data\n self.account_client.auth_provider.set_alt_auth_data(\n request_part='headers',\n auth_data=test_auth_new_data\n )\n\n params = {'format': 'json'}\n # list containers with non-authorized user token\n self.assertRaises(lib_exc.Forbidden,\n self.account_client.list_account_containers,\n params=params)", "def test_auth_public_owned(self):\n self.do_visible(True, 'pattieblack', True, tenant='pattieblack')", "def test_add_and_remove_privilege(self):\n\n self.create_common_users_and_groups()\n\n sgp = SetGroupPrivilegesAPI(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n sgp.add_privilege(\n groups=[\"Group 1\", \"Group 2\"], privilege=Privileges.CAN_USE_SPOTIQ\n )", "def dictGet_check(self, privilege, on, grant_target_name, user_name, node=None):\n if node is None:\n node = self.context.node\n\n dict_name = f\"dict_{getuid()}\"\n table_name = f\"table_{getuid()}\"\n\n on = on.replace(\"dict\", f\"{dict_name}\")\n\n exitcode, message = errors.not_enough_privileges(name=f\"{user_name}\")\n\n with Scenario(\"user without privilege\"):\n\n with dict_setup(node, table_name, dict_name):\n\n with When(\"I grant the user NONE privilege\"):\n node.query(f\"GRANT NONE TO {grant_target_name}\")\n\n with And(\"I grant the user USAGE privilege\"):\n node.query(f\"GRANT USAGE ON *.* TO {grant_target_name}\")\n\n with Then(\"I attempt to dictGet without privilege\"):\n node.query(f\"SELECT dictGet ({dict_name},'y',toUInt64(1))\", settings = [(\"user\", user_name)], exitcode=exitcode, message=message)\n\n with Scenario(\"user with privilege\"):\n\n with dict_setup(node, table_name, dict_name):\n\n with When(f\"I grant privilege\"):\n node.query(f\"GRANT {privilege} ON {on} TO {grant_target_name}\")\n\n with Then(\"I attempt to dictGet with privilege\"):\n node.query(f\"SELECT dictGet ({dict_name},'y',toUInt64(1))\", settings = [(\"user\", user_name)])\n\n with Scenario(\"user with revoked privilege\"):\n\n with dict_setup(node, table_name, dict_name):\n\n with When(\"I grant privilege\"):\n node.query(f\"GRANT {privilege} ON {on} TO {grant_target_name}\")\n\n with And(\"I revoke privilege\"):\n node.query(f\"REVOKE {privilege} ON {on} FROM {grant_target_name}\")\n\n with When(\"I attempt to dictGet without privilege\"):\n node.query(f\"SELECT dictGet ({dict_name},'y',toUInt64(1))\", settings = [(\"user\", user_name)], exitcode=exitcode, message=message)", 
"def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")", "def test_active_admin_highest_privilege(self) -> None:\n\n self.http_client.request = AsyncMock(\n return_value=FakeResponse.json(\n code=200,\n payload={\n \"active\": True,\n \"sub\": SUBJECT,\n \"scope\": \" \".join(\n [SYNAPSE_ADMIN_SCOPE, MATRIX_USER_SCOPE, MATRIX_GUEST_SCOPE]\n ),\n \"username\": USERNAME,\n },\n )\n )\n request = Mock(args={})\n request.args[b\"access_token\"] = [b\"mockAccessToken\"]\n request.requestHeaders.getRawHeaders = mock_getRawHeaders()\n requester = self.get_success(self.auth.get_user_by_req(request))\n self.http_client.get_json.assert_called_once_with(WELL_KNOWN)\n self.http_client.request.assert_called_once_with(\n method=\"POST\", uri=INTROSPECTION_ENDPOINT, data=ANY, headers=ANY\n )\n self._assertParams()\n self.assertEqual(requester.user.to_string(), \"@%s:%s\" % (USERNAME, SERVER_NAME))\n self.assertEqual(requester.is_guest, False)\n self.assertEqual(requester.device_id, None)\n self.assertEqual(\n get_awaitable_result(self.auth.is_server_admin(requester)), True\n )", "def testGetManagementNodeForZoneAuth(self):\n management_node = self._saveManagementNode()\n response = self._get('inventory/zones/%d/management_nodes/%s/' % (\n management_node.zone.zone_id, management_node.system_ptr_id))\n self.assertEquals(response.status_code, 401)\n\n response = self._get('inventory/zones/%d/management_nodes/%s/' % (\n management_node.zone.zone_id, management_node.system_ptr_id),\n username=\"testuser\", password=\"password\")\n self.assertEquals(response.status_code, 200)", "def test_list_cluster_role(self):\n pass", "def test_server_administrator():\n if is_server_administrator():\n return True\n raise False" ]
[ "0.8366198", "0.79593486", "0.7508638", "0.71059686", "0.6670595", "0.6056394", "0.59110796", "0.58962005", "0.586424", "0.5812169", "0.5746774", "0.5722556", "0.5689453", "0.5635183", "0.5626916", "0.55962235", "0.5576626", "0.553116", "0.5496365", "0.54744405", "0.54153925", "0.541147", "0.53382117", "0.5323337", "0.5317116", "0.5316057", "0.5286515", "0.52852726", "0.52757525", "0.5271501" ]
0.962284
0
Test case for update_container
def test_update_container(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_container():\n return exec_fn(_update_container)", "def test_update_container_privilege(self):\n pass", "def __update_container(self, path, obj_stat):\n try:\n self.logger.debug('Update container interface called')\n return self.asyn_helper.call \\\n (\"update_container\", path, obj_stat)\n except Exception as err:\n self.logger.error(('update_container for %(con_dir)s failed '\n 'close failure: %(exc)s : %(stack)s'),\n {'con_dir' : path,\n 'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err", "def _update_container(self):\n client = docker.from_env()\n self.container = client.containers.get(self.session.container_id)", "def testUpdateAttributeContainer(self):\n redis_client = self._CreateRedisClient()\n\n event_data_stream = events.EventDataStream()\n\n test_store = redis_store.RedisStore(\n storage_type=definitions.STORAGE_TYPE_TASK)\n test_store.Open(redis_client=redis_client)\n\n number_of_containers = test_store.GetNumberOfAttributeContainers(\n event_data_stream.CONTAINER_TYPE)\n self.assertEqual(number_of_containers, 0)\n\n with self.assertRaises(IOError):\n test_store.UpdateAttributeContainer(event_data_stream)\n\n test_store.AddAttributeContainer(event_data_stream)\n\n number_of_containers = test_store.GetNumberOfAttributeContainers(\n event_data_stream.CONTAINER_TYPE)\n self.assertEqual(number_of_containers, 1)\n\n test_store.UpdateAttributeContainer(event_data_stream)\n\n number_of_containers = test_store.GetNumberOfAttributeContainers(\n event_data_stream.CONTAINER_TYPE)\n self.assertEqual(number_of_containers, 1)\n\n test_store.Close()", "def test_update_deployment(self):\n pass", "def test_update(self):\n # this is tested graphically, as it is UI\n pass", "def update(self, **kwargs):\n return self.client.api.update_container(self.id, **kwargs)", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_update(self):\n pass", "def test_redeploy_container_asset(self):\n pass", "def test_update_collection(self):\n pass", "def update_container(self, container_id, values):\n dbdriver = get_instance()\n return dbdriver.update_container(container_id, values)", "def test_create_container(self):\n pass", "def test_update_metadata(self):\n pass", "def test_update_metadata1(self):\n pass", "def test_update_deployment_state(self):\n pass", "def test_destroy_container(self):\n pass", "def test_get_container(self):\n pass", "def update(self, container, representation):\n pass", "def test_update_case(self):\n pass", "def test_update_inventory(self):\n pass", "def patch(self, container_id, **patch):\n context = pecan.request.context\n container = _get_container(container_id)\n check_policy_on_container(container.as_dict(), \"container:update\")\n utils.validate_container_state(container, 'update')\n if 'memory' in patch:\n patch['memory'] = str(patch['memory']) + 'M'\n if 'cpu' in patch:\n patch['cpu'] = float(patch['cpu'])\n compute_api = pecan.request.compute_api\n container = compute_api.container_update(context, container, patch)\n return view.format_container(pecan.request.host_url, container)", "async def test_edit_data(container_requester):\n async with container_requester as requester:\n await requester(\n 'POST', '/db/guillotina', data=json.dumps({\n '@type': 'Item',\n 'id': 'foobar1'\n }))\n await requester(\n 'POST', '/db/guillotina', data=json.dumps({\n '@type': 'Item',\n 'id': 'foobar2'\n }))\n response, _ = await requester(\n 'POST',\n '/db/guillotina/@batch',\n data=json.dumps([{\n 'method': 'PATCH',\n 'endpoint': 'foobar1',\n 
'payload': {\n \"title\": \"Foobar1 changed\"\n }\n }, {\n 'method': 'PATCH',\n 'endpoint': 'foobar2',\n 'payload': {\n \"title\": \"Foobar2 changed\"\n }\n }])\n )\n response, _ = await requester(\n 'POST',\n '/db/guillotina/@batch',\n data=json.dumps([{\n 'method': 'GET',\n 'endpoint': 'foobar1'\n }, {\n 'method': 'GET',\n 'endpoint': 'foobar2'\n }])\n )\n assert len(response) == 2\n assert response[0]['body']['title'] == 'Foobar1 changed'\n assert response[1]['body']['title'] == 'Foobar2 changed'", "def do_update(cs, args):\n opts = {}\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['name'] = args.name\n if 'auto_heal' in args and args.auto_heal:\n opts['auto_heal'] = True\n if 'no_auto_heal' in args and args.no_auto_heal:\n opts['auto_heal'] = False\n opts = zun_utils.remove_null_parms(**opts)\n if not opts:\n raise exc.CommandError(\"You must update at least one property\")\n container = cs.containers.update(args.container, **opts)\n _show_container(container)", "def test_update_cloud(self):\n pass", "def test_container_cycle(self):\n # Before Create\n print(\"Create\")\n rep = post(self.url + \"/search\", data={'name': name})\n self.errorCatch(rep)\n\n # Create\n rep = post(self.url + \"/create\", data={\n 'image': default_image,\n 'homepath': \"/nashome/guest/test\",\n 'naspath': \"/home/nas/test\",\n 'command': \"tail -f /dev/null\",\n 'name': name})\n self.checkRunning()\n\n # Double create\n rep = post(self.url + \"/create\", data={\n 'image': default_image,\n 'homepath': \"/nashome/guest/test\",\n 'naspath': \"/home/nas/test\",\n 'command': \"tail -f /dev/null\",\n 'name': name})\n self.errorCatch(rep)\n\n # Check by api\n con = client.containers.get(name)\n self.assertIn(\"tmp0\", con.exec_run(\"ls /home/nas\").output.decode())\n self.assertIn(\"tmp1\", con.exec_run(\"ls /home/ubuntu\").output.decode())\n self.assertEqual(con.status, \"running\")\n\n # Stop\n con.exec_run(\"touch /opt/tmp2\").output.decode()\n print(\"Stop\")\n rep = post(self.url + \"/stop\", data={'name': name})\n self.checkOK(rep)\n\n # check stop\n rep = post(self.url + \"/search\", data={'name': name})\n self.checkOK(rep)\n rep = rep.json()\n self.assertIsInstance(rep[\"data\"], dict)\n self.assertEqual(rep['data']['status'], \"exited\")\n\n # start\n print(\"Resume\")\n rep = post(self.url + \"/start\", data={'name': name})\n self.checkOK(rep)\n self.checkRunning()\n con = client.containers.get(name)\n self.assertIn(\"tmp2\", con.exec_run(\"ls /opt\").output.decode())\n\n # change pw\n print(\"Change Password\")\n con.exec_run(\"adduser ubuntu\")\n rep = post(self.url + \"/passwd\", data={'name': name,\n 'pw': \"tmpPW\"})\n self.checkOK(rep)\n self.assertIn(\"tmpPW\", con.exec_run(\"cat /etc/shadow\").output.decode())\n\n # commit\n print(\"Commit\")\n rep = post(self.url + \"/commit\", data={'name': name,\n 'newname': name})\n self.checkOK(rep)\n\n # search image\n rep = post(self.url + \"/search/image\", data={'name': name})\n rep = rep.json()\n self.assertIsInstance(rep['data'], dict)\n\n # delete\n print(\"Delete\")\n rep = post(self.url + \"/delete\", data={'name': name})\n self.checkOK(rep)\n\n # check delete\n rep = post(self.url + \"/search\", data={'name': name})\n self.errorCatch(rep)\n\n # Delete Image\n print(\"Delete Image\")\n rep = post(self.url + \"/delete/image\", data={'name': name})\n self.checkOK(rep)\n\n # Check if delete it\n rep = post(self.url + \"/search/image\", data={'name': name})\n self.errorCatch(rep)", "def put_container(self, account, container):\n \n pass", 
"def test_update_client(self):\n pass" ]
[ "0.76290596", "0.7151693", "0.6988617", "0.69339997", "0.68087256", "0.6795811", "0.679012", "0.6744848", "0.66460276", "0.66460276", "0.66460276", "0.6637277", "0.6537386", "0.6536095", "0.6529612", "0.65175", "0.6516598", "0.65146124", "0.6504375", "0.64841086", "0.64686704", "0.64384615", "0.6408856", "0.6401972", "0.63223034", "0.6278651", "0.6253443", "0.6252398", "0.6240707", "0.61954194" ]
0.95231485
0
Test case for update_container_privilege
def test_update_container_privilege(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_container_privilege(self):\n pass", "def test_destroy_container_privilege(self):\n pass", "def test_show_container_privilege(self):\n pass", "def test_update_container(self):\n pass", "def test_index_container_privileges(self):\n pass", "def test_update_privilege_with_invalid_volume_size(self):\n\n # Create a tenant\n tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)\n\n # Create a privilege without volume size settings\n privilege = vim.vcs.storage.DatastoreAccessPrivilege()\n privilege.datastore = self.datastore\n privilege.allow_create = True\n\n # Add privilege to the tenant\n self.tenantMgr.AddPrivilege(tenant, privilege)\n\n # Update the privilege with invalid volume size\n with self.assertRaises(vmodl.fault.InvalidArgument):\n self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_max_size=2048, volume_total_size=1024)", "def test_update_privilege_with_invalid_max_size(self):\n\n # Create a tenant\n tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)\n\n # Create a privilege without volume size settings\n privilege = vim.vcs.storage.DatastoreAccessPrivilege()\n privilege.datastore = self.datastore\n privilege.allow_create = True\n privilege.volume_total_size = 1024\n\n # Add privilege to the tenant\n self.tenantMgr.AddPrivilege(tenant, privilege)\n\n # Update the privilege with invalid volume size\n with self.assertRaises(vmodl.fault.InvalidArgument):\n self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_max_size=2048)", "def test_update_privilege_with_invalid_total_size(self):\n\n # Create a tenant\n tenant = self.tenantMgr.CreateTenant(name=TENANT_NAME, description=TENANT_DESC)\n\n # Create a privilege without volume size settings\n privilege = vim.vcs.storage.DatastoreAccessPrivilege()\n privilege.datastore = self.datastore\n privilege.allow_create = True\n privilege.volume_max_size = 2048\n\n # Add privilege to the tenant\n self.tenantMgr.AddPrivilege(tenant, privilege)\n\n # Update the privilege with invalid volume size\n with self.assertRaises(vmodl.fault.InvalidArgument):\n self.tenantMgr.UpdatePrivilege(tenant, self.datastore, volume_total_size=1024)", "def test_add_and_remove_privilege(self):\n\n self.create_common_users_and_groups()\n\n sgp = SetGroupPrivilegesAPI(\n tsurl=TS_URL,\n username=TS_USER,\n password=TS_PASSWORD,\n disable_ssl=True,\n )\n sgp.add_privilege(\n groups=[\"Group 1\", \"Group 2\"], privilege=Privileges.CAN_USE_SPOTIQ\n )", "def test_update_privileges_fails(self):\n user = User.create(name='foo', email='[email protected]', user_type='user',\n owned_teams=[], owned_organizations=[])\n user.put()\n\n # You get a 200, but the changes you requested don't happen.\n response = self.testapp.put_json(\n '/api/users/{}'.format(user.uid),\n {'user_type': 'super_admin', 'owned_teams': ['Team_foo'],\n 'owned_organizations': ['Organization_foo']},\n headers=self.login_headers(user),\n )\n user_dict = json.loads(response.body)\n self.assertEqual(user.user_type, user_dict['user_type'])\n self.assertEqual(user.owned_teams, user_dict['owned_teams'])\n self.assertEqual(user.owned_organizations,\n user_dict['owned_organizations'])\n\n # Also not changed in the db.\n fetched_user = User.get_by_id(user.uid)\n self.assertEqual(user.user_type, fetched_user.user_type)\n self.assertEqual(user.owned_teams, fetched_user.owned_teams)\n self.assertEqual(user.owned_organizations,\n fetched_user.owned_organizations)", "def test_aws_service_api_vm_security_group_put(self):\n pass", "def 
test_update_hyperflex_cluster_storage_policy(self):\n pass", "def test_update_hyperflex_vcenter_config_policy(self):\n pass", "def test_edit_user_enable_permit_sudo(driver):\n pass", "def test_update_virtualization_realm(self):\n pass", "def test_update_resource_group(self):\n pass", "def test_update_hyperflex_ucsm_config_policy(self):\n pass", "def update_container():\n return exec_fn(_update_container)", "def test_update_hyperflex_sys_config_policy(self):\n pass", "def test_put_owner(self):\n url = reverse(\n 'projectroles:api_role_update',\n kwargs={'roleassignment': self.update_as.sodar_uuid},\n )\n put_data = {\n 'role': PROJECT_ROLE_OWNER,\n 'user': str(self.assign_user.sodar_uuid),\n }\n response = self.request_knox(url, method='PUT', data=put_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_ipam_roles_update(self):\n pass", "def prepare_container_change(instance, **kwargs):\n prepare_permission_change(instance)\n instance._old_containers = set(instance.containers)", "def test_update_bios_policy(self):\n pass", "def test_patch_cluster_role(self):\n pass", "def test_update_hyperflex_ext_iscsi_storage_policy(self):\n pass", "def test_update_ipsecpolicy(self):\r\n resource = 'ipsecpolicy'\r\n cmd = ipsecpolicy.UpdateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)\r\n self._test_update_resource(resource, cmd, 'myid',\r\n ['myid', '--name', 'newname'],\r\n {'name': 'newname', })", "def test_vault_update_vault_section(self):\n pass", "def testUpdateAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n response = self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])", "def test_change_provisioned_throughput_usual_case():", "def test_pulls_acl_change(self):\n record = self.good_pullrecord()\n record['status'] = 'PULLING'\n id = self.images.insert(record)\n self.assertIsNotNone(id)\n # Now try to submit an ACL change\n session = self.m.new_session(self.auth, self.system)\n pr = {\n 'system': record['system'],\n 'itype': record['itype'],\n 'tag': record['pulltag'],\n 'remotetype': 'dockerv2',\n 'userACL': [1001, 1002],\n 'groupACL': [1003, 1004]\n }\n rec = self.m.pull(session, pr) # ,delay=False)\n self.assertEqual(rec['status'], 'PULLING')" ]
[ "0.766053", "0.7609293", "0.74106824", "0.69123495", "0.67316014", "0.62694347", "0.61879647", "0.61383", "0.6041444", "0.6019781", "0.586566", "0.5853104", "0.57842124", "0.57581365", "0.5677262", "0.56765527", "0.56648535", "0.56625587", "0.5658636", "0.5632956", "0.5630784", "0.56158006", "0.5608021", "0.56007457", "0.55840427", "0.55801624", "0.5542741", "0.55251485", "0.5523725", "0.55181783" ]
0.9553669
0
Return the number of available cores.
def cpu_count():
    num_available_cores = multiprocessing.cpu_count()
    return num_available_cores
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_num_processors():\n cores = 0\n try:\n cores = len(os.sched_getaffinity(0))\n except AttributeError:\n cores = cpu_count()\n return cores", "def cpu_count_cores():\n return cext.cpu_count_cores()", "def get_ncpu():\n from multiprocessing import cpu_count\n return cpu_count()", "def cores(self):\n return int(self.get('cores'))", "def available_cpu_count():\n import os\n import multiprocessing\n try:\n import psutil\n except ImportError:\n psutil = None\n num_cpu = 0\n\n if 'QUTIP_NUM_PROCESSES' in os.environ:\n # We consider QUTIP_NUM_PROCESSES=0 as unset.\n num_cpu = int(os.environ['QUTIP_NUM_PROCESSES'])\n\n if num_cpu == 0 and 'SLURM_CPUS_PER_TASK' in os.environ:\n num_cpu = int(os.environ['SLURM_CPUS_PER_TASK'])\n\n if num_cpu == 0 and hasattr(os, 'sched_getaffinity'):\n num_cpu = len(os.sched_getaffinity(0))\n\n if (\n num_cpu == 0\n and psutil is not None\n and hasattr(psutil.Process(), \"cpu_affinity\")\n ):\n num_cpu = len(psutil.Process().cpu_affinity())\n\n if num_cpu == 0:\n try:\n num_cpu = multiprocessing.cpu_count()\n except NotImplementedError:\n pass\n\n return num_cpu or 1", "def getThreads():\n if sys.platform == 'win32':\n return int(os.environ['NUMBER_OF_PROCESSORS'])\n else:\n return int(os.popen('grep -c cores /proc/cpuinfo').read())", "def cores(self):\n available_cores = self.num_cores_per_socket * self.num_cpu_sockets\n return int(available_cores) # type: ignore", "def _get_threads():\n if sys.platform == 'win32':\n # return (int)(os.environ['NUMBER_OF_PROCESSORS'])\n return 0 # save trouble, do not use multiprocessing on windows\n else:\n return (int)(os.popen('grep -c cores /proc/cpuinfo').read())", "def numcpu () :\n import multiprocessing\n return multiprocessing.cpu_count()", "def processor_count():\n # Linux, Unix and MacOS:\n if hasattr(os, \"sysconf\"):\n if os.sysconf_names.has_key(\"SC_NPROCESSORS_ONLN\"):\n # Linux & Unix:\n ncpus = os.sysconf(\"SC_NPROCESSORS_ONLN\")\n if isinstance(ncpus, int) and ncpus > 0:\n return ncpus\n else: # OSX:\n return int(os.popen2(\"sysctl -n hw.ncpu\")[1].read())\n # Windows:\n if os.environ.has_key(\"NUMBER_OF_PROCESSORS\"):\n ncpus = int(os.environ[\"NUMBER_OF_PROCESSORS\"]);\n if ncpus > 0:\n return ncpus\n return 1 # Default", "def count_cpus():\r\n try:\r\n return multiprocessing.cpu_count()\r\n except Exception:\r\n logging.exception('can not get cpu count from'\r\n ' multiprocessing.cpu_count()')\r\n cpuinfo = get_cpuinfo()\r\n # Returns at least one cpu. 
Check comment #1 in crosbug.com/p/9582.\r\n return len(cpuinfo) or 1", "def cpu_count():\n if multiprocessing is None:\n return 1\n try:\n return multiprocessing.cpu_count()\n except NotImplementedError:\n pass\n try:\n return os.sysconf(\"SC_NPROCESSORS_CONF\")\n except (AttributeError, ValueError):\n pass\n gen_log.error(\"Could not detect number of processors; assuming 1\")\n return 1", "def get_ncores(self):\n return self._ncores", "def cores(self):\n available_cores = self.cores_per_socket * self.sockets\n return int(available_cores) # type: ignore", "def determine_number_of_cpus():\n\n # Python 2.6+\n try:\n import multiprocessing\n return multiprocessing.cpu_count()\n except (ImportError, NotImplementedError):\n pass\n\n # POSIX\n try:\n res = int(os.sysconf('SC_NPROCESSORS_ONLN'))\n\n if res > 0:\n return res\n except (AttributeError, ValueError):\n pass\n\n # Windows\n try:\n res = int(os.environ['NUMBER_OF_PROCESSORS'])\n\n if res > 0:\n return res\n except (KeyError, ValueError):\n pass\n\n # jython\n try:\n from java.lang import Runtime\n runtime = Runtime.getRuntime()\n res = runtime.availableProcessors()\n if res > 0:\n return res\n except ImportError:\n pass\n\n # BSD\n try:\n sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'], stdout=subprocess.PIPE)\n sc_stdout = sysctl.communicate()[0]\n res = int(sc_stdout)\n\n if res > 0:\n return res\n except (OSError, ValueError):\n pass\n\n # Linux\n try:\n res = open('/proc/cpuinfo').read().count('processor\\t:')\n\n if res > 0:\n return res\n except IOError:\n pass\n\n # Solaris\n try:\n pseudo_devices = os.listdir('/devices/pseudo/')\n expr = re.compile('^cpuid@[0-9]+$')\n\n res = 0\n for pd in pseudo_devices:\n if expr.match(pd) is not None:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n # Other UNIXes (heuristic)\n try:\n try:\n dmesg = open('/var/run/dmesg.boot').read()\n except IOError:\n dmesg_process = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)\n dmesg = dmesg_process.communicate()[0]\n\n res = 0\n while '\\ncpu' + str(res) + ':' in dmesg:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n raise Exception('Can not determine number of CPUs on this system')", "def cpu_count(only_physical_cores=False):\n if mp is None:\n return 1\n\n return loky.cpu_count(only_physical_cores=only_physical_cores)", "def cpu_count():\r\n if mp is None:\r\n return 1\r\n return mp.cpu_count()", "def getThreads():\r\n return multiprocessing.cpu_count()", "def logical_cores(self):\n return int(self.num_logical_cpus) # type: ignore", "def num_cpus(self):\n if 'QUTIP_NUM_PROCESSES' in os.environ:\n num_cpus = int(os.environ['QUTIP_NUM_PROCESSES'])\n else:\n num_cpus = available_cpu_count()\n os.environ['QUTIP_NUM_PROCESSES'] = str(num_cpus)\n return num_cpus", "def num_cores(self):\n return self.mpi_procs * self.omp_threads", "def compute_cores(config):\n cores = config.getint('General','cores')\n if cores > mp.cpu_count():\n cores = mp.cpu_count()\n return cores", "def num_cores(self):\n return self.cores_per_socket * self.sockets_per_node * self.num_nodes", "def _get_num_cpus():\r\n # we try to determine num CPUs by using different approaches.\r\n # SC_NPROCESSORS_ONLN seems to be the safer and it is also\r\n # used by multiprocessing module\r\n try:\r\n return os.sysconf(\"SC_NPROCESSORS_ONLN\")\r\n except ValueError:\r\n # as a second fallback we try to parse /proc/cpuinfo\r\n num = 0\r\n f = open('/proc/cpuinfo', 'r')\r\n try:\r\n lines = f.readlines()\r\n finally:\r\n f.close()\r\n for line in 
lines:\r\n if line.lower().startswith('processor'):\r\n num += 1\r\n\r\n # unknown format (e.g. amrel/sparc architectures), see:\r\n # http://code.google.com/p/psutil/issues/detail?id=200\r\n # try to parse /proc/stat as a last resort\r\n if num == 0:\r\n f = open('/proc/stat', 'r')\r\n try:\r\n lines = f.readlines()\r\n finally:\r\n f.close()\r\n search = re.compile('cpu\\d')\r\n for line in lines:\r\n line = line.split(' ')[0]\r\n if search.match(line):\r\n num += 1\r\n\r\n if num == 0:\r\n raise RuntimeError(\"can't determine number of CPUs\")\r\n return num", "def get_cpu_count():\n\n # #Check nproc. I have found it respecting the visible CPUs in SLURM:\n # try:\n # m = subprocess.run(['nproc'], stdout=subprocess.PIPE)\n # if m:\n # res = int(m.stdout.decode('ascii').replace(\"\\n\", \"\"))\n # if res > 0:\n # return res\n # except:\n # pass\n \n\n # cpuset\n # cpuset may restrict the number of *available* processors\n try:\n m = re.search(r'(?m)^Cpus_allowed:\\s*(.*)$',\n open('/proc/self/status').read())\n if m:\n res = bin(int(m.group(1).replace(',', ''), 16)).count('1')\n if res > 0:\n return res\n except IOError:\n pass\n\n # Python 2.6+\n try:\n import multiprocessing\n return multiprocessing.cpu_count()\n except (ImportError, NotImplementedError):\n pass\n\n # https://github.com/giampaolo/psutil\n try:\n import psutil\n return psutil.cpu_count() # psutil.NUM_CPUS on old versions\n except (ImportError, AttributeError):\n pass\n\n # POSIX\n try:\n res = int(os.sysconf('SC_NPROCESSORS_ONLN'))\n\n if res > 0:\n return res\n except (AttributeError, ValueError):\n pass\n\n # Windows\n try:\n res = int(os.environ['NUMBER_OF_PROCESSORS'])\n\n if res > 0:\n return res\n except (KeyError, ValueError):\n pass\n\n # jython\n try:\n from java.lang import Runtime\n runtime = Runtime.getRuntime()\n res = runtime.availableProcessors()\n if res > 0:\n return res\n except ImportError:\n pass\n\n # BSD\n try:\n sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],\n stdout=subprocess.PIPE)\n scStdout = sysctl.communicate()[0]\n res = int(scStdout)\n\n if res > 0:\n return res\n except (OSError, ValueError):\n pass\n\n # Linux\n try:\n res = open('/proc/cpuinfo').read().count('processor\\t:')\n\n if res > 0:\n return res\n except IOError:\n pass\n\n # Solaris\n try:\n pseudoDevices = os.listdir('/devices/pseudo/')\n res = 0\n for pd in pseudoDevices:\n if re.match(r'^cpuid@[0-9]+$', pd):\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n # Other UNIXes (heuristic)\n try:\n try:\n dmesg = open('/var/run/dmesg.boot').read()\n except IOError:\n dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)\n dmesg = dmesgProcess.communicate()[0]\n\n res = 0\n while '\\ncpu' + str(res) + ':' in dmesg:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n raise Exception('Can not determine number of CPUs on this system')", "def number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1", "def determineNumberOfCPUs():\n\n # Python 2.6+\n try:\n import multiprocessing\n return multiprocessing.cpu_count()\n except (ImportError, NotImplementedError):\n pass\n\n # http://code.google.com/p/psutil/\n try:\n import psutil\n return psutil.NUM_CPUS\n except (ImportError, AttributeError):\n pass\n\n # POSIX\n try:\n res = int(os.sysconf('SC_NPROCESSORS_ONLN'))\n\n if res > 0:\n return res\n except (AttributeError, ValueError):\n pass\n\n # Windows\n try:\n res = int(os.environ['NUMBER_OF_PROCESSORS'])\n\n if res > 0:\n return res\n except (KeyError, ValueError):\n 
pass\n\n # jython\n try:\n from java.lang import Runtime\n runtime = Runtime.getRuntime()\n res = runtime.availableProcessors()\n if res > 0:\n return res\n except ImportError:\n pass\n\n # BSD\n try:\n sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],\n stdout=subprocess.PIPE)\n scStdout = sysctl.communicate()[0]\n res = int(scStdout)\n\n if res > 0:\n return res\n except (OSError, ValueError):\n pass\n\n # Linux\n try:\n res = open('/proc/cpuinfo').read().count('processor\\t:')\n\n if res > 0:\n return res\n except IOError:\n pass\n\n # Solaris\n try:\n pseudoDevices = os.listdir('/devices/pseudo/')\n expr = re.compile('^cpuid@[0-9]+$')\n\n res = 0\n for pd in pseudoDevices:\n if expr.match(pd) != None:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n # Other UNIXes (heuristic)\n try:\n try:\n dmesg = open('/var/run/dmesg.boot').read()\n except IOError:\n dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)\n dmesg = dmesgProcess.communicate()[0]\n\n res = 0\n while '\\ncpu' + str(res) + ':' in dmesg:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n raise Exception('Can not determine number of CPUs on this system')", "def threads_per_core(self) -> int:\n return pulumi.get(self, \"threads_per_core\")", "def determineNumberOfCPUs():\n\n # Python 2.6+\n try:\n import multiprocessing\n return multiprocessing.cpu_count()\n except (ImportError,NotImplementedError):\n pass\n\n # POSIX\n try:\n res = int(os.sysconf('SC_NPROCESSORS_ONLN'))\n\n if res > 0:\n return res\n except (AttributeError,ValueError):\n pass\n\n # Windows\n try:\n res = int(os.environ['NUMBER_OF_PROCESSORS'])\n\n if res > 0:\n return res\n except (KeyError, ValueError):\n pass\n\n # jython\n try:\n from java.lang import Runtime\n runtime = Runtime.getRuntime()\n res = runtime.availableProcessors()\n if res > 0:\n return res\n except ImportError:\n pass\n\n # BSD\n try:\n sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'],\n stdout=subprocess.PIPE)\n scStdout = sysctl.communicate()[0]\n res = int(scStdout)\n\n if res > 0:\n return res\n except (OSError, ValueError):\n pass\n\n # Linux\n try:\n res = open('/proc/cpuinfo').read().count('processor\\t:')\n\n if res > 0:\n return res\n except IOError:\n pass\n\n # Solaris\n try:\n pseudoDevices = os.listdir('/devices/pseudo/')\n expr = re.compile('^cpuid@[0-9]+$')\n\n res = 0\n for pd in pseudoDevices:\n if expr.match(pd) != None:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n # Other UNIXes (heuristic)\n try:\n try:\n dmesg = open('/var/run/dmesg.boot').read()\n except IOError:\n dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)\n dmesg = dmesgProcess.communicate()[0]\n\n res = 0\n while '\\ncpu' + str(res) + ':' in dmesg:\n res += 1\n\n if res > 0:\n return res\n except OSError:\n pass\n\n raise Exception('Can not determine number of CPUs on this system')", "def get_used(self):\n return int(self.used_cores)" ]
[ "0.89551276", "0.86170775", "0.8316827", "0.8212397", "0.8195297", "0.81726545", "0.8117976", "0.8109059", "0.81060207", "0.8065131", "0.7973178", "0.7964775", "0.7926608", "0.79141355", "0.78823775", "0.7857191", "0.7844706", "0.7841263", "0.78397304", "0.7825439", "0.78199613", "0.7807727", "0.7781659", "0.77803195", "0.773752", "0.7684795", "0.7673642", "0.7626505", "0.7591798", "0.7572787" ]
0.8886122
1
Set all the device and server owned properties.
def set_all_properties(device: Device, test_cfg: TestCfg):
    cprint("\nSet device owned properties.", color="cyan", flush=True)
    for key, value in test_cfg.mock_data.items():
        device.send(test_cfg.interface_device_prop, "/sensor-id/" + key, value)
        time.sleep(0.005)

    cprint("\nSet server owned properties.", color="cyan", flush=True)
    for key, value in test_cfg.mock_data.items():
        value = prepare_transmit_data(key, value)
        post_server_interface(test_cfg, test_cfg.interface_server_prop, "/sensor-id/" + key, value)
        time.sleep(0.005)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unset_some_properties(device: Device, test_cfg: TestCfg):\n cprint(\"\\nUnset some device owned properties.\", color=\"cyan\", flush=True)\n for key, _ in test_cfg.mock_data.items():\n if key not in [\"datetime_endpoint\", \"booleanarray_endpoint\"]:\n device.unset_property(test_cfg.interface_device_prop, \"/sensor-id/\" + key)\n time.sleep(0.005)\n\n cprint(\"\\nUnset some server owned properties.\", color=\"cyan\", flush=True)\n for key, _ in test_cfg.mock_data.items():\n if key not in [\"binaryblob_endpoint\", \"stringarray_endpoint\"]:\n delete_server_interface(test_cfg, test_cfg.interface_server_prop, \"/sensor-id/\" + key)\n time.sleep(0.005)", "def set_ownership(self):\n\n os.chmod(os.path.join(\"%s\" % NetworkManager_conf_dir, self.connection._id), 0600)", "def set_properties(struct):", "def set_owner(self, data):\n self._owner = self._uni(data)\n self.add_payload('owner', data)", "def __setstate__(self,values):\n self.initDefault()\n setter = object.__setattr__\n for value,attr in zip(values,self.persistent):\n setter(self,attr,value)\n if self.dirty_sizeCrc == None:\n self.dirty_sizeCrc = {} #--Use empty dict instead.\n self.refreshDataSizeCrc()", "def set_settings_devices(self):\n self.set_thermostat, self.set_humidifier, self.set_sprinklers, self.set_ventilation = self.settings[3:]", "def _setAttributes(self, reactor, done):\n self.reactor = reactor\n self._done = done", "def set_props(cont):\n\n\t# Basic\n\town = cont.owner\n\t\n\t# Sensors\n\ts_keyboard = cont.sensors['keyboard']\n\t\n\t# Objects\n\to_group = own.groupObject\n\t\t\n\t# Properties\n\tp_player = globalDict['state']['player']\n\tp_controls = globalDict['settings']['controls']\n\tup = p_controls['key_up'] in s_keyboard.inputs.keys()\n\tdown = p_controls['key_down'] in s_keyboard.inputs.keys()\n\tleft = p_controls['key_left'] in s_keyboard.inputs.keys()\n\tright = p_controls['key_right'] in s_keyboard.inputs.keys()\n\trun = p_controls['key_run'] in s_keyboard.inputs.keys()\n\tcrouch = p_controls['key_crouch'] in s_keyboard.inputs.keys()\n\t\n\tif o_group != None:\n\t\t\n\t\t#print(s_keyboard.inputs)\n\t\t\n\t\t# Vertical\n\t\tif not up and not down or up and down:\n\t\t\tp_player['mov_v'] = 0\n\t\t\n\t\tif up and not down:\n\t\t\tp_player['mov_v'] = 1\n\t\t\n\t\tif not up and down:\n\t\t\tp_player['mov_v'] = -1\n\t\t\t\n\t\t# Horizontal\n\t\tif not left and not right or left and right:\n\t\t\tp_player['mov_h'] = 0\n\t\t\n\t\tif right and not left:\n\t\t\tp_player['mov_h'] = 1\n\t\t\n\t\tif not right and left:\n\t\t\tp_player['mov_h'] = -1\n\t\t\t\n\t\t# None\n\t\tif not run and not crouch or run and crouch:\n\t\t\tp_player['mov_run'] = 0\n\t\t\tp_player['mov_crouch'] = 0\n\t\t\t\n\t\t# Run\n\t\tif run and not crouch:\n\t\t\tp_player['mov_run'] = 1\n\t\t\tp_player['mov_crouch'] = 0\n\t\t\t\n\t\t# Crouch\n\t\tif not run and crouch:\n\t\t\tp_player['mov_run'] = 0\n\t\t\tp_player['mov_crouch'] = 1\n\t\t\t\n\t\t\n\tpass", "def set_properties(self, props=None):\n props = [] if props is None else props\n prop = dav.Prop() + props\n set = dav.Set() + prop\n root = dav.PropertyUpdate() + set\n\n r = self._query(root, query_method=\"proppatch\")\n\n statuses = r.tree.findall(\".//\" + dav.Status.tag)\n for s in statuses:\n if \" 200 \" not in s.text:\n raise error.PropsetError(s.text)\n\n return self", "def set(self, properties):\n raise NotImplementedError", "def set_attrs(self, username, attrs):\n pass", "def setUp(self):\n super(ServerMetadataTest, self).setUp()\n self.meta = {'meta_key_1': 'meta_value_1',\n 
'meta_key_2': 'meta_value_2'}\n self.servers_client.set_server_metadata(self.server.id, self.meta)", "def _set_attributes(self):", "def set_owner(self, owner):\n self.settings[\"owner\"] = owner", "def set_device_parameters(request):\n def fin():\n request.cls.device.close()\n request.addfinalizer(fin)\n\n request.cls.driver = ros.ROSDriver\n request.cls.patched_driver = PatchedROSDevice\n request.cls.vendor = 'ros'\n parent_conftest.set_device_parameters(request)", "def set_occupant(self, obj):\n\t\tpass", "def setUp(self):\n # Direct connection used to match the property values\n self.sockobj = socket(AF_INET, SOCK_STREAM)\n self.sockobj.settimeout(socket_timeout)\n # Connect to the selected server\n self.sockobj.connect(server) \n self.pyclient = PySimpleClient()\n self.cmd_num = 0\n for servo_type in app_nr.values():\n self.__dict__[servo_type] = self.pyclient.getComponent(\"MINORSERVO/\" + servo_type)", "def _init_owners(self, identity, record, **kwargs):\n # if the given identity is that of a user, we add the\n # corresponding user to the owners (record.access.owned_by)\n is_sys_id = system_process in identity.provides\n if not record.access.owned_by and not is_sys_id:\n record.access.owned_by.add({\"user\": identity.id})", "def set_owner(self, owner):\n self.__owner = owner", "def set_object_full_permissions(self, agent):\n\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 1, PermissionsMask.Copy)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 1, PermissionsMask.Modify)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 1, PermissionsMask.Transfer)", "def __setstate__(self, dict):\n\n\t\tself.__dict__ = dict\n\n\t\t# Set missing values to defaults.\n\t\tself._device = None\n\t\tself.resources = {}", "def _init_attributes(self):\n if os.name == \"nt\":\n if \"64\" in platform.architecture()[0]:\n platform_arch = \"x86_64\"\n elif \"32\" in platform.architecture()[0]:\n platform_arch = \"i386\"\n else:\n platform_arch = platform.architecture()\n os_ver = f\"Windows-{platform.win32_ver()[1]}\"\n else:\n platform_arch = platform.machine()\n if platform.system() == \"Darwin\":\n os_ver = f\"macOS-{platform.mac_ver()[0]}\"\n else:\n os_ver = \"-\".join(linux_distribution()[0:2])\n\n license_chunks = LICENSE.split(\" \")\n if license_chunks[0] == \"GPLv2\":\n client_license = \"GPL-2.0\"\n else:\n client_license = \"Commercial\"\n\n default_attributes = {\n # Process id\n \"_pid\": str(os.getpid()),\n # Platform architecture\n \"_platform\": platform_arch,\n # OS version\n \"_os\": os_ver,\n # Hostname of the local machine\n \"_source_host\": socket.gethostname(),\n # Client's name\n \"_client_name\": \"mysql-connector-python\",\n # Client's version\n \"_client_version\": \".\".join([str(x) for x in VERSION[0:3]]),\n # Client's License identifier\n \"_client_license\": client_license,\n }\n self._settings[\"attributes\"].update(default_attributes)\n\n if \"connection-attributes\" in self._settings:\n for attr_name in self._settings[\"connection-attributes\"]:\n attr_value = self._settings[\"connection-attributes\"][attr_name]\n # Validate name type\n if not isinstance(attr_name, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' must be a string type\"\n )\n # Validate attribute name limit 32 characters\n if len(attr_name) > 32:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' exceeds 32 characters \"\n \"limit size\"\n )\n # Validate names in connection-attributes cannot start with \"_\"\n if 
attr_name.startswith(\"_\"):\n raise InterfaceError(\n \"Key names in 'session-connect-attributes' cannot \"\n f\"start with '_', found: {attr_name}\"\n )\n # Validate value type\n if not isinstance(attr_value, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value '{attr_value}' \"\n \" must be a string type\"\n )\n\n # Validate attribute value limit 1024 characters\n if len(attr_value) > 1024:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value: '{attr_value}' \"\n \"exceeds 1024 characters limit size\"\n )\n\n self._settings[\"attributes\"][attr_name] = attr_value", "def initAttributes(self):\n CCSDS.DU.DataUnit.initAttributes(self)\n self.dataFieldHeaderFlag = 0\n self.setPacketLength()", "def elyra_owned_properties(self) -> Set[str]:\n return self._elyra_owned_properties", "def setShadowDefaults(self):\n for user in self.shadowDefault.keys():\n #if not self.userspace.has_key(user):\n self.userspace[user].info = self.shadowDefault[user]", "def setOwner(self, long_name, short_name=None):\n nChars = 3\n minChars = 2\n if long_name is not None:\n long_name = long_name.strip()\n if short_name is None:\n words = long_name.split()\n if len(long_name) <= nChars:\n short_name = long_name\n elif len(words) >= minChars:\n short_name = ''.join(map(lambda word: word[0], words))\n else:\n trans = str.maketrans(dict.fromkeys('aeiouAEIOU'))\n short_name = long_name[0] + long_name[1:].translate(trans)\n if len(short_name) < nChars:\n short_name = long_name[:nChars]\n t = mesh_pb2.ToRadio()\n if long_name is not None:\n t.set_owner.long_name = long_name\n if short_name is not None:\n short_name = short_name.strip()\n if len(short_name) > nChars:\n short_name = short_name[:nChars]\n t.set_owner.short_name = short_name\n self._sendToRadio(t)", "def setup_devices(self, devices):\n \n self.devices = devices\n \n barrier = ReusableBarrier(len(devices))\n lock = Lock()\n aux_dict = {}\n\n for device in devices:\n device.barrier = barrier\n device.global_lock = lock\n for location in device.sensor_data: \n if location not in aux_dict:\n aux_dict[location] = Semaphore() \n \n for device in devices:\n device.device_semaphores = aux_dict\n\n self.setup_master_thread()", "def set_owner(self, owner, is_stream=False):\n if is_stream:\n self._logger.debug('TCP Proto Stream is set!')\n self._stream = owner\n else:\n self._server = owner", "def script_set_device(self,udid=None):\n self.desired_caps['udid'] = udid;", "def _setVals(self, remote_app_id=0, remote_node=0, remote_port=0):\n self.remote_app_id = remote_app_id\n self.remote_port = remote_port\n self.remote_node = remote_node" ]
[ "0.59108984", "0.58113533", "0.54988265", "0.5497237", "0.53480184", "0.5347657", "0.531481", "0.52248204", "0.5189221", "0.5167563", "0.5050707", "0.5040113", "0.502045", "0.50000304", "0.49858457", "0.49772954", "0.49768072", "0.4970701", "0.49702758", "0.49687654", "0.49585325", "0.4932521", "0.4913157", "0.4898704", "0.48963654", "0.4876461", "0.48568547", "0.48520148", "0.48516935", "0.4844684" ]
0.7363796
0
Unset some of the device and server owned properties.
def unset_some_properties(device: Device, test_cfg: TestCfg):
    cprint("\nUnset some device owned properties.", color="cyan", flush=True)
    for key, _ in test_cfg.mock_data.items():
        if key not in ["datetime_endpoint", "booleanarray_endpoint"]:
            device.unset_property(test_cfg.interface_device_prop, "/sensor-id/" + key)
            time.sleep(0.005)

    cprint("\nUnset some server owned properties.", color="cyan", flush=True)
    for key, _ in test_cfg.mock_data.items():
        if key not in ["binaryblob_endpoint", "stringarray_endpoint"]:
            delete_server_interface(test_cfg, test_cfg.interface_server_prop, "/sensor-id/" + key)
            time.sleep(0.005)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unset():\n rino.remote.unset()", "def clearProperties(*args):", "def clearProperties(*args):", "def clearProperties(*args):", "def clearProperties(*args):", "def clear_properties(self):\n self.properties.clear()", "def set_all_properties(device: Device, test_cfg: TestCfg):\n cprint(\"\\nSet device owned properties.\", color=\"cyan\", flush=True)\n for key, value in test_cfg.mock_data.items():\n device.send(test_cfg.interface_device_prop, \"/sensor-id/\" + key, value)\n time.sleep(0.005)\n\n cprint(\"\\nSet server owned properties.\", color=\"cyan\", flush=True)\n for key, value in test_cfg.mock_data.items():\n value = prepare_transmit_data(key, value)\n post_server_interface(test_cfg, test_cfg.interface_server_prop, \"/sensor-id/\" + key, value)\n time.sleep(0.005)", "def unset(self, *list):\n attrs = dict().fromkeys(list, \"\")\n self.graph._setattrs(handle=self.handle, **attrs)", "def clean_session(self):\n unused_entries = ['root_freespace', 'home_freespace', 'hardvideo',\n 'optional_partitions', 'boot_id', 'greeter', 'display',\n 'boot_size', 'root_size', 'swap_size', 'home_size',\n 'root_id', 'lvm', 'swap_id', 'home_id', 'luks',\n 'user_passwd', 'root_passwd', 'desktop', 'gpu_driver',\n 'vga_controller', 'gpu_proprietary', 'desktop_extra']\n\n for unused in unused_entries:\n del self.user[unused]", "def unpossessed(self):\r\n self.owner = None", "def clearProperty(*args):", "def clearProperty(*args):", "def clearProperty(*args):", "def clearProperty(*args):", "def _remove_swarm_keys(self):\n for key in SWARM_PROPERTIES:\n self.spec.pop(key, None)", "def unset(cls, client, resource, args) :\n try :\n if type(resource) is not list :\n unsetresource = nshttpprofile()\n if type(resource) != type(unsetresource):\n unsetresource.name = resource\n else :\n unsetresource.name = resource.name\n return unsetresource.unset_resource(client, args)\n else :\n if type(resource[0]) != cls :\n if (resource and len(resource) > 0) :\n unsetresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n unsetresources[i].name = resource[i]\n else :\n if (resource and len(resource) > 0) :\n unsetresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n unsetresources[i].name = resource[i].name\n result = cls.unset_bulk_request(client, unsetresources, args)\n return result\n except Exception as e :\n raise e", "def unset(cls, client, resource, args) :\n\t\ttry :\n\t\t\tif type(resource) is not list :\n\t\t\t\tunsetresource = lsntransportprofile()\n\t\t\t\tif type(resource) != type(unsetresource):\n\t\t\t\t\tunsetresource.transportprofilename = resource\n\t\t\t\telse :\n\t\t\t\t\tunsetresource.transportprofilename = resource.transportprofilename\n\t\t\t\treturn unsetresource.unset_resource(client, args)\n\t\t\telse :\n\t\t\t\tif type(resource[0]) != cls :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tunsetresources = [ lsntransportprofile() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tunsetresources[i].transportprofilename = resource[i]\n\t\t\t\telse :\n\t\t\t\t\tif (resource and len(resource) > 0) :\n\t\t\t\t\t\tunsetresources = [ lsntransportprofile() for _ in range(len(resource))]\n\t\t\t\t\t\tfor i in range(len(resource)) :\n\t\t\t\t\t\t\tunsetresources[i].transportprofilename = resource[i].transportprofilename\n\t\t\t\tresult = cls.unset_bulk_request(client, unsetresources, args)\n\t\t\treturn result\n\t\texcept Exception as e :\n\t\t\traise e", "def 
uncleanable():\n data = attrdict.AttrDict()\n data.backup_ids = set()\n data.image_ids = set()\n data.keypair_ids = set()\n data.server_ids = set()\n data.nodes_ids = set()\n data.chassis_ids = set()\n data.snapshot_ids = set()\n data.transfer_ids = set()\n data.volume_ids = set()\n return data", "def reset(self):\n # type: ()->None\n self._ifAttributes = {}\n self._ifAttributes['bridge-opts'] = {}\n self._ifAttributes['up'] = []\n self._ifAttributes['down'] = []\n self._ifAttributes['pre-up'] = []\n self._ifAttributes['pre-down'] = []\n self._ifAttributes['post-up'] = []\n self._ifAttributes['post-down'] = []", "def test_remove_user_property(self):\n pass", "def test_del_property():\n\n contents = (\"[Info]\\n\"\n \"sdk = 23\")\n\n testutils.deploy_config_raw(contents)\n\n prop.del_prop('info', 'sdk')\n\n testutils.undeploy()\n\n return 0", "def clear_cached_attributes(self):\n setattr(self, '_atoms', None)\n setattr(self, '_bonds', None)\n setattr(self, '_rings', None)\n setattr(self, '_ring_systems', None)", "def unset(self) -> None:\n self.val = None\n self.notes = []", "def unset(self, shell=None):\n\n try:\n del os.environ[self.name]\n except KeyError:\n pass \n\n if shell:\n print shell.unset_var(self.name)", "def clear_attrs(self):\n self._attributes.clear()", "def _remove_pod_keys(self):\n self.labels = self.spec.pop(\"labels\", {})\n self.ports = self.spec.pop(\"ports\", [])\n self.pod_opts[\"grace\"] = self.spec.pop(\"stop_grace_period\", None)\n self.pod_opts[\"pid\"] = self.spec.pop(\"pid\", None)\n self.pod_opts[\"dns\"] = self.spec.pop(\"dns\", [])\n self.pod_opts[\"dns_search\"] = self.spec.pop(\"dns_search\", [])", "def uom(self, value):\n raise TypeError(\"Cannot delete {class-name} uom property.\")", "def clear(self):\r\n self._instance = None\r\n self.__resources = {}\r\n self.__m2m = {}\r\n self._suspended_permissions = {}\r\n self._suspended_traversing = {}\r\n self._suspended_minimals = {}\r\n self._empty = {}\r\n self._synchronized_fields = set()\r\n self._synchronized_memberships = False\r\n self._synchronized_permissions = set()\r\n self._suspended_inv_minimals = {}", "def CleanUpEnvironment(self):\n for prop in self._wrap_properties:\n self._adb.RunShellCommand('setprop %s \"\"' % (prop,))\n SetChromeTimeoutScale(self._adb, None)", "def __del__(self):\n\t\tsuper().__del__()\n\t\tself.maneuverability = 0 \n\t\tself.protection = 0" ]
[ "0.6243904", "0.61080045", "0.61080045", "0.61080045", "0.61080045", "0.59212315", "0.5779443", "0.5738039", "0.55826885", "0.5536625", "0.55132157", "0.55132157", "0.55132157", "0.55132157", "0.549153", "0.5467077", "0.5450318", "0.5432928", "0.54141974", "0.5398717", "0.53541213", "0.5346853", "0.53089225", "0.53049064", "0.5290458", "0.52875155", "0.5279167", "0.52697825", "0.5237011", "0.52124435" ]
0.75877285
0
Start an asyncio event loop, used for the device call back.
def start_call_back_loop(loop: asyncio.AbstractEventLoop) -> None:
    asyncio.set_event_loop(loop)
    loop.run_forever()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startLoop(self):\n if(self.loop is not None):\n raise Exception(\"Event loop is already started!\")\n self.loop = asyncio.new_event_loop()\n self.thread = Thread(target=start_thread_loop, args=(self.loop,))\n self.thread.setDaemon(True)\n self.thread.start()", "def _io_event_loop_thread(self):\r\n io_event_loop = asyncio.get_event_loop_policy().new_event_loop()\r\n asyncio.set_event_loop(io_event_loop)\r\n assert isinstance(io_event_loop, AbstractEventLoop)\r\n self._io_event_loop = io_event_loop\r\n self._event_loop_started.release()\r\n self._io_event_loop.run_forever()", "def load_event_loop():\n while True:\n try:\n async_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(async_loop)\n return async_loop\n except:\n time.sleep(3)", "def start_loop(self, loop):\n asyncio.set_event_loop(loop)\n loop.run_forever()", "def event_loop():\n loop = asyncio.get_event_loop()\n yield loop\n loop.close()", "def event_loop():\n loop = asyncio.get_event_loop_policy().new_event_loop()\n yield loop\n loop.close()", "def _start_io_event_loop(self):\r\n self._event_loop_started = threading.Lock()\r\n self._event_loop_started.acquire()\r\n threading.Thread(None, self._io_event_loop_thread).start()\r\n self._event_loop_started.acquire()", "def get_event_loop() -> KivyEventLoop:\n return asyncio.get_event_loop()", "def _run_loop(self):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n self._server = websockets.serve(self._log_message, self._host, self._port)\n\n loop.run_until_complete(self._server)\n loop.run_forever()", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def setUp(self):\n self.loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.loop)", "def setup_test_loop():\n loop = asyncio.get_event_loop()\n # asyncio.set_event_loop(None)\n return loop", "def run(self, *args: S.args, **kwargs: S.kwargs) -> None:\n\n async def runner() -> None:\n asyncio.events.new_event_loop = old_new_event_loop\n try:\n await self.start(*args, **kwargs)\n finally:\n if not self.is_closed():\n await self.close()\n\n # we just have to monkey patch in support for using get_event_loop\n old_new_event_loop = asyncio.new_event_loop\n asyncio.events.new_event_loop = asyncio.get_event_loop\n try:\n asyncio.run(runner())\n except KeyboardInterrupt:\n log.info(\"Closing the event loop\")", "def event_loop(request):\n loop = asyncio.get_event_loop()\n yield loop", "def start(self):\n\n # ioloop.install()\n threading.Thread(target=self.loop.start).start()\n time.sleep(1)", "async def setup(self) -> None:\n if self.args.sync:\n self._processing = threading.Thread(target=self.start_events_sync)\n self._processing.daemon = True\n self._processing.start()\n else:\n self._processing = asyncio.ensure_future(self.start_events_async())", "def run(self, event_loop):\n protocol_factory = lambda: self.PROTOCOL_CLS(self)\n coro = event_loop.create_server(protocol_factory, port=self.port)\n event_loop.run_until_complete(coro)", "def loop():\n loop = LOOP_INIT()\n asyncio.set_event_loop(loop)\n yield loop\n loop.close()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def main() -> None:\n runner()\n asyncio.get_event_loop().run_forever()", "def start(self):\n\n if self.thread is None:\n self.thread = threading.Thread(\n target=self.__run__,\n 
daemon=True,\n )\n\n self.thread.start()\n LOGGER.debug(\n \"Starting thread `%s` for event loop `%s`.\",\n self.ident,\n self.thread.ident,\n )", "def run(self, event_loop):\n protocol_factory = lambda: self.PROTOCOL_CLS(self)\n coro = event_loop.create_connection(\n protocol_factory, host=self.host, port=self.port)\n event_loop.run_until_complete(coro)", "def run(self):\n self.active_tasks, self.coros_result = {}, {}\n try:\n self.event_loop = asyncio.get_event_loop()\n except Exception:\n self.event_loop = asyncio.new_event_loop()\n asyncio.set_event_loop(self.event_loop)\n self.master_queue = asyncio.Queue()\n self.queue_listen_task = asyncio.Task(self.receive_queue_coro())\n self.is_initialized = True\n try:\n self.get_event_loop().run_until_complete(self.queue_listen_task)\n except CancelledError:\n logging.debug(\"Closing the Thread.\")", "def get_event_loop():\n try:\n return asyncio.get_running_loop()\n except RuntimeError:\n return asyncio.new_event_loop()", "def start(self):\n loop = aio.get_event_loop()\n\n if self._with_subscribers:\n # Start the server to listen to events\n self.registry = SubscriptionRegistry()\n server = self.registry.server\n xx = aio.ensure_future(server)\n\n if self._with_discovery:\n # Start the server to listen to new devices\n addrinfo = socket.getaddrinfo(UPNP_ADDR, None)[0]\n sock = socket.socket(addrinfo[0], socket.SOCK_DGRAM)\n # Allow multiple copies of this program on one machine\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n listen = loop.create_datagram_endpoint(\n partial(UPnP,loop,UPNP_ADDR,self._found_device,self.upnp),\n sock=sock\n )\n xx = aio.ensure_future(listen)\n\n if self._with_discovery or self._with_subscribers:\n xx = aio.ensure_future(self.real_start())", "async def async_start(self) -> None:\n\n self._shutdown = False\n\n # Start up the LifeSOS interface\n self._baseunit.start()\n\n # Connect to the MQTT broker\n self._mqtt_was_connected = False\n if self._config.mqtt.uri.port:\n self._mqtt.connect_async(\n self._config.mqtt.uri.hostname,\n self._config.mqtt.uri.port,\n keepalive=Translator.KEEP_ALIVE)\n else:\n self._mqtt.connect_async(\n self._config.mqtt.uri.hostname,\n keepalive=Translator.KEEP_ALIVE)\n\n # Start processing MQTT messages\n self._mqtt.loop_start()", "def update(self):\n asyncio.set_event_loop(asyncio.new_event_loop())\n self.listen(self.port)\n self.loop = IOLoop.instance()\n self.loop.start()", "def start(setup): #pragma: no cover\n import warnings\n warnings.warn(\"start() is deprecated, use run() instread\", DeprecationWarning)\n\n\n async def main():\n await setup()\n await initialize()\n try:\n tasks = []\n for hub in Hub.hubs:\n tasks.append(spawn(hub.run()))\n for task in tasks:\n await task\n finally:\n await finalize()\n loop = get_event_loop()\n loop.run_until_complete(main(program))" ]
[ "0.76564205", "0.7373814", "0.72559583", "0.7224968", "0.7152667", "0.7087684", "0.70857453", "0.70601386", "0.7014385", "0.70009464", "0.70009464", "0.70009464", "0.70009464", "0.6974847", "0.6932438", "0.688413", "0.68510026", "0.6805241", "0.67912406", "0.67848516", "0.67848325", "0.67848325", "0.6748656", "0.67215985", "0.67114645", "0.6680164", "0.6637385", "0.66333663", "0.66252315", "0.65612024" ]
0.7421172
1
Refresh reloads data from the server. It raises an error if it fails to get the object's metadata
def refresh(self):
    self.metadata = self.db.read(self.path).json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _refresh(self):\n url = self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)\n resp = self._cb.get_object(url)\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def refresh(self):\n resp = self._imgur._send_request(self._INFO_URL)\n self._populate(resp)\n self._has_fetched = True\n # NOTE: What if the object has been deleted in the meantime? That might\n # give a pretty cryptic error.", "def refresh(self):\n self.dto = self.res.get()\n log.debug(f\"Refreshed {self.url}\")", "def refresh(self, unused_http):\n self._metadata_service.refresh()", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def _refresh(self, unused_http):\n # Refreshing can also be done by directly calling this method, instead of just through\n # refresh() above!\n self._metadata_service.refresh()", "def refresh(self): \n return self._config.refreshObj(self)", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh_from_api(self):\n self.populate_from_api(self.get_from_api())", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh(self):\n raise NotImplementedError(\"To be implemented\")", "def _Refresh(self):\n raise NotImplementedError", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh(self):\n # type () -> bool\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n resource_object = self.connection.api_call(\n \"GET\", [\"v1\", \"resources\", self.id], headers=headers, model=Resource\n )\n\n self.raw_model = resource_object.raw_model\n\n return True", "def refresh(self):\n raise UncodedError", "def refresh(dataset, client):\n pass", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n aq_data.add_aq_to_db()\n DB.session.commit()\n return 'Data refreshed!'", "def refresh(self):\n metadata = {}\n\n # disable proxy for METADATA_ENDPOINT\n save_no_proxy = os.environ.get('no_proxy', '')\n os.environ['no_proxy'] = METADATA_ENDPOINT + ',%s' % save_no_proxy\n\n # read the instance metadata\n try:\n _request = urllib.request.Request(self._oci_metadata_api_url + 'instance/',\n headers={'Authorization': 'Bearer Oracle'})\n api_conn = urllib.request.urlopen(_request, timeout=2)\n instance_metadata = json.loads(api_conn.read().decode('utf-8'))\n metadata['instance'] = instance_metadata\n except IOError as e:\n raise IOError(\"Error connecting to metadata server\") from e\n\n # get the VNIC info\n try:\n _request = urllib.request.Request(self._oci_metadata_api_url + 'vnics/',\n headers={'Authorization': 'Bearer Oracle'})\n api_conn = 
urllib.request.urlopen(_request, timeout=2)\n vnic_metadata = json.loads(api_conn.read().decode('utf-8'))\n metadata['vnics'] = vnic_metadata\n except IOError as e:\n raise IOError(\"Error connecting to metadata server\") from e\n\n # restore no_proxy\n if not save_no_proxy:\n os.environ['no_proxy'] = save_no_proxy\n\n if metadata:\n self._metadata = OCIMetadata(metadata)\n\n return self", "def refresh(self) -> object:\n requestor = Requestor(local_api_key=self._api_key)\n url = self.instance_url()\n response, api_key = requestor.request(method=RequestMethod.GET, url=url, params=self._retrieve_params)\n self.refresh_from(values=response, api_key=api_key)\n return self", "def refresh(self):\n self.fetch(False)", "def refresh(self, obj):\n self.expunge(obj)\n return self.find(obj.__class__, {'_id': obj._id}, refresh=True).first()", "def refresh(self):\n self.__refresh()" ]
[ "0.76382405", "0.7313773", "0.7313773", "0.7045631", "0.69201297", "0.6815041", "0.67725044", "0.67725044", "0.675185", "0.6670727", "0.66402215", "0.66402215", "0.66402215", "0.6614664", "0.65988886", "0.65988886", "0.65988886", "0.65587395", "0.6531285", "0.6509297", "0.6509297", "0.64925194", "0.6491863", "0.6468419", "0.64525634", "0.644991", "0.6430584", "0.63202083", "0.6283521", "0.6259453" ]
0.7415601
1
Sets the object's user-friendly nickname
def nickname(self, new_nickname):
    self.set({"nickname": new_nickname})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_nickname(self, nickname):\n \n if len(nickname) > globals.MAX_NICKNAME_LENGTH:\n nick = nickname[0:globals.MAX_NICKNAME_LENGTH-3]+\"...\"\n else:\n nick = nickname\n \n self._nickname.set_message(nick)", "async def nickname(self, ctx, *, nickname=\"\"):\n # [p]set nickname <nickname>\n\n nickname = nickname.strip()\n if nickname == \"\":\n nickname = None\n try:\n await self.bot.change_nickname(ctx.message.server.me, nickname)\n await self.bot.say(\"Done.\")\n except discord.Forbidden:\n await self.bot.say(\"I cannot do that, I lack the \"\n \"\\\"Change Nickname\\\" permission.\")", "def setName(self, newName):\n self.__username = newName", "def change_username(self, name):\n self.username = name", "def set_nick(self, nick):\n raise NotImplementedError", "def set_username(self, value):\n self.username = value", "def set_username(self, value):\n raise NotImplementedError('set_username')", "def add_nickname(self, nickname):\n if 'Nicknames' not in self.properties:\n self.properties['Nicknames'] = []\n if (len(self.properties['Nicknames']) == 1 and self.properties['Nicknames'][0].startswith('Temp')):\n self.properties['Nicknames'][0] = nickname.title()\n else:\n self.properties['Nicknames'].append(nickname.title())", "async def nick(self, ctx, *, nickname):\n if len(nickname) > 32:\n await ctx.send(\"Nickname must be 32 characters or fewer\")\n return\n await ctx.me.edit(nick=nickname)\n await ctx.send(f\"Nickname changed to {nickname}\")", "def nickname(self, nickname):\n if nickname is None:\n raise ValueError(\"Invalid value for `nickname`, must not be `None`\") # noqa: E501\n\n self._nickname = nickname", "def set_nickname(self, nickname):\n self.nickname = nickname\n self.tweets_list = TweetsLinkedList(nickname)\n self.tweets_list.create_linked()", "async def nick(\n self, context: Context, user: discord.User, *, nickname: str = None\n ) -> None:\n member = context.guild.get_member(user.id) or await context.guild.fetch_member(\n user.id\n )\n try:\n await member.edit(nick=nickname)\n embed = discord.Embed(\n description=f\"**{member}'s** new nickname is **{nickname}**!\",\n color=0x9C84EF,\n )\n await context.send(embed=embed)\n except:\n embed = discord.Embed(\n description=\"An error occurred while trying to change the nickname of the user. Make sure my role is above the role of the user you want to change the nickname.\",\n color=0xE02B2B,\n )\n await context.send(embed=embed)", "def __str__(self):\n return self.nickname", "async def nick(self, context: SlashContext, user: discord.User, nickname: str = None):\n author = await context.guild.fetch_member(context.author_id)\n if not author.guild_permissions.manage_nicknames:\n embed = discord.Embed(\n title=\"Error!\",\n description=\"You don't have enough permissions to change the nickname of this user.\",\n color=0xE02B2B\n )\n return await context.send(embed=embed)\n member = await context.guild.fetch_member(user.id)\n try:\n await member.edit(nick=nickname)\n embed = discord.Embed(\n title=\"Changed Nickname!\",\n description=f\"**{member}'s** new nickname is **{nickname}**!\",\n color=0x42F56C\n )\n await context.send(embed=embed)\n except:\n embed = discord.Embed(\n title=\"Error!\",\n description=\"An error occurred while trying to change the nickname of the user. 
Make sure my role is above the role of the user you want to change the nickname.\",\n color=0xE02B2B\n )\n await context.message.channel.send(embed=embed)", "def set_uname(self, username):\n Server.t_usernames[threading.get_ident()] = username\n _print(f\"t_usernames: {Server.t_usernames}\")", "def rename(self,newName):\n self.userName = newName", "def set_name(self):\n if self.first_name and self.last_name:\n name_string = \"%s\" % self.first_name\n name_string += \" %s\" % self.last_name\n self.name = name_string\n\n if self.name:\n if not self.first_name and not self.last_name:\n n = HumanName(self.name)\n self.first_name = n.first\n if n.middle:\n self.first_name = n.first + \" \" + n.middle\n self.last_name = n.last\n if n.suffix:\n self.last_name = n.last + \" \" + n.suffix", "def nickname(self):\r\n if \"nickname\" in self.data:\r\n return self.data[\"nickname\"]\r\n return None", "def change_nick(self, before, after):\n userdata = self.users[irc.strings.lower(before)]\n self.del_user(before)\n self.add_user(after, userdata)", "def user_name(self, user_name):\n\n self._user_name = user_name", "async def set_nick(\n client,\n event,\n user: ('user', 'Who\\'s?'),\n nick: P(str, 'Their new nick', min_length = 1, max_length = 32) = None,\n):\n yield\n await client.user_guild_profile_edit(event.guild, user, nick=nick)\n yield f'{user:f}\\'s nick has been updated'", "def set_username(self, username):\n self._java_ref.setUsername(username)", "def _switch_nick(self):\n self.nickname = self.firstnick + str(random.randint(1000, 9999))\n self._log(self.botlog, 'Switching to nick %s' % self.nickname)\n self._send('NICK %s' % self.nickname)", "def set_user_name_override(name: str) -> None:\r\n global _user_name_override\r\n _user_name_override = name", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username" ]
[ "0.7748096", "0.74267966", "0.72978115", "0.72184396", "0.7196049", "0.71037096", "0.70768476", "0.70639694", "0.70152736", "0.68799037", "0.679108", "0.67883277", "0.6730406", "0.671135", "0.6603327", "0.65963477", "0.6478311", "0.64500314", "0.6448181", "0.6426235", "0.6407177", "0.63844866", "0.6372392", "0.6357389", "0.63571703", "0.63571703", "0.63571703", "0.63571703", "0.63571703", "0.63571703" ]
0.78098214
0
Load styles for BHSA divs.
def load_style():
    display(HTML(Path('bhsa.css').read_text()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_default_style(self):\n self._css_shape = {\n \"point\": {\"color\": (255,0,0), \"paint\": fshape.FILL, \"z-index\":0},\n \"line\": {\"color\": (0,255,0), \"paint\": fshape.STROKE, \"z-index\":0},\n \"area\": {\"color\": (0,0,255), \"paint\": fshape.FILL, \"z-index\":0},\n \"text\": {\"color\": (0,0,0), \"angle\":0, \"paint\": fshape.FILL, \"z-index\":0}\n }\n \n # jeigu simbolis yra nurodytas, tai cia jo stiliaus aprasymas\n self._css_symbol = {\n \"graphics\": {\"z-index\":1000, \"color\": (255,0,0), \"line-width\":0.12} # ocad simboliams kurie yra paversti i grafika\n #\"901_1\": {\"name\":\"Road\", \"color\": (204, 204, 204)}\n }", "def load_style() -> str:\n return '<style id=\"scipp-style-sheet\">' + load_style_sheet() + '</style>'", "def embed_styles(self):\n for style in self.book.xpath(\"//link[@rel='stylesheet']\"):\n style_raw = self.get_remote_content(style.attrib[\"href\"])\n if style_raw != None:\n style_content = style_raw.decode(\"utf-8\")\n new_style = html.Element(\"style\")\n new_style.attrib[\"type\"] = \"text/css\"\n new_style.text = style_content \n style.xpath(\"//head\")[0].insert(0, new_style)\n style.getparent().remove(style)", "def get_stylesheet():\n\n #ss_dict\n ss_dict = {'header_image' : HEADER_IMAGE,\n 'icon_true' : ICON_TRUE,\n 'icon_false' : ICON_FALSE,\n 'futura_lt_light' : FUTURA_LT_LIGHT,\n 'bright_orange' : BRIGHT_ORANGE.name(),\n 'bright_orange_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_ORANGE.red(), BRIGHT_ORANGE.green(), BRIGHT_ORANGE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'dark_orange' : DARK_ORANGE.name(),\n 'dark_orange_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_ORANGE.red(), DARK_ORANGE.green(), DARK_ORANGE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'bright_blue' : BRIGHT_BLUE.name(),\n 'bright_blue_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_BLUE.red(), BRIGHT_BLUE.green(), BRIGHT_BLUE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'dark_blue' : DARK_BLUE.name(),\n 'dark_blue_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_BLUE.red(), DARK_BLUE.green(), DARK_BLUE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'bright_green' : BRIGHT_GREEN.name(),\n 'bright_green_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_GREEN.red(), BRIGHT_GREEN.green(), BRIGHT_GREEN.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'dark_green' : DARK_GREEN.name(),\n 'dark_green_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_GREEN.red(), DARK_GREEN.green(), DARK_GREEN.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n\t\t\t 'bright_grey' : BRIGHT_GREY.name(),\n 'bright_grey_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_GREY.red(), BRIGHT_GREY.green(), BRIGHT_GREY.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n\t\t\t 'grey' : GREY.name(),\n 'grey_transparent' : 'rgba({0},{1},{2},{3})'.format(GREY.red(), GREY.green(), GREY.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n\t\t\t 'dark_grey' : DARK_GREY.name(),\n 'dark_grey_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_GREY.red(), DARK_GREY.green(), DARK_GREY.blue(), TABLEVIEW_EDITOR_TRANSPARENCY)}\n\n\n #str_stylesheet\n str_stylesheet = \" \\\n\\\n\\\n/* QWidget */\\\nQWidget { background-color: %(dark_grey)s; \\\n font-family: \\\"%(futura_lt_light)s\\\"; \\\n font-size: 14pt; \\\n selection-background-color: %(bright_blue)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_header_icon */\\\nQWidget#wdgt_header_icon { border-image: url(%(header_image)s); } \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QToolTip */\\\nQToolTip { background-color: %(dark_grey)s; \\\n font-size: 14pt; \\\n color: %(bright_grey)s; \\\n border-left: 
none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QLabel */\\\nQLabel { background-color: transparent; \\\n} \\\n\\\n\\\n/* QLabel - lbl_explanation_header */\\\nQLabel#lbl_explanation_header { font-weight: bold; \\\n font-size: 20pt; \\\n color: %(bright_grey)s; \\\n margin-top: 10; \\\n margin-left: 10; \\\n margin-bottom: 4; \\\n margin-right: 10; \\\n} \\\n\\\n\\\n/* QLabel - lbl_explanation_text */\\\nQLabel#lbl_explanation_text { color: %(bright_grey)s; \\\n margin-top: 4; \\\n margin-left: 10; \\\n margin-bottom: 4; \\\n margin-right: 10; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QProgressBar */\\\nQProgressBar { border: none;\\\n background-color: %(dark_grey)s;\\\n text-align: center;\\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QLineEdit */\\\nQLineEdit { border: none;\\\n background-color: %(grey)s;\\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QMenuBar - mnubar_menu */\\\nQMenuBar#mnubar_menu { background-color: transparent;\\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenuBar - mnubar_menu - item */\\\nQMenuBar#mnubar_menu::item { background: transparent;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenuBar - mnubar_menu - item - selected */\\\nQMenuBar#mnubar_menu::item:selected { background: transparent;\\\n color: %(bright_orange)s; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenuBar - mnubar_menu - item - pressed */\\\nQMenuBar#mnubar_menu::item:pressed { background: transparent;\\\n color: %(dark_orange)s; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QMenu - separator */\\\nQMenu::separator { background: %(bright_orange)s;\\\n height: 1px; \\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_threads */\\\nQMenu#mnu_threads { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_threads - item */\\\nQMenu#mnu_threads::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_threads - item - selected */\\\nQMenu#mnu_threads::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_threads_logging */\\\nQMenu#mnu_threads_logging { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_threads_logging - item */\\\nQMenu#mnu_threads_logging::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_threads_logging - item - selected */\\\nQMenu#mnu_threads_logging::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_gui */\\\nQMenu#mnu_gui { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_gui - item */\\\nQMenu#mnu_gui::item { background: transparent;\\\n} 
\\\n\\\n\\\n/* QMenu - mnu_gui - item - selected */\\\nQMenu#mnu_gui::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_shot_metatada_view */\\\nQMenu#mnu_shot_metatada_view { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_shot_metatada_view - item */\\\nQMenu#mnu_shot_metatada_view::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_shot_metatada_view - item - selected */\\\nQMenu#mnu_shot_metatada_view::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_char_metatada_view */\\\nQMenu#mnu_char_metatada_view { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_char_metatada_view - item */\\\nQMenu#mnu_char_metatada_view::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_char_metatada_view - item - selected */\\\nQMenu#mnu_char_metatada_view::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_prop_metatada_view */\\\nQMenu#mnu_prop_metatada_view { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_prop_metatada_view - item */\\\nQMenu#mnu_prop_metatada_view::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_prop_metatada_view - item - selected */\\\nQMenu#mnu_prop_metatada_view::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_alembic */\\\nQMenu#mnu_alembic { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_alembic - item */\\\nQMenu#mnu_alembic::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_alembic - item - selected */\\\nQMenu#mnu_alembic::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_assets */\\\nQMenu#mnu_assets { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_assets - item */\\\nQMenu#mnu_assets::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_assets - item - selected */\\\nQMenu#mnu_assets::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_attributes */\\\nQMenu#mnu_attributes { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_attributes - item */\\\nQMenu#mnu_attributes::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_attributes - item - selected */\\\nQMenu#mnu_attributes::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QScrollBar */\\\nQScrollBar { background: %(dark_grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QTableCornerButton */\\\nQTableCornerButton { background-color: 
%(grey)s; \\\n border: none; \\\n}\\\n\\\n\\\n/* QTableCornerButton - section */\\\nQTableCornerButton::section { background-color: %(grey)s; \\\n border: none; \\\n}\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* ShotMetadataView */\\\nShotMetadataView { background-color: %(grey)s; \\\n selection-background-color: %(bright_orange)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QHeaderView - shot_metadata_view_hor_header*/\\\nQHeaderView#shot_metadata_view_hor_header{ background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - shot_metadata_view_hor_header - section */\\\nQHeaderView#shot_metadata_view_hor_header::section { background-color: qlineargradient(spread:reflect, x1:0.06, y1:0.04, x2:0, y2:0, \\\n stop:0.8 %(grey)s, \\\n stop:1 %(dark_orange)s); \\\n font-weight: bold; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: 1px solid %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QHeaderView - shot_metadata_view_ver_header */\\\nQHeaderView#shot_metadata_view_ver_header { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - shot_metadata_view_ver_header - section */\\\nQHeaderView#shot_metadata_view_ver_header::section { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* ShotMetadataContextMenu */\\\nShotMetadataContextMenu { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* ShotMetadataContextMenu -item - selected */\\\nShotMetadataContextMenu::item:selected { background-color: %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* PropMetadataView */\\\nPropMetadataView { background-color: %(grey)s; \\\n selection-background-color: %(bright_blue)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QHeaderView - prop_metadata_view_hor_header*/\\\nQHeaderView#prop_metadata_view_hor_header{ background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - prop_metadata_view_hor_header - section */\\\nQHeaderView#prop_metadata_view_hor_header::section { background-color: qlineargradient(spread:reflect, x1:0.06, y1:0.04, x2:0, y2:0, \\\n stop:0.8 %(grey)s, \\\n stop:1 %(bright_blue)s); \\\n font-weight: bold; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: 1px solid %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QHeaderView - prop_metadata_view_ver_header */\\\nQHeaderView#prop_metadata_view_ver_header { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - prop_metadata_view_ver_header - section */\\\nQHeaderView#prop_metadata_view_ver_header::section { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n/* PropMetadataContextMenu */\\\n/* Here is the above mentioned menu but 
also its sub menus. */\\\n/* mnu_metadata, mnu_geometry, mnu_visibility, mnu_selection */\\\n\\\n\\\n/* PropMetadataContextMenu */\\\nPropMetadataContextMenu { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* PropMetadataContextMenu -item - selected */\\\nPropMetadataContextMenu::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata */\\\nQMenu#PropMetadataContextMenu_mnu_metadata { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_metadata::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry */\\\nQMenu#PropMetadataContextMenu_mnu_geometry { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_geometry::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility */\\\nQMenu#PropMetadataContextMenu_mnu_visibility { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_visibility::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection */\\\nQMenu#PropMetadataContextMenu_mnu_selection { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_selection::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* CharMetadataView */\\\nCharMetadataView { background-color: %(grey)s; \\\n selection-background-color: %(dark_green)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QHeaderView - char_metadata_view_hor_header*/\\\nQHeaderView#char_metadata_view_hor_header{ background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - char_metadata_view_hor_header - section */\\\nQHeaderView#char_metadata_view_hor_header::section { background-color: qlineargradient(spread:reflect, x1:0.06, y1:0.04, x2:0, y2:0, \\\n stop:0.8 %(grey)s, \\\n stop:1 %(bright_green)s); \\\n font-weight: bold; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: 1px solid %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QHeaderView - char_metadata_view_ver_header */\\\nQHeaderView#char_metadata_view_ver_header { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - 
char_metadata_view_ver_header - section */\\\nQHeaderView#char_metadata_view_ver_header::section { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n/* CharMetadataContextMenu */\\\n/* Here is the above mentioned menu but also its sub menus. */\\\n/* mnu_metadata, mnu_geometry, mnu_visibility, mnu_selection */\\\n\\\n\\\n/* CharMetadataContextMenu */\\\nCharMetadataContextMenu { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* CharMetadataContextMenu -item - selected */\\\nCharMetadataContextMenu::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata */\\\nQMenu#CharMetadataContextMenu_mnu_metadata { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_metadata::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry */\\\nQMenu#CharMetadataContextMenu_mnu_geometry { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_geometry::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility */\\\nQMenu#CharMetadataContextMenu_mnu_visibility { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_visibility::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection */\\\nQMenu#CharMetadataContextMenu_mnu_selection { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_selection::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorFramerange */\\\n/* This widget has a transparent background. Below are the stylesheets for the */\\\n/* children of this widget. 
*/\\\n\\\n\\\n/* QSpinBox - spnbx_frame */\\\nQSpinBox#spnbx_frame { background-color: transparent; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange_transparent)s; \\\n border-bottom: 1px solid %(bright_orange_transparent)s; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_table_view_editor_framerange_main */\\\nQWidget#wdgt_table_view_editor_framerange_main { background-color: %(grey_transparent)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame */\\\nQWidget#wdgt_frame { background-color: transparent; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_and_time_slider */\\\nQWidget#wdgt_range_and_time_slider { background-color: transparent; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame_slider */\\\nQWidget#wdgt_frame_slider { background-color: transparent; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame_slider_left */\\\nQWidget#wdgt_frame_slider_left { background-color: qlineargradient(spread:reflect, x1:0.3, y1:0, x2:0, y2:0, \\\n stop:0.45 transparent, \\\n stop:0.5 %(dark_orange_transparent)s, \\\n stop:0.55 transparent); \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame_slider_right */\\\nQWidget#wdgt_frame_slider_right { background-color: qlineargradient(spread:reflect, x1:0.1, y1:0, x2:0, y2:0, \\\n stop:0.45 transparent, \\\n stop:0.5 %(dark_orange_transparent)s, \\\n stop:0.55 transparent); \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_get_current_frame*/\\\nAssetManagerHoverButton#btn_get_current_frame { background-color: %(bright_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider */\\\nQWidget#wdgt_range_slider { background-color: transparent; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_scrollbar */\\\nQWidget#wdgt_range_scrollbar { background-color: transparent; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider_left */\\\nQWidget#wdgt_range_slider_left { background-color: %(dark_grey_transparent)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider_middle */\\\nQWidget#wdgt_range_slider_middle { background-color: %(bright_grey_transparent)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider_right */\\\nQWidget#wdgt_range_slider_right { background-color: %(dark_grey_transparent)s; \\\n} \\\n\\\n\\\n/* QLabel - lbl_framesource */\\\nQLabel#lbl_framesource { background-color: transparent; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_complete_range_start*/\\\nAssetManagerHoverButton#btn_complete_range_start { background-color: %(grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_current_range_start*/\\\nAssetManagerHoverButton#btn_current_range_start { background-color: %(dark_orange_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_complete_range_end*/\\\nAssetManagerHoverButton#btn_complete_range_end { background-color: %(grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_current_range_end*/\\\nAssetManagerHoverButton#btn_current_range_end { background-color: %(dark_orange_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid 
%(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorNodepicker */\\\n/* This widget has a transparent background. Below are the stylesheets for the */\\\n/* children of this widget. */\\\n\\\n\\\n/* QWidget - wdgt_table_view_editor_nodepicker_main */\\\nQWidget#wdgt_table_view_editor_nodepicker_main { background-color: %(grey_transparent)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QLabel - lbl_nodetype */\\\nQLabel#lbl_nodetype { background-color: %(grey_transparent)s; \\\n} \\\n\\\n\\\n/* QLineEdit - le_filter */\\\nQLineEdit#le_filter { background-color: %(dark_grey_transparent)s; \\\n border: 1px solid %(dark_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QListView - node_view */\\\nQListView#node_view { background-color: %(grey_transparent)s; \\\n alternate-background-color: %(dark_grey_transparent)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QListView - node_view - item selected */\\\nQListView#node_view::item:selected { background-color: %(bright_orange)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorPathpicker */\\\n/* This widget has a transparent background. Below are the stylesheets for the */\\\n/* children of this widget. */\\\n\\\n\\\n/* QWidget - wdgt_table_view_editor_pathpicker_main */\\\nQWidget#wdgt_table_view_editor_pathpicker_main { background-color: %(grey_transparent)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QLabel - lbl_base_path */\\\nQLabel#lbl_base_path { background-color: %(grey_transparent)s; \\\n} \\\n\\\n\\\n/* QLineEdit - le_path_filter */\\\nQLineEdit#le_path_filter { background-color: %(dark_grey_transparent)s; \\\n border: 1px solid %(dark_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QListView - path_view */\\\nQListView#path_view { background-color: %(grey_transparent)s; \\\n alternate-background-color: %(dark_grey_transparent)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QListView - path_view - item selected */\\\nQListView#path_view::item:selected { background-color: %(bright_orange)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* AssetManagerSliderAction */\\\n/* QWidgetAction that draws a slider and an LCD Display */\\\n\\\n\\\n/* AssetManagerSliderAction - QLabel */\\\nQLabel#AssetManagerSliderActionQLabel { background-color: transparent; \\\n margin-left: 8; \\\n margin-right: 8; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QWidget */\\\nQWidget#AssetManagerSliderActionQWidget { background-color: transparent; \\\n margin-left: 8; \\\n margin-right: 8; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QSlider - groove - horizontal */\\\nQSlider#AssetManagerSliderActionQSlider::groove:horizontal { background: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, \\\n stop:0 transparent, \\\n stop:1 %(bright_orange)s); \\\n height: 1px; \\\n margin-left: 8; \\\n margin-right: 8; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QSlider - handle - horizontal */\\\nQSlider#AssetManagerSliderActionQSlider::handle:horizontal { background: %(bright_grey)s; \\\n width: 20px; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QLCDNumber */\\\nQLCDNumber#AssetManagerSliderActionQLCDNumber { background: transparent; \\\n color: 
%(bright_orange)s; \\\n border: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QWidget - wdgt_asset_manager_pre_export_dialog_main */\\\nQWidget#wdgt_asset_manager_pre_export_dialog_main { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_wdgt_asset_manager_pre_export_dialog_main_options */\\\nQWidget#wdgt_wdgt_asset_manager_pre_export_dialog_main_options { background-color: transparent; } \\\n\\\n\\\n/* QLabel - lbl_question */\\\nQLabel#lbl_question { background-color: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_accept */\\\nQPushButton#btn_accept { background-color: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_accept - pressed */\\\nQPushButton#btn_accept:pressed { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_accept - hover */\\\nQPushButton#btn_accept:hover { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_reject */\\\nQPushButton#btn_reject { background-color: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_reject - pressed */\\\nQPushButton#btn_reject:pressed { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_reject - hover */\\\nQPushButton#btn_reject:hover { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice */\\\nQCheckBox#chkbx_remember_choice { background: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator */\\\nQCheckBox#chkbx_remember_choice::indicator { background: transparent; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator - hover */\\\nQCheckBox#chkbx_remember_choice::indicator:hover { background: %(dark_grey)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator - checked */\\\nQCheckBox#chkbx_remember_choice::indicator:checked { background: %(bright_grey)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator - pressed */\\\nQCheckBox#chkbx_remember_choice::indicator:pressed { background: %(dark_orange)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorBool */\\\n/* Below are the stylesheets for the children of this widget. 
*/\\\n\\\n\\\n/* TableViewEditorBool */\\\nTableViewEditorBool { background-color: %(dark_grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_true */\\\nQPushButton#TableViewEditorBool_btn_true { background-color: transparent; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_true - hover */\\\nQPushButton#TableViewEditorBool_btn_true:hover { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_true - pressed */\\\nQPushButton#TableViewEditorBool_btn_true:pressed { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_false */\\\nQPushButton#TableViewEditorBool_btn_false { background-color: transparent; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_false - hover */\\\nQPushButton#TableViewEditorBool_btn_false:hover { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_false - pressed */\\\nQPushButton#TableViewEditorBool_btn_false:pressed { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* AssetManagerDockWidget */\\\nAssetManagerDockWidget { background: %(dark_grey)s; \\\n font-size: 14pt; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* AssetManagerDockWidget - title */\\\nAssetManagerDockWidget::title { background: %(dark_grey)s; \\\n text-align: left; \\\n font-size: 14pt; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\nAssetManagerDockWidget::close-button, AssetManagerDockWidget::float-button {background: %(bright_orange)s; \\\n border: none; \\\n} \\\n\\\n\\\nAssetManagerDockWidget::close-button:hover, AssetManagerDockWidget::float-button:hover { background: %(dark_orange)s; \\\n} \\\n\\\n\\\nAssetManagerDockWidget::close-button:pressed, AssetManagerDockWidget::float-button:pressed { background: %(dark_orange)s; \\\n} \\\n\\\n\\\n\"%ss_dict\n\n return str_stylesheet", "def SetupStyles(self):\n # Python styles\n faces = self.GetFaces()\n fonts = \"face:%(font)s,size:%(size)d\" % faces\n default = \"fore:#000000,\" + fonts\n\n # Default\n self.StyleSetSpec(wx.stc.STC_P_DEFAULT, default)\n # Comments\n self.StyleSetSpec(wx.stc.STC_P_COMMENTLINE,\n \"fore:#777777,italic,\" + fonts)\n # Number\n self.StyleSetSpec(wx.stc.STC_P_NUMBER,\n \"fore:#007F7F,\" + fonts)\n # String\n self.StyleSetSpec(wx.stc.STC_P_STRING,\n \"fore:#007F00,\" + fonts)\n # Single quoted string\n self.StyleSetSpec(wx.stc.STC_P_CHARACTER,\n \"fore:#7F007F,\" + fonts)\n # Keyword\n self.StyleSetSpec(wx.stc.STC_P_WORD,\n \"fore:#00007F,bold,\" + fonts)\n # Triple quotes\n self.StyleSetSpec(wx.stc.STC_P_TRIPLE,\n \"fore:#7F0000,\" + fonts)\n # Triple double quotes\n self.StyleSetSpec(wx.stc.STC_P_TRIPLEDOUBLE,\n \"fore:#7F0000,\" + fonts)\n # Class name definition\n self.StyleSetSpec(wx.stc.STC_P_CLASSNAME,\n \"fore:#0000FF,bold,\" + fonts)\n # Function or method name definition\n self.StyleSetSpec(wx.stc.STC_P_DEFNAME,\n \"fore:#007F7F,bold,\" + fonts)\n # Operators\n self.StyleSetSpec(wx.stc.STC_P_OPERATOR, \"bold,\" + fonts)\n # Identifiers\n self.StyleSetSpec(wx.stc.STC_P_IDENTIFIER, default)\n # Comment-blocks\n self.StyleSetSpec(wx.stc.STC_P_COMMENTBLOCK,\n \"fore:#7F7F7F,\" + fonts)\n # End of line where string is not closed\n eol_style = \"fore:#000000,back:#E0C0E0,eol,\" + fonts\n self.StyleSetSpec(wx.stc.STC_P_STRINGEOL, eol_style)", "def 
loadstyle(style_name):\n\n style = {}\n nwc_styles = {} # for backwards compatibility\n style_file = os.path.join(HERE, '..', 'rc', style_name)\n try:\n # Check rc directory for built in styles first\n rc_file(style_file)\n except FileNotFoundError:\n # Check current working dir or path\n style_file = style_name\n try:\n rc_file(style_file)\n except FileNotFoundError as err:\n raise StyleNotFoundError(f\"No such style file found: {err}\")\n style = rcParams.copy()\n\n # The style files may also contain an extra section with typography\n # for titles and captions (these can only be separately styled in code,\n # as of Matplotlib 2.2)\n # This is a hack, but it's nice to have all styling in one file\n # The extra styling is prefixed with `#!`\n with open(style_file, 'r') as file_:\n doc = file_.readlines()\n rc_params_newsworthy = \"\\n\".join([d[2:]\n for d in doc if d.startswith(\"#!\")])\n rc_params_newsworthy = yaml.safe_load(rc_params_newsworthy)\n ###\n # Typography\n ###\n if \"title_font\" in rc_params_newsworthy:\n nwc_styles[\"title_font\"] = [\n x.strip() for x in rc_params_newsworthy[\"title_font\"].split(\",\")\n ]\n else:\n nwc_styles[\"title_font\"] = style[\"font.family\"]\n\n # define as pt or reltive (\"smaller\")\n nwc_styles[\"subtitle.fontsize\"] = rc_params_newsworthy.get(\n \"subtitle.fontsize\",\n None,\n )\n\n # make annotation same font size as ticks by default\n tick_font_size = style.get('xtick.labelsize', \"smaller\")\n nwc_styles[\"annotation.fontsize\"] = rc_params_newsworthy.get(\n \"annotation.fontsize\",\n tick_font_size,\n )\n nwc_styles[\"note.fontsize\"] = rc_params_newsworthy.get(\n \"note.fontsize\",\n \"smaller\",\n )\n nwc_styles[\"caption.fontsize\"] = rc_params_newsworthy.get(\n \"caption.fontsize\",\n \"smaller\",\n )\n\n color = rc_params_newsworthy.get(\"neutral_color\",\n rcParams[\"figure.edgecolor\"])\n black_color = rc_params_newsworthy.get(\"black_color\", BLACK)\n dark_gray_color = rc_params_newsworthy.get(\"dark_gray_color\", DARK_GRAY)\n light_gray_color = rc_params_newsworthy.get(\"light_gray_color\", LIGHT_GRAY)\n strong_color = rc_params_newsworthy.get(\"strong_color\", color)\n positive_color = rc_params_newsworthy.get(\"positive_color\", POSITIVE)\n negative_color = rc_params_newsworthy.get(\"negative_color\", NEGATIVE)\n warm_color = rc_params_newsworthy.get(\"warm_color\", WARM)\n cold_color = rc_params_newsworthy.get(\"cold_color\", COLD)\n fill_between_color = rc_params_newsworthy.get(\"fill_between_color\", FILL_BETWEEN)\n fill_between_alpha = rc_params_newsworthy.get(\"fill_between_alpha\", 0.5)\n nwc_styles[\"black_color\"] = to_rgba(\"#\" + str(black_color), 1)\n nwc_styles[\"dark_gray_color\"] = to_rgba(\"#\" + str(dark_gray_color), 1)\n nwc_styles[\"light_gray_color\"] = to_rgba(\"#\" + str(light_gray_color), 1)\n nwc_styles[\"neutral_color\"] = to_rgba(\"#\" + str(color), 1)\n nwc_styles[\"strong_color\"] = to_rgba(\"#\" + str(strong_color), 1)\n nwc_styles[\"positive_color\"] = to_rgba(\"#\" + positive_color, 1)\n nwc_styles[\"negative_color\"] = to_rgba(\"#\" + negative_color, 1)\n nwc_styles[\"warm_color\"] = to_rgba(\"#\" + warm_color, 1)\n nwc_styles[\"cold_color\"] = to_rgba(\"#\" + cold_color, 1)\n nwc_styles[\"fill_between_color\"] = to_rgba(\"#\" + str(fill_between_color), 1)\n nwc_styles[\"fill_between_alpha\"] = float(fill_between_alpha)\n\n if \"qualitative_colors\" in rc_params_newsworthy:\n nwc_styles[\"qualitative_colors\"] = [\n to_rgba(\"#\" + c.strip(), 1)\n for c in 
rc_params_newsworthy[\"qualitative_colors\"].split(\",\")\n ]\n\n else:\n nwc_styles[\"qualitative_colors\"] = [to_rgba(\"#\" + c, 1) for c in QUALITATIVE]\n if \"logo\" in rc_params_newsworthy:\n nwc_styles[\"logo\"] = rc_params_newsworthy[\"logo\"]\n\n return style, nwc_styles", "def load_style_sheet() -> str:\n return _preprocess_style(_read_text('style.css.template'))", "def LHCbStyle ( name = \"LHCbStyle\" ,\n desc = \"Standard LHCb plots style\" ,\n lineWidth = lhcbWidth ,\n font = lhcbFont ,\n makeNew = False ,\n force = True ) :\n obj = ROOT.gROOT.FindObject ( name )\n if obj and issubclass ( type( obj ) , ROOT.TStyle ) and not makeNew : \n logger.info ('The style %s is reused' % obj.GetName() )\n if force : \n logger.info ('The style %s is forced' % obj.GetName() )\n ROOT.gROOT.SetStyle ( obj.GetName() )\n ROOT.gROOT.ForceStyle ( )\n return obj\n \n nam = name\n i = 1\n while obj :\n nam = name + '_%d' % i\n obj = ROOT.gROOT.FindObject ( nam )\n i += 1\n \n style = ROOT.TStyle ( nam , desc )\n logger.info ('New style %s is created' % style.GetName() )\n\n ROOT.gROOT.SetStyle()\n \n style . Reset() \n ## use plain black on white colors\n style . SetFrameBorderMode ( 0 )\n style . SetCanvasBorderMode ( 0 )\n style . SetPadBorderMode ( 0 )\n style . SetPadColor ( 0 )\n style . SetCanvasColor ( 0 )\n style . SetStatColor ( 0 )\n # style . SetPalette ( 1 )\n # style . SetTitleStyle ( 0 )\n ## set the paper & margin sizes\n style . SetPaperSize ( 20 , 26 )\n style . SetPadTopMargin ( 0.05 )\n style . SetPadRightMargin ( 0.05 ) ## increase for colz plots\n style . SetPadBottomMargin ( 0.16 )\n style . SetPadLeftMargin ( 0.14 )\n \n ## use large fonts\n style . SetTextFont ( font )\n style . SetTextSize ( 0.08 )\n style . SetLabelFont ( font , \"x\" ) \n style . SetLabelFont ( font , \"y\" ) \n style . SetLabelFont ( font , \"z\" ) \n style . SetLabelSize ( 0.05 , \"x\" )\n style . SetLabelSize ( 0.05 , \"y\" )\n style . SetLabelSize ( 0.05 , \"z\" )\n style . SetTitleFont ( font )\n style . SetTitleSize ( 0.06 , \"x\" )\n style . SetTitleSize ( 0.06 , \"y\" )\n style . SetTitleSize ( 0.06 , \"z\" ) \n \n \n ## use bold lines and markers\n style . SetLineWidth ( lineWidth )\n style . SetFrameLineWidth ( lineWidth )\n style . SetHistLineWidth ( lineWidth )\n style . SetFuncWidth ( lineWidth )\n style . SetGridWidth ( lineWidth )\n style . SetLineStyleString ( 2 , \"[12 12]\" ) ## postscript dashes\n #style . SetMarkerStyle ( 20 )\n style . SetMarkerSize ( 1.2 )\n \n ## label offsets\n style . SetLabelOffset(0.015);\n \n ## by default, do not display histogram decorations:\n style . SetOptStat ( 0 ) \n ## lhcbStyle->SetOptStat(\"emr\"); ## show only nent -e , mean - m , rms -r\n ## full opts at http://root.cern.ch/root/html/TStyle.html#TStyle:SetOptStat\n style . SetStatFormat (\"6.3g\") ## specified as c printf options\n # style . SetOptTitle ( 1 )\n style . SetOptFit ( 0 )\n ## lhcbStyle . SetOptFit(1011); // order is probability, Chi2, errors, parameters\n \n\n ## look of the statistics box:\n style . SetStatBorderSize ( 0 )\n style . SetStatFont ( font )\n # style . SetStatFontSize ( 0.05 )\n # style . SetStatX ( 0.9 )\n # style . SetStatY ( 0.9 )\n # style . SetStatW ( 0.25 )\n # style . SetStatH ( 0.15 )\n ## put tick marks on top and RHS of plots\n style . SetPadTickX ( 1 )\n style . SetPadTickY ( 1 )\n \n## histogram divisions: only 5 in x to avoid label overlaps\n style . SetNdivisions ( 505 , \"x\" )\n style . 
SetNdivisions ( 510 , \"y\" )\n \n if force : \n logger.info ('The style %s is forced' % style.GetName() )\n ROOT.gROOT.SetStyle ( style.GetName() )\n ROOT.gROOT.ForceStyle ()\n \n return style", "def load_dg_style() -> str:\n from .formatting_html import load_style\n from .resources import _preprocess_style, _read_text\n\n style_sheet = (\n '<style id=\"datagroup-style-sheet\">'\n + _preprocess_style(_read_text('datagroup.css'))\n + '</style>'\n )\n\n return load_style() + style_sheet", "def loadcss(*args):\n return render(settings, 'CSS_FILES', 'staticloader/load_css.html', *args)", "def setup_css(self):\n\t\t\n\t\t# style de notre interface\n\t\tself.setStyleSheet(\"\"\"\n background-color: rgb(30,30,30);\n color: rgb(240,240,240);\n \"\"\")\n\t\t\n\t\t# style de nos combobox\n\t\tstyle = \"\"\"\n QComboBox::down-arrow {\n image: none;\n border-width: 0px;\n }\n QComboBox::drop-down {\n border-width: 0px;\n }\n \"\"\"\n\t\tself.cbb_devisesFrom.setStyleSheet(style)\n\t\tself.cbb_devisesTo.setStyleSheet(style)\n\t\t\n\t\t# style de notre bouton\n\t\tself.btn_inverser.setStyleSheet(\"background-color: red\")", "def loadCss(app):\n\n _browse = app._browse\n aContext = app.context\n appCss = aContext.css\n\n if _browse:\n return appCss\n\n if not app.inNb:\n return\n\n css = getCss(app)\n dh(css)\n dh(\n dedent(\n \"\"\"\n <script>\n globalThis.copyChar = (el, c) => {\n for (const el of document.getElementsByClassName('ccon')) {\n el.className = 'ccoff'\n }\n el.className = 'ccon'\n navigator.clipboard.writeText(String.fromCharCode(c))\n }\n </script>\n \"\"\"\n )\n )", "def _read_stylesheet(self, style):\n tree = ET.parse(style)\n for marker in tree.findall('style'):\n if marker.get('publishable') == 'true':\n self.publishable.add(marker.get('id'))", "def update(self):\n for stylesheet_path, widgets in self._widget_sheet_map.iteritems():\n with open(stylesheet_path, \"r\") as fid:\n raw_stylesheet = fid.read()\n \n for widget in widgets:\n widget.setStyleSheet(raw_stylesheet)", "def set_style(self):", "def scan_system_css():\r\n pass", "def test_load_style(sphinx_app_wrapper):\n sphinx_app = sphinx_app_wrapper.build_sphinx_app()\n cfg = sphinx_app.config\n assert cfg.project == \"Sphinx-Gallery <Tests>\"\n build_warn = sphinx_app._warning.getvalue()\n assert build_warn == \"\"\n index_html = os.path.join(sphinx_app_wrapper.outdir, \"index.html\")\n assert os.path.isfile(index_html)\n with open(index_html) as fid:\n content = fid.read()\n assert (\n 'link rel=\"stylesheet\" type=\"text/css\" href=\"_static/sg_gallery.css' in content\n ) # noqa: E501", "def setupStyling(self):\n\n\t\tfaces = {\n\t\t\t'times': 'Times New Roman',\n\t\t\t'mono' : 'Courier New',\n\t\t\t'helv' : 'Arial',\n\t\t\t'other': 'Comic Sans MS',\n\t\t\t'size' : 10,\n\t\t\t'size2': 8,\n\t\t}\n\n\t\tself.edit.StyleSetSpec(stc.STC_STYLE_DEFAULT, \"back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleClearAll()\n\t\tself.edit.StyleSetSpec(stc.STC_STYLE_LINENUMBER, \"fore:#928374,back:#212121,face:%(mono)s,size:%(size2)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.TEXT, \"fore:#d5c4a1,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HEADING, \"fore:#EFCD1E,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HIDDEN, \"fore:#d5c4a1,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODE, \"fore:#b8bb26,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.SYMBOL, 
\"fore:#81ac71,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.TEST, \"fore:#ff00ff,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.STRIKE, \"fore:#e44533,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.BOLD, \"fore:#d9a62e,bold,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.UNDERLINE, \"fore:#d9a62e,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.ITALIC, \"fore:#7d9d90,italic,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.IMAGE, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.IMAGE_UNDERLINED, \"fore:#cb8296,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.LINK, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.LINK_UNDERLINED, \"fore:#cb8296,underline,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HTML, \"fore:#cb8296,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.HTML_ATTRIBUTE, \"fore:#d9a62e,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.FORMAT, \"fore:#e44533,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.COMMENT, \"fore:#928372,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_KEYWORD, \"fore:#569cd6,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_SYMBOL, \"fore:#9cdcfe,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_TEXT, \"fore:#F9FFE0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_STRING, \"fore:#d69d73,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_COMMENT, \"fore:#57a64a,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_FUNCTION, \"fore:#4ec9b0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_CLASS, \"fore:#4ec9b0,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_TYPE, \"fore:#EFCD1E,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_FLOW, \"fore:#d8a0df,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.StyleSetSpec(STYLE.CODEBLOCK_DIGIT, \"fore:#b5ce92,back:#282828,face:%(mono)s,size:%(size)d\" % faces)\n\t\tself.edit.IndicatorSetStyle(0, stc.STC_INDIC_SQUIGGLE)\n\t\tself.edit.IndicatorSetForeground(0, wx.RED)", "def __init__(self):\n # Try to get the Bulma settings. 
The user may not have created this dict.\n try:\n self.bulma_settings = settings.BULMA_SETTINGS\n except AttributeError:\n self.bulma_settings = {}\n\n self.bulma_submodule_path = simple_bulma_path / \"bulma\" / \"sass\"\n self.custom_scss = self.bulma_settings.get(\"custom_scss\", [])\n self.variables = self.bulma_settings.get(\"variables\", {})\n self.output_style = self.bulma_settings.get(\"output_style\", \"nested\")\n self.storage = FileSystemStorage(simple_bulma_path)\n\n # Make a list of all the finders except this one.\n # We use this in the custom SCSS handler.\n other_finders = settings.STATICFILES_FINDERS.copy()\n other_finders.remove(\"django_simple_bulma.finders.SimpleBulmaFinder\")\n self.other_finders = [get_finder(finder) for finder in other_finders]", "def make_styles(working_dir='.', media_query='@media....'):\n #print 'making styles'\n # if we dont have a order json file dont do anything\n #print \">>>>>\" + working_dir + '/order.json'\n if False == os.path.isfile(working_dir + '/order.json'): return\n final_block = ''\n json_css_order = open(working_dir + '/order.json')\n css_order = json.load(json_css_order)\n json_css_order.close()\n final_block, import_block_720 = create_import_blocks(css_order, working_dir)\n if import_block_720:\n final_block = final_block + '\\n\\n' + media_query + import_block_720 + '\\n}\\n'\n make_file(working_dir + '/scss/' + 'main_styles.scss', final_block + '\\n')\n call_sass(working_dir)", "def loadSHSChromas(IDs):\n fin = open(\"SHSDataset/Chromas/btchromas.cly\")\n fin.readline() #First line is 'chroma'\n chromas = {}\n while True:\n ID = fin.readline()\n if not ID:\n break\n ID = int(ID)\n if ID%1000 == 0:\n print(\"Loaded chromas for %i songs...\"%ID)\n if not ID in IDs:\n fin.readline()\n continue\n x = fin.readline().rstrip()\n x = np.array([float(a) for a in x.split(\",\")])\n x = np.reshape(x, (len(x)/12, 12))\n chromas[ID] = x\n fin.close()\n return chromas", "def get_styles(u):\n stat, ds_request = u.request(method = 'GET',\n path = 'rest/styles.json',\n payload = None,\n mime = 'application/json')\n json_data = json.loads(ds_request)\n if json_data.get('styles') == '':\n return None\n styles = json_data.get('styles').get('style')\n\n out = {}\n for style in styles:\n out[style.get('name')] = {'href': style.get('href')}\n return out", "def UpdateBaseStyles(self):\n super(EditraBaseStc, self).UpdateBaseStyles()\n\n # Set control specific styles\n sback = self.GetItemByName('select_style')\n if not sback.IsNull():\n sback = sback.GetBack()\n else:\n sback = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)\n self.VertEdit.SetBlockColor(sback)\n self.DefineMarkers()", "def load_style(self, path):\n self.wait_ready()\n\n def open_dialog():\n # Bring up the style popup menu and choose to open a style file\n self._song_pane.click_input(coords=(44, 73), absolute=False)\n menu = self._app.window(class_name='#32768')\n menu.menu_item('File Open Style').click_input()\n\n wait_until_passes(func=open_dialog,\n exceptions=ElementNotFoundError,\n timeout=120, retry_interval=0.4)\n self._open_file(path)", "def initialize_from_config(self) -> None:\n self.__style = self.plugin_configuration.get_string_property(\n \"style\",\n default_value=RuleMd029.__one_or_ordered_style,\n valid_value_fn=self.__validate_configuration_style,\n )", "def update_style(self):\n pass", "def LHCbStyle_alt ( name = \"LHCbStyle\" ,\n desc = \"Standard LHCb plots style\" ,\n lineWidth = lhcbWidth ,\n font = lhcbFont ,\n makeNew = False ,\n force = True ) :\n obj = 
ROOT.gROOT.FindObject ( name )\n if obj and issubclass ( type( obj ) , ROOT.TStyle ) and not makeNew : \n logger.info ('The style %s is reused' % obj.GetName() )\n if force : \n logger.info ('The style %s is forced' % obj.GetName() )\n ROOT.gROOT.SetStyle ( obj.GetName() )\n ROOT.gROOT.ForceStyle ( )\n return obj\n \n nam = name\n i = 1\n while obj :\n nam = name + '_%d' % i\n obj = ROOT.gROOT.FindObject ( nam )\n i += 1\n \n style = ROOT.TStyle ( nam , desc )\n logger.info ('New style %s is created' % style.GetName() )\n \n ## use plain black on white colors\n style . SetFrameBorderMode ( 0 )\n style . SetCanvasBorderMode ( 0 )\n style . SetPadBorderMode ( 0 )\n style . SetPadColor ( 0 )\n style . SetCanvasColor ( 0 )\n style . SetStatColor ( 0 )\n # style . SetPalette ( 1 )\n # style . SetTitleStyle ( 0 )\n ## set the paper & margin sizes\n style . SetPaperSize ( 20 , 26 )\n style . SetPadTopMargin ( 0.05 )\n style . SetPadRightMargin ( 0.05 ) ## increase for colz plots\n style . SetPadBottomMargin ( 0.16 )\n style . SetPadLeftMargin ( 0.14 )\n \n ## use large fonts\n style . SetTextFont ( font )\n style . SetTextSize ( 0.08 )\n style . SetLabelFont ( font , \"x\" ) \n style . SetLabelFont ( font , \"y\" ) \n style . SetLabelFont ( font , \"z\" ) \n style . SetLabelSize ( 0.05 , \"x\" )\n style . SetLabelSize ( 0.05 , \"y\" )\n style . SetLabelSize ( 0.05 , \"z\" )\n style . SetTitleFont ( font )\n style . SetTitleSize ( 0.06 , \"x\" )\n style . SetTitleSize ( 0.06 , \"y\" )\n style . SetTitleSize ( 0.06 , \"z\" ) \n \n \n ## use bold lines and markers\n style . SetLineWidth ( lineWidth )\n style . SetFrameLineWidth ( lineWidth )\n style . SetHistLineWidth ( lineWidth )\n style . SetFuncWidth ( lineWidth )\n style . SetGridWidth ( lineWidth )\n style . SetLineStyleString ( 2 , \"[12 12]\" ) ## postscript dashes\n #style . SetMarkerStyle ( 20 )\n style . SetMarkerSize ( 1.2 )\n \n ## label offsets\n style . SetLabelOffset(0.015);\n \n ## by default, do not display histogram decorations:\n style . SetOptStat ( 0 ) \n ## lhcbStyle->SetOptStat(\"emr\"); ## show only nent -e , mean - m , rms -r\n ## full opts at http://root.cern.ch/root/html/TStyle.html#TStyle:SetOptStat\n style . SetStatFormat (\"6.3g\") ## specified as c printf options\n # style . SetOptTitle ( 1 )\n style . SetOptFit ( 0 )\n ## lhcbStyle . SetOptFit(1011); // order is probability, Chi2, errors, parameters\n \n\n ## look of the statistics box:\n style . SetStatBorderSize ( 0 )\n style . SetStatFont ( font )\n # style . SetStatFontSize ( 0.05 )\n # style . SetStatX ( 0.9 )\n # style . SetStatY ( 0.9 )\n # style . SetStatW ( 0.25 )\n # style . SetStatH ( 0.15 )\n ## put tick marks on top and RHS of plots\n style . SetPadTickX ( 1 )\n style . SetPadTickY ( 1 )\n \n## histogram divisions: only 5 in x to avoid label overlaps\n style . SetNdivisions ( 505 , \"x\" )\n style . 
SetNdivisions ( 510 , \"y\" )\n \n if force : \n logger.info ('The style %s is forced' % style.GetName() )\n ROOT.gROOT.SetStyle ( style.GetName() )\n ROOT.gROOT.ForceStyle ()\n \n return style", "def get_styles():\n base_styles = {\n \"text-align\": \"center\",\n \"border\": \"1px solid #ddd\",\n \"padding\": \"7px\",\n \"border-radius\": \"2px\",\n }\n text_styles = {\n \"background-color\": \"#eee\",\n \"margin\": \"auto\",\n \"width\": \"50%\"\n }\n text_styles.update(base_styles)\n\n button_styles = {\n \"text-decoration\": \"none\",\n }\n button_styles.update(base_styles)\n\n fig_style = {\n \"padding\": \"10px\",\n \"width\": \"80%\",\n \"margin\": \"auto\",\n \"margin-top\": \"5px\"\n }\n fig_style.update(base_styles)\n return {\n \"text_styles\" : text_styles,\n \"base_styles\" : base_styles,\n \"button_styles\" : button_styles,\n \"fig_style\": fig_style,\n }", "def _get_bulma_css(self) -> List[str]:\n # If the user has the sass module installed in addition to libsass,\n # warn the user and fail hard.\n if not hasattr(sass, \"libsass_version\"):\n raise UserWarning(\n \"There was an error compiling your Bulma CSS. This error is \"\n \"probably caused by having the `sass` module installed, as the two modules \"\n \"are in conflict, causing django-simple-bulma to import the wrong sass namespace.\"\n \"\\n\"\n \"Please ensure you have only the `libsass` module installed, \"\n \"not both `sass` and `libsass`, or this application will not work.\"\n )\n\n # SASS wants paths with forward slash:\n sass_bulma_submodule_path = self.bulma_submodule_path \\\n .relative_to(simple_bulma_path).as_posix()\n\n bulma_string = f\"@import '{sass_bulma_submodule_path}/utilities/_all';\\n\"\n\n # Now load bulma dynamically.\n for dirname in self.bulma_submodule_path.iterdir():\n\n # We already added this earlier\n if dirname.name == \"utilities\":\n continue\n\n bulma_string += f\"@import '{sass_bulma_submodule_path}/{dirname.name}/_all';\\n\"\n\n # Now load in the extensions that the user wants\n extensions_string = self._get_extension_imports()\n\n # Generate SASS strings for each theme\n # The default theme is treated as \"\"\n theme_paths = []\n for theme in [\"\"] + themes:\n scss_string = \"@charset 'utf-8';\\n\"\n\n # Unpack this theme's custom variables\n variables = self.variables\n if theme:\n variables = settings.BULMA_SETTINGS[f\"{theme}_variables\"]\n scss_string += self._unpack_variables(variables)\n\n scss_string += bulma_string\n scss_string += extensions_string\n\n # Store this as a css file\n css_string = sass.compile(string=scss_string,\n output_style=self.output_style,\n include_paths=[simple_bulma_path.as_posix()])\n\n theme_path = f\"css/{theme + '_' if theme else ''}bulma.css\"\n css_path = simple_bulma_path / theme_path\n with open(css_path, \"w\", encoding=\"utf-8\") as bulma_css:\n bulma_css.write(css_string)\n theme_paths.append(theme_path)\n\n return theme_paths", "def CSSClasses(self):" ]
[ "0.5989639", "0.59036076", "0.56176794", "0.558392", "0.5241504", "0.5120053", "0.5115506", "0.507915", "0.5060298", "0.5031563", "0.50168383", "0.501615", "0.50023115", "0.49979183", "0.49873513", "0.49807042", "0.49791923", "0.49720952", "0.49583104", "0.49578643", "0.49547985", "0.4940858", "0.49116835", "0.48897865", "0.48791713", "0.48619038", "0.48394394", "0.48330495", "0.48038822", "0.4767508" ]
0.7472356
0
Get logger with path 'file name'. If permission error, create log in /tmp
def get_logger(logger_name, logging_format, file_name, level=logging.INFO): path, prepared = '', True for cat in file_name.split('/')[1:-1]: path += '/%s' % cat if not os.path.exists(path): try: os.mkdir(path) except PermissionError: prepared = False break if not prepared: file_name = '/tmp/%s' % file_name.split('/')[-1] logging.basicConfig(level=level, format=logging_format) log = logging.getLogger(logger_name) handler = logging.FileHandler(file_name, encoding='utf8') handler.setFormatter(logging.Formatter(logging_format)) log.addHandler(handler) log.setLevel(level=level) return log
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_logger(name, file_name_path='yang.log'):\n # check if file exists\n exists = False\n if os.path.isfile(file_name_path):\n exists = True\n FORMAT = '%(asctime)-15s %(levelname)-8s %(name)5s => %(message)s - %(lineno)d'\n DATEFMT = '%Y-%m-%d %H:%M:%S'\n logging.basicConfig(datefmt=DATEFMT, format=FORMAT, filename=file_name_path, level=logging.INFO)\n logger = logging.getLogger(name)\n # if file didn t exist we create it and now we can set chmod\n if not exists:\n os.chmod(file_name_path, 0o664 | stat.S_ISGID)\n return logger", "def get_logger(name: str) -> logging.Logger:\n try:\n p = Path(name)\n if p.exists():\n name = str(p.absolute().relative_to(Path.cwd()).as_posix())\n except:\n pass\n logger = logging.getLogger(name)\n # logger.addHandler(TqdmLoggingHandler())\n return logger", "def get_logger(name):\n logger = _root.getChild(name)\n if name.startswith(\"task.\") and _file_logging_enabled:\n _setup_task_logger(logger)\n return logger", "def _generate_log(path):\n # Create a logger and set the level.\n logger = logging.getLogger(\"Log_info\")\n # Check handler exists\n if len(logger.handlers) > 0:\n return logger # Logger already exists\n # set logger level\n logger.setLevel(logging.DEBUG)\n # Create file handler, log format and add the format to file handler\n stream_handler = logging.StreamHandler()\n file_handler = logging.FileHandler(path)\n\n # See https://docs.python.org/3/library/logging.html#logrecord-attributes\n # for log format attributes.\n log_format = \"%(levelname)s %(asctime)s %(message)s\"\n formatter = logging.Formatter(log_format)\n stream_handler.setFormatter(formatter)\n file_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n logger.addHandler(file_handler)\n\n return logger", "def get_logger(name):\n filename = \"file_sync.log\"\n _create_log_dir()\n filepath = os.path.join(FLASK_APP.config[\"LOG_DIR\"], filename)\n logger = logging.getLogger(name)\n handler = TimedRotatingFileHandler(filepath, when=\"midnight\")\n logger.setLevel(LOG_LEVELS[FLASK_APP.config[\"LOG_LEVEL\"]])\n handler.setLevel(LOG_LEVELS[FLASK_APP.config[\"LOG_LEVEL\"]])\n log_format = (\"%(asctime)s %(levelname)s %(pathname)s\"\n \":%(funcName)s: %(lineno)d - %(message)s\")\n formatter = logging.Formatter(log_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def get_logger(name, level='INFO', terminal_log=True, file_log=False,\n file_name=None, file_max_bytes=1048576, file_backup_count=3,\n email_on_warnings=True, email_on_errors=True):\n # Get the root logger and set the level\n log_level = getattr(logging, level.upper())\n root_logger = logging.getLogger('')\n root_logger.setLevel(log_level)\n\n handlers = []\n # Form the handler(s) and set the level\n if terminal_log:\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(log_level)\n handlers.append(stream_handler)\n\n # Create email warning handler\n if email_on_warnings:\n # Note, the placeholder in the subject will be replaced by the hostname\n warning_email_handler = CustomSMTPWarningHandler(\n mailhost=MAIL_HOST, fromaddr=WARNING_EMAIL,\n toaddrs=[WARNING_EMAIL], subject='Warning from: {}')\n warning_email_handler.setLevel(logging.WARNING)\n handlers.append(warning_email_handler)\n\n # Create email error handler\n if email_on_errors:\n # Note, the placeholder in the subject will be replaced by the hostname\n error_email_handler = CustomSMTPHandler(\n mailhost=MAIL_HOST, fromaddr=ERROR_EMAIL,\n toaddrs=[ERROR_EMAIL], subject='Error from: {}')\n 
error_email_handler.setLevel(logging.ERROR)\n handlers.append(error_email_handler)\n\n # Create rotating file handler\n if file_log:\n if file_name is None:\n file_name = name + '.log'\n file_handler = RotatingFileHandler(file_name, maxBytes=file_max_bytes,\n backupCount=file_backup_count)\n file_handler.setLevel(log_level)\n handlers.append(file_handler)\n\n # Add formatters to the handlers and add the handlers to the root_logger\n formatter = logging.Formatter(\n '%(asctime)s:%(name)s: %(levelname)s: %(message)s')\n for handler in handlers:\n handler.setFormatter(formatter)\n root_logger.addHandler(handler)\n\n # Create a named logger and return it\n logger = logging.getLogger(name)\n return logger", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def unicon_logger():\n temp_path = tempfile_name()\n _logger = logging.getLogger()\n LOGGING_CFG['handlers']['file']['filename'] = temp_path\n dictConfig(LOGGING_CFG)\n\n return _logger", "def get_logger(logpath, attached):\n if not os.path.isdir(logpath):\n os.makedirs(logpath)\n logger = logging.getLogger('queue_logger')\n logger.setLevel(logging.INFO)\n if attached:\n handler = logging.StreamHandler()\n else:\n logfile = os.path.join(logpath, 'queue.log')\n handler = TimedRotatingFileHandler(logfile,\n when='midnight',\n backupCount=30)\n formatter = logging.Formatter(\n fmt='%(asctime)s - %(levelname)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.propagate = False\n return logger", "def make_default_logger(file_path=LOG_FILENAME):\n logger = logging.getLogger(\"Logger\")\n if not len(logger.handlers):\n logger.setLevel(logging.DEBUG)\n # Create a handler and attach it to the logger\n try:\n handler = logging.handlers.RotatingFileHandler(\n file_path, maxBytes=5120000, backupCount=7\n )\n except OSError as e:\n if e.errno == 2:\n errprint(\n \"\\nWarning: %s: %s. \"\n \"Have you created the directory for the log?\"\n % (\n e.strerror,\n file_path,\n )\n )\n elif e.errno == 13:\n errprint(\n \"\\nWarning: %s: %s. 
\"\n \"Cannot access file as user: %s\"\n % (\n e.strerror,\n file_path,\n getpass.getuser(),\n )\n )\n else:\n errprint(\n \"\\nIOError [%s]: %s\\n%s\"\n % (e.errno, e.strerror, traceback.format_exc())\n )\n errprint(\n \"Juriscraper will continue to run, and all logs will be \"\n \"sent to stderr.\"\n )\n handler = logging.StreamHandler(sys.stderr)\n handler.setFormatter(\n logging.Formatter(\"%(asctime)s - %(levelname)s: %(message)s\")\n )\n logger.addHandler(handler)\n return logger", "def get(name):\r\n log = logging.getLogger(\"%s.%s\" % (ROOT_NAME, name))\r\n return log", "def open_log(fn):\n\n global log_file\n if fn is not None:\n d = os.path.dirname(fn)\n if d != \"\":\n makedirs(d)\n log_file = open(fn, \"a+\")", "def custom_logger(path_to_log_file=None, logger_name=None):\n\n if not logger_name:\n logger_name = 'root'\n\n print (logger_name)\n\n logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n rootLogger = logging.getLogger(logger_name)\n rootLogger.setLevel(logging.DEBUG)\n\n if path_to_log_file is None:\n logs_folder = 'logs'\n if not os.path.exists(logs_folder):\n os.makedirs(logs_folder)\n path_to_log_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), logs_folder, '{0}-{1}'.format(socket.gethostname(), 'root'))\n #print ('[i] will be logged in to {0}'.format(path_to_log_file))\n\n # # take care, here is a magic string!!! configure this for own needs\n fileHandler = logging.FileHandler(\"{path}.log\".format(path=path_to_log_file))\n fileHandler.setFormatter(logFormatter)\n fileHandler.setLevel(logging.DEBUG)\n rootLogger.addHandler(fileHandler)\n\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(logFormatter)\n consoleHandler.setLevel(logging.DEBUG)\n rootLogger.addHandler(consoleHandler)\n #rootLogger.info('\\n')\n\n return rootLogger", "def get_logger(request, name=\"ot_api\"):\n\n# package_dir = os.path.dirname(module_path)\n# config_filepath = os.path.join(package_dir, _LOGGING_CONFIG_FILE)\n# if os.path.exists(config_filepath):\n# try:\n# logging.config.fileConfig(config_filepath)\n# logger_set = True\n# except:\n# logger_set = False\n logger = logging.getLogger(name)\n if len(logger.handlers) == 0:\n if request is None:\n level = _get_logging_level(os.environ.get(_LOGGING_LEVEL_ENVAR))\n logging_formatter = _get_logging_formatter(os.environ.get(_LOGGING_FORMAT_ENVAR))\n logging_filepath = os.environ.get(_LOGGING_FILE_PATH_ENVAR)\n else:\n level_str, logging_format_name, logging_filepath = read_logging_config(request)\n logging_formatter = _get_logging_formatter(logging_format_name)\n level = _get_logging_level(level_str)\n\n logger.setLevel(level)\n if logging_filepath is not None:\n log_dir = os.path.split(logging_filepath)[0]\n if log_dir and not os.path.exists(log_dir):\n os.makedirs(log_dir)\n ch = logging.FileHandler(logging_filepath)\n else:\n ch = logging.StreamHandler()\n ch.setLevel(level)\n ch.setFormatter(logging_formatter)\n logger.addHandler(ch)\n return logger", "def get_logger(level=None, name=None, filename=None, log_dir=None):\n if isinstance(log_dir, str):\n log_dir = Path(log_dir)\n if level is None:\n level = settings.log_level\n if name is None:\n name = settings.log_name\n if filename is None:\n filename = settings.log_filename\n\n logger = lg.getLogger(name)\n\n # if a logger with this name is not already set up\n if len(logger.handlers) == 0:\n\n # get today's date and construct a log filename\n todays_date = 
dt.datetime.today().strftime(\"%Y_%m_%d\")\n\n if not log_dir:\n log_dir = settings.logs_folder\n\n log_filename = log_dir / \"{}_{}.log\".format(filename, todays_date)\n\n # if the logs folder does not already exist, create it\n if not log_dir.exists():\n log_dir.makedirs_p()\n # create file handler and log formatter and set them up\n formatter = lg.Formatter(\n \"%(asctime)s [%(process)d] %(levelname)s - %(name)s - %(\" \"message)s\"\n )\n if settings.log_file:\n handler = lg.FileHandler(log_filename, encoding=\"utf-8\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n if settings.log_console:\n handler = lg.StreamHandler(sys.stdout)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def get_logger(name: str, log_file_path: str, log_debug_file_path: str) -> logging.Logger:\n logger = logging.getLogger(name)\n\n if os.path.isfile(log_debug_file_path):\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.WARNING)\n\n format = logging.Formatter(\n fmt='%(asctime)s %(name)-12s %(levelname)s %(process)-8d %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n\n iris_service_file_handler = logging.FileHandler(log_file_path)\n iris_service_file_handler.setFormatter(format)\n logger.addHandler(iris_service_file_handler)\n\n return logger", "def get_logger(name):\n\n logfile = os.environ.get(\"LOGFILE\", \"/tmp/{}.log\".format(name))\n result_logger = logging.getLogger(name)\n stream_handler = logging.StreamHandler()\n file_handler = logging.FileHandler(logfile, encoding=\"utf8\")\n formatter = logging.Formatter(\n \"[%(asctime)s]\"\n \"[%(processName)s %(process)-6d]\"\n \"[%(filename)s %(lineno)d]\"\n \"[%(funcName)s]\"\n \"[%(levelname)s]\"\n \"%(message)s\"\n )\n stream_handler.setFormatter(formatter)\n file_handler.setFormatter(formatter)\n result_logger.addHandler(stream_handler)\n result_logger.addHandler(file_handler)\n result_logger.setLevel(logging.DEBUG)\n return result_logger", "def GetLogFile (physical = False) :\n if sys.hal_log_values [\"__log_file\"] is None :\n if physical :\n path = GetPath ()\n if sys.hal_log_values [\"__log_file_name\"] is None :\n if os.path.exists (path) : sys.hal_log_values [\"__log_file_name\"] = os.path.join (path, sys.hal_log_values [\"__log_const\"])\n else : raise PQHException (\"unable to create a log file in folder \" + path)\n \n if not isinstance (sys.hal_log_values [\"__log_file_name\"], str) :\n sys.hal_log_values [\"__log_file\"] = sys.hal_log_values [\"__log_file_name\"]\n else :\n try : \n sys.hal_log_values [\"__log_file\"] = open (sys.hal_log_values [\"__log_file_name\"], \"w\", encoding=\"utf-8\")\n except Exception as e: \n raise OSError (\"unable to create file \" + sys.hal_log_values [\"__log_file_name\"] + \"\\n\" + str(e))\n else :\n sys.hal_log_values [\"__log_file\"] = LogFakeFileStream()\n \n return sys.hal_log_values [\"__log_file\"]", "def get_logger(name='some script'):\n\n #timestamp for filename \n timestamp = datetime.now().strftime('%Y-%m-%d')\n\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n #custom formatter\n formatter = logging.Formatter(\n '%(asctime)s %(name)s %(levelname)s %(filename)s '\n '%(funcName)s line: %(lineno)s: %(msg)s'\n )\n handler = logging.FileHandler('/tmp/scripts_{0}.log'.format(timestamp))\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n #print to stdout if it's interactive, but file-only if not\n if sys.stdin.isatty():\n handler = logging.StreamHandler()\n 
handler.setFormatter(formatter)\n logger.addHandler(handler)\n \n return logger", "def get_logger(name: str, log_path: str = os.path.join(os.path.dirname(__file__), \"main.log\"),\n console: bool = False) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n # ensure that logging handlers are not duplicated\n for handler in list(logger.handlers):\n logger.removeHandler(handler)\n\n # rotating file handler\n if log_path:\n fh = RotatingFileHandler(path_join(log_path),\n maxBytes=10 * 2 ** 20, # 10 MB\n backupCount=1) # 1 backup\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # console handler\n if console:\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # null handler\n if not (log_path or console):\n logger.addHandler(logging.NullHandler())\n\n return logger", "def get_logger(name, log_dir, config_dir):\n config_dict = json.load(open(config_dir + 'log_config.json'))\n config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')\n logging.config.dictConfig(config_dict)\n logger = logging.getLogger(name)\n\n std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logging.Formatter(std_out_format))\n logger.addHandler(consoleHandler)\n\n return logger", "def get_logger(logger_name, log_folder=None, timestamp=\"%Y%m%d\", level=logging.DEBUG):\n from config import LOG_FILE_ROOT\n # if doesn't specify a log folder, use the default one in config\n if not log_folder:\n log_folder = LOG_FILE_ROOT\n if not os.path.exists(log_folder):\n os.makedirs(log_folder)\n if timestamp:\n logfile = os.path.join(log_folder, '%s_%s.log' % (logger_name, time.strftime(timestamp, datetime.datetime.now().timetuple())))\n else:\n logfile = os.path.join(log_folder, '%s.log' % logger_name)\n fmt = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)s - %(funcName)20s() ] - %(name)s - %(levelname)s -- %(message)s', datefmt=\"%H:%M:%S\")\n logger = logging.getLogger(logger_name)\n logger.setLevel(level)\n fh = logging.FileHandler(logfile)\n fh.setFormatter(fmt)\n fh.name = \"logfile\"\n logger.addHandler(fh)\n return (logger, logfile)", "def get_logger():\n # Prepare log directory.\n try:\n os.mkdir('logs')\n except FileExistsError:\n pass\n\n # Create logger and formatter.\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s %(message)s')\n\n # Create and attach stream handler.\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n # Create and attach file handler.\n file_handler = logging.handlers.TimedRotatingFileHandler(\n 'logs/log.txt', when='d', encoding='utf-8')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n return logger", "def init_log(log_path,\r\n name=None,\r\n level=logging.INFO,\r\n when=\"D\",\r\n backup=7,\r\n format=\"%(name)s:%(levelname)s:%(asctime)s:%(filename)s:%(lineno)d * %(thread)d %(message)s\",\r\n datefmt=\"%m-%d %H:%M:%S\"):\r\n formatter = logging.Formatter(format, datefmt)\r\n logger = logging.getLogger(name)\r\n logger.setLevel(level)\r\n\r\n dir = os.path.dirname(log_path)\r\n if not os.path.isdir(dir):\r\n os.makedirs(dir)\r\n\r\n # 输出info以上的信息\r\n handler = 
logging.handlers.TimedRotatingFileHandler(filename=log_path + \".log\",\r\n when=when,\r\n backupCount=backup)\r\n handler.setLevel(level)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n # 只输出warning的信息\r\n handler = logging.handlers.TimedRotatingFileHandler(filename=log_path + \".log.wf\",\r\n when=when,\r\n backupCount=backup)\r\n handler.setLevel(logging.WARNING)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n # 标准输出流\r\n stdout_handler = logging.StreamHandler(stream=sys.stdout)\r\n stdout_handler.setLevel(level)\r\n stdout_handler.setFormatter(formatter)\r\n logger.addHandler(stdout_handler)\r\n\r\n return logger", "def get_file_error_handler(logger_name, log_dir, level, backup_cnt):\r\n if not os.path.isdir(log_dir):\r\n raise NotADirectoryError()\r\n log_file = Path(log_dir) / f\"{logger_name}.log\"\r\n formatter = logging.Formatter(\"%(asctime)s - %(levelname)s - %(name)s - fun:- %(funcName)s - %(message)s\")\r\n file_handler = TimedRotatingFileHandler(log_file, when='midnight', backupCount=backup_cnt)\r\n file_handler.setLevel(level)\r\n file_handler.setFormatter(formatter)\r\n err_formatter = logging.Formatter(\"%(asctime)s - %(levelname)s - %(name)s - %(funcName)s:%(lineno)d - %(message)s\")\r\n _err_file = log_file.parent / f'{logger_name}-ERROR.log'\r\n err_file_handler = TimedRotatingFileHandler(_err_file, when='midnight', backupCount=backup_cnt)\r\n err_file_handler.setLevel(logging.ERROR)\r\n err_file_handler.setFormatter(err_formatter)\r\n return file_handler, err_file_handler", "def get_logger():\n logging.basicConfig(\n level=logging.DEBUG,\n format='[%(name)s] [%(asctime)s]: %(message)s')\n caller = whoami(offset=1)\n name = os.path.basename(caller)\n logger = logging.getLogger(name)\n return logger", "def get_logger(name, conf):\n\n try:\n # try absolute path\n lfile = conf['log_file']\n except KeyError:\n print('config warning: log file is not configured, logging to default.log')\n lfile = 'default.log'\n except:\n print('config error: log file directory does not exist')\n lfile = 'default.log'\n\n try:\n timezone = conf['time_zone']\n except KeyError:\n timezone = 'America/Chicago'\n\n tz = pytz.timezone(timezone)\n\n class Formatter(logging.Formatter):\n def converter(self, timestamp):\n return datetime.datetime.fromtimestamp(timestamp, tz)\n\n def formatTime(self, record, datefmt=None):\n dt = self.converter(record.created)\n if datefmt:\n s = dt.strftime(datefmt)\n else:\n t = dt.strftime(self.default_time_format)\n s = self.default_msec_format % (t, record.msecs)\n return s\n\n logger = logging.getLogger(name)\n handler = logging.FileHandler(lfile)\n handler.setFormatter(Formatter(\"%(asctime)s: %(levelname)s: %(name)s: %(message)s\", \"%Y-%m-%dT%H:%M:%S%z\"))\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n return logger", "def initialize_logger():\n if not os.path.exists(LOGGING_DIRECTORY):\n os.makedirs(LOGGING_DIRECTORY)\n os.chmod(LOGGING_DIRECTORY, 0o777)", "def get_logger(log_dir, name):\n class StreamHandlerWithTQDM(logging.Handler):\n \"\"\"Let `logging` print without breaking `tqdm` progress bars.\n See Also:\n > https://stackoverflow.com/questions/38543506\n \"\"\"\n def emit(self, record):\n try:\n msg = self.format(record)\n tqdm.write(msg)\n self.flush()\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)\n\n # Create logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # Log everything (i.e., DEBUG level and above) to a 
file\n log_path = os.path.join(log_dir, f'{name}.txt')\n file_handler = logging.FileHandler(log_path)\n file_handler.setLevel(logging.DEBUG)\n\n # Log everything except DEBUG level (i.e., INFO level and above) to console\n console_handler = StreamHandlerWithTQDM()\n console_handler.setLevel(logging.INFO)\n\n # Create format for the logs\n file_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n datefmt='%m.%d.%y %H:%M:%S')\n file_handler.setFormatter(file_formatter)\n console_formatter = logging.Formatter('[%(asctime)s] %(message)s',\n datefmt='%m.%d.%y %H:%M:%S')\n console_handler.setFormatter(console_formatter)\n\n # add the handlers to the logger\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n return logger", "def setup_logger(level, name, use_rotating_handler=True):\r\n \r\n logger = logging.getLogger(name)\r\n logger.propagate = False # Prevent the log messages from being duplicated in the python.log file\r\n logger.setLevel(level)\r\n \r\n log_file_path = os.path.join( os.environ['SPLUNK_HOME'], 'var', 'log', 'splunk', 'radius_auth_rest_handler.log' )\r\n \r\n if use_rotating_handler:\r\n file_handler = logging.handlers.RotatingFileHandler(log_file_path, maxBytes=25000000, backupCount=5)\r\n else:\r\n file_handler = logging.FileHandler(log_file_path)\r\n \r\n formatter = logging.Formatter('%(asctime)s %(levelname)s ' + name + ' - %(message)s')\r\n file_handler.setFormatter(formatter)\r\n \r\n logger.addHandler(file_handler)\r\n \r\n return logger" ]
[ "0.79687124", "0.7040542", "0.69095176", "0.68904185", "0.6871001", "0.676563", "0.6639393", "0.66237354", "0.662209", "0.66128546", "0.65883726", "0.6566198", "0.6562564", "0.65618443", "0.6552894", "0.6514455", "0.64994484", "0.6497173", "0.6477766", "0.64529455", "0.644887", "0.6437909", "0.6392691", "0.638515", "0.6383825", "0.63591415", "0.6341129", "0.62897915", "0.6261855", "0.6248019" ]
0.7458781
1
Tests that finding GnuPG works.
def test_load(self): detected_path = GnuPG.path() self.assertIsNotNone(detected_path) self.assertIn(detected_path, ['/usr/bin/gpg2', '/usr/bin/gpg'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_gpg_exists(self):\n self.assertTrue(self.mikla.gpg_exists('ls'))", "def gnupg_bin():\n for a_bin in [\"gpg2\", \"gpg\"]:\n gpg_output = which_bin(a_bin)\n if gpg_output:\n return gpg_output\n\n raise CryptoritoError(\"gpg or gpg2 must be installed\")", "def test_gpg_not_exist(self):\n self.assertFalse(self.mikla.gpg_exists('the-amber-room'))", "def initializeGnuPG(config):\n ret = (None, None)\n\n if not config.EMAIL_GPG_SIGNING_ENABLED:\n return ret\n\n homedir = config.EMAIL_GPG_HOMEDIR\n primary = config.EMAIL_GPG_PRIMARY_KEY_FINGERPRINT\n passphrase = config.EMAIL_GPG_PASSPHRASE\n passFile = config.EMAIL_GPG_PASSPHRASE_FILE\n\n logging.info(\"Using %s as our GnuPG home directory...\" % homedir)\n gpg = gnupg.GPG(homedir=homedir)\n logging.info(\"Initialized GnuPG interface using %s binary with version %s.\"\n % (gpg.binary, gpg.binary_version))\n\n primarySK = None\n primaryPK = None\n secrets = gpg.list_keys(secret=True)\n publics = gpg.list_keys()\n\n if not secrets:\n logging.warn(\"No secret keys found in %s!\" % gpg.secring)\n return ret\n\n primarySK = filter(lambda key: key['fingerprint'] == primary, secrets)\n primaryPK = filter(lambda key: key['fingerprint'] == primary, publics)\n\n if primarySK and primaryPK:\n logging.info(\"Found GnuPG primary key with fingerprint: %s\" % primary)\n for sub in primaryPK[0]['subkeys']:\n logging.info(\" Subkey: %s Usage: %s\" % (sub[0], sub[1].upper()))\n else:\n logging.warn(\"GnuPG key %s could not be found in %s!\" % (primary, gpg.secring))\n return ret\n\n if passphrase:\n logging.info(\"Read GnuPG passphrase from config.\")\n elif passFile:\n try:\n with open(passFile) as fh:\n passphrase = fh.read()\n except (IOError, OSError):\n logging.error(\"Could not open GnuPG passphrase file: %s!\" % passFile)\n else:\n logging.info(\"Read GnuPG passphrase from file: %s\" % passFile)\n\n def gpgSignMessage(message):\n \"\"\"Sign **message** with the default key specified by\n ``EMAIL_GPG_PRIMARY_KEY_FINGERPRINT``.\n\n :param str message: A message to sign.\n :rtype: str or ``None``.\n :returns: A string containing the clearsigned message, or ``None`` if\n the signing failed.\n \"\"\"\n sig = gpg.sign(message, default_key=primary, passphrase=passphrase)\n if sig and sig.data:\n return sig.data\n\n logging.debug(\"Testing signature created with GnuPG key...\")\n sig = gpgSignMessage(\"Testing 1 2 3\")\n if sig:\n logging.info(\"Test signature with GnuPG key %s okay:\\n%s\" % (primary, sig))\n return (gpg, gpgSignMessage)\n\n return ret", "def gnupg_home():\n if 'GNUPGHOME' in os.environ:\n gnupghome = os.environ['GNUPGHOME']\n if not os.path.isdir(gnupghome):\n raise CryptoritoError(\"Invalid GNUPGHOME directory\")\n\n return [\"--homedir\", gnupghome]\n else:\n return []", "def check_gpp():\n chk = Popen(\"g++ --version\", shell=True, stdout=PIPE, stderr=PIPE)\n stdout, stderr = chk.communicate()\n if stderr:\n out_error(\"g++ not installed. 
Please install g++ for your distribution\")\n return\n gpp = [re.sub(\" +\", \" \", line.strip())\n for line in stdout.decode().splitlines()\n if line.lower().strip().startswith(\"g++\")][0]\n version = gpp[gpp.rfind(\" \") + 1:]\n out_info(\"g++ version: {}\".format(version))", "def test_gpus_raises():\n gpus = \"1\"\n\n with pytest.raises(ValueError):\n cli._gpus(gpus)", "async def test_minimal(gnupg_keypair: GnuPGKeypair):\n\n signer = GPGSigner(\n keyid=gnupg_keypair.keyid,\n passphrase=gnupg_keypair.passphrase,\n homedir=gnupg_keypair.gnupg_home,\n )\n\n # Note: At a minimum, [Cc]onfig key must exist with non-null value\n image_config = ImageConfig(b'{\"Config\":{}}')\n config_digest_canonical = image_config.get_digest_canonical()\n signature = await image_config.sign(signer=signer)\n assert \"PGP SIGNATURE\" in signature\n\n # A signature should always be able to be added ...\n assert b\"BEGIN PGP SIGNATURE\" in image_config.get_bytes()\n signatures = image_config.get_signature_list()\n assert len(signatures) == 1\n assert signatures[0].digest == config_digest_canonical\n assert signatures[0].signature == signature", "def __init__(self, gpg_binary, gpg_home):\n\n self.gpg_binary = gpg_binary\n self.gpg_home = gpg_home\n try:\n from gnupg import GPG\n except ImportError:\n raise TracError(_(\"Unable to load the python-gnupg module. \" \\\n \"Please check and correct your installation.\"))\n try:\n self.gpg = GPG(gpgbinary=self.gpg_binary, gnupghome=self.gpg_home)\n # get list of available public keys once for later use\n self.pubkeys = self.gpg.list_keys() # same as gpg.list_keys(False)\n except ValueError:\n raise TracError(_(\"Missing the crypto binary. \" \\\n \"Please check and set full path \" \\\n \"with option 'gpg_binary'.\"))", "def ghost():\n with settings(hide('everything'), warn_only=True):\n try:\n gcccheck = run('which gcc')\n if gcccheck.return_code == 1:\n print('gcc not found on remote host..'\n 'Sending locally compiled %s ghost binary.' % local_arch)\n put('bin/GHOST', mirror_local_mode=True)\n else:\n print('gcc detected on remote host.. '\n 'Sending ghost source.')\n put('src/GHOST.c', mirror_local_mode=True)\n run('gcc -o GHOST GHOST.c')\n run('rm GHOST.c')\n check = run('./GHOST')\n check = check.upper()\n run('rm GHOST')\n if re.search(r'\\bEXEC FORMAT ERROR\\b', check):\n arch_check = run('uname -i')\n print('%s: Detected %s architecture..\\n'\n 'Install gcc on host and try again.'\n % (env.host, arch_check))\n logging.warning('%s: Testing for \\'Ghost\\' failed. 
'\n 'Architecture Mismatch (%s != %s)'\n % (env.host, local_arch, arch_check))\n else:\n print('%s is %s' % (env.host, check))\n logging.warning('%s: Ghost %s' % (env.host, check))\n except Exception as e:\n logging.warning('%s: Error: %s' % (env.host, e.message))", "def test_6_1_8_etc_gshadow_exists(host):\n assert host.file(ETC_GSHADOW).exists", "def check_glibver(reqver_text):\n\treturn check_pkgcfg_ver(reqver_text, 'glib-2.0')", "def test_missing_proteins():\n\n rv, out = getstatusoutput(f'{prg} -p {proteins}')\n assert rv > 0\n assert re.search('the following arguments are required: -c/--cdhit', out)", "def __init__(self, home=None):\n if not home:\n home = os.getenv(\"HOME\") + \"/.gnupg\"\n self.gpg = gnupg.GPG(gnupghome=home)", "def gpg_version():\n cmd = flatten([gnupg_bin(), \"--version\"])\n output = stderr_output(cmd)\n output = output \\\n .split('\\n')[0] \\\n .split(\" \")[2] \\\n .split('.')\n return tuple([int(x) for x in output])", "def check_install(self, gppkg_filename):\n cmd = \"gppkg -q %s\" % gppkg_filename\n results = run_command(cmd)\n test_str = ''.join(gppkg_filename.split('-')[:1]) + \" is installed\"\n is_installed = test_str in results\n return is_installed and CheckFile(os.path.join(ARCHIVE_PATH, gppkg_filename)).run()", "def run():\n if ppg2.inside_ppg():\n ppg2.run()\n else:\n pass", "def test_check_gff():\n gene_list = []\n gene, gene_list= check_gff(INPUT_ok, gene_list)\n # print(gene, gene_list)\n assert_equal(gene, \"GPLIN_000000100\")\n assert_equal(gene_list, [\"GPLIN_000000100\"])", "def check_gnome_version():\n (stdin, stdout) = os.popen2(\"gnome-session --version\")\n version = stdout.read().split()[1]\n if version[0] == '3':\n return \"gnome3\"\n elif version[0] == '2':\n return \"gnome2\"\n else:\n return None", "def find_gpas(s):\n \"*** YOUR CODE HERE ***\"", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()" ]
[ "0.70118", "0.6833564", "0.6640107", "0.6095676", "0.59951377", "0.5807542", "0.5769558", "0.5716728", "0.5476184", "0.5413578", "0.5398371", "0.53139794", "0.5295809", "0.5264155", "0.52117217", "0.5208635", "0.51940846", "0.5167096", "0.5162217", "0.51571894", "0.51027113", "0.51027113", "0.51027113", "0.51027113", "0.51027113", "0.51027113", "0.51027113", "0.51027113", "0.51027113", "0.51027113" ]
0.7122632
0
Tests that converting a Python 2 ConfigParser object to a dictionary works as expected.
def test_config_to_dict_py2(self): if PYTHON_VERSION > 2: return from ConfigParser import ConfigParser fixture = ConfigParser() fixture.add_section('something') fixture.set('something', 'value', 'stuff') self.assertEqual({ 'something': { 'value': 'stuff' } }, config_to_dict(fixture))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_config_to_dict_py3(self):\n if PYTHON_VERSION < 3:\n return\n\n from configparser import ConfigParser\n fixture = ConfigParser()\n fixture['something'] = { 'value': 'stuff' }\n\n self.assertEqual({ 'something': { 'value': 'stuff' } }, config_to_dict(fixture))", "def test_config_as_dict():\n c = core.Config(foo='bar')\n\n # check custom configuration\n assert c['foo'] == 'bar'\n \n # check len and iter behavior\n i = 0\n for k in c:\n i += 1\n assert len(c) == i\n assert 'datapath' in c._keys\n\n # check default get behavior\n assert c.get('doesNotExist') is None\n assert c.get('doesNotExists', 'foobar') == 'foobar'", "def test_config_from_args() -> None:\n test_args = [\n \"--int-field\",\n \"5\",\n \"--str-field\",\n \"hello\",\n \"--float-field\",\n \"0.5\",\n \"--int-enum-field\",\n \"B\",\n \"--str-enum-field\",\n \"A\",\n \"--bool-field\",\n ]\n expected_config = {\n \"int_field\": 5,\n \"str_field\": \"hello\",\n \"float_field\": 0.5,\n \"int_enum_field\": MyIntEnum.B,\n \"str_enum_field\": MyStrEnum.A,\n \"bool_field\": True,\n }\n config = MyConfig.fromargs(args=test_args)\n assert config.asdict() == expected_config", "def test_parse_str(parser):\n doc = parser.parse('{\"hello\": \"world\"}')\n assert doc.as_dict() == {'hello': 'world'}", "def test_parser():\n\n parser = configparser.RawConfigParser()\n version = '1.2.3'\n string = 'string-value'\n bool = 'False'\n literal = \"['a', 'b', 'c']\"\n literal2 = '1.23'\n section = 'dashboard'\n\n parser.add_section(section)\n parser.set(section, 'APP_VERSION', version)\n parser.set(section, 'string', string)\n parser.set(section, 'bool', bool)\n parser.set(section, 'literal', literal)\n parser.set(section, 'literal2', literal2)\n\n assert parse_version(parser, section, 'default') == version\n assert parse_string(parser, section, 'string', 'default') == string\n assert not parse_bool(parser, section, 'bool', 'True')\n assert parse_literal(parser, section, 'literal', 'default') == ['a', 'b', 'c']\n assert parse_literal(parser, section, 'literal2', 'default') == 1.23", "def parse_config_file(config_file):\n\n config = ConfigParser.SafeConfigParser()\n config.read(config_file)\n\n config_dict = {}\n for section in config.sections():\n # TODO : Should I force all section names to lowercase?\n config_dict[section.strip()] = dict(config.items(section))\n\n\n return config_dict", "def test_settings_parse(mock_os_environ):\n expected = {'bla': 'test'}\n parser = MagicMock()\n parser.return_value = expected\n settings_map = settings_parser.Settings(prefix='TEST_STUFF', parser=parser)\n assert parser.call_count == 1\n assert isinstance(settings_map, Mapping)\n assert dict(settings_map) == expected", "def parse_config(config):\n unverified_config = loads(config)\n if not verify_configuration_types(unverified_config):\n raise ValueError(\"Configuration verification failed.\")\n result = {\"count\": unverified_config[\"count\"]}\n return result", "def test_issue588(self):\n c = ConfigDict()\n c.load_dict({'a': {'b': 'c'}}, make_namespaces=True)\n self.assertEqual('c', c['a.b'])\n self.assertEqual('c', c['a']['b'])\n self.assertEqual({'b': 'c'}, c['a'])", "def ini_to_dict(path):\n config = ConfigParser()\n config.read(path)\n return_value = OrderedDict()\n for section in reversed(config.sections()):\n return_value[section] = OrderedDict()\n section_tuples = config.items(section)\n for itemTurple in reversed(section_tuples):\n return_value[section][itemTurple[0]] = itemTurple[1]\n return return_value", "def arglist_parse_to_dict(arg_l):\n\n 
prop_d = {}\n for prop in arg_l:\n if len(prop) == 2:\n prop_l = prop\n elif ':' in prop:\n prop_l = prop.split(':')\n elif '=' in prop:\n prop_l = prop.split('=')\n else:\n exit( \"==> ERROR: invalid config. Use '=' or ':'.\" )\n if not len(prop_l) == 2:\n exit( \"==> ERROR: invalid config. Use one '=' per setting.\" )\n prop_d[prop_l[0]] = prop_l[1]\n return prop_d", "def test_collect_configuration(self):\n sample_config = \"\"\"[dyndnsc]\nconfigs = testconfig\n\n[testconfig]\nuse_preset = testpreset\nupdater-userid = bob\nupdater-password = XYZ\n# test overwriting a preset value:\ndetector-url = http://myip.example.com/\n\n[preset:testpreset]\nupdater = fubarUpdater\nupdater-url = https://update.example.com/nic/update\nupdater-moreparam = some_stuff\ndetector = webcheck4\ndetector-family = INET\ndetector-url = http://ip.example.com/\ndetector-parser = plain\n \"\"\"\n p = configparser.ConfigParser()\n p.readfp(StringIO(sample_config)) # XXX readfp() is deprecated since py 3.2\n config = collect_config(p)\n self.assertEqual(dict, type(config))\n self.assertTrue('testconfig' in config)\n self.assertTrue('detector' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['detector'], list))\n self.assertEqual(1, len(config['testconfig']['detector']))\n detector, detector_opts = config['testconfig']['detector'][-1]\n self.assertEqual(detector, \"webcheck4\") # from the preset\n self.assertEqual(detector_opts['url'], \"http://myip.example.com/\") # from the user conf\n self.assertTrue('updater' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['updater'], list))\n self.assertEqual(1, len(config['testconfig']['updater']))\n updater = config['testconfig']['updater'][0]\n self.assertEqual(\"fubarUpdater\", updater[0])\n self.assertTrue(\"url\" in updater[1])\n self.assertTrue(\"moreparam\" in updater[1])\n self.assertEqual(\"some_stuff\", updater[1][\"moreparam\"])", "def read_raw_parser_conf(data: str) -> dict:\n config = configparser.ConfigParser(allow_no_value=True)\n config.read_string(data)\n try:\n _data: dict = dict(config[\"jiratag_commitizen\"])\n if \"files\" in _data:\n files = _data[\"files\"]\n _f = json.loads(files)\n _data.update({\"files\": _f})\n\n return _data\n\n except KeyError:\n return {}", "def test_load_json_good_to_dictionary(self):\n self.assertIsInstance(LoadJsonConfig.read_config_file(LoadJsonConfig(),'data/json/conf_ok.json'),OrderedDict)", "def testConfigA(self):\n assert type(self.config) == dict, \"Read setting not returning a dictionary\"", "def read_config_info(ini_file):\n try:\n config = RawConfigParser()\n config.optionxform = lambda option: option\n config.read(ini_file)\n the_stuff = {}\n for section in config.sections():\n the_stuff[section] = {}\n for option in config.options(section):\n the_stuff[section][option] = config.get(section, option)\n\n return the_stuff\n except Exception as wtf:\n logger.error(f'Exception caught in read_config_info(): {wtf}')\n traceback.print_exc(file=sys.stdout)\n return sys.exit(1)", "def _createConfigParser(self):\n return ConfigParser.ConfigParser()", "def config_dump(config: configparser.ConfigParser = None) -> dict:\n return {s: {v: config[s][v] for v in config[s]} for s in config.keys()}", "def test_ssh_config2(self):\n self.assertEqual(\n parse(self.f_in['ssh_config2'], quiet=True),\n self.f_json['ssh_config2']\n )", "def process_config(config_file=''):\n if not config_file:\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"config\")\n config = 
configparser.ConfigParser()\n config.read(config_file)\n config_dict = {}\n for section in config.sections():\n config_dict[section] = {name: value for name, value in config.items(section)}\n return config_dict", "def parse_ini_file_into_dict(filename):\n output = {}\n\n INIfile = SafeConfigParser()\n result = INIfile.read(filename) # returns an empty list if file error\n if result == []:\n raise IOError\n\n #iterate through INI file and build dictionary\n for section_name in INIfile.sections():\n section_dict = {}\n for option_name in INIfile.options(section_name):\n option_value = INIfile.get(section_name, option_name)\n section_dict[option_name] = option_value\n output[section_name] = section_dict\n\n return output", "def testGetConfDict():\n\n conf = naiveConf.NaiveConf(exampleConfFname)\n confDict = conf.getConfDict()\n assert type(confDict) == dict\n assert confDict['x'] == conf.x\n assert confDict['y'] == conf.y\n assert confDict['L'] == conf.L", "def get_config(config_path):\n import configobj\n try:\n config_dict = configobj.ConfigObj(config_path, file_error=True)\n except IOError:\n print('Error: unable to open configuration file %s' % config_path)\n raise\n except configobj.ConfigObjError as e:\n print('Error while parsing configuration file %s' % config_path)\n print(\"*** Reason: '%s'\" % e)\n raise\n\n return config_dict", "def test_string_to_dict2(self):\n actual_result = IperfParser(OUTPUT_RESULT_UDP).to_parse()\n self.assertEqual(actual_result, PARSER_EXPECTED_RESULT2)", "def parse_config(old_config):\n new_config = {}\n for sec in old_config.sections():\n new_config[sec] = {}\n for key in old_config[sec].keys():\n key_val = old_config[sec][key]\n try:\n new_config[sec][key] = ast.literal_eval(key_val)\n except StandardError:\n if key_val == \"\":\n key_val = None\n elif key_val[0] in [\"[\", \"{\", \"(\"] or key_val[-1] in [\"]\", \"}\", \")\"]:\n err_message = \"Value cannot be parsed for variable: \\\n{0} of the Section: {1}.\\nSyntax Error in value:{2}\".format(key, sec, key_val)\n raise Exception(err_message)\n new_config[sec][key] = key_val\n return new_config", "def _get_config_dict():\n path = _config_path()\n if not os.path.exists(path):\n return dict()\n with open(path, 'r') as f:\n config = json.load(f)\n assert isinstance(config, dict)\n return config", "def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = ini\n # if isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif self.config_file is not None:\n ini.read([self.config_file, os.path.expanduser('~/.' 
+ self.config_file)])\n\n if ini.has_section('whoshere'):\n return ini.items('whoshere')\n\n return {}", "def test_string_to_dict(self):\n actual_result = IperfParser(OUTPUT_RESULT).to_parse()\n self.assertEqual(actual_result, IPERF_PARSER_EXPECTED_RESULT)", "def _parse_from_yaml(self) -> Dict:\n config_path = path.join(path.dirname(path.abspath(__file__)), self.config_file)\n try:\n with open(config_path, \"r\") as f:\n config_dict = yaml.load(f, Loader=yaml.FullLoader)\n return config_dict\n except FileNotFoundError as fnfe:\n raise FileNotFoundError('configuration file not found.')\n except Exception as exc:\n raise Exception('Error while loading config file.')", "def test_merge_configparser(self):\n cd = ConfigDict.from_dict({\n 'a': 1,\n 'b': {\n 'c': 2,\n 'd': 3,\n }\n })\n\n schema = Schema({\n 'a': Coerce(int),\n 'z': basestring,\n 'b': {\n 'c': Coerce(int)\n }\n }, extra=True)\n cd.register_trigger(\n SchemaTrigger(schema)\n )\n\n cfg = ConfigParser()\n cfg.read_string(u\"\"\"\n [main]\n a = 11\n z = 99\n\n [b]\n c = 22\n \"\"\")\n\n cd.merge_configparser(cfg)\n cd.configure()\n\n self.assertEquals(cd.a,11)\n self.assertEquals(cd.z, '99')\n self.assertEquals(cd.b.c, 22)\n self.assertEquals(cd.b.d, 3)" ]
[ "0.77845216", "0.68661237", "0.62058705", "0.6147573", "0.6130427", "0.6077881", "0.60639715", "0.5976578", "0.59647095", "0.5959332", "0.5956973", "0.59301674", "0.59173024", "0.5910618", "0.5908001", "0.5880878", "0.5876987", "0.58738756", "0.58470243", "0.5835892", "0.57724273", "0.5760726", "0.57173437", "0.5714739", "0.5708861", "0.57067335", "0.5695399", "0.5695333", "0.56949043", "0.5668999" ]
0.8247538
0
Tests that converting a Python 3 ConfigParser object to a dictionary works as expected.
def test_config_to_dict_py3(self):
    if PYTHON_VERSION < 3:
        return

    from configparser import ConfigParser
    fixture = ConfigParser()
    fixture['something'] = { 'value': 'stuff' }

    self.assertEqual({ 'something': { 'value': 'stuff' } }, config_to_dict(fixture))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_config_to_dict_py2(self):\n if PYTHON_VERSION > 2:\n return\n\n from ConfigParser import ConfigParser\n fixture = ConfigParser()\n fixture.add_section('something')\n fixture.set('something', 'value', 'stuff')\n\n self.assertEqual({ 'something': { 'value': 'stuff' } }, config_to_dict(fixture))", "def test_config_as_dict():\n c = core.Config(foo='bar')\n\n # check custom configuration\n assert c['foo'] == 'bar'\n \n # check len and iter behavior\n i = 0\n for k in c:\n i += 1\n assert len(c) == i\n assert 'datapath' in c._keys\n\n # check default get behavior\n assert c.get('doesNotExist') is None\n assert c.get('doesNotExists', 'foobar') == 'foobar'", "def test_load_json_good_to_dictionary(self):\n self.assertIsInstance(LoadJsonConfig.read_config_file(LoadJsonConfig(),'data/json/conf_ok.json'),OrderedDict)", "def test_parse_str(parser):\n doc = parser.parse('{\"hello\": \"world\"}')\n assert doc.as_dict() == {'hello': 'world'}", "def test_issue588(self):\n c = ConfigDict()\n c.load_dict({'a': {'b': 'c'}}, make_namespaces=True)\n self.assertEqual('c', c['a.b'])\n self.assertEqual('c', c['a']['b'])\n self.assertEqual({'b': 'c'}, c['a'])", "def ini_to_dict(path):\n config = ConfigParser()\n config.read(path)\n return_value = OrderedDict()\n for section in reversed(config.sections()):\n return_value[section] = OrderedDict()\n section_tuples = config.items(section)\n for itemTurple in reversed(section_tuples):\n return_value[section][itemTurple[0]] = itemTurple[1]\n return return_value", "def parse_config_file(config_file):\n\n config = ConfigParser.SafeConfigParser()\n config.read(config_file)\n\n config_dict = {}\n for section in config.sections():\n # TODO : Should I force all section names to lowercase?\n config_dict[section.strip()] = dict(config.items(section))\n\n\n return config_dict", "def test_settings_parse(mock_os_environ):\n expected = {'bla': 'test'}\n parser = MagicMock()\n parser.return_value = expected\n settings_map = settings_parser.Settings(prefix='TEST_STUFF', parser=parser)\n assert parser.call_count == 1\n assert isinstance(settings_map, Mapping)\n assert dict(settings_map) == expected", "def test_parser():\n\n parser = configparser.RawConfigParser()\n version = '1.2.3'\n string = 'string-value'\n bool = 'False'\n literal = \"['a', 'b', 'c']\"\n literal2 = '1.23'\n section = 'dashboard'\n\n parser.add_section(section)\n parser.set(section, 'APP_VERSION', version)\n parser.set(section, 'string', string)\n parser.set(section, 'bool', bool)\n parser.set(section, 'literal', literal)\n parser.set(section, 'literal2', literal2)\n\n assert parse_version(parser, section, 'default') == version\n assert parse_string(parser, section, 'string', 'default') == string\n assert not parse_bool(parser, section, 'bool', 'True')\n assert parse_literal(parser, section, 'literal', 'default') == ['a', 'b', 'c']\n assert parse_literal(parser, section, 'literal2', 'default') == 1.23", "def arglist_parse_to_dict(arg_l):\n\n prop_d = {}\n for prop in arg_l:\n if len(prop) == 2:\n prop_l = prop\n elif ':' in prop:\n prop_l = prop.split(':')\n elif '=' in prop:\n prop_l = prop.split('=')\n else:\n exit( \"==> ERROR: invalid config. Use '=' or ':'.\" )\n if not len(prop_l) == 2:\n exit( \"==> ERROR: invalid config. 
Use one '=' per setting.\" )\n prop_d[prop_l[0]] = prop_l[1]\n return prop_d", "def parse_config(config):\n unverified_config = loads(config)\n if not verify_configuration_types(unverified_config):\n raise ValueError(\"Configuration verification failed.\")\n result = {\"count\": unverified_config[\"count\"]}\n return result", "def read_raw_parser_conf(data: str) -> dict:\n config = configparser.ConfigParser(allow_no_value=True)\n config.read_string(data)\n try:\n _data: dict = dict(config[\"jiratag_commitizen\"])\n if \"files\" in _data:\n files = _data[\"files\"]\n _f = json.loads(files)\n _data.update({\"files\": _f})\n\n return _data\n\n except KeyError:\n return {}", "def test_config_from_args() -> None:\n test_args = [\n \"--int-field\",\n \"5\",\n \"--str-field\",\n \"hello\",\n \"--float-field\",\n \"0.5\",\n \"--int-enum-field\",\n \"B\",\n \"--str-enum-field\",\n \"A\",\n \"--bool-field\",\n ]\n expected_config = {\n \"int_field\": 5,\n \"str_field\": \"hello\",\n \"float_field\": 0.5,\n \"int_enum_field\": MyIntEnum.B,\n \"str_enum_field\": MyStrEnum.A,\n \"bool_field\": True,\n }\n config = MyConfig.fromargs(args=test_args)\n assert config.asdict() == expected_config", "def testConfigA(self):\n assert type(self.config) == dict, \"Read setting not returning a dictionary\"", "def _get_config_dict():\n path = _config_path()\n if not os.path.exists(path):\n return dict()\n with open(path, 'r') as f:\n config = json.load(f)\n assert isinstance(config, dict)\n return config", "def test_collect_configuration(self):\n sample_config = \"\"\"[dyndnsc]\nconfigs = testconfig\n\n[testconfig]\nuse_preset = testpreset\nupdater-userid = bob\nupdater-password = XYZ\n# test overwriting a preset value:\ndetector-url = http://myip.example.com/\n\n[preset:testpreset]\nupdater = fubarUpdater\nupdater-url = https://update.example.com/nic/update\nupdater-moreparam = some_stuff\ndetector = webcheck4\ndetector-family = INET\ndetector-url = http://ip.example.com/\ndetector-parser = plain\n \"\"\"\n p = configparser.ConfigParser()\n p.readfp(StringIO(sample_config)) # XXX readfp() is deprecated since py 3.2\n config = collect_config(p)\n self.assertEqual(dict, type(config))\n self.assertTrue('testconfig' in config)\n self.assertTrue('detector' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['detector'], list))\n self.assertEqual(1, len(config['testconfig']['detector']))\n detector, detector_opts = config['testconfig']['detector'][-1]\n self.assertEqual(detector, \"webcheck4\") # from the preset\n self.assertEqual(detector_opts['url'], \"http://myip.example.com/\") # from the user conf\n self.assertTrue('updater' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['updater'], list))\n self.assertEqual(1, len(config['testconfig']['updater']))\n updater = config['testconfig']['updater'][0]\n self.assertEqual(\"fubarUpdater\", updater[0])\n self.assertTrue(\"url\" in updater[1])\n self.assertTrue(\"moreparam\" in updater[1])\n self.assertEqual(\"some_stuff\", updater[1][\"moreparam\"])", "def process_config(config_file=''):\n if not config_file:\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"config\")\n config = configparser.ConfigParser()\n config.read(config_file)\n config_dict = {}\n for section in config.sections():\n config_dict[section] = {name: value for name, value in config.items(section)}\n return config_dict", "def config_dump(config: configparser.ConfigParser = None) -> dict:\n return {s: {v: config[s][v] for v in 
config[s]} for s in config.keys()}", "def parse_ini_file_into_dict(filename):\n output = {}\n\n INIfile = SafeConfigParser()\n result = INIfile.read(filename) # returns an empty list if file error\n if result == []:\n raise IOError\n\n #iterate through INI file and build dictionary\n for section_name in INIfile.sections():\n section_dict = {}\n for option_name in INIfile.options(section_name):\n option_value = INIfile.get(section_name, option_name)\n section_dict[option_name] = option_value\n output[section_name] = section_dict\n\n return output", "def read_config_info(ini_file):\n try:\n config = RawConfigParser()\n config.optionxform = lambda option: option\n config.read(ini_file)\n the_stuff = {}\n for section in config.sections():\n the_stuff[section] = {}\n for option in config.options(section):\n the_stuff[section][option] = config.get(section, option)\n\n return the_stuff\n except Exception as wtf:\n logger.error(f'Exception caught in read_config_info(): {wtf}')\n traceback.print_exc(file=sys.stdout)\n return sys.exit(1)", "def get_config_dicts(config_file):\n config_dicts = dict()\n time_stamp = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n\n current_name = None\n for i, line in enumerate(config_file):\n try:\n line = line.strip()\n line = re.sub(r\"#.*\", \"\", line)\n line = re.sub(r\"\\$TIME\", time_stamp, line)\n if not line:\n pass\n elif line.startswith(\";\"):\n pass\n elif OBJECT_NAME.match(line):\n current_name = OBJECT_NAME.match(line).group(1)\n if current_name in config_dicts:\n raise IniSyntaxError(i, \"Duplicit object key: '{}', line {}.\"\n .format(current_name, i))\n config_dicts[current_name] = dict()\n elif KEY_VALUE_PAIR.match(line):\n matched = KEY_VALUE_PAIR.match(line)\n key = matched.group(1)\n value_string = matched.group(2)\n if key in config_dicts[current_name]:\n raise IniSyntaxError(i, \"Duplicit key in '{}' object, line {}.\"\n .format(key, i))\n config_dicts[current_name][key] = format_value(value_string)\n else:\n raise IniSyntaxError(i, \"Unknown string: '{}'\".format(line))\n except IniSyntaxError as exc:\n raise\n except Exception as exc:\n raise IniSyntaxError(i, \"Error\", exc) from None\n\n config_file.close()\n return config_dicts", "def testGetConfDict():\n\n conf = naiveConf.NaiveConf(exampleConfFname)\n confDict = conf.getConfDict()\n assert type(confDict) == dict\n assert confDict['x'] == conf.x\n assert confDict['y'] == conf.y\n assert confDict['L'] == conf.L", "def test_mk_config(self):\n persistence_helper = self.add_helper(PersistenceHelper())\n config = {}\n new_config = persistence_helper.mk_config(config)\n self.assertEqual(\n ['redis_manager', 'riak_manager'], sorted(new_config.keys()))\n self.assertEqual(config, {})", "def get_config(config_path):\n import configobj\n try:\n config_dict = configobj.ConfigObj(config_path, file_error=True)\n except IOError:\n print('Error: unable to open configuration file %s' % config_path)\n raise\n except configobj.ConfigObjError as e:\n print('Error while parsing configuration file %s' % config_path)\n print(\"*** Reason: '%s'\" % e)\n raise\n\n return config_dict", "def test_string_to_dict(self):\n actual_result = IperfParser(OUTPUT_RESULT).to_parse()\n self.assertEqual(actual_result, IPERF_PARSER_EXPECTED_RESULT)", "def test_parser_to_dict(self):\n xml = \"\"\"\\\n<foo>\n <bar>baz</bar>\n <quz>\n <wow>works!</wow>\n </quz>\n</foo>\n\"\"\"\n d = x.to_dict(xml, {})\n assert d.bar.text_ == u'baz'\n assert d.quz.wow.text_ == u'works!'", "def get_confg(self):\n\n ini = ConfigParser()\n self.config_parser = 
ini\n # if isinstance(cfile, (file, StringIO.StringIO, io.BytesIO)):\n if isinstance(self.config_data, str) and self.config_data:\n fp = io.BytesIO(self.config_data)\n ini.readfp(fp)\n elif self.config_file is not None:\n ini.read([self.config_file, os.path.expanduser('~/.' + self.config_file)])\n\n if ini.has_section('whoshere'):\n return ini.items('whoshere')\n\n return {}", "def parse_config(old_config):\n new_config = {}\n for sec in old_config.sections():\n new_config[sec] = {}\n for key in old_config[sec].keys():\n key_val = old_config[sec][key]\n try:\n new_config[sec][key] = ast.literal_eval(key_val)\n except StandardError:\n if key_val == \"\":\n key_val = None\n elif key_val[0] in [\"[\", \"{\", \"(\"] or key_val[-1] in [\"]\", \"}\", \")\"]:\n err_message = \"Value cannot be parsed for variable: \\\n{0} of the Section: {1}.\\nSyntax Error in value:{2}\".format(key, sec, key_val)\n raise Exception(err_message)\n new_config[sec][key] = key_val\n return new_config", "def testConfigDictIO(self):\n testdict = {\n 'simple_types': {\n 'float': 1.0,\n 'int': 1,\n 'percent string': '5 % is too much',\n 'backslash string': 'i can use \\\\',\n 'empty_string': '',\n 'nonestring': 'None',\n 'nonetype': None,\n 'interpstring': 'interpolation: %(percent string)s',\n },\n 'containers': {\n 'list': [-1, 'string', 3.0, False, None],\n 'array': numpy.array([1.0, 2.0, 3.0]),\n 'dict': {\n 'key1': 'Hello World',\n 'key2': 2.0,\n }\n }\n }\n\n dump(testdict, self.ini_fname)\n\n #read the data back\n readdict = load(self.ini_fname)\n\n testdictkeys = list(testdict.keys())\n readkeys = list(readdict.keys())\n\n self.assertTrue(len(readkeys) == len(testdictkeys),\n \"Number of read keys not equal\")\n\n self.assertEqual(readdict['simple_types'][\"interpstring\"],\n \"interpolation: 5 % is too much\")\n\n testdict['simple_types'][\"interpstring\"] = \"interpolation: 5 % is too much\"\n\n for key in testdict[\"simple_types\"]:\n original = testdict['simple_types'][key]\n read = readdict['simple_types'][key]\n self.assertEqual(read, original,\n \"Read <%s> instead of <%s>\" % (read, original))\n\n for key in testdict[\"containers\"]:\n original = testdict[\"containers\"][key]\n read = readdict[\"containers\"][key]\n if key == 'array':\n self.assertEqual(read.all(), original.all(),\n \"Read <%s> instead of <%s>\" % (read, original))\n else:\n self.assertEqual(read, original,\n \"Read <%s> instead of <%s>\" % (read, original))", "def _parse_config_args(args):\r\n config_dict = dict()\r\n for config_str in args:\r\n try:\r\n components = config_str.split('=')\r\n if len(components) >= 2:\r\n config_dict[components[0]] = \"=\".join(components[1:])\r\n\r\n except:\r\n print \"Warning: could not interpret config value '{0}'\".format(config_str)\r\n pass\r\n\r\n return config_dict" ]
[ "0.7985385", "0.71259654", "0.63515246", "0.62299037", "0.6216688", "0.62075067", "0.61899275", "0.61818033", "0.61405694", "0.6104885", "0.6103535", "0.609441", "0.60868174", "0.6085061", "0.59943795", "0.59495205", "0.59309465", "0.5867904", "0.5845856", "0.5839396", "0.5835465", "0.58332807", "0.58011717", "0.57993317", "0.5772369", "0.5768613", "0.5763713", "0.57496125", "0.5735474", "0.57331663" ]
0.8315104
0
Deactivates a behavior and returns a boolean value: true if the behavior was active, false if it was not.
def deactivate_behavior(self, behavior):
    exists = behavior in self.active_behaviors
    if exists:
        self.active_behaviors.remove(behavior)
        behavior.consider_deactivation()
    return exists
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deactivate(self) -> bool:\n pass", "def consider_deactivation(self):\n if self.sensob_ultrasonic.get_value() >= 0.20:\n self.active_flag = False\n self.bbcon.deactivate_behaviour(self)", "def deactivate(self):\n self._is_active = False", "def consider_deactivation(self):\n pass", "def unset_running_behavior(self, behavior: Behavior) -> None:", "def deactivate(self):\r\n self.activated = False", "def deactivate(self):\n self.active = False", "def deactivate(self):\n self.active = False", "def deactivate(self):\n pass", "def deactivate(self):\n pass", "def deactivate(self) -> None:\n return self.stop()", "def deactivate(self):\n pass", "def deactivate(self):\n if self.happy_anim and self.happy_anim.is_running:\n self.happy_anim.abort()\n\n if self.roll_block_behavior and self.roll_block_behavior.is_active:\n self.roll_block_behavior.stop()", "async def async_turn_off_when_active(self, **kwargs: Any) -> None:\n await self._data.controller.programs.stop(self.entity_description.uid)\n self._update_activities()", "def deactivate(self):\n raise NotImplementedError('Subclasses must implement deactivate()')", "def removeBehavior(self, behavior):\n if not self.proxy:\n self.proxy = self.session.service(\"ALBehaviorManager\")\n return self.proxy.removeBehavior(behavior)", "async def async_turn_off_when_active(self, **kwargs: Any) -> None:\n raise NotImplementedError", "def deactivate(self):\r\n self.update_enrollment(is_active=False)", "def deactivate(self) -> None:\n self._bot.remove_flows_from(self)\n self.is_activated = False", "def behaviors_paused(self) -> bool:", "async def async_turn_off(self, **kwargs: Any) -> None:\n if not self.coordinator.data[self.entity_description.uid][\"active\"]:\n raise HomeAssistantError(\n f\"Cannot turn off an inactive program/zone: {self.name}\"\n )\n\n await self.async_turn_off_when_active(**kwargs)", "def deactivate(self):\n if self.search_behavior:\n self.search_behavior.stop()", "async def async_turn_off(self, **kwargs):\n await self.data.set_appliance_state(self.appliance_id, False)\n return True", "def deactivate(self):\n super(Pixiv_bot, self).deactivate()", "def _isdisable(self):\n return self.dp.state()==PyTango.DevState.DISABLE", "def deactivate_user(self, user):\n if user.active:\n user.active = False\n return True\n return False", "def removeDefaultBehavior(self, behavior):\n if not self.proxy:\n self.proxy = self.session.service(\"ALBehaviorManager\")\n return self.proxy.removeDefaultBehavior(behavior)", "def deactivate(self, util):\n return self._deactivate(util, persist=True)", "def deactivate(self):\n super(Pfsense, self).deactivate()", "def deactivate(self):\n super(Hipchap, self).deactivate()" ]
[ "0.72328097", "0.6804652", "0.6604412", "0.6549748", "0.647297", "0.6391283", "0.6334366", "0.6334366", "0.62551916", "0.62551916", "0.6246407", "0.6181994", "0.6126587", "0.6080045", "0.6040956", "0.59550935", "0.59410554", "0.5849345", "0.5833029", "0.58273166", "0.57891554", "0.57772726", "0.5724012", "0.56060874", "0.558255", "0.5572291", "0.55421275", "0.55394", "0.5527736", "0.5525499" ]
0.8199572
0
Updates all active behaviors
def update_all_behaviors(self):
    for behavior in self.active_behaviors:
        behavior.update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update(self):\n #self.consider_deactivation() if self.active_flag else self.consider_activation()\n if self.active_flag:\n self.consider_deactivation()\n else:\n self.consider_activation()\n if self.active_flag:\n self.sense_and_act()\n self.weight = self.match_degree*self.priority", "def update_actions(self):\n for i, player in enumerate(self.players):\n if self.alive[i]:\n self.actions[i] = player.get_action(self.states[i])", "def updateCurrentBehavior(self, gameState, action):\n self.behavior = \"attack\"", "def updateWeapons(self):\n self.readyWeapons = []\n self.setWeaponStatus()\n\n for myWeapon in self.activeWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.alternateTargets = []\n\n if self.amsTargets != []:\n for myWeapon in self.amsWeapons:\n if myWeapon.preFireCheck() == 1:\n self.readyWeapons.append(myWeapon)\n self.amsTargets = []", "def update_all_agent(self):\n for a in self.agents:\n soft_update(a.target_actor, a.actor, self.tau)\n soft_update(a.target_critic, a.critic, self.tau)\n self.num_iteration += 1", "def set_all_active(self):\n for name in self.get_names():\n self.set_active(name)", "def _update_on_active(self):\n pass", "def update_observable(self):\n self.scenario.update_observable()", "def update(self,update_flags):\n pass", "def bayesian_update(self):\n for p in self.parameters:\n p.bayesian_update()", "def update_activation(self) -> None:\n if self.forced:\n return\n self.units.update_activation()", "def update(self):\n self.reset()\n\n # Indices of agents that are playing the game\n # Loop over agents and update strategies\n agent = self.simulation.agent\n self.mask = agent.active & self.room.contains_points(agent.position)\n indices = np.arange(agent.size)[self.mask]\n\n # Agents that are not playing anymore will be patient again\n self.strategy[self.mask ^ True] = 1\n\n self.t_aset = self.t_aset_0 - self.simulation.time_tot\n poisson_update(self.simulation.agent, indices, self.door,\n self.radius, self.strategy, self.strategies, self.t_evac,\n self.t_aset, self.interval, self.simulation.dt)", "def _update_goals(self):\n print\"updating goals\"\n response = self.goal_tracker_call() # type: GoalsResponse\n self._goals = []\n for goal in response.goals: # type: Point\n self._goals.append([goal.x, goal.y, goal.z])\n self._num_goals = len(self._goals)\n\n self._current_belief = self._init_belief()", "def update_activity():\n pass", "def update(self, obs, actions, rewards, new_obs):\n pass", "def behavior_manager(self) -> BehaviorManager:", "def behavior_manager(self) -> BehaviorManager:", "def updateActionsAndMenus(self):\n 
self.app.actions.getAction(\"save_CAlpha\").setEnabled(self.loaded)\n self.app.actions.getAction(\"unload_CAlpha\").setEnabled(self.loaded)", "def update_all_step_settings(self, settings):\n for i, x in enumerate(self):\n self.update_settings_at_index(settings, index=i)", "def update(self):\n startstate = self.state\n goalstates =self.env.getGoalStates()\n inputs = self.env.sense(self)\n self.action_sequence = self.drive(goalstates,inputs)\n action = self.choose_action() # Choose an action\n self.state = self.env.act(self,action) \n return", "def update(self):\n\n pass", "def __update_observers(self):\n for observer in self.__observers: \n # print(\"hello\")\n observer.update(self)", "def apply_changes(self, updated_talk=None):\r\n self.presentationModel.select()\r\n self.select_talk(updated_talk)\r\n self.update_autocomplete_fields()", "def update_view(self):\n for row in self.view.obj_list:\n for obj in row:\n obj._update(self.model)", "def set_running_behavior(self, behavior: Behavior) -> None:", "def update(self):\n pass", "def update(self):\n pass" ]
[ "0.61757594", "0.61757594", "0.61757594", "0.61757594", "0.6033049", "0.6010509", "0.59927744", "0.5974894", "0.596138", "0.5838394", "0.5811177", "0.57843184", "0.5768181", "0.5756088", "0.5754011", "0.5745337", "0.57002616", "0.56976306", "0.56803435", "0.56803435", "0.56797075", "0.56739074", "0.5664675", "0.564989", "0.5631347", "0.5628084", "0.56259537", "0.56130207", "0.5610054", "0.5610054" ]
0.9065885
0
Parses the FEMA P58 DS hierarchy into a set of arrays.
def parse_DS_Hierarchy(DSH):
    if DSH[:3] == 'Seq':
        DSH = DSH[4:-1]

    DS_setup = []

    while len(DSH) > 0:
        if DSH[:2] == 'DS':
            DS_setup.append(DSH[:3])
            DSH = DSH[4:]
        elif DSH[:5] in {'MutEx', 'Simul'}:
            closing_pos = DSH.find(')')
            subDSH = DSH[:closing_pos + 1]
            DSH = DSH[closing_pos + 2:]
            DS_setup.append([subDSH[:5]] + subDSH[6:-1].split(','))

    return DS_setup
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def structure(self, ism_input):\n f = open(ism_input, 'r')\n data = []\n for line in f:\n line = line.replace('\\\"', '')\n line = line.replace('],[', '];[')\n line = line.strip()\n line = line.replace(']', '')\n line = line.replace('[', '')\n line = line.split(';')\n line[0] = line[0].split('|')\n ls = list(map(lambda x: x.split(','), line[1:]))\n ls = list(map(lambda x: list(map(lambda y: y.split('|'), x)), ls))\n line[1:] = ls\n data.append(line)\n data = np.array(data[1:]) \n \n return data", "def parse_level_data(build_dir=\"\"):\n build_dir = os.path.join(build_dir, \"ENSDF\")\n\n level_list = []\n files = sorted([f for f in glob.glob(os.path.join(build_dir, \"ensdf.*\"))])\n for f in files:\n print(\" building level data from {0}\".format(f))\n level_list = ensdf.levels(f, level_list)\n\n level_list_array = np.array(level_list, dtype=level_dtype)\n\n return level_list_array", "def test_parse_pi_xml_08(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.get_series():\n pass\n self.assertTrue(True)", "def parse(self):\n count = [] #count for trainset_size\n with open(self.file) as f:\n for line in f:\n data = line.split(\" \")[0]\n filename = data[:-1]\n id = data[-1:]\n if (filename not in count):\n count.append(filename)\n\n acid = \"\"\n structure = \"\"\n with open(self.directory+\"/\"+filename+\".dssp\") as dssp:\n for i in range(28): #skip lines we don't need\n next(dssp)\n for line in dssp:\n if (line[9] != \" \" and line[10] == \" \" and line[11] == id and line[13] not in (\"*\",\"!\",\"B\",\"Z\",\"X\")):\n #amino acid sequence\n if (line[13].islower()):\n acid += \"C\"\n else:\n acid += line[13]\n\n #sequence of the structure\n if (line[16] in (\"H\",\"G\",\"I\")):\n structure += \"H\"\n elif (line[16] in (\"E\",\"B\")):\n structure += \"E\"\n else:\n structure += \"C\"\n\n if (len(count) > self.trainset_size):\n self.testset.append((acid,structure))\n else:\n self.trainset.append((acid,structure))", "def init_sets(fastapath):\n st = set()\n with open (fastapath, 'r') as f:\n\n for rec in SeqIO.parse(f, 'fasta'):\n sq = str(rec.seq)\n st.add(sq)\n\n return st", "def test_parse_pi_xml_08(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.bulk_get_series(chunk_size=5):\n pass\n self.assertTrue(True)", "def test_split_by_chain_2():\n pdb_str = \"\"\"\\\nCRYST1 174.866 238.091 243.466 90.00 90.00 90.00 P 21 21 21\nSCALE1 0.005719 0.000000 0.000000 0.00000\nSCALE2 0.000000 0.004200 0.000000 0.00000\nSCALE3 0.000000 0.000000 0.004107 0.00000\nATOM 15295 N SER D 74 59.475 -46.773 121.838 1.00383.47 N\nATOM 15296 CA SER D 74 59.914 -46.196 120.574 1.00383.24 C\nATOM 15297 C SER D 74 59.605 -44.703 120.512 1.00379.51 C\nATOM 15298 O SER D 74 58.830 -44.258 119.666 1.00374.31 O\nATOM 15299 CB SER D 74 59.247 -46.915 119.397 1.00380.53 C\nATOM 15300 OG SER D 74 59.544 -48.301 119.400 1.00383.68 O\nATOM 15301 N ASN D 75 60.241 -43.943 121.402 1.00382.45 N\nATOM 15302 CA ASN D 75 60.129 -42.482 121.464 1.00380.05 C\nATOM 15303 C ASN D 75 58.725 -41.896 121.293 1.00372.68 C\nATOM 15304 O ASN D 75 58.581 -40.746 120.881 1.00370.19 O\nATOM 15305 CB ASN D 75 61.046 -41.855 120.418 1.00382.20 C\nATOM 15306 CG ASN D 75 62.508 -42.175 120.656 1.00389.96 C\nATOM 15307 OD1 ASN D 75 62.958 -42.273 121.796 1.00394.06 O\nATOM 15308 ND2 ASN D 75 63.263 -42.322 119.575 1.00392.78 N\nATOM 15309 N GLY D 76 57.703 -42.688 121.603 1.00369.47 N\nATOM 15310 CA GLY D 76 
56.321 -42.306 121.367 1.00362.43 C\nATOM 15311 C GLY D 76 55.844 -41.019 122.011 1.00358.49 C\nATOM 15312 O GLY D 76 56.116 -40.749 123.182 1.00360.25 O\nATOM 15313 N ARG D 77 55.124 -40.219 121.235 1.00353.03 N\nATOM 15314 CA ARG D 77 54.452 -39.045 121.771 1.00347.98 C\nATOM 15315 C ARG D 77 53.192 -38.773 120.956 1.00340.73 C\nATOM 15316 O ARG D 77 53.132 -39.072 119.764 1.00339.65 O\nATOM 15317 CB ARG D 77 55.383 -37.824 121.801 1.00349.75 C\nATOM 15318 CG ARG D 77 55.931 -37.385 120.451 1.00350.35 C\nATOM 15319 CD ARG D 77 57.293 -37.995 120.136 1.00358.77 C\nATOM 15320 NE ARG D 77 58.029 -37.121 119.227 1.00359.99 N\nATOM 15321 CZ ARG D 77 59.247 -37.355 118.749 1.00366.18 C\nATOM 15322 NH1 ARG D 77 59.921 -38.437 119.111 1.00371.93 N\nATOM 15323 NH2 ARG D 77 59.809 -36.473 117.933 1.00366.55 N\nTER\nATOM 15324 N TYR D 78 52.166 -38.264 121.625 1.00335.62 N\nATOM 15325 CA TYR D 78 50.919 -37.914 120.961 1.00328.66 C\nATOM 15326 C TYR D 78 50.974 -36.476 120.456 1.00325.47 C\nATOM 15327 O TYR D 78 50.504 -35.558 121.125 1.00322.51 O\nATOM 15328 CB TYR D 78 49.734 -38.100 121.913 1.00324.55 C\nATOM 15329 CG TYR D 78 49.238 -39.526 122.016 1.00325.38 C\nATOM 15330 CD1 TYR D 78 49.026 -40.289 120.877 1.00324.97 C\nATOM 15331 CD2 TYR D 78 48.973 -40.106 123.251 1.00326.27 C\nATOM 15332 CE1 TYR D 78 48.567 -41.589 120.962 1.00325.32 C\nATOM 15333 CE2 TYR D 78 48.514 -41.408 123.345 1.00326.09 C\nATOM 15334 CZ TYR D 78 48.314 -42.144 122.197 1.00325.64 C\nATOM 15335 OH TYR D 78 47.859 -43.439 122.280 1.00325.77 O\nATOM 19064 N ASN E 75 29.075 5.259 120.437 1.00232.69 N\nATOM 19065 CA ASN E 75 28.096 5.658 121.456 1.00227.54 C\nATOM 19066 C ASN E 75 27.246 4.544 122.068 1.00223.14 C\nATOM 19067 O ASN E 75 26.146 4.804 122.555 1.00219.10 O\nATOM 19068 CB ASN E 75 27.159 6.709 120.876 1.00226.97 C\nATOM 19069 CG ASN E 75 27.882 7.970 120.467 1.00230.84 C\nATOM 19070 OD1 ASN E 75 28.845 8.385 121.111 1.00232.56 O\nATOM 19071 ND2 ASN E 75 27.417 8.595 119.393 1.00233.24 N\nATOM 19072 N GLY E 76 27.759 3.318 122.053 1.00224.32 N\nATOM 19073 CA GLY E 76 26.998 2.150 122.461 1.00220.68 C\nATOM 19074 C GLY E 76 26.384 2.164 123.847 1.00215.47 C\nATOM 19075 O GLY E 76 27.013 2.573 124.823 1.00215.60 O\nATOM 19076 N ARG E 77 25.135 1.722 123.922 1.00211.38 N\nATOM 19077 CA ARG E 77 24.469 1.517 125.200 1.00206.91 C\nATOM 19078 C ARG E 77 23.459 0.384 125.076 1.00203.78 C\nATOM 19079 O ARG E 77 22.895 0.146 124.008 1.00203.81 O\nATOM 19080 CB ARG E 77 23.800 2.801 125.707 1.00204.73 C\nATOM 19081 CG ARG E 77 22.666 3.330 124.849 1.00204.05 C\nATOM 19082 CD ARG E 77 23.143 4.321 123.799 1.00208.96 C\nATOM 19083 NE ARG E 77 22.118 5.331 123.560 1.00208.57 N\nATOM 19084 CZ ARG E 77 22.230 6.335 122.698 1.00211.78 C\nATOM 19085 NH1 ARG E 77 23.334 6.480 121.983 1.00215.57 N\nATOM 19086 NH2 ARG E 77 21.241 7.205 122.563 1.00211.19 N\nTER\nATOM 19087 N TYR E 78 23.286 -0.346 126.170 1.00201.65 N\nATOM 19088 CA TYR E 78 22.334 -1.441 126.223 1.00198.95 C\nATOM 19089 C TYR E 78 20.973 -0.908 126.642 1.00195.03 C\nATOM 19090 O TYR E 78 20.616 -0.967 127.815 1.00193.71 O\nATOM 19091 CB TYR E 78 22.813 -2.517 127.201 1.00199.92 C\nATOM 19092 CG TYR E 78 23.854 -3.449 126.623 1.00204.23 C\nATOM 19093 CD1 TYR E 78 23.681 -4.014 125.369 1.00205.06 C\nATOM 19094 CD2 TYR E 78 25.014 -3.757 127.326 1.00208.09 C\nATOM 19095 CE1 TYR E 78 24.627 -4.863 124.832 1.00209.52 C\nATOM 19096 CE2 TYR E 78 25.969 -4.606 126.795 1.00212.29 C\nATOM 19097 CZ TYR E 78 25.769 -5.155 125.547 1.00213.03 C\nATOM 19098 OH TYR E 78 26.711 
-6.001 125.009 1.00218.33 O\nTER\n \"\"\"\n pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str)\n h = pdb_inp.construct_hierarchy()\n pars = ncs.input.get_default_params()\n pars.ncs_search.chain_similarity_threshold = 0.5\n\n ncs_obj = ncs.input(hierarchy=h, params=pars.ncs_search)\n spec = ncs_obj.get_ncs_info_as_spec()\n # ncs_obj.show(format='spec')\n assert len(spec.ncs_groups()) ==1", "def get_lookup_arrays(read_from_flat_files=False, from_pickle=False):\n if from_pickle:\n year_arr = pickle.load(open(variables.tables_dict[\"year_arr\"], \"rb\"))\n hierlevel_arr = pickle.load(open(variables.tables_dict[\"hierlevel_arr\"], \"rb\"))\n entitytype_arr = pickle.load(open(variables.tables_dict[\"entitytype_arr\"], \"rb\"))\n functionalterm_arr = pickle.load(open(variables.tables_dict[\"functionalterm_arr\"], \"rb\"))\n indices_arr = pickle.load(open(variables.tables_dict[\"indices_arr\"], \"rb\"))\n description_arr = pickle.load(open(variables.tables_dict[\"description_arr\"], \"rb\"))\n category_arr = pickle.load(open(variables.tables_dict[\"category_arr\"], \"rb\"))\n return year_arr, hierlevel_arr, entitytype_arr, functionalterm_arr, indices_arr, description_arr, category_arr\n if read_from_flat_files:\n # fn = os.path.join(variables.TABLES_DIR, \"Functions_table_STRING.txt\")\n fn = variables.tables_dict[\"Functions_table\"]\n result = get_results_of_statement_from_flat_file(fn)\n result = list(result)\n else:\n result = get_results_of_statement(\"SELECT * FROM functions\")\n shape_ = len(result)\n year_arr = np.full(shape=shape_, fill_value=-1, dtype=\"int16\") # Integer (-32768 to 32767)\n entitytype_arr = np.full(shape=shape_, fill_value=0, dtype=\"int8\")\n # if not low_memory:\n # description_arr = np.empty(shape=shape_, dtype=object) # \"\"U261\"))\n # category_arr = np.empty(shape=shape_, dtype=object) # description of functional category (e.g. \"Gene Ontology biological process\") # category_arr = np.empty(shape=shape_, dtype=np.dtype(\"U49\")) # description of functional category (e.g. \"Gene Ontology biological process\")\n description_arr = np.empty(shape=shape_, dtype=object) # \"\"U261\"))\n category_arr = np.empty(shape=shape_, dtype=object) # description of functional category (e.g. \"Gene Ontology biological process\") # category_arr = np.empty(shape=shape_,\n functionalterm_arr = np.empty(shape=shape_, dtype=object) #np.dtype(\"U13\"))\n hierlevel_arr = np.full(shape=shape_, fill_value=-1, dtype=\"int8\") # Byte (-128 to 127)\n indices_arr = np.arange(shape_, dtype=np.dtype(\"uint32\"))\n indices_arr.flags.writeable = False\n\n for i, res in enumerate(result):\n func_enum, etype, term, description, year, hierlevel = res\n func_enum = int(func_enum)\n etype = int(etype)\n try:\n year = int(year)\n except ValueError: # e.g. 
\"....\"\n year = -1\n hierlevel = int(hierlevel)\n entitytype_arr[func_enum] = etype\n functionalterm_arr[func_enum] = term\n year_arr[func_enum] = year\n hierlevel_arr[func_enum] = hierlevel\n # if not low_memory:\n # description_arr[func_enum] = description\n # category_arr[func_enum] = variables.entityType_2_functionType_dict[etype]\n description_arr[func_enum] = description\n category_arr[func_enum] = variables.entityType_2_functionType_dict[etype]\n\n year_arr.flags.writeable = False # make it immutable\n hierlevel_arr.flags.writeable = False\n entitytype_arr.flags.writeable = False\n functionalterm_arr.flags.writeable = False\n # if not low_memory:\n # description_arr.flags.writeable = False\n # category_arr.flags.writeable = False\n # return year_arr, hierlevel_arr, entitytype_arr, functionalterm_arr, indices_arr, description_arr, category_arr\n # else:\n # return year_arr, hierlevel_arr, entitytype_arr, functionalterm_arr, indices_arr\n description_arr.flags.writeable = False\n category_arr.flags.writeable = False\n return year_arr, hierlevel_arr, entitytype_arr, functionalterm_arr, indices_arr, description_arr, category_arr", "def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the z component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[1])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals", "def parse(self):\n\t\tfirst = None\n\t\tf = open(self.input_file)\n\t\tfor line in f.readlines():\n\t\t\tif line.startswith(\"#\"):\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tflow,t,sequence,size = line.split()\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t# append data to a list of tuples\n\t\t\tflow = int(flow)\n\t\t\tt = float(t)\n\t\t\tsequence = int(sequence)\n\t\t\tif size == \"x\":\n\t\t\t\tcontinue\n\t\t\tsize = int(size)\n\t\t\tif not size == 0:\n\t\t\t\tif flow == 1:\n\t\t\t\t\tself.data1.append((t,sequence,size))\n\t\t\t\telif flow == 2:\n\t\t\t\t\tself.data2.append((t,sequence,size))\n\t\t\t\telif flow == 3:\n\t\t\t\t\tself.data3.append((t, sequence, size))\n\t\t\t\telif flow == 4:\n\t\t\t\t\tself.data4.append((t, sequence, size))\n\t\t\t\telif flow == 5:\n\t\t\t\t\tself.data5.append((t, sequence, size))\n\t\t\t\telse:\n\t\t\t\t\tprint \"Erroneous data: \",flow, t, sequence, size\n\t\t\t# Keep track of the 
minimum and maximum time seen\n\t\t\tif not self.min_time or t < self.min_time:\n\t\t\t\tself.min_time = t\n\t\t\tif not self.max_time or t > self.max_time:\n\t\t\t\tself.max_time = t\n\n\t\t\t# print len(self.data1),len(self.data2),len(self.data3),len(self.data4),len(self.data5)", "def test_parse_pdb(self):\n\n pdbfile = open(self.pdbfile, 'r').read()\n\n parser = PDBParser()\n pdbdf = parser.parse_to_pandas(pdbfile)\n\n self.assertItemsEqual(pdbdf['chain'].unique(), [None])\n self.assertItemsEqual(pdbdf['segid'].unique(), ['A'])\n self.assertItemsEqual(pdbdf['resnum'].unique(), range(89, 137))\n self.assertItemsEqual(pdbdf['resname'].unique(), ['ARG', 'ALA', 'GLN', 'PRO', 'LYS', 'TYR', 'SER', 'VAL',\n 'ASP', 'GLU', 'ASN', 'GLY', 'THR', 'TRP', 'ILE', 'MET',\n 'LEU', 'PHE'])", "def _parse_nscf(self) -> None:\n alat = 0\n lattice = np.zeros((3,3))\n recip = np.zeros((3,3))\n nbnd = 0\n natom = 0\n positions = []\n nk = 0\n symbols = []\n k_frac = []\n efermi = 0\n\n energy = {\"spinup\" : [],\n \"spindown\" : []\n }\n\n which = \"spinup\" # remember if we are reading spin up or spin down\n \n with open(self.output,'r') as f:\n aline=f.readline()\n\n while aline:\n # read information by checking the flags\n if \"lattice parameter (alat) =\" in aline:\n data = aline.split('=')[1]\n data = data.split()\n alat = float(data[0]) # in Bohr\n\n if \"number of Kohn-Sham states\" in aline:\n data = aline.split()[-1]\n nbnd = int(data)\n\n if \"number of atoms/cell\" in aline:\n data = aline.split()[-1]\n natom = int(data)\n\n if \"crystal axes: (cart. coord. in units of alat)\" in aline:\n for i in range(3):\n data = f.readline().split()[3:6]\n lattice[i] = np.array(data, dtype = float) \n lattice *= alat * Bohr2A\n\n if \"reciprocal axes: (cart. coord. in units 2 pi/alat)\" in aline:\n for i in range(3):\n data = f.readline().split()[3:6]\n recip[i] = np.array(data, dtype = float)\n recip *= 2 * np.pi / (alat * Bohr2A)\n\n if \"site n. atom positions (cryst. 
coord.)\" in aline:\n for i in range(natom):\n data = f.readline()\n symbols.append(re.findall(r'[A-Z][a-z]*', data)[0])\n positions.append(np.array(re.findall('-?\\d+\\.\\d+', data), dtype = float))\n \n if \"number of k points= \" in aline:\n nk = int( re.findall(r'\\d+', aline)[0] )\n k_frac = np.zeros((nk,3))\n\n if re.search(r'k\\(.+\\)\\s+=\\s+\\(.+\\)', aline) != None:\n parts = aline.split('=')\n ik = int( re.findall(r'\\d+', parts[0])[0] )\n pos = np.array(re.findall(r'-?\\d+\\.\\d+', parts[1]), dtype = float)\n k_frac[ik-1] = pos\n\n if \"the Fermi energy is\" in aline:\n efermi = float(re.findall(r'-?\\d+\\.\\d+', aline)[0])\n\n if \"------ SPIN UP ------------\" in aline:\n which = \"spinup\"\n\n if \"------ SPIN DOWN ----------\" in aline:\n which = \"spindown\"\n\n if re.search('k\\s+=\\s*-?\\d+\\.\\d+\\s*-?\\d+\\.\\d+\\s*-?\\d+\\.\\d+\\s',aline) != None:\n kstr=re.findall(r'-?\\d+\\.\\d+',aline)\n\n f.readline()\n\n lenergy = [] # local energy for each k point\n while len(lenergy) < nbnd:\n aline = f.readline()\n data = np.array(aline.split(), dtype = float)\n for d in data:\n lenergy.append(d)\n\n if len(lenergy) > nbnd:\n raise \"length of energy > nbnd\"\n\n energy[which].append(lenergy)\n \n aline = f.readline()\n\n self.efermi = efermi\n self.lattice = lattice\n self.symbols = symbols \n self.positions = np.array(positions)\n self.reciprocal = recip\n self.kpoints = k_frac\n\n self.eig = {}\n self.eig[Spin.up] = np.array(energy[\"spinup\"]).T\n\n if energy[\"spindown\"]:\n self.spin_polarized = True\n self.eig[Spin.down] = np.array(energy[\"spindown\"]).T", "def test_parse_pi_xml_02(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.get_series():\n pass\n self.assertTrue(True)", "def dataArr(filename):\r\n #Open the file\r\n f=h5py.File(filename,'r')\r\n \r\n #Initialize the data arrays\r\n cdata=[]\r\n idxset=[]\r\n vertices=[]\r\n \r\n #Open groups in the file\r\n for group in f.keys():\r\n# print('Group- '+group)\r\n \r\n #Get the group\r\n currGroup=f[group]\r\n \r\n #Open keys in the group\r\n for key in currGroup.keys():\r\n# print('Key- '+key)\r\n \r\n #Append the data to the respective arrays\r\n if key=='cdata(Complex)':\r\n cdataGroup=currGroup[key]\r\n \r\n imag=[]\r\n real=[]\r\n #Open the keys in cdata\r\n for subkey in cdataGroup.keys():\r\n# print('Subkey- '+subkey)\r\n \r\n #Get the real and imaginary parts of the array\r\n if subkey=='Imag':\r\n imag=cdataGroup[subkey][()]\r\n elif subkey=='Real':\r\n real=cdataGroup[subkey][()]\r\n \r\n #Convert lists to numpy arrays\r\n imag=np.array(imag)\r\n real=np.array(real)\r\n #Get the cdata value\r\n cdata=real+1j*imag\r\n \r\n elif key=='idxset':\r\n idxset=currGroup[key][()]\r\n elif key=='vertices':\r\n vertices=currGroup[key][()]\r\n \r\n #Remove the y component from the vertices\r\n xVals=[]\r\n yVals=[]\r\n newVertices=[]\r\n for vertex in vertices:\r\n xVals.append(vertex[0])\r\n yVals.append(vertex[2])\r\n newVertices.append([vertex[0],vertex[1]])\r\n vertices=newVertices\r\n \r\n #Convert to numpy arrays\r\n cdata=np.array(cdata)\r\n xVals=np.array(xVals)\r\n yVals=np.array(yVals)\r\n \r\n #Close the file\r\n f.close()\r\n \r\n return cdata, xVals, yVals", "def read_LD(read_fn):\n f = open(read_fn,'r')\n SIGMA = []\n array = []\n for line in f:\n line = line.strip()\n array = line.split()\n SIGMA.append(array)\n return SIGMA", "def Read_Palik():\n fid = 
open('/Users/simonvassant/TheseSave/Matlab_RMCA/RMCA_Fab_CuN17_Champ/Au_Palik.dat','r')\n L,eps = [],[]\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n L.append(float(line[:25]))\n eps.append(float(line[27:54])+1j*float(line[55:-2]))\n return np.array(L),np.array(eps)", "def getHierarchies():", "def getHierarchies():", "def parse(self):\n return []", "def test_parse_pi_xml_02(self):\n source = os.path.join(DATA_DIR, \"GDresults_dam.xml\")\n reader = PiXmlReader(source)\n for md, df in reader.bulk_get_series(chunk_size=5):\n pass\n self.assertTrue(True)", "def test_hierarchy_perceived_dipeptide(self):\n from openff.toolkit._tests.create_molecules import (\n dipeptide_hierarchy_added as create_dipeptide,\n )\n\n dipeptide_hierarchy_perceived = create_dipeptide()\n\n assert (\n str(dipeptide_hierarchy_perceived.residues[0])\n == \"HierarchyElement ('None', '1', ' ', 'ACE') of iterator 'residues' containing 6 atom(s)\"\n )\n assert dipeptide_hierarchy_perceived.residues[0].chain_id == \"None\"\n assert dipeptide_hierarchy_perceived.residues[0].residue_name == \"ACE\"\n assert dipeptide_hierarchy_perceived.residues[0].insertion_code == \" \"\n assert dipeptide_hierarchy_perceived.residues[0].residue_number == \"1\"\n assert set(dipeptide_hierarchy_perceived.residues[0].atom_indices) == set(\n range(6)\n )\n\n assert (\n str(dipeptide_hierarchy_perceived.residues[1])\n == \"HierarchyElement ('None', '2', ' ', 'ALA') of iterator 'residues' containing 11 atom(s)\"\n )\n assert dipeptide_hierarchy_perceived.residues[1].chain_id == \"None\"\n assert dipeptide_hierarchy_perceived.residues[1].residue_name == \"ALA\"\n assert dipeptide_hierarchy_perceived.residues[1].insertion_code == \" \"\n assert dipeptide_hierarchy_perceived.residues[1].residue_number == \"2\"\n assert set(dipeptide_hierarchy_perceived.residues[1].atom_indices) == set(\n range(6, 17)\n )\n\n for residue in dipeptide_hierarchy_perceived.residues:\n for atom in residue.atoms:\n assert atom.metadata[\"residue_name\"] == residue.residue_name\n assert atom.metadata[\"residue_number\"] == residue.residue_number\n assert atom.metadata[\"insertion_code\"] == residue.insertion_code", "def parseArr(self, s) :\n \n rc = []\n if s.startswith('[') and s.endswith(']') :\n s = s[1:-1]\n z = s.split(',')\n for p in z :\n if p.find('..') >= 0 :\n zz = p.split('..')\n if len(zz) == 2 :\n b = self.str2raw(zz[0])\n e = self.str2raw(zz[1])\n b = self.safe2Int(b)\n e = self.safe2Int(e)\n if not b == None and not e == None and (e >= e):\n for i in range(b, e + 1) :\n rc.append(str(i))\n \n else :\n p = self.str2raw(p)\n rc.append(str(p))\n pass\n return rc", "def read_initial_parameters(inputfilename):\r\n subc_params = []\r\n subarea_params = []\r\n global subc_names\r\n subc_names = []\r\n subcatchment_parameters = []\r\n inputfile = open(inputfilename, 'r')\r\n for line in inputfile:\r\n if(line.find(\"[SUBCATCHMENTS]\") != -1):\r\n line = inputfile.readline()\r\n for i in range(count):\r\n templine = list(line)\r\n if templine[0] == \";\" or templine[0] == \" \" or len(templine) < 10:\r\n line = inputfile.readline()\r\n continue\r\n\r\n elif (line.find(\"[\") != -1):\r\n break\r\n else:\r\n linesplit = line.split()\r\n subc_params.append(linesplit[4:7])\r\n subc_names.append(linesplit[0])\r\n line = inputfile.readline()\r\n if (line.find(\"[SUBAREAS]\") != -1):\r\n line = inputfile.readline()\r\n for i in range(count):\r\n templine = list(line)\r\n if templine[0] == \";\" or templine[0] == \" \" or len(templine) < 10:\r\n 
line = inputfile.readline()\r\n continue\r\n elif (line.find(\"[\") != -1):\r\n break\r\n else:\r\n linesplit = line.split()\r\n subarea_params.append(linesplit[1:6])\r\n line = inputfile.readline()\r\n inputfile.close()\r\n\r\n #Part of the function that experiments with np array. Potentially removes the need for the list transformation\r\n # functions that chew up a lot of time. Each subcatchment has a row, each parameter type has a column.\r\n global subcatchment_parameters_np\r\n subcatchment_parameters_np = np.empty((len(subc_params[0]) + len(subarea_params[0]), len(subc_params)), dtype=float)\r\n for row in range(len(subc_params)):\r\n for col in range(len(subc_params[0])):\r\n subcatchment_parameters_np[row, col] = float(subc_params[row][col])\r\n for row in range(len(subarea_params)):\r\n for col in range(len(subarea_params[0])):\r\n subcatchment_parameters_np[row, col + len(subc_params[0])] = float(subarea_params[row][col])\r\n\r\n #Old string code\r\n # for i in range(len(subc_params)):\r\n # for j in range(len(subarea_params[i])):\r\n # subc_params[i].append(subarea_params[i][j])\r\n # subcatchment_parameters.append(subc_params[i])\r\n return(np_subcatchment_parameters)", "def deserialize(self, data):\n q = collections.deque(data.split(self.sep))\n \n res = self.dfs2(q)\n \n return res", "def get_trees(self, data, showerrors = False): # -> list:\r\n for element in data:\r\n if not check(self._productionset.alphabet,element):\r\n raise ValueError(\"Unknown element %s\" % str(element))\r\n result = self.__recursive_parser(self._productionset.initialsymbol, data, self._productionset.main_production, showerrors)\r\n finalresult = []\r\n for eresult in result:\r\n if eresult.left == 0 and eresult.right == len(data) and eresult not in finalresult:\r\n finalresult.append(eresult) \r\n return finalresult", "def parse_BS_data(retrieved_folder, fermi_level, kpoints):\n # conversion factor from Ry to eV\n eVscale = get_Ry2eV()\n\n retrieved_list = retrieved_folder.list_object_names()\n qdos_file_list = [i for i in retrieved_list if 'qdos.' 
in i]\n q_vec_file = 'qvec.dat'\n\n if q_vec_file in retrieved_list:\n with retrieved_folder.open(q_vec_file) as file_opened:\n q_vec = np.loadtxt(file_opened, skiprows=1)\n\n for icount, fname in enumerate(qdos_file_list):\n with retrieved_folder.open(fname) as _f:\n loaded_file = np.loadtxt(_f)\n if icount == 0:\n total_qdos = loaded_file\n else:\n total_qdos[:, 5:] += loaded_file[:, 5:]\n\n ef = fermi_level.value # in Ry unit\n total_qdos[:, 0] = (total_qdos[:, 0] - ef) * eVscale\n eng_points = set(total_qdos[:, 0])\n eng_points = np.sort(list(eng_points))\n no_eng_points = len(eng_points)\n\n qdos_intensity = np.ndarray(shape=(no_eng_points, len(q_vec)))\n for ne in range(np.shape(qdos_intensity)[0]):\n nk = np.shape(qdos_intensity)[1]\n # sum up all l-channels (5 is only the s-channel!)\n qdos_intensity[ne, :] = np.sum(total_qdos[ne * nk:(ne + 1) * nk, 5:], axis=1) / eVscale\n\n qdos_intensity = qdos_intensity.T # setting eng-kpts corresponds to x-y asix\n q_vec = np.asarray(q_vec) # converting q_vec into array\n eng_points = (np.asarray(eng_points)) # converting eng_popints into array in Ry unit\n\n # To save into the ArrayData\n array = ArrayData()\n array.set_array('BlochSpectralFunction', qdos_intensity)\n array.set_array('Kpts', q_vec)\n array.set_array('energy_points', eng_points)\n if kpoints.labels is not None:\n klbl_dict = dict(kpoints.labels) # Special k-points\n array.extras['k-labels'] = klbl_dict\n\n return {'BS_Data': array}", "def readXMLAmat(filename, time_ini, time_end, symbol = '?'):\n # Number of parameters to be read. Today is 6.\n n_param_read = 6\n pos = [[[],]*n_param_read,]*(time_end-time_ini+1)\n for t, time_path in enumerate(range(time_ini, time_end+1, 1)):\n xml_path_corr = corrTIFPath(filename, symbol, time_path)\n tree = etree.parse(xml_path_corr)\n root = tree.getroot()\n all_points = root.xpath('GaussianMixtureModel')\n x = []\n y = []\n z = []\n svID = []\n ID = []\n parent = []\n for point in all_points:\n # Needs try catch to avoid the errors in XML\n try:\n [x_aux, y_aux, z_aux] = [float(x) for x in point.xpath('attribute::m')[0].split()]\n x.append(x_aux)\n y.append(y_aux)\n z.append(z_aux)\n svID.append([int(a) for a in point.xpath('attribute::svIdx')[0].split()])\n ID.append(int(point.xpath('attribute::id')[0].strip()))\n parent.append(int(point.xpath('attribute::parent')[0].strip()))\n except:\n print('Point ID {p_id} in file {f_path} is corrupted'.format( \n f_path = xml_path_corr, p_id = int(point.xpath('attribute::id')[0].strip())))\n continue\n pos[t] = [x,y,z,svID,ID,parent]\n return pos", "def deserialize(self, data):\n if not data:\n return\n q = collections.deque(data.split(self.sep))\n res = self.dfs2(q)\n return res", "def _build_datasets_sis3305(self):\n bc_arr = np.where(self._active_brdch[\"SIS 3305\"])\n\n for board, channel in zip(bc_arr[0], bc_arr[1]):\n brd = board + 1\n ch = channel + 1\n slot = self.get_slot(brd, \"SIS 3305\")\n if 1 <= ch <= 4:\n fpga_str = \"FPGA 1\"\n else:\n fpga_str = \"FPGA 2\"\n ch = ch - 4\n\n for cname in self._active_config:\n # create main dataset\n dset_name = f\"{cname} [Slot {slot}: SIS 3305 {fpga_str} ch {ch}]\"\n shape = (self._sn_size, self._nt)\n data = np.empty(shape=shape, dtype=np.int16)\n self.create_dataset(dset_name, data=data)\n\n # create header dataset\n hdset_name = f\"{dset_name} headers\"\n shape = (self._sn_size,)\n dtype = np.dtype(\n [\n (\"Shot number\", np.int32),\n (\"Scale\", np.float32),\n (\"Offset\", np.float32),\n (\"Min\", np.uint16),\n (\"Max\", np.uint16),\n 
(\"Clipped\", np.int8),\n ]\n )\n dheader = np.empty(shape=shape, dtype=dtype)\n dheader[\"Shot number\"] = np.arange(\n 1, shape[0] + 1, 1, dtype=dheader[\"Shot number\"].dtype\n )\n dheader[\"Scale\"] = 0.0019550342\n dheader[\"Offset\"] = -1.0\n dheader[\"Min\"] = data.min(axis=1)\n dheader[\"Max\"] = data.max(axis=1)\n dheader[\"Clipped\"] = 0\n self.create_dataset(hdset_name, data=dheader)", "def get_scandata_raft(inputfile, datadir=''):\n raftarrays = []\n if os.path.splitext(inputfile)[1] in [\".fits\", \".fz\"]:\n # starts with 00 through 22\n seglist = [\"%d%d\" % (i, j) for i in range(3) for j in range(3)]\n # when REB2 data is missing\n #seglist = [\"%d%d\" % (i, j) for i in range(2) for j in range(3)]\n raftfits = [inputfile.replace(\"00_\", s + '_') for s in seglist]\n for f in raftfits:\n raftarrays.append(scope.get_scandata_fromfile(f, datadir))\n else:\n # starts with Reb0 through Reb2\n reblist = [\"Reb0\", \"Reb1\", \"Reb2\"]\n rebraws = [inputfile.replace(\"Reb0\", s) for s in reblist]\n seglist = [r + \"-%s\" % stripe for r in reblist for stripe in ['A', 'B', 'C'] ]\n for f in rebraws:\n fullreb = scope.get_scandata_fromfile(f, datadir) # 3D array: 48 channels, lines, columns\n #print fullreb.shape\n raftarrays.extend([a for a in np.split(fullreb, 3, axis=0)]) # splits REB data into 3 CCDs\n\n return raftarrays, seglist" ]
[ "0.5703337", "0.52625185", "0.51882285", "0.5113901", "0.508543", "0.50327176", "0.5027605", "0.50257576", "0.50027895", "0.49920815", "0.49779826", "0.4965833", "0.49642783", "0.49438012", "0.49253464", "0.4914421", "0.48994464", "0.48994464", "0.48665243", "0.48370755", "0.48225456", "0.4815661", "0.4797233", "0.47848317", "0.47720566", "0.4763181", "0.47621244", "0.4745171", "0.47432852", "0.47358924" ]
0.5657535
1
Create a fragility parameter database based on the FEMA P58 data. The method was developed to process v3.1.2 of the FragilityDatabase xls that is provided with FEMA P58 2nd edition.
def create_FEMA_P58_fragility_db(source_file, target_data_file='fragility_DB_FEMA_P58_2nd.csv', target_meta_file='fragility_DB_FEMA_P58_2nd.json'): # parse the source file df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1, true_values=["YES", "Yes", "yes"], false_values=["NO", "No", "no"]) # remove the empty rows and columns df.dropna(axis=0, how='all', inplace=True) df.dropna(axis=1, how='all', inplace=True) # filter the columns that we need for the fragility database cols_to_db = [ "Demand Parameter (value):", "Demand Parameter (unit):", "Demand Location (use floor above? Yes/No)", "Directional?", "DS Hierarchy", "DS 1, Probability", "DS 1, Median Demand", "DS 1, Total Dispersion (Beta)", "DS 2, Probability", "DS 2, Median Demand", "DS 2, Total Dispersion (Beta)", "DS 3, Probability", "DS 3, Median Demand", "DS 3, Total Dispersion (Beta)", "DS 4, Probability", "DS 4, Median Demand", "DS 4, Total Dispersion (Beta)", "DS 5, Probability", "DS 5, Median Demand", "DS 5, Total Dispersion (Beta)", ] # filter the columns that we need for the metadata cols_to_meta = [ "Component Name", "Component Description", "Construction Quality:", "Seismic Installation Conditions:", "Comments / Notes", "Author", "Fragility Unit of Measure", "Round to Integer Unit?", "DS 1, Description", "DS 1, Repair Description", "DS 2, Description", "DS 2, Repair Description", "DS 3, Description", "DS 3, Repair Description", "DS 4, Description", "DS 4, Repair Description", "DS 5, Description", "DS 5, Repair Description", ] # remove special characters to make it easier to work with column names str_map = { ord(' '): "_", ord(':'): None, ord('('): None, ord(')'): None, ord('?'): None, ord('/'): None, ord(','): None, } df_db_source = df.loc[:, cols_to_db] df_db_source.columns = [s.translate(str_map) for s in cols_to_db] df_db_source.sort_index(inplace=True) df_meta = df.loc[:, cols_to_meta] df_meta.columns = [s.translate(str_map) for s in cols_to_meta] # replace missing values with an empty string df_meta.fillna('', inplace=True) # the metadata shall be stored in strings df_meta = df_meta.astype(str) # initialize the output fragility table df_db = pd.DataFrame( columns=[ "Index", "Incomplete", "Demand-Type", "Demand-Unit", "Demand-Offset", "Demand-Directional", "LS1-Family", "LS1-Theta_0", "LS1-Theta_1", "LS1-DamageStateWeights", "LS2-Family", "LS2-Theta_0", "LS2-Theta_1", "LS2-DamageStateWeights", "LS3-Family", "LS3-Theta_0", "LS3-Theta_1", "LS3-DamageStateWeights", "LS4-Family", "LS4-Theta_0", "LS4-Theta_1", "LS4-DamageStateWeights" ], index=df_db_source.index, dtype=float ) # initialize the dictionary that stores the fragility metadata meta_dict = {} # conversion dictionary for demand types convert_demand_type = { 'Story Drift Ratio': "Peak Interstory Drift Ratio", 'Link Rotation Angle': "Peak Link Rotation Angle", 'Effective Drift': "Peak Effective Drift Ratio", 'Link Beam Chord Rotation': "Peak Link Beam Chord Rotation", 'Peak Floor Acceleration': "Peak Floor Acceleration", 'Peak Floor Velocity': "Peak Floor Velocity" } # conversion dictionary for demand unit names convert_demand_unit = { 'Unit less': 'unitless', 'Radians': 'rad', 'g': 'g', 'meter/sec': 'mps' } # for each component... # (this approach is not efficient, but easy to follow which was considered # more important than efficiency.) 
for cmp in df_db_source.itertuples(): # create a dotted component index ID = cmp.Index.split('.') cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}' # store the new index df_db.loc[cmp.Index, 'Index'] = cmpID # assume the component information is complete incomplete = False # store demand specifications df_db.loc[cmp.Index, 'Demand-Type'] = ( convert_demand_type[cmp.Demand_Parameter_value]) df_db.loc[cmp.Index, 'Demand-Unit'] = ( convert_demand_unit[cmp.Demand_Parameter_unit]) df_db.loc[cmp.Index, 'Demand-Offset'] = ( int(cmp.Demand_Location_use_floor_above_YesNo)) df_db.loc[cmp.Index, 'Demand-Directional'] = ( int(cmp.Directional)) # parse the damage state hierarchy DS_setup = parse_DS_Hierarchy(cmp.DS_Hierarchy) # get the raw metadata for the component cmp_meta = df_meta.loc[cmp.Index, :] # store the global (i.e., not DS-specific) metadata # every component is assumed to have a comp. description comments = cmp_meta['Component_Description'] # the additional fields are added to the description if they exist if cmp_meta['Construction_Quality'] != 'Not Specified': comments += f'\nConstruction Quality: ' \ f'{cmp_meta["Construction_Quality"]}' if cmp_meta['Seismic_Installation_Conditions'] not in [ 'Not Specified', 'Not applicable', 'Unknown', 'Any']: comments += f'\nSeismic Installation Conditions: ' \ f'{cmp_meta["Seismic_Installation_Conditions"]}' if cmp_meta['Comments__Notes'] != 'None': comments += f'\nNotes: {cmp_meta["Comments__Notes"]}' if cmp_meta['Author'] not in ['Not Given', 'By User']: comments += f'\nAuthor: {cmp_meta["Author"]}' # get the suggested block size and replace the misleading values with ea block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1] meta_data = { "Description": cmp_meta['Component_Name'], "Comments": comments, "SuggestedComponentBlockSize": ' '.join(block_size), "RoundUpToIntegerQuantity": cmp_meta['Round_to_Integer_Unit'], "LimitStates": {} } # now look at each Limit State for LS_i, LS_contents in enumerate(DS_setup): LS_i = LS_i + 1 LS_contents = np.atleast_1d(LS_contents) ls_meta = {} # start with the special cases with multiple DSs in an LS if LS_contents[0] in {'MutEx', 'Simul'}: # collect the fragility data for the member DSs median_demands = [] dispersions = [] weights = [] for ds in LS_contents[1:]: median_demands.append( getattr(cmp, f"DS_{ds[2]}_Median_Demand")) dispersions.append( getattr(cmp, f"DS_{ds[2]}_Total_Dispersion_Beta")) weights.append(getattr(cmp, f"DS_{ds[2]}_Probability")) # make sure the specified distribution parameters are appropriate if ((np.unique(median_demands).size != 1) or ( np.unique(dispersions).size != 1)): raise ValueError(f"Incorrect mutually exclusive DS " f"definition in component {cmp.Index} at " f"Limit State {LS_i}") if LS_contents[0] == 'MutEx': # in mutually exclusive cases, make sure the specified DS # weights sum up to one np.testing.assert_allclose( np.sum(np.array(weights, dtype=float)), 1.0, err_msg=f"Mutually exclusive Damage State weights do " f"not sum to 1.0 in component {cmp.Index} at " f"Limit State {LS_i}") # and save all DS metadata under this Limit State for ds in LS_contents[1:]: ds_id = ds[2] ls_meta.update({f"DS{ds_id}": { "Description": cmp_meta[f"DS_{ds_id}_Description"], "RepairAction": cmp_meta[ f"DS_{ds_id}_Repair_Description"] }}) else: # in simultaneous cases, convert simultaneous weights into # mutexc weights sim_ds_count = len(LS_contents) - 1 ds_count = 2 ** (sim_ds_count) - 1 sim_weights = [] for ds_id in range(1, ds_count + 1): ds_map = format(ds_id, 
f'0{sim_ds_count}b') sim_weights.append(np.product( [weights[ds_i] if ds_map[-ds_i - 1] == '1' else 1.0-weights[ds_i] for ds_i in range(sim_ds_count)])) # save ds metadata - we need to be clever here # the original metadata is saved for the pure cases # when only one DS is triggered # all other DSs store information about which # combination of pure DSs they represent if ds_map.count('1') == 1: ds_pure_id = ds_map[::-1].find('1') + 1 ls_meta.update({f"DS{ds_id}": { "Description": f"Pure DS{ds_pure_id}. " + cmp_meta[f"DS_{ds_pure_id}_Description"], "RepairAction": cmp_meta[ f"DS_{ds_pure_id}_Repair_Description"] }}) else: ds_combo = [f'DS{_.start() + 1}' for _ in re.finditer('1', ds_map[::-1])] ls_meta.update({f"DS{ds_id}": { "Description": 'Combination of ' + ' & '.join(ds_combo), "RepairAction": 'Combination of pure DS repair ' 'actions.' }}) # adjust weights to respect the assumption that at least # one DS will occur (i.e., the case with all DSs returning # False is not part of the event space) sim_weights_array = np.array(sim_weights) / np.sum(sim_weights) weights = sim_weights_array theta_0 = median_demands[0] theta_1 = dispersions[0] weights_str = ' | '.join([f"{w:.6f}" for w in weights]) df_db.loc[cmp.Index, f'LS{LS_i}-DamageStateWeights'] = weights_str # then look at the sequential DS cases elif LS_contents[0].startswith('DS'): # this is straightforward, store the data in the table and dict ds_id = LS_contents[0][2] theta_0 = getattr(cmp, f"DS_{ds_id}_Median_Demand") theta_1 = getattr(cmp, f"DS_{ds_id}_Total_Dispersion_Beta") ls_meta.update({f"DS{ds_id}": { "Description": cmp_meta[f"DS_{ds_id}_Description"], "RepairAction": cmp_meta[f"DS_{ds_id}_Repair_Description"] }}) # FEMA P58 assumes lognormal distribution for every fragility df_db.loc[cmp.Index, f'LS{LS_i}-Family'] = 'lognormal' # identify incomplete cases... # where theta is missing if theta_0 != 'By User': df_db.loc[cmp.Index, f'LS{LS_i}-Theta_0'] = theta_0 else: incomplete = True # where beta is missing if theta_1 != 'By User': df_db.loc[cmp.Index, f'LS{LS_i}-Theta_1'] = theta_1 else: incomplete = True # store the collected metadata for this limit state meta_data['LimitStates'].update({f"LS{LS_i}": ls_meta}) # store the incomplete flag for this component df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete) # store the metadata for this component meta_dict.update({cmpID: meta_data}) # assign the Index column as the new ID df_db.set_index('Index', inplace=True) # rename the index df_db.index.name = "ID" # convert to optimal datatypes to reduce file size df_db = df_db.convert_dtypes() # save the fragility data df_db.to_csv(target_data_file) # save the metadata with open(target_meta_file, 'w+', encoding='utf-8') as f: json.dump(meta_dict, f, indent=2) print("Successfully parsed and saved the fragility data from FEMA P58")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_Hazus_EQ_fragility_db(source_file,\n target_data_file='fragility_DB_Hazus_EQ.csv',\n target_meta_file='fragility_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n design_levels = list(\n raw_data['Structural_Fragility_Groups']['EDP_limits'].keys())\n\n building_types = list(\n raw_data['Structural_Fragility_Groups']['P_collapse'].keys())\n\n convert_design_level = {\n 'High_code': 'HC',\n 'Moderate_code': 'MC',\n 'Low_code': 'LC',\n 'Pre_code': 'PC'\n }\n\n # initialize the fragility table\n df_db = pd.DataFrame(\n columns=[\n \"ID\",\n \"Incomplete\",\n \"Demand-Type\",\n \"Demand-Unit\",\n \"Demand-Offset\",\n \"Demand-Directional\",\n \"LS1-Family\",\n \"LS1-Theta_0\",\n \"LS1-Theta_1\",\n \"LS1-DamageStateWeights\",\n \"LS2-Family\",\n \"LS2-Theta_0\",\n \"LS2-Theta_1\",\n \"LS2-DamageStateWeights\",\n \"LS3-Family\",\n \"LS3-Theta_0\",\n \"LS3-Theta_1\",\n \"LS3-DamageStateWeights\",\n \"LS4-Family\",\n \"LS4-Theta_0\",\n \"LS4-Theta_1\",\n \"LS4-DamageStateWeights\"\n ],\n index=np.arange(len(building_types) * len(design_levels) * 5),\n dtype=float\n )\n counter = 0\n\n # First, prepare the structural fragilities\n S_data = raw_data['Structural_Fragility_Groups']\n\n for bt in building_types:\n for dl in design_levels:\n if bt in S_data['EDP_limits'][dl].keys():\n\n # create the component id\n cmp_id = f'STR.{bt}.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Roof Drift Ratio\"\n df_db.loc[counter, 'Demand-Unit'] = \"rad\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n S_data['EDP_limits'][dl][bt][LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = \\\n S_data['Fragility_beta'][dl]\n\n if LS_i == 4:\n p_coll = S_data['P_collapse'][bt]\n df_db.loc[counter, f'LS{LS_i}-DamageStateWeights'] = (\n f'{1.0 - p_coll} | {p_coll}')\n\n counter += 1\n\n # Second, the non-structural drift sensitive one\n NSD_data = raw_data['NonStructural_Drift_Sensitive_Fragility_Groups']\n\n # create the component id\n df_db.loc[counter, 'ID'] = 'NSD'\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Roof Drift Ratio\"\n df_db.loc[counter, 'Demand-Unit'] = \"rad\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = NSD_data['EDP_limits'][\n LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = NSD_data['Fragility_beta']\n\n counter += 1\n\n # Third, the non-structural acceleration sensitive fragilities\n NSA_data = raw_data['NonStructural_Acceleration_Sensitive_Fragility_Groups']\n\n for dl in design_levels:\n\n # create the component id\n cmp_id = f'NSA.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Floor Acceleration\"\n df_db.loc[counter, 'Demand-Unit'] = \"g\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n NSA_data['EDP_limits'][dl][LS_i - 1]\n 
df_db.loc[counter, f'LS{LS_i}-Theta_1'] = NSA_data['Fragility_beta']\n\n counter += 1\n\n # Fourth, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for bt in building_types:\n for dl in design_levels:\n if bt in LF_data['EDP_limits'][dl].keys():\n\n # create the component id\n cmp_id = f'LF.{bt}.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Ground Acceleration\"\n df_db.loc[counter, 'Demand-Unit'] = \"g\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n LF_data['EDP_limits'][dl][bt][LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = \\\n LF_data['Fragility_beta'][dl]\n\n if LS_i == 4:\n p_coll = LF_data['P_collapse'][bt]\n df_db.loc[counter, f'LS{LS_i}-DamageStateWeights'] = (\n f'{1.0 - p_coll} | {p_coll}')\n\n counter += 1\n\n # Fifth, the ground failure fragilities\n GF_data = raw_data['Ground_Failure']\n\n for direction in ('Horizontal', 'Vertical'):\n for f_depth in ('Shallow', 'Deep'):\n # create the component id\n cmp_id = f'GF.{direction[0]}.{f_depth[0]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Permanent Ground Deformation\"\n df_db.loc[counter, 'Demand-Unit'] = \"inch\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n df_db.loc[counter, 'LS1-Family'] = 'lognormal'\n df_db.loc[counter, 'LS1-Theta_0'] = \\\n GF_data['EDP_limits'][direction][f_depth]\n df_db.loc[counter, 'LS1-Theta_1'] = \\\n GF_data['Fragility_beta'][direction][f_depth]\n p_complete = GF_data['P_Complete']\n df_db.loc[counter, 'LS1-DamageStateWeights'] = (\n f'{1.0 - p_complete} | {p_complete}')\n\n counter += 1\n\n # remove empty rows (from the end)\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # none of them are directional,\n df_db.loc[:, 'Demand-Directional'] = 0\n\n # rename the index\n df_db.set_index(\"ID\", inplace=True)\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the fragility data\n df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the fragility data from Hazus EQ\")", "def create_FEMA_P58_bldg_repair_db(\n source_file,\n target_data_file='bldg_repair_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_repair_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.concat(\n [pd.read_excel(source_file, sheet_name=sheet, header=2, index_col=1)\n for sheet in ('Summary', 'Cost Summary', 'Env Summary')], axis=1)\n\n # remove duplicate columns\n # (there are such because we joined two tables that were read separately)\n df = df.loc[:, ~df.columns.duplicated()]\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the repair database\n cols_to_db = [\n \"Fragility Unit of Measure\",\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n f\"Best Fit, DS{DS_i}\",\n f\"Lower Qty Mean, DS{DS_i}\",\n f\"Upper Qty Mean, DS{DS_i}\",\n f\"Lower Qty Cutoff, DS{DS_i}\",\n f\"Upper Qty Cutoff, DS{DS_i}\",\n f\"CV / Dispersion, 
DS{DS_i}\",\n\n f\"Best Fit, DS{DS_i}.1\",\n f\"Lower Qty Mean, DS{DS_i}.1\",\n f\"Upper Qty Mean, DS{DS_i}.1\",\n f\"Lower Qty Cutoff, DS{DS_i}.1\",\n f\"Upper Qty Cutoff, DS{DS_i}.1\",\n f\"CV / Dispersion, DS{DS_i}.2\",\n f\"DS {DS_i}, Long Lead Time\",\n\n f'Repair Cost, p10, DS{DS_i}',\n f'Repair Cost, p50, DS{DS_i}',\n f'Repair Cost, p90, DS{DS_i}',\n f'Time, p10, DS{DS_i}',\n f'Time, p50, DS{DS_i}',\n f'Time, p90, DS{DS_i}',\n f'Mean Value, DS{DS_i}',\n f'Mean Value, DS{DS_i}.1',\n\n # Columns added for the Environmental loss\n f\"DS{DS_i} Best Fit\",\n f\"DS{DS_i} CV or Beta\",\n\n f\"DS{DS_i} Best Fit.1\",\n f\"DS{DS_i} CV or Beta.1\",\n\n f\"DS{DS_i} Embodied Carbon (kg CO2eq)\",\n f\"DS{DS_i} Embodied Energy (MJ)\",\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 1, Repair Description\",\n \"DS 2, Description\",\n \"DS 2, Repair Description\",\n \"DS 3, Description\",\n \"DS 3, Repair Description\",\n \"DS 4, Description\",\n \"DS 4, Repair Description\",\n \"DS 5, Description\",\n \"DS 5, Repair Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 16):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\",\n f\"DS{DS_i}-LongLeadTime\",\n ]\n\n # create the MultiIndex\n comps = df_db_source.index.values\n DVs = ['Cost', 'Time', 'Carbon', 'Energy']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n convert_family = {\n 'LogNormal': 'lognormal',\n 'Normal': 'normal'\n }\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete_cost = False\n incomplete_time = False\n incomplete_carbon = False\n incomplete_energy = False\n\n # store units\n\n df_db.loc[cmp.Index, 'Quantity-Unit'] = (\n ' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip())\n df_db.loc[(cmp.Index, 'Cost'), 'DV-Unit'] = \"USD_2011\"\n df_db.loc[(cmp.Index, 'Time'), 'DV-Unit'] = \"worker_day\"\n df_db.loc[(cmp.Index, 'Carbon'), 'DV-Unit'] = \"kg\"\n df_db.loc[(cmp.Index, 'Energy'), 'DV-Unit'] = \"MJ\"\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global 
(i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n # Note that we are assuming that all damage states are triggered by\n # a single limit state in these components.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n cost_est = {}\n time_est = {}\n carbon_est = {}\n energy_est = {}\n\n # get the p10, p50, and p90 estimates for all damage states\n for DS_i in range(1, 6):\n\n if not pd.isna(getattr(cmp, f'Repair_Cost_p10_DS{DS_i}')):\n\n cost_est.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'Repair_Cost_p10_DS{DS_i}'),\n getattr(cmp, f'Repair_Cost_p50_DS{DS_i}'),\n getattr(cmp, f'Repair_Cost_p90_DS{DS_i}'),\n getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}'),\n getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}')\n ])})\n\n time_est.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'Time_p10_DS{DS_i}'),\n getattr(cmp, f'Time_p50_DS{DS_i}'),\n getattr(cmp, f'Time_p90_DS{DS_i}'),\n getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1'),\n getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}_1'),\n int(getattr(cmp, f'DS_{DS_i}_Long_Lead_Time') == 'YES')\n ])})\n\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Embodied_Carbon_kg_CO2eq')):\n\n theta_0, theta_1, family = [\n getattr(cmp, f'DS{DS_i}_Embodied_Carbon_kg_CO2eq'),\n getattr(cmp, f'DS{DS_i}_CV_or_Beta'),\n getattr(cmp, f'DS{DS_i}_Best_Fit')\n ]\n\n if family == 'Normal':\n p10, p50, p90 = norm.ppf([0.1, 0.5, 0.9], loc=theta_0, scale=theta_0 * theta_1)\n elif family == 'LogNormal':\n p10, p50, p90 = np.exp(norm.ppf([0.1, 0.5, 0.9], loc=np.log(theta_0), scale=theta_1))\n\n carbon_est.update({f'DS{DS_i}': np.array([p10, p50, p90])})\n\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ')):\n\n theta_0, theta_1, family = [\n getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ'),\n getattr(cmp, f'DS{DS_i}_CV_or_Beta_1'),\n getattr(cmp, f'DS{DS_i}_Best_Fit_1')\n ]\n\n if family == 'Normal':\n p10, p50, p90 = norm.ppf([0.1, 0.5, 0.9], loc=theta_0, scale=theta_0 * theta_1)\n elif family == 'LogNormal':\n p10, p50, p90 = np.exp(norm.ppf([0.1, 0.5, 0.9], loc=np.log(theta_0), scale=theta_1))\n\n energy_est.update({f'DS{DS_i}': np.array([p10, p50, p90])})\n\n # now prepare the equivalent mutex damage states\n sim_ds_count = len(cost_est.keys())\n ds_count = 2 ** 
(sim_ds_count) - 1\n\n for DS_i in range(1, ds_count + 1):\n ds_map = format(DS_i, f'0{sim_ds_count}b')\n\n cost_vals = np.sum([cost_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(5)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n time_vals = np.sum([time_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(6)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n carbon_vals = np.sum([carbon_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(3)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n energy_vals = np.sum([energy_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(3)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n # fit a distribution\n family_hat, theta_hat = fit_distribution_to_percentiles(\n cost_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n cost_theta = theta_hat\n if family_hat == 'normal':\n cost_theta[1] = cost_theta[1] / cost_theta[0]\n\n time_theta = [time_vals[1],\n np.sqrt(cost_theta[1] ** 2.0 + 0.25 ** 2.0)]\n\n # fit distributions to environmental impact consequences\n family_hat_carbon, theta_hat_carbon = fit_distribution_to_percentiles(\n carbon_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n carbon_theta = theta_hat_carbon\n if family_hat_carbon == 'normal':\n carbon_theta[1] = carbon_theta[1] / carbon_theta[0]\n\n family_hat_energy, theta_hat_energy = fit_distribution_to_percentiles(\n energy_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n energy_theta = theta_hat_energy\n if family_hat_energy == 'normal':\n energy_theta[1] = energy_theta[1] / energy_theta[0]\n\n # Note that here we assume that the cutoff quantities are\n # identical across damage states.\n # This assumption holds for the second edition of FEMA P58, but\n # it might need to be revisited in future editions.\n cost_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1')\n cost_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1')\n time_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1_1')\n time_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1_1')\n\n # store the results\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = family_hat\n\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_0'] = (\n f\"{cost_vals[3]:g},{cost_vals[4]:g}|\"\n f\"{cost_qnt_low:g},{cost_qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Cost'),\n f'DS{DS_i}-Theta_1'] = f\"{cost_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = family_hat\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_0'] = (\n f\"{time_vals[3]:g},{time_vals[4]:g}|\"\n f\"{time_qnt_low:g},{time_qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-Theta_1'] = f\"{time_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-LongLeadTime'] = int(time_vals[5] > 0)\n\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Family'] = family_hat_carbon\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_0'] = f\"{carbon_theta[0]:g}\"\n\n df_db.loc[(cmp.Index, 'Carbon'),\n f'DS{DS_i}-Theta_1'] = f\"{carbon_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Family'] = family_hat_energy\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_0'] = f\"{energy_theta[0]:g}\"\n\n df_db.loc[(cmp.Index, 'Energy'),\n f'DS{DS_i}-Theta_1'] = f\"{energy_theta[1]:g}\"\n\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": f\"Pure DS{ds_pure_id}. 
\" +\n cmp_meta[f\"DS_{ds_pure_id}_Description\"],\n \"RepairAction\":\n cmp_meta[f\"DS_{ds_pure_id}_Repair_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo),\n \"RepairAction\": 'Combination of pure DS repair '\n 'actions.'\n }})\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n # cost\n if not pd.isna(getattr(cmp, f'Best_Fit_DS{DS_i}')):\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'Best_Fit_DS{DS_i}')])\n\n if not pd.isna(getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}')):\n\n theta_0_low = getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}')\n theta_0_up = getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}')\n qnt_low = getattr(cmp, f'Lower_Qty_Cutoff_DS{DS_i}')\n qnt_up = getattr(cmp, f'Upper_Qty_Cutoff_DS{DS_i}')\n\n if theta_0_low == 0. and theta_0_up == 0.:\n df_db.loc[(cmp.Index, 'Cost'),\n f'DS{DS_i}-Family'] = np.nan\n\n else:\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_0'] = (\n f\"{theta_0_low:g},{theta_0_up:g}|\"\n f\"{qnt_low:g},{qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_1'] = (\n f\"{getattr(cmp, f'CV__Dispersion_DS{DS_i}'):g}\")\n\n else:\n incomplete_cost = True\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\n \"Description\": cmp_meta[f\"DS_{DS_i}_Description\"],\n \"RepairAction\": cmp_meta[\n f\"DS_{DS_i}_Repair_Description\"]}})\n\n # time\n if not pd.isna(getattr(cmp, f'Best_Fit_DS{DS_i}_1')):\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'Best_Fit_DS{DS_i}_1')])\n\n if not pd.isna(getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1')):\n\n theta_0_low = getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1')\n theta_0_up = getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}_1')\n qnt_low = getattr(cmp, f'Lower_Qty_Cutoff_DS{DS_i}_1')\n qnt_up = getattr(cmp, f'Upper_Qty_Cutoff_DS{DS_i}_1')\n\n if theta_0_low == 0. 
and theta_0_up == 0.:\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-Family'] = np.nan\n\n else:\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_0'] = (\n f\"{theta_0_low:g},{theta_0_up:g}|\"\n f\"{qnt_low:g},{qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_1'] = (\n f\"{getattr(cmp, f'CV__Dispersion_DS{DS_i}_2'):g}\")\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-LongLeadTime'] = (\n int(getattr(cmp, f'DS_{DS_i}_Long_Lead_Time') == 'YES'))\n\n else:\n incomplete_time = True\n\n # Carbon\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Best_Fit')):\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'DS{DS_i}_Best_Fit')])\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_0'] = getattr(cmp,\n f'DS{DS_i}_Embodied_Carbon_kg_CO2eq')\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_1'] = getattr(cmp, f'DS{DS_i}_CV_or_Beta')\n\n # Energy\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Best_Fit_1')):\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'DS{DS_i}_Best_Fit_1')])\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_0'] = getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ')\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_1'] = getattr(cmp, f'DS{DS_i}_CV_or_Beta_1')\n\n df_db.loc[(cmp.Index, 'Cost'), 'Incomplete'] = int(incomplete_cost)\n df_db.loc[(cmp.Index, 'Time'), 'Incomplete'] = int(incomplete_time)\n df_db.loc[(cmp.Index, 'Carbon'), 'Incomplete'] = int(incomplete_carbon)\n df_db.loc[(cmp.Index, 'Energy'), 'Incomplete'] = int(incomplete_energy)\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.index = pd.MultiIndex.from_arrays(\n [df_db['Index'].values, df_db.index.get_level_values(1)])\n\n df_db.drop('Index', axis=1, inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 6):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n for cmp in cmp_to_drop:\n if cmp[0] in meta_dict:\n del meta_dict[cmp[0]]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the repair consequence data from FEMA \"\n \"P58\")", "def create_FEMA_P58_bldg_injury_db(\n source_file,\n target_data_file='bldg_injury_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_injury_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the injury database\n cols_to_db = [\n \"Fragility Unit of Measure\",\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n\n f'DS {DS_i}, Potential non-collapse casualty?',\n f'DS {DS_i} - Casualty Affected Area',\n f'DS {DS_i} Serious Injury Rate - Median',\n f'DS {DS_i} Serious Injury Rate - Dispersion',\n f'DS 
{DS_i} Loss of Life Rate - Median',\n f'DS {DS_i} Loss of Life Rate - Dispersion',\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 2, Description\",\n \"DS 3, Description\",\n \"DS 4, Description\",\n \"DS 5, Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord('-'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n df_db_source.replace('By User', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 16):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\",\n f\"DS{DS_i}-AffectedArea\",\n ]\n\n # create the MultiIndex\n comps = df_db_source.index.values\n DVs = ['S1', 'S2']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'Severity'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete_S1 = False\n incomplete_S2 = False\n\n # store units\n\n df_db.loc[cmp.Index, 'Quantity-Unit'] = (\n ' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip())\n df_db.loc[(cmp.Index, 'S1'), 'DV-Unit'] = \"persons\"\n df_db.loc[(cmp.Index, 'S2'), 'DV-Unit'] = \"persons\"\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. 
description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n # Note that we are assuming that all damage states are triggered by\n # a single limit state in these components.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n inj_data = {}\n ds_tot = 0\n\n # get the p10, p50, and p90 estimates for all damage states\n for DS_i in range(1, 6):\n\n casualty_model = getattr(\n cmp, f'DS_{DS_i}_Potential_non_collapse_casualty')\n\n if casualty_model is True:\n\n inj_data.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'DS_{DS_i}___Casualty_Affected_Area'),\n getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Median'),\n getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Dispersion'),\n getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Median'),\n getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Dispersion')\n ])})\n ds_tot += 1\n\n elif casualty_model is False:\n ds_tot += 1\n\n # only continue if there is injury data\n if len(inj_data) == 0:\n continue\n\n # now prepare the equivalent mutex damage states\n sim_ds_count = ds_tot\n ds_count = 2 ** (sim_ds_count) - 1\n\n # Here we take advantage of knowing that for every component with\n # simultaneous damage states, only one of the DSs has injury\n # consequences.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n ds_trig = list(inj_data.keys())[0]\n inj_data = inj_data[ds_trig]\n ds_trig = int(ds_trig[2:])\n\n for DS_i in range(1, ds_count + 1):\n ds_map = format(DS_i, f'0{sim_ds_count}b')\n\n if ds_map[-ds_trig] == '1':\n\n # store the consequence data\n for severity in ('S1', 'S2'):\n\n A_affected = inj_data[0]\n\n if severity == 'S1':\n theta_0 = inj_data[1]\n theta_1 = inj_data[2]\n elif severity == 'S2':\n theta_0 = inj_data[3]\n theta_1 = inj_data[4]\n\n if theta_0 != 0.0:\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_1'] = theta_1\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-AffectedArea'] = A_affected\n\n # store the metadata\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": f\"Pure 
DS{ds_pure_id}. \" +\n cmp_meta[\n f\"DS_{ds_pure_id}_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo)\n }})\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n casualty_flag = getattr(\n cmp, f'DS_{DS_i}_Potential_non_collapse_casualty')\n\n if casualty_flag is True:\n\n A_affected = getattr(cmp,\n f'DS_{DS_i}___Casualty_Affected_Area')\n\n for severity in ('S1', 'S2'):\n\n if severity == 'S1':\n theta_0 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_'\n f'Rate___Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_'\n f'Rate___Dispersion')\n elif severity == 'S2':\n theta_0 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_'\n f'Rate___Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_'\n f'Rate___Dispersion')\n\n if theta_0 != 0.0:\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_1'] = theta_1\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-AffectedArea'] = A_affected\n\n if (pd.isna(theta_0) or pd.isna(\n theta_1) or pd.isna(A_affected)):\n\n if severity == 'S1':\n incomplete_S1 = True\n else:\n incomplete_S2 = True\n\n if ~np.isnan(casualty_flag):\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\"Description\":\n cmp_meta[f\"DS_{DS_i}_Description\"]}})\n\n df_db.loc[(cmp.Index, 'S1'), 'Incomplete'] = int(incomplete_S1)\n df_db.loc[(cmp.Index, 'S2'), 'Incomplete'] = int(incomplete_S2)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.index = pd.MultiIndex.from_arrays(\n [df_db['Index'].values, df_db.index.get_level_values(1)])\n\n df_db.drop('Index', axis=1, inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 16):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n cmp_kept = df_db.index.get_level_values(0).unique()\n\n cmp_to_drop = []\n for cmp in meta_dict:\n if cmp not in cmp_kept:\n cmp_to_drop.append(cmp)\n\n for cmp in cmp_to_drop:\n del meta_dict[cmp]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the injury consequence data from FEMA \"\n \"P58\")", "def create_FEMA_P58_bldg_redtag_db(\n source_file,\n target_data_file='bldg_redtag_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_redtag_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # take another pass with booleans because the first does not always work\n for true_str in (\"YES\", \"Yes\", \"yes\"):\n df.replace(true_str, True, inplace=True)\n\n for false_str in (\"NO\", \"No\", \"no\"):\n 
df.replace(false_str, False, inplace=True)\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the injury database\n cols_to_db = [\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n f'DS {DS_i}, Unsafe Placard Trigger Flag',\n f'DS {DS_i}, Unsafe Placard Damage Median',\n f'DS {DS_i}, Unsafe Placard Damage Dispersion'\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 2, Description\",\n \"DS 3, Description\",\n \"DS 4, Description\",\n \"DS 5, Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord('-'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n df_db_source.replace('By User', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\"\n ]\n\n # create the database index\n comps = df_db_source.index.values\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=comps,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete = False\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. 
description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n pass\n # Note that we are assuming that components with simultaneous\n # damage states do not have damage that would trigger a red tag.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n redtag_flag = getattr(\n cmp, f'DS_{DS_i}_Unsafe_Placard_Trigger_Flag')\n\n if redtag_flag is True:\n\n theta_0 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_'\n f'Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_'\n f'Dispersion')\n\n if theta_0 != 0.0:\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Theta_1'] = theta_1\n\n if (pd.isna(theta_0) or pd.isna(theta_1)):\n\n incomplete = True\n\n if ~np.isnan(redtag_flag):\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\"Description\":\n cmp_meta[f\"DS_{DS_i}_Description\"]}})\n\n df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.set_index('Index', inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 6):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n cmp_kept = df_db.index.get_level_values(0).unique()\n\n cmp_to_drop = []\n for cmp in meta_dict:\n if cmp not in cmp_kept:\n cmp_to_drop.append(cmp)\n\n for cmp in cmp_to_drop:\n del meta_dict[cmp]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the red tag consequence data from FEMA \"\n \"P58\")", "def produce_database(database_name, is_debug):\n\t\n\t# read files from a01-a35, every file including 
whole ecg data and the corresponding annotation\n\tdata_annotations_set = get_ecg_data_annotations(database_name, is_debug)\n\t# divide ECG data to minute-by-minute ECG segments\n\t_ = process_ecg_data_segments(database_name, data_annotations_set, is_debug)", "def create_Hazus_EQ_bldg_repair_db(source_file,\n target_data_file='bldg_repair_DB_Hazus_EQ.csv',\n target_meta_file='bldg_repair_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n occupancies = list(\n raw_data['Structural_Fragility_Groups']['Repair_cost'].keys())\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Theta_0\",\n ]\n\n # create the MultiIndex\n cmp_types = ['STR', 'NSD', 'NSA', 'LF']\n comps = [f'{cmp_type}.{occ_type}'\n for cmp_type in cmp_types for occ_type in occupancies]\n DVs = ['Cost', 'Time']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # First, prepare the structural damage consequences\n S_data = raw_data['Structural_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'STR.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS4 and DS5 have identical repair consequences\n if DS_i == 5:\n ds_i = 4\n else:\n ds_i = DS_i\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = S_data['Repair_cost'][occ_type][ds_i-1]\n\n df_db.loc[\n (cmp_id, 'Time'),\n f'DS{DS_i}-Theta_0'] = S_data['Repair_time'][occ_type][ds_i-1]\n\n # Second, the non-structural drift sensitive one\n NSD_data = raw_data['NonStructural_Drift_Sensitive_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'NSD.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 5):\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = NSD_data['Repair_cost'][occ_type][DS_i-1]\n\n # Third, the non-structural acceleration sensitive fragilities\n NSA_data = raw_data['NonStructural_Acceleration_Sensitive_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'NSA.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 5):\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = NSA_data['Repair_cost'][occ_type][DS_i-1]\n\n # Fourth, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'LF.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS4 and DS5 have identical repair consequences\n if DS_i == 5:\n ds_i = 4\n else:\n ds_i = DS_i\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = LF_data['Repair_cost'][occ_type][ds_i - 1]\n\n df_db.loc[\n (cmp_id, 'Time'),\n f'DS{DS_i}-Theta_0'] = LF_data['Repair_time'][occ_type][ds_i - 1]\n\n # remove empty rows (from the end)\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # The damage quantity unit is the same for all consequence values\n df_db.loc[:, 'Quantity-Unit'] = \"1 EA\"\n\n # The output units are also indentical among all components\n df_db.loc[idx[:, 
'Cost'], 'DV-Unit'] = \"loss_ratio\"\n df_db.loc[idx[:, 'Time'], 'DV-Unit'] = \"day\"\n\n # convert to simple index\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the repair consequence data from Hazus \"\n \"EQ\")", "def dpf(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['dpf']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for i in xrange(1,4):\n label = \"DPF{0}\".format(str(i))\n lAng_label = 'L{0}ANG'.format(str(i))\n cAng_label = 'C{0}ANG'.format(str(i))\n distillate_label = get_distillate_label([lAng_label, cAng_label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_lAng_label = lAng_label\n dep_lAng_name = fields['deps'][0]\n dep_lAng_uuid = self.uuid_map[lAng_label]\n dep_cAng_label = cAng_label\n dep_cAng_name = fields['deps'][1]\n dep_cAng_uuid = self.uuid_map[cAng_label]\n deps = [[dep_lAng_label, dep_lAng_name, dep_lAng_uuid],\n [dep_cAng_label, dep_cAng_name, dep_cAng_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"DPF\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[label] = emitted[-2][-36:]\n\n filename = \"{0}/DPF_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def create_patolli(database='red_cod-db.pkl', sites = -1, elements=-1, maxatoms=-1,\r\n dictionary='structure_dictionary', features='datosrahm.csv',\r\n control_file='model_control_file', \r\n verbose=1, test_frac = 0.15, local_function='fij_2.0_25_diccio',\r\n test_with_all_false = False):\r\n \r\n start_main=time.time()\r\n \r\n X, _, _, _, df = raw_features_extractor(database=database, sites = sites, \r\n elements = elements, maxatoms = maxatoms, \r\n dictionary=dictionary, features=features)\r\n \r\n X = compute_quotients(X=X)\r\n X, df = append_local_functions(X = X, df = df, local_function = local_function)\r\n X, _ , df, _ = split_collection(X = X, df = df, frac = test_frac)\r\n \r\n Y = df['target'].values\r\n class_names=list(set(df['target']))\r\n \r\n subnets=X.shape[1]\r\n features=X.shape[2]\r\n \r\n \r\n average = np.mean(X, axis=0) \r\n stdev = np.std(X, axis=0)\r\n \r\n X = (X - average)/stdev\r\n \r\n dicfeatstand = {'mean':average,'std':stdev}\r\n np.save('feature_standarisation',dicfeatstand)\r\n \r\n with open('feature_standarisation.txt','w') as f:\r\n f.write('X matrix has dimensions '+str(X.shape[0])+' samples x ' + \\\r\n str(X.shape[1]) + ' sites x ' + str(X.shape[2]) + \\\r\n ' features'+'\\n'+'\\n')\r\n f.write('Features - mean:'+'\\n'+'\\n')\r\n f.write(str(average)+'\\n'+'\\n')\r\n f.write('Features - std:'+'\\n'+'\\n')\r\n f.write(str(stdev))\r\n f.close()\r\n \r\n Xor=copy.deepcopy(X)\r\n X,y = shuffle(X,Y,random_state=0)\r\n \r\n x={}\r\n xor={}\r\n \r\n for subnet in 
range(subnets):\r\n x[subnet] = X[:,subnet,:]\r\n xor[subnet] = Xor[:,subnet,:]\r\n \r\n directorio = time.ctime().replace(' ', '_').replace(':','_')\r\n os.system('mkdir ' + directorio)\r\n os.system('mv compounds_collection.csv ' + directorio +'/')\r\n os.system('mv multiplicities.npy ' + directorio +'/')\r\n os.system('mv occupation_fractions.npy ' + directorio +'/')\r\n os.system('mv output_values.npy ' + directorio +'/')\r\n os.system('mv raw_features.npy ' + directorio +'/')\r\n os.system('mv X*.npy ' + directorio +'/')\r\n os.system('mv db*.csv ' + directorio +'/')\r\n os.system('mv feature_standarisation* ' + directorio +'/')\r\n \r\n \r\n ctrl_diccio = ctrl_dictionary(archivo=control_file)\r\n print('\\n')\r\n print('*************************************************************'+\r\n '*************************************************************'+\r\n '*************************************************************'+\r\n '*************************************************************')\r\n print('ANNs TRAINING WILL START NOW.')\r\n print('\\n')\r\n print('There are ',len(ctrl_diccio.keys()),' ANNs to train')\r\n \r\n for item in list(ctrl_diccio):\r\n print('Training ', item+1,'/',len(ctrl_diccio.keys()))\r\n diccionary = ctrl_diccio[item]\r\n \r\n hidden_layers=[float(x) for x in diccionary['HIDDEN_LAYERS'].split(\",\")]\r\n epochs=int(diccionary['EPOCHS'])\r\n batch_size=int(diccionary['BATCH_SIZE'])\r\n test_val=float(diccionary['TEST_VAL'])\r\n cost_function=diccionary['COST_FUNCTION']\r\n learning_rate=float(diccionary['LEARNING_RATE'])\r\n beta_1=float(diccionary['BETA_1'])\r\n beta_2=float(diccionary['BETA_2'])\r\n decay=float(diccionary['DECAY'])\r\n dropout=float(diccionary['DROPOUT'])\r\n activation=diccionary['ACTIVATION']\r\n name=diccionary['NAME']\r\n \r\n hidden_layers = np.asarray(hidden_layers)*features\r\n hidden_layers = [int(x) for x in hidden_layers]\r\n \r\n model = modelo(hidden_layers=hidden_layers, activation=activation,\r\n features=features, beta_1=beta_1, beta_2=beta_2, lr=learning_rate, decay=decay, \r\n dropout=dropout)\r\n \r\n start=time.time()\r\n data, dataframe, model = training(model, X=[x[i] for i in range(subnets)], Y = y, epochs=epochs, \r\n batch_size=batch_size, test_val=test_val, saveas=name,\r\n verbose=verbose)\r\n \r\n print('NN training lasted ',np.round(time.time() - start,2),'s')\r\n print('\\n')\r\n plotgraph(readfile=name+'.csv', outfiles=name, cost_function=cost_function)\r\n \r\n y_pred = (model.predict([xor[i] for i in range(subnets)]) > 0.5)\r\n \r\n precision, recall, fscore, support = PRFS(df['target'],y_pred)\r\n cnf_matrix=confusion_matrix(df['target'],y_pred)\r\n np.save(str(name)+'_cnfmat.npy',cnf_matrix)\r\n precision = np.round(100*precision,2)\r\n recall = np.round(100*recall,2)\r\n fscore = np.round(100*fscore,2)\r\n \r\n with open('PRFS_'+str(control_file)+'.txt', 'a') as prfs:\r\n prfs.write(str(name)+'\\n')\r\n prfs.write('classes: '+str(class_names)+'\\n')\r\n prfs.write('samples: '+str(support)+'\\n')\r\n prfs.write('precision: '+str(precision)+'\\n')\r\n prfs.write('recall: '+str(recall)+'\\n')\r\n prfs.write('f1-score: '+str(fscore)+'\\n')\r\n prfs.write('\\n')\r\n prfs.close()\r\n \r\n plt.figure(1)\r\n plot_confusion_matrix(cnf_matrix, classes=class_names,\r\n title='Confusion matrix, without normalization')\r\n plt.savefig('cnfmat_'+str(name)+'.png')\r\n \r\n plt.figure(2)\r\n plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\r\n title='Normalized confusion matrix')\r\n 
plt.savefig('normcnfmat_'+str(name)+'.png')\r\n \r\n plt.close('all')\r\n \r\n os.system('mv *' + name + '* ' + directorio)\r\n os.system('mv PRFS_' + str(control_file) + '.txt ' + directorio)\r\n os.system('cp ' + control_file + '.txt ' + directorio)\r\n os.system('cp ' + dictionary + '.txt ' + directorio)\r\n \r\n if test_frac != 0:\r\n test_models(directorio=directorio)\r\n \r\n if test_with_all_false:\r\n test_all_false(directorio=directorio, database=database, \r\n local_function=local_function)\r\n\r\n print('Whole process lasted ', np.round(-start_main+time.time(),2),'s') \r\n return", "def rpfp(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['rpfp']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for i in xrange(1,4):\n label = \"RPFP{0}\".format(str(i))\n distillate_label = \"L{0}-E_C{1}\".format(str(i),str(i))\n lAng_label = 'L{0}ANG'.format(str(i))\n cAng_label = 'C{0}ANG'.format(str(i))\n lMag_label = 'C{0}MAG'.format(str(i))\n cMag_label = 'C{0}MAG'.format(str(i))\n distillate_label = get_distillate_label([lAng_label, cAng_label, lMag_label, cMag_label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_lAng_label = lAng_label\n dep_lAng_name = fields['deps'][0]\n dep_lAng_uuid = self.uuid_map[lAng_label]\n dep_cAng_label = cAng_label\n dep_cAng_name = fields['deps'][1]\n dep_cAng_uuid = self.uuid_map[cAng_label]\n dep_lMag_label = lMag_label\n dep_lMag_name = fields['deps'][2]\n dep_lMag_uuid = self.uuid_map[lMag_label]\n dep_cMag_label = cMag_label\n dep_cMag_name = fields['deps'][3]\n dep_cMag_uuid = self.uuid_map[cMag_label]\n \n deps = [[dep_lAng_label, dep_lAng_name, dep_lAng_uuid],\n [dep_lMag_label, dep_lMag_name, dep_lMag_uuid],\n [dep_cAng_label, dep_cAng_name, dep_cAng_uuid],\n [dep_cMag_label, dep_cMag_name, dep_cMag_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"RPFP\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map['REAC_PWR{0}'.format(i)] = emitted[-3][-36:]\n output_uuid_map['FUND_PWR{0}'.format(i)] = emitted[-2][-36:]\n\n filename = \"{0}/RPFP_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def create_Hazus_EQ_bldg_injury_db(source_file,\n target_data_file='bldg_injury_DB_Hazus_EQ.csv',\n target_meta_file='bldg_injury_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n building_types = list(\n raw_data['Structural_Fragility_Groups']['P_collapse'].keys())\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Theta_0\",\n ]\n\n # create the MultiIndex\n cmp_types = ['STR', 'LF']\n comps = [f'{cmp_type}.{bt}'\n for cmp_type in cmp_types for bt in building_types]\n DVs = ['S1', 'S2', 'S3', 'S4']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # 
First, prepare the structural damage consequences\n S_data = raw_data['Structural_Fragility_Groups']\n\n for bt in building_types:\n\n # create the component id\n cmp_id = f'STR.{bt}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS5 is stored under 'collapse'\n if DS_i == 5:\n ds_i = 'Collapse'\n else:\n ds_i = f'DS{DS_i}'\n\n for S_i in range(1, 5):\n s_label = f'S{S_i}'\n df_db.loc[(cmp_id, s_label), f'DS{DS_i}-Theta_0'] = (\n S_data['Injury_rates'][ds_i][bt][S_i-1])\n\n # Second, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for bt in building_types:\n\n # create the component id\n cmp_id = f'STR.{bt}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS5 is stored under 'collapse'\n if DS_i == 5:\n ds_i = 'Collapse'\n else:\n ds_i = f'DS{DS_i}'\n\n for S_i in range(1, 5):\n s_label = f'S{S_i}'\n df_db.loc[(cmp_id, s_label), f'DS{DS_i}-Theta_0'] = (\n S_data['Injury_rates'][ds_i][bt][S_i - 1])\n\n # remove empty rows\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # The damage quantity unit is the same for all consequence values\n df_db.loc[:, 'Quantity-Unit'] = \"1 EA\"\n\n # The output units are also indentical among all components\n df_db.loc[:, 'DV-Unit'] = \"injury_rate\"\n\n # convert to simple index\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the injury consequence data from Hazus \"\n \"EQ\")", "def _create_sql(self):\n\n pdbfile = self.pdbfile\n sqlfile = self.sqlfile\n\n if self.verbose:\n print('-- Create SQLite3 database')\n\n #name of the table\n #table = 'ATOM'\n\n # column names and types\n self.col = {'serial' : 'INT',\n 'name' : 'TEXT',\n 'altLoc' : 'TEXT',\n 'resName' : 'TEXT',\n 'chainID' : 'TEXT',\n 'resSeq' : 'INT',\n 'iCode' : 'TEXT',\n 'x' : 'REAL',\n 'y' : 'REAL',\n 'z' : 'REAL',\n 'occ' : 'REAL',\n 'temp' : 'REAL'}\n\n # delimtier of the column format\n # taken from http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM\n self.delimiter = {\n 'serial' : [6,11],\n 'name' : [12,16],\n 'altLoc' : [16,17],\n 'resName' :[17,20],\n 'chainID' :[21,22],\n 'resSeq' :[22,26],\n 'iCode' :[26,26],\n 'x' :[30,38],\n 'y' :[38,46],\n 'z' :[46,54],\n 'occ' :[54,60],\n 'temp' :[60,66]}\n\n if self.no_extra:\n del self.col['occ']\n del self.col['temp']\n\n # size of the things\n ncol = len(self.col)\n ndel = len(self.delimiter)\n\n\n # open the data base\n # if we do not specify a db name\n # the db is only in RAM\n # there might be little advantage to use memory\n # https://stackoverflow.com/questions/764710/sqlite-performance-benchmark-why-is-memory-so-slow-only-1-5x-as-fast-as-d\n if self.sqlfile is None:\n self.conn = sqlite3.connect(':memory:')\n \n # or we create a new db file\n else:\n if os.path.isfile(sqlfile):\n sp.call('rm %s' %sqlfile,shell=True)\n self.conn = sqlite3.connect(sqlfile)\n self.c = self.conn.cursor()\n\n # intialize the header/placeholder\n header,qm = '',''\n for ic,(colname,coltype) in enumerate(self.col.items()):\n header += '{cn} {ct}'.format(cn=colname,ct=coltype)\n qm += 
'?'\n if ic < ncol-1:\n header += ', '\n qm += ','\n\n # create the table\n query = 'CREATE TABLE ATOM ({hd})'.format(hd=header)\n self.c.execute(query)\n\n\n # read the pdb file\n # this is dangerous if there are ATOM written in the comment part\n # which happends often\n #data = sp.check_output(\"awk '/ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a safer version consist at matching against the first field\n # won't work on windows\n #data = sp.check_output(\"awk '$1 ~ /^ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a pure python way\n # RMK we go through the data twice here. Once to read the ATOM line and once to parse the data ...\n # we could do better than that. But the most time consuming step seems to be the CREATE TABLE query\n # if we path a file we read it\n if isinstance(pdbfile,str):\n if os.path.isfile(pdbfile):\n with open(pdbfile,'r') as fi:\n data = [line.split('\\n')[0] for line in fi if line.startswith('ATOM')]\n else:\n raise FileNotFoundError('File %s was not found',pdbfile)\n\n # if we pass a list as for h5py read/write\n # we directly use that\n elif isinstance(pdbfile,np.ndarray):\n data = [l.decode('utf-8') for l in pdbfile.tolist()]\n\n # if we cant read it\n else:\n print(pdbfile)\n raise ValueError('PDB data not recognized')\n\n # if there is no ATOM in the file\n if len(data)==1 and data[0]=='':\n print(\"-- Error : No ATOM in the pdb file.\")\n self.is_valid = False\n return\n\n # haddock chain ID fix\n del_copy = self.delimiter.copy()\n if data[0][del_copy['chainID'][0]] == ' ':\n del_copy['chainID'] = [72,73]\n\n # get all the data\n data_atom = []\n for iatom,atom in enumerate(data):\n\n # sometimes we still have an empty line somewhere\n if len(atom) == 0:\n continue\n\n # browse all attribute of each atom\n at = ()\n for ik,(colname,coltype) in enumerate(self.col.items()):\n\n # get the piece of data\n data = atom[del_copy[colname][0]:del_copy[colname][1]].strip()\n\n # convert it if necessary\n if coltype == 'INT':\n data = int(data)\n elif coltype == 'REAL':\n data = float(data)\n\n # append keep the comma !!\n # we need proper tuple\n at +=(data,)\n\n # append\n data_atom.append(at)\n\n\n # push in the database\n self.c.executemany('INSERT INTO ATOM VALUES ({qm})'.format(qm=qm),data_atom)", "def design_TIA_inverter(db_n, db_p, sim_env,\n vg_res, rf_res,\n vdd_nom, vdd_vec, cpd, cload, \n rdc_min, fbw_min, pm_min, BER_max,\n vos, isw_pkpk,\n vb_n, vb_p, error_tol=0.05, ibias_max=20e-6):\n # Finds all possible designs for one value of VDD, then\n # confirm which work with all other VDD values.\n possibilities = []\n\n vg_vec = np.arange(0, vdd_nom, vg_res)\n \n for vg in vg_vec:\n print(\"VIN:\\t{0}\".format(vg))\n n_op_info = db_n.query(vgs=vg, vds=vg, vbs=vb_n-0)\n p_op_info = db_p.query(vgs=vg-vdd_nom, vds=vg-vdd_nom, vbs=vb_p-vdd_nom)\n \n if np.isinf(ibias_max):\n nf_n_max = 200\n else:\n nf_n_max = int(round(ibias_max/n_op_info['ibias']))\n \n nf_n_vec = np.arange(1, nf_n_max, 1)\n for nf_n in nf_n_vec:\n # Number of fingers can only be integer,\n # so increase as necessary until you get\n # sufficiently accurate/precise bias + current match\n ratio_good, nf_p = verify_ratio(n_op_info['ibias'],\n p_op_info['ibias'],\n nf_n,\n error_tol)\n if not ratio_good:\n continue\n\n # Getting small signal parameters to constrain Rf\n inv = LTICircuit()\n inv.add_transistor(n_op_info, 'out', 'in', 'gnd', fg=nf_n)\n inv.add_transistor(p_op_info, 'out', 'in', 'gnd', fg=nf_p)\n inv_num, inv_den = 
inv.get_num_den(in_name='in', out_name='out', in_type='v')\n A0 = abs(inv_num[-1]/inv_den[-1])\n \n gds_n = n_op_info['gds'] * nf_n\n gds_p = p_op_info['gds'] * nf_p\n gds = abs(gds_n) + abs(gds_p)\n ro = 1/gds\n \n # Assume Rdc is negative, bound Rf\n rf_min = max(rdc_min*(1+A0)/A0 + ro/A0, 0)\n rf_vec = np.arange(rf_min, rdc_min*2, rf_res)\n for rf in rf_vec:\n # With all parameters, check if it meets small signal spec\n meets_SS, SS_vals = verify_TIA_inverter_SS(n_op_info, p_op_info,\n nf_n, nf_p, rf, cpd, cload,\n rdc_min, fbw_min, pm_min)\n # With all parameters, estimate if it will meet noise spec\n meets_noise, BER = verify_TIA_inverter_BER(n_op_info, p_op_info, \n nf_n, nf_p,\n rf, cpd, cload,\n BER_max, vos, isw_pkpk)\n \n meets_spec = meets_SS # and meets_noise\n # If it meets small signal spec, append it to the list\n # of possibilities\n if meets_spec:\n possibilities.append(dict(vg=vg,\n vdd=vdd_nom,\n nf_n=nf_n,\n nf_p=nf_p,\n rf=rf,\n rdc=SS_vals['rdc'],\n fbw=SS_vals['fbw'],\n pm=SS_vals['pm'],\n ibias=ibias_n,\n BER=BER))\n elif SS_vals['fbw'] != None and SS_vals['fbw'] < fbw_min:\n # Increasing resistor size won't help bandwidth\n break\n \n # Go through all possibilities which work at the nominal voltage\n # and ensure functionality at other bias voltages\n # Remove any nonviable options\n print(\"{0} working at nominal VDD\".format(len(possibilities)))\n for candidate in possibilities:\n nf_n = candidate['nf_n']\n nf_p = candidate['nf_p']\n rf = candidate['rf']\n for vdd in vdd_vec:\n new_op_dict = vary_supply(vdd, db_n, db_p, nf_n, nf_p, vb_n, vb_p)\n vg = new_op_dict['vb']\n n_op = new_op_dict['n_op']\n p_op = new_op_dict['p_op']\n \n # Confirm small signal spec is met\n meets_SS, scratch = verify_TIA_inverter_SS(n_op, p_op,\n nf_n, nf_p, rf, cpd, cload,\n rdc_min, fbw_min, pm_min)\n \n # Confirm noise spec is met\n meets_noise, BER = verify_TIA_inverter_BER(n_op, p_op, \n nf_n, nf_p,\n rf, cpd, cload,\n BER_max, vos, isw_pkpk)\n \n meets_spec = meets_SS # and meets_noise\n \n if not meets_spec:\n possibilities.remove(candidate)\n break\n \n # Of the remaining possibilities, check for lowest power.\n # If there are none, raise a ValueError.\n if len(possibilities) == 0:\n raise ValueError(\"No final viable solutions\")\n \n print(\"{0} working at all VDD\".format(len(possibilities)))\n best_op = possibilities[0]\n for candidate in possibilities:\n best_op = choose_op_comparison(best_op, candidate)\n \n return best_op", "def parameterize(param_directory,pdb_file,topology_file,polymer_code,polymer_length):\n\n terphenyl_top = get_terphenyl_top_directory()\n if not os.path.exists(param_directory):\n os.mkdir(param_directory)\n param_topology = str(str(param_directory)+\"/topol.top\")\n copyfile(topology_file,param_topology)\n cwd = os.getcwd()\n if cwd != param_directory:\n os.chdir(param_directory)\n param_pdb = str(str(param_directory)+\"/\"+str(polymer_length)+\".pdb\")\n copyfile(pdb_file,param_pdb)\n\n # Parameterize our polymer using 'antechamber', from AmberTools.\n#\n # We parameterize the PDB structure using the param.sh BASH script written by Ben Coscia as a template: \"https://github.com/shirtsgroup/useful-scripts/blob/master/Paramaterization/GAFF/param.sh\"\n gaff_directory = str(str(terphenyl_top)+\"/setup_files/gaff\")\n replace(param_topology,'$TERPHENYL_TOP',terphenyl_top)\n replace(param_topology,'$RUN_DIRECTORY',param_directory)\n replace(param_topology,'$POLYMER_CODE ',str(\"{:<15}\".format(polymer_code)))\n replace(param_topology,'$POLYMER_CODE 
',str(\"{:<3}\".format(polymer_code)))\n copyfile(str(str(gaff_directory)+\"/acpype.py\"),str(str(param_directory)+\"/acpype.py\"))\n copyfile(str(str(gaff_directory)+\"/insertmol2charges.py\"),str(str(param_directory)+\"/insertmol2charges.py\"))\n# copyfile(str(str(gaff_directory)+\"/anneal.mdp\"),str(run_directory+\"/anneal.mdp\"))\n # Replace the variable keyword '$NAME' in param.sh with the name of the current polymer length\n copyfile(str(str(gaff_directory)+\"/param.sh\"),str(str(param_directory)+\"/param.sh\"))\n replace(str(param_directory+\"/param.sh\"),'$NAME',polymer_length)\n replace(str(param_directory+\"/param.sh\"),'$RES',polymer_code)\n # Place the residue name in the input PDB file residue name columns\n with open(pdb_file, \"rt\") as fin:\n\n new_pdb_file = param_pdb\n with open(new_pdb_file, \"wt\") as fout:\n for line in fin:\n line_list = [char for char in line]\n line_start = ''.join(line_list[0:6])\n residue_code = ''.join(line_list[17:20])\n if line_start == 'HETATM' or line_start == 'ATOM ':\n if residue_code == ' ':\n line_list[17:20] = str(\"{:<3}\".format(polymer_code)).split()\n #del line_list[29]\n line = ''.join(line_list)\n fout.write(line)\n subprocess.run([\"chmod\",\"+x\",str(str(param_directory)+\"/param.sh\")])\n os.chdir(param_directory)\n subprocess.run([str(str(param_directory)+\"/param.sh\")])\n solute_gro_file = str(str(param_directory)+\"/\"+str(polymer_length)+\".gro\")\n solute_topology_file = str(str(param_directory)+\"/\"+str(polymer_code)+\".top\")\n if cwd != param_directory:\n os.chdir(cwd)\n return(solute_gro_file,solute_topology_file)", "def _prep_data( self, db_device_adapter, db_start, db_end, min_points ):\n msg = f\"Preparing data for {db_device_adapter.name}\"\n AppGlobal.gui.display_info_string( msg )\n #self.logger.debug( f\"_prep_data {db_start}, {db_end}\" )\n\n# set_zero = self.parameters.graph_time_zero # \"max\" # min max\n# units = self.parameters.graph_time_units # \"hour\" # day hour use my converter in future\n\n db_device_name = db_device_adapter.name\n time_data = [] # raw data on time may be timestamp......\n\n inst_pw_data = [] #\n total_power_data = [] #\n\n# ( plug_name, plug_time, measure_type, plug_state, voltage, current, inst_power, total_power )\n\n sql = ( \"SELECT plug_name, plug_time, measure_type, plug_state, voltage, current, inst_power, total_power \" +\n \" FROM plug_measurements WHERE ( plug_time > ? ) AND ( plug_time < ? ) AND ( plug_name = ? 
) order by plug_time asc\" )\n\n a_datetime_begin = datetime.datetime.fromtimestamp( db_start )\n# print( f\"a_datetime_begin = { type(a_datetime_begin)} {a_datetime_begin} \" )\n\n a_datetime_end = datetime.datetime.fromtimestamp( db_end )\n# print( f\"a_datetime_end = { type(a_datetime_end)} {a_datetime_end} \" )\n\n db_file_name = AppGlobal.gui.get_db_file_name()\n if not( os.path.isfile( db_file_name )):\n msg = f\"Error: db file does not exist: {db_file_name}\"\n AppGlobal.gui.display_info_string( msg )\n return( None, None, None )\n\n sql_con = lite.connect( db_file_name )\n with sql_con:\n cur = sql_con.cursor()\n# print(f\"db_device_name{db_device_name}\")\n cur.execute( sql , ( db_start, db_end, db_device_name ) )\n\n # get rows one at a time in loop\n while True:\n row = cur.fetchone()\n\n if row is None:\n break\n print( f\"{row} [1] {row[1]} {row[6]}\" )\n\n time_data.append( row[1] )\n inst_pw_data.append( row[6] )\n total_power_data.append( row[7] )\n\n msg = f\"For device {db_device_name}: data points fetched: {len( time_data )}\"\n AppGlobal.gui.display_info_string( msg )\n\n if len( time_data ) < 10:\n msg = f\"Not enough data to process for {db_device_name}\"\n AppGlobal.gui.display_info_string( msg )\n return( None, None, None )\n\n temp = [] # temporary to build new time data will put back ... !! list comp makes this not needed\n\n zero = self.parameters.graph_time_zero.lower()\n if zero in [ \"db_sql_begin\", ]:\n #convert_offset = time_data[0] # sec to minutes\n convert_offset = db_start\n a_datetime = datetime.datetime.fromtimestamp( convert_offset )\n self.graph_time_zero = f\"sql select begin( {a_datetime} )\"\n\n elif zero in [ \"data_begin\" ]:\n\n convert_offset = time_data[0]\n a_datetime = datetime.datetime.fromtimestamp( convert_offset )\n self.graph_time_zero = f\"first data point( {a_datetime} )\"\n else: # default or error ??\n convert_offset = db_start\n a_datetime = datetime.datetime.fromtimestamp( convert_offset )\n self.graph_time_zero = f\"sql select begin( {a_datetime} )\"\n\n# print( f\"parameters say units {self.parameters.graph_time_units}\" )\n units = self.parameters.graph_time_units.lower()\n if units in [ \"min\", \"minutes\" ]:\n convert_factor = 1./60. # sec to minutes\n self.graph_time_units = \"minutes\"\n elif units in [ \"hr\", \"hour\", \"hours\" ]:\n convert_factor = 1./( 60. * 60 )\n self.graph_time_units = \"hours\"\n elif units in [ \"day\", \"days\", ]:\n convert_factor = 1./( 60. * 60 * 24 )\n self.graph_time_units = \"days\"\n else: # seconds\n convert_factor = 1.\n self.graph_time_units = \"seconds\"\n\n temp = [ ( ( x - convert_offset ) * convert_factor ) for x in time_data ] # temp is new time_data\n\n # !! add this for energy zero self.graph_energy_zero = \"absolute\" # \"absolute\" \"first_value\"\n # !! 
convert to named tuple\n graph_data = ( temp, inst_pw_data, total_power_data )\n\n return graph_data", "def create_database(db_file: str, table_data: List) -> None:\n connection = None\n add_col = []\n\n table_root = '''id INTEGER PRIMARY KEY,\n iteration INTEGER NOT NULL,\n best_local_min TEXT NOT NULL,\n current_epoch INTEGER NOT NULL,\n trades_count INTEGER NOT NULL,\n avg_profit_pct REAL NOT NULL,\n total_profit_currency REAL NOT NULL,\n total_profit_pct REAL NOT NULL,\n avg_duration_minutes REAL NOT NULL,\n loss_func REAL NOT NULL, '''\n\n spaces_col = {'buy': 'buy TEXT NOT NULL',\n 'sell': 'sell TEXT NOT NULL',\n 'roi': 'roi TEXT NOT NULL',\n 'stoploss': 'stoploss TEXT NOT NULL',\n 'trailing': 'trailing TEXT NOT NULL'}\n\n try:\n os.remove(db_file)\n except OSError as err:\n print(err)\n\n try:\n connection = sqlite3.connect(db_file)\n cursor = connection.cursor()\n print(f\"{Fore.MAGENTA}Successfully connected to SQLite DB - {db_file}{Fore.RESET}\")\n\n if 'all' in table_data:\n table_data = ['buy', 'sell', 'roi', 'stoploss', 'trailing']\n elif 'default' in table_data:\n table_data = ['buy', 'sell', 'roi', 'stoploss']\n\n for param in table_data:\n add_col.append(spaces_col[param])\n\n table_root += ', '.join(add_col)\n\n create_hyperopt_data_table = 'CREATE TABLE hyperopt_results (' + table_root + ');'\n\n cursor.execute(create_hyperopt_data_table)\n connection.commit()\n print(f'{Fore.MAGENTA}Table successfully created.{Fore.RESET}')\n\n cursor.close()\n except sqlite3.Error as err:\n print(err)\n finally:\n if connection:\n connection.close()\n print(f'{Fore.MAGENTA}The SQLite connection is closed{Fore.RESET}')", "def create_database(excelpath, database=None):\n\n def generate_database_from_metadatas(metadata_dict, stimulus_dict):\n \"\"\"\n Given a dictionary of session objects with their metadata creates a new database\n with all the sessions in the database and the associated metadata\n \"\"\"\n # Create empty database from template class\n indexes = sorted(metadata_dict.keys())\n database = pd.DataFrame(index=indexes, columns=['Number', 'Metadata', 'Tracking', 'Registration', 'Stimuli'])\n\n # Fill in metadata from the dictionary\n for sessname, metadata in sorted(metadata_dict.items()):\n database['Metadata'][sessname] = metadata\n database['Number'][sessname] = metadata['number']\n\n for sessname, stimulus in sorted(stimulus_dict.items()):\n database['Stimuli'][sessname] = stimulus\n\n print(colored('Database initialized.','yellow'))\n return database\n\n def get_session_videodata(videos):\n \"\"\"\n Get relevant variables for video files\n \"\"\"\n # Get first frame of first video for future processing and number of frames in each video\n videos_data = {'Frame rate': [], 'Number frames': []}\n for idx, videofile in enumerate(videos):\n cap = cv2.VideoCapture(videofile)\n videos_data['Frame rate'].append(cap.get(cv2.CAP_PROP_FPS))\n videos_data['Number frames'].append(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))\n videos_data['Cumu. 
Num Frames'] = np.cumsum(videos_data['Number frames'])\n return videos_data\n\n def get_stim_onset_times(sessions, metadata_dict):\n \"\"\"\n loops over a list of dictionary with the info for each session and gets all the stimulus onset times\n \"\"\"\n if not isinstance(sessions, list):\n sessions = list(sessions)\n\n for line in sessions:\n session_id = line['Sess.ID']\n if session_id: # we loaded a line with session info\n session_name = '{}_{}'.format(line['Experiment'], line['Sess.ID'])\n\n # Check if session is already in database\n if database is not None and session_name in database.index:\n continue\n session_stimuli = {}\n session_stimuli['session_id'] = session_id\n session_stimuli['stimuli'] = {}\n session_stimuli['stimuli']['visual'] = []\n session_stimuli['stimuli']['audio'] = []\n session_stimuli['stimuli']['digital'] = []\n videopaths = []\n # load data from .tdms and .avi fils\n for recording in line['Recordings']:\n path = os.path.join(line['Base fld'], line['Exp fld'], recording)\n for f in os.listdir(path):\n if '.avi' in f:\n videopaths.append(os.path.join(path, f))\n print(videopaths)\n elif '.tdms' == f[-5:]:\n tdmspath = os.path.join(path, f)\n # Loop over each .tdms file and extract stimuli frames\n print(colored('Loading {}: {}'.format(session_name,os.path.basename(tdmspath)),'yellow'))\n tdms = TdmsFile(tdmspath)\n if metadata_dict[session_name]['software'] == 'behaviour':\n visual_rec_stims, audio_rec_stims, digital_rec_stims = [], [], []\n for group in tdms.groups():\n for obj in tdms.group_channels(group):\n if 'stimulis' in str(obj).lower():\n for idx in obj.as_dataframe().loc[0].index:\n if \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n elif \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n else:\n framen = int(idx.split(\"/'\")[2].split('-')[0])\n if 'visual' in str(obj).lower():\n visual_rec_stims.append(framen)\n elif 'audio' in str(obj).lower():\n audio_rec_stims.append(framen)\n elif 'digital' in str(obj).lower():\n digital_rec_stims.append(framen)\n else:\n print(colored('Couldnt load stim correctly','yellow'))\n # Now use the AI channels to find the *real* stimulus onset times and replace them\n if audio_rec_stims:\n stimulus_on_idx = np.where(tdms.group_channels('AI')[3].data > .55)[0] #in first data sets this is AI 1, later AI 2\n idx_since_last_stimulus_on = np.diff(stimulus_on_idx)\n if stimulus_on_idx.size:\n stimulus_start_idx = stimulus_on_idx[np.append(np.ones(1).astype(bool),idx_since_last_stimulus_on>2*10000)] #usually 10 or 30\n stimulus_start_frame = np.ceil(stimulus_start_idx / 10000 / (33 + 1 / 3) * 1000).astype(int)\n stimulus_start_frame = stimulus_start_frame[stimulus_start_frame > 300]\n else:\n stimulus_start_frame = np.array(audio_rec_stims)\n print('NO STIMULI FOUND!!')\n\n if len(stimulus_start_frame) != len(audio_rec_stims):\n print('audio AI channel does not match number of timestamps by ' + str(len(audio_rec_stims)-len(stimulus_start_frame)) )\n else:\n discrepancy = stimulus_start_frame - audio_rec_stims\n if sum(discrepancy>8):\n print('audio AI channel does not match values of timestamps')\n else:\n print(discrepancy)\n # for conditioning experiment, just use what the tdms says\n # if 'food' in line['Experiment']:\n # stimulus_start_frame = np.array(audio_rec_stims)\n audio_rec_stims = list(stimulus_start_frame)\n\n session_stimuli['stimuli']['visual'].append(visual_rec_stims)\n session_stimuli['stimuli']['audio'].append(audio_rec_stims)\n 
session_stimuli['stimuli']['digital'].append(digital_rec_stims)\n\n else:\n \"\"\" HERE IS WHERE THE CODE TO GET THE STIM TIMES IN MANTIS WILL HAVE TO BE ADDEDD \"\"\"\n pass\n\n # Add to dictionary (or update entry)\n stimulus_dict[session_name] = session_stimuli\n return stimulus_dict\n\n def get_metadata(sessions):\n \"\"\"\n loops over a list of dictionary with the info for each session and gets all the metadata\n \"\"\"\n if not isinstance(sessions, list):\n sessions = list(sessions)\n\n for line in sessions:\n session_id = line['Sess.ID']\n if session_id: # we loaded a line with session info\n session_name = '{}_{}'.format(line['Experiment'], line['Sess.ID'])\n\n # Check if session is already in database\n # if database is not None and session_name in database.index:\n # print(colored('Session is already in database','yellow'))\n # continue\n\n # Create the metadata\n session_metadata = {}\n session_metadata['session_id'] = session_id\n session_metadata['experiment'] = line['Experiment']\n session_metadata['date'] = line['Date']\n session_metadata['mouse_id'] = line['MouseID']\n session_metadata['software'] = line['Software']\n session_metadata['number'] = line['Number']\n\n # initialize video data\n session_metadata['video_file_paths'] = []\n session_metadata['tdms_file_paths'] = []\n session_metadata['videodata'] = []\n\n # load data from .tdms and .avi fils\n for recording in line['Recordings']:\n path = os.path.join(line['Base fld'], line['Exp fld'], recording)\n videopaths = []\n for f in os.listdir(path):\n if '.avi' in f:\n videopaths.append(os.path.join(path, f))\n elif '.tdms' == f[-5:]:\n tdmspath = os.path.join(path, f)\n\n # add file paths to metadata\n session_metadata['video_file_paths'].append(videopaths)\n session_metadata['tdms_file_paths'].append(tdmspath)\n\n # Loop over each video and get the relevant data [e.g., number of frames, fps...]\n session_metadata['videodata'].append(get_session_videodata(videopaths))\n\n # Add to dictionary (or update entry)\n metadata_dict[session_name] = session_metadata\n return metadata_dict\n\n\n ''' MAIN SECTION OF SCRIPT TO GENERATE DATABASE '''\n\n loaded_excel = pyexcel.get_records(file_name=excelpath)\n\n # Create a dictionary with each session's name as key and its metadata as value\n stimulus_dict, metadata_dict, all_metadata = {}, {}, []\n for line in loaded_excel: # Read each line in the excel spreadsheet and load info\n temp = {\n 'Sess.ID': line['Sess.ID'],\n 'Date': line['Date'],\n 'MouseID': line['MouseID'],\n 'Experiment': line['Experiment'],\n 'Software': line['Software'],\n 'Base fld': line['Base fld'],\n 'Exp fld': line['Exp fld'],\n 'Recordings': line['Sub Folders'].split('; '),\n 'Number': line['Number']\n }\n all_metadata.append(temp)\n\n # Loop over each recordings subfolder and check that the paths are correct [fast check]\n for line in all_metadata:\n for recording in line['Recordings']:\n path = os.path.join(line['Base fld'], line['Exp fld'], recording)\n if not os.path.exists(path):\n raise ValueError('Folder not found\\n{}'.format(path))\n print(colored('Excel spreadsheet loaded correctly. Now loading metadata.','yellow'))\n\n # Use loaded metadata to create the database. 
Threadpooled for faster execution\n num_parallel_processes = 4\n splitted_all_metadata = [all_metadata[i::num_parallel_processes] for i in range(num_parallel_processes)]\n pool = ThreadPool(num_parallel_processes)\n\n # get metadata for *all* sessions\n _ = pool.map(get_metadata, splitted_all_metadata)\n\n # get stimulus information for *new* sessions\n splitted_all_metadata = [(all_metadata[i::num_parallel_processes], metadata_dict) for i in range(num_parallel_processes)]\n _ = pool.starmap(get_stim_onset_times, splitted_all_metadata)\n\n # Create new database, and add to the old one if applicable\n if database is None:\n return generate_database_from_metadatas(metadata_dict, stimulus_dict)\n else:\n new_database = generate_database_from_metadatas(metadata_dict, stimulus_dict)\n for index, row in new_database.iterrows():\n if (index in database.index):\n # loop in case of erroneous duplicate entries (only take the first)\n # for stimuli, registration, tracking in zip(database.loc[index].Stimuli, database.loc[index].Registration, database.loc[index].Tracking):\n stimuli, registration, tracking = database.loc[index].Stimuli, database.loc[index].Registration, database.loc[index].Tracking\n new_database.loc[index].Stimuli = stimuli\n new_database.loc[index].Registration = registration\n new_database.loc[index].Tracking = tracking\n # break\n new_database = new_database.sort_values(by='Number')\n\n return new_database.sort_values(by='Number')", "def __init__(self):\n # File settings and locations.\n self.DATA_DIR = 'data'\n self.DATA_COL_DIR = 'data_collated'\n\n self.FIRE_DATABASE = 'FPA_FOD_20170508.sqlite'\n self.CLIMATE_DATA = 'GlobalLandTemperaturesByCity.csv'\n self.STOCK_DATA = 'historical_stock_prices.csv'\n self.COMBINED_DATA = 'combined_data.db'\n\n self.MODEL_PATH = 'models/dnn_wildfires.ckpt'\n\n # Setting to use reduced data for prototyping purposes.\n self.prototyping = False\n self.sample_size = 80000\n\n # Start date of data\n self.start = pd.to_datetime('1992-01-01')\n\n # Stocks in stock data to keep for analysis.\n self.stocks = ['MSFT', 'AAPL', 'GE', 'JNJ', 'JPM', 'PG']\n\n # Settings for validation and test set partitioning.\n self.val_set_ratio = 0.15\n self.test_set_ratio = 0.15\n\n # Separation of features for pipeline preparation \n self.cat_attribs = ['STATE', 'FIRE_SIZE_CLASS', 'OWNER_CODE', 'City']\n self.num_attribs = ['FIRE_YEAR', 'LATITUDE', 'LONGITUDE', 'FIRE_SIZE', \n 'FIRE_LENGTH', 'DIST_TO_MAJOR_CITY', 'AverageTemperature',\n 'AverageTemperatureUncertainty', 'AAPL', 'GE', 'JNJ', \n 'JPM', 'MSFT', 'PG']\n self.cycle_cols = ['DISC_MONTH', 'DISC_DAY_OF_WEEK', 'DISCOVERY_TIME', \n 'DISCOVERY_DOY', 'CONT_MONTH', 'CONT_DAY_OF_WEEK',\n 'CONT_TIME']\n\n # Define the ranges of the cycles in cycle_cols and whether any offset for\n # zero-indexing is needed (i.e., 'DISC_MONTH' cycles over a 12 month period\n # and the months need an offset of one to start the indicies at 0 for Jan.).\n self.cycle_ranges = [12, 7, 2400, 365, 12, 7, 2400]\n self.cycle_offsets = [1, 0, 0, 1, 1, 0, 0]\n\n # Parameters for deep learning model determined from randomized \n # hyperparameter search.\n self.n_hidden_layers = 4\n self.n_neurons = 200\n self.batch_size = 500\n self.batch_norm_momentum = 0.999\n self.dropout_rate = 0.4\n self.learning_rate = 0.01\n self.activation = tf.nn.elu\n\n # Hyperparameter settings .\n self.hp_search = False", "def setup_fpa():\n # it is a silicon detector. 
Based on the graph, the quantum efficiency\n # at 1.06 um is ~50%.\n fpa = {}\n fpa[\"quantum_efficiency\"] = 0.5\n return fpa", "def regenerateTable():\n deleteAll()\n\n # Start generating records from start nodes, and continue generating\n # records for their children until either the bottom of the ANAD_PART_OF\n # tree is reached, or stop nodes are reached.\n\n for perspective in Perspectives.Iterator():\n perspectiveName = perspective.getName()\n starts = PerspectiveAmbits.getStartAmbitForPerspective(perspectiveName)\n stops = PerspectiveAmbits.getStopAmbitForPerspective(perspectiveName)\n startNodeOids = sets.Set(starts.keys())\n stopNodeOids = sets.Set(stops.keys())\n \n #print perspectiveName\n #print startNodeOids\n #print stopNodeOids\n \n startApos = [PartOfs.getPrimaryPathApoForNodeOid(nodeOid)\n for nodeOid in startNodeOids]\n apoList = startApos[:]\n\n while len(apoList) > 0:\n partOf = apoList.pop()\n\n # create POP record for this part of.\n \n pop = AnadPartOfPerspectiveDb.AnadPartOfPerspectiveDbRecord()\n pop.setPerspectiveName(perspectiveName)\n pop.setApoOid(partOf.getOid())\n pop.setIsAncestor(False)\n pop.setNodeOid(partOf.getNodeOid())\n pop.insert()\n \n #if partOf.getOid() == 68470:\n # print \n # print pop.getPerspectiveName()\n # print pop.getApoOid()\n # print pop.isAncestor()\n # print pop.getNodeOid()\n # print\n # print partOf.getOid()\n # print partOf.getSpecies()\n # print partOf.getNodeStartStageOid()\n # print partOf.getNodeEndStageOid()\n # print partOf.getPathStartStageOid()\n # print partOf.getPathEndStageOid()\n # print partOf.getNodeOid()\n # print partOf.getSequence()\n # print partOf.getDepth()\n # print partOf.getFullPathEmapas()\n # print partOf.getFullPath()\n # print partOf.isPrimaryPath()\n # print partOf.getParentApoOid()\n\n _addToKnowledge(pop)\n\n # if this is not a stop node, then add all its part-of kids\n # to the list of APOs to generate POP records for.\n if partOf.getNodeOid() not in stopNodeOids:\n apoList.extend(PartOfs.getByParentOid(partOf.getOid()))\n\n # for each start node, add any ancestor APOs that were not added\n # by the above process.\n ancesApos = sets.Set()\n for apo in startApos:\n parentApoOid = apo.getParentApoOid()\n if parentApoOid != None:\n parentApo = PartOfs.getByOid(parentApoOid)\n if (_byApoOid.get(parentApoOid) == None or\n _byApoOid[parentApoOid].get(perspectiveName) == None):\n ancesApos.add(parentApo)\n\n while len(ancesApos) > 0:\n ancesApo = ancesApos.pop()\n # create POP record for this ancestor\n pop = AnadPartOfPerspectiveDb.AnadPartOfPerspectiveDbRecord()\n pop.setPerspectiveName(perspectiveName)\n pop.setApoOid(ancesApo.getOid())\n pop.setIsAncestor(True)\n pop.setNodeOid(ancesApo.getNodeOid())\n pop.insert()\n _addToKnowledge(pop)\n\n # if this APO has a parent that hasn't yet been processed then\n # add it to list of ancestor APOs to generate records for.\n parentApoOid = ancesApo.getParentApoOid()\n if (parentApoOid != None and\n (_byApoOid.get(parentApoOid) == None or\n _byApoOid[parentApoOid].get(perspectiveName) == None)):\n parentApo = PartOfs.getByOid(parentApoOid)\n ancesApos.add(parentApo)\n \n \n \n return", "def create_flux_vector_pf_gr_bif_1(self):\n # volumes_in_primal_set = self.mb.tag_get_data(self.volumes_in_primal_tag, 0, flat=True)[0]\n # volumes_in_primal_set = self.mb.get_entities_by_handle(volumes_in_primal_set)\n lim = 1e-4\n self.dfdsmax = 0\n self.fimin = 10\n self.qmax = 0\n self.store_velocity_pf = {}\n store_flux_pf = {}\n for primal in self.primals:\n #1\n primal_id1 = 
self.mb.tag_get_data(self.primal_id_tag, primal, flat=True)[0]\n primal_id = self.ident_primal[primal_id1]\n fine_elems_in_primal = self.mb.get_entities_by_handle(primal)\n for volume in fine_elems_in_primal:\n #2\n list_keq = []\n list_p = []\n list_gid = []\n list_keq3 = []\n list_gidsadj = []\n list_qw = []\n qw3 = []\n qw = 0\n flux = {}\n velocity = {}\n fi = self.mb.tag_get_data(self.fi_tag, volume, flat=True)[0]\n if fi < self.fimin:\n self.fimin = fi\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume, flat=True)[0]\n lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume, flat=True)[0]\n lbt_vol = lamb_w_vol + lamb_o_vol\n fw_vol = self.mb.tag_get_data(self.fw_tag, volume, flat=True)[0]\n sat_vol = self.mb.tag_get_data(self.sat_tag, volume, flat=True)[0]\n centroid_volume = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - centroid_volume[2]\n adjs_vol = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n gid_vol = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n pvol = self.mb.tag_get_data(self.pf_tag, volume, flat=True)[0]\n for adj in adjs_vol:\n #3\n gid_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n sat_adj = self.mb.tag_get_data(self.sat_tag, adj, flat=True)[0]\n padj = self.mb.tag_get_data(self.pf_tag, adj, flat=True)[0]\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n centroid_adj = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - centroid_adj[2]\n direction = centroid_adj - centroid_volume\n unit = direction/np.linalg.norm(direction)\n #unit = vetor unitario na direcao de direction\n uni = self.unitary(direction)\n # uni = valor positivo do vetor unitario\n kvol = np.dot(np.dot(kvol,uni),uni)\n kadj = np.dot(np.dot(kadj,uni),uni)\n lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj, flat=True)[0]\n lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj, flat=True)[0]\n lbt_adj = lamb_w_adj + lamb_o_adj\n fw_adj = self.mb.tag_get_data(self.fw_tag, adj, flat=True)[0]\n\n keq3 = (kvol*lamb_w_vol + kadj*lamb_w_adj)/2.0\n\n # kvol = kvol*(lamb_w_vol + lamb_o_vol)\n # kadj = kadj*(lamb_w_adj + lamb_o_adj)\n\n keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)\n grad_p = (padj - pvol)/float(abs(np.dot(direction, uni)))\n grad_z = (z_adj - z_vol)/float(abs(np.dot(direction, uni)))\n q = ((grad_p) - grad_z*self.gama)*(np.dot(self.A, uni))*keq\n\n list_keq.append(keq)\n list_p.append(padj)\n list_gid.append(gid_adj)\n\n keq2 = keq\n\n qw += q*(fw_adj + fw_vol)/2.0\n\n #keq = keq*(np.dot(self.A, uni))\n #pvol2 = self.mb.tag_get_data(self.pms_tag, volume, flat=True)[0]\n #padj2 = self.mb.tag_get_data(self.pms_tag, adj, flat=True)[0]\n\n #grad_p2 = (padj2 - pvol2)/float(abs(np.dot(direction, uni)))\n #q = (grad_p)*keq\n #qw3.append(grad_p*keq3*(np.dot(self.A, uni)))\n # if grad_p < 0:\n # #4\n # fw = fw_vol\n # qw += (fw*grad_p*kvol*(np.dot(self.A, uni)))\n # list_qw.append(fw*grad_p*kvol*(np.dot(self.A, uni)))\n #\n # else:\n # fw = fw_adj\n # qw += (fw*grad_p*kadj*(np.dot(self.A, uni)))\n # list_qw.append(fw*grad_p*kadj*(np.dot(self.A, uni)))\n\n\n # if gid_adj > gid_vol:\n # v = -(grad_p)*keq2\n # else:\n # v = (grad_p)*keq2\n\n flux[tuple(unit)] = q\n #velocity[tuple(unit)] = v\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n if abs(sat_adj - sat_vol) < lim or abs(fw_adj -fw_vol) < lim:\n continue\n dfds = abs((fw_adj - fw_vol)/(sat_adj - sat_vol))\n # print('aqui')\n # 
print(gid_vol)\n # print(gid_adj)\n # print(fw_adj - fw_vol)\n # print(sat_adj - sat_vol)\n # print(dfds)\n if dfds > self.dfdsmax:\n self.dfdsmax = dfds\n\n #2\n # list_keq.append(-sum(list_keq))\n # list_p.append(pvol)\n # list_gid.append(gid_vol)\n #\n # list_keq = np.array(list_keq)\n # list_p = np.array(list_p)\n # resultado = sum(list_keq*list_p)\n\n # print(gid_vol)\n # print(velocity)\n # print('\\n')\n # import pdb; pdb.set_trace()\n #self.store_velocity_pf[volume] = velocity\n store_flux_pf[volume] = flux\n flt = sum(flux.values())\n print('gid')\n print(gid_vol)\n print('flux')\n print(flt)\n print('\\n')\n import pdb; pdb.set_trace()\n self.mb.tag_set_data(self.flux_fine_pf_tag, volume, flt)\n\n if abs(sum(flux.values())) > lim and volume not in self.wells:\n print('nao esta dando conservativo na malha fina')\n print(gid_vol)\n print(sum(flux.values()))\n import pdb; pdb.set_trace()\n\n qmax = max(list(map(abs, flux.values())))\n if qmax > self.qmax:\n self.qmax = qmax\n if volume in self.wells_prod:\n qw_out = sum(flux.values())*fw_vol\n #qw3.append(-qw_out)\n qo_out = sum(flux.values())*(1 - fw_vol)\n self.prod_o.append(qo_out)\n self.prod_w.append(qw_out)\n qw = qw - qw_out\n\n if abs(qw) < lim and qw < 0.0:\n qw = 0.0\n\n elif qw < 0 and volume not in self.wells_inj:\n print('gid')\n print(gid_vol)\n print('qw < 0')\n print(qw)\n import pdb; pdb.set_trace()\n\n else:\n pass\n\n\n # if (qw < 0.0 or sum(qw3) < 0.0) and volume not in self.wells_inj:\n # print('qw3')\n # print(sum(qw3))\n # print('qw')\n # print(qw)\n # import pdb; pdb.set_trace()\n self.mb.tag_set_data(self.flux_w_tag, volume, qw)\n\n # print(self.dfdsmax)\n # print(sum(flux.values()))\n # print(sum(qw))\n # print(sum(qw3))\n # print('\\n')\n\n soma_inj = []\n soma_prod = []\n soma2 = 0\n with open('fluxo_malha_fina_bif_gr{0}.txt'.format(self.loop), 'w') as arq:\n for volume in self.wells:\n gid = self.mb.tag_get_data(self.global_id_tag, volume, flat = True)[0]\n values = self.store_flux_pf[volume].values()\n arq.write('gid:{0} , fluxo:{1}\\n'.format(gid, sum(values)))\n\n # print('gid:{0}'.format(gid))\n # print('valor:{0}'.format(sum(values)))\n if volume in self.wells_inj:\n soma_inj.append(sum(values))\n else:\n soma_prod.append(sum(values))\n # print('\\n')\n soma2 += sum(values)\n arq.write('\\n')\n arq.write('soma_inj:{0}\\n'.format(sum(soma_inj)))\n arq.write('soma_prod:{0}\\n'.format(sum(soma_prod)))\n arq.write('tempo:{0}'.format(self.tempo))\n\n return store_flux_pf", "def __init__(self, proj_num,\n base_data_dir = '/home/server/server2/data/SVR166219/',\n base_setup_dir = '/home/server/server2/projects/Gromacs/'):\n\n self.proj_num = proj_num\n self.base_data_dir = base_data_dir\n self.dataframe_path = os.path.join(base_dir, 'PROJ' + str(self.proj_num), 'p'+str(self.proj_num)+'.h5')\n\n # If the *.h5 DataFrame doesn't yet exist, make a new one\n if not os.path.exists(self.dataframe_path):\n self.create_dataframe()\n else:\n print('Using existing pandas DataFrame:', self.dataframe_path)\n\n ### create a file handle to store the data in (the dict-like) HDF5 format\n self.store = pd.HDFStore(self.dataframe_path)\n\n # store paths for all the setup files\n self.base_setup_dir = base_setup_dir\n assert os.path.exists(self.base_setup_dir)\n\n self.setup_dir = os.path.join(self.base_setup_dir, 'p'+str(self.proj_num))\n assert os.path.exists(self.setup_dir)\n\n self.feature_dir = os.path.join(self.setup_dir, 'features')\n assert os.path.exists(self.feature_dir)\n\n self.config_filename = 
os.path.join(self.feature_dir, 'config.dat')\n assert os.path.exists(self.feature_dir)\n\n # The Featurizer class needs to know how many frames per gen, and ns per frame, etc.\n self.nruns = None\n self.nclones = None\n self.ngens = None\n self.ns_per_gen = None\n self.ns_per_frame = None\n self.gmx_index_groups = [] # needed to process the pbc conversion for each run\n self.gmx_index_filenames = [] # needed to process the pbc conversion for each run\n\n # fill the these attributes using information from the config.dat file\n self.read_config(self.config_filename)\n\n # create some temp storage for the gro file, and run,\n # so we don't do extra work finding these it and loading it in each time featurize() is called\n self.gro_file = None\n self.run = None\n\n \"\"\"\n self.xtc_file = rm_periodic_boundary_cond(self)\n self.traj = md.load(self.xtc_file, top=self.gro_file)\n os.system('rm /home/server/git/fah-scripts/DataAnalysisScripts/temp_xtc/*.xtc')\n \"\"\"", "def create_geneIDsDF():\n datas=data.plfam_to_matrix()\n datas.run()\n print('***Dataframe created***')", "def extract_db(psm_file, is_spider):\n\tdb = pd.read_csv(psm_file)\n\tdb_result = db.loc[:, ['Scan', 'Peptide', 'Intensity']]\n\tdb_result.columns = ['scan', 'peptide', 'area']\n\n\tif (is_spider):\n\t\tdb_result['peptide'] = db_result['peptide'].apply(modify_AA)\n\n\tdb_result['conf'] = db_result['peptide'].apply(fake_conf_score)\n\tdb_result = db_result.fillna(100)\n\t\n\tdb_result = db_result[['scan', 'peptide', 'conf', 'area']]\n\t\n\treturn db_result", "def gff2database(fnamegff,fnamefasta):\n return gff2db(\n dbname=DB_NAME,\n dbhost=DB_HOST,\n dbconnection=DB_CONNECTION,\n dbuser=DB_USER,\n dbpass=DB_PASS,\n fnamefasta=fnamefasta,\n fnamegff=fnamegff,\n pathperl=PERL_PATH,\n pathloadgff=EXECUTABLE_LOAD_GFF)", "def prepare(dp: frictionless.package.Package, name: str):\n data = read_datapackage(dp)\n data[\"fid\"] = name + \"_\" + data[ID].astype(str)\n\n spatial = gpd.GeoDataFrame(\n data[\"fid\"],\n columns=[\"fid\"],\n geometry=gpd.points_from_xy(data.longitude, data.latitude),\n crs=\"EPSG:4326\",\n )\n\n # Other fields to json\n def np_encoder(object):\n \"\"\"Source: https://stackoverflow.com/a/65151218.\"\"\"\n if isinstance(object, np.generic):\n return object.item()\n\n other_cols = [\n x for x in data.columns if x not in VALUE_VARS + SPATIAL_VARS + ID_VARS\n ]\n\n # Int64 to int\n data.loc[:, other_cols].loc[:, data[other_cols].dtypes == \"int64\"] = (\n data.loc[:, other_cols].loc[:, data[other_cols].dtypes == \"int64\"].astype(int)\n )\n data = data.replace({np.nan: None})\n data[\"fields\"] = data[other_cols].to_dict(orient=\"records\")\n data[\"fields\"] = data[\"fields\"].apply(lambda x: json.dumps(x, default=np_encoder))\n\n # Unpivoting\n data = data.melt(id_vars=ID_VARS, value_vars=VALUE_VARS)\n\n # Remove nan\n data = data.dropna()\n\n # Conversion\n enermaps_data = utilities.ENERMAPS_DF\n enermaps_data[\"fid\"] = data[\"fid\"]\n enermaps_data[\"value\"] = data[\"value\"]\n enermaps_data[\"variable\"] = data[\"variable\"]\n enermaps_data[\"fields\"] = data[\"fields\"]\n enermaps_data[\"unit\"] = UNIT\n enermaps_data[\"israster\"] = ISRASTER\n\n return enermaps_data, spatial", "def find_mpe(fbn, sbn, compat, beta, e):\n evars = set(e)\n freevars = [v for v in fbn.V if v.name not in evars]\n\n # para instanaciar las variables splitted primero. 
Ver popsition 1\n # del paper\n freevars.sort(key=lambda x: x.name in compat) \n \n t = datetime.now()\n ac = dnnf.todnnf(sbn)\n print datetime.now() - t\n print \"dfs\", freevars\n def dfs(q, varsleft, z, k):\n \"\"\"\n q: cota actual\n varsleft: variables que faltan por instanciar. Se sacan del final.\n z: instanciacion parcial actual\n k: numero de variables splitted que falta por instanciar\n \"\"\"\n var = varsleft.pop()\n varname = var.name\n domain = var.Domain\n k -= 1\n clones = []\n if varname in compat:\n for clone in compat[varname]:\n clones.append(clone)\n\n # probar todos sus posibles valores\n for value in domain:\n # agregar ese valor a la instancia parcial\n z[varname] = value\n for clone in clones:\n z[clone] = value\n p = ac.mpe(z)\n\n if varsleft:\n # si todavia quedan variables por asignar\n # hacer prune si podemos\n \n if k<=0:\n # ya todas las variables splitted estan\n # asignadas. Ahora el MPE(sbn) = MPE(fbn), no hace\n # falta hacer mas asignaciones para obtener el\n # valor exacto (Proposicion 1 del paper)\n q = max(q, beta*p)\n else:\n if p*beta <= q:\n # la cota superior sobre sbc es menor que la\n # cota inferior q que llevamos. Por aqui no\n # hay nada mejor\n continue\n else:\n # todavia puede haber algo bueno por aqui\n q = max(q, dfs(q, varsleft, z, k))\n else:\n # si no queda ninguna variable por asignar.\n # por un teorema, el MPE(fbn, x) == beta*MPE(sbn, x)\n q = max(q, beta*p)\n\n # regresar todo al estado orignal\n varsleft.append(var)\n del z[varname]\n for clone in clones:\n del z[clone]\n return q\n\n return dfs(0.0, freevars, e, len(compat))", "def sfp_prior_preparation(portshow_sfp_aggregated_df, pattern_dct):\n\n sfp_aggregated_modified_df = portshow_sfp_aggregated_df.copy()\n # drop duplicated port rows\n sfp_aggregated_modified_df.drop_duplicates(subset=['configname', 'chassis_name', 'chassis_wwn', \n 'switchName', 'switchWwn', 'slot', 'port'], inplace=True)\n # extract transceiver speed\n sfp_aggregated_modified_df['Transceiver_speed_extracted'] = \\\n sfp_aggregated_modified_df['Transceiver_mode'].str.extract(pattern_dct['transceiver_speed'])\n # extract transceiver mode\n sfp_aggregated_modified_df['Transceiver_mode_extracted'] = \\\n sfp_aggregated_modified_df['Transceiver_mode'].str.extract(pattern_dct['transceiver_mode'])\n # merge sfp speed and mode (lw, sw)\n sfp_aggregated_modified_df = dfop.merge_columns(sfp_aggregated_modified_df, summary_column='Transceiver_speed_mode_extracted', \n merge_columns=['Transceiver_speed_extracted', 'Transceiver_mode_extracted'], \n sep=' ', drop_merge_columns=False, sort_summary=False)\n # merge port state with transceiver details\n # add 'No_SFP_module' tag for cu media in blade switches to mark portPhys status\n # mask_vendor_no_sfp_module = sfp_aggregated_modified_df['Transceiver_Name'] == 'No SFP module'\n # mask_portphys_no_module = sfp_aggregated_modified_df['portPhys'] == 'No_Module'\n # sfp_aggregated_modified_df.loc[~mask_portphys_no_module & mask_vendor_no_sfp_module, 'No_SFP_module'] = 'No_SFP_module'\n sfp_aggregated_modified_df = dfop.merge_columns(sfp_aggregated_modified_df, summary_column='PortPhys_transceiver', \n merge_columns=['portPhys', 'Transceiver_speed_mode_extracted'], \n sep=' ', drop_merge_columns=False, sort_summary=False)\n # add annotation to the intervals\n comment_sfp_readings_interval(sfp_aggregated_modified_df) \n # transceiver support\n comment_sfp_support(sfp_aggregated_modified_df)\n # transceiver form factor (qsfp, dsfp)\n 
comment_specific_sfp(sfp_aggregated_modified_df, sfp_specification_column='Transceiver_form_factor', sfp_specification_name='Form factor', normal_value='sfp', upper_case_spec=True)\n # long distance sfps\n comment_specific_sfp(sfp_aggregated_modified_df, sfp_specification_column='Transceiver_distanceMax', sfp_specification_name='Distance', normal_value='normal')\n # merge vendor, part number and transcever details (speed and mode) \n sfp_aggregated_modified_df = dfop.merge_columns(\n sfp_aggregated_modified_df, summary_column='Transceiver_Name_PN', \n merge_columns=['Transceiver_Name', 'Transceiver_PN', 'Transceiver_speed_mode_extracted'], \n sep=' ', drop_merge_columns=False)\n # port_quantity column\n sfp_aggregated_modified_df['Port_quantity'] = 'Port_quantity'\n # transceiver quantity column\n mask_sfp_pn_notna = sfp_aggregated_modified_df['Transceiver_PN'].notna()\n sfp_aggregated_modified_df.loc[mask_sfp_pn_notna, 'Transceiver_quantity'] = 'Transceiver_quantity'\n return sfp_aggregated_modified_df", "def readVCTPINPUTS(self): \n #fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/VALID.CTPINPUTS\"\n fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/ctpinputs.cfg\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n dbinputs=[]\n count=0\n #print \"look for me if you want different inputs range...\"\n for i in lines:\n if(i[0] == 'l' and i[1] == '0'): continue\n if i == \"\\n\": continue\n if(i[0] != '#'):\n items=string.split(i)\n #print 'items= ',items,len(items)\n if(len(items)<6):\n print \"Error parsing database, not enough items in line:\"\n print items\n continue\n #return None\n if items[3] == 'M': continue\n count=count+1\n #if count<6 or count>11 : continue\n #if count>10 and count<24 : continue\n #if count<16: continue\n #if count > 4 and count < 15: continue\n #if items[3] != '1': continue\n #if items[2] != \"EMCAL\": continue\n #if (items[2] != \"SPD\") and (items[2] != \"T0\"): continue\n flag=1\n for i in self.detectors:\n if items[2].find(i)>=0 or i.find(\"ALL\")>=0:\n flag=0;\n break\n if flag: continue\n # input not connected\n if items[7] == '0' and items[3] == '0': continue\n db={}\n db['name']=items[0]\n db['detector']=items[2]\n db['level']='L'+items[3]\n db['signature']=items[4]\n #db['number']=items[5]\n db['number']=items[7]\n db['numberDIM']=items[6]\n db['ctpnum']=items[5]\n db['Edge'] = items[8]\n db['Delay'] = items[9]\n dbinputs.append(db)\n #print \"Adding: \", db\n return dbinputs", "def readdatabase2(self):\n fname=\"/home/alice/rl/v/vme/ADCI/DB/INPUTS.txt\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n dbinputs=[]\n for i in lines:\n if(i[0] != '#'):\n items=string.split(i)\n #print 'items= ',items,len(items)\n if(len(items)<6):\n print \"Error parsing database, not enough items in line:\"\n print items\n return None\n db={}\n db['number']=items[0]\n db['numberDIM']=items[1]\n db['level']=items[2]\n db['name']=items[3]\n db['detector']=items[4]\n db['signature']=items[5]\n dbinputs.append(db)\n return dbinputs", "def initialise_bdd(self):\n print(fr.FR[1])\n self.base.create_database(\"sql/p5.sql\")\n print(fr.FR[2])\n 
self.category_table.save_category()\n print(fr.FR[3])" ]
[ "0.65771925", "0.610855", "0.5858219", "0.5710821", "0.55405086", "0.5427353", "0.5412174", "0.53368837", "0.5281407", "0.5184217", "0.5183464", "0.51705265", "0.51649034", "0.51160085", "0.5094011", "0.5080228", "0.5078397", "0.50772166", "0.50710726", "0.5054762", "0.5053972", "0.5045321", "0.5032136", "0.5031905", "0.498375", "0.49792635", "0.49745348", "0.4969462", "0.4956578", "0.4951982" ]
0.745988
0
Create a repair consequence parameter database based on the FEMA P58 data. The method was developed to process v3.1.2 of the FragilityDatabase xls that is provided with the 2nd edition of FEMA P58.
def create_FEMA_P58_bldg_repair_db( source_file, target_data_file='bldg_repair_DB_FEMA_P58_2nd.csv', target_meta_file='bldg_repair_DB_FEMA_P58_2nd.json'): # parse the source file df = pd.concat( [pd.read_excel(source_file, sheet_name=sheet, header=2, index_col=1) for sheet in ('Summary', 'Cost Summary', 'Env Summary')], axis=1) # remove duplicate columns # (there are such because we joined two tables that were read separately) df = df.loc[:, ~df.columns.duplicated()] # remove empty rows and columns df.dropna(axis=0, how='all', inplace=True) df.dropna(axis=1, how='all', inplace=True) # filter the columns we need for the repair database cols_to_db = [ "Fragility Unit of Measure", 'DS Hierarchy', ] for DS_i in range(1, 6): cols_to_db += [ f"Best Fit, DS{DS_i}", f"Lower Qty Mean, DS{DS_i}", f"Upper Qty Mean, DS{DS_i}", f"Lower Qty Cutoff, DS{DS_i}", f"Upper Qty Cutoff, DS{DS_i}", f"CV / Dispersion, DS{DS_i}", f"Best Fit, DS{DS_i}.1", f"Lower Qty Mean, DS{DS_i}.1", f"Upper Qty Mean, DS{DS_i}.1", f"Lower Qty Cutoff, DS{DS_i}.1", f"Upper Qty Cutoff, DS{DS_i}.1", f"CV / Dispersion, DS{DS_i}.2", f"DS {DS_i}, Long Lead Time", f'Repair Cost, p10, DS{DS_i}', f'Repair Cost, p50, DS{DS_i}', f'Repair Cost, p90, DS{DS_i}', f'Time, p10, DS{DS_i}', f'Time, p50, DS{DS_i}', f'Time, p90, DS{DS_i}', f'Mean Value, DS{DS_i}', f'Mean Value, DS{DS_i}.1', # Columns added for the Environmental loss f"DS{DS_i} Best Fit", f"DS{DS_i} CV or Beta", f"DS{DS_i} Best Fit.1", f"DS{DS_i} CV or Beta.1", f"DS{DS_i} Embodied Carbon (kg CO2eq)", f"DS{DS_i} Embodied Energy (MJ)", ] # filter the columns that we need for the metadata cols_to_meta = [ "Component Name", "Component Description", "Construction Quality:", "Seismic Installation Conditions:", "Comments / Notes", "Author", "Fragility Unit of Measure", "Round to Integer Unit?", "DS 1, Description", "DS 1, Repair Description", "DS 2, Description", "DS 2, Repair Description", "DS 3, Description", "DS 3, Repair Description", "DS 4, Description", "DS 4, Repair Description", "DS 5, Description", "DS 5, Repair Description", ] # remove special characters to make it easier to work with column names str_map = { ord(' '): "_", ord('.'): "_", ord(':'): None, ord('('): None, ord(')'): None, ord('?'): None, ord('/'): None, ord(','): None, } df_db_source = df.loc[:, cols_to_db] df_db_source.columns = [s.translate(str_map) for s in cols_to_db] df_db_source.sort_index(inplace=True) df_meta = df.loc[:, cols_to_meta] df_meta.columns = [s.translate(str_map) for s in cols_to_meta] df_db_source.replace('BY USER', np.nan, inplace=True) # initialize the output loss table # define the columns out_cols = [ "Index", "Incomplete", "Quantity-Unit", "DV-Unit", ] for DS_i in range(1, 16): out_cols += [ f"DS{DS_i}-Family", f"DS{DS_i}-Theta_0", f"DS{DS_i}-Theta_1", f"DS{DS_i}-LongLeadTime", ] # create the MultiIndex comps = df_db_source.index.values DVs = ['Cost', 'Time', 'Carbon', 'Energy'] df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV']) df_db = pd.DataFrame( columns=out_cols, index=df_MI, dtype=float ) # initialize the dictionary that stores the loss metadata meta_dict = {} convert_family = { 'LogNormal': 'lognormal', 'Normal': 'normal' } # for each component... # (this approach is not efficient, but easy to follow which was considered # more important than efficiency.) 
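    # Each component contributes one row per decision variable (Cost, Time,
    # Carbon, Energy) to the multi-index output table built above.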
for cmp in df_db_source.itertuples(): ID = cmp.Index.split('.') cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}' # store the new index df_db.loc[cmp.Index, 'Index'] = cmpID # assume the component information is complete incomplete_cost = False incomplete_time = False incomplete_carbon = False incomplete_energy = False # store units df_db.loc[cmp.Index, 'Quantity-Unit'] = ( ' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip()) df_db.loc[(cmp.Index, 'Cost'), 'DV-Unit'] = "USD_2011" df_db.loc[(cmp.Index, 'Time'), 'DV-Unit'] = "worker_day" df_db.loc[(cmp.Index, 'Carbon'), 'DV-Unit'] = "kg" df_db.loc[(cmp.Index, 'Energy'), 'DV-Unit'] = "MJ" # get the raw metadata for the component cmp_meta = df_meta.loc[cmp.Index, :] # store the global (i.e., not DS-specific) metadata # every component is assumed to have a comp. description comments = cmp_meta['Component_Description'] # the additional fields are added to the description if they exist if cmp_meta['Construction_Quality'] != 'Not Specified': comments += f'\nConstruction Quality: ' \ f'{cmp_meta["Construction_Quality"]}' if cmp_meta['Seismic_Installation_Conditions'] not in [ 'Not Specified', 'Not applicable', 'Unknown', 'Any']: comments += f'\nSeismic Installation Conditions: ' \ f'{cmp_meta["Seismic_Installation_Conditions"]}' if cmp_meta['Comments__Notes'] != 'None': comments += f'\nNotes: {cmp_meta["Comments__Notes"]}' if cmp_meta['Author'] not in ['Not Given', 'By User']: comments += f'\nAuthor: {cmp_meta["Author"]}' # get the suggested block size and replace the misleading values with ea block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1] meta_data = { "Description": cmp_meta['Component_Name'], "Comments": comments, "SuggestedComponentBlockSize": ' '.join(block_size), "RoundUpToIntegerQuantity": cmp_meta['Round_to_Integer_Unit'], "ControllingDemand": "Damage Quantity", "DamageStates": {} } # Handle components with simultaneous damage states separately if 'Simul' in cmp.DS_Hierarchy: # Note that we are assuming that all damage states are triggered by # a single limit state in these components. # This assumption holds for the second edition of FEMA P58, but it # might need to be revisited in future editions. 
cost_est = {} time_est = {} carbon_est = {} energy_est = {} # get the p10, p50, and p90 estimates for all damage states for DS_i in range(1, 6): if not pd.isna(getattr(cmp, f'Repair_Cost_p10_DS{DS_i}')): cost_est.update({f'DS{DS_i}': np.array([ getattr(cmp, f'Repair_Cost_p10_DS{DS_i}'), getattr(cmp, f'Repair_Cost_p50_DS{DS_i}'), getattr(cmp, f'Repair_Cost_p90_DS{DS_i}'), getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}'), getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}') ])}) time_est.update({f'DS{DS_i}': np.array([ getattr(cmp, f'Time_p10_DS{DS_i}'), getattr(cmp, f'Time_p50_DS{DS_i}'), getattr(cmp, f'Time_p90_DS{DS_i}'), getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1'), getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}_1'), int(getattr(cmp, f'DS_{DS_i}_Long_Lead_Time') == 'YES') ])}) if not pd.isna(getattr(cmp, f'DS{DS_i}_Embodied_Carbon_kg_CO2eq')): theta_0, theta_1, family = [ getattr(cmp, f'DS{DS_i}_Embodied_Carbon_kg_CO2eq'), getattr(cmp, f'DS{DS_i}_CV_or_Beta'), getattr(cmp, f'DS{DS_i}_Best_Fit') ] if family == 'Normal': p10, p50, p90 = norm.ppf([0.1, 0.5, 0.9], loc=theta_0, scale=theta_0 * theta_1) elif family == 'LogNormal': p10, p50, p90 = np.exp(norm.ppf([0.1, 0.5, 0.9], loc=np.log(theta_0), scale=theta_1)) carbon_est.update({f'DS{DS_i}': np.array([p10, p50, p90])}) if not pd.isna(getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ')): theta_0, theta_1, family = [ getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ'), getattr(cmp, f'DS{DS_i}_CV_or_Beta_1'), getattr(cmp, f'DS{DS_i}_Best_Fit_1') ] if family == 'Normal': p10, p50, p90 = norm.ppf([0.1, 0.5, 0.9], loc=theta_0, scale=theta_0 * theta_1) elif family == 'LogNormal': p10, p50, p90 = np.exp(norm.ppf([0.1, 0.5, 0.9], loc=np.log(theta_0), scale=theta_1)) energy_est.update({f'DS{DS_i}': np.array([p10, p50, p90])}) # now prepare the equivalent mutex damage states sim_ds_count = len(cost_est.keys()) ds_count = 2 ** (sim_ds_count) - 1 for DS_i in range(1, ds_count + 1): ds_map = format(DS_i, f'0{sim_ds_count}b') cost_vals = np.sum([cost_est[f'DS{ds_i + 1}'] if ds_map[-ds_i - 1] == '1' else np.zeros(5) for ds_i in range(sim_ds_count)], axis=0) time_vals = np.sum([time_est[f'DS{ds_i + 1}'] if ds_map[-ds_i - 1] == '1' else np.zeros(6) for ds_i in range(sim_ds_count)], axis=0) carbon_vals = np.sum([carbon_est[f'DS{ds_i + 1}'] if ds_map[-ds_i - 1] == '1' else np.zeros(3) for ds_i in range(sim_ds_count)], axis=0) energy_vals = np.sum([energy_est[f'DS{ds_i + 1}'] if ds_map[-ds_i - 1] == '1' else np.zeros(3) for ds_i in range(sim_ds_count)], axis=0) # fit a distribution family_hat, theta_hat = fit_distribution_to_percentiles( cost_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal']) cost_theta = theta_hat if family_hat == 'normal': cost_theta[1] = cost_theta[1] / cost_theta[0] time_theta = [time_vals[1], np.sqrt(cost_theta[1] ** 2.0 + 0.25 ** 2.0)] # fit distributions to environmental impact consequences family_hat_carbon, theta_hat_carbon = fit_distribution_to_percentiles( carbon_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal']) carbon_theta = theta_hat_carbon if family_hat_carbon == 'normal': carbon_theta[1] = carbon_theta[1] / carbon_theta[0] family_hat_energy, theta_hat_energy = fit_distribution_to_percentiles( energy_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal']) energy_theta = theta_hat_energy if family_hat_energy == 'normal': energy_theta[1] = energy_theta[1] / energy_theta[0] # Note that here we assume that the cutoff quantities are # identical across damage states. 
# This assumption holds for the second edition of FEMA P58, but # it might need to be revisited in future editions. cost_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1') cost_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1') time_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1_1') time_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1_1') # store the results df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = family_hat df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_0'] = ( f"{cost_vals[3]:g},{cost_vals[4]:g}|" f"{cost_qnt_low:g},{cost_qnt_up:g}") df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_1'] = f"{cost_theta[1]:g}" df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = family_hat df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_0'] = ( f"{time_vals[3]:g},{time_vals[4]:g}|" f"{time_qnt_low:g},{time_qnt_up:g}") df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_1'] = f"{time_theta[1]:g}" df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-LongLeadTime'] = int(time_vals[5] > 0) df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Family'] = family_hat_carbon df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_0'] = f"{carbon_theta[0]:g}" df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_1'] = f"{carbon_theta[1]:g}" df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Family'] = family_hat_energy df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_0'] = f"{energy_theta[0]:g}" df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_1'] = f"{energy_theta[1]:g}" if ds_map.count('1') == 1: ds_pure_id = ds_map[::-1].find('1') + 1 meta_data['DamageStates'].update({f"DS{DS_i}": { "Description": f"Pure DS{ds_pure_id}. " + cmp_meta[f"DS_{ds_pure_id}_Description"], "RepairAction": cmp_meta[f"DS_{ds_pure_id}_Repair_Description"] }}) else: ds_combo = [f'DS{_.start() + 1}' for _ in re.finditer('1', ds_map[::-1])] meta_data['DamageStates'].update({f"DS{DS_i}": { "Description": 'Combination of ' + ' & '.join(ds_combo), "RepairAction": 'Combination of pure DS repair ' 'actions.' }}) # for every other component... else: # now look at each Damage State for DS_i in range(1, 6): # cost if not pd.isna(getattr(cmp, f'Best_Fit_DS{DS_i}')): df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = ( convert_family[getattr(cmp, f'Best_Fit_DS{DS_i}')]) if not pd.isna(getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}')): theta_0_low = getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}') theta_0_up = getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}') qnt_low = getattr(cmp, f'Lower_Qty_Cutoff_DS{DS_i}') qnt_up = getattr(cmp, f'Upper_Qty_Cutoff_DS{DS_i}') if theta_0_low == 0. and theta_0_up == 0.: df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = np.nan else: df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_0'] = ( f"{theta_0_low:g},{theta_0_up:g}|" f"{qnt_low:g},{qnt_up:g}") df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_1'] = ( f"{getattr(cmp, f'CV__Dispersion_DS{DS_i}'):g}") else: incomplete_cost = True meta_data['DamageStates'].update({ f"DS{DS_i}": { "Description": cmp_meta[f"DS_{DS_i}_Description"], "RepairAction": cmp_meta[ f"DS_{DS_i}_Repair_Description"]}}) # time if not pd.isna(getattr(cmp, f'Best_Fit_DS{DS_i}_1')): df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = ( convert_family[getattr(cmp, f'Best_Fit_DS{DS_i}_1')]) if not pd.isna(getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1')): theta_0_low = getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1') theta_0_up = getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}_1') qnt_low = getattr(cmp, f'Lower_Qty_Cutoff_DS{DS_i}_1') qnt_up = getattr(cmp, f'Upper_Qty_Cutoff_DS{DS_i}_1') if theta_0_low == 0. 
and theta_0_up == 0.: df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = np.nan else: df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_0'] = ( f"{theta_0_low:g},{theta_0_up:g}|" f"{qnt_low:g},{qnt_up:g}") df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_1'] = ( f"{getattr(cmp, f'CV__Dispersion_DS{DS_i}_2'):g}") df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-LongLeadTime'] = ( int(getattr(cmp, f'DS_{DS_i}_Long_Lead_Time') == 'YES')) else: incomplete_time = True # Carbon if not pd.isna(getattr(cmp, f'DS{DS_i}_Best_Fit')): df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Family'] = ( convert_family[getattr(cmp, f'DS{DS_i}_Best_Fit')]) df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_0'] = getattr(cmp, f'DS{DS_i}_Embodied_Carbon_kg_CO2eq') df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_1'] = getattr(cmp, f'DS{DS_i}_CV_or_Beta') # Energy if not pd.isna(getattr(cmp, f'DS{DS_i}_Best_Fit_1')): df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Family'] = ( convert_family[getattr(cmp, f'DS{DS_i}_Best_Fit_1')]) df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_0'] = getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ') df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_1'] = getattr(cmp, f'DS{DS_i}_CV_or_Beta_1') df_db.loc[(cmp.Index, 'Cost'), 'Incomplete'] = int(incomplete_cost) df_db.loc[(cmp.Index, 'Time'), 'Incomplete'] = int(incomplete_time) df_db.loc[(cmp.Index, 'Carbon'), 'Incomplete'] = int(incomplete_carbon) df_db.loc[(cmp.Index, 'Energy'), 'Incomplete'] = int(incomplete_energy) # store the metadata for this component meta_dict.update({cmpID: meta_data}) # assign the Index column as the new ID df_db.index = pd.MultiIndex.from_arrays( [df_db['Index'].values, df_db.index.get_level_values(1)]) df_db.drop('Index', axis=1, inplace=True) # review the database and drop rows with no information cmp_to_drop = [] for cmp in df_db.index: empty = True for DS_i in range(1, 6): if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']): empty = False break if empty: cmp_to_drop.append(cmp) df_db.drop(cmp_to_drop, axis=0, inplace=True) for cmp in cmp_to_drop: if cmp[0] in meta_dict: del meta_dict[cmp[0]] # convert to optimal datatypes to reduce file size df_db = df_db.convert_dtypes() df_db = base.convert_to_SimpleIndex(df_db, 0) # rename the index df_db.index.name = "ID" # save the consequence data df_db.to_csv(target_data_file) # save the metadata with open(target_meta_file, 'w+', encoding='utf-8') as f: json.dump(meta_dict, f, indent=2) print("Successfully parsed and saved the repair consequence data from FEMA " "P58")
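For context, the converter above calls a fit_distribution_to_percentiles helper that is not included in this record. The sketch below is an illustrative assumption of how such a helper could work (a least-squares fit of each candidate family to the supplied percentile values on the standard-normal quantile scale); it is not the library's actual implementation, and only the function name and call signature are taken from the call sites above.

import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize

def fit_distribution_to_percentiles(values, percentiles, candidate_families):
    # Illustrative sketch (assumption): pick the family whose fitted
    # parameters best reproduce the supplied percentile values.
    z = norm.ppf(percentiles)  # standard-normal quantiles for the given percentiles
    best_family, best_theta, best_err = None, None, np.inf
    for family in candidate_families:
        # fitting a lognormal is a normal fit in log space
        x = np.log(values) if family == 'lognormal' else np.asarray(values, dtype=float)

        def sq_error(theta):
            mu, sigma = theta
            return np.sum((mu + sigma * z - x) ** 2)

        res = minimize(sq_error, x0=[np.mean(x), np.std(x) + 1e-6],
                       bounds=[(None, None), (1e-9, None)])
        if res.fun < best_err:
            mu, sigma = res.x
            # report the median for the lognormal family and the mean for the
            # normal family, matching the convention used by the caller above
            theta_0 = np.exp(mu) if family == 'lognormal' else mu
            best_family, best_theta, best_err = family, [theta_0, sigma], res.fun

    return best_family, best_theta

Used as in the document above, e.g. fit_distribution_to_percentiles(cost_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal']) returns a family label and a [theta_0, theta_1] pair; for the normal family the caller then converts theta_1 into a coefficient of variation by dividing it by theta_0.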
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_FEMA_P58_fragility_db(source_file,\n target_data_file='fragility_DB_FEMA_P58_2nd.csv',\n target_meta_file='fragility_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # remove the empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns that we need for the fragility database\n cols_to_db = [\n \"Demand Parameter (value):\",\n \"Demand Parameter (unit):\",\n \"Demand Location (use floor above? Yes/No)\",\n \"Directional?\",\n \"DS Hierarchy\",\n \"DS 1, Probability\",\n \"DS 1, Median Demand\",\n \"DS 1, Total Dispersion (Beta)\",\n \"DS 2, Probability\",\n \"DS 2, Median Demand\",\n \"DS 2, Total Dispersion (Beta)\",\n \"DS 3, Probability\",\n \"DS 3, Median Demand\",\n \"DS 3, Total Dispersion (Beta)\",\n \"DS 4, Probability\",\n \"DS 4, Median Demand\",\n \"DS 4, Total Dispersion (Beta)\",\n \"DS 5, Probability\",\n \"DS 5, Median Demand\",\n \"DS 5, Total Dispersion (Beta)\",\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 1, Repair Description\",\n \"DS 2, Description\",\n \"DS 2, Repair Description\",\n \"DS 3, Description\",\n \"DS 3, Repair Description\",\n \"DS 4, Description\",\n \"DS 4, Repair Description\",\n \"DS 5, Description\",\n \"DS 5, Repair Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n # replace missing values with an empty string\n df_meta.fillna('', inplace=True)\n # the metadata shall be stored in strings\n df_meta = df_meta.astype(str)\n\n # initialize the output fragility table\n df_db = pd.DataFrame(\n columns=[\n \"Index\",\n \"Incomplete\",\n \"Demand-Type\",\n \"Demand-Unit\",\n \"Demand-Offset\",\n \"Demand-Directional\",\n \"LS1-Family\",\n \"LS1-Theta_0\",\n \"LS1-Theta_1\",\n \"LS1-DamageStateWeights\",\n \"LS2-Family\",\n \"LS2-Theta_0\",\n \"LS2-Theta_1\",\n \"LS2-DamageStateWeights\",\n \"LS3-Family\",\n \"LS3-Theta_0\",\n \"LS3-Theta_1\",\n \"LS3-DamageStateWeights\",\n \"LS4-Family\",\n \"LS4-Theta_0\",\n \"LS4-Theta_1\",\n \"LS4-DamageStateWeights\"\n ],\n index=df_db_source.index,\n dtype=float\n )\n\n # initialize the dictionary that stores the fragility metadata\n meta_dict = {}\n\n # conversion dictionary for demand types\n convert_demand_type = {\n 'Story Drift Ratio': \"Peak Interstory Drift Ratio\",\n 'Link Rotation Angle': \"Peak Link Rotation Angle\",\n 'Effective Drift': \"Peak Effective Drift Ratio\",\n 'Link Beam Chord Rotation': \"Peak Link Beam Chord Rotation\",\n 'Peak Floor Acceleration': \"Peak Floor Acceleration\",\n 'Peak Floor Velocity': \"Peak Floor Velocity\"\n }\n\n # conversion dictionary for demand unit names\n convert_demand_unit = {\n 'Unit less': 
'unitless',\n 'Radians': 'rad',\n 'g': 'g',\n 'meter/sec': 'mps'\n }\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n # create a dotted component index\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete = False\n\n # store demand specifications\n df_db.loc[cmp.Index, 'Demand-Type'] = (\n convert_demand_type[cmp.Demand_Parameter_value])\n df_db.loc[cmp.Index, 'Demand-Unit'] = (\n convert_demand_unit[cmp.Demand_Parameter_unit])\n df_db.loc[cmp.Index, 'Demand-Offset'] = (\n int(cmp.Demand_Location_use_floor_above_YesNo))\n df_db.loc[cmp.Index, 'Demand-Directional'] = (\n int(cmp.Directional))\n\n # parse the damage state hierarchy\n DS_setup = parse_DS_Hierarchy(cmp.DS_Hierarchy)\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"LimitStates\": {}\n }\n\n # now look at each Limit State\n for LS_i, LS_contents in enumerate(DS_setup):\n\n LS_i = LS_i + 1\n LS_contents = np.atleast_1d(LS_contents)\n\n ls_meta = {}\n\n # start with the special cases with multiple DSs in an LS\n if LS_contents[0] in {'MutEx', 'Simul'}:\n\n # collect the fragility data for the member DSs\n median_demands = []\n dispersions = []\n weights = []\n for ds in LS_contents[1:]:\n median_demands.append(\n getattr(cmp, f\"DS_{ds[2]}_Median_Demand\"))\n\n dispersions.append(\n getattr(cmp, f\"DS_{ds[2]}_Total_Dispersion_Beta\"))\n\n weights.append(getattr(cmp, f\"DS_{ds[2]}_Probability\"))\n\n # make sure the specified distribution parameters are appropriate\n if ((np.unique(median_demands).size != 1) or (\n np.unique(dispersions).size != 1)):\n raise ValueError(f\"Incorrect mutually exclusive DS \"\n f\"definition in component {cmp.Index} at \"\n f\"Limit State {LS_i}\")\n\n if LS_contents[0] == 'MutEx':\n\n # in mutually exclusive cases, make sure the specified DS\n # weights sum up to one\n np.testing.assert_allclose(\n np.sum(np.array(weights, dtype=float)), 1.0,\n err_msg=f\"Mutually exclusive Damage State weights do \"\n f\"not sum to 1.0 in component {cmp.Index} at \"\n f\"Limit State {LS_i}\")\n\n # and save all DS 
metadata under this Limit State\n for ds in LS_contents[1:]:\n ds_id = ds[2]\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": cmp_meta[f\"DS_{ds_id}_Description\"],\n \"RepairAction\": cmp_meta[\n f\"DS_{ds_id}_Repair_Description\"]\n }})\n\n else:\n # in simultaneous cases, convert simultaneous weights into\n # mutexc weights\n sim_ds_count = len(LS_contents) - 1\n ds_count = 2 ** (sim_ds_count) - 1\n\n sim_weights = []\n\n for ds_id in range(1, ds_count + 1):\n ds_map = format(ds_id, f'0{sim_ds_count}b')\n\n sim_weights.append(np.product(\n [weights[ds_i]\n if ds_map[-ds_i - 1] == '1' else 1.0-weights[ds_i]\n for ds_i in range(sim_ds_count)]))\n\n # save ds metadata - we need to be clever here\n # the original metadata is saved for the pure cases\n # when only one DS is triggered\n # all other DSs store information about which\n # combination of pure DSs they represent\n\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": f\"Pure DS{ds_pure_id}. \" +\n cmp_meta[f\"DS_{ds_pure_id}_Description\"],\n \"RepairAction\": cmp_meta[\n f\"DS_{ds_pure_id}_Repair_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo),\n \"RepairAction\": 'Combination of pure DS repair '\n 'actions.'\n }})\n\n # adjust weights to respect the assumption that at least\n # one DS will occur (i.e., the case with all DSs returning\n # False is not part of the event space)\n sim_weights_array = np.array(sim_weights) / np.sum(sim_weights)\n\n weights = sim_weights_array\n\n theta_0 = median_demands[0]\n theta_1 = dispersions[0]\n weights_str = ' | '.join([f\"{w:.6f}\" for w in weights])\n\n df_db.loc[cmp.Index, f'LS{LS_i}-DamageStateWeights'] = weights_str\n\n # then look at the sequential DS cases\n elif LS_contents[0].startswith('DS'):\n\n # this is straightforward, store the data in the table and dict\n ds_id = LS_contents[0][2]\n\n theta_0 = getattr(cmp, f\"DS_{ds_id}_Median_Demand\")\n theta_1 = getattr(cmp, f\"DS_{ds_id}_Total_Dispersion_Beta\")\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": cmp_meta[f\"DS_{ds_id}_Description\"],\n \"RepairAction\": cmp_meta[f\"DS_{ds_id}_Repair_Description\"]\n }})\n\n # FEMA P58 assumes lognormal distribution for every fragility\n df_db.loc[cmp.Index, f'LS{LS_i}-Family'] = 'lognormal'\n\n # identify incomplete cases...\n\n # where theta is missing\n if theta_0 != 'By User':\n df_db.loc[cmp.Index, f'LS{LS_i}-Theta_0'] = theta_0\n else:\n incomplete = True\n\n # where beta is missing\n if theta_1 != 'By User':\n df_db.loc[cmp.Index, f'LS{LS_i}-Theta_1'] = theta_1\n else:\n incomplete = True\n\n # store the collected metadata for this limit state\n meta_data['LimitStates'].update({f\"LS{LS_i}\": ls_meta})\n\n # store the incomplete flag for this component\n df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.set_index('Index', inplace=True)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the fragility data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the 
fragility data from FEMA P58\")", "def create_FEMA_P58_bldg_redtag_db(\n source_file,\n target_data_file='bldg_redtag_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_redtag_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # take another pass with booleans because the first does not always work\n for true_str in (\"YES\", \"Yes\", \"yes\"):\n df.replace(true_str, True, inplace=True)\n\n for false_str in (\"NO\", \"No\", \"no\"):\n df.replace(false_str, False, inplace=True)\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the injury database\n cols_to_db = [\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n f'DS {DS_i}, Unsafe Placard Trigger Flag',\n f'DS {DS_i}, Unsafe Placard Damage Median',\n f'DS {DS_i}, Unsafe Placard Damage Dispersion'\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 2, Description\",\n \"DS 3, Description\",\n \"DS 4, Description\",\n \"DS 5, Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord('-'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n df_db_source.replace('By User', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\"\n ]\n\n # create the database index\n comps = df_db_source.index.values\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=comps,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete = False\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. 
description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n pass\n # Note that we are assuming that components with simultaneous\n # damage states do not have damage that would trigger a red tag.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n redtag_flag = getattr(\n cmp, f'DS_{DS_i}_Unsafe_Placard_Trigger_Flag')\n\n if redtag_flag is True:\n\n theta_0 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_'\n f'Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_'\n f'Dispersion')\n\n if theta_0 != 0.0:\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Theta_1'] = theta_1\n\n if (pd.isna(theta_0) or pd.isna(theta_1)):\n\n incomplete = True\n\n if ~np.isnan(redtag_flag):\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\"Description\":\n cmp_meta[f\"DS_{DS_i}_Description\"]}})\n\n df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.set_index('Index', inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 6):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n cmp_kept = df_db.index.get_level_values(0).unique()\n\n cmp_to_drop = []\n for cmp in meta_dict:\n if cmp not in cmp_kept:\n cmp_to_drop.append(cmp)\n\n for cmp in cmp_to_drop:\n del meta_dict[cmp]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the red tag consequence data from FEMA \"\n \"P58\")", "def create_Hazus_EQ_bldg_repair_db(source_file,\n target_data_file='bldg_repair_DB_Hazus_EQ.csv',\n 
target_meta_file='bldg_repair_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n occupancies = list(\n raw_data['Structural_Fragility_Groups']['Repair_cost'].keys())\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Theta_0\",\n ]\n\n # create the MultiIndex\n cmp_types = ['STR', 'NSD', 'NSA', 'LF']\n comps = [f'{cmp_type}.{occ_type}'\n for cmp_type in cmp_types for occ_type in occupancies]\n DVs = ['Cost', 'Time']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # First, prepare the structural damage consequences\n S_data = raw_data['Structural_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'STR.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS4 and DS5 have identical repair consequences\n if DS_i == 5:\n ds_i = 4\n else:\n ds_i = DS_i\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = S_data['Repair_cost'][occ_type][ds_i-1]\n\n df_db.loc[\n (cmp_id, 'Time'),\n f'DS{DS_i}-Theta_0'] = S_data['Repair_time'][occ_type][ds_i-1]\n\n # Second, the non-structural drift sensitive one\n NSD_data = raw_data['NonStructural_Drift_Sensitive_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'NSD.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 5):\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = NSD_data['Repair_cost'][occ_type][DS_i-1]\n\n # Third, the non-structural acceleration sensitive fragilities\n NSA_data = raw_data['NonStructural_Acceleration_Sensitive_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'NSA.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 5):\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = NSA_data['Repair_cost'][occ_type][DS_i-1]\n\n # Fourth, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'LF.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS4 and DS5 have identical repair consequences\n if DS_i == 5:\n ds_i = 4\n else:\n ds_i = DS_i\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = LF_data['Repair_cost'][occ_type][ds_i - 1]\n\n df_db.loc[\n (cmp_id, 'Time'),\n f'DS{DS_i}-Theta_0'] = LF_data['Repair_time'][occ_type][ds_i - 1]\n\n # remove empty rows (from the end)\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # The damage quantity unit is the same for all consequence values\n df_db.loc[:, 'Quantity-Unit'] = \"1 EA\"\n\n # The output units are also indentical among all components\n df_db.loc[idx[:, 'Cost'], 'DV-Unit'] = \"loss_ratio\"\n df_db.loc[idx[:, 'Time'], 'DV-Unit'] = \"day\"\n\n # convert to simple index\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the consequence data\n 
df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the repair consequence data from Hazus \"\n \"EQ\")", "def create_FEMA_P58_bldg_injury_db(\n source_file,\n target_data_file='bldg_injury_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_injury_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the injury database\n cols_to_db = [\n \"Fragility Unit of Measure\",\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n\n f'DS {DS_i}, Potential non-collapse casualty?',\n f'DS {DS_i} - Casualty Affected Area',\n f'DS {DS_i} Serious Injury Rate - Median',\n f'DS {DS_i} Serious Injury Rate - Dispersion',\n f'DS {DS_i} Loss of Life Rate - Median',\n f'DS {DS_i} Loss of Life Rate - Dispersion',\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 2, Description\",\n \"DS 3, Description\",\n \"DS 4, Description\",\n \"DS 5, Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord('-'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n df_db_source.replace('By User', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 16):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\",\n f\"DS{DS_i}-AffectedArea\",\n ]\n\n # create the MultiIndex\n comps = df_db_source.index.values\n DVs = ['S1', 'S2']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'Severity'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete_S1 = False\n incomplete_S2 = False\n\n # store units\n\n df_db.loc[cmp.Index, 'Quantity-Unit'] = (\n ' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip())\n df_db.loc[(cmp.Index, 'S1'), 'DV-Unit'] = \"persons\"\n df_db.loc[(cmp.Index, 'S2'), 'DV-Unit'] = \"persons\"\n\n # get the raw metadata for 
the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n # Note that we are assuming that all damage states are triggered by\n # a single limit state in these components.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n inj_data = {}\n ds_tot = 0\n\n # get the p10, p50, and p90 estimates for all damage states\n for DS_i in range(1, 6):\n\n casualty_model = getattr(\n cmp, f'DS_{DS_i}_Potential_non_collapse_casualty')\n\n if casualty_model is True:\n\n inj_data.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'DS_{DS_i}___Casualty_Affected_Area'),\n getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Median'),\n getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Dispersion'),\n getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Median'),\n getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Dispersion')\n ])})\n ds_tot += 1\n\n elif casualty_model is False:\n ds_tot += 1\n\n # only continue if there is injury data\n if len(inj_data) == 0:\n continue\n\n # now prepare the equivalent mutex damage states\n sim_ds_count = ds_tot\n ds_count = 2 ** (sim_ds_count) - 1\n\n # Here we take advantage of knowing that for every component with\n # simultaneous damage states, only one of the DSs has injury\n # consequences.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n ds_trig = list(inj_data.keys())[0]\n inj_data = inj_data[ds_trig]\n ds_trig = int(ds_trig[2:])\n\n for DS_i in range(1, ds_count + 1):\n ds_map = format(DS_i, f'0{sim_ds_count}b')\n\n if ds_map[-ds_trig] == '1':\n\n # store the consequence data\n for severity in ('S1', 'S2'):\n\n A_affected = inj_data[0]\n\n if severity == 'S1':\n theta_0 = inj_data[1]\n theta_1 = inj_data[2]\n elif severity == 'S2':\n theta_0 = inj_data[3]\n theta_1 = inj_data[4]\n\n if theta_0 != 0.0:\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_1'] = theta_1\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-AffectedArea'] = A_affected\n\n # store the metadata\n 
if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": f\"Pure DS{ds_pure_id}. \" +\n cmp_meta[\n f\"DS_{ds_pure_id}_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo)\n }})\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n casualty_flag = getattr(\n cmp, f'DS_{DS_i}_Potential_non_collapse_casualty')\n\n if casualty_flag is True:\n\n A_affected = getattr(cmp,\n f'DS_{DS_i}___Casualty_Affected_Area')\n\n for severity in ('S1', 'S2'):\n\n if severity == 'S1':\n theta_0 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_'\n f'Rate___Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_'\n f'Rate___Dispersion')\n elif severity == 'S2':\n theta_0 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_'\n f'Rate___Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_'\n f'Rate___Dispersion')\n\n if theta_0 != 0.0:\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_1'] = theta_1\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-AffectedArea'] = A_affected\n\n if (pd.isna(theta_0) or pd.isna(\n theta_1) or pd.isna(A_affected)):\n\n if severity == 'S1':\n incomplete_S1 = True\n else:\n incomplete_S2 = True\n\n if ~np.isnan(casualty_flag):\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\"Description\":\n cmp_meta[f\"DS_{DS_i}_Description\"]}})\n\n df_db.loc[(cmp.Index, 'S1'), 'Incomplete'] = int(incomplete_S1)\n df_db.loc[(cmp.Index, 'S2'), 'Incomplete'] = int(incomplete_S2)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.index = pd.MultiIndex.from_arrays(\n [df_db['Index'].values, df_db.index.get_level_values(1)])\n\n df_db.drop('Index', axis=1, inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 16):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n cmp_kept = df_db.index.get_level_values(0).unique()\n\n cmp_to_drop = []\n for cmp in meta_dict:\n if cmp not in cmp_kept:\n cmp_to_drop.append(cmp)\n\n for cmp in cmp_to_drop:\n del meta_dict[cmp]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the injury consequence data from FEMA \"\n \"P58\")", "def create_Hazus_EQ_fragility_db(source_file,\n target_data_file='fragility_DB_Hazus_EQ.csv',\n target_meta_file='fragility_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n design_levels = list(\n raw_data['Structural_Fragility_Groups']['EDP_limits'].keys())\n\n building_types = 
list(\n raw_data['Structural_Fragility_Groups']['P_collapse'].keys())\n\n convert_design_level = {\n 'High_code': 'HC',\n 'Moderate_code': 'MC',\n 'Low_code': 'LC',\n 'Pre_code': 'PC'\n }\n\n # initialize the fragility table\n df_db = pd.DataFrame(\n columns=[\n \"ID\",\n \"Incomplete\",\n \"Demand-Type\",\n \"Demand-Unit\",\n \"Demand-Offset\",\n \"Demand-Directional\",\n \"LS1-Family\",\n \"LS1-Theta_0\",\n \"LS1-Theta_1\",\n \"LS1-DamageStateWeights\",\n \"LS2-Family\",\n \"LS2-Theta_0\",\n \"LS2-Theta_1\",\n \"LS2-DamageStateWeights\",\n \"LS3-Family\",\n \"LS3-Theta_0\",\n \"LS3-Theta_1\",\n \"LS3-DamageStateWeights\",\n \"LS4-Family\",\n \"LS4-Theta_0\",\n \"LS4-Theta_1\",\n \"LS4-DamageStateWeights\"\n ],\n index=np.arange(len(building_types) * len(design_levels) * 5),\n dtype=float\n )\n counter = 0\n\n # First, prepare the structural fragilities\n S_data = raw_data['Structural_Fragility_Groups']\n\n for bt in building_types:\n for dl in design_levels:\n if bt in S_data['EDP_limits'][dl].keys():\n\n # create the component id\n cmp_id = f'STR.{bt}.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Roof Drift Ratio\"\n df_db.loc[counter, 'Demand-Unit'] = \"rad\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n S_data['EDP_limits'][dl][bt][LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = \\\n S_data['Fragility_beta'][dl]\n\n if LS_i == 4:\n p_coll = S_data['P_collapse'][bt]\n df_db.loc[counter, f'LS{LS_i}-DamageStateWeights'] = (\n f'{1.0 - p_coll} | {p_coll}')\n\n counter += 1\n\n # Second, the non-structural drift sensitive one\n NSD_data = raw_data['NonStructural_Drift_Sensitive_Fragility_Groups']\n\n # create the component id\n df_db.loc[counter, 'ID'] = 'NSD'\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Roof Drift Ratio\"\n df_db.loc[counter, 'Demand-Unit'] = \"rad\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = NSD_data['EDP_limits'][\n LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = NSD_data['Fragility_beta']\n\n counter += 1\n\n # Third, the non-structural acceleration sensitive fragilities\n NSA_data = raw_data['NonStructural_Acceleration_Sensitive_Fragility_Groups']\n\n for dl in design_levels:\n\n # create the component id\n cmp_id = f'NSA.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Floor Acceleration\"\n df_db.loc[counter, 'Demand-Unit'] = \"g\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n NSA_data['EDP_limits'][dl][LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = NSA_data['Fragility_beta']\n\n counter += 1\n\n # Fourth, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for bt in building_types:\n for dl in design_levels:\n if bt in LF_data['EDP_limits'][dl].keys():\n\n # create the component id\n cmp_id = f'LF.{bt}.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n 
df_db.loc[counter, 'Demand-Type'] = \"Peak Ground Acceleration\"\n df_db.loc[counter, 'Demand-Unit'] = \"g\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n LF_data['EDP_limits'][dl][bt][LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = \\\n LF_data['Fragility_beta'][dl]\n\n if LS_i == 4:\n p_coll = LF_data['P_collapse'][bt]\n df_db.loc[counter, f'LS{LS_i}-DamageStateWeights'] = (\n f'{1.0 - p_coll} | {p_coll}')\n\n counter += 1\n\n # Fifth, the ground failure fragilities\n GF_data = raw_data['Ground_Failure']\n\n for direction in ('Horizontal', 'Vertical'):\n for f_depth in ('Shallow', 'Deep'):\n # create the component id\n cmp_id = f'GF.{direction[0]}.{f_depth[0]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Permanent Ground Deformation\"\n df_db.loc[counter, 'Demand-Unit'] = \"inch\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n df_db.loc[counter, 'LS1-Family'] = 'lognormal'\n df_db.loc[counter, 'LS1-Theta_0'] = \\\n GF_data['EDP_limits'][direction][f_depth]\n df_db.loc[counter, 'LS1-Theta_1'] = \\\n GF_data['Fragility_beta'][direction][f_depth]\n p_complete = GF_data['P_Complete']\n df_db.loc[counter, 'LS1-DamageStateWeights'] = (\n f'{1.0 - p_complete} | {p_complete}')\n\n counter += 1\n\n # remove empty rows (from the end)\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # none of them are directional,\n df_db.loc[:, 'Demand-Directional'] = 0\n\n # rename the index\n df_db.set_index(\"ID\", inplace=True)\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the fragility data\n df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the fragility data from Hazus EQ\")", "def _create_sql(self):\n\n pdbfile = self.pdbfile\n sqlfile = self.sqlfile\n\n if self.verbose:\n print('-- Create SQLite3 database')\n\n #name of the table\n #table = 'ATOM'\n\n # column names and types\n self.col = {'serial' : 'INT',\n 'name' : 'TEXT',\n 'altLoc' : 'TEXT',\n 'resName' : 'TEXT',\n 'chainID' : 'TEXT',\n 'resSeq' : 'INT',\n 'iCode' : 'TEXT',\n 'x' : 'REAL',\n 'y' : 'REAL',\n 'z' : 'REAL',\n 'occ' : 'REAL',\n 'temp' : 'REAL'}\n\n # delimtier of the column format\n # taken from http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM\n self.delimiter = {\n 'serial' : [6,11],\n 'name' : [12,16],\n 'altLoc' : [16,17],\n 'resName' :[17,20],\n 'chainID' :[21,22],\n 'resSeq' :[22,26],\n 'iCode' :[26,26],\n 'x' :[30,38],\n 'y' :[38,46],\n 'z' :[46,54],\n 'occ' :[54,60],\n 'temp' :[60,66]}\n\n if self.no_extra:\n del self.col['occ']\n del self.col['temp']\n\n # size of the things\n ncol = len(self.col)\n ndel = len(self.delimiter)\n\n\n # open the data base\n # if we do not specify a db name\n # the db is only in RAM\n # there might be little advantage to use memory\n # https://stackoverflow.com/questions/764710/sqlite-performance-benchmark-why-is-memory-so-slow-only-1-5x-as-fast-as-d\n if self.sqlfile is None:\n self.conn = sqlite3.connect(':memory:')\n \n # or we create a new db file\n else:\n if os.path.isfile(sqlfile):\n sp.call('rm %s' %sqlfile,shell=True)\n 
self.conn = sqlite3.connect(sqlfile)\n self.c = self.conn.cursor()\n\n # intialize the header/placeholder\n header,qm = '',''\n for ic,(colname,coltype) in enumerate(self.col.items()):\n header += '{cn} {ct}'.format(cn=colname,ct=coltype)\n qm += '?'\n if ic < ncol-1:\n header += ', '\n qm += ','\n\n # create the table\n query = 'CREATE TABLE ATOM ({hd})'.format(hd=header)\n self.c.execute(query)\n\n\n # read the pdb file\n # this is dangerous if there are ATOM written in the comment part\n # which happends often\n #data = sp.check_output(\"awk '/ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a safer version consist at matching against the first field\n # won't work on windows\n #data = sp.check_output(\"awk '$1 ~ /^ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a pure python way\n # RMK we go through the data twice here. Once to read the ATOM line and once to parse the data ...\n # we could do better than that. But the most time consuming step seems to be the CREATE TABLE query\n # if we path a file we read it\n if isinstance(pdbfile,str):\n if os.path.isfile(pdbfile):\n with open(pdbfile,'r') as fi:\n data = [line.split('\\n')[0] for line in fi if line.startswith('ATOM')]\n else:\n raise FileNotFoundError('File %s was not found',pdbfile)\n\n # if we pass a list as for h5py read/write\n # we directly use that\n elif isinstance(pdbfile,np.ndarray):\n data = [l.decode('utf-8') for l in pdbfile.tolist()]\n\n # if we cant read it\n else:\n print(pdbfile)\n raise ValueError('PDB data not recognized')\n\n # if there is no ATOM in the file\n if len(data)==1 and data[0]=='':\n print(\"-- Error : No ATOM in the pdb file.\")\n self.is_valid = False\n return\n\n # haddock chain ID fix\n del_copy = self.delimiter.copy()\n if data[0][del_copy['chainID'][0]] == ' ':\n del_copy['chainID'] = [72,73]\n\n # get all the data\n data_atom = []\n for iatom,atom in enumerate(data):\n\n # sometimes we still have an empty line somewhere\n if len(atom) == 0:\n continue\n\n # browse all attribute of each atom\n at = ()\n for ik,(colname,coltype) in enumerate(self.col.items()):\n\n # get the piece of data\n data = atom[del_copy[colname][0]:del_copy[colname][1]].strip()\n\n # convert it if necessary\n if coltype == 'INT':\n data = int(data)\n elif coltype == 'REAL':\n data = float(data)\n\n # append keep the comma !!\n # we need proper tuple\n at +=(data,)\n\n # append\n data_atom.append(at)\n\n\n # push in the database\n self.c.executemany('INSERT INTO ATOM VALUES ({qm})'.format(qm=qm),data_atom)", "def produce_database(database_name, is_debug):\n\t\n\t# read files from a01-a35, every file including whole ecg data and the corresponding annotation\n\tdata_annotations_set = get_ecg_data_annotations(database_name, is_debug)\n\t# divide ECG data to minute-by-minute ECG segments\n\t_ = process_ecg_data_segments(database_name, data_annotations_set, is_debug)", "def readdatabase2(self):\n fname=\"/home/alice/rl/v/vme/ADCI/DB/INPUTS.txt\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n dbinputs=[]\n for i in lines:\n if(i[0] != '#'):\n items=string.split(i)\n #print 'items= ',items,len(items)\n if(len(items)<6):\n print \"Error parsing database, not enough items in line:\"\n print items\n return None\n db={}\n db['number']=items[0]\n db['numberDIM']=items[1]\n 
db['level']=items[2]\n db['name']=items[3]\n db['detector']=items[4]\n db['signature']=items[5]\n dbinputs.append(db)\n return dbinputs", "def process_program(conn, table04, table11, table12, table13,\n table14, table15, table16):\n c = get_cursor(conn)\n c.execute('''ATTACH[{}]as AM '''.format(A.DB_FILE_1))\n\n drop_table(conn, table11)\n drop_table(conn, table12)\n drop_table(conn, table13)\n drop_table(conn, table14)\n drop_table(conn, table15)\n drop_table(conn, table16)\n\n # create table11\n create_table(conn, create_table11_sql)\n c.execute('Insert into[{}]select * FROM AM.[{}] WHERE beam = ? or valve = ?'\n .format(table11, table04), [(\"off\"), (\"off\")])\n # create table12\n create_table(conn, create_table12_sql)\n c.execute('Insert into[{}]select * FROM AM.[{}] WHERE beam= ? and valve= ?'\n .format(table12, table04), [(\"on\"), (\"on\")])\n # update table12 with background correction\n update_with_background(conn, table11, table12)\n # create table13\n create_table(conn, create_table13_sql)\n c.execute('Insert into[{}]select * FROM[{}]WHERE sf1_frequency= ? and \\\n sf2_frequency= ?'.format(table13, table12), [(0), (0)])\n # create table14\n create_table(conn, create_table14_sql)\n c.execute('Insert into[{}]select * FROM[{}]WHERE sf1_frequency != ? and \\\n sf2_frequency= ?'.format(table14, table12), [(0), (0)])\n # create table15\n create_table(conn, create_table15_sql)\n c.execute('Insert into[{}]select * FROM[{}]WHERE sf1_frequency= ? and \\\n sf2_frequency != ?'.format(table15, table12), [(0), (0)])\n # create table16\n create_table(conn, create_table16_sql)\n c.execute('Insert into[{}]select * FROM[{}]WHERE sf1_frequency != ? and \\\n sf2_frequency != ?'.format(table16, table12), [(0), (0)])\n c.execute(\"DETACH DATABASE 'AM'\")\n conn.commit()\n conn.close()\n print(\"process part is achieved!\")", "def readVCTPINPUTS(self): \n #fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/VALID.CTPINPUTS\"\n fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/ctpinputs.cfg\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n dbinputs=[]\n count=0\n #print \"look for me if you want different inputs range...\"\n for i in lines:\n if(i[0] == 'l' and i[1] == '0'): continue\n if i == \"\\n\": continue\n if(i[0] != '#'):\n items=string.split(i)\n #print 'items= ',items,len(items)\n if(len(items)<6):\n print \"Error parsing database, not enough items in line:\"\n print items\n continue\n #return None\n if items[3] == 'M': continue\n count=count+1\n #if count<6 or count>11 : continue\n #if count>10 and count<24 : continue\n #if count<16: continue\n #if count > 4 and count < 15: continue\n #if items[3] != '1': continue\n #if items[2] != \"EMCAL\": continue\n #if (items[2] != \"SPD\") and (items[2] != \"T0\"): continue\n flag=1\n for i in self.detectors:\n if items[2].find(i)>=0 or i.find(\"ALL\")>=0:\n flag=0;\n break\n if flag: continue\n # input not connected\n if items[7] == '0' and items[3] == '0': continue\n db={}\n db['name']=items[0]\n db['detector']=items[2]\n db['level']='L'+items[3]\n db['signature']=items[4]\n #db['number']=items[5]\n db['number']=items[7]\n db['numberDIM']=items[6]\n db['ctpnum']=items[5]\n db['Edge'] = items[8]\n db['Delay'] = items[9]\n dbinputs.append(db)\n #print \"Adding: \", db\n return dbinputs", "def sfp_prior_preparation(portshow_sfp_aggregated_df, 
pattern_dct):\n\n sfp_aggregated_modified_df = portshow_sfp_aggregated_df.copy()\n # drop duplicated port rows\n sfp_aggregated_modified_df.drop_duplicates(subset=['configname', 'chassis_name', 'chassis_wwn', \n 'switchName', 'switchWwn', 'slot', 'port'], inplace=True)\n # extract transceiver speed\n sfp_aggregated_modified_df['Transceiver_speed_extracted'] = \\\n sfp_aggregated_modified_df['Transceiver_mode'].str.extract(pattern_dct['transceiver_speed'])\n # extract transceiver mode\n sfp_aggregated_modified_df['Transceiver_mode_extracted'] = \\\n sfp_aggregated_modified_df['Transceiver_mode'].str.extract(pattern_dct['transceiver_mode'])\n # merge sfp speed and mode (lw, sw)\n sfp_aggregated_modified_df = dfop.merge_columns(sfp_aggregated_modified_df, summary_column='Transceiver_speed_mode_extracted', \n merge_columns=['Transceiver_speed_extracted', 'Transceiver_mode_extracted'], \n sep=' ', drop_merge_columns=False, sort_summary=False)\n # merge port state with transceiver details\n # add 'No_SFP_module' tag for cu media in blade switches to mark portPhys status\n # mask_vendor_no_sfp_module = sfp_aggregated_modified_df['Transceiver_Name'] == 'No SFP module'\n # mask_portphys_no_module = sfp_aggregated_modified_df['portPhys'] == 'No_Module'\n # sfp_aggregated_modified_df.loc[~mask_portphys_no_module & mask_vendor_no_sfp_module, 'No_SFP_module'] = 'No_SFP_module'\n sfp_aggregated_modified_df = dfop.merge_columns(sfp_aggregated_modified_df, summary_column='PortPhys_transceiver', \n merge_columns=['portPhys', 'Transceiver_speed_mode_extracted'], \n sep=' ', drop_merge_columns=False, sort_summary=False)\n # add annotation to the intervals\n comment_sfp_readings_interval(sfp_aggregated_modified_df) \n # transceiver support\n comment_sfp_support(sfp_aggregated_modified_df)\n # transceiver form factor (qsfp, dsfp)\n comment_specific_sfp(sfp_aggregated_modified_df, sfp_specification_column='Transceiver_form_factor', sfp_specification_name='Form factor', normal_value='sfp', upper_case_spec=True)\n # long distance sfps\n comment_specific_sfp(sfp_aggregated_modified_df, sfp_specification_column='Transceiver_distanceMax', sfp_specification_name='Distance', normal_value='normal')\n # merge vendor, part number and transcever details (speed and mode) \n sfp_aggregated_modified_df = dfop.merge_columns(\n sfp_aggregated_modified_df, summary_column='Transceiver_Name_PN', \n merge_columns=['Transceiver_Name', 'Transceiver_PN', 'Transceiver_speed_mode_extracted'], \n sep=' ', drop_merge_columns=False)\n # port_quantity column\n sfp_aggregated_modified_df['Port_quantity'] = 'Port_quantity'\n # transceiver quantity column\n mask_sfp_pn_notna = sfp_aggregated_modified_df['Transceiver_PN'].notna()\n sfp_aggregated_modified_df.loc[mask_sfp_pn_notna, 'Transceiver_quantity'] = 'Transceiver_quantity'\n return sfp_aggregated_modified_df", "def __init__(self):\n # File settings and locations.\n self.DATA_DIR = 'data'\n self.DATA_COL_DIR = 'data_collated'\n\n self.FIRE_DATABASE = 'FPA_FOD_20170508.sqlite'\n self.CLIMATE_DATA = 'GlobalLandTemperaturesByCity.csv'\n self.STOCK_DATA = 'historical_stock_prices.csv'\n self.COMBINED_DATA = 'combined_data.db'\n\n self.MODEL_PATH = 'models/dnn_wildfires.ckpt'\n\n # Setting to use reduced data for prototyping purposes.\n self.prototyping = False\n self.sample_size = 80000\n\n # Start date of data\n self.start = pd.to_datetime('1992-01-01')\n\n # Stocks in stock data to keep for analysis.\n self.stocks = ['MSFT', 'AAPL', 'GE', 'JNJ', 'JPM', 'PG']\n\n # Settings for validation 
and test set partitioning.\n self.val_set_ratio = 0.15\n self.test_set_ratio = 0.15\n\n # Separation of features for pipeline preparation \n self.cat_attribs = ['STATE', 'FIRE_SIZE_CLASS', 'OWNER_CODE', 'City']\n self.num_attribs = ['FIRE_YEAR', 'LATITUDE', 'LONGITUDE', 'FIRE_SIZE', \n 'FIRE_LENGTH', 'DIST_TO_MAJOR_CITY', 'AverageTemperature',\n 'AverageTemperatureUncertainty', 'AAPL', 'GE', 'JNJ', \n 'JPM', 'MSFT', 'PG']\n self.cycle_cols = ['DISC_MONTH', 'DISC_DAY_OF_WEEK', 'DISCOVERY_TIME', \n 'DISCOVERY_DOY', 'CONT_MONTH', 'CONT_DAY_OF_WEEK',\n 'CONT_TIME']\n\n # Define the ranges of the cycles in cycle_cols and whether any offset for\n # zero-indexing is needed (i.e., 'DISC_MONTH' cycles over a 12 month period\n # and the months need an offset of one to start the indicies at 0 for Jan.).\n self.cycle_ranges = [12, 7, 2400, 365, 12, 7, 2400]\n self.cycle_offsets = [1, 0, 0, 1, 1, 0, 0]\n\n # Parameters for deep learning model determined from randomized \n # hyperparameter search.\n self.n_hidden_layers = 4\n self.n_neurons = 200\n self.batch_size = 500\n self.batch_norm_momentum = 0.999\n self.dropout_rate = 0.4\n self.learning_rate = 0.01\n self.activation = tf.nn.elu\n\n # Hyperparameter settings .\n self.hp_search = False", "def generate_fixed_parameter_matrix(self) -> None:\n\n fixed_parameter_ids = self.amici_model.getFixedParameterIds()\n self.nk = len(fixed_parameter_ids)\n print(Fore.CYAN + \"Number of fixed parameters:\",\n len(fixed_parameter_ids))\n\n # Create in-memory table, write all at once for speed\n fixed_parameter_matrix = np.full(\n shape=(self.nk, self.num_condition_vectors),\n fill_value=np.nan)\n for i in range(len(fixed_parameter_ids)):\n self.handle_fixed_parameter(i, fixed_parameter_ids[i],\n fixed_parameter_matrix)\n\n self.create_fixed_parameter_dataset_and_write_attributes(\n fixed_parameter_ids, fixed_parameter_matrix)\n\n self.f.flush()", "def MaxRecoveryORM(Vc1,Vc2,Vc3,Vk,Vw,Va,Vf,Dc1,Dc2,Dc3,Dk,Dw,Da,Df,Adsorrption_Model,RSK_In,PK,PI,PF,FOut):\n#\n#\t1. Define conversion factors and Standard Temperature and Pressure (STP) conditions:\n#\t====================================================================================\n\tTSTD=20.00\t# Standard Temperature is 20 Deg C\n\tPSTD=14.7\t# Standard Pressure is 14.7 psi or 1 atm\n\n#\t2. Compute volumes of water and hydrocarbon components:\n#\t=======================================================\n\tVw=PHIe*Swe # Volume of water\n\tVh=PHIe*(1-Swe) # Volume of hydrocarbons\n#\n#\t3. Define volumes of free and adsorbed gas at reservoir conditions:\n#\t===================================================================\n\tif(Adsorrption_Model==\"1. Fixed RSK Value\"):\n\t\tVa=RSK_In*Vk\n\telse:\n\t\tVa=RSK_In*Vk*(PI/PF)/(1+(PI/PK))\n\tif(Va>=Vh):\n\t\tVa=Vh\n\tVf=Vh-Va\n#\n#\t4. Calculate the density of the formation at reservoir conditions:\n#\t===============================================================\n\tIFD=Vc1*Dc1+Vc2*Dc2+Vc3*Dc3+Vk*Dk+Vw*Dw+Va*Da+Vf*Df\n#\n#\t5. 
Calculate the volume of gas per gram of rock at reservoir conditions in cm3:\n#\t===============================================================================\n\tIPV=(Va+Vf)/IFD", "def create_patolli(database='red_cod-db.pkl', sites = -1, elements=-1, maxatoms=-1,\r\n dictionary='structure_dictionary', features='datosrahm.csv',\r\n control_file='model_control_file', \r\n verbose=1, test_frac = 0.15, local_function='fij_2.0_25_diccio',\r\n test_with_all_false = False):\r\n \r\n start_main=time.time()\r\n \r\n X, _, _, _, df = raw_features_extractor(database=database, sites = sites, \r\n elements = elements, maxatoms = maxatoms, \r\n dictionary=dictionary, features=features)\r\n \r\n X = compute_quotients(X=X)\r\n X, df = append_local_functions(X = X, df = df, local_function = local_function)\r\n X, _ , df, _ = split_collection(X = X, df = df, frac = test_frac)\r\n \r\n Y = df['target'].values\r\n class_names=list(set(df['target']))\r\n \r\n subnets=X.shape[1]\r\n features=X.shape[2]\r\n \r\n \r\n average = np.mean(X, axis=0) \r\n stdev = np.std(X, axis=0)\r\n \r\n X = (X - average)/stdev\r\n \r\n dicfeatstand = {'mean':average,'std':stdev}\r\n np.save('feature_standarisation',dicfeatstand)\r\n \r\n with open('feature_standarisation.txt','w') as f:\r\n f.write('X matrix has dimensions '+str(X.shape[0])+' samples x ' + \\\r\n str(X.shape[1]) + ' sites x ' + str(X.shape[2]) + \\\r\n ' features'+'\\n'+'\\n')\r\n f.write('Features - mean:'+'\\n'+'\\n')\r\n f.write(str(average)+'\\n'+'\\n')\r\n f.write('Features - std:'+'\\n'+'\\n')\r\n f.write(str(stdev))\r\n f.close()\r\n \r\n Xor=copy.deepcopy(X)\r\n X,y = shuffle(X,Y,random_state=0)\r\n \r\n x={}\r\n xor={}\r\n \r\n for subnet in range(subnets):\r\n x[subnet] = X[:,subnet,:]\r\n xor[subnet] = Xor[:,subnet,:]\r\n \r\n directorio = time.ctime().replace(' ', '_').replace(':','_')\r\n os.system('mkdir ' + directorio)\r\n os.system('mv compounds_collection.csv ' + directorio +'/')\r\n os.system('mv multiplicities.npy ' + directorio +'/')\r\n os.system('mv occupation_fractions.npy ' + directorio +'/')\r\n os.system('mv output_values.npy ' + directorio +'/')\r\n os.system('mv raw_features.npy ' + directorio +'/')\r\n os.system('mv X*.npy ' + directorio +'/')\r\n os.system('mv db*.csv ' + directorio +'/')\r\n os.system('mv feature_standarisation* ' + directorio +'/')\r\n \r\n \r\n ctrl_diccio = ctrl_dictionary(archivo=control_file)\r\n print('\\n')\r\n print('*************************************************************'+\r\n '*************************************************************'+\r\n '*************************************************************'+\r\n '*************************************************************')\r\n print('ANNs TRAINING WILL START NOW.')\r\n print('\\n')\r\n print('There are ',len(ctrl_diccio.keys()),' ANNs to train')\r\n \r\n for item in list(ctrl_diccio):\r\n print('Training ', item+1,'/',len(ctrl_diccio.keys()))\r\n diccionary = ctrl_diccio[item]\r\n \r\n hidden_layers=[float(x) for x in diccionary['HIDDEN_LAYERS'].split(\",\")]\r\n epochs=int(diccionary['EPOCHS'])\r\n batch_size=int(diccionary['BATCH_SIZE'])\r\n test_val=float(diccionary['TEST_VAL'])\r\n cost_function=diccionary['COST_FUNCTION']\r\n learning_rate=float(diccionary['LEARNING_RATE'])\r\n beta_1=float(diccionary['BETA_1'])\r\n beta_2=float(diccionary['BETA_2'])\r\n decay=float(diccionary['DECAY'])\r\n dropout=float(diccionary['DROPOUT'])\r\n activation=diccionary['ACTIVATION']\r\n name=diccionary['NAME']\r\n \r\n hidden_layers = 
np.asarray(hidden_layers)*features\r\n hidden_layers = [int(x) for x in hidden_layers]\r\n \r\n model = modelo(hidden_layers=hidden_layers, activation=activation,\r\n features=features, beta_1=beta_1, beta_2=beta_2, lr=learning_rate, decay=decay, \r\n dropout=dropout)\r\n \r\n start=time.time()\r\n data, dataframe, model = training(model, X=[x[i] for i in range(subnets)], Y = y, epochs=epochs, \r\n batch_size=batch_size, test_val=test_val, saveas=name,\r\n verbose=verbose)\r\n \r\n print('NN training lasted ',np.round(time.time() - start,2),'s')\r\n print('\\n')\r\n plotgraph(readfile=name+'.csv', outfiles=name, cost_function=cost_function)\r\n \r\n y_pred = (model.predict([xor[i] for i in range(subnets)]) > 0.5)\r\n \r\n precision, recall, fscore, support = PRFS(df['target'],y_pred)\r\n cnf_matrix=confusion_matrix(df['target'],y_pred)\r\n np.save(str(name)+'_cnfmat.npy',cnf_matrix)\r\n precision = np.round(100*precision,2)\r\n recall = np.round(100*recall,2)\r\n fscore = np.round(100*fscore,2)\r\n \r\n with open('PRFS_'+str(control_file)+'.txt', 'a') as prfs:\r\n prfs.write(str(name)+'\\n')\r\n prfs.write('classes: '+str(class_names)+'\\n')\r\n prfs.write('samples: '+str(support)+'\\n')\r\n prfs.write('precision: '+str(precision)+'\\n')\r\n prfs.write('recall: '+str(recall)+'\\n')\r\n prfs.write('f1-score: '+str(fscore)+'\\n')\r\n prfs.write('\\n')\r\n prfs.close()\r\n \r\n plt.figure(1)\r\n plot_confusion_matrix(cnf_matrix, classes=class_names,\r\n title='Confusion matrix, without normalization')\r\n plt.savefig('cnfmat_'+str(name)+'.png')\r\n \r\n plt.figure(2)\r\n plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\r\n title='Normalized confusion matrix')\r\n plt.savefig('normcnfmat_'+str(name)+'.png')\r\n \r\n plt.close('all')\r\n \r\n os.system('mv *' + name + '* ' + directorio)\r\n os.system('mv PRFS_' + str(control_file) + '.txt ' + directorio)\r\n os.system('cp ' + control_file + '.txt ' + directorio)\r\n os.system('cp ' + dictionary + '.txt ' + directorio)\r\n \r\n if test_frac != 0:\r\n test_models(directorio=directorio)\r\n \r\n if test_with_all_false:\r\n test_all_false(directorio=directorio, database=database, \r\n local_function=local_function)\r\n\r\n print('Whole process lasted ', np.round(-start_main+time.time(),2),'s') \r\n return", "def parameterize(param_directory,pdb_file,topology_file,polymer_code,polymer_length):\n\n terphenyl_top = get_terphenyl_top_directory()\n if not os.path.exists(param_directory):\n os.mkdir(param_directory)\n param_topology = str(str(param_directory)+\"/topol.top\")\n copyfile(topology_file,param_topology)\n cwd = os.getcwd()\n if cwd != param_directory:\n os.chdir(param_directory)\n param_pdb = str(str(param_directory)+\"/\"+str(polymer_length)+\".pdb\")\n copyfile(pdb_file,param_pdb)\n\n # Parameterize our polymer using 'antechamber', from AmberTools.\n#\n # We parameterize the PDB structure using the param.sh BASH script written by Ben Coscia as a template: \"https://github.com/shirtsgroup/useful-scripts/blob/master/Paramaterization/GAFF/param.sh\"\n gaff_directory = str(str(terphenyl_top)+\"/setup_files/gaff\")\n replace(param_topology,'$TERPHENYL_TOP',terphenyl_top)\n replace(param_topology,'$RUN_DIRECTORY',param_directory)\n replace(param_topology,'$POLYMER_CODE ',str(\"{:<15}\".format(polymer_code)))\n replace(param_topology,'$POLYMER_CODE ',str(\"{:<3}\".format(polymer_code)))\n copyfile(str(str(gaff_directory)+\"/acpype.py\"),str(str(param_directory)+\"/acpype.py\"))\n 
copyfile(str(str(gaff_directory)+\"/insertmol2charges.py\"),str(str(param_directory)+\"/insertmol2charges.py\"))\n# copyfile(str(str(gaff_directory)+\"/anneal.mdp\"),str(run_directory+\"/anneal.mdp\"))\n # Replace the variable keyword '$NAME' in param.sh with the name of the current polymer length\n copyfile(str(str(gaff_directory)+\"/param.sh\"),str(str(param_directory)+\"/param.sh\"))\n replace(str(param_directory+\"/param.sh\"),'$NAME',polymer_length)\n replace(str(param_directory+\"/param.sh\"),'$RES',polymer_code)\n # Place the residue name in the input PDB file residue name columns\n with open(pdb_file, \"rt\") as fin:\n\n new_pdb_file = param_pdb\n with open(new_pdb_file, \"wt\") as fout:\n for line in fin:\n line_list = [char for char in line]\n line_start = ''.join(line_list[0:6])\n residue_code = ''.join(line_list[17:20])\n if line_start == 'HETATM' or line_start == 'ATOM ':\n if residue_code == ' ':\n line_list[17:20] = str(\"{:<3}\".format(polymer_code)).split()\n #del line_list[29]\n line = ''.join(line_list)\n fout.write(line)\n subprocess.run([\"chmod\",\"+x\",str(str(param_directory)+\"/param.sh\")])\n os.chdir(param_directory)\n subprocess.run([str(str(param_directory)+\"/param.sh\")])\n solute_gro_file = str(str(param_directory)+\"/\"+str(polymer_length)+\".gro\")\n solute_topology_file = str(str(param_directory)+\"/\"+str(polymer_code)+\".top\")\n if cwd != param_directory:\n os.chdir(cwd)\n return(solute_gro_file,solute_topology_file)", "def _init_dataset():\n global _residues\n if _residues is not None:\n # Database is already initialized\n return\n\n # Residuue data is taken from\n # ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif\n # (2019/01/27)\n _info_dir = dirname(realpath(__file__))\n with open(join(_info_dir, \"residues.msgpack\"), \"rb\") as file:\n _residues = msgpack.unpack(\n file, use_list=False, raw=False\n )", "def prepare_experiment(assumptions):\n print(\"\\nGenerate species parameters\")\n np.random.seed(assumptions['seed']) \n params = MakeParams(assumptions) \n if assumptions[\"selected_function\"] == \"f5_invader_suppression\":\n print(\"\\nDraw invader feature\")\n params = create_invader(params, assumptions)\n print(params[\"c\"])\n \n print(\"\\nDraw per-capita function and cost\")\n f1_species_smooth, f1_species_rugged, f2_species_smooth, f2_species_rugged = draw_species_function(assumptions)\n params.update({\"f1_species_smooth\": f1_species_smooth, \"f1_species_rugged\": f1_species_rugged, \"f2_species_smooth\": f2_species_smooth, \"f2_species_rugged\": f2_species_rugged})\n gi = draw_species_cost(f1_species_smooth, assumptions)\n params.update({\"g\": gi})\n \n print(\"\\nConstruct plate\")\n np.random.seed(assumptions['seed']) \n plate = make_plate(assumptions,params)\n \n print(\"\\nAdd community function to plate\")\n plate = add_community_function(plate, assumptions, params)\n \n if not pd.isnull(assumptions[\"overwrite_plate\"]) :\n print(\"\\nUpdating the initial plate composition by overwrite_plate\")\n plate = overwrite_plate(plate, assumptions)\n \n print(\"\\nPrepare Protocol\")\n #Extract Protocol from protocol database\n algorithms = make_algorithms(assumptions)\n params_algorithm = algorithms[algorithms['algorithm_name'] == assumptions['protocol']]\n \n #Params_simulation by default contains all assumptions not stored in params.\n params_simulation = dict((k, assumptions[k]) for k in assumptions.keys() if k not in params.keys())\n \n return params, params_simulation , params_algorithm, plate", "def 
ogip_dictionary_arf():\n \"\"\"\n this function returns the required and optional keywords and columns\n as defined by OGIP 92-002 and 92-002a\n \"\"\"\n global status\n global REPORT\n\n \"\"\"\n FOR the ARF file:\n \"\"\"\n \"\"\"\n Define REQUIRED Keywords for SPECRESP EXTENSION (note: EXTNAME is SPECRESP)\n \"\"\"\n reqkeys = ['TELESCOP', 'INSTRUME']\n reqkeys.append('FILTER')\n reqkeys.append('CHANTYPE[PHA|PI]')\n reqkeys.append('DETCHANS')\n reqkeys.append('HDUCLASS[OGIP]')\n reqkeys.append('HDUCLAS1[RESPONSE]')\n reqkeys.append('HDUCLAS2[SPECRESP]')\n reqkeys.append('HDUVERS[1.1.0]')\n reqkeys.append('TLMIN*')\n reqkeys.append('NUMGRP')\n reqkeys.append('NUMELT')\n reqkeys.append('CCLS0001[CPF]')\n reqkeys.append('CCNM0001[SPECRESP]')\n reqkeys.append('CDTP0001[DATA]')\n reqkeys.append('CVSD0001')\n reqkeys.append('CVST0001')\n reqkeys.append('CDES0001')\n\n \"\"\"\n Define recommended Keywords\n \"\"\"\n optkeys = ['PHAFILE']\n optkeys.append('LO_THRES') # minimum probability threshold in matrix (values < this are set to 0)\n optkeys.append('HDUCLAS3[REDIST|DETECTOR|FULL]') # required if channel numbering doesn't start at 1\n optkeys.append('RMFVERSN[1992A]')\n optkeys.append('HDUVERS1[1.1.0]')\n optkeys.append('HDUVERS2[1.2.0]')\n\n \"\"\"\n Define Required Columns\n \"\"\"\n reqcols = ['ENERG_LO'] # lower energy bound of bin (keV)\n reqcols.append('ENERG_HI') # upper energy bound of bin (keV); generally ENERG_LO(J) = ENERG_HI(J-1)\n reqcols.append('SPECRESP') # the \"effective area\"\n\n\n \"\"\"\n Define Optional Columns\n \"\"\"\n optcols = [] # dispersion order for grating data\n\n specresp = {'KEYWORDS':{'REQUIRED':reqkeys,'RECOMMENDED':optkeys}, 'COLUMNS':{'REQUIRED':reqcols,'RECOMMENDED':optcols}}\n\n extns={'REQUIRED':['SPECRESP'],'OPTIONAL':[]}\n #\n # create structure for the ARF file\n #\n ogip = {'EXTENSIONS':extns,\n 'SPECRESP':specresp,\n 'REFERENCE':'OGIP/92-002',\n 'REFURL':'https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/ofwg_recomm.html',\n 'REFTITLE':'The Calibration Requirements for Spectral Analysis'}\n\n return ogip", "def rpfp(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['rpfp']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for i in xrange(1,4):\n label = \"RPFP{0}\".format(str(i))\n distillate_label = \"L{0}-E_C{1}\".format(str(i),str(i))\n lAng_label = 'L{0}ANG'.format(str(i))\n cAng_label = 'C{0}ANG'.format(str(i))\n lMag_label = 'C{0}MAG'.format(str(i))\n cMag_label = 'C{0}MAG'.format(str(i))\n distillate_label = get_distillate_label([lAng_label, cAng_label, lMag_label, cMag_label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_lAng_label = lAng_label\n dep_lAng_name = fields['deps'][0]\n dep_lAng_uuid = self.uuid_map[lAng_label]\n dep_cAng_label = cAng_label\n dep_cAng_name = fields['deps'][1]\n dep_cAng_uuid = self.uuid_map[cAng_label]\n dep_lMag_label = lMag_label\n dep_lMag_name = fields['deps'][2]\n dep_lMag_uuid = self.uuid_map[lMag_label]\n dep_cMag_label = cMag_label\n dep_cMag_name = fields['deps'][3]\n dep_cMag_uuid = self.uuid_map[cMag_label]\n \n deps = [[dep_lAng_label, dep_lAng_name, dep_lAng_uuid],\n [dep_lMag_label, dep_lMag_name, dep_lMag_uuid],\n [dep_cAng_label, dep_cAng_name, dep_cAng_uuid],\n [dep_cMag_label, dep_cMag_name, dep_cMag_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, 
self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"RPFP\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map['REAC_PWR{0}'.format(i)] = emitted[-3][-36:]\n output_uuid_map['FUND_PWR{0}'.format(i)] = emitted[-2][-36:]\n\n filename = \"{0}/RPFP_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def design_TIA_inverter(db_n, db_p, sim_env,\n vg_res, rf_res,\n vdd_nom, vdd_vec, cpd, cload, \n rdc_min, fbw_min, pm_min, BER_max,\n vos, isw_pkpk,\n vb_n, vb_p, error_tol=0.05, ibias_max=20e-6):\n # Finds all possible designs for one value of VDD, then\n # confirm which work with all other VDD values.\n possibilities = []\n\n vg_vec = np.arange(0, vdd_nom, vg_res)\n \n for vg in vg_vec:\n print(\"VIN:\\t{0}\".format(vg))\n n_op_info = db_n.query(vgs=vg, vds=vg, vbs=vb_n-0)\n p_op_info = db_p.query(vgs=vg-vdd_nom, vds=vg-vdd_nom, vbs=vb_p-vdd_nom)\n \n if np.isinf(ibias_max):\n nf_n_max = 200\n else:\n nf_n_max = int(round(ibias_max/n_op_info['ibias']))\n \n nf_n_vec = np.arange(1, nf_n_max, 1)\n for nf_n in nf_n_vec:\n # Number of fingers can only be integer,\n # so increase as necessary until you get\n # sufficiently accurate/precise bias + current match\n ratio_good, nf_p = verify_ratio(n_op_info['ibias'],\n p_op_info['ibias'],\n nf_n,\n error_tol)\n if not ratio_good:\n continue\n\n # Getting small signal parameters to constrain Rf\n inv = LTICircuit()\n inv.add_transistor(n_op_info, 'out', 'in', 'gnd', fg=nf_n)\n inv.add_transistor(p_op_info, 'out', 'in', 'gnd', fg=nf_p)\n inv_num, inv_den = inv.get_num_den(in_name='in', out_name='out', in_type='v')\n A0 = abs(inv_num[-1]/inv_den[-1])\n \n gds_n = n_op_info['gds'] * nf_n\n gds_p = p_op_info['gds'] * nf_p\n gds = abs(gds_n) + abs(gds_p)\n ro = 1/gds\n \n # Assume Rdc is negative, bound Rf\n rf_min = max(rdc_min*(1+A0)/A0 + ro/A0, 0)\n rf_vec = np.arange(rf_min, rdc_min*2, rf_res)\n for rf in rf_vec:\n # With all parameters, check if it meets small signal spec\n meets_SS, SS_vals = verify_TIA_inverter_SS(n_op_info, p_op_info,\n nf_n, nf_p, rf, cpd, cload,\n rdc_min, fbw_min, pm_min)\n # With all parameters, estimate if it will meet noise spec\n meets_noise, BER = verify_TIA_inverter_BER(n_op_info, p_op_info, \n nf_n, nf_p,\n rf, cpd, cload,\n BER_max, vos, isw_pkpk)\n \n meets_spec = meets_SS # and meets_noise\n # If it meets small signal spec, append it to the list\n # of possibilities\n if meets_spec:\n possibilities.append(dict(vg=vg,\n vdd=vdd_nom,\n nf_n=nf_n,\n nf_p=nf_p,\n rf=rf,\n rdc=SS_vals['rdc'],\n fbw=SS_vals['fbw'],\n pm=SS_vals['pm'],\n ibias=ibias_n,\n BER=BER))\n elif SS_vals['fbw'] != None and SS_vals['fbw'] < fbw_min:\n # Increasing resistor size won't help bandwidth\n break\n \n # Go through all possibilities which work at the nominal voltage\n # and ensure functionality at other bias voltages\n # Remove any nonviable options\n print(\"{0} working at nominal VDD\".format(len(possibilities)))\n for candidate in possibilities:\n nf_n = candidate['nf_n']\n nf_p = candidate['nf_p']\n rf = candidate['rf']\n for vdd in vdd_vec:\n new_op_dict = vary_supply(vdd, db_n, db_p, nf_n, nf_p, vb_n, vb_p)\n vg = new_op_dict['vb']\n n_op = new_op_dict['n_op']\n p_op = new_op_dict['p_op']\n \n # Confirm small signal spec is met\n meets_SS, scratch = verify_TIA_inverter_SS(n_op, p_op,\n nf_n, nf_p, rf, 
cpd, cload,\n rdc_min, fbw_min, pm_min)\n \n # Confirm noise spec is met\n meets_noise, BER = verify_TIA_inverter_BER(n_op, p_op, \n nf_n, nf_p,\n rf, cpd, cload,\n BER_max, vos, isw_pkpk)\n \n meets_spec = meets_SS # and meets_noise\n \n if not meets_spec:\n possibilities.remove(candidate)\n break\n \n # Of the remaining possibilities, check for lowest power.\n # If there are none, raise a ValueError.\n if len(possibilities) == 0:\n raise ValueError(\"No final viable solutions\")\n \n print(\"{0} working at all VDD\".format(len(possibilities)))\n best_op = possibilities[0]\n for candidate in possibilities:\n best_op = choose_op_comparison(best_op, candidate)\n \n return best_op", "def prep(self):\n sq1 = 'create table TCVR ( ID, T, C, V, R , primary key ( ID ) ) ;'\n sq2 = 'create table IDX ( ID , A , primary key(A) ) ; '\n self.sq.SQX(sq1)\n self.sq.SQX(sq2)\n sq3 = \"insert into IDX VALUES ( 1 , 'A' ) ; \"\n self.sq.SQX(sq3)", "def create_Hazus_EQ_bldg_injury_db(source_file,\n target_data_file='bldg_injury_DB_Hazus_EQ.csv',\n target_meta_file='bldg_injury_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n building_types = list(\n raw_data['Structural_Fragility_Groups']['P_collapse'].keys())\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Theta_0\",\n ]\n\n # create the MultiIndex\n cmp_types = ['STR', 'LF']\n comps = [f'{cmp_type}.{bt}'\n for cmp_type in cmp_types for bt in building_types]\n DVs = ['S1', 'S2', 'S3', 'S4']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # First, prepare the structural damage consequences\n S_data = raw_data['Structural_Fragility_Groups']\n\n for bt in building_types:\n\n # create the component id\n cmp_id = f'STR.{bt}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS5 is stored under 'collapse'\n if DS_i == 5:\n ds_i = 'Collapse'\n else:\n ds_i = f'DS{DS_i}'\n\n for S_i in range(1, 5):\n s_label = f'S{S_i}'\n df_db.loc[(cmp_id, s_label), f'DS{DS_i}-Theta_0'] = (\n S_data['Injury_rates'][ds_i][bt][S_i-1])\n\n # Second, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for bt in building_types:\n\n # create the component id\n cmp_id = f'STR.{bt}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS5 is stored under 'collapse'\n if DS_i == 5:\n ds_i = 'Collapse'\n else:\n ds_i = f'DS{DS_i}'\n\n for S_i in range(1, 5):\n s_label = f'S{S_i}'\n df_db.loc[(cmp_id, s_label), f'DS{DS_i}-Theta_0'] = (\n S_data['Injury_rates'][ds_i][bt][S_i - 1])\n\n # remove empty rows\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # The damage quantity unit is the same for all consequence values\n df_db.loc[:, 'Quantity-Unit'] = \"1 EA\"\n\n # The output units are also indentical among all components\n df_db.loc[:, 'DV-Unit'] = \"injury_rate\"\n\n # convert to simple index\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the consequence data\n 
df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the injury consequence data from Hazus \"\n \"EQ\")", "def __init__(self, calc_id, particle_name, xp_partition):\n tdc_FMCI_XP_Data_Base.__init__(self)\n # name and calc_id\n self.name = particle_name\n self.calc_id = calc_id\n # setup XP_Data --------------------\n sample_dict = dict(name='regular', n_reduce=1, n_min=1)\n self.xp = tdc_XP_Data(calc_id, particle_name, sample_dict, get_weight=True)\n # interface to timetable -----------\n self.timetable = self.xp.timetable\n # setup properties -----------------\n setup_props = tdc_Setup_Props(calc_id)\n # normalization parameters\n self.W0 = setup_props.get_papam('FMPProps/W0')\n self.L = setup_props.get_papam('/GridProps/L')\n # physical parameters from \"setup_properties.h5\"\n self.PSR_P = setup_props.get_papam('/PulsarGapProps/P')\n self.PSR_B12 = setup_props.get_papam('/PulsarGapProps/B_12')\n self.PSR_Lcm = setup_props.get_papam('/GridProps/L_cm')\n # physical parameters from \"cascade.input\": THETA and CHI\n infile=AT.FileInput()\n infile.ReadFile(tdc_Filenames.get_full_filename(calc_id, 'cascade.input'))\n infile.ChangeGroup('GEOMETRY')\n self.PSR_Theta = infile.get_param('THETA')\n infile.ChangeGroup() \n infile.ChangeGroup('DIMENSIONAL_CONSTANTS::PSR_ConstsInitializer')\n self.PSR_Chi = infile.get_param('CHI')\n infile.ChangeGroup() \n # set xp_partition =================\n self.set_xp_partition(xp_partition)", "def data(dbfilename = os.path.expanduser('~/python/project/znuc2012.S4.star.el.y.stardb.gz')):\n db = stardb.load(dbfilename) # loads database\n nmass = db.nvalues[0] # finds the number of values\n masses = db.values[0][:nmass] #creates a vector of the initial masses\n isodb = stardb.load(os.path.expanduser('~/python/project/znuc2012.S4.star.deciso.y.stardb.gz'))\n \n massnumber = []\n for x in range(len(isodb.ions)):\n mn = isodb.ions[x].A\n massnumber.append(mn)\n massnumber = np.array(massnumber)\n np.save(os.path.expanduser('~/python/project/filestoload/Massnumber'), massnumber) \n####################### \n# write all energy and mixing values\n\n energyvalues = np.unique(db.fielddata['energy'])\n mixingvalues = np.unique(db.fielddata['mixing'])\n masterremnant = [] # result will be a multidimensional array\n elementdata = []\n isodata = []\n r = len(db.ions) # for loop iteration\n w = len(isodb.ions)\n for energy in energyvalues:\n remmixingarray = [] # reinitialise the next dimension\n elmixingarray = []\n isomixingarray = []\n for mixing in mixingvalues:\n \n \n ii = np.logical_and(np.isclose(db.fielddata['energy'], energy), np.isclose(db.fielddata['mixing'], mixing))\n \n mass = db.fielddata[ii]['remnant']\n remmixingarray.append(mass) # this is an array of remnant masses for one energy and every mixing value\n \n elfill = [] # reinitialise the next dimension again\n isofill = []\n \n \n for m in range(w):\n \n a = isodb.ions[m] #for obtaining the element string\n kk = np.where(isodb.ions==isotope.ion(a)) # finding the indices in db.ions for a particular element\n jj = np.where(ii)\n isotopes = isodb.data[jj, kk][0] # array of abundances for that particular element\n isofill.append(isotopes) # this is an array of element data for every mass for one energy and one mixing value\n\n\n\n\n isomixingarray.append(isofill) \n \n \n masterremnant.append(remmixingarray) # these master arrays have every bit of data under its own energy. 
so called like elementdata[energy][mixing][elementnumber] gives the element data for every star for a single element.\n \n isodata.append(isomixingarray)\n \n np.save(os.path.expanduser('~/python/project/filestoload/IsoData'), isodata)\n np.save(os.path.expanduser('~/python/project/filestoload/RemnantMasses'), masterremnant)\n np.save(os.path.expanduser('~/python/project/filestoload/Ioninfo'), isodb.ions)\n time = [] \n \n for mass in masses: # for loop will cycle through the masses and grab the lifetime of each star\n s = str(mass) # converts the mass number to a string for file acquiring\n if s.endswith('.0'): # formatting issue, to match the filenames\n s = s[:-2] \n filename = os.path.expanduser('~/python/project/dumps/z{}#presn').format(s)\n # grabs filename corrosponding to this mass\n d = kepdump.load(filename) # loads the kepdump data for this star\n time.append(d.time) \n yr = 365.2425*86400 \n time = np.array(time)/yr\n dataarray = [masses, time]\n\n\n return dataarray", "def read_pdb(self, fname, ff='WK'):\n\n atoms = symmpdb(fname)\n self.atom_pos = atoms[:, 0:3] / 10 ** 10 # convert unit from Angstroms to m\n tmp = (100 * atoms[:, 3] + atoms[:, 4]).astype(\n int) # hack to get split idx from the sorted atom array\n atom_type, idx = np.unique(np.sort(tmp), return_index=True)\n self.num_atom_types = len(atom_type)\n self.split_idx = np.append(idx, [len(tmp)])\n\n if ff == 'WK':\n \"\"\"\n Here, one tries to calculate the form factor from formula and tables.\n Therefore, one needs to setup some reference points for interpolation.\n Here, the qs variable is such a variable containing the momentum length\n at which one calculate the reference values.\n \"\"\"\n # set up q samples and compton\n qs = np.linspace(0, 10, 101) / (2.0 * np.pi * 0.529177206 * 2.0)\n self.q_sample = qs\n self.compton_q_sample = qs\n self.num_q_samples = len(qs)\n self.num_compton_q_samples = len(qs)\n self.sBound = np.zeros(self.num_q_samples)\n self.nFree = np.zeros(self.num_q_samples)\n\n # calculate form factor using WaasKirf coeffs table\n wk_dbase = load_waaskirf_database()\n for i in idx:\n if i == 0:\n zz = int(atoms[i, 3]) # atom type\n qq = int(atoms[i, 4]) # charge\n idx = np.where(wk_dbase[:, 0] == zz)[0]\n flag = True\n for j in idx:\n if wk_dbase[j, 1] == qq:\n [a1, a2, a3, a4, a5, c, b1, b2, b3, b4, b5] = wk_dbase[j, 2:]\n self.ff_table = (a1 * np.exp(-b1 * self.q_sample ** 2) +\n a2 * np.exp(-b2 * self.q_sample ** 2) +\n a3 * np.exp(-b3 * self.q_sample ** 2) +\n a4 * np.exp(-b4 * self.q_sample ** 2) +\n a5 * np.exp(-b5 * self.q_sample ** 2) + c)\n flag = False\n break\n if flag:\n print('Atom number = ' + str(zz) + ' with charge ' + str(qq))\n raise ValueError('Unrecognized atom type!')\n else:\n zz = int(atoms[i, 3]) # atom type\n qq = int(atoms[i, 4]) # charge\n idx = np.where(wk_dbase[:, 0] == zz)[0]\n flag = True\n for j in idx:\n if wk_dbase[j, 1] == qq:\n [a1, a2, a3, a4, a5, c, b1, b2, b3, b4, b5] = wk_dbase[j, 2:]\n\n ff = (a1 * np.exp(-b1 * self.q_sample ** 2) +\n a2 * np.exp(-b2 * self.q_sample ** 2) +\n a3 * np.exp(-b3 * self.q_sample ** 2) +\n a4 * np.exp(-b4 * self.q_sample ** 2) +\n a5 * np.exp(-b5 * self.q_sample ** 2) + c)\n\n self.ff_table = np.vstack((self.ff_table, ff))\n flag = False\n break\n if flag:\n print('Atom number = ' + str(zz) + ' with charge ' + str(qq))\n raise ValueError('Unrecognized atom type!')\n\n elif ff == 'pmi':\n # set up ff table\n ffdbase = load_ff_database()\n for i in idx:\n if i == 0:\n zz = int(atoms[i, 3]) # atom type\n qq = int(atoms[i, 4]) # 
charge\n self.ff_table = ffdbase[:, zz] * (zz - qq) / (zz * 1.0)\n else:\n zz = int(atoms[i, 3]) # atom type\n qq = int(atoms[i, 4]) # charge\n self.ff_table = np.vstack(\n (self.ff_table, ffdbase[:, zz] * (zz - qq) / (zz * 1.0)))\n\n # set up q samples and compton\n self.q_sample = ffdbase[:, 0] / (2.0 * np.pi * 0.529177206 * 2.0)\n self.compton_q_sample = ffdbase[:, 0] / (2.0 * np.pi * 0.529177206 * 2.0)\n self.num_q_samples = len(ffdbase[:, 0])\n self.num_compton_q_samples = len(ffdbase[:, 0])\n self.sBound = np.zeros(self.num_q_samples)\n self.nFree = np.zeros(self.num_q_samples)\n else:\n raise ValueError('Unrecognized form factor source!')", "def builddataframe(brick, path = \"..\", cutstring = \"1\", major = 0, minor = 0, newzprojection = None, charmsim = False):\n nplate =0\n\n print(\"Reading ScanSet at path \",path)\n\n #reading scanset\n sproc = r.EdbScanProc()\n sproc.eProcDirClient=path\n id = r.EdbID(brick,nplate,major,minor)\n ss = sproc.ReadScanSet(id)\n ss.Brick().SetID(brick)\n \n #preparing patterns\n npl = ss.eIDS.GetEntries()\n\n cut = r.TCut(cutstring)\n\n #intial empty arrays\n IDall = np.zeros(0,dtype=int)\n PIDall = np.zeros(0,dtype=int)\n\n xall = np.zeros(0,dtype=np.float32)\n yall = np.zeros(0,dtype=np.float32)\n zall = np.zeros(0,dtype=np.float32)\n TXall = np.zeros(0,dtype=np.float32)\n TYall = np.zeros(0,dtype=np.float32)\n\n MCEvtall = np.zeros(0,dtype=int)\n MCTrackall = np.zeros(0,dtype=int)\n Pall = np.zeros(0,dtype=np.float32)\n Flagall = np.zeros(0,dtype=int)\n\n print (\"Cut on couples \")\n cut.Print()\n\n print(\"Try to open folders at path \",path+\"/b00000\"+str(brick))\n for i in range(npl):\n idplate = ss.GetID(i)\n \n nplate = idplate.ePlate\n plate = ss.GetPlate(idplate.ePlate)\n #read pattern information\n p = r.EdbPattern()\n\n ect = r.EdbCouplesTree()\n if (nplate) <10:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p00{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n else:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p0{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n\n #addingcut\n ect.eCut = cut \n cutlist = ect.InitCutList()\n \n nsegcut = cutlist.GetN()\n nseg = ect.eTree.GetEntries()\n\n IDarray_plate = np.zeros(nsegcut,dtype=int)\n PIDarray_plate = np.zeros(nsegcut,dtype=int)\n\n xarray_plate = np.zeros(nsegcut,dtype=np.float32)\n yarray_plate = np.zeros(nsegcut,dtype=np.float32)\n zarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TXarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TYarray_plate = np.zeros(nsegcut,dtype=np.float32)\n \n MCEvtarray_plate = np.zeros(nsegcut,dtype=int)\n MCTrackarray_plate = np.zeros(nsegcut,dtype=int)\n Parray_plate = np.zeros(nsegcut,dtype=np.float32)\n Flagarray_plate = np.zeros(nsegcut,dtype=int)\n\n print (\"loop on {} segments over {} for plate {}\".format(nsegcut, nseg,nplate))\n for ientry in range(nsegcut):\n iseg = cutlist.GetEntry(ientry)\n ect.GetEntry(iseg)\n \n seg=ect.eS\n #//setting z and affine transformation\n seg.SetZ(plate.Z())\n seg.SetPID(i)\n seg.Transform(plate.GetAffineXY())\n\n if(newzprojection is not None):\n seg.PropagateTo(newzprojection[i])\n\n IDarray_plate[ientry] = seg.ID()\n PIDarray_plate[ientry] = seg.PID()\n \n xarray_plate[ientry] = seg.X()\n yarray_plate[ientry] = seg.Y()\n zarray_plate[ientry] = seg.Z()\n TXarray_plate[ientry] = seg.TX()\n TYarray_plate[ientry] = seg.TY()\n\n MCEvtarray_plate[ientry] = seg.MCEvt()\n MCTrackarray_plate[ientry] = seg.MCTrack()\n 
Parray_plate[ientry] = seg.P() \n if charmsim: #different place where pdgcode is stored\n Flagarray_plate[ientry] = seg.Vid(0)\n else:\n Flagarray_plate[ientry] = seg.Flag() \n\n #end of loop, storing them in global arrays\n IDall = np.concatenate((IDall,IDarray_plate),axis=0)\n PIDall = np.concatenate((PIDall,PIDarray_plate),axis=0)\n\n xall = np.concatenate((xall,xarray_plate),axis=0)\n yall = np.concatenate((yall,yarray_plate),axis=0)\n zall = np.concatenate((zall,zarray_plate),axis=0)\n TXall = np.concatenate((TXall,TXarray_plate),axis=0)\n TYall = np.concatenate((TYall,TYarray_plate),axis=0)\n MCEvtall = np.concatenate((MCEvtall,MCEvtarray_plate),axis=0)\n MCTrackall = np.concatenate((MCTrackall,MCTrackarray_plate),axis=0)\n Pall = np.concatenate((Pall,Parray_plate),axis=0)\n Flagall = np.concatenate((Flagall,Flagarray_plate),axis=0)\n\n data = {'ID':IDall,'PID':PIDall,'x':xall,'y':yall,'z':zall,'TX':TXall,'TY':TYall,'MCEvent':MCEvtall,'MCTrack':MCTrackall,'P':Pall,'Flag':Flagall}\n df = pd.DataFrame(data, columns = ['ID','PID','x','y','z','TX','TY','MCEvent','MCTrack','P','Flag'] )\n\n return df", "def regenerateTable():\n deleteAll()\n\n # Start generating records from start nodes, and continue generating\n # records for their children until either the bottom of the ANAD_PART_OF\n # tree is reached, or stop nodes are reached.\n\n for perspective in Perspectives.Iterator():\n perspectiveName = perspective.getName()\n starts = PerspectiveAmbits.getStartAmbitForPerspective(perspectiveName)\n stops = PerspectiveAmbits.getStopAmbitForPerspective(perspectiveName)\n startNodeOids = sets.Set(starts.keys())\n stopNodeOids = sets.Set(stops.keys())\n \n #print perspectiveName\n #print startNodeOids\n #print stopNodeOids\n \n startApos = [PartOfs.getPrimaryPathApoForNodeOid(nodeOid)\n for nodeOid in startNodeOids]\n apoList = startApos[:]\n\n while len(apoList) > 0:\n partOf = apoList.pop()\n\n # create POP record for this part of.\n \n pop = AnadPartOfPerspectiveDb.AnadPartOfPerspectiveDbRecord()\n pop.setPerspectiveName(perspectiveName)\n pop.setApoOid(partOf.getOid())\n pop.setIsAncestor(False)\n pop.setNodeOid(partOf.getNodeOid())\n pop.insert()\n \n #if partOf.getOid() == 68470:\n # print \n # print pop.getPerspectiveName()\n # print pop.getApoOid()\n # print pop.isAncestor()\n # print pop.getNodeOid()\n # print\n # print partOf.getOid()\n # print partOf.getSpecies()\n # print partOf.getNodeStartStageOid()\n # print partOf.getNodeEndStageOid()\n # print partOf.getPathStartStageOid()\n # print partOf.getPathEndStageOid()\n # print partOf.getNodeOid()\n # print partOf.getSequence()\n # print partOf.getDepth()\n # print partOf.getFullPathEmapas()\n # print partOf.getFullPath()\n # print partOf.isPrimaryPath()\n # print partOf.getParentApoOid()\n\n _addToKnowledge(pop)\n\n # if this is not a stop node, then add all its part-of kids\n # to the list of APOs to generate POP records for.\n if partOf.getNodeOid() not in stopNodeOids:\n apoList.extend(PartOfs.getByParentOid(partOf.getOid()))\n\n # for each start node, add any ancestor APOs that were not added\n # by the above process.\n ancesApos = sets.Set()\n for apo in startApos:\n parentApoOid = apo.getParentApoOid()\n if parentApoOid != None:\n parentApo = PartOfs.getByOid(parentApoOid)\n if (_byApoOid.get(parentApoOid) == None or\n _byApoOid[parentApoOid].get(perspectiveName) == None):\n ancesApos.add(parentApo)\n\n while len(ancesApos) > 0:\n ancesApo = ancesApos.pop()\n # create POP record for this ancestor\n pop = 
AnadPartOfPerspectiveDb.AnadPartOfPerspectiveDbRecord()\n pop.setPerspectiveName(perspectiveName)\n pop.setApoOid(ancesApo.getOid())\n pop.setIsAncestor(True)\n pop.setNodeOid(ancesApo.getNodeOid())\n pop.insert()\n _addToKnowledge(pop)\n\n # if this APO has a parent that hasn't yet been processed then\n # add it to list of ancestor APOs to generate records for.\n parentApoOid = ancesApo.getParentApoOid()\n if (parentApoOid != None and\n (_byApoOid.get(parentApoOid) == None or\n _byApoOid[parentApoOid].get(perspectiveName) == None)):\n parentApo = PartOfs.getByOid(parentApoOid)\n ancesApos.add(parentApo)\n \n \n \n return", "def creatingItemSets(self, iFileName):\n # import pandas as pd\n # global Database\n self.Database = []\n lineNumber = 0\n # data = []\n if isinstance(iFileName, list):\n self.Database = iFileName\n if isinstance(iFileName, pd.DataFrame):\n if iFileName.empty:\n print(\"its empty..\")\n quit()\n i = iFileName.columns.values.tolist()\n if 'Transactions' in i:\n self.Database = iFileName['Transactions'].tolist()\n if 'Patterns' in i:\n self.Database = iFileName['Patterns'].tolist()\n\n if '.CSV' in iFileName:\n file1 = pd.read_csv(iFileName)\n columns = list(file1.head(0))\n if \"Patterns\" in columns:\n with open(iFileName, newline='') as csvFile:\n data = csv.DictReader(csvFile)\n for row in data:\n listValue = row['Patterns']\n l1 = listValue.replace(\"[\", \"\")\n l2 = l1.replace(\"]\", \"\")\n li = list(l2.split(\",\"))\n li1 = [int(i) for i in li]\n self.Database.append(li1)\n if \"Transactions\" in columns:\n with open(iFileName, newline='') as csvFile:\n data = csv.DictReader(csvFile)\n for row in data:\n listValue = row['Transactions']\n l1 = listValue.replace(\"[\", \"\")\n l2 = l1.replace(\"]\", \"\")\n li = list(l2.split(\",\"))\n li1 = [int(i) for i in li]\n self.Database.append(li1)\n else:\n try:\n with open(iFileName, 'r', encoding='utf-8') as f:\n for line in f:\n # line.strip()\n if lineNumber == 0:\n lineNumber += 1\n delimiter = self.findDelimiter([*line])\n # li=[lineNumber]\n li = line.split(delimiter)\n li1 = [i.rstrip() for i in li]\n self.Database.append([i.rstrip() for i in li1])\n # else:\n # self.Database.append(li)\n # data.append([lineNumber,li1])\n else:\n lineNumber += 1\n li = line.split(delimiter)\n # if delimiter==',':\n li1 = [i.rstrip() for i in li]\n self.Database.append(li1)\n except IOError:\n print(\"File Not Found\")\n quit()\n\n # else:\n # self.Database=iFileName['Transactions'].tolist()", "def load_cfda(fullpath):\n try:\n with open(fullpath, errors='backslashreplace') as csvfile:\n\n reader = csv.DictReader(csvfile, delimiter=',', quotechar='\"', skipinitialspace='true')\n for row in reader:\n cfda_program, created = CFDAProgram.objects.get_or_create(\n program_number=row['Program Number'])\n\n cfda_program.data_source = \"USA\"\n cfda_program.program_title = row['Program Title']\n cfda_program.popular_name = row['Popular Name (020)']\n cfda_program.federal_agency = row['Federal Agency (030)']\n cfda_program.authorization = row['Authorization (040)']\n cfda_program.objectives = row['Objectives (050)']\n cfda_program.types_of_assistance = row['Types of Assistance (060)']\n cfda_program.uses_and_use_restrictions = row['Uses and Use Restrictions (070)']\n cfda_program.applicant_eligibility = row['Applicant Eligibility (081)']\n cfda_program.beneficiary_eligibility = row['Beneficiary Eligibility (082)']\n cfda_program.credentials_documentation = row['Credentials/Documentation (083)']\n cfda_program.pre_application_coordination = 
row['Preapplication Coordination (091)']\n cfda_program.application_procedures = row['Application Procedures (092)']\n cfda_program.award_procedure = row['Award Procedure (093)']\n cfda_program.deadlines = row['Deadlines (094)']\n cfda_program.range_of_approval_disapproval_time = row['Range of Approval/Disapproval Time (095)']\n cfda_program.appeals = row['Appeals (096)']\n cfda_program.renewals = row['Renewals (097)']\n cfda_program.formula_and_matching_requirements = row['Formula and Matching Requirements (101)']\n cfda_program.length_and_time_phasing_of_assistance = row['Length and Time Phasing of Assistance (102)']\n cfda_program.reports = row['Reports (111)']\n cfda_program.audits = row['Audits (112)']\n cfda_program.records = row['Records (113)']\n cfda_program.account_identification = row['Account Identification (121)']\n cfda_program.obligations = row['Obligations (122)']\n cfda_program.range_and_average_of_financial_assistance = row['Range and Average of Financial Assistance (123)']\n cfda_program.program_accomplishments = row['Program Accomplishments (130)']\n cfda_program.regulations_guidelines_and_literature = row['Regulations, Guidelines, and Literature (140)']\n cfda_program.regional_or_local_office = row['Regional or Local Office (151) ']\n cfda_program.headquarters_office = row['Headquarters Office (152)']\n cfda_program.website_address = row['Website Address (153)']\n cfda_program.related_programs = row['Related Programs (160)']\n cfda_program.examples_of_funded_projects = row['Examples of Funded Projects (170)']\n cfda_program.criteria_for_selecting_proposals = row['Criteria for Selecting Proposals (180)']\n cfda_program.url = row['URL']\n cfda_program.recovery = row['Recovery']\n cfda_program.omb_agency_code = row['OMB Agency Code']\n cfda_program.omb_bureau_code = row['OMB Bureau Code']\n if row['Published Date']:\n cfda_program.published_date = datetime.strptime(row['Published Date'], '%b, %d %Y')\n if row['Archived Date']:\n cfda_program.archived_date = datetime.strptime(row['Archived Date'], '%b, %d %Y')\n\n cfda_program.save()\n\n # self.logger.log(20, \"loaded %s %s \", cfda_program.program_number, cfda_program)\n\n except IOError:\n logger = logging.getLogger('console')\n logger.log(\"Could not open file to load from\")" ]
[ "0.6898587", "0.59708315", "0.5941394", "0.5933838", "0.5736046", "0.55414313", "0.5455951", "0.532536", "0.5279041", "0.52443", "0.5233434", "0.5197", "0.5160039", "0.5150086", "0.51443994", "0.51228565", "0.50986737", "0.5092603", "0.5084257", "0.5082074", "0.5080069", "0.50736886", "0.50733453", "0.50455433", "0.503462", "0.5031218", "0.5025769", "0.50229347", "0.5020697", "0.5004589" ]
0.6659175
1
Create an injury consequence parameter database based on the FEMA P58 data. The method was developed to process v3.1.2 of the FragilityDatabase xls that is provided with FEMA P58 2nd edition.
def create_FEMA_P58_bldg_injury_db( source_file, target_data_file='bldg_injury_DB_FEMA_P58_2nd.csv', target_meta_file='bldg_injury_DB_FEMA_P58_2nd.json'): # parse the source file df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1, true_values=["YES", "Yes", "yes"], false_values=["NO", "No", "no"]) # remove empty rows and columns df.dropna(axis=0, how='all', inplace=True) df.dropna(axis=1, how='all', inplace=True) # filter the columns we need for the injury database cols_to_db = [ "Fragility Unit of Measure", 'DS Hierarchy', ] for DS_i in range(1, 6): cols_to_db += [ f'DS {DS_i}, Potential non-collapse casualty?', f'DS {DS_i} - Casualty Affected Area', f'DS {DS_i} Serious Injury Rate - Median', f'DS {DS_i} Serious Injury Rate - Dispersion', f'DS {DS_i} Loss of Life Rate - Median', f'DS {DS_i} Loss of Life Rate - Dispersion', ] # filter the columns that we need for the metadata cols_to_meta = [ "Component Name", "Component Description", "Construction Quality:", "Seismic Installation Conditions:", "Comments / Notes", "Author", "Fragility Unit of Measure", "Round to Integer Unit?", "DS 1, Description", "DS 2, Description", "DS 3, Description", "DS 4, Description", "DS 5, Description", ] # remove special characters to make it easier to work with column names str_map = { ord(' '): "_", ord('.'): "_", ord('-'): "_", ord(':'): None, ord('('): None, ord(')'): None, ord('?'): None, ord('/'): None, ord(','): None, } df_db_source = df.loc[:, cols_to_db] df_db_source.columns = [s.translate(str_map) for s in cols_to_db] df_db_source.sort_index(inplace=True) df_meta = df.loc[:, cols_to_meta] df_meta.columns = [s.translate(str_map) for s in cols_to_meta] df_db_source.replace('BY USER', np.nan, inplace=True) df_db_source.replace('By User', np.nan, inplace=True) # initialize the output loss table # define the columns out_cols = [ "Index", "Incomplete", "Quantity-Unit", "DV-Unit", ] for DS_i in range(1, 16): out_cols += [ f"DS{DS_i}-Family", f"DS{DS_i}-Theta_0", f"DS{DS_i}-Theta_1", f"DS{DS_i}-AffectedArea", ] # create the MultiIndex comps = df_db_source.index.values DVs = ['S1', 'S2'] df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'Severity']) df_db = pd.DataFrame( columns=out_cols, index=df_MI, dtype=float ) # initialize the dictionary that stores the loss metadata meta_dict = {} # for each component... # (this approach is not efficient, but easy to follow which was considered # more important than efficiency.) for cmp in df_db_source.itertuples(): ID = cmp.Index.split('.') cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}' # store the new index df_db.loc[cmp.Index, 'Index'] = cmpID # assume the component information is complete incomplete_S1 = False incomplete_S2 = False # store units df_db.loc[cmp.Index, 'Quantity-Unit'] = ( ' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip()) df_db.loc[(cmp.Index, 'S1'), 'DV-Unit'] = "persons" df_db.loc[(cmp.Index, 'S2'), 'DV-Unit'] = "persons" # get the raw metadata for the component cmp_meta = df_meta.loc[cmp.Index, :] # store the global (i.e., not DS-specific) metadata # every component is assumed to have a comp. 
description comments = cmp_meta['Component_Description'] # the additional fields are added to the description if they exist if cmp_meta['Construction_Quality'] != 'Not Specified': comments += f'\nConstruction Quality: ' \ f'{cmp_meta["Construction_Quality"]}' if cmp_meta['Seismic_Installation_Conditions'] not in [ 'Not Specified', 'Not applicable', 'Unknown', 'Any']: comments += f'\nSeismic Installation Conditions: ' \ f'{cmp_meta["Seismic_Installation_Conditions"]}' if cmp_meta['Comments__Notes'] != 'None': comments += f'\nNotes: {cmp_meta["Comments__Notes"]}' if cmp_meta['Author'] not in ['Not Given', 'By User']: comments += f'\nAuthor: {cmp_meta["Author"]}' # get the suggested block size and replace the misleading values with ea block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1] meta_data = { "Description": cmp_meta['Component_Name'], "Comments": comments, "SuggestedComponentBlockSize": ' '.join(block_size), "RoundUpToIntegerQuantity": cmp_meta['Round_to_Integer_Unit'], "ControllingDemand": "Damage Quantity", "DamageStates": {} } # Handle components with simultaneous damage states separately if 'Simul' in cmp.DS_Hierarchy: # Note that we are assuming that all damage states are triggered by # a single limit state in these components. # This assumption holds for the second edition of FEMA P58, but it # might need to be revisited in future editions. inj_data = {} ds_tot = 0 # get the p10, p50, and p90 estimates for all damage states for DS_i in range(1, 6): casualty_model = getattr( cmp, f'DS_{DS_i}_Potential_non_collapse_casualty') if casualty_model is True: inj_data.update({f'DS{DS_i}': np.array([ getattr(cmp, f'DS_{DS_i}___Casualty_Affected_Area'), getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Median'), getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Dispersion'), getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Median'), getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Dispersion') ])}) ds_tot += 1 elif casualty_model is False: ds_tot += 1 # only continue if there is injury data if len(inj_data) == 0: continue # now prepare the equivalent mutex damage states sim_ds_count = ds_tot ds_count = 2 ** (sim_ds_count) - 1 # Here we take advantage of knowing that for every component with # simultaneous damage states, only one of the DSs has injury # consequences. # This assumption holds for the second edition of FEMA P58, but it # might need to be revisited in future editions. ds_trig = list(inj_data.keys())[0] inj_data = inj_data[ds_trig] ds_trig = int(ds_trig[2:]) for DS_i in range(1, ds_count + 1): ds_map = format(DS_i, f'0{sim_ds_count}b') if ds_map[-ds_trig] == '1': # store the consequence data for severity in ('S1', 'S2'): A_affected = inj_data[0] if severity == 'S1': theta_0 = inj_data[1] theta_1 = inj_data[2] elif severity == 'S2': theta_0 = inj_data[3] theta_1 = inj_data[4] if theta_0 != 0.0: df_db.loc[(cmp.Index, severity), f'DS{DS_i}-Family'] = 'lognormal' df_db.loc[(cmp.Index, severity), f'DS{DS_i}-Theta_0'] = theta_0 df_db.loc[(cmp.Index, severity), f'DS{DS_i}-Theta_1'] = theta_1 df_db.loc[(cmp.Index, severity), f'DS{DS_i}-AffectedArea'] = A_affected # store the metadata if ds_map.count('1') == 1: ds_pure_id = ds_map[::-1].find('1') + 1 meta_data['DamageStates'].update({f"DS{DS_i}": { "Description": f"Pure DS{ds_pure_id}. 
" + cmp_meta[ f"DS_{ds_pure_id}_Description"] }}) else: ds_combo = [f'DS{_.start() + 1}' for _ in re.finditer('1', ds_map[::-1])] meta_data['DamageStates'].update({f"DS{DS_i}": { "Description": 'Combination of ' + ' & '.join(ds_combo) }}) # for every other component... else: # now look at each Damage State for DS_i in range(1, 6): casualty_flag = getattr( cmp, f'DS_{DS_i}_Potential_non_collapse_casualty') if casualty_flag is True: A_affected = getattr(cmp, f'DS_{DS_i}___Casualty_Affected_Area') for severity in ('S1', 'S2'): if severity == 'S1': theta_0 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_' f'Rate___Median') theta_1 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_' f'Rate___Dispersion') elif severity == 'S2': theta_0 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_' f'Rate___Median') theta_1 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_' f'Rate___Dispersion') if theta_0 != 0.0: df_db.loc[(cmp.Index, severity), f'DS{DS_i}-Family'] = 'lognormal' df_db.loc[(cmp.Index, severity), f'DS{DS_i}-Theta_0'] = theta_0 df_db.loc[(cmp.Index, severity), f'DS{DS_i}-Theta_1'] = theta_1 df_db.loc[(cmp.Index, severity), f'DS{DS_i}-AffectedArea'] = A_affected if (pd.isna(theta_0) or pd.isna( theta_1) or pd.isna(A_affected)): if severity == 'S1': incomplete_S1 = True else: incomplete_S2 = True if ~np.isnan(casualty_flag): meta_data['DamageStates'].update({ f"DS{DS_i}": {"Description": cmp_meta[f"DS_{DS_i}_Description"]}}) df_db.loc[(cmp.Index, 'S1'), 'Incomplete'] = int(incomplete_S1) df_db.loc[(cmp.Index, 'S2'), 'Incomplete'] = int(incomplete_S2) # store the metadata for this component meta_dict.update({cmpID: meta_data}) # assign the Index column as the new ID df_db.index = pd.MultiIndex.from_arrays( [df_db['Index'].values, df_db.index.get_level_values(1)]) df_db.drop('Index', axis=1, inplace=True) # review the database and drop rows with no information cmp_to_drop = [] for cmp in df_db.index: empty = True for DS_i in range(1, 16): if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']): empty = False break if empty: cmp_to_drop.append(cmp) df_db.drop(cmp_to_drop, axis=0, inplace=True) cmp_kept = df_db.index.get_level_values(0).unique() cmp_to_drop = [] for cmp in meta_dict: if cmp not in cmp_kept: cmp_to_drop.append(cmp) for cmp in cmp_to_drop: del meta_dict[cmp] # convert to optimal datatypes to reduce file size df_db = df_db.convert_dtypes() df_db = base.convert_to_SimpleIndex(df_db, 0) # rename the index df_db.index.name = "ID" # save the consequence data df_db.to_csv(target_data_file) # save the metadata with open(target_meta_file, 'w+', encoding='utf-8') as f: json.dump(meta_dict, f, indent=2) print("Successfully parsed and saved the injury consequence data from FEMA " "P58")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_FEMA_P58_fragility_db(source_file,\n target_data_file='fragility_DB_FEMA_P58_2nd.csv',\n target_meta_file='fragility_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # remove the empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns that we need for the fragility database\n cols_to_db = [\n \"Demand Parameter (value):\",\n \"Demand Parameter (unit):\",\n \"Demand Location (use floor above? Yes/No)\",\n \"Directional?\",\n \"DS Hierarchy\",\n \"DS 1, Probability\",\n \"DS 1, Median Demand\",\n \"DS 1, Total Dispersion (Beta)\",\n \"DS 2, Probability\",\n \"DS 2, Median Demand\",\n \"DS 2, Total Dispersion (Beta)\",\n \"DS 3, Probability\",\n \"DS 3, Median Demand\",\n \"DS 3, Total Dispersion (Beta)\",\n \"DS 4, Probability\",\n \"DS 4, Median Demand\",\n \"DS 4, Total Dispersion (Beta)\",\n \"DS 5, Probability\",\n \"DS 5, Median Demand\",\n \"DS 5, Total Dispersion (Beta)\",\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 1, Repair Description\",\n \"DS 2, Description\",\n \"DS 2, Repair Description\",\n \"DS 3, Description\",\n \"DS 3, Repair Description\",\n \"DS 4, Description\",\n \"DS 4, Repair Description\",\n \"DS 5, Description\",\n \"DS 5, Repair Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n # replace missing values with an empty string\n df_meta.fillna('', inplace=True)\n # the metadata shall be stored in strings\n df_meta = df_meta.astype(str)\n\n # initialize the output fragility table\n df_db = pd.DataFrame(\n columns=[\n \"Index\",\n \"Incomplete\",\n \"Demand-Type\",\n \"Demand-Unit\",\n \"Demand-Offset\",\n \"Demand-Directional\",\n \"LS1-Family\",\n \"LS1-Theta_0\",\n \"LS1-Theta_1\",\n \"LS1-DamageStateWeights\",\n \"LS2-Family\",\n \"LS2-Theta_0\",\n \"LS2-Theta_1\",\n \"LS2-DamageStateWeights\",\n \"LS3-Family\",\n \"LS3-Theta_0\",\n \"LS3-Theta_1\",\n \"LS3-DamageStateWeights\",\n \"LS4-Family\",\n \"LS4-Theta_0\",\n \"LS4-Theta_1\",\n \"LS4-DamageStateWeights\"\n ],\n index=df_db_source.index,\n dtype=float\n )\n\n # initialize the dictionary that stores the fragility metadata\n meta_dict = {}\n\n # conversion dictionary for demand types\n convert_demand_type = {\n 'Story Drift Ratio': \"Peak Interstory Drift Ratio\",\n 'Link Rotation Angle': \"Peak Link Rotation Angle\",\n 'Effective Drift': \"Peak Effective Drift Ratio\",\n 'Link Beam Chord Rotation': \"Peak Link Beam Chord Rotation\",\n 'Peak Floor Acceleration': \"Peak Floor Acceleration\",\n 'Peak Floor Velocity': \"Peak Floor Velocity\"\n }\n\n # conversion dictionary for demand unit names\n convert_demand_unit = {\n 'Unit less': 
'unitless',\n 'Radians': 'rad',\n 'g': 'g',\n 'meter/sec': 'mps'\n }\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n # create a dotted component index\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete = False\n\n # store demand specifications\n df_db.loc[cmp.Index, 'Demand-Type'] = (\n convert_demand_type[cmp.Demand_Parameter_value])\n df_db.loc[cmp.Index, 'Demand-Unit'] = (\n convert_demand_unit[cmp.Demand_Parameter_unit])\n df_db.loc[cmp.Index, 'Demand-Offset'] = (\n int(cmp.Demand_Location_use_floor_above_YesNo))\n df_db.loc[cmp.Index, 'Demand-Directional'] = (\n int(cmp.Directional))\n\n # parse the damage state hierarchy\n DS_setup = parse_DS_Hierarchy(cmp.DS_Hierarchy)\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"LimitStates\": {}\n }\n\n # now look at each Limit State\n for LS_i, LS_contents in enumerate(DS_setup):\n\n LS_i = LS_i + 1\n LS_contents = np.atleast_1d(LS_contents)\n\n ls_meta = {}\n\n # start with the special cases with multiple DSs in an LS\n if LS_contents[0] in {'MutEx', 'Simul'}:\n\n # collect the fragility data for the member DSs\n median_demands = []\n dispersions = []\n weights = []\n for ds in LS_contents[1:]:\n median_demands.append(\n getattr(cmp, f\"DS_{ds[2]}_Median_Demand\"))\n\n dispersions.append(\n getattr(cmp, f\"DS_{ds[2]}_Total_Dispersion_Beta\"))\n\n weights.append(getattr(cmp, f\"DS_{ds[2]}_Probability\"))\n\n # make sure the specified distribution parameters are appropriate\n if ((np.unique(median_demands).size != 1) or (\n np.unique(dispersions).size != 1)):\n raise ValueError(f\"Incorrect mutually exclusive DS \"\n f\"definition in component {cmp.Index} at \"\n f\"Limit State {LS_i}\")\n\n if LS_contents[0] == 'MutEx':\n\n # in mutually exclusive cases, make sure the specified DS\n # weights sum up to one\n np.testing.assert_allclose(\n np.sum(np.array(weights, dtype=float)), 1.0,\n err_msg=f\"Mutually exclusive Damage State weights do \"\n f\"not sum to 1.0 in component {cmp.Index} at \"\n f\"Limit State {LS_i}\")\n\n # and save all DS 
metadata under this Limit State\n for ds in LS_contents[1:]:\n ds_id = ds[2]\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": cmp_meta[f\"DS_{ds_id}_Description\"],\n \"RepairAction\": cmp_meta[\n f\"DS_{ds_id}_Repair_Description\"]\n }})\n\n else:\n # in simultaneous cases, convert simultaneous weights into\n # mutexc weights\n sim_ds_count = len(LS_contents) - 1\n ds_count = 2 ** (sim_ds_count) - 1\n\n sim_weights = []\n\n for ds_id in range(1, ds_count + 1):\n ds_map = format(ds_id, f'0{sim_ds_count}b')\n\n sim_weights.append(np.product(\n [weights[ds_i]\n if ds_map[-ds_i - 1] == '1' else 1.0-weights[ds_i]\n for ds_i in range(sim_ds_count)]))\n\n # save ds metadata - we need to be clever here\n # the original metadata is saved for the pure cases\n # when only one DS is triggered\n # all other DSs store information about which\n # combination of pure DSs they represent\n\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": f\"Pure DS{ds_pure_id}. \" +\n cmp_meta[f\"DS_{ds_pure_id}_Description\"],\n \"RepairAction\": cmp_meta[\n f\"DS_{ds_pure_id}_Repair_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo),\n \"RepairAction\": 'Combination of pure DS repair '\n 'actions.'\n }})\n\n # adjust weights to respect the assumption that at least\n # one DS will occur (i.e., the case with all DSs returning\n # False is not part of the event space)\n sim_weights_array = np.array(sim_weights) / np.sum(sim_weights)\n\n weights = sim_weights_array\n\n theta_0 = median_demands[0]\n theta_1 = dispersions[0]\n weights_str = ' | '.join([f\"{w:.6f}\" for w in weights])\n\n df_db.loc[cmp.Index, f'LS{LS_i}-DamageStateWeights'] = weights_str\n\n # then look at the sequential DS cases\n elif LS_contents[0].startswith('DS'):\n\n # this is straightforward, store the data in the table and dict\n ds_id = LS_contents[0][2]\n\n theta_0 = getattr(cmp, f\"DS_{ds_id}_Median_Demand\")\n theta_1 = getattr(cmp, f\"DS_{ds_id}_Total_Dispersion_Beta\")\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": cmp_meta[f\"DS_{ds_id}_Description\"],\n \"RepairAction\": cmp_meta[f\"DS_{ds_id}_Repair_Description\"]\n }})\n\n # FEMA P58 assumes lognormal distribution for every fragility\n df_db.loc[cmp.Index, f'LS{LS_i}-Family'] = 'lognormal'\n\n # identify incomplete cases...\n\n # where theta is missing\n if theta_0 != 'By User':\n df_db.loc[cmp.Index, f'LS{LS_i}-Theta_0'] = theta_0\n else:\n incomplete = True\n\n # where beta is missing\n if theta_1 != 'By User':\n df_db.loc[cmp.Index, f'LS{LS_i}-Theta_1'] = theta_1\n else:\n incomplete = True\n\n # store the collected metadata for this limit state\n meta_data['LimitStates'].update({f\"LS{LS_i}\": ls_meta})\n\n # store the incomplete flag for this component\n df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.set_index('Index', inplace=True)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the fragility data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the 
fragility data from FEMA P58\")", "def create_FEMA_P58_bldg_redtag_db(\n source_file,\n target_data_file='bldg_redtag_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_redtag_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # take another pass with booleans because the first does not always work\n for true_str in (\"YES\", \"Yes\", \"yes\"):\n df.replace(true_str, True, inplace=True)\n\n for false_str in (\"NO\", \"No\", \"no\"):\n df.replace(false_str, False, inplace=True)\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the injury database\n cols_to_db = [\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n f'DS {DS_i}, Unsafe Placard Trigger Flag',\n f'DS {DS_i}, Unsafe Placard Damage Median',\n f'DS {DS_i}, Unsafe Placard Damage Dispersion'\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 2, Description\",\n \"DS 3, Description\",\n \"DS 4, Description\",\n \"DS 5, Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord('-'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n df_db_source.replace('By User', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\"\n ]\n\n # create the database index\n comps = df_db_source.index.values\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=comps,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete = False\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. 
description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n pass\n # Note that we are assuming that components with simultaneous\n # damage states do not have damage that would trigger a red tag.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n redtag_flag = getattr(\n cmp, f'DS_{DS_i}_Unsafe_Placard_Trigger_Flag')\n\n if redtag_flag is True:\n\n theta_0 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_'\n f'Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_'\n f'Dispersion')\n\n if theta_0 != 0.0:\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[cmp.Index, f'DS{DS_i}-Theta_1'] = theta_1\n\n if (pd.isna(theta_0) or pd.isna(theta_1)):\n\n incomplete = True\n\n if ~np.isnan(redtag_flag):\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\"Description\":\n cmp_meta[f\"DS_{DS_i}_Description\"]}})\n\n df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.set_index('Index', inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 6):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n cmp_kept = df_db.index.get_level_values(0).unique()\n\n cmp_to_drop = []\n for cmp in meta_dict:\n if cmp not in cmp_kept:\n cmp_to_drop.append(cmp)\n\n for cmp in cmp_to_drop:\n del meta_dict[cmp]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the red tag consequence data from FEMA \"\n \"P58\")", "def produce_database(database_name, is_debug):\n\t\n\t# read files from a01-a35, every file including 
whole ecg data and the corresponding annotation\n\tdata_annotations_set = get_ecg_data_annotations(database_name, is_debug)\n\t# divide ECG data to minute-by-minute ECG segments\n\t_ = process_ecg_data_segments(database_name, data_annotations_set, is_debug)", "def create_Hazus_EQ_fragility_db(source_file,\n target_data_file='fragility_DB_Hazus_EQ.csv',\n target_meta_file='fragility_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n design_levels = list(\n raw_data['Structural_Fragility_Groups']['EDP_limits'].keys())\n\n building_types = list(\n raw_data['Structural_Fragility_Groups']['P_collapse'].keys())\n\n convert_design_level = {\n 'High_code': 'HC',\n 'Moderate_code': 'MC',\n 'Low_code': 'LC',\n 'Pre_code': 'PC'\n }\n\n # initialize the fragility table\n df_db = pd.DataFrame(\n columns=[\n \"ID\",\n \"Incomplete\",\n \"Demand-Type\",\n \"Demand-Unit\",\n \"Demand-Offset\",\n \"Demand-Directional\",\n \"LS1-Family\",\n \"LS1-Theta_0\",\n \"LS1-Theta_1\",\n \"LS1-DamageStateWeights\",\n \"LS2-Family\",\n \"LS2-Theta_0\",\n \"LS2-Theta_1\",\n \"LS2-DamageStateWeights\",\n \"LS3-Family\",\n \"LS3-Theta_0\",\n \"LS3-Theta_1\",\n \"LS3-DamageStateWeights\",\n \"LS4-Family\",\n \"LS4-Theta_0\",\n \"LS4-Theta_1\",\n \"LS4-DamageStateWeights\"\n ],\n index=np.arange(len(building_types) * len(design_levels) * 5),\n dtype=float\n )\n counter = 0\n\n # First, prepare the structural fragilities\n S_data = raw_data['Structural_Fragility_Groups']\n\n for bt in building_types:\n for dl in design_levels:\n if bt in S_data['EDP_limits'][dl].keys():\n\n # create the component id\n cmp_id = f'STR.{bt}.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Roof Drift Ratio\"\n df_db.loc[counter, 'Demand-Unit'] = \"rad\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n S_data['EDP_limits'][dl][bt][LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = \\\n S_data['Fragility_beta'][dl]\n\n if LS_i == 4:\n p_coll = S_data['P_collapse'][bt]\n df_db.loc[counter, f'LS{LS_i}-DamageStateWeights'] = (\n f'{1.0 - p_coll} | {p_coll}')\n\n counter += 1\n\n # Second, the non-structural drift sensitive one\n NSD_data = raw_data['NonStructural_Drift_Sensitive_Fragility_Groups']\n\n # create the component id\n df_db.loc[counter, 'ID'] = 'NSD'\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Roof Drift Ratio\"\n df_db.loc[counter, 'Demand-Unit'] = \"rad\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = NSD_data['EDP_limits'][\n LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = NSD_data['Fragility_beta']\n\n counter += 1\n\n # Third, the non-structural acceleration sensitive fragilities\n NSA_data = raw_data['NonStructural_Acceleration_Sensitive_Fragility_Groups']\n\n for dl in design_levels:\n\n # create the component id\n cmp_id = f'NSA.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Floor Acceleration\"\n df_db.loc[counter, 'Demand-Unit'] = \"g\"\n 
df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n NSA_data['EDP_limits'][dl][LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = NSA_data['Fragility_beta']\n\n counter += 1\n\n # Fourth, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for bt in building_types:\n for dl in design_levels:\n if bt in LF_data['EDP_limits'][dl].keys():\n\n # create the component id\n cmp_id = f'LF.{bt}.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Ground Acceleration\"\n df_db.loc[counter, 'Demand-Unit'] = \"g\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n LF_data['EDP_limits'][dl][bt][LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = \\\n LF_data['Fragility_beta'][dl]\n\n if LS_i == 4:\n p_coll = LF_data['P_collapse'][bt]\n df_db.loc[counter, f'LS{LS_i}-DamageStateWeights'] = (\n f'{1.0 - p_coll} | {p_coll}')\n\n counter += 1\n\n # Fifth, the ground failure fragilities\n GF_data = raw_data['Ground_Failure']\n\n for direction in ('Horizontal', 'Vertical'):\n for f_depth in ('Shallow', 'Deep'):\n # create the component id\n cmp_id = f'GF.{direction[0]}.{f_depth[0]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Permanent Ground Deformation\"\n df_db.loc[counter, 'Demand-Unit'] = \"inch\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n df_db.loc[counter, 'LS1-Family'] = 'lognormal'\n df_db.loc[counter, 'LS1-Theta_0'] = \\\n GF_data['EDP_limits'][direction][f_depth]\n df_db.loc[counter, 'LS1-Theta_1'] = \\\n GF_data['Fragility_beta'][direction][f_depth]\n p_complete = GF_data['P_Complete']\n df_db.loc[counter, 'LS1-DamageStateWeights'] = (\n f'{1.0 - p_complete} | {p_complete}')\n\n counter += 1\n\n # remove empty rows (from the end)\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # none of them are directional,\n df_db.loc[:, 'Demand-Directional'] = 0\n\n # rename the index\n df_db.set_index(\"ID\", inplace=True)\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the fragility data\n df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the fragility data from Hazus EQ\")", "def create_db(name=_db_indicators,\n indi_file=os.path.join('Source', 'codes_need.csv'),\n country_file=os.path.join('Source', 'work_countries.txt')):\n\n def create_indi_country(pdfI, con, mess, db_name, freq):\n if pdfI.shape[0]==0:\n return\n print('+' * 50, '{} WORKS'.format(mess), '+' * 50)\n\n pdfI.to_sql(cmm.strINDI_db_name, con, if_exists='replace')\n print('CREATE IMF.INDICATORS table for {} indicators'.format(pdfI.shape[0]))\n pdfC = get_countryes(db_name=db_name, country_txt_file=country_file)\n pdfC.to_sql(cmm.strCOUNTRY_db_name, con=con, if_exists='replace')\n print('CREATE IMF.COUNTRIES for {0} countries.'.format(pdfC.shape[0]))\n\n update_db(db_name=db_name, start=1970, end=2000)\n update_db(db_name=db_name, 
start=1999)\n\n cmm.create_views(db_name, freq=freq)\n\n pdf = cmm.read_indicators_from_csv(indi_file)\n print(indi_file)\n\n pdfQ = pdf[pdf['Freq']=='Q']\n pdfA = pdf[pdf['Freq'] == 'Y']\n pdfM = pdf[pdf['Freq'] == 'M']\n\n #pdfC = cmm.read_countries(file_name=country_file)\n\n nameA=cmm.db_name2annu(name)\n nameM = cmm.db_name2annu(name, suff='_M')\n\n coni = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=name))\n coniA = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=nameA))\n coniM = sa.create_engine('sqlite+pysqlite:///{name}'.format(name=nameM))\n\n create_indi_country(pdfQ, coni, 'QUARTERLY', name, freq='Q')\n create_indi_country(pdfA, coniA, 'ANNUAL', nameA, freq='A')\n create_indi_country(pdfM, coniM, 'MONTHLY', nameM, freq='M')", "def create_FEMA_P58_bldg_repair_db(\n source_file,\n target_data_file='bldg_repair_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_repair_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.concat(\n [pd.read_excel(source_file, sheet_name=sheet, header=2, index_col=1)\n for sheet in ('Summary', 'Cost Summary', 'Env Summary')], axis=1)\n\n # remove duplicate columns\n # (there are such because we joined two tables that were read separately)\n df = df.loc[:, ~df.columns.duplicated()]\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the repair database\n cols_to_db = [\n \"Fragility Unit of Measure\",\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n f\"Best Fit, DS{DS_i}\",\n f\"Lower Qty Mean, DS{DS_i}\",\n f\"Upper Qty Mean, DS{DS_i}\",\n f\"Lower Qty Cutoff, DS{DS_i}\",\n f\"Upper Qty Cutoff, DS{DS_i}\",\n f\"CV / Dispersion, DS{DS_i}\",\n\n f\"Best Fit, DS{DS_i}.1\",\n f\"Lower Qty Mean, DS{DS_i}.1\",\n f\"Upper Qty Mean, DS{DS_i}.1\",\n f\"Lower Qty Cutoff, DS{DS_i}.1\",\n f\"Upper Qty Cutoff, DS{DS_i}.1\",\n f\"CV / Dispersion, DS{DS_i}.2\",\n f\"DS {DS_i}, Long Lead Time\",\n\n f'Repair Cost, p10, DS{DS_i}',\n f'Repair Cost, p50, DS{DS_i}',\n f'Repair Cost, p90, DS{DS_i}',\n f'Time, p10, DS{DS_i}',\n f'Time, p50, DS{DS_i}',\n f'Time, p90, DS{DS_i}',\n f'Mean Value, DS{DS_i}',\n f'Mean Value, DS{DS_i}.1',\n\n # Columns added for the Environmental loss\n f\"DS{DS_i} Best Fit\",\n f\"DS{DS_i} CV or Beta\",\n\n f\"DS{DS_i} Best Fit.1\",\n f\"DS{DS_i} CV or Beta.1\",\n\n f\"DS{DS_i} Embodied Carbon (kg CO2eq)\",\n f\"DS{DS_i} Embodied Energy (MJ)\",\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 1, Repair Description\",\n \"DS 2, Description\",\n \"DS 2, Repair Description\",\n \"DS 3, Description\",\n \"DS 3, Repair Description\",\n \"DS 4, Description\",\n \"DS 4, Repair Description\",\n \"DS 5, Description\",\n \"DS 5, Repair Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in 
cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 16):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\",\n f\"DS{DS_i}-LongLeadTime\",\n ]\n\n # create the MultiIndex\n comps = df_db_source.index.values\n DVs = ['Cost', 'Time', 'Carbon', 'Energy']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n convert_family = {\n 'LogNormal': 'lognormal',\n 'Normal': 'normal'\n }\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete_cost = False\n incomplete_time = False\n incomplete_carbon = False\n incomplete_energy = False\n\n # store units\n\n df_db.loc[cmp.Index, 'Quantity-Unit'] = (\n ' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip())\n df_db.loc[(cmp.Index, 'Cost'), 'DV-Unit'] = \"USD_2011\"\n df_db.loc[(cmp.Index, 'Time'), 'DV-Unit'] = \"worker_day\"\n df_db.loc[(cmp.Index, 'Carbon'), 'DV-Unit'] = \"kg\"\n df_db.loc[(cmp.Index, 'Energy'), 'DV-Unit'] = \"MJ\"\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. 
description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n # Note that we are assuming that all damage states are triggered by\n # a single limit state in these components.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n cost_est = {}\n time_est = {}\n carbon_est = {}\n energy_est = {}\n\n # get the p10, p50, and p90 estimates for all damage states\n for DS_i in range(1, 6):\n\n if not pd.isna(getattr(cmp, f'Repair_Cost_p10_DS{DS_i}')):\n\n cost_est.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'Repair_Cost_p10_DS{DS_i}'),\n getattr(cmp, f'Repair_Cost_p50_DS{DS_i}'),\n getattr(cmp, f'Repair_Cost_p90_DS{DS_i}'),\n getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}'),\n getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}')\n ])})\n\n time_est.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'Time_p10_DS{DS_i}'),\n getattr(cmp, f'Time_p50_DS{DS_i}'),\n getattr(cmp, f'Time_p90_DS{DS_i}'),\n getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1'),\n getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}_1'),\n int(getattr(cmp, f'DS_{DS_i}_Long_Lead_Time') == 'YES')\n ])})\n\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Embodied_Carbon_kg_CO2eq')):\n\n theta_0, theta_1, family = [\n getattr(cmp, f'DS{DS_i}_Embodied_Carbon_kg_CO2eq'),\n getattr(cmp, f'DS{DS_i}_CV_or_Beta'),\n getattr(cmp, f'DS{DS_i}_Best_Fit')\n ]\n\n if family == 'Normal':\n p10, p50, p90 = norm.ppf([0.1, 0.5, 0.9], loc=theta_0, scale=theta_0 * theta_1)\n elif family == 'LogNormal':\n p10, p50, p90 = np.exp(norm.ppf([0.1, 0.5, 0.9], loc=np.log(theta_0), scale=theta_1))\n\n carbon_est.update({f'DS{DS_i}': np.array([p10, p50, p90])})\n\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ')):\n\n theta_0, theta_1, family = [\n getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ'),\n getattr(cmp, f'DS{DS_i}_CV_or_Beta_1'),\n getattr(cmp, f'DS{DS_i}_Best_Fit_1')\n ]\n\n if family == 'Normal':\n p10, p50, p90 = norm.ppf([0.1, 0.5, 0.9], loc=theta_0, scale=theta_0 * theta_1)\n elif family == 'LogNormal':\n p10, p50, p90 = np.exp(norm.ppf([0.1, 0.5, 0.9], loc=np.log(theta_0), scale=theta_1))\n\n energy_est.update({f'DS{DS_i}': np.array([p10, p50, p90])})\n\n # now prepare the equivalent mutex damage states\n sim_ds_count = len(cost_est.keys())\n ds_count = 2 ** (sim_ds_count) - 1\n\n for DS_i in range(1, ds_count + 1):\n ds_map = format(DS_i, 
f'0{sim_ds_count}b')\n\n cost_vals = np.sum([cost_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(5)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n time_vals = np.sum([time_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(6)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n carbon_vals = np.sum([carbon_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(3)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n energy_vals = np.sum([energy_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(3)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n # fit a distribution\n family_hat, theta_hat = fit_distribution_to_percentiles(\n cost_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n cost_theta = theta_hat\n if family_hat == 'normal':\n cost_theta[1] = cost_theta[1] / cost_theta[0]\n\n time_theta = [time_vals[1],\n np.sqrt(cost_theta[1] ** 2.0 + 0.25 ** 2.0)]\n\n # fit distributions to environmental impact consequences\n family_hat_carbon, theta_hat_carbon = fit_distribution_to_percentiles(\n carbon_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n carbon_theta = theta_hat_carbon\n if family_hat_carbon == 'normal':\n carbon_theta[1] = carbon_theta[1] / carbon_theta[0]\n\n family_hat_energy, theta_hat_energy = fit_distribution_to_percentiles(\n energy_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n energy_theta = theta_hat_energy\n if family_hat_energy == 'normal':\n energy_theta[1] = energy_theta[1] / energy_theta[0]\n\n # Note that here we assume that the cutoff quantities are\n # identical across damage states.\n # This assumption holds for the second edition of FEMA P58, but\n # it might need to be revisited in future editions.\n cost_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1')\n cost_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1')\n time_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1_1')\n time_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1_1')\n\n # store the results\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = family_hat\n\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_0'] = (\n f\"{cost_vals[3]:g},{cost_vals[4]:g}|\"\n f\"{cost_qnt_low:g},{cost_qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Cost'),\n f'DS{DS_i}-Theta_1'] = f\"{cost_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = family_hat\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_0'] = (\n f\"{time_vals[3]:g},{time_vals[4]:g}|\"\n f\"{time_qnt_low:g},{time_qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-Theta_1'] = f\"{time_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-LongLeadTime'] = int(time_vals[5] > 0)\n\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Family'] = family_hat_carbon\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_0'] = f\"{carbon_theta[0]:g}\"\n\n df_db.loc[(cmp.Index, 'Carbon'),\n f'DS{DS_i}-Theta_1'] = f\"{carbon_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Family'] = family_hat_energy\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_0'] = f\"{energy_theta[0]:g}\"\n\n df_db.loc[(cmp.Index, 'Energy'),\n f'DS{DS_i}-Theta_1'] = f\"{energy_theta[1]:g}\"\n\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": f\"Pure DS{ds_pure_id}. 
\" +\n cmp_meta[f\"DS_{ds_pure_id}_Description\"],\n \"RepairAction\":\n cmp_meta[f\"DS_{ds_pure_id}_Repair_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo),\n \"RepairAction\": 'Combination of pure DS repair '\n 'actions.'\n }})\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n # cost\n if not pd.isna(getattr(cmp, f'Best_Fit_DS{DS_i}')):\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'Best_Fit_DS{DS_i}')])\n\n if not pd.isna(getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}')):\n\n theta_0_low = getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}')\n theta_0_up = getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}')\n qnt_low = getattr(cmp, f'Lower_Qty_Cutoff_DS{DS_i}')\n qnt_up = getattr(cmp, f'Upper_Qty_Cutoff_DS{DS_i}')\n\n if theta_0_low == 0. and theta_0_up == 0.:\n df_db.loc[(cmp.Index, 'Cost'),\n f'DS{DS_i}-Family'] = np.nan\n\n else:\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_0'] = (\n f\"{theta_0_low:g},{theta_0_up:g}|\"\n f\"{qnt_low:g},{qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_1'] = (\n f\"{getattr(cmp, f'CV__Dispersion_DS{DS_i}'):g}\")\n\n else:\n incomplete_cost = True\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\n \"Description\": cmp_meta[f\"DS_{DS_i}_Description\"],\n \"RepairAction\": cmp_meta[\n f\"DS_{DS_i}_Repair_Description\"]}})\n\n # time\n if not pd.isna(getattr(cmp, f'Best_Fit_DS{DS_i}_1')):\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'Best_Fit_DS{DS_i}_1')])\n\n if not pd.isna(getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1')):\n\n theta_0_low = getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1')\n theta_0_up = getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}_1')\n qnt_low = getattr(cmp, f'Lower_Qty_Cutoff_DS{DS_i}_1')\n qnt_up = getattr(cmp, f'Upper_Qty_Cutoff_DS{DS_i}_1')\n\n if theta_0_low == 0. 
and theta_0_up == 0.:\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-Family'] = np.nan\n\n else:\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_0'] = (\n f\"{theta_0_low:g},{theta_0_up:g}|\"\n f\"{qnt_low:g},{qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_1'] = (\n f\"{getattr(cmp, f'CV__Dispersion_DS{DS_i}_2'):g}\")\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-LongLeadTime'] = (\n int(getattr(cmp, f'DS_{DS_i}_Long_Lead_Time') == 'YES'))\n\n else:\n incomplete_time = True\n\n # Carbon\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Best_Fit')):\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'DS{DS_i}_Best_Fit')])\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_0'] = getattr(cmp,\n f'DS{DS_i}_Embodied_Carbon_kg_CO2eq')\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_1'] = getattr(cmp, f'DS{DS_i}_CV_or_Beta')\n\n # Energy\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Best_Fit_1')):\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'DS{DS_i}_Best_Fit_1')])\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_0'] = getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ')\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_1'] = getattr(cmp, f'DS{DS_i}_CV_or_Beta_1')\n\n df_db.loc[(cmp.Index, 'Cost'), 'Incomplete'] = int(incomplete_cost)\n df_db.loc[(cmp.Index, 'Time'), 'Incomplete'] = int(incomplete_time)\n df_db.loc[(cmp.Index, 'Carbon'), 'Incomplete'] = int(incomplete_carbon)\n df_db.loc[(cmp.Index, 'Energy'), 'Incomplete'] = int(incomplete_energy)\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.index = pd.MultiIndex.from_arrays(\n [df_db['Index'].values, df_db.index.get_level_values(1)])\n\n df_db.drop('Index', axis=1, inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 6):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n for cmp in cmp_to_drop:\n if cmp[0] in meta_dict:\n del meta_dict[cmp[0]]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the repair consequence data from FEMA \"\n \"P58\")", "def create_Hazus_EQ_bldg_injury_db(source_file,\n target_data_file='bldg_injury_DB_Hazus_EQ.csv',\n target_meta_file='bldg_injury_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n building_types = list(\n raw_data['Structural_Fragility_Groups']['P_collapse'].keys())\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Theta_0\",\n ]\n\n # create the MultiIndex\n cmp_types = ['STR', 'LF']\n comps = [f'{cmp_type}.{bt}'\n for cmp_type in cmp_types for bt in building_types]\n DVs = ['S1', 'S2', 'S3', 'S4']\n df_MI = pd.MultiIndex.from_product([comps, DVs], 
names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # First, prepare the structural damage consequences\n S_data = raw_data['Structural_Fragility_Groups']\n\n for bt in building_types:\n\n # create the component id\n cmp_id = f'STR.{bt}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS5 is stored under 'collapse'\n if DS_i == 5:\n ds_i = 'Collapse'\n else:\n ds_i = f'DS{DS_i}'\n\n for S_i in range(1, 5):\n s_label = f'S{S_i}'\n df_db.loc[(cmp_id, s_label), f'DS{DS_i}-Theta_0'] = (\n S_data['Injury_rates'][ds_i][bt][S_i-1])\n\n # Second, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for bt in building_types:\n\n # create the component id\n cmp_id = f'STR.{bt}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS5 is stored under 'collapse'\n if DS_i == 5:\n ds_i = 'Collapse'\n else:\n ds_i = f'DS{DS_i}'\n\n for S_i in range(1, 5):\n s_label = f'S{S_i}'\n df_db.loc[(cmp_id, s_label), f'DS{DS_i}-Theta_0'] = (\n S_data['Injury_rates'][ds_i][bt][S_i - 1])\n\n # remove empty rows\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # The damage quantity unit is the same for all consequence values\n df_db.loc[:, 'Quantity-Unit'] = \"1 EA\"\n\n # The output units are also indentical among all components\n df_db.loc[:, 'DV-Unit'] = \"injury_rate\"\n\n # convert to simple index\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the injury consequence data from Hazus \"\n \"EQ\")", "def _create_sql(self):\n\n pdbfile = self.pdbfile\n sqlfile = self.sqlfile\n\n if self.verbose:\n print('-- Create SQLite3 database')\n\n #name of the table\n #table = 'ATOM'\n\n # column names and types\n self.col = {'serial' : 'INT',\n 'name' : 'TEXT',\n 'altLoc' : 'TEXT',\n 'resName' : 'TEXT',\n 'chainID' : 'TEXT',\n 'resSeq' : 'INT',\n 'iCode' : 'TEXT',\n 'x' : 'REAL',\n 'y' : 'REAL',\n 'z' : 'REAL',\n 'occ' : 'REAL',\n 'temp' : 'REAL'}\n\n # delimtier of the column format\n # taken from http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM\n self.delimiter = {\n 'serial' : [6,11],\n 'name' : [12,16],\n 'altLoc' : [16,17],\n 'resName' :[17,20],\n 'chainID' :[21,22],\n 'resSeq' :[22,26],\n 'iCode' :[26,26],\n 'x' :[30,38],\n 'y' :[38,46],\n 'z' :[46,54],\n 'occ' :[54,60],\n 'temp' :[60,66]}\n\n if self.no_extra:\n del self.col['occ']\n del self.col['temp']\n\n # size of the things\n ncol = len(self.col)\n ndel = len(self.delimiter)\n\n\n # open the data base\n # if we do not specify a db name\n # the db is only in RAM\n # there might be little advantage to use memory\n # https://stackoverflow.com/questions/764710/sqlite-performance-benchmark-why-is-memory-so-slow-only-1-5x-as-fast-as-d\n if self.sqlfile is None:\n self.conn = sqlite3.connect(':memory:')\n \n # or we create a new db file\n else:\n if os.path.isfile(sqlfile):\n sp.call('rm %s' %sqlfile,shell=True)\n self.conn = sqlite3.connect(sqlfile)\n self.c = self.conn.cursor()\n\n # intialize the header/placeholder\n header,qm = '',''\n for 
ic,(colname,coltype) in enumerate(self.col.items()):\n header += '{cn} {ct}'.format(cn=colname,ct=coltype)\n qm += '?'\n if ic < ncol-1:\n header += ', '\n qm += ','\n\n # create the table\n query = 'CREATE TABLE ATOM ({hd})'.format(hd=header)\n self.c.execute(query)\n\n\n # read the pdb file\n # this is dangerous if there are ATOM written in the comment part\n # which happends often\n #data = sp.check_output(\"awk '/ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a safer version consist at matching against the first field\n # won't work on windows\n #data = sp.check_output(\"awk '$1 ~ /^ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a pure python way\n # RMK we go through the data twice here. Once to read the ATOM line and once to parse the data ...\n # we could do better than that. But the most time consuming step seems to be the CREATE TABLE query\n # if we path a file we read it\n if isinstance(pdbfile,str):\n if os.path.isfile(pdbfile):\n with open(pdbfile,'r') as fi:\n data = [line.split('\\n')[0] for line in fi if line.startswith('ATOM')]\n else:\n raise FileNotFoundError('File %s was not found',pdbfile)\n\n # if we pass a list as for h5py read/write\n # we directly use that\n elif isinstance(pdbfile,np.ndarray):\n data = [l.decode('utf-8') for l in pdbfile.tolist()]\n\n # if we cant read it\n else:\n print(pdbfile)\n raise ValueError('PDB data not recognized')\n\n # if there is no ATOM in the file\n if len(data)==1 and data[0]=='':\n print(\"-- Error : No ATOM in the pdb file.\")\n self.is_valid = False\n return\n\n # haddock chain ID fix\n del_copy = self.delimiter.copy()\n if data[0][del_copy['chainID'][0]] == ' ':\n del_copy['chainID'] = [72,73]\n\n # get all the data\n data_atom = []\n for iatom,atom in enumerate(data):\n\n # sometimes we still have an empty line somewhere\n if len(atom) == 0:\n continue\n\n # browse all attribute of each atom\n at = ()\n for ik,(colname,coltype) in enumerate(self.col.items()):\n\n # get the piece of data\n data = atom[del_copy[colname][0]:del_copy[colname][1]].strip()\n\n # convert it if necessary\n if coltype == 'INT':\n data = int(data)\n elif coltype == 'REAL':\n data = float(data)\n\n # append keep the comma !!\n # we need proper tuple\n at +=(data,)\n\n # append\n data_atom.append(at)\n\n\n # push in the database\n self.c.executemany('INSERT INTO ATOM VALUES ({qm})'.format(qm=qm),data_atom)", "def update_db(db_name=_db_indicators, start=1950, end=dt.datetime.now().year, write_db=True):\n def read_indicators(pdfI=None, coutries=[], ctry_chunksize=50, write_db=True):\n print('UPDATE IMF: Start reading {0} indicators'.format(pdfI.shape[0]))\n #dct_not_data=dict()\n lst_ret=[]\n for k, v in pdfI.iterrows():\n\n lst_pdf=list()\n lst_not_country=list()\n tbl_name=k #'{0}_{1}'.format(k, freq)\n print('UPDATE IMF ({2}-{3}): reading {0}, tDS={1}\\t'.format(k, v['Dataset'], start, end), end='... 
')\n for cs in cmm.iterate_group(coutries, ctry_chunksize):\n\n try:\n pdf = pds.read_imf(strDataSetID=v['Dataset'], indiID=k, countryCode=cs,\n frequency=v['Freq'], startDate=start, endDate=end)\n\n lst_pdf.append(pdf)\n lst_not_country+=pdf.not_country\n #print(pdf.name, pdf.shape, len(pdf.not_country))\n except ValueError as e:\n lst_not_country += cs\n\n #print(e, k, 0, 50)\n try:\n pdfC=pds.DataFrameDATA(pd.concat([ppdf for ppdf in lst_pdf if not ppdf.empty]))\n pdfC.name=tbl_name\n #dct_not_data.update({'IND_NOT':tbl_name, 'NOT_DATA':lst_not_country})\n print('read {name},\\tlen {len_df},\\tnot data countries - {nc}'.format(name=pdfC.name,\n len_df=pdfC.shape[0],\n nc=len(lst_not_country)), end='... ')\n if write_db:\n print('write to DB...', end='')\n\n lstWrite=[c for c in pdfC.columns.tolist() if c !='mult']\n\n pdfC[lstWrite].to_sql(pdfC.name, coni, if_exists='upsert')\n cmm.write_status(db_name, k, pdfC.shape[0], mult=pdfC['mult'].unique()[0])\n\n print('done', end='\\n')\n pdfC['INDI']=k\n lst_ret.append(pdfC)\n #print(dct_not_data)\n except ValueError as e:\n print(e, 'not data for ', k, v['Dataset'], len(cs))\n\n return pd.concat(lst_ret)\n\n coni = sa.create_engine('sqlite+pysqlite:///{db_name}'.format(db_name=db_name))\n # pdfIndi=pd.read_sql('select * from INDICATORS where LastUpdateDateA is NULL', coni, index_col='Code')\n pdfIndi = pd.read_sql('select * from {INDI_NAME}'.format(INDI_NAME=cmm.strINDI_db_name), coni, index_col='Code')#.iloc[:40]\n pdfCountry = pd.read_sql('select * from {COUNTRY_NAME}'.format(COUNTRY_NAME=cmm.strCOUNTRY_db_name), coni, index_col='id')\n country_list = pdfCountry.index.tolist()\n print('UPDATE IMF: reading {0} countries'.format(len(country_list)))\n\n pdfQ=read_indicators(pdfI=pdfIndi.sort_index(), coutries=country_list, write_db=write_db)\n print('=' * 50)\n\n print('UPDATE IMF: all done')\n return pdfQ", "def __init__(self):\n # File settings and locations.\n self.DATA_DIR = 'data'\n self.DATA_COL_DIR = 'data_collated'\n\n self.FIRE_DATABASE = 'FPA_FOD_20170508.sqlite'\n self.CLIMATE_DATA = 'GlobalLandTemperaturesByCity.csv'\n self.STOCK_DATA = 'historical_stock_prices.csv'\n self.COMBINED_DATA = 'combined_data.db'\n\n self.MODEL_PATH = 'models/dnn_wildfires.ckpt'\n\n # Setting to use reduced data for prototyping purposes.\n self.prototyping = False\n self.sample_size = 80000\n\n # Start date of data\n self.start = pd.to_datetime('1992-01-01')\n\n # Stocks in stock data to keep for analysis.\n self.stocks = ['MSFT', 'AAPL', 'GE', 'JNJ', 'JPM', 'PG']\n\n # Settings for validation and test set partitioning.\n self.val_set_ratio = 0.15\n self.test_set_ratio = 0.15\n\n # Separation of features for pipeline preparation \n self.cat_attribs = ['STATE', 'FIRE_SIZE_CLASS', 'OWNER_CODE', 'City']\n self.num_attribs = ['FIRE_YEAR', 'LATITUDE', 'LONGITUDE', 'FIRE_SIZE', \n 'FIRE_LENGTH', 'DIST_TO_MAJOR_CITY', 'AverageTemperature',\n 'AverageTemperatureUncertainty', 'AAPL', 'GE', 'JNJ', \n 'JPM', 'MSFT', 'PG']\n self.cycle_cols = ['DISC_MONTH', 'DISC_DAY_OF_WEEK', 'DISCOVERY_TIME', \n 'DISCOVERY_DOY', 'CONT_MONTH', 'CONT_DAY_OF_WEEK',\n 'CONT_TIME']\n\n # Define the ranges of the cycles in cycle_cols and whether any offset for\n # zero-indexing is needed (i.e., 'DISC_MONTH' cycles over a 12 month period\n # and the months need an offset of one to start the indicies at 0 for Jan.).\n self.cycle_ranges = [12, 7, 2400, 365, 12, 7, 2400]\n self.cycle_offsets = [1, 0, 0, 1, 1, 0, 0]\n\n # Parameters for deep learning model determined from randomized \n # 
hyperparameter search.\n self.n_hidden_layers = 4\n self.n_neurons = 200\n self.batch_size = 500\n self.batch_norm_momentum = 0.999\n self.dropout_rate = 0.4\n self.learning_rate = 0.01\n self.activation = tf.nn.elu\n\n # Hyperparameter settings .\n self.hp_search = False", "def build_input_db():\n build_input_database()", "def add_AA_sequences_to_db(db_cursor):\n db_cursor.execute(\"SELECT rowid,residue,position,uniprotid FROM phosphositetb\")\n results = db_cursor.fetchall()\n #print results\n\n for rowid, residue, position, uniprotid in results:\n AA_sequence = get_AA_sequence_around_mod(residue,position,uniprotid)\n #print AA_sequence\n #db_cursor.execute(\"SELECT rowid, AA_sequence FROM phosphositetb\")\n #print db_cursor.fetchall()\n db_cursor.execute(\"UPDATE phosphositetb SET AA_sequence=? where rowid=?;\"\\\n ,(AA_sequence,rowid))", "def prep(self):\n sq1 = 'create table TCVR ( ID, T, C, V, R , primary key ( ID ) ) ;'\n sq2 = 'create table IDX ( ID , A , primary key(A) ) ; '\n self.sq.SQX(sq1)\n self.sq.SQX(sq2)\n sq3 = \"insert into IDX VALUES ( 1 , 'A' ) ; \"\n self.sq.SQX(sq3)", "def create_Hazus_EQ_bldg_repair_db(source_file,\n target_data_file='bldg_repair_DB_Hazus_EQ.csv',\n target_meta_file='bldg_repair_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n occupancies = list(\n raw_data['Structural_Fragility_Groups']['Repair_cost'].keys())\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Theta_0\",\n ]\n\n # create the MultiIndex\n cmp_types = ['STR', 'NSD', 'NSA', 'LF']\n comps = [f'{cmp_type}.{occ_type}'\n for cmp_type in cmp_types for occ_type in occupancies]\n DVs = ['Cost', 'Time']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # First, prepare the structural damage consequences\n S_data = raw_data['Structural_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'STR.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS4 and DS5 have identical repair consequences\n if DS_i == 5:\n ds_i = 4\n else:\n ds_i = DS_i\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = S_data['Repair_cost'][occ_type][ds_i-1]\n\n df_db.loc[\n (cmp_id, 'Time'),\n f'DS{DS_i}-Theta_0'] = S_data['Repair_time'][occ_type][ds_i-1]\n\n # Second, the non-structural drift sensitive one\n NSD_data = raw_data['NonStructural_Drift_Sensitive_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'NSD.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 5):\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = NSD_data['Repair_cost'][occ_type][DS_i-1]\n\n # Third, the non-structural acceleration sensitive fragilities\n NSA_data = raw_data['NonStructural_Acceleration_Sensitive_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'NSA.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 5):\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = NSA_data['Repair_cost'][occ_type][DS_i-1]\n\n # Fourth, the lifeline facilities\n LF_data = 
raw_data['Lifeline_Facilities']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'LF.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS4 and DS5 have identical repair consequences\n if DS_i == 5:\n ds_i = 4\n else:\n ds_i = DS_i\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = LF_data['Repair_cost'][occ_type][ds_i - 1]\n\n df_db.loc[\n (cmp_id, 'Time'),\n f'DS{DS_i}-Theta_0'] = LF_data['Repair_time'][occ_type][ds_i - 1]\n\n # remove empty rows (from the end)\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # The damage quantity unit is the same for all consequence values\n df_db.loc[:, 'Quantity-Unit'] = \"1 EA\"\n\n # The output units are also indentical among all components\n df_db.loc[idx[:, 'Cost'], 'DV-Unit'] = \"loss_ratio\"\n df_db.loc[idx[:, 'Time'], 'DV-Unit'] = \"day\"\n\n # convert to simple index\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the repair consequence data from Hazus \"\n \"EQ\")", "def initialise_bdd(self):\n print(fr.FR[1])\n self.base.create_database(\"sql/p5.sql\")\n print(fr.FR[2])\n self.category_table.save_category()\n print(fr.FR[3])", "def _aiida_ndb_qp(self, data ):\n pdata = ArrayData()\n pdata.set_array('Eo', numpy.array(data['Eo']))\n pdata.set_array('E_minus_Eo', numpy.array(data['E-Eo']))\n pdata.set_array('Z', numpy.array(data['Z']))\n pdata.set_array('qp_table', numpy.array(data['qp_table']))\n try:\n pdata.set_array('So', numpy.array(data['So']))\n except KeyError:\n pass\n return pdata", "def design_TIA_inverter(db_n, db_p, sim_env,\n vg_res, rf_res,\n vdd_nom, vdd_vec, cpd, cload, \n rdc_min, fbw_min, pm_min, BER_max,\n vos, isw_pkpk,\n vb_n, vb_p, error_tol=0.05, ibias_max=20e-6):\n # Finds all possible designs for one value of VDD, then\n # confirm which work with all other VDD values.\n possibilities = []\n\n vg_vec = np.arange(0, vdd_nom, vg_res)\n \n for vg in vg_vec:\n print(\"VIN:\\t{0}\".format(vg))\n n_op_info = db_n.query(vgs=vg, vds=vg, vbs=vb_n-0)\n p_op_info = db_p.query(vgs=vg-vdd_nom, vds=vg-vdd_nom, vbs=vb_p-vdd_nom)\n \n if np.isinf(ibias_max):\n nf_n_max = 200\n else:\n nf_n_max = int(round(ibias_max/n_op_info['ibias']))\n \n nf_n_vec = np.arange(1, nf_n_max, 1)\n for nf_n in nf_n_vec:\n # Number of fingers can only be integer,\n # so increase as necessary until you get\n # sufficiently accurate/precise bias + current match\n ratio_good, nf_p = verify_ratio(n_op_info['ibias'],\n p_op_info['ibias'],\n nf_n,\n error_tol)\n if not ratio_good:\n continue\n\n # Getting small signal parameters to constrain Rf\n inv = LTICircuit()\n inv.add_transistor(n_op_info, 'out', 'in', 'gnd', fg=nf_n)\n inv.add_transistor(p_op_info, 'out', 'in', 'gnd', fg=nf_p)\n inv_num, inv_den = inv.get_num_den(in_name='in', out_name='out', in_type='v')\n A0 = abs(inv_num[-1]/inv_den[-1])\n \n gds_n = n_op_info['gds'] * nf_n\n gds_p = p_op_info['gds'] * nf_p\n gds = abs(gds_n) + abs(gds_p)\n ro = 1/gds\n \n # Assume Rdc is negative, bound Rf\n rf_min = max(rdc_min*(1+A0)/A0 + ro/A0, 0)\n rf_vec = np.arange(rf_min, rdc_min*2, 
rf_res)\n for rf in rf_vec:\n # With all parameters, check if it meets small signal spec\n meets_SS, SS_vals = verify_TIA_inverter_SS(n_op_info, p_op_info,\n nf_n, nf_p, rf, cpd, cload,\n rdc_min, fbw_min, pm_min)\n # With all parameters, estimate if it will meet noise spec\n meets_noise, BER = verify_TIA_inverter_BER(n_op_info, p_op_info, \n nf_n, nf_p,\n rf, cpd, cload,\n BER_max, vos, isw_pkpk)\n \n meets_spec = meets_SS # and meets_noise\n # If it meets small signal spec, append it to the list\n # of possibilities\n if meets_spec:\n possibilities.append(dict(vg=vg,\n vdd=vdd_nom,\n nf_n=nf_n,\n nf_p=nf_p,\n rf=rf,\n rdc=SS_vals['rdc'],\n fbw=SS_vals['fbw'],\n pm=SS_vals['pm'],\n ibias=ibias_n,\n BER=BER))\n elif SS_vals['fbw'] != None and SS_vals['fbw'] < fbw_min:\n # Increasing resistor size won't help bandwidth\n break\n \n # Go through all possibilities which work at the nominal voltage\n # and ensure functionality at other bias voltages\n # Remove any nonviable options\n print(\"{0} working at nominal VDD\".format(len(possibilities)))\n for candidate in possibilities:\n nf_n = candidate['nf_n']\n nf_p = candidate['nf_p']\n rf = candidate['rf']\n for vdd in vdd_vec:\n new_op_dict = vary_supply(vdd, db_n, db_p, nf_n, nf_p, vb_n, vb_p)\n vg = new_op_dict['vb']\n n_op = new_op_dict['n_op']\n p_op = new_op_dict['p_op']\n \n # Confirm small signal spec is met\n meets_SS, scratch = verify_TIA_inverter_SS(n_op, p_op,\n nf_n, nf_p, rf, cpd, cload,\n rdc_min, fbw_min, pm_min)\n \n # Confirm noise spec is met\n meets_noise, BER = verify_TIA_inverter_BER(n_op, p_op, \n nf_n, nf_p,\n rf, cpd, cload,\n BER_max, vos, isw_pkpk)\n \n meets_spec = meets_SS # and meets_noise\n \n if not meets_spec:\n possibilities.remove(candidate)\n break\n \n # Of the remaining possibilities, check for lowest power.\n # If there are none, raise a ValueError.\n if len(possibilities) == 0:\n raise ValueError(\"No final viable solutions\")\n \n print(\"{0} working at all VDD\".format(len(possibilities)))\n best_op = possibilities[0]\n for candidate in possibilities:\n best_op = choose_op_comparison(best_op, candidate)\n \n return best_op", "def prepare_experiment(assumptions):\n print(\"\\nGenerate species parameters\")\n np.random.seed(assumptions['seed']) \n params = MakeParams(assumptions) \n if assumptions[\"selected_function\"] == \"f5_invader_suppression\":\n print(\"\\nDraw invader feature\")\n params = create_invader(params, assumptions)\n print(params[\"c\"])\n \n print(\"\\nDraw per-capita function and cost\")\n f1_species_smooth, f1_species_rugged, f2_species_smooth, f2_species_rugged = draw_species_function(assumptions)\n params.update({\"f1_species_smooth\": f1_species_smooth, \"f1_species_rugged\": f1_species_rugged, \"f2_species_smooth\": f2_species_smooth, \"f2_species_rugged\": f2_species_rugged})\n gi = draw_species_cost(f1_species_smooth, assumptions)\n params.update({\"g\": gi})\n \n print(\"\\nConstruct plate\")\n np.random.seed(assumptions['seed']) \n plate = make_plate(assumptions,params)\n \n print(\"\\nAdd community function to plate\")\n plate = add_community_function(plate, assumptions, params)\n \n if not pd.isnull(assumptions[\"overwrite_plate\"]) :\n print(\"\\nUpdating the initial plate composition by overwrite_plate\")\n plate = overwrite_plate(plate, assumptions)\n \n print(\"\\nPrepare Protocol\")\n #Extract Protocol from protocol database\n algorithms = make_algorithms(assumptions)\n params_algorithm = algorithms[algorithms['algorithm_name'] == assumptions['protocol']]\n \n 
#Params_simulation by default contains all assumptions not stored in params.\n params_simulation = dict((k, assumptions[k]) for k in assumptions.keys() if k not in params.keys())\n \n return params, params_simulation , params_algorithm, plate", "def generate_siaf_pre_flight_reference_files_nircam():\n\n instrument = 'NIRCam'\n overwrite_wedge_file = False\n overwrite_grism_file = False\n\n\n # wedge definitions\n wedge_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_wedge_offsets.txt'.format(instrument.lower()))\n\n if (not os.path.isfile(wedge_file) or (overwrite_wedge_file)):\n\n wedge_offsets = Table.read(os.path.join(JWST_SOURCE_DATA_ROOT, instrument, 'wedge_offsets.txt'), format='ascii.basic', delimiter=' ', guess=False)\n\n comments = []\n comments.append('{} detector parameter definition file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the wedge offsets.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n wedge_offsets.meta['comments'] = comments\n wedge_offsets.write(wedge_file, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False)\n\n # grism definitions\n grism_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_grism_parameters.txt'.format(instrument.lower()))\n\n if (not os.path.isfile(wedge_file) or (overwrite_grism_file)):\n # grism parameters, see WFSS worksheet in EXCEL SIAF\n grism_parameters = Table.read(grism_file, format='ascii.basic', delimiter=',', guess=False)\n\n # Save a backup copy of the grism file\n cmd = 'cp {} {}'.format(grism_file,os.path.join(JWST_TEMPORARY_DATA_ROOT, instrument, 'nircam_siaf_grism_parameters_backup.txt'))\n os.system(cmd)\n\n # different sign in Y for NRCB apertures\n factor = np.array(\n [1. if 'NRCA' in grism_parameters['aperture_name'][i] else -1. 
for i in range(len(grism_parameters))])\n\n for col in grism_parameters.colnames[1:]:\n # these are Sci coordinates\n if col[0] != 'D':\n if 'X' in col:\n grism_parameters['D{}'.format(col)] = grism_parameters[col].data - 1024.5\n elif 'Y' in col:\n grism_parameters['D{}'.format(col)] = factor * (grism_parameters[col].data - 1024.5)\n\n\n\n comments = []\n comments.append('{} grism parameter definition file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the grism parameters.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n grism_parameters.meta['comments'] = comments\n grism_parameters.write(grism_file, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False)\n\n # Transformation parameters, mapping used to select rows in cold_fit_[] file\n coldfit_name_mapping = {\n 'NRCA1_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_1_20161025081540',\n 'pixels_to_mm':'NIRCAMASW_1ToNIRCAMASW_20161025081540',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA2_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_2_20161025081547',\n 'pixels_to_mm':'NIRCAMASW_2ToNIRCAMASW_20161025081547',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA3_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_3_20161025081552',\n 'pixels_to_mm':'NIRCAMASW_3ToNIRCAMASW_20161025081552',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA4_FULL': {'degrees_to_mm':'OTESKYToNIRCAMASW_201609161431',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_4_20161025081557',\n 'pixels_to_mm':'NIRCAMASW_4ToNIRCAMASW_20161025081557',\n 'mm_to_degrees':'NIRCAMASWoOTESKY_201609161431',\n },\n 'NRCA5_FULL' :{'degrees_to_mm':'OTESKYToNIRCAMALW_RT_20170307121022',\n 'mm_to_pixels':'NIRCAMALWToNIRCAMALW_1_20161227162042',\n 'pixels_to_mm':'NIRCAMALW_1ToNIRCAMALW_20161227162042',\n 'mm_to_degrees':'NIRCAMALWToOTESKY_RT_20170307121022',\n },\n 'NRCB1_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_1_20161025081604',\n 'pixels_to_mm':'NIRCAMBSW_1ToNIRCAMBSW_20161025081604',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB2_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_2_20161025081912',\n 'pixels_to_mm':'NIRCAMBSW_2ToNIRCAMBSW_20161025081912',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB3_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_3_20161025082300',\n 'pixels_to_mm':'NIRCAMBSW_3ToNIRCAMBSW_20161025082300',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB4_FULL': {'degrees_to_mm':'OTESKYToNIRCAMBSW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBSWToNIRCAMBSW_4_20161025082647',\n 'pixels_to_mm':'NIRCAMBSW_4ToNIRCAMBSW_20161025082647',\n 'mm_to_degrees':'NIRCAMBSWToOTESKY_RT_20170307121024',\n },\n 'NRCB5_FULL' :{'degrees_to_mm':'OTESKYToNIRCAMBLW_RT_20170307121023',\n 'mm_to_pixels':'NIRCAMBLWToNIRCAMBLW_1_20161227162336',\n 'pixels_to_mm':'NIRCAMBLW_1ToNIRCAMBLW_20161227162336',\n 'mm_to_degrees':'NIRCAMBLWToOTESKY_RT_20170307121023',\n },\n 'NRCA1_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RNDNC_202110261138',\n 
'mm_to_pixels':'NIRCAMASWToNIRCAMASW_1_20161025081540',\n 'pixels_to_mm':'NIRCAMASW_1ToNIRCAMASW_20161025081540',\n 'mm_to_degrees':'NIRCAMASW_RNDNCToOTESKY_202110261138',\n },\n 'NRCA2_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RND_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_2_20161025081547',\n 'pixels_to_mm':'NIRCAMASW_2ToNIRCAMASW_20161025081547',\n 'mm_to_degrees':'NIRCAMASW_RNDToOTESKY_202005150434',\n },\n 'NRCA3_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RNDNC_202110261138',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_3_20161025081552',\n 'pixels_to_mm':'NIRCAMASW_3ToNIRCAMASW_20161025081552',\n 'mm_to_degrees':'NIRCAMASW_RNDNCToOTESKY_202110261138',\n },\n 'NRCA4_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMASW_RND_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_4_20161025081557',\n 'pixels_to_mm':'NIRCAMASW_4ToNIRCAMASW_20161025081557',\n 'mm_to_degrees':'NIRCAMASW_RNDToOTESKY_202005150434',\n },\n 'NRCA1_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BARNC_202110261138',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_1_20161025081540',\n 'pixels_to_mm':'NIRCAMASW_1ToNIRCAMASW_20161025081540',\n 'mm_to_degrees':'NIRCAMASW_BARNCToOTESKY_202110261138',\n },\n 'NRCA2_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BAR_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_2_20161025081547',\n 'pixels_to_mm':'NIRCAMASW_2ToNIRCAMASW_20161025081547',\n 'mm_to_degrees':'NIRCAMASW_BARToOTESKY_202005150434',\n },\n 'NRCA3_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BARNC_202110261138',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_3_20161025081552',\n 'pixels_to_mm':'NIRCAMASW_3ToNIRCAMASW_20161025081552',\n 'mm_to_degrees':'NIRCAMASW_BARNCToOTESKY_202110261138',\n },\n 'NRCA4_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMASW_BAR_202005150434',\n 'mm_to_pixels':'NIRCAMASWToNIRCAMASW_4_20161025081557',\n 'pixels_to_mm':'NIRCAMASW_4ToNIRCAMASW_20161025081557',\n 'mm_to_degrees':'NIRCAMASW_BARToOTESKY_202005150434',\n },\n 'NRCA5_FULL_WEDGE_RND' :{'degrees_to_mm':'OTESKYToNIRCAMALW_RND_202005150434',\n 'mm_to_pixels':'NIRCAMALWToNIRCAMALW_1_20161227162042',\n 'pixels_to_mm':'NIRCAMALW_1ToNIRCAMALW_20161227162042',\n 'mm_to_degrees':'NIRCAMALW_RNDToOTESKY_202005150434',\n },\n 'NRCA5_FULL_WEDGE_BAR' :{'degrees_to_mm':'OTESKYToNIRCAMALW_BAR_202005150434',\n 'mm_to_pixels':'NIRCAMALWToNIRCAMALW_1_20161227162042',\n 'pixels_to_mm':'NIRCAMALW_1ToNIRCAMALW_20161227162042',\n 'mm_to_degrees':'NIRCAMALW_BARToOTESKY_202005150434',\n }\n }\n\n # coldfit_source_data_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}'.format('cold_fit_201703071210.csv'))\n coldfit_source_data_file = os.path.join(JWST_SOURCE_DATA_ROOT, instrument, '{}'.format('nircam_cold_fit.txt'))\n print('NIRCam coldfit data from', coldfit_source_data_file)\n t = open(coldfit_source_data_file)\n coldfit_source_data = t.readlines()\n t.close()\n # remove comments from read content\n coldfit_source_data = [line for line in coldfit_source_data if line[0] != '#']\n\n siaf_detector_layout = iando.read.read_siaf_detector_layout()\n # siaf_alignment_parameters = iando.read.read_siaf_alignment_parameters(instrument)\n siaf_aperture_definitions = iando.read.read_siaf_aperture_definitions(instrument)\n # aperture_dict = {}\n aperture_name_list = siaf_aperture_definitions['AperName'].tolist()\n\n # generate alignment reference file, one file for all master apertures\n outfile = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_alignment.txt'.format(instrument.lower()))\n siaf_alignment = 
Table()\n\n for AperName in aperture_name_list:\n\n # process the master apertures of NIRCam\n if AperName in siaf_detector_layout['AperName']:\n (A, B, C, D, betaX, betaY, V2Ref, V3Ref) = nircam_get_polynomial_both(AperName, siaf_aperture_definitions, coldfit_name_mapping, coldfit_source_data)\n\n #generate distortion reference file\n number_of_coefficients = len(A)\n polynomial_degree = int((np.sqrt(8 * number_of_coefficients + 1) - 3) / 2)\n siaf_index = []\n exponent_x = []\n exponent_y = []\n for i in range(polynomial_degree + 1):\n for j in np.arange(i + 1):\n siaf_index.append('{:d}{:d}'.format(i, j))\n exponent_x.append(i - j)\n exponent_y.append(j)\n\n distortion_reference_table = Table((siaf_index, exponent_x, exponent_y, A, B, C, D), names=(\n 'siaf_index', 'exponent_x', 'exponent_y', 'Sci2IdlX', 'Sci2IdlY', 'Idl2SciX', 'Idl2SciY'))\n distortion_reference_table.add_column(\n Column([AperName] * len(distortion_reference_table), name='AperName'), index=0)\n distortion_reference_file_name = os.path.join(JWST_SOURCE_DATA_ROOT, instrument,\n '{}_siaf_distortion_{}.txt'.format(instrument.lower(),\n AperName.lower()))\n # distortion_reference_table.pprint()\n comments = []\n comments.append('{} distortion reference file for SIAF\\n'.format(instrument))\n comments.append('Aperture: {}\\n'.format(AperName))\n comments.append('Based on coefficients given in {},'.format(os.path.basename(coldfit_source_data_file)))\n # comments.append('that were rescaled, shifted for a different reference pixel location, and rotated:')\n # comments.append('Rotation of {:2.3f} deg was removed and is carried separately in V3IdlYangle.'.format(\n # np.rad2deg(V3angle))) # *units.deg.to(units.arcsecond)\n # if 'may_2015' in distortion_file_name:\n # comments.append(\n # 'These parameters are stored in the currently (January 2018) active SIAF (PRDOPSSOC-G-012). 
')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n distortion_reference_table.meta['comments'] = comments\n distortion_reference_table.write(distortion_reference_file_name, format='ascii.fixed_width',\n delimiter=',', delimiter_pad=' ', bookend=False, overwrite=True)\n\n V3SciYAngle = betaY\n V3SciXAngle = betaX\n if np.abs(V3SciYAngle) < 90.:\n V3IdlYAngle = V3SciYAngle\n else:\n V3IdlYAngle = V3SciYAngle - np.sign(V3SciYAngle) * 180.\n\n if len(siaf_alignment) == 0: # first entry\n siaf_alignment['AperName'] = ['{:>30}'.format(AperName)]\n siaf_alignment['V3IdlYAngle'] = [V3IdlYAngle]\n siaf_alignment['V3SciXAngle'] = V3SciXAngle #[np.rad2deg(betaX)]\n siaf_alignment['V3SciYAngle'] = V3SciYAngle #[np.rad2deg(betaY)]\n siaf_alignment['V2Ref'] = [V2Ref]\n siaf_alignment['V3Ref'] = [V3Ref]\n else:\n siaf_alignment.add_row(['{:>30}'.format(AperName), V3IdlYAngle, V3SciXAngle, V3SciYAngle, V2Ref,V3Ref])\n comments = []\n comments.append('{} alignment parameter reference file for SIAF'.format(instrument))\n comments.append('')\n comments.append('This file contains the focal plane alignment parameters of master apertures calibrated')\n comments.append('during FGS-SI alignment.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n siaf_alignment.meta['comments'] = comments\n siaf_alignment.write(outfile, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False, overwrite=True)", "def create_database(db_file: str, table_data: List) -> None:\n connection = None\n add_col = []\n\n table_root = '''id INTEGER PRIMARY KEY,\n iteration INTEGER NOT NULL,\n best_local_min TEXT NOT NULL,\n current_epoch INTEGER NOT NULL,\n trades_count INTEGER NOT NULL,\n avg_profit_pct REAL NOT NULL,\n total_profit_currency REAL NOT NULL,\n total_profit_pct REAL NOT NULL,\n avg_duration_minutes REAL NOT NULL,\n loss_func REAL NOT NULL, '''\n\n spaces_col = {'buy': 'buy TEXT NOT NULL',\n 'sell': 'sell TEXT NOT NULL',\n 'roi': 'roi TEXT NOT NULL',\n 'stoploss': 'stoploss TEXT NOT NULL',\n 'trailing': 'trailing TEXT NOT NULL'}\n\n try:\n os.remove(db_file)\n except OSError as err:\n print(err)\n\n try:\n connection = sqlite3.connect(db_file)\n cursor = connection.cursor()\n print(f\"{Fore.MAGENTA}Successfully connected to SQLite DB - {db_file}{Fore.RESET}\")\n\n if 'all' in table_data:\n table_data = ['buy', 'sell', 'roi', 'stoploss', 'trailing']\n elif 'default' in table_data:\n table_data = ['buy', 'sell', 'roi', 'stoploss']\n\n for param in table_data:\n add_col.append(spaces_col[param])\n\n table_root += ', '.join(add_col)\n\n create_hyperopt_data_table = 'CREATE TABLE hyperopt_results (' + table_root + ');'\n\n cursor.execute(create_hyperopt_data_table)\n connection.commit()\n print(f'{Fore.MAGENTA}Table successfully created.{Fore.RESET}')\n\n cursor.close()\n except sqlite3.Error as err:\n print(err)\n finally:\n if connection:\n connection.close()\n print(f'{Fore.MAGENTA}The SQLite connection is closed{Fore.RESET}')", "def makeAmcvnTable(size=100, database=VARIABILITY_DB, **kwargs):\n\n # a haphazard sample of white dwarf SEDs\n sedFiles = ['bergeron_He_4750_70.dat_4950', 'bergeron_50000_85.dat_54000']\n\n conn = sqlite3.connect(database)\n c = conn.cursor()\n try:\n c.execute('''CREATE TABLE amcvn\n (varsimobjid int, variability text, sedfilename text, parallax 
real, ebv real)''')\n conn.commit()\n except:\n return\n\n rng = np.random.RandomState(32)\n doesBurst = rng.randint(0, 2, size=size)\n burst_freq = rng.randint(10, 150, size=size)\n burst_scale = 115.0\n amp_burst = rng.random_sample(size)*8.0\n color_excess_during_burst = rng.random_sample(size)*0.2-0.4\n amplitude = rng.random_sample(size)*0.2\n period = rng.random_sample(size)*200.0\n mjDisplacement = rng.random_sample(size)*500.0\n for i in range(size):\n sedFile = sedFiles[rng.randint(0, len(sedFiles))]\n varParam = {'varMethodName': 'applyAmcvn',\n 'pars': {'does_burst': int(doesBurst[i]), # have to cast to int from np.int for json\n 'burst_freq': int(burst_freq[i]),\n 'burst_scale': burst_scale,\n 'amp_burst': amp_burst[i],\n 'color_excess_during_burst': color_excess_during_burst[i],\n 'amplitude': amplitude[i],\n 'period': period[i],\n 't0': 51500.0-mjDisplacement[i]}}\n\n paramStr = json.dumps(varParam)\n\n qstr = '''INSERT INTO amcvn VALUES (%i, '%s', '%s', 0.01, 0.7)''' % (i, paramStr, sedFile)\n c.execute(qstr)\n conn.commit()\n conn.close()", "def insertSQL(data):\n \"\"\"Has abbrv1, full name, abbrv2, full name, date \"\"\"\n# print(data)\n gameInfo = data[0] \n date = gameInfo[4][0] + '_' + gameInfo[4][1] + '_' + gameInfo[4][2]\n tableName = gameInfo[0] + '_' + gameInfo[2] + '_' + date\n \n gameTemp = executeReturn(\"SELECT * FROM allgames2017\")\n tableNames = []\n for i in gameTemp:\n tableNames.append(i[1])\n if(len(gameTemp) != 0):\n lastRow = int(gameTemp[len(gameTemp) - 1][0])\n lastRow += 1\n else:\n lastRow = 0\n \n if(tableName in tableNames):\n return\n \n execute(\"INSERT INTO allgames2017 VALUES (? , ?)\", [lastRow, tableName])\n \n for i in data:\n if(gameInfo[0] in i):\n i[4] = date\n \n# for i in data:\n# print(i)\n \n longestRow = 0 \n for i in data:\n if(len(i) > longestRow):\n longestRow = len(i) \n \n ''' Make text CC1 TEXT, CC2 TEXT, .... 
'''\n columnText = \"\"\n questionText = \"\"\n \n for i in range(0,longestRow):\n columnText += 'CC' + str(i) + ' TEXT,'\n questionText += '?,'\n columnText = columnText[0:len(columnText)-1]\n questionText = questionText[0:len(questionText)-1]\n \n# print(columnText)\n# for i in data:\n# print(i)\n \n execute('DROP TABLE IF EXISTS ' + tableName, None)\n execute('CREATE TABLE ' + tableName + '(' + columnText + ')', None)\n \n '''Feed data into sqlite'''\n for i in range(0, len(data)):\n tempArray = []\n \n for j in range(0,longestRow):\n if(j < len(data[i])):\n tempArray.append(data[i][j])\n else:\n tempArray.append(\"\")\n# print(tempArray)\n execute('INSERT INTO ' + tableName + ' VALUES (' + questionText + ')', tempArray)", "def doParametersOfInterest(self):\n \n self.modelBuilder.doVar('expr::cosW(\"0.87681811112\",)')\n self.modelBuilder.doVar('expr::sinW(\"0.48082221247\",)')\n self.modelBuilder.doVar('expr::mZ(\"91.2\",)')\n self.modelBuilder.doVar('expr::Lambda1(\"100.0\",)')\n self.modelBuilder.doVar('expr::e2(\"0.0917\",)')\n self.modelBuilder.doVar('expr::gs2(\"1.533\",)')\n\n # EFT Higgs basis couplings\n\n self.modelBuilder.doVar('cZ[0,-1,1]') \n self.modelBuilder.doVar(\"cZZ[0,-2,2]\") \n self.modelBuilder.doVar(\"cZZt[0,-2,2]\") \n self.modelBuilder.doVar(\"cZB[0,-6,6]\") \n\n poi='cZ,cZZ,cZZt,cZB'\n\n # Amplitude couplings from EFT couplings \n\n self.modelBuilder.doVar('expr::a1(\"@0+1\",cZ)') # (\"2*(@0+1)\",cZ) in AN/Paper but a1 = 1 for signal model and width calculation\n self.modelBuilder.doVar('expr::a2(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZ,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::a3(\"-1*@0*(@1/(2*pow(@2,2)*pow(@3,2)))\",cZZt,e2,sinW,cosW)')\n self.modelBuilder.doVar('expr::k1(\"@0*(@1*pow(@2,2)/(pow(@3,2)*pow(@4,2)))\",cZB,e2,Lambda1,sinW,mZ)')\n self.modelBuilder.doVar('expr::k1L1(\"@0/pow(@1,2)\",k1,Lambda1)')\n\n ###### gamma_H ########\n\n # SMEFT relationships for VV couplings (Expressed using amplitude couplings)\n\n self.modelBuilder.doVar('expr::kappa(\"1.0\",)')\n self.modelBuilder.doVar('expr::kappa_tilde(\"0.0\",)') \n\n self.modelBuilder.doVar('expr::a1_WW(\"@0\",a1)')\n self.modelBuilder.doVar('expr::a2_WW(\"@0*@0*@1\",cosW,a2)')\n self.modelBuilder.doVar('expr::a3_WW(\"@0*@0*@1\",cosW,a3)')\n self.modelBuilder.doVar('expr::k1_WW(\"(@2 / (@0*@0 - @1*@1) - 2*@1*@1*@3*@4*@4 /(@5*@5*(@0*@0 - @1*@1)))\",cosW,sinW,k1,a2,Lambda1,mZ)')\n self.modelBuilder.doVar('expr::k2_k1(\"2*@0*@1*@2/(@0*@0 - @1*@1)\",cosW,sinW,k1)')\n self.modelBuilder.doVar('expr::k2_a2(\"-2*@0*@1*@3*@4*@4/((@2*@2)*(@0*@0 - @1*@1))\",cosW,sinW,mZ,a2,Lambda1)')\n self.modelBuilder.doVar('expr::k2(\"@0 + @1\",k2_k1,k2_a2)')\n\n # Determine gamma_H from VV couplings\n\n zz_expr = '\"4*(@0*@0/4. + 0.1695*@3*@3 + 0.09076*@1*@1 + 0.03809*@2*@2 + 0.8095*@0*@3/2. + 0.5046*@0*@1/2. + 0.2092*@1*@3 + 0.1023*@4*@4 + 0.1901*@0*@4/2. + 0.07429*@3*@4 + 0.04710*@1*@4) \",a1,a2,a3,k1,k2'\n ww_expr = '\"4*(@0*@0/4. + 0.1320*@3*@3 + 0.1944*@1*@1 + 0.08075*@2*@2 + 0.7204*@0*@3/2. + 0.7437*@0*@1/2. + 0.2774*@3*@1) \",a1_WW,a2_WW,a3_WW,k1_WW'\n zgamma_expr = '\"4*(1.118600*@0*@0/4. +0.0035*@1*@1 - 0.125010*@0*@1/2. + 0.000003*@1*@1 - 0.00018*@1*@1 + 0.003100*@0*@1/2. 
+0.00126*@2*@2 + 0.000005*@2*@2 -0.00047*@2*@2)\",a1_WW,kappa,kappa_tilde'\n gg_expr = '\"(1.1068*@0*@0 + 0.0082*@0*@0 - 0.1150*@0*@0 + 2.5717*@1*@1 + 0.0091*@1*@1 - 0.1982*@1*@1)\",kappa,kappa_tilde'\n bb_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n cc_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n tautau_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n mumu_expr = '\"(@0*@0 + @1*@1)\",kappa,kappa_tilde'\n gmgm_expr = '\"4*(1.6054*@0*@0/4. + 0.07312*@1*@1 - 0.6854*@0*@1/2. + 0.00002*@1*@1 - 0.0018*@1*@1 + 0.0085*@0*@1/2. + 0.1699*@2*@2 + 0.00002*@2*@2 - 0.0031*@2*@2)\",a1_WW,kappa,kappa_tilde'\n \n self.modelBuilder.doVar('expr::R_WW('+str(ww_expr)+')')\n self.modelBuilder.doVar('expr::R_ZZ('+str(zz_expr)+')')\n self.modelBuilder.doVar('expr::R_Zgamma('+str(zgamma_expr)+')')\n self.modelBuilder.doVar('expr::R_gg('+str(gg_expr)+')')\n self.modelBuilder.doVar('expr::R_bb('+str(bb_expr)+')')\n self.modelBuilder.doVar('expr::R_cc('+str(cc_expr)+')')\n self.modelBuilder.doVar('expr::R_tautau('+str(tautau_expr)+')')\n self.modelBuilder.doVar('expr::R_mumu('+str(mumu_expr)+')')\n self.modelBuilder.doVar('expr:R_gammagamma('+str(gmgm_expr)+')')\n\n self.modelBuilder.doVar('expr::gammaH(\"(0.5824*@0 + 0.2137*@1 + 0.08187*@2 + 0.06272*@3 + 0.02891*@4 + 0.02619*@5 + 0.002270*@6 + 0.001533*@7 + 0.0002176*@8 )/0.9998\",R_bb,R_WW,R_gg,R_tautau,R_cc,R_ZZ,R_gammagamma,R_Zgamma,R_mumu)') \n\n ###########################\n\n self.g1V = GetCoupTerms(1,1,1,-0.0001,\"1V\") # Compensate for scaling of k1 templates \n self.g2V = GetCoupTerms(1,1,1,-0.0001,\"2V\") \n \n self.modelBuilder.doVar(\"expr::g2V_1(\\\"\"+str(self.g2V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1(\\\"((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T1_Neg(\\\"-1*((pow(@0,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_1)\") \n self.modelBuilder.doVar(\"expr::g2V_2(\\\"\"+str(self.g2V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2(\\\"((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T2_Neg(\\\"-1*((pow(@0,3)*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_2)\") \n self.modelBuilder.doVar(\"expr::g2V_3(\\\"\"+str(self.g2V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3(\\\"((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T3_Neg(\\\"-1*((pow(@0,2)*pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_3)\") \n self.modelBuilder.doVar(\"expr::g2V_4(\\\"\"+str(self.g2V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4(\\\"((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T4_Neg(\\\"-1*((@0*pow(@1,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_4)\") \n self.modelBuilder.doVar(\"expr::g2V_5(\\\"\"+str(self.g2V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5(\\\"((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T5_Neg(\\\"-1*((pow(@1,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_5)\") \n self.modelBuilder.doVar(\"expr::g2V_6(\\\"\"+str(self.g2V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6(\\\"((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T6_Neg(\\\"-1*((pow(@0,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_6)\") \n 
self.modelBuilder.doVar(\"expr::g2V_7(\\\"\"+str(self.g2V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7(\\\"((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T7_Neg(\\\"-1*((pow(@0,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_7)\") \n self.modelBuilder.doVar(\"expr::g2V_8(\\\"\"+str(self.g2V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8(\\\"((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T8_Neg(\\\"-1*((@0*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_8)\") \n self.modelBuilder.doVar(\"expr::g2V_9(\\\"\"+str(self.g2V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9(\\\"((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T9_Neg(\\\"-1*((pow(@2,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_9)\") \n self.modelBuilder.doVar(\"expr::g2V_10(\\\"\"+str(self.g2V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10(\\\"((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T10_Neg(\\\"-1*((pow(@0,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_10)\") \n self.modelBuilder.doVar(\"expr::g2V_11(\\\"\"+str(self.g2V[10])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11(\\\"((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T11_Neg(\\\"-1*((pow(@0,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_11)\") \n self.modelBuilder.doVar(\"expr::g2V_12(\\\"\"+str(self.g2V[11])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12(\\\"((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T12_Neg(\\\"-1*((@0*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_12)\") \n self.modelBuilder.doVar(\"expr::g2V_13(\\\"\"+str(self.g2V[12])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13(\\\"((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T13_Neg(\\\"-1*((pow(@3,4))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_13)\") \n self.modelBuilder.doVar(\"expr::g2V_14(\\\"\"+str(self.g2V[13])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14(\\\"((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T14_Neg(\\\"-1*((pow(@1,3)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_14)\") \n self.modelBuilder.doVar(\"expr::g2V_15(\\\"\"+str(self.g2V[14])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15(\\\"((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T15_Neg(\\\"-1*((pow(@1,2)*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_15)\") \n self.modelBuilder.doVar(\"expr::g2V_16(\\\"\"+str(self.g2V[15])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16(\\\"((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T16_Neg(\\\"-1*((@1*pow(@2,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_16)\") \n self.modelBuilder.doVar(\"expr::g2V_17(\\\"\"+str(self.g2V[16])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T17(\\\"((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T17_Neg(\\\"-1*((pow(@1,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_17)\") \n self.modelBuilder.doVar(\"expr::g2V_18(\\\"\"+str(self.g2V[17])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18(\\\"((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T18_Neg(\\\"-1*((pow(@1,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_18)\") \n self.modelBuilder.doVar(\"expr::g2V_19(\\\"\"+str(self.g2V[18])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19(\\\"((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T19_Neg(\\\"-1*((@1*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_19)\") \n self.modelBuilder.doVar(\"expr::g2V_20(\\\"\"+str(self.g2V[19])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20(\\\"((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T20_Neg(\\\"-1*((pow(@2,3)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_20)\") \n self.modelBuilder.doVar(\"expr::g2V_21(\\\"\"+str(self.g2V[20])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21(\\\"((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T21_Neg(\\\"-1*((pow(@2,2)*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_21)\") \n self.modelBuilder.doVar(\"expr::g2V_22(\\\"\"+str(self.g2V[21])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22(\\\"((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T22_Neg(\\\"-1*((@2*pow(@3,3))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_22)\") \n self.modelBuilder.doVar(\"expr::g2V_23(\\\"\"+str(self.g2V[22])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23(\\\"((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T23_Neg(\\\"-1*((@0*@1*pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_23)\") \n self.modelBuilder.doVar(\"expr::g2V_24(\\\"\"+str(self.g2V[23])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24(\\\"((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T24_Neg(\\\"-1*((@0*pow(@1,2)*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_24)\") \n self.modelBuilder.doVar(\"expr::g2V_25(\\\"\"+str(self.g2V[24])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25(\\\"((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T25_Neg(\\\"-1*((pow(@0,2)*@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_25)\") \n self.modelBuilder.doVar(\"expr::g2V_26(\\\"\"+str(self.g2V[25])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26(\\\"((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T26_Neg(\\\"-1*((@0*@1*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_26)\") \n self.modelBuilder.doVar(\"expr::g2V_27(\\\"\"+str(self.g2V[26])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27(\\\"((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T27_Neg(\\\"-1*((@0*pow(@1,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_27)\") \n self.modelBuilder.doVar(\"expr::g2V_28(\\\"\"+str(self.g2V[27])+\"\\\",)\") \n 
self.modelBuilder.factory_(\"expr::scale_Ewk_T28(\\\"((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T28_Neg(\\\"-1*((pow(@0,2)*@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_28)\") \n self.modelBuilder.doVar(\"expr::g2V_29(\\\"\"+str(self.g2V[28])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29(\\\"((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T29_Neg(\\\"-1*((@0*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_29)\") \n self.modelBuilder.doVar(\"expr::g2V_30(\\\"\"+str(self.g2V[29])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30(\\\"((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T30_Neg(\\\"-1*((@0*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_30)\") \n self.modelBuilder.doVar(\"expr::g2V_31(\\\"\"+str(self.g2V[30])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31(\\\"((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T31_Neg(\\\"-1*((pow(@0,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_31)\") \n self.modelBuilder.doVar(\"expr::g2V_32(\\\"\"+str(self.g2V[31])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32(\\\"((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T32_Neg(\\\"-1*((@1*@2*pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_32)\") \n self.modelBuilder.doVar(\"expr::g2V_33(\\\"\"+str(self.g2V[32])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33(\\\"((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T33_Neg(\\\"-1*((@1*pow(@2,2)*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_33)\") \n self.modelBuilder.doVar(\"expr::g2V_34(\\\"\"+str(self.g2V[33])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34(\\\"((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T34_Neg(\\\"-1*((pow(@1,2)*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_34)\") \n self.modelBuilder.doVar(\"expr::g2V_35(\\\"\"+str(self.g2V[34])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35(\\\"((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n self.modelBuilder.factory_(\"expr::scale_Ewk_T35_Neg(\\\"-1*((@0*@1*@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g2V_35)\") \n \n self.modelBuilder.doVar(\"expr::g1V_1(\\\"\"+str(self.g1V[0])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1(\\\"((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T1_Neg(\\\"-1*((pow(@0,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_1)\") \n self.modelBuilder.doVar(\"expr::g1V_2(\\\"\"+str(self.g1V[1])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2(\\\"((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T2_Neg(\\\"-1*((@0*@1)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_2)\") \n self.modelBuilder.doVar(\"expr::g1V_3(\\\"\"+str(self.g1V[2])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3(\\\"((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T3_Neg(\\\"-1*((pow(@1,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_3)\") \n 
self.modelBuilder.doVar(\"expr::g1V_4(\\\"\"+str(self.g1V[3])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4(\\\"((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T4_Neg(\\\"-1*((@0*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_4)\") \n self.modelBuilder.doVar(\"expr::g1V_5(\\\"\"+str(self.g1V[4])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5(\\\"((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T5_Neg(\\\"-1*((pow(@2,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_5)\") \n self.modelBuilder.doVar(\"expr::g1V_6(\\\"\"+str(self.g1V[5])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6(\\\"((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T6_Neg(\\\"-1*((@0*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_6)\") \n self.modelBuilder.doVar(\"expr::g1V_7(\\\"\"+str(self.g1V[6])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7(\\\"((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T7_Neg(\\\"-1*((pow(@3,2))/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_7)\") \n self.modelBuilder.doVar(\"expr::g1V_8(\\\"\"+str(self.g1V[7])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8(\\\"((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T8_Neg(\\\"-1*((@1*@2)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_8)\") \n self.modelBuilder.doVar(\"expr::g1V_9(\\\"\"+str(self.g1V[8])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9(\\\"((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T9_Neg(\\\"-1*((@1*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_9)\") \n self.modelBuilder.doVar(\"expr::g1V_10(\\\"\"+str(self.g1V[9])+\"\\\",)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10(\\\"((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n self.modelBuilder.factory_(\"expr::scale_ggH_T10_Neg(\\\"-1*((@2*@3)/@4)*@5\\\", a1, a2, a3, k1L1, gammaH, g1V_10)\") \n \n self.modelBuilder.doSet(\"POI\",poi)", "def pre_process(db):\n conn = sqlite3.connect(db)\n data = pd.read_sql_query(\"Select Delta_T, V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V20, V21, V22, V23, V24, V25, V26, V27, V28, Amount , Class from transactions;\", conn)\n train_split = int(0.8*len(data))\n train = data[0:train_split]\n test = data[train_split:len(data)]\n train_x = train.loc[:, ['Delta_T', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V10', 'V11', 'V12', 'V13', 'V14', 'V15', 'V16', 'V17', 'V18', 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27', 'V28', 'Amount']]\n train_y = train.loc[:, ['Class']]\n test_x = test.loc[:, ['Delta_T', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V10', 'V11', 'V12', 'V13', 'V14', 'V15', 'V16', 'V17', 'V18', 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27', 'V28', 'Amount']]\n test_y = test.loc[:, ['Class']]\n train_x = train_x.to_numpy()\n train_y = np.squeeze(train_y.to_numpy(),axis=1)\n test_x = test_x.to_numpy()\n test_y = np.squeeze(test_y.to_numpy(),axis=1)\n return train_x,train_y,test_x,test_y", "def create_database(excelpath, database=None):\n\n def generate_database_from_metadatas(metadata_dict, stimulus_dict):\n \"\"\"\n Given a dictionary of session objects with their metadata creates a new database\n with all the 
sessions in the database and the associated metadata\n \"\"\"\n # Create empty database from template class\n indexes = sorted(metadata_dict.keys())\n database = pd.DataFrame(index=indexes, columns=['Number', 'Metadata', 'Tracking', 'Registration', 'Stimuli'])\n\n # Fill in metadata from the dictionary\n for sessname, metadata in sorted(metadata_dict.items()):\n database['Metadata'][sessname] = metadata\n database['Number'][sessname] = metadata['number']\n\n for sessname, stimulus in sorted(stimulus_dict.items()):\n database['Stimuli'][sessname] = stimulus\n\n print(colored('Database initialized.','yellow'))\n return database\n\n def get_session_videodata(videos):\n \"\"\"\n Get relevant variables for video files\n \"\"\"\n # Get first frame of first video for future processing and number of frames in each video\n videos_data = {'Frame rate': [], 'Number frames': []}\n for idx, videofile in enumerate(videos):\n cap = cv2.VideoCapture(videofile)\n videos_data['Frame rate'].append(cap.get(cv2.CAP_PROP_FPS))\n videos_data['Number frames'].append(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)))\n videos_data['Cumu. Num Frames'] = np.cumsum(videos_data['Number frames'])\n return videos_data\n\n def get_stim_onset_times(sessions, metadata_dict):\n \"\"\"\n loops over a list of dictionary with the info for each session and gets all the stimulus onset times\n \"\"\"\n if not isinstance(sessions, list):\n sessions = list(sessions)\n\n for line in sessions:\n session_id = line['Sess.ID']\n if session_id: # we loaded a line with session info\n session_name = '{}_{}'.format(line['Experiment'], line['Sess.ID'])\n\n # Check if session is already in database\n if database is not None and session_name in database.index:\n continue\n session_stimuli = {}\n session_stimuli['session_id'] = session_id\n session_stimuli['stimuli'] = {}\n session_stimuli['stimuli']['visual'] = []\n session_stimuli['stimuli']['audio'] = []\n session_stimuli['stimuli']['digital'] = []\n videopaths = []\n # load data from .tdms and .avi fils\n for recording in line['Recordings']:\n path = os.path.join(line['Base fld'], line['Exp fld'], recording)\n for f in os.listdir(path):\n if '.avi' in f:\n videopaths.append(os.path.join(path, f))\n print(videopaths)\n elif '.tdms' == f[-5:]:\n tdmspath = os.path.join(path, f)\n # Loop over each .tdms file and extract stimuli frames\n print(colored('Loading {}: {}'.format(session_name,os.path.basename(tdmspath)),'yellow'))\n tdms = TdmsFile(tdmspath)\n if metadata_dict[session_name]['software'] == 'behaviour':\n visual_rec_stims, audio_rec_stims, digital_rec_stims = [], [], []\n for group in tdms.groups():\n for obj in tdms.group_channels(group):\n if 'stimulis' in str(obj).lower():\n for idx in obj.as_dataframe().loc[0].index:\n if \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n elif \"/' \" in idx:\n framen = int(idx.split(\"/' \")[1].split('-')[0])\n else:\n framen = int(idx.split(\"/'\")[2].split('-')[0])\n if 'visual' in str(obj).lower():\n visual_rec_stims.append(framen)\n elif 'audio' in str(obj).lower():\n audio_rec_stims.append(framen)\n elif 'digital' in str(obj).lower():\n digital_rec_stims.append(framen)\n else:\n print(colored('Couldnt load stim correctly','yellow'))\n # Now use the AI channels to find the *real* stimulus onset times and replace them\n if audio_rec_stims:\n stimulus_on_idx = np.where(tdms.group_channels('AI')[3].data > .55)[0] #in first data sets this is AI 1, later AI 2\n idx_since_last_stimulus_on = np.diff(stimulus_on_idx)\n if 
stimulus_on_idx.size:\n stimulus_start_idx = stimulus_on_idx[np.append(np.ones(1).astype(bool),idx_since_last_stimulus_on>2*10000)] #usually 10 or 30\n stimulus_start_frame = np.ceil(stimulus_start_idx / 10000 / (33 + 1 / 3) * 1000).astype(int)\n stimulus_start_frame = stimulus_start_frame[stimulus_start_frame > 300]\n else:\n stimulus_start_frame = np.array(audio_rec_stims)\n print('NO STIMULI FOUND!!')\n\n if len(stimulus_start_frame) != len(audio_rec_stims):\n print('audio AI channel does not match number of timestamps by ' + str(len(audio_rec_stims)-len(stimulus_start_frame)) )\n else:\n discrepancy = stimulus_start_frame - audio_rec_stims\n if sum(discrepancy>8):\n print('audio AI channel does not match values of timestamps')\n else:\n print(discrepancy)\n # for conditioning experiment, just use what the tdms says\n # if 'food' in line['Experiment']:\n # stimulus_start_frame = np.array(audio_rec_stims)\n audio_rec_stims = list(stimulus_start_frame)\n\n session_stimuli['stimuli']['visual'].append(visual_rec_stims)\n session_stimuli['stimuli']['audio'].append(audio_rec_stims)\n session_stimuli['stimuli']['digital'].append(digital_rec_stims)\n\n else:\n \"\"\" HERE IS WHERE THE CODE TO GET THE STIM TIMES IN MANTIS WILL HAVE TO BE ADDEDD \"\"\"\n pass\n\n # Add to dictionary (or update entry)\n stimulus_dict[session_name] = session_stimuli\n return stimulus_dict\n\n def get_metadata(sessions):\n \"\"\"\n loops over a list of dictionary with the info for each session and gets all the metadata\n \"\"\"\n if not isinstance(sessions, list):\n sessions = list(sessions)\n\n for line in sessions:\n session_id = line['Sess.ID']\n if session_id: # we loaded a line with session info\n session_name = '{}_{}'.format(line['Experiment'], line['Sess.ID'])\n\n # Check if session is already in database\n # if database is not None and session_name in database.index:\n # print(colored('Session is already in database','yellow'))\n # continue\n\n # Create the metadata\n session_metadata = {}\n session_metadata['session_id'] = session_id\n session_metadata['experiment'] = line['Experiment']\n session_metadata['date'] = line['Date']\n session_metadata['mouse_id'] = line['MouseID']\n session_metadata['software'] = line['Software']\n session_metadata['number'] = line['Number']\n\n # initialize video data\n session_metadata['video_file_paths'] = []\n session_metadata['tdms_file_paths'] = []\n session_metadata['videodata'] = []\n\n # load data from .tdms and .avi fils\n for recording in line['Recordings']:\n path = os.path.join(line['Base fld'], line['Exp fld'], recording)\n videopaths = []\n for f in os.listdir(path):\n if '.avi' in f:\n videopaths.append(os.path.join(path, f))\n elif '.tdms' == f[-5:]:\n tdmspath = os.path.join(path, f)\n\n # add file paths to metadata\n session_metadata['video_file_paths'].append(videopaths)\n session_metadata['tdms_file_paths'].append(tdmspath)\n\n # Loop over each video and get the relevant data [e.g., number of frames, fps...]\n session_metadata['videodata'].append(get_session_videodata(videopaths))\n\n # Add to dictionary (or update entry)\n metadata_dict[session_name] = session_metadata\n return metadata_dict\n\n\n ''' MAIN SECTION OF SCRIPT TO GENERATE DATABASE '''\n\n loaded_excel = pyexcel.get_records(file_name=excelpath)\n\n # Create a dictionary with each session's name as key and its metadata as value\n stimulus_dict, metadata_dict, all_metadata = {}, {}, []\n for line in loaded_excel: # Read each line in the excel spreadsheet and load info\n temp = {\n 'Sess.ID': 
line['Sess.ID'],\n 'Date': line['Date'],\n 'MouseID': line['MouseID'],\n 'Experiment': line['Experiment'],\n 'Software': line['Software'],\n 'Base fld': line['Base fld'],\n 'Exp fld': line['Exp fld'],\n 'Recordings': line['Sub Folders'].split('; '),\n 'Number': line['Number']\n }\n all_metadata.append(temp)\n\n # Loop over each recordings subfolder and check that the paths are correct [fast check]\n for line in all_metadata:\n for recording in line['Recordings']:\n path = os.path.join(line['Base fld'], line['Exp fld'], recording)\n if not os.path.exists(path):\n raise ValueError('Folder not found\\n{}'.format(path))\n print(colored('Excel spreadsheet loaded correctly. Now loading metadata.','yellow'))\n\n # Use loaded metadata to create the database. Threadpooled for faster execution\n num_parallel_processes = 4\n splitted_all_metadata = [all_metadata[i::num_parallel_processes] for i in range(num_parallel_processes)]\n pool = ThreadPool(num_parallel_processes)\n\n # get metadata for *all* sessions\n _ = pool.map(get_metadata, splitted_all_metadata)\n\n # get stimulus information for *new* sessions\n splitted_all_metadata = [(all_metadata[i::num_parallel_processes], metadata_dict) for i in range(num_parallel_processes)]\n _ = pool.starmap(get_stim_onset_times, splitted_all_metadata)\n\n # Create new database, and add to the old one if applicable\n if database is None:\n return generate_database_from_metadatas(metadata_dict, stimulus_dict)\n else:\n new_database = generate_database_from_metadatas(metadata_dict, stimulus_dict)\n for index, row in new_database.iterrows():\n if (index in database.index):\n # loop in case of erroneous duplicate entries (only take the first)\n # for stimuli, registration, tracking in zip(database.loc[index].Stimuli, database.loc[index].Registration, database.loc[index].Tracking):\n stimuli, registration, tracking = database.loc[index].Stimuli, database.loc[index].Registration, database.loc[index].Tracking\n new_database.loc[index].Stimuli = stimuli\n new_database.loc[index].Registration = registration\n new_database.loc[index].Tracking = tracking\n # break\n new_database = new_database.sort_values(by='Number')\n\n return new_database.sort_values(by='Number')", "def creatingItemSets(self, iFileName):\n # import pandas as pd\n # global Database\n self.Database = []\n lineNumber = 0\n # data = []\n if isinstance(iFileName, list):\n self.Database = iFileName\n if isinstance(iFileName, pd.DataFrame):\n if iFileName.empty:\n print(\"its empty..\")\n quit()\n i = iFileName.columns.values.tolist()\n if 'Transactions' in i:\n self.Database = iFileName['Transactions'].tolist()\n if 'Patterns' in i:\n self.Database = iFileName['Patterns'].tolist()\n\n if '.CSV' in iFileName:\n file1 = pd.read_csv(iFileName)\n columns = list(file1.head(0))\n if \"Patterns\" in columns:\n with open(iFileName, newline='') as csvFile:\n data = csv.DictReader(csvFile)\n for row in data:\n listValue = row['Patterns']\n l1 = listValue.replace(\"[\", \"\")\n l2 = l1.replace(\"]\", \"\")\n li = list(l2.split(\",\"))\n li1 = [int(i) for i in li]\n self.Database.append(li1)\n if \"Transactions\" in columns:\n with open(iFileName, newline='') as csvFile:\n data = csv.DictReader(csvFile)\n for row in data:\n listValue = row['Transactions']\n l1 = listValue.replace(\"[\", \"\")\n l2 = l1.replace(\"]\", \"\")\n li = list(l2.split(\",\"))\n li1 = [int(i) for i in li]\n self.Database.append(li1)\n else:\n try:\n with open(iFileName, 'r', encoding='utf-8') as f:\n for line in f:\n # line.strip()\n if 
lineNumber == 0:\n lineNumber += 1\n delimiter = self.findDelimiter([*line])\n # li=[lineNumber]\n li = line.split(delimiter)\n li1 = [i.rstrip() for i in li]\n self.Database.append([i.rstrip() for i in li1])\n # else:\n # self.Database.append(li)\n # data.append([lineNumber,li1])\n else:\n lineNumber += 1\n li = line.split(delimiter)\n # if delimiter==',':\n li1 = [i.rstrip() for i in li]\n self.Database.append(li1)\n except IOError:\n print(\"File Not Found\")\n quit()\n\n # else:\n # self.Database=iFileName['Transactions'].tolist()", "def csv2_db_start_end(self, csv_filename):\n conn = sqlite3.connect(self.dbName)\n cursor = conn.cursor()\n df_se = pd.read_csv(csv_filename).dropna().astype(\"int64\")\n arrayFrameStart = df_se[\"StartFrame\"].astype(\"int64\").tolist()\n arrayFrameEnd = df_se[\"EndFrame\"].astype(\"int64\").tolist()\n arrayZeros = np.zeros(len(arrayFrameStart), dtype=int)\n arrayZeroColumns = [[0, 0] for i in range(len(arrayFrameStart))]\n arrayKara = [np.NaN for i in range(len(arrayFrameStart))]\n\n df = pd.DataFrame(\n {\n \"StartFrame\": arrayFrameStart,\n \"EndFrame\": arrayFrameEnd,\n \"Set\": arrayKara,\n \"Game\": arrayKara,\n \"Score\": arrayKara,\n \"ScoreResult\": arrayKara,\n \"FirstSecond\": arrayZeros,\n \"Server\": arrayKara,\n \"PointWinner\": arrayKara,\n \"PointWinA\": arrayKara,\n \"PointWinB\": arrayKara,\n \"PointPattern\": arrayKara,\n \"Fault\": arrayKara,\n \"ContactServeX\": arrayZeros,\n \"ContactServeY\": arrayZeros,\n \"Court1X\": arrayZeros,\n \"Court1Y\": arrayZeros,\n \"Court2X\": arrayZeros,\n \"Court2Y\": arrayZeros,\n \"Court3X\": arrayZeros,\n \"Court3Y\": arrayZeros,\n \"Court4X\": arrayZeros,\n \"Court4Y\": arrayZeros,\n }\n )\n\n df_basic = pd.DataFrame(\n {\n \"playerA\": self.playerA,\n \"playerB\": self.playerB,\n \"number\": self.number,\n \"totalGame\": self.totalGame,\n \"faultFlug\": self.faultFlug,\n },\n index=[0],\n )\n point = []\n frame = []\n bx = []\n by = []\n pax = []\n pay = []\n pbx = []\n pby = []\n h = []\n bh = []\n fb = []\n d = []\n for i in range(len(self.array_ball_position_shot)):\n for j in range(len(self.array_ball_position_shot[i])):\n point.append(self.array_ball_position_shot[i][j][0])\n frame.append(self.array_ball_position_shot[i][j][1])\n bx.append(self.array_ball_position_shot[i][j][2])\n by.append(self.array_ball_position_shot[i][j][3])\n pax.append(self.arrayPlayerAPosition[i][j][2])\n pay.append(self.arrayPlayerAPosition[i][j][3])\n pbx.append(self.arrayPlayerBPosition[i][j][2])\n pby.append(self.arrayPlayerBPosition[i][j][3])\n h.append(self.arrayHitPlayer[i][j])\n bh.append(self.arrayBounceHit[i][j])\n fb.append(self.arrayForeBack[i][j]) # arrayDirection\n d.append(self.arrayDirection[i][j])\n # print(self.arrayHitPlayer[i][j])\n df_shot = pd.DataFrame(\n {\n \"point\": point,\n \"frame\": frame,\n \"ballx\": bx,\n \"bally\": by,\n \"playerAx\": pax,\n \"playerAy\": pay,\n \"playerBx\": pbx,\n \"playerBy\": pby,\n \"hitplayer\": h,\n \"bouncehit\": bh,\n \"foreback\": fb,\n \"direction\": d,\n }\n )\n\n df_basic.to_sql(\"match\", conn, if_exists=\"replace\")\n df.to_sql(\"score\", conn, if_exists=\"replace\")\n df_shot.to_sql(\"shot\", conn, if_exists=\"replace\")\n\n conn.commit()\n conn.close()", "def readVCTPINPUTS(self): \n #fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/VALID.CTPINPUTS\"\n fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/ctpinputs.cfg\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open 
successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n dbinputs=[]\n count=0\n #print \"look for me if you want different inputs range...\"\n for i in lines:\n if(i[0] == 'l' and i[1] == '0'): continue\n if i == \"\\n\": continue\n if(i[0] != '#'):\n items=string.split(i)\n #print 'items= ',items,len(items)\n if(len(items)<6):\n print \"Error parsing database, not enough items in line:\"\n print items\n continue\n #return None\n if items[3] == 'M': continue\n count=count+1\n #if count<6 or count>11 : continue\n #if count>10 and count<24 : continue\n #if count<16: continue\n #if count > 4 and count < 15: continue\n #if items[3] != '1': continue\n #if items[2] != \"EMCAL\": continue\n #if (items[2] != \"SPD\") and (items[2] != \"T0\"): continue\n flag=1\n for i in self.detectors:\n if items[2].find(i)>=0 or i.find(\"ALL\")>=0:\n flag=0;\n break\n if flag: continue\n # input not connected\n if items[7] == '0' and items[3] == '0': continue\n db={}\n db['name']=items[0]\n db['detector']=items[2]\n db['level']='L'+items[3]\n db['signature']=items[4]\n #db['number']=items[5]\n db['number']=items[7]\n db['numberDIM']=items[6]\n db['ctpnum']=items[5]\n db['Edge'] = items[8]\n db['Delay'] = items[9]\n dbinputs.append(db)\n #print \"Adding: \", db\n return dbinputs", "def create_output_database():\n\n# Do not alter the hdf5 file if it already exists\n if os.path.exists(database_path):\n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" already exists and is ready to store the results of computations\")\n return None\n# Create hdf5 file. The flag \"-w\" means \"create file, fail if exists\" \n else:\n computations_database = h5py.File(database_path, \"w-\")\n\n# Create initial data datasets and write initial data into them \n for initial_condition in initial_conditions:\n for k in range (6,17):\n dataset_initial_path = initial_condition + \"/k = \" + str(k) + \" initial_data\"\n computations_database[dataset_initial_path] = initial_data(initial_condition, k)\n# Create data groups for storing the results of computations \n for flux in fluxes: \n group_path = initial_condition + \"/\" + flux\n computations_database.create_group(group_path)\n\n# Write the appropriate attributes that are needed for particular computations, \n# i.e. 
create the appropriate environment for each computational method \n computations_database[group_path].attrs[\"a\"] = 3.0\n computations_database[group_path].attrs[\"T\"] = 9.0\n if flux == \"Lax_Wendroff_Fourth_Order\": \n computations_database[group_path].attrs[\"CFL\"] = 0.2\n elif flux in [\"Fromm_CFL_0.5\", \"Fromm_van_Leer_CFL_0.5\"]:\n computations_database[group_path].attrs[\"CFL\"] = 0.5\n else:\n computations_database[group_path].attrs[\"CFL\"] = 0.9\n \n computations_database.close() \n print(\"DATABASE STATUS:\")\n print(\"\\t\" + database_path + \" has been created and is ready to store the results of computations\")", "def builder(plates, start, name, assay, isolate, layout, exp_date, mic):\n plateno = 1\n rid = start # record ID\n readno = 1\n segno = 1\n for plate in plates:\n seg = plateno * 8\n startseg = seg - 8\n segment = layout[startseg:seg]\n plate_mic = mic[startseg:seg]\n with open(plate, 'r') as infile:\n # 3 reads per plate\n front = 'INSERT INTO `mic` VALUES ('\n sep = ','\n row = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\n row_num = 0\n for line in infile:\n this_row = row[row_num]\n pep = segment[row_num].split(' ')[0]\n this_mic = plate_mic[row_num]\n # note that blood is hard-coded to NA right now\n buff = [str(rid), str(assay), str(isolate), '1', str(pep), name, 'assayed', 'experiment',\n str(readno), exp_date, this_row]\n rec = line.strip().split(' ')\n buff.extend(rec)\n buff.extend([this_mic, 'NA'])\n buff_form = buff[:5] + [\"'\" + x + \"'\" for x in buff[5:]] + ['NULL', 'NULL);']\n outbuff = front + ','.join(buff_form)\n outbuff = re.sub(\"experiment','4',\",\"experiment','AVERAGE',\",outbuff)\n\n # increment counters\n rid += 1\n if row_num == 7:\n row_num = 0\n if readno == 4: # assumes 3 reads and an average\n plateno += 1\n readno = 1\n else:\n readno += 1\n else:\n row_num += 1\n\n yield outbuff" ]
[ "0.6338688", "0.5884251", "0.5718049", "0.57067966", "0.5664042", "0.5621252", "0.5532487", "0.5456979", "0.53769165", "0.53531015", "0.5353015", "0.5327349", "0.5312414", "0.5305771", "0.5293567", "0.5258516", "0.5220751", "0.5206867", "0.51643485", "0.5132311", "0.51212096", "0.5118295", "0.51046175", "0.50936115", "0.5090406", "0.5077896", "0.5061796", "0.5053264", "0.50445634", "0.50424886" ]
0.6213285
1
Create a red tag consequence parameter database based on the FEMA P58 data. The method was developed to process v3.1.2 of the FragilityDatabase xls that is provided with FEMA P58 2nd edition.
def create_FEMA_P58_bldg_redtag_db( source_file, target_data_file='bldg_redtag_DB_FEMA_P58_2nd.csv', target_meta_file='bldg_redtag_DB_FEMA_P58_2nd.json'): # parse the source file df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1, true_values=["YES", "Yes", "yes"], false_values=["NO", "No", "no"]) # take another pass with booleans because the first does not always work for true_str in ("YES", "Yes", "yes"): df.replace(true_str, True, inplace=True) for false_str in ("NO", "No", "no"): df.replace(false_str, False, inplace=True) # remove empty rows and columns df.dropna(axis=0, how='all', inplace=True) df.dropna(axis=1, how='all', inplace=True) # filter the columns we need for the injury database cols_to_db = [ 'DS Hierarchy', ] for DS_i in range(1, 6): cols_to_db += [ f'DS {DS_i}, Unsafe Placard Trigger Flag', f'DS {DS_i}, Unsafe Placard Damage Median', f'DS {DS_i}, Unsafe Placard Damage Dispersion' ] # filter the columns that we need for the metadata cols_to_meta = [ "Component Name", "Component Description", "Construction Quality:", "Seismic Installation Conditions:", "Comments / Notes", "Author", "Fragility Unit of Measure", "Round to Integer Unit?", "DS 1, Description", "DS 2, Description", "DS 3, Description", "DS 4, Description", "DS 5, Description", ] # remove special characters to make it easier to work with column names str_map = { ord(' '): "_", ord('.'): "_", ord('-'): "_", ord(':'): None, ord('('): None, ord(')'): None, ord('?'): None, ord('/'): None, ord(','): None, } df_db_source = df.loc[:, cols_to_db] df_db_source.columns = [s.translate(str_map) for s in cols_to_db] df_db_source.sort_index(inplace=True) df_meta = df.loc[:, cols_to_meta] df_meta.columns = [s.translate(str_map) for s in cols_to_meta] df_db_source.replace('BY USER', np.nan, inplace=True) df_db_source.replace('By User', np.nan, inplace=True) # initialize the output loss table # define the columns out_cols = [ "Index", "Incomplete", ] for DS_i in range(1, 6): out_cols += [ f"DS{DS_i}-Family", f"DS{DS_i}-Theta_0", f"DS{DS_i}-Theta_1" ] # create the database index comps = df_db_source.index.values df_db = pd.DataFrame( columns=out_cols, index=comps, dtype=float ) # initialize the dictionary that stores the loss metadata meta_dict = {} # for each component... # (this approach is not efficient, but easy to follow which was considered # more important than efficiency.) for cmp in df_db_source.itertuples(): ID = cmp.Index.split('.') cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}' # store the new index df_db.loc[cmp.Index, 'Index'] = cmpID # assume the component information is complete incomplete = False # get the raw metadata for the component cmp_meta = df_meta.loc[cmp.Index, :] # store the global (i.e., not DS-specific) metadata # every component is assumed to have a comp. 
description comments = cmp_meta['Component_Description'] # the additional fields are added to the description if they exist if cmp_meta['Construction_Quality'] != 'Not Specified': comments += f'\nConstruction Quality: ' \ f'{cmp_meta["Construction_Quality"]}' if cmp_meta['Seismic_Installation_Conditions'] not in [ 'Not Specified', 'Not applicable', 'Unknown', 'Any']: comments += f'\nSeismic Installation Conditions: ' \ f'{cmp_meta["Seismic_Installation_Conditions"]}' if cmp_meta['Comments__Notes'] != 'None': comments += f'\nNotes: {cmp_meta["Comments__Notes"]}' if cmp_meta['Author'] not in ['Not Given', 'By User']: comments += f'\nAuthor: {cmp_meta["Author"]}' # get the suggested block size and replace the misleading values with ea block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1] meta_data = { "Description": cmp_meta['Component_Name'], "Comments": comments, "SuggestedComponentBlockSize": ' '.join(block_size), "RoundUpToIntegerQuantity": cmp_meta['Round_to_Integer_Unit'], "ControllingDemand": "Damage Quantity", "DamageStates": {} } # Handle components with simultaneous damage states separately if 'Simul' in cmp.DS_Hierarchy: pass # Note that we are assuming that components with simultaneous # damage states do not have damage that would trigger a red tag. # This assumption holds for the second edition of FEMA P58, but it # might need to be revisited in future editions. # for every other component... else: # now look at each Damage State for DS_i in range(1, 6): redtag_flag = getattr( cmp, f'DS_{DS_i}_Unsafe_Placard_Trigger_Flag') if redtag_flag is True: theta_0 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_' f'Median') theta_1 = getattr(cmp, f'DS_{DS_i}_Unsafe_Placard_Damage_' f'Dispersion') if theta_0 != 0.0: df_db.loc[cmp.Index, f'DS{DS_i}-Family'] = 'lognormal' df_db.loc[cmp.Index, f'DS{DS_i}-Theta_0'] = theta_0 df_db.loc[cmp.Index, f'DS{DS_i}-Theta_1'] = theta_1 if (pd.isna(theta_0) or pd.isna(theta_1)): incomplete = True if ~np.isnan(redtag_flag): meta_data['DamageStates'].update({ f"DS{DS_i}": {"Description": cmp_meta[f"DS_{DS_i}_Description"]}}) df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete) # store the metadata for this component meta_dict.update({cmpID: meta_data}) # assign the Index column as the new ID df_db.set_index('Index', inplace=True) # review the database and drop rows with no information cmp_to_drop = [] for cmp in df_db.index: empty = True for DS_i in range(1, 6): if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']): empty = False break if empty: cmp_to_drop.append(cmp) df_db.drop(cmp_to_drop, axis=0, inplace=True) cmp_kept = df_db.index.get_level_values(0).unique() cmp_to_drop = [] for cmp in meta_dict: if cmp not in cmp_kept: cmp_to_drop.append(cmp) for cmp in cmp_to_drop: del meta_dict[cmp] # convert to optimal datatypes to reduce file size df_db = df_db.convert_dtypes() # rename the index df_db.index.name = "ID" # save the consequence data df_db.to_csv(target_data_file) # save the metadata with open(target_meta_file, 'w+', encoding='utf-8') as f: json.dump(meta_dict, f, indent=2) print("Successfully parsed and saved the red tag consequence data from FEMA " "P58")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_FEMA_P58_fragility_db(source_file,\n target_data_file='fragility_DB_FEMA_P58_2nd.csv',\n target_meta_file='fragility_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # remove the empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns that we need for the fragility database\n cols_to_db = [\n \"Demand Parameter (value):\",\n \"Demand Parameter (unit):\",\n \"Demand Location (use floor above? Yes/No)\",\n \"Directional?\",\n \"DS Hierarchy\",\n \"DS 1, Probability\",\n \"DS 1, Median Demand\",\n \"DS 1, Total Dispersion (Beta)\",\n \"DS 2, Probability\",\n \"DS 2, Median Demand\",\n \"DS 2, Total Dispersion (Beta)\",\n \"DS 3, Probability\",\n \"DS 3, Median Demand\",\n \"DS 3, Total Dispersion (Beta)\",\n \"DS 4, Probability\",\n \"DS 4, Median Demand\",\n \"DS 4, Total Dispersion (Beta)\",\n \"DS 5, Probability\",\n \"DS 5, Median Demand\",\n \"DS 5, Total Dispersion (Beta)\",\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 1, Repair Description\",\n \"DS 2, Description\",\n \"DS 2, Repair Description\",\n \"DS 3, Description\",\n \"DS 3, Repair Description\",\n \"DS 4, Description\",\n \"DS 4, Repair Description\",\n \"DS 5, Description\",\n \"DS 5, Repair Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n # replace missing values with an empty string\n df_meta.fillna('', inplace=True)\n # the metadata shall be stored in strings\n df_meta = df_meta.astype(str)\n\n # initialize the output fragility table\n df_db = pd.DataFrame(\n columns=[\n \"Index\",\n \"Incomplete\",\n \"Demand-Type\",\n \"Demand-Unit\",\n \"Demand-Offset\",\n \"Demand-Directional\",\n \"LS1-Family\",\n \"LS1-Theta_0\",\n \"LS1-Theta_1\",\n \"LS1-DamageStateWeights\",\n \"LS2-Family\",\n \"LS2-Theta_0\",\n \"LS2-Theta_1\",\n \"LS2-DamageStateWeights\",\n \"LS3-Family\",\n \"LS3-Theta_0\",\n \"LS3-Theta_1\",\n \"LS3-DamageStateWeights\",\n \"LS4-Family\",\n \"LS4-Theta_0\",\n \"LS4-Theta_1\",\n \"LS4-DamageStateWeights\"\n ],\n index=df_db_source.index,\n dtype=float\n )\n\n # initialize the dictionary that stores the fragility metadata\n meta_dict = {}\n\n # conversion dictionary for demand types\n convert_demand_type = {\n 'Story Drift Ratio': \"Peak Interstory Drift Ratio\",\n 'Link Rotation Angle': \"Peak Link Rotation Angle\",\n 'Effective Drift': \"Peak Effective Drift Ratio\",\n 'Link Beam Chord Rotation': \"Peak Link Beam Chord Rotation\",\n 'Peak Floor Acceleration': \"Peak Floor Acceleration\",\n 'Peak Floor Velocity': \"Peak Floor Velocity\"\n }\n\n # conversion dictionary for demand unit names\n convert_demand_unit = {\n 'Unit less': 
'unitless',\n 'Radians': 'rad',\n 'g': 'g',\n 'meter/sec': 'mps'\n }\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n # create a dotted component index\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete = False\n\n # store demand specifications\n df_db.loc[cmp.Index, 'Demand-Type'] = (\n convert_demand_type[cmp.Demand_Parameter_value])\n df_db.loc[cmp.Index, 'Demand-Unit'] = (\n convert_demand_unit[cmp.Demand_Parameter_unit])\n df_db.loc[cmp.Index, 'Demand-Offset'] = (\n int(cmp.Demand_Location_use_floor_above_YesNo))\n df_db.loc[cmp.Index, 'Demand-Directional'] = (\n int(cmp.Directional))\n\n # parse the damage state hierarchy\n DS_setup = parse_DS_Hierarchy(cmp.DS_Hierarchy)\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"LimitStates\": {}\n }\n\n # now look at each Limit State\n for LS_i, LS_contents in enumerate(DS_setup):\n\n LS_i = LS_i + 1\n LS_contents = np.atleast_1d(LS_contents)\n\n ls_meta = {}\n\n # start with the special cases with multiple DSs in an LS\n if LS_contents[0] in {'MutEx', 'Simul'}:\n\n # collect the fragility data for the member DSs\n median_demands = []\n dispersions = []\n weights = []\n for ds in LS_contents[1:]:\n median_demands.append(\n getattr(cmp, f\"DS_{ds[2]}_Median_Demand\"))\n\n dispersions.append(\n getattr(cmp, f\"DS_{ds[2]}_Total_Dispersion_Beta\"))\n\n weights.append(getattr(cmp, f\"DS_{ds[2]}_Probability\"))\n\n # make sure the specified distribution parameters are appropriate\n if ((np.unique(median_demands).size != 1) or (\n np.unique(dispersions).size != 1)):\n raise ValueError(f\"Incorrect mutually exclusive DS \"\n f\"definition in component {cmp.Index} at \"\n f\"Limit State {LS_i}\")\n\n if LS_contents[0] == 'MutEx':\n\n # in mutually exclusive cases, make sure the specified DS\n # weights sum up to one\n np.testing.assert_allclose(\n np.sum(np.array(weights, dtype=float)), 1.0,\n err_msg=f\"Mutually exclusive Damage State weights do \"\n f\"not sum to 1.0 in component {cmp.Index} at \"\n f\"Limit State {LS_i}\")\n\n # and save all DS 
metadata under this Limit State\n for ds in LS_contents[1:]:\n ds_id = ds[2]\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": cmp_meta[f\"DS_{ds_id}_Description\"],\n \"RepairAction\": cmp_meta[\n f\"DS_{ds_id}_Repair_Description\"]\n }})\n\n else:\n # in simultaneous cases, convert simultaneous weights into\n # mutexc weights\n sim_ds_count = len(LS_contents) - 1\n ds_count = 2 ** (sim_ds_count) - 1\n\n sim_weights = []\n\n for ds_id in range(1, ds_count + 1):\n ds_map = format(ds_id, f'0{sim_ds_count}b')\n\n sim_weights.append(np.product(\n [weights[ds_i]\n if ds_map[-ds_i - 1] == '1' else 1.0-weights[ds_i]\n for ds_i in range(sim_ds_count)]))\n\n # save ds metadata - we need to be clever here\n # the original metadata is saved for the pure cases\n # when only one DS is triggered\n # all other DSs store information about which\n # combination of pure DSs they represent\n\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": f\"Pure DS{ds_pure_id}. \" +\n cmp_meta[f\"DS_{ds_pure_id}_Description\"],\n \"RepairAction\": cmp_meta[\n f\"DS_{ds_pure_id}_Repair_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo),\n \"RepairAction\": 'Combination of pure DS repair '\n 'actions.'\n }})\n\n # adjust weights to respect the assumption that at least\n # one DS will occur (i.e., the case with all DSs returning\n # False is not part of the event space)\n sim_weights_array = np.array(sim_weights) / np.sum(sim_weights)\n\n weights = sim_weights_array\n\n theta_0 = median_demands[0]\n theta_1 = dispersions[0]\n weights_str = ' | '.join([f\"{w:.6f}\" for w in weights])\n\n df_db.loc[cmp.Index, f'LS{LS_i}-DamageStateWeights'] = weights_str\n\n # then look at the sequential DS cases\n elif LS_contents[0].startswith('DS'):\n\n # this is straightforward, store the data in the table and dict\n ds_id = LS_contents[0][2]\n\n theta_0 = getattr(cmp, f\"DS_{ds_id}_Median_Demand\")\n theta_1 = getattr(cmp, f\"DS_{ds_id}_Total_Dispersion_Beta\")\n\n ls_meta.update({f\"DS{ds_id}\": {\n \"Description\": cmp_meta[f\"DS_{ds_id}_Description\"],\n \"RepairAction\": cmp_meta[f\"DS_{ds_id}_Repair_Description\"]\n }})\n\n # FEMA P58 assumes lognormal distribution for every fragility\n df_db.loc[cmp.Index, f'LS{LS_i}-Family'] = 'lognormal'\n\n # identify incomplete cases...\n\n # where theta is missing\n if theta_0 != 'By User':\n df_db.loc[cmp.Index, f'LS{LS_i}-Theta_0'] = theta_0\n else:\n incomplete = True\n\n # where beta is missing\n if theta_1 != 'By User':\n df_db.loc[cmp.Index, f'LS{LS_i}-Theta_1'] = theta_1\n else:\n incomplete = True\n\n # store the collected metadata for this limit state\n meta_data['LimitStates'].update({f\"LS{LS_i}\": ls_meta})\n\n # store the incomplete flag for this component\n df_db.loc[cmp.Index, 'Incomplete'] = int(incomplete)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.set_index('Index', inplace=True)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the fragility data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the 
fragility data from FEMA P58\")", "def make_tag_data_raw_fast(mdp,filename):\n #\n fin = open(filename,'r')\n iter = 0\n for line in fin:\n lsp = line.split(' ')\n if len(lsp) > 1: # skip empty lines\n if lsp[0] == \"comb_path\":\n update_params(mdp,lsp)\n if not mdp.flag_out_open: ## -- try to open output file\n try:\n if mdp.flag_overwrite == \"True\": ## check string value!\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ## -- try open output file\n for file in glob.glob(mdp.input_path):\n # get sign which corrects for boundary condition\n tvals = file.split('/')[-1].split('_')[3].split('t')\n try:\n ## flip sign if requested\n bcsign = ((int(tvals[1])+int(tvals[2])) != (int(tvals[1])+int(tvals[2])) % mdp.corr_len)\n except IndexError:\n ## 2-point function\n bcsign = False\n try:\n # open correlator file\n mdp.corr_file = open(file,'r')\n except IOError:\n print \"Could not open file \",file\n continue\n ## -- get tag\n ## baryons:\n #mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_r'+file.split('/')[-1].split('_')[4][-1]\n ## with time source tag\n #mdp.tag = file.split('/')[-1].split('_')[3][:3]\\\n # +'_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n # +file.split('/')[-1].split('_')[4][3:]\n ## no time source tag\n mdp.tag = '_'+file.split('/')[-1].split('_')[1][1:]+'_'+file.split('/')[-1].split('_')[4][0]\\\n +file.split('/')[-1].split('_')[4][3:]\n #print file,',',mdp.tag\n iter+=1\n ##endif ! 
flag_out_open\n\n #save_data_fast(mdp)\n save_data_fast_bc(mdp,bcsign)\n mdp.corr_file.close()\n if iter%400 == 0:\n print \"file\",iter\n max_iter = None\n if not(max_iter is None) and iter==max_iter:\n print \"reached max file iterations, ending loop...\"\n break\n ## end comb_path\n pass\n\n elif lsp[0] == \"for\": # indicates when to get correlator\n lsp.pop(0)\n update_params(mdp,lsp)\n try:\n # open correlator file\n mdp.corr_file = open(mdp.input_path + '/' + mdp.input_fname,'r')\n except IOError:\n print \"Could not open file \",mdp.input_fname\n continue\n print mdp.input_fname\n if not mdp.flag_out_open:\n try:\n if mdp.flag_overwrite:\n ## -- open save file for read+write\n try:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0) # go to beginning\n mdp.save_file.truncate() # delete whatever was there before\n except IOError:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'w')\n mdp.save_file.close()\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n mdp.flag_overwrite= False\n else:\n mdp.save_file = open(mdp.output_path + '/' + mdp.output_fname,'r+')\n mdp.save_file.seek(0,2) # seek the end of file\n mdp.flag_out_open = True\n #for num,key in zip(mdp.corr_num,mdp.key):\n # corr_key=uf.get_str_key(mdp.corr_file,\"correlator_key\",num)\n #except (IOError):\n # pass\n except (AttributeError):\n print \"Attempted to open invalid output file\"\n ##endif ! flag_out_open\n save_data_fast(mdp)\n mdp.corr_file.close()\n ##else \"for\" not found in control file\n else:\n update_params(mdp,lsp)\n ##endif lsp[0]==for\n ##endif len(lsp) > 1\n try:\n mdp.save_file.close()\n mdp.flag_out_open = False\n except (IOError,AttributeError):\n pass\n fin.close()\n return", "def produce_database(database_name, is_debug):\n\t\n\t# read files from a01-a35, every file including whole ecg data and the corresponding annotation\n\tdata_annotations_set = get_ecg_data_annotations(database_name, is_debug)\n\t# divide ECG data to minute-by-minute ECG segments\n\t_ = process_ecg_data_segments(database_name, data_annotations_set, is_debug)", "def create_FEMA_P58_bldg_repair_db(\n source_file,\n target_data_file='bldg_repair_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_repair_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.concat(\n [pd.read_excel(source_file, sheet_name=sheet, header=2, index_col=1)\n for sheet in ('Summary', 'Cost Summary', 'Env Summary')], axis=1)\n\n # remove duplicate columns\n # (there are such because we joined two tables that were read separately)\n df = df.loc[:, ~df.columns.duplicated()]\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the repair database\n cols_to_db = [\n \"Fragility Unit of Measure\",\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n f\"Best Fit, DS{DS_i}\",\n f\"Lower Qty Mean, DS{DS_i}\",\n f\"Upper Qty Mean, DS{DS_i}\",\n f\"Lower Qty Cutoff, DS{DS_i}\",\n f\"Upper Qty Cutoff, DS{DS_i}\",\n f\"CV / Dispersion, DS{DS_i}\",\n\n f\"Best Fit, DS{DS_i}.1\",\n f\"Lower Qty Mean, DS{DS_i}.1\",\n f\"Upper Qty Mean, DS{DS_i}.1\",\n f\"Lower Qty Cutoff, DS{DS_i}.1\",\n f\"Upper Qty Cutoff, DS{DS_i}.1\",\n f\"CV / Dispersion, DS{DS_i}.2\",\n f\"DS {DS_i}, Long Lead Time\",\n\n f'Repair Cost, p10, DS{DS_i}',\n 
f'Repair Cost, p50, DS{DS_i}',\n f'Repair Cost, p90, DS{DS_i}',\n f'Time, p10, DS{DS_i}',\n f'Time, p50, DS{DS_i}',\n f'Time, p90, DS{DS_i}',\n f'Mean Value, DS{DS_i}',\n f'Mean Value, DS{DS_i}.1',\n\n # Columns added for the Environmental loss\n f\"DS{DS_i} Best Fit\",\n f\"DS{DS_i} CV or Beta\",\n\n f\"DS{DS_i} Best Fit.1\",\n f\"DS{DS_i} CV or Beta.1\",\n\n f\"DS{DS_i} Embodied Carbon (kg CO2eq)\",\n f\"DS{DS_i} Embodied Energy (MJ)\",\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 1, Repair Description\",\n \"DS 2, Description\",\n \"DS 2, Repair Description\",\n \"DS 3, Description\",\n \"DS 3, Repair Description\",\n \"DS 4, Description\",\n \"DS 4, Repair Description\",\n \"DS 5, Description\",\n \"DS 5, Repair Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 16):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\",\n f\"DS{DS_i}-LongLeadTime\",\n ]\n\n # create the MultiIndex\n comps = df_db_source.index.values\n DVs = ['Cost', 'Time', 'Carbon', 'Energy']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n convert_family = {\n 'LogNormal': 'lognormal',\n 'Normal': 'normal'\n }\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete_cost = False\n incomplete_time = False\n incomplete_carbon = False\n incomplete_energy = False\n\n # store units\n\n df_db.loc[cmp.Index, 'Quantity-Unit'] = (\n ' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip())\n df_db.loc[(cmp.Index, 'Cost'), 'DV-Unit'] = \"USD_2011\"\n df_db.loc[(cmp.Index, 'Time'), 'DV-Unit'] = \"worker_day\"\n df_db.loc[(cmp.Index, 'Carbon'), 'DV-Unit'] = \"kg\"\n df_db.loc[(cmp.Index, 'Energy'), 'DV-Unit'] = \"MJ\"\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. 
description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n # Note that we are assuming that all damage states are triggered by\n # a single limit state in these components.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n cost_est = {}\n time_est = {}\n carbon_est = {}\n energy_est = {}\n\n # get the p10, p50, and p90 estimates for all damage states\n for DS_i in range(1, 6):\n\n if not pd.isna(getattr(cmp, f'Repair_Cost_p10_DS{DS_i}')):\n\n cost_est.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'Repair_Cost_p10_DS{DS_i}'),\n getattr(cmp, f'Repair_Cost_p50_DS{DS_i}'),\n getattr(cmp, f'Repair_Cost_p90_DS{DS_i}'),\n getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}'),\n getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}')\n ])})\n\n time_est.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'Time_p10_DS{DS_i}'),\n getattr(cmp, f'Time_p50_DS{DS_i}'),\n getattr(cmp, f'Time_p90_DS{DS_i}'),\n getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1'),\n getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}_1'),\n int(getattr(cmp, f'DS_{DS_i}_Long_Lead_Time') == 'YES')\n ])})\n\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Embodied_Carbon_kg_CO2eq')):\n\n theta_0, theta_1, family = [\n getattr(cmp, f'DS{DS_i}_Embodied_Carbon_kg_CO2eq'),\n getattr(cmp, f'DS{DS_i}_CV_or_Beta'),\n getattr(cmp, f'DS{DS_i}_Best_Fit')\n ]\n\n if family == 'Normal':\n p10, p50, p90 = norm.ppf([0.1, 0.5, 0.9], loc=theta_0, scale=theta_0 * theta_1)\n elif family == 'LogNormal':\n p10, p50, p90 = np.exp(norm.ppf([0.1, 0.5, 0.9], loc=np.log(theta_0), scale=theta_1))\n\n carbon_est.update({f'DS{DS_i}': np.array([p10, p50, p90])})\n\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ')):\n\n theta_0, theta_1, family = [\n getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ'),\n getattr(cmp, f'DS{DS_i}_CV_or_Beta_1'),\n getattr(cmp, f'DS{DS_i}_Best_Fit_1')\n ]\n\n if family == 'Normal':\n p10, p50, p90 = norm.ppf([0.1, 0.5, 0.9], loc=theta_0, scale=theta_0 * theta_1)\n elif family == 'LogNormal':\n p10, p50, p90 = np.exp(norm.ppf([0.1, 0.5, 0.9], loc=np.log(theta_0), scale=theta_1))\n\n energy_est.update({f'DS{DS_i}': np.array([p10, p50, p90])})\n\n # now prepare the equivalent mutex damage states\n sim_ds_count = len(cost_est.keys())\n ds_count = 2 ** (sim_ds_count) - 1\n\n for DS_i in range(1, ds_count + 1):\n ds_map = format(DS_i, 
f'0{sim_ds_count}b')\n\n cost_vals = np.sum([cost_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(5)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n time_vals = np.sum([time_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(6)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n carbon_vals = np.sum([carbon_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(3)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n energy_vals = np.sum([energy_est[f'DS{ds_i + 1}']\n if ds_map[-ds_i - 1] == '1' else np.zeros(3)\n for ds_i in range(sim_ds_count)],\n axis=0)\n\n # fit a distribution\n family_hat, theta_hat = fit_distribution_to_percentiles(\n cost_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n cost_theta = theta_hat\n if family_hat == 'normal':\n cost_theta[1] = cost_theta[1] / cost_theta[0]\n\n time_theta = [time_vals[1],\n np.sqrt(cost_theta[1] ** 2.0 + 0.25 ** 2.0)]\n\n # fit distributions to environmental impact consequences\n family_hat_carbon, theta_hat_carbon = fit_distribution_to_percentiles(\n carbon_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n carbon_theta = theta_hat_carbon\n if family_hat_carbon == 'normal':\n carbon_theta[1] = carbon_theta[1] / carbon_theta[0]\n\n family_hat_energy, theta_hat_energy = fit_distribution_to_percentiles(\n energy_vals[:3], [0.1, 0.5, 0.9], ['normal', 'lognormal'])\n\n energy_theta = theta_hat_energy\n if family_hat_energy == 'normal':\n energy_theta[1] = energy_theta[1] / energy_theta[0]\n\n # Note that here we assume that the cutoff quantities are\n # identical across damage states.\n # This assumption holds for the second edition of FEMA P58, but\n # it might need to be revisited in future editions.\n cost_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1')\n cost_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1')\n time_qnt_low = getattr(cmp, 'Lower_Qty_Cutoff_DS1_1')\n time_qnt_up = getattr(cmp, 'Upper_Qty_Cutoff_DS1_1')\n\n # store the results\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = family_hat\n\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_0'] = (\n f\"{cost_vals[3]:g},{cost_vals[4]:g}|\"\n f\"{cost_qnt_low:g},{cost_qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Cost'),\n f'DS{DS_i}-Theta_1'] = f\"{cost_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = family_hat\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_0'] = (\n f\"{time_vals[3]:g},{time_vals[4]:g}|\"\n f\"{time_qnt_low:g},{time_qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-Theta_1'] = f\"{time_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-LongLeadTime'] = int(time_vals[5] > 0)\n\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Family'] = family_hat_carbon\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_0'] = f\"{carbon_theta[0]:g}\"\n\n df_db.loc[(cmp.Index, 'Carbon'),\n f'DS{DS_i}-Theta_1'] = f\"{carbon_theta[1]:g}\"\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Family'] = family_hat_energy\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_0'] = f\"{energy_theta[0]:g}\"\n\n df_db.loc[(cmp.Index, 'Energy'),\n f'DS{DS_i}-Theta_1'] = f\"{energy_theta[1]:g}\"\n\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": f\"Pure DS{ds_pure_id}. 
\" +\n cmp_meta[f\"DS_{ds_pure_id}_Description\"],\n \"RepairAction\":\n cmp_meta[f\"DS_{ds_pure_id}_Repair_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo),\n \"RepairAction\": 'Combination of pure DS repair '\n 'actions.'\n }})\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n # cost\n if not pd.isna(getattr(cmp, f'Best_Fit_DS{DS_i}')):\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'Best_Fit_DS{DS_i}')])\n\n if not pd.isna(getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}')):\n\n theta_0_low = getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}')\n theta_0_up = getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}')\n qnt_low = getattr(cmp, f'Lower_Qty_Cutoff_DS{DS_i}')\n qnt_up = getattr(cmp, f'Upper_Qty_Cutoff_DS{DS_i}')\n\n if theta_0_low == 0. and theta_0_up == 0.:\n df_db.loc[(cmp.Index, 'Cost'),\n f'DS{DS_i}-Family'] = np.nan\n\n else:\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_0'] = (\n f\"{theta_0_low:g},{theta_0_up:g}|\"\n f\"{qnt_low:g},{qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Cost'), f'DS{DS_i}-Theta_1'] = (\n f\"{getattr(cmp, f'CV__Dispersion_DS{DS_i}'):g}\")\n\n else:\n incomplete_cost = True\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\n \"Description\": cmp_meta[f\"DS_{DS_i}_Description\"],\n \"RepairAction\": cmp_meta[\n f\"DS_{DS_i}_Repair_Description\"]}})\n\n # time\n if not pd.isna(getattr(cmp, f'Best_Fit_DS{DS_i}_1')):\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'Best_Fit_DS{DS_i}_1')])\n\n if not pd.isna(getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1')):\n\n theta_0_low = getattr(cmp, f'Lower_Qty_Mean_DS{DS_i}_1')\n theta_0_up = getattr(cmp, f'Upper_Qty_Mean_DS{DS_i}_1')\n qnt_low = getattr(cmp, f'Lower_Qty_Cutoff_DS{DS_i}_1')\n qnt_up = getattr(cmp, f'Upper_Qty_Cutoff_DS{DS_i}_1')\n\n if theta_0_low == 0. 
and theta_0_up == 0.:\n df_db.loc[(cmp.Index, 'Time'),\n f'DS{DS_i}-Family'] = np.nan\n\n else:\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_0'] = (\n f\"{theta_0_low:g},{theta_0_up:g}|\"\n f\"{qnt_low:g},{qnt_up:g}\")\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-Theta_1'] = (\n f\"{getattr(cmp, f'CV__Dispersion_DS{DS_i}_2'):g}\")\n\n df_db.loc[(cmp.Index, 'Time'), f'DS{DS_i}-LongLeadTime'] = (\n int(getattr(cmp, f'DS_{DS_i}_Long_Lead_Time') == 'YES'))\n\n else:\n incomplete_time = True\n\n # Carbon\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Best_Fit')):\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'DS{DS_i}_Best_Fit')])\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_0'] = getattr(cmp,\n f'DS{DS_i}_Embodied_Carbon_kg_CO2eq')\n\n df_db.loc[(cmp.Index, 'Carbon'), f'DS{DS_i}-Theta_1'] = getattr(cmp, f'DS{DS_i}_CV_or_Beta')\n\n # Energy\n if not pd.isna(getattr(cmp, f'DS{DS_i}_Best_Fit_1')):\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Family'] = (\n convert_family[getattr(cmp, f'DS{DS_i}_Best_Fit_1')])\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_0'] = getattr(cmp, f'DS{DS_i}_Embodied_Energy_MJ')\n\n df_db.loc[(cmp.Index, 'Energy'), f'DS{DS_i}-Theta_1'] = getattr(cmp, f'DS{DS_i}_CV_or_Beta_1')\n\n df_db.loc[(cmp.Index, 'Cost'), 'Incomplete'] = int(incomplete_cost)\n df_db.loc[(cmp.Index, 'Time'), 'Incomplete'] = int(incomplete_time)\n df_db.loc[(cmp.Index, 'Carbon'), 'Incomplete'] = int(incomplete_carbon)\n df_db.loc[(cmp.Index, 'Energy'), 'Incomplete'] = int(incomplete_energy)\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.index = pd.MultiIndex.from_arrays(\n [df_db['Index'].values, df_db.index.get_level_values(1)])\n\n df_db.drop('Index', axis=1, inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 6):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n for cmp in cmp_to_drop:\n if cmp[0] in meta_dict:\n del meta_dict[cmp[0]]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the repair consequence data from FEMA \"\n \"P58\")", "def create_FEMA_P58_bldg_injury_db(\n source_file,\n target_data_file='bldg_injury_DB_FEMA_P58_2nd.csv',\n target_meta_file='bldg_injury_DB_FEMA_P58_2nd.json'):\n\n # parse the source file\n df = pd.read_excel(source_file, sheet_name='Summary', header=2, index_col=1,\n true_values=[\"YES\", \"Yes\", \"yes\"],\n false_values=[\"NO\", \"No\", \"no\"])\n\n # remove empty rows and columns\n df.dropna(axis=0, how='all', inplace=True)\n df.dropna(axis=1, how='all', inplace=True)\n\n # filter the columns we need for the injury database\n cols_to_db = [\n \"Fragility Unit of Measure\",\n 'DS Hierarchy',\n ]\n for DS_i in range(1, 6):\n cols_to_db += [\n\n f'DS {DS_i}, Potential non-collapse casualty?',\n f'DS {DS_i} - Casualty Affected Area',\n f'DS {DS_i} Serious Injury Rate - Median',\n f'DS {DS_i} Serious Injury Rate - Dispersion',\n f'DS 
{DS_i} Loss of Life Rate - Median',\n f'DS {DS_i} Loss of Life Rate - Dispersion',\n ]\n\n # filter the columns that we need for the metadata\n cols_to_meta = [\n \"Component Name\",\n \"Component Description\",\n \"Construction Quality:\",\n \"Seismic Installation Conditions:\",\n \"Comments / Notes\",\n \"Author\",\n \"Fragility Unit of Measure\",\n \"Round to Integer Unit?\",\n \"DS 1, Description\",\n \"DS 2, Description\",\n \"DS 3, Description\",\n \"DS 4, Description\",\n \"DS 5, Description\",\n ]\n\n # remove special characters to make it easier to work with column names\n str_map = {\n ord(' '): \"_\",\n ord('.'): \"_\",\n ord('-'): \"_\",\n ord(':'): None,\n ord('('): None,\n ord(')'): None,\n ord('?'): None,\n ord('/'): None,\n ord(','): None,\n }\n\n df_db_source = df.loc[:, cols_to_db]\n df_db_source.columns = [s.translate(str_map) for s in cols_to_db]\n df_db_source.sort_index(inplace=True)\n\n df_meta = df.loc[:, cols_to_meta]\n df_meta.columns = [s.translate(str_map) for s in cols_to_meta]\n\n df_db_source.replace('BY USER', np.nan, inplace=True)\n df_db_source.replace('By User', np.nan, inplace=True)\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Index\",\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 16):\n out_cols += [\n f\"DS{DS_i}-Family\",\n f\"DS{DS_i}-Theta_0\",\n f\"DS{DS_i}-Theta_1\",\n f\"DS{DS_i}-AffectedArea\",\n ]\n\n # create the MultiIndex\n comps = df_db_source.index.values\n DVs = ['S1', 'S2']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'Severity'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # initialize the dictionary that stores the loss metadata\n meta_dict = {}\n\n # for each component...\n # (this approach is not efficient, but easy to follow which was considered\n # more important than efficiency.)\n for cmp in df_db_source.itertuples():\n\n ID = cmp.Index.split('.')\n cmpID = f'{ID[0][0]}.{ID[0][1:3]}.{ID[0][3:5]}.{ID[1]}'\n\n # store the new index\n df_db.loc[cmp.Index, 'Index'] = cmpID\n\n # assume the component information is complete\n incomplete_S1 = False\n incomplete_S2 = False\n\n # store units\n\n df_db.loc[cmp.Index, 'Quantity-Unit'] = (\n ' '.join(cmp.Fragility_Unit_of_Measure.split(' ')[::-1]).strip())\n df_db.loc[(cmp.Index, 'S1'), 'DV-Unit'] = \"persons\"\n df_db.loc[(cmp.Index, 'S2'), 'DV-Unit'] = \"persons\"\n\n # get the raw metadata for the component\n cmp_meta = df_meta.loc[cmp.Index, :]\n\n # store the global (i.e., not DS-specific) metadata\n\n # every component is assumed to have a comp. 
description\n comments = cmp_meta['Component_Description']\n\n # the additional fields are added to the description if they exist\n if cmp_meta['Construction_Quality'] != 'Not Specified':\n comments += f'\\nConstruction Quality: ' \\\n f'{cmp_meta[\"Construction_Quality\"]}'\n\n if cmp_meta['Seismic_Installation_Conditions'] not in [\n 'Not Specified', 'Not applicable', 'Unknown', 'Any']:\n comments += f'\\nSeismic Installation Conditions: ' \\\n f'{cmp_meta[\"Seismic_Installation_Conditions\"]}'\n\n if cmp_meta['Comments__Notes'] != 'None':\n comments += f'\\nNotes: {cmp_meta[\"Comments__Notes\"]}'\n\n if cmp_meta['Author'] not in ['Not Given', 'By User']:\n comments += f'\\nAuthor: {cmp_meta[\"Author\"]}'\n\n # get the suggested block size and replace the misleading values with ea\n block_size = cmp_meta['Fragility_Unit_of_Measure'].split(' ')[::-1]\n\n meta_data = {\n \"Description\": cmp_meta['Component_Name'],\n \"Comments\": comments,\n \"SuggestedComponentBlockSize\": ' '.join(block_size),\n \"RoundUpToIntegerQuantity\": cmp_meta['Round_to_Integer_Unit'],\n \"ControllingDemand\": \"Damage Quantity\",\n \"DamageStates\": {}\n }\n\n # Handle components with simultaneous damage states separately\n if 'Simul' in cmp.DS_Hierarchy:\n\n # Note that we are assuming that all damage states are triggered by\n # a single limit state in these components.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n inj_data = {}\n ds_tot = 0\n\n # get the p10, p50, and p90 estimates for all damage states\n for DS_i in range(1, 6):\n\n casualty_model = getattr(\n cmp, f'DS_{DS_i}_Potential_non_collapse_casualty')\n\n if casualty_model is True:\n\n inj_data.update({f'DS{DS_i}': np.array([\n getattr(cmp, f'DS_{DS_i}___Casualty_Affected_Area'),\n getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Median'),\n getattr(cmp, f'DS_{DS_i}_Serious_Injury_Rate___Dispersion'),\n getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Median'),\n getattr(cmp, f'DS_{DS_i}_Loss_of_Life_Rate___Dispersion')\n ])})\n ds_tot += 1\n\n elif casualty_model is False:\n ds_tot += 1\n\n # only continue if there is injury data\n if len(inj_data) == 0:\n continue\n\n # now prepare the equivalent mutex damage states\n sim_ds_count = ds_tot\n ds_count = 2 ** (sim_ds_count) - 1\n\n # Here we take advantage of knowing that for every component with\n # simultaneous damage states, only one of the DSs has injury\n # consequences.\n # This assumption holds for the second edition of FEMA P58, but it\n # might need to be revisited in future editions.\n\n ds_trig = list(inj_data.keys())[0]\n inj_data = inj_data[ds_trig]\n ds_trig = int(ds_trig[2:])\n\n for DS_i in range(1, ds_count + 1):\n ds_map = format(DS_i, f'0{sim_ds_count}b')\n\n if ds_map[-ds_trig] == '1':\n\n # store the consequence data\n for severity in ('S1', 'S2'):\n\n A_affected = inj_data[0]\n\n if severity == 'S1':\n theta_0 = inj_data[1]\n theta_1 = inj_data[2]\n elif severity == 'S2':\n theta_0 = inj_data[3]\n theta_1 = inj_data[4]\n\n if theta_0 != 0.0:\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_1'] = theta_1\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-AffectedArea'] = A_affected\n\n # store the metadata\n if ds_map.count('1') == 1:\n\n ds_pure_id = ds_map[::-1].find('1') + 1\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": f\"Pure 
DS{ds_pure_id}. \" +\n cmp_meta[\n f\"DS_{ds_pure_id}_Description\"]\n }})\n\n else:\n\n ds_combo = [f'DS{_.start() + 1}'\n for _ in re.finditer('1', ds_map[::-1])]\n\n meta_data['DamageStates'].update({f\"DS{DS_i}\": {\n \"Description\": 'Combination of ' +\n ' & '.join(ds_combo)\n }})\n\n # for every other component...\n else:\n # now look at each Damage State\n for DS_i in range(1, 6):\n\n casualty_flag = getattr(\n cmp, f'DS_{DS_i}_Potential_non_collapse_casualty')\n\n if casualty_flag is True:\n\n A_affected = getattr(cmp,\n f'DS_{DS_i}___Casualty_Affected_Area')\n\n for severity in ('S1', 'S2'):\n\n if severity == 'S1':\n theta_0 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_'\n f'Rate___Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Serious_Injury_'\n f'Rate___Dispersion')\n elif severity == 'S2':\n theta_0 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_'\n f'Rate___Median')\n theta_1 = getattr(cmp, f'DS_{DS_i}_Loss_of_Life_'\n f'Rate___Dispersion')\n\n if theta_0 != 0.0:\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Family'] = 'lognormal'\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_0'] = theta_0\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-Theta_1'] = theta_1\n\n df_db.loc[(cmp.Index, severity),\n f'DS{DS_i}-AffectedArea'] = A_affected\n\n if (pd.isna(theta_0) or pd.isna(\n theta_1) or pd.isna(A_affected)):\n\n if severity == 'S1':\n incomplete_S1 = True\n else:\n incomplete_S2 = True\n\n if ~np.isnan(casualty_flag):\n\n meta_data['DamageStates'].update({\n f\"DS{DS_i}\": {\"Description\":\n cmp_meta[f\"DS_{DS_i}_Description\"]}})\n\n df_db.loc[(cmp.Index, 'S1'), 'Incomplete'] = int(incomplete_S1)\n df_db.loc[(cmp.Index, 'S2'), 'Incomplete'] = int(incomplete_S2)\n\n # store the metadata for this component\n meta_dict.update({cmpID: meta_data})\n\n # assign the Index column as the new ID\n df_db.index = pd.MultiIndex.from_arrays(\n [df_db['Index'].values, df_db.index.get_level_values(1)])\n\n df_db.drop('Index', axis=1, inplace=True)\n\n # review the database and drop rows with no information\n cmp_to_drop = []\n for cmp in df_db.index:\n\n empty = True\n\n for DS_i in range(1, 16):\n if not pd.isna(df_db.loc[cmp, f'DS{DS_i}-Family']):\n empty = False\n break\n\n if empty:\n cmp_to_drop.append(cmp)\n\n df_db.drop(cmp_to_drop, axis=0, inplace=True)\n cmp_kept = df_db.index.get_level_values(0).unique()\n\n cmp_to_drop = []\n for cmp in meta_dict:\n if cmp not in cmp_kept:\n cmp_to_drop.append(cmp)\n\n for cmp in cmp_to_drop:\n del meta_dict[cmp]\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata\n with open(target_meta_file, 'w+', encoding='utf-8') as f:\n json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the injury consequence data from FEMA \"\n \"P58\")", "def create_Hazus_EQ_fragility_db(source_file,\n target_data_file='fragility_DB_Hazus_EQ.csv',\n target_meta_file='fragility_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n design_levels = list(\n raw_data['Structural_Fragility_Groups']['EDP_limits'].keys())\n\n building_types = list(\n raw_data['Structural_Fragility_Groups']['P_collapse'].keys())\n\n convert_design_level = {\n 'High_code': 'HC',\n 'Moderate_code': 'MC',\n 'Low_code': 
'LC',\n 'Pre_code': 'PC'\n }\n\n # initialize the fragility table\n df_db = pd.DataFrame(\n columns=[\n \"ID\",\n \"Incomplete\",\n \"Demand-Type\",\n \"Demand-Unit\",\n \"Demand-Offset\",\n \"Demand-Directional\",\n \"LS1-Family\",\n \"LS1-Theta_0\",\n \"LS1-Theta_1\",\n \"LS1-DamageStateWeights\",\n \"LS2-Family\",\n \"LS2-Theta_0\",\n \"LS2-Theta_1\",\n \"LS2-DamageStateWeights\",\n \"LS3-Family\",\n \"LS3-Theta_0\",\n \"LS3-Theta_1\",\n \"LS3-DamageStateWeights\",\n \"LS4-Family\",\n \"LS4-Theta_0\",\n \"LS4-Theta_1\",\n \"LS4-DamageStateWeights\"\n ],\n index=np.arange(len(building_types) * len(design_levels) * 5),\n dtype=float\n )\n counter = 0\n\n # First, prepare the structural fragilities\n S_data = raw_data['Structural_Fragility_Groups']\n\n for bt in building_types:\n for dl in design_levels:\n if bt in S_data['EDP_limits'][dl].keys():\n\n # create the component id\n cmp_id = f'STR.{bt}.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Roof Drift Ratio\"\n df_db.loc[counter, 'Demand-Unit'] = \"rad\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n S_data['EDP_limits'][dl][bt][LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = \\\n S_data['Fragility_beta'][dl]\n\n if LS_i == 4:\n p_coll = S_data['P_collapse'][bt]\n df_db.loc[counter, f'LS{LS_i}-DamageStateWeights'] = (\n f'{1.0 - p_coll} | {p_coll}')\n\n counter += 1\n\n # Second, the non-structural drift sensitive one\n NSD_data = raw_data['NonStructural_Drift_Sensitive_Fragility_Groups']\n\n # create the component id\n df_db.loc[counter, 'ID'] = 'NSD'\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Roof Drift Ratio\"\n df_db.loc[counter, 'Demand-Unit'] = \"rad\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = NSD_data['EDP_limits'][\n LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = NSD_data['Fragility_beta']\n\n counter += 1\n\n # Third, the non-structural acceleration sensitive fragilities\n NSA_data = raw_data['NonStructural_Acceleration_Sensitive_Fragility_Groups']\n\n for dl in design_levels:\n\n # create the component id\n cmp_id = f'NSA.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Floor Acceleration\"\n df_db.loc[counter, 'Demand-Unit'] = \"g\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n for LS_i in range(1, 5):\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n NSA_data['EDP_limits'][dl][LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = NSA_data['Fragility_beta']\n\n counter += 1\n\n # Fourth, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for bt in building_types:\n for dl in design_levels:\n if bt in LF_data['EDP_limits'][dl].keys():\n\n # create the component id\n cmp_id = f'LF.{bt}.{convert_design_level[dl]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Peak Ground Acceleration\"\n df_db.loc[counter, 'Demand-Unit'] = \"g\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit 
State parameters\n for LS_i in range(1, 5):\n\n df_db.loc[counter, f'LS{LS_i}-Family'] = 'lognormal'\n df_db.loc[counter, f'LS{LS_i}-Theta_0'] = \\\n LF_data['EDP_limits'][dl][bt][LS_i - 1]\n df_db.loc[counter, f'LS{LS_i}-Theta_1'] = \\\n LF_data['Fragility_beta'][dl]\n\n if LS_i == 4:\n p_coll = LF_data['P_collapse'][bt]\n df_db.loc[counter, f'LS{LS_i}-DamageStateWeights'] = (\n f'{1.0 - p_coll} | {p_coll}')\n\n counter += 1\n\n # Fifth, the ground failure fragilities\n GF_data = raw_data['Ground_Failure']\n\n for direction in ('Horizontal', 'Vertical'):\n for f_depth in ('Shallow', 'Deep'):\n # create the component id\n cmp_id = f'GF.{direction[0]}.{f_depth[0]}'\n df_db.loc[counter, 'ID'] = cmp_id\n\n # store demand specifications\n df_db.loc[counter, 'Demand-Type'] = \"Permanent Ground Deformation\"\n df_db.loc[counter, 'Demand-Unit'] = \"inch\"\n df_db.loc[counter, 'Demand-Offset'] = 0\n\n # store the Limit State parameters\n df_db.loc[counter, 'LS1-Family'] = 'lognormal'\n df_db.loc[counter, 'LS1-Theta_0'] = \\\n GF_data['EDP_limits'][direction][f_depth]\n df_db.loc[counter, 'LS1-Theta_1'] = \\\n GF_data['Fragility_beta'][direction][f_depth]\n p_complete = GF_data['P_Complete']\n df_db.loc[counter, 'LS1-DamageStateWeights'] = (\n f'{1.0 - p_complete} | {p_complete}')\n\n counter += 1\n\n # remove empty rows (from the end)\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # none of them are directional,\n df_db.loc[:, 'Demand-Directional'] = 0\n\n # rename the index\n df_db.set_index(\"ID\", inplace=True)\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the fragility data\n df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the fragility data from Hazus EQ\")", "def builddataframe(brick, path = \"..\", cutstring = \"1\", major = 0, minor = 0, newzprojection = None, charmsim = False):\n nplate =0\n\n print(\"Reading ScanSet at path \",path)\n\n #reading scanset\n sproc = r.EdbScanProc()\n sproc.eProcDirClient=path\n id = r.EdbID(brick,nplate,major,minor)\n ss = sproc.ReadScanSet(id)\n ss.Brick().SetID(brick)\n \n #preparing patterns\n npl = ss.eIDS.GetEntries()\n\n cut = r.TCut(cutstring)\n\n #intial empty arrays\n IDall = np.zeros(0,dtype=int)\n PIDall = np.zeros(0,dtype=int)\n\n xall = np.zeros(0,dtype=np.float32)\n yall = np.zeros(0,dtype=np.float32)\n zall = np.zeros(0,dtype=np.float32)\n TXall = np.zeros(0,dtype=np.float32)\n TYall = np.zeros(0,dtype=np.float32)\n\n MCEvtall = np.zeros(0,dtype=int)\n MCTrackall = np.zeros(0,dtype=int)\n Pall = np.zeros(0,dtype=np.float32)\n Flagall = np.zeros(0,dtype=int)\n\n print (\"Cut on couples \")\n cut.Print()\n\n print(\"Try to open folders at path \",path+\"/b00000\"+str(brick))\n for i in range(npl):\n idplate = ss.GetID(i)\n \n nplate = idplate.ePlate\n plate = ss.GetPlate(idplate.ePlate)\n #read pattern information\n p = r.EdbPattern()\n\n ect = r.EdbCouplesTree()\n if (nplate) <10:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p00{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n else:\n ect.InitCouplesTree(\"couples\",path+\"/b00000\"+str(brick)+\"/p0{}/{}.{}.{}.{}.cp.root\".format(nplate,brick,nplate,major,minor),\"READ\")\n\n #addingcut\n ect.eCut = cut \n cutlist = ect.InitCutList()\n \n nsegcut = cutlist.GetN()\n nseg = 
ect.eTree.GetEntries()\n\n IDarray_plate = np.zeros(nsegcut,dtype=int)\n PIDarray_plate = np.zeros(nsegcut,dtype=int)\n\n xarray_plate = np.zeros(nsegcut,dtype=np.float32)\n yarray_plate = np.zeros(nsegcut,dtype=np.float32)\n zarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TXarray_plate = np.zeros(nsegcut,dtype=np.float32)\n TYarray_plate = np.zeros(nsegcut,dtype=np.float32)\n \n MCEvtarray_plate = np.zeros(nsegcut,dtype=int)\n MCTrackarray_plate = np.zeros(nsegcut,dtype=int)\n Parray_plate = np.zeros(nsegcut,dtype=np.float32)\n Flagarray_plate = np.zeros(nsegcut,dtype=int)\n\n print (\"loop on {} segments over {} for plate {}\".format(nsegcut, nseg,nplate))\n for ientry in range(nsegcut):\n iseg = cutlist.GetEntry(ientry)\n ect.GetEntry(iseg)\n \n seg=ect.eS\n #//setting z and affine transformation\n seg.SetZ(plate.Z())\n seg.SetPID(i)\n seg.Transform(plate.GetAffineXY())\n\n if(newzprojection is not None):\n seg.PropagateTo(newzprojection[i])\n\n IDarray_plate[ientry] = seg.ID()\n PIDarray_plate[ientry] = seg.PID()\n \n xarray_plate[ientry] = seg.X()\n yarray_plate[ientry] = seg.Y()\n zarray_plate[ientry] = seg.Z()\n TXarray_plate[ientry] = seg.TX()\n TYarray_plate[ientry] = seg.TY()\n\n MCEvtarray_plate[ientry] = seg.MCEvt()\n MCTrackarray_plate[ientry] = seg.MCTrack()\n Parray_plate[ientry] = seg.P() \n if charmsim: #different place where pdgcode is stored\n Flagarray_plate[ientry] = seg.Vid(0)\n else:\n Flagarray_plate[ientry] = seg.Flag() \n\n #end of loop, storing them in global arrays\n IDall = np.concatenate((IDall,IDarray_plate),axis=0)\n PIDall = np.concatenate((PIDall,PIDarray_plate),axis=0)\n\n xall = np.concatenate((xall,xarray_plate),axis=0)\n yall = np.concatenate((yall,yarray_plate),axis=0)\n zall = np.concatenate((zall,zarray_plate),axis=0)\n TXall = np.concatenate((TXall,TXarray_plate),axis=0)\n TYall = np.concatenate((TYall,TYarray_plate),axis=0)\n MCEvtall = np.concatenate((MCEvtall,MCEvtarray_plate),axis=0)\n MCTrackall = np.concatenate((MCTrackall,MCTrackarray_plate),axis=0)\n Pall = np.concatenate((Pall,Parray_plate),axis=0)\n Flagall = np.concatenate((Flagall,Flagarray_plate),axis=0)\n\n data = {'ID':IDall,'PID':PIDall,'x':xall,'y':yall,'z':zall,'TX':TXall,'TY':TYall,'MCEvent':MCEvtall,'MCTrack':MCTrackall,'P':Pall,'Flag':Flagall}\n df = pd.DataFrame(data, columns = ['ID','PID','x','y','z','TX','TY','MCEvent','MCTrack','P','Flag'] )\n\n return df", "def readdatabase2(self):\n fname=\"/home/alice/rl/v/vme/ADCI/DB/INPUTS.txt\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n dbinputs=[]\n for i in lines:\n if(i[0] != '#'):\n items=string.split(i)\n #print 'items= ',items,len(items)\n if(len(items)<6):\n print \"Error parsing database, not enough items in line:\"\n print items\n return None\n db={}\n db['number']=items[0]\n db['numberDIM']=items[1]\n db['level']=items[2]\n db['name']=items[3]\n db['detector']=items[4]\n db['signature']=items[5]\n dbinputs.append(db)\n return dbinputs", "def populateSQlite(tagDf): \n conn = sqlite3.connect(os.path.join(prefix, args.db))\n with conn:\n cur = conn.cursor()\n cmds = ['INSERT INTO value VALUES(%d, \\\"%s\\\", %d);' % (r[0], r[1], r[2]) for i, r in tagDf.iterrows()]\n cmds = \"\\n\".join(cmds)\n cur.executescript(cmds)\n conn.commit()", "def readVCTPINPUTS(self): \n #fname= os.environ['VMECFDIR'] 
+\"/CFG/ctp/DB/VALID.CTPINPUTS\"\n fname= os.environ['VMECFDIR'] +\"/CFG/ctp/DB/ctpinputs.cfg\"\n try:\n database=open(fname,\"r\") \n except IOError:\n print \"Cannot open \",fname\n return None\n else:\n print \"File \",fname,\" open successfuly.\"\n #print \"database= \",database\n lines=database.readlines()\n database.close() \n #print lines,len(lines) \n dbinputs=[]\n count=0\n #print \"look for me if you want different inputs range...\"\n for i in lines:\n if(i[0] == 'l' and i[1] == '0'): continue\n if i == \"\\n\": continue\n if(i[0] != '#'):\n items=string.split(i)\n #print 'items= ',items,len(items)\n if(len(items)<6):\n print \"Error parsing database, not enough items in line:\"\n print items\n continue\n #return None\n if items[3] == 'M': continue\n count=count+1\n #if count<6 or count>11 : continue\n #if count>10 and count<24 : continue\n #if count<16: continue\n #if count > 4 and count < 15: continue\n #if items[3] != '1': continue\n #if items[2] != \"EMCAL\": continue\n #if (items[2] != \"SPD\") and (items[2] != \"T0\"): continue\n flag=1\n for i in self.detectors:\n if items[2].find(i)>=0 or i.find(\"ALL\")>=0:\n flag=0;\n break\n if flag: continue\n # input not connected\n if items[7] == '0' and items[3] == '0': continue\n db={}\n db['name']=items[0]\n db['detector']=items[2]\n db['level']='L'+items[3]\n db['signature']=items[4]\n #db['number']=items[5]\n db['number']=items[7]\n db['numberDIM']=items[6]\n db['ctpnum']=items[5]\n db['Edge'] = items[8]\n db['Delay'] = items[9]\n dbinputs.append(db)\n #print \"Adding: \", db\n return dbinputs", "def create_patolli(database='red_cod-db.pkl', sites = -1, elements=-1, maxatoms=-1,\r\n dictionary='structure_dictionary', features='datosrahm.csv',\r\n control_file='model_control_file', \r\n verbose=1, test_frac = 0.15, local_function='fij_2.0_25_diccio',\r\n test_with_all_false = False):\r\n \r\n start_main=time.time()\r\n \r\n X, _, _, _, df = raw_features_extractor(database=database, sites = sites, \r\n elements = elements, maxatoms = maxatoms, \r\n dictionary=dictionary, features=features)\r\n \r\n X = compute_quotients(X=X)\r\n X, df = append_local_functions(X = X, df = df, local_function = local_function)\r\n X, _ , df, _ = split_collection(X = X, df = df, frac = test_frac)\r\n \r\n Y = df['target'].values\r\n class_names=list(set(df['target']))\r\n \r\n subnets=X.shape[1]\r\n features=X.shape[2]\r\n \r\n \r\n average = np.mean(X, axis=0) \r\n stdev = np.std(X, axis=0)\r\n \r\n X = (X - average)/stdev\r\n \r\n dicfeatstand = {'mean':average,'std':stdev}\r\n np.save('feature_standarisation',dicfeatstand)\r\n \r\n with open('feature_standarisation.txt','w') as f:\r\n f.write('X matrix has dimensions '+str(X.shape[0])+' samples x ' + \\\r\n str(X.shape[1]) + ' sites x ' + str(X.shape[2]) + \\\r\n ' features'+'\\n'+'\\n')\r\n f.write('Features - mean:'+'\\n'+'\\n')\r\n f.write(str(average)+'\\n'+'\\n')\r\n f.write('Features - std:'+'\\n'+'\\n')\r\n f.write(str(stdev))\r\n f.close()\r\n \r\n Xor=copy.deepcopy(X)\r\n X,y = shuffle(X,Y,random_state=0)\r\n \r\n x={}\r\n xor={}\r\n \r\n for subnet in range(subnets):\r\n x[subnet] = X[:,subnet,:]\r\n xor[subnet] = Xor[:,subnet,:]\r\n \r\n directorio = time.ctime().replace(' ', '_').replace(':','_')\r\n os.system('mkdir ' + directorio)\r\n os.system('mv compounds_collection.csv ' + directorio +'/')\r\n os.system('mv multiplicities.npy ' + directorio +'/')\r\n os.system('mv occupation_fractions.npy ' + directorio +'/')\r\n os.system('mv output_values.npy ' + directorio +'/')\r\n 
os.system('mv raw_features.npy ' + directorio +'/')\r\n os.system('mv X*.npy ' + directorio +'/')\r\n os.system('mv db*.csv ' + directorio +'/')\r\n os.system('mv feature_standarisation* ' + directorio +'/')\r\n \r\n \r\n ctrl_diccio = ctrl_dictionary(archivo=control_file)\r\n print('\\n')\r\n print('*************************************************************'+\r\n '*************************************************************'+\r\n '*************************************************************'+\r\n '*************************************************************')\r\n print('ANNs TRAINING WILL START NOW.')\r\n print('\\n')\r\n print('There are ',len(ctrl_diccio.keys()),' ANNs to train')\r\n \r\n for item in list(ctrl_diccio):\r\n print('Training ', item+1,'/',len(ctrl_diccio.keys()))\r\n diccionary = ctrl_diccio[item]\r\n \r\n hidden_layers=[float(x) for x in diccionary['HIDDEN_LAYERS'].split(\",\")]\r\n epochs=int(diccionary['EPOCHS'])\r\n batch_size=int(diccionary['BATCH_SIZE'])\r\n test_val=float(diccionary['TEST_VAL'])\r\n cost_function=diccionary['COST_FUNCTION']\r\n learning_rate=float(diccionary['LEARNING_RATE'])\r\n beta_1=float(diccionary['BETA_1'])\r\n beta_2=float(diccionary['BETA_2'])\r\n decay=float(diccionary['DECAY'])\r\n dropout=float(diccionary['DROPOUT'])\r\n activation=diccionary['ACTIVATION']\r\n name=diccionary['NAME']\r\n \r\n hidden_layers = np.asarray(hidden_layers)*features\r\n hidden_layers = [int(x) for x in hidden_layers]\r\n \r\n model = modelo(hidden_layers=hidden_layers, activation=activation,\r\n features=features, beta_1=beta_1, beta_2=beta_2, lr=learning_rate, decay=decay, \r\n dropout=dropout)\r\n \r\n start=time.time()\r\n data, dataframe, model = training(model, X=[x[i] for i in range(subnets)], Y = y, epochs=epochs, \r\n batch_size=batch_size, test_val=test_val, saveas=name,\r\n verbose=verbose)\r\n \r\n print('NN training lasted ',np.round(time.time() - start,2),'s')\r\n print('\\n')\r\n plotgraph(readfile=name+'.csv', outfiles=name, cost_function=cost_function)\r\n \r\n y_pred = (model.predict([xor[i] for i in range(subnets)]) > 0.5)\r\n \r\n precision, recall, fscore, support = PRFS(df['target'],y_pred)\r\n cnf_matrix=confusion_matrix(df['target'],y_pred)\r\n np.save(str(name)+'_cnfmat.npy',cnf_matrix)\r\n precision = np.round(100*precision,2)\r\n recall = np.round(100*recall,2)\r\n fscore = np.round(100*fscore,2)\r\n \r\n with open('PRFS_'+str(control_file)+'.txt', 'a') as prfs:\r\n prfs.write(str(name)+'\\n')\r\n prfs.write('classes: '+str(class_names)+'\\n')\r\n prfs.write('samples: '+str(support)+'\\n')\r\n prfs.write('precision: '+str(precision)+'\\n')\r\n prfs.write('recall: '+str(recall)+'\\n')\r\n prfs.write('f1-score: '+str(fscore)+'\\n')\r\n prfs.write('\\n')\r\n prfs.close()\r\n \r\n plt.figure(1)\r\n plot_confusion_matrix(cnf_matrix, classes=class_names,\r\n title='Confusion matrix, without normalization')\r\n plt.savefig('cnfmat_'+str(name)+'.png')\r\n \r\n plt.figure(2)\r\n plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\r\n title='Normalized confusion matrix')\r\n plt.savefig('normcnfmat_'+str(name)+'.png')\r\n \r\n plt.close('all')\r\n \r\n os.system('mv *' + name + '* ' + directorio)\r\n os.system('mv PRFS_' + str(control_file) + '.txt ' + directorio)\r\n os.system('cp ' + control_file + '.txt ' + directorio)\r\n os.system('cp ' + dictionary + '.txt ' + directorio)\r\n \r\n if test_frac != 0:\r\n test_models(directorio=directorio)\r\n \r\n if test_with_all_false:\r\n test_all_false(directorio=directorio, 
database=database, \r\n local_function=local_function)\r\n\r\n print('Whole process lasted ', np.round(-start_main+time.time(),2),'s') \r\n return", "def create_Hazus_EQ_bldg_repair_db(source_file,\n target_data_file='bldg_repair_DB_Hazus_EQ.csv',\n target_meta_file='bldg_repair_DB_Hazus_EQ.json'):\n\n # parse the source file\n with open(source_file, 'r', encoding='utf-8') as f:\n raw_data = json.load(f)\n\n # prepare lists of labels for various building features\n occupancies = list(\n raw_data['Structural_Fragility_Groups']['Repair_cost'].keys())\n\n # initialize the output loss table\n # define the columns\n out_cols = [\n \"Incomplete\",\n \"Quantity-Unit\",\n \"DV-Unit\",\n ]\n for DS_i in range(1, 6):\n out_cols += [\n f\"DS{DS_i}-Theta_0\",\n ]\n\n # create the MultiIndex\n cmp_types = ['STR', 'NSD', 'NSA', 'LF']\n comps = [f'{cmp_type}.{occ_type}'\n for cmp_type in cmp_types for occ_type in occupancies]\n DVs = ['Cost', 'Time']\n df_MI = pd.MultiIndex.from_product([comps, DVs], names=['ID', 'DV'])\n\n df_db = pd.DataFrame(\n columns=out_cols,\n index=df_MI,\n dtype=float\n )\n\n # First, prepare the structural damage consequences\n S_data = raw_data['Structural_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'STR.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS4 and DS5 have identical repair consequences\n if DS_i == 5:\n ds_i = 4\n else:\n ds_i = DS_i\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = S_data['Repair_cost'][occ_type][ds_i-1]\n\n df_db.loc[\n (cmp_id, 'Time'),\n f'DS{DS_i}-Theta_0'] = S_data['Repair_time'][occ_type][ds_i-1]\n\n # Second, the non-structural drift sensitive one\n NSD_data = raw_data['NonStructural_Drift_Sensitive_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'NSD.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 5):\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = NSD_data['Repair_cost'][occ_type][DS_i-1]\n\n # Third, the non-structural acceleration sensitive fragilities\n NSA_data = raw_data['NonStructural_Acceleration_Sensitive_Fragility_Groups']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'NSA.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 5):\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = NSA_data['Repair_cost'][occ_type][DS_i-1]\n\n # Fourth, the lifeline facilities\n LF_data = raw_data['Lifeline_Facilities']\n\n for occ_type in occupancies:\n\n # create the component id\n cmp_id = f'LF.{occ_type}'\n\n # store the consequence values for each Damage State\n for DS_i in range(1, 6):\n\n # DS4 and DS5 have identical repair consequences\n if DS_i == 5:\n ds_i = 4\n else:\n ds_i = DS_i\n\n df_db.loc[\n (cmp_id, 'Cost'),\n f'DS{DS_i}-Theta_0'] = LF_data['Repair_cost'][occ_type][ds_i - 1]\n\n df_db.loc[\n (cmp_id, 'Time'),\n f'DS{DS_i}-Theta_0'] = LF_data['Repair_time'][occ_type][ds_i - 1]\n\n # remove empty rows (from the end)\n df_db.dropna(how='all', inplace=True)\n\n # All Hazus components have complete fragility info,\n df_db.loc[:, 'Incomplete'] = 0\n\n # The damage quantity unit is the same for all consequence values\n df_db.loc[:, 'Quantity-Unit'] = \"1 EA\"\n\n # The output units are also indentical among all components\n df_db.loc[idx[:, 'Cost'], 'DV-Unit'] = \"loss_ratio\"\n df_db.loc[idx[:, 'Time'], 'DV-Unit'] = \"day\"\n\n # convert to simple 
index\n df_db = base.convert_to_SimpleIndex(df_db, 0)\n\n # rename the index\n df_db.index.name = \"ID\"\n\n # convert to optimal datatypes to reduce file size\n df_db = df_db.convert_dtypes()\n\n # save the consequence data\n df_db.to_csv(target_data_file)\n\n # save the metadata - later\n # with open(target_meta_file, 'w+') as f:\n # json.dump(meta_dict, f, indent=2)\n\n print(\"Successfully parsed and saved the repair consequence data from Hazus \"\n \"EQ\")", "def prep(self):\n sq1 = 'create table TCVR ( ID, T, C, V, R , primary key ( ID ) ) ;'\n sq2 = 'create table IDX ( ID , A , primary key(A) ) ; '\n self.sq.SQX(sq1)\n self.sq.SQX(sq2)\n sq3 = \"insert into IDX VALUES ( 1 , 'A' ) ; \"\n self.sq.SQX(sq3)", "def ogip_dictionary_arf():\n \"\"\"\n this function returns the required and optional keywords and columns\n as defined by OGIP 92-002 and 92-002a\n \"\"\"\n global status\n global REPORT\n\n \"\"\"\n FOR the ARF file:\n \"\"\"\n \"\"\"\n Define REQUIRED Keywords for SPECRESP EXTENSION (note: EXTNAME is SPECRESP)\n \"\"\"\n reqkeys = ['TELESCOP', 'INSTRUME']\n reqkeys.append('FILTER')\n reqkeys.append('CHANTYPE[PHA|PI]')\n reqkeys.append('DETCHANS')\n reqkeys.append('HDUCLASS[OGIP]')\n reqkeys.append('HDUCLAS1[RESPONSE]')\n reqkeys.append('HDUCLAS2[SPECRESP]')\n reqkeys.append('HDUVERS[1.1.0]')\n reqkeys.append('TLMIN*')\n reqkeys.append('NUMGRP')\n reqkeys.append('NUMELT')\n reqkeys.append('CCLS0001[CPF]')\n reqkeys.append('CCNM0001[SPECRESP]')\n reqkeys.append('CDTP0001[DATA]')\n reqkeys.append('CVSD0001')\n reqkeys.append('CVST0001')\n reqkeys.append('CDES0001')\n\n \"\"\"\n Define recommended Keywords\n \"\"\"\n optkeys = ['PHAFILE']\n optkeys.append('LO_THRES') # minimum probability threshold in matrix (values < this are set to 0)\n optkeys.append('HDUCLAS3[REDIST|DETECTOR|FULL]') # required if channel numbering doesn't start at 1\n optkeys.append('RMFVERSN[1992A]')\n optkeys.append('HDUVERS1[1.1.0]')\n optkeys.append('HDUVERS2[1.2.0]')\n\n \"\"\"\n Define Required Columns\n \"\"\"\n reqcols = ['ENERG_LO'] # lower energy bound of bin (keV)\n reqcols.append('ENERG_HI') # upper energy bound of bin (keV); generally ENERG_LO(J) = ENERG_HI(J-1)\n reqcols.append('SPECRESP') # the \"effective area\"\n\n\n \"\"\"\n Define Optional Columns\n \"\"\"\n optcols = [] # dispersion order for grating data\n\n specresp = {'KEYWORDS':{'REQUIRED':reqkeys,'RECOMMENDED':optkeys}, 'COLUMNS':{'REQUIRED':reqcols,'RECOMMENDED':optcols}}\n\n extns={'REQUIRED':['SPECRESP'],'OPTIONAL':[]}\n #\n # create structure for the ARF file\n #\n ogip = {'EXTENSIONS':extns,\n 'SPECRESP':specresp,\n 'REFERENCE':'OGIP/92-002',\n 'REFURL':'https://heasarc.gsfc.nasa.gov/docs/heasarc/ofwg/ofwg_recomm.html',\n 'REFTITLE':'The Calibration Requirements for Spectral Analysis'}\n\n return ogip", "def create_DESeqRscript_replicates(infile=\"/projects/dowellde/groseq/data/replicates/gffcoverage/set1andset2.coverage.protein_coding\",columns=\"[10, 15, 11, 14]\", type_transcript=\"gffcoverage\", conditions=\"['DMS0', 'DMSO', 'Nutlin', 'Nutlin']\", condition1=\"DMSO\", condition2=\"Nutlin\",title_of_names_column=\"group\"):\n\n f = open(infile)\n headers = f.readline()\n headers = headers.strip(\"\\n\")\n headers = headers.split(\"\\t\")\n f.close()\n infile_dir = infile.split(\"/\")[:-1]\n infile_dir = \"/\".join(infile_dir)+\"/\"\n infile_root = infile.split(\"/\")[-1].strip(\".txt\")\n\tset_conditions = set(eval(conditions))\n\tset_conditions = list(set_conditions)\n outfile = 
infile_dir+infile_root+\".\"+condition1+condition2+type_transcript\n write_file = outfile+\".R\"\n print write_file\n wf = open(write_file ,\"w\")\n R_dump_file = outfile+\".Rout\"\n graph_file = outfile+\".png\"\n outfileallinputs = outfile+\".res.txt\"\n outfilesig = outfile+\".resSig.txt\"\n outfilesig_orderpval = outfile+\".resSig_pvalue.txt\"\n wf.write('sink(\"'+R_dump_file+'\")\\n')\n wf.write('library( DESeq )\\n')\n wf.write('data <- read.delim(\"'+infile+r'\", sep=\"\\t\", header=TRUE)'+\"\\n\")#need to check that \\t comes out like it should. Might write it wrong.\n\tcolumns_list = []\n\tcolumns = eval(columns)\n\tline = \", \".join(map(str,columns))\n wf.write('countsTable <- subset(data, select=c('+line+'))\\n')\n wf.write('rownames(countsTable) <- data$'+title_of_names_column+'\\n')\n\tconditions = eval(conditions)\n line = '\", \"'.join(conditions)\n wf.write('conds <- c(\"'+line+'\")\\n')\n wf.write('cds <- newCountDataSet( countsTable, conds )\\n')\n wf.write('cds <- estimateSizeFactors( cds )\\n')\n wf.write('sizeFactors(cds)\\n')\n wf.write(\"cds <- estimateDispersions( cds )\\n\")\n wf.write('res <- nbinomTest( cds, \"'+condition1+'\", \"'+condition2+'\" )\\n')\n wf.write('plotDE <- function( res ) plot(res$baseMean,res$log2FoldChange, log=\"x\", pch=20, cex=.1, col = ifelse( res$padj < .1, \"red\", \"black\" ) )\\n')\n wf.write(\"png('\"+graph_file+\"')\\n\")\n wf.write('plotDE( res )\\n')\n wf.write('dev.off()\\n')\n wf.write('resSig <- res[ res$padj < .1, ]\\n')\n wf.write('write.table(res, file = \"'+outfileallinputs+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n wf.write('write.table(resSig, file = \"'+outfilesig+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n wf.write('write.table(resSig[ order(resSig$pval), ], file = \"'+outfilesig_orderpval+r'\", append = FALSE, sep = \"\\t\")'+\"\\n\")\n wf.write('sink()\\n')", "def rpfp(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['rpfp']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for i in xrange(1,4):\n label = \"RPFP{0}\".format(str(i))\n distillate_label = \"L{0}-E_C{1}\".format(str(i),str(i))\n lAng_label = 'L{0}ANG'.format(str(i))\n cAng_label = 'C{0}ANG'.format(str(i))\n lMag_label = 'C{0}MAG'.format(str(i))\n cMag_label = 'C{0}MAG'.format(str(i))\n distillate_label = get_distillate_label([lAng_label, cAng_label, lMag_label, cMag_label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_lAng_label = lAng_label\n dep_lAng_name = fields['deps'][0]\n dep_lAng_uuid = self.uuid_map[lAng_label]\n dep_cAng_label = cAng_label\n dep_cAng_name = fields['deps'][1]\n dep_cAng_uuid = self.uuid_map[cAng_label]\n dep_lMag_label = lMag_label\n dep_lMag_name = fields['deps'][2]\n dep_lMag_uuid = self.uuid_map[lMag_label]\n dep_cMag_label = cMag_label\n dep_cMag_name = fields['deps'][3]\n dep_cMag_uuid = self.uuid_map[cMag_label]\n \n deps = [[dep_lAng_label, dep_lAng_name, dep_lAng_uuid],\n [dep_lMag_label, dep_lMag_name, dep_lMag_uuid],\n [dep_cAng_label, dep_cAng_name, dep_cAng_uuid],\n [dep_cMag_label, dep_cMag_name, dep_cMag_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"RPFP\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = 
fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map['REAC_PWR{0}'.format(i)] = emitted[-3][-36:]\n output_uuid_map['FUND_PWR{0}'.format(i)] = emitted[-2][-36:]\n\n filename = \"{0}/RPFP_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def genPrimerPairs_3Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 3\\' extension half-asstemers')\n\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[8:10]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_L10 = forwPrimer5_3[10:]\n print(f\"Last 10 Nucleotides of forward primer: {forwPrimer_L10}\")\n\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_L10[::-1])):\n revPrimer_L10 = GenOligoGC(10,GC_low, GC_high)\n\n \"\"\"First 10 Nuc of rev primer must be identical to last 10 Nuc of forward Primer\"\"\"\n revPrimer5_3 = forwPrimer_L10 + revPrimer_L10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def _create_sql(self):\n\n pdbfile = self.pdbfile\n sqlfile = self.sqlfile\n\n if self.verbose:\n print('-- Create SQLite3 database')\n\n #name of the table\n #table = 'ATOM'\n\n # column names and types\n self.col = {'serial' : 'INT',\n 'name' : 'TEXT',\n 'altLoc' : 'TEXT',\n 'resName' : 'TEXT',\n 'chainID' : 'TEXT',\n 'resSeq' : 'INT',\n 'iCode' : 'TEXT',\n 'x' : 'REAL',\n 'y' : 'REAL',\n 'z' : 'REAL',\n 'occ' : 'REAL',\n 'temp' : 'REAL'}\n\n # delimtier of the column format\n # taken from http://www.wwpdb.org/documentation/file-format-content/format33/sect9.html#ATOM\n self.delimiter = {\n 'serial' : [6,11],\n 'name' : [12,16],\n 'altLoc' : [16,17],\n 'resName' :[17,20],\n 'chainID' :[21,22],\n 'resSeq' :[22,26],\n 'iCode' :[26,26],\n 'x' :[30,38],\n 'y' :[38,46],\n 'z' :[46,54],\n 'occ' :[54,60],\n 'temp' :[60,66]}\n\n if self.no_extra:\n del self.col['occ']\n del self.col['temp']\n\n # size of the things\n ncol = len(self.col)\n ndel = len(self.delimiter)\n\n\n # open the data base\n # if we do not specify a db name\n # the db is only in RAM\n # there might be little advantage to use memory\n # https://stackoverflow.com/questions/764710/sqlite-performance-benchmark-why-is-memory-so-slow-only-1-5x-as-fast-as-d\n if self.sqlfile is None:\n self.conn = sqlite3.connect(':memory:')\n \n # or we create a new db file\n else:\n if os.path.isfile(sqlfile):\n sp.call('rm %s' %sqlfile,shell=True)\n self.conn = sqlite3.connect(sqlfile)\n self.c = self.conn.cursor()\n\n # intialize the header/placeholder\n header,qm = '',''\n for ic,(colname,coltype) in enumerate(self.col.items()):\n header += '{cn} {ct}'.format(cn=colname,ct=coltype)\n qm += '?'\n if ic < ncol-1:\n header += ', '\n qm += ','\n\n # create the table\n query = 'CREATE TABLE ATOM ({hd})'.format(hd=header)\n self.c.execute(query)\n\n\n # read the pdb file\n # this is dangerous if there are ATOM written in the comment part\n # which happends often\n #data = sp.check_output(\"awk 
'/ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a safer version consist at matching against the first field\n # won't work on windows\n #data = sp.check_output(\"awk '$1 ~ /^ATOM/' %s\" %pdbfile,shell=True).decode('utf8').split('\\n')\n\n # a pure python way\n # RMK we go through the data twice here. Once to read the ATOM line and once to parse the data ...\n # we could do better than that. But the most time consuming step seems to be the CREATE TABLE query\n # if we path a file we read it\n if isinstance(pdbfile,str):\n if os.path.isfile(pdbfile):\n with open(pdbfile,'r') as fi:\n data = [line.split('\\n')[0] for line in fi if line.startswith('ATOM')]\n else:\n raise FileNotFoundError('File %s was not found',pdbfile)\n\n # if we pass a list as for h5py read/write\n # we directly use that\n elif isinstance(pdbfile,np.ndarray):\n data = [l.decode('utf-8') for l in pdbfile.tolist()]\n\n # if we cant read it\n else:\n print(pdbfile)\n raise ValueError('PDB data not recognized')\n\n # if there is no ATOM in the file\n if len(data)==1 and data[0]=='':\n print(\"-- Error : No ATOM in the pdb file.\")\n self.is_valid = False\n return\n\n # haddock chain ID fix\n del_copy = self.delimiter.copy()\n if data[0][del_copy['chainID'][0]] == ' ':\n del_copy['chainID'] = [72,73]\n\n # get all the data\n data_atom = []\n for iatom,atom in enumerate(data):\n\n # sometimes we still have an empty line somewhere\n if len(atom) == 0:\n continue\n\n # browse all attribute of each atom\n at = ()\n for ik,(colname,coltype) in enumerate(self.col.items()):\n\n # get the piece of data\n data = atom[del_copy[colname][0]:del_copy[colname][1]].strip()\n\n # convert it if necessary\n if coltype == 'INT':\n data = int(data)\n elif coltype == 'REAL':\n data = float(data)\n\n # append keep the comma !!\n # we need proper tuple\n at +=(data,)\n\n # append\n data_atom.append(at)\n\n\n # push in the database\n self.c.executemany('INSERT INTO ATOM VALUES ({qm})'.format(qm=qm),data_atom)", "def mainFunction(f):\n\n #############################################################################\n \n \n # biomass hexagon\n predF = '/vol/v3/lt_stem_v3.1/models/biomassfiaald_20180708_0859/2000/biomassfiaald_20180708_0859_2000_mean.tif'\n trainF = '/vol/v2/datasets/biomass/nbcd/fia_ald/nbcd_fia_ald_biomass_clipped_to_conus.tif'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = -32768\n predND = -9999\n trgField = 'id'\n descrField = 'id'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/biomassfiaald_20180708_0859/hexagon_correlation'\n xyLim = (500, 500)\n xLab = 'Reference (tons/ha)'\n yLab = 'Prediction (tons/ha)'\n annoXY = (15,420)\n \n \n \"\"\"\n # cc\n predF = '/vol/v3/lt_stem_v3.1/models/canopy_20180915_1631/2001/canopy_20180915_1631_2001_mean.tif'\n trainF = '/vol/v2/stem/conus/reference_rasters/nlcd_2001_canopy_clipped_to_conus_train.tif'\n #shpF = '/vol/v2/datasets/Eco_Level_III_US/us_eco_l3_no_states_multipart.shp'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = 255\n predND = 255\n trgField = 'id'\n descrField = 'id'\n #trgField = 'US_L3CODE'\n #descrField = 'US_L3NAME'\n #outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/ecoregion_correlation'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/hexagon_correlation'\n xyLim = (100, 100)\n xLab = 'Reference (%)'\n yLab = 'Prediction (%)'\n annoXY = (5,82)\n \"\"\"\n 
#############################################################################\n\n\n # get color setup\n norm = colors.Normalize(vmin=0, vmax=1)\n f2rgb = cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('YlGnBu_r'))\n \n # open the shapefile\t\n vDriver = ogr.GetDriverByName(\"ESRI Shapefile\")\n vSrc = vDriver.Open(shpF, 0)\n vLayer = vSrc.GetLayer()\n \n commonBox = get_intersec([predF, trainF])\n\n#for f in range(vLayer.GetFeatureCount()):\n feature = vLayer[f]\n name = feature.GetField(trgField)\n print('f: '+str(f))\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n if os.path.exists(outFig):\n #break\n return\n \n descr = feature.GetField(descrField)\n \n predP, coords = get_zone_pixels(feature, shpF, predF, 1, [commonBox[0], commonBox[2], commonBox[3], commonBox[1]])#.compressed() [commonBox[0], commonBox[2], commonBox[3], commonBox[1]]\n trainP, coords = get_zone_pixels(feature, shpF, trainF, 1, [coords[0], coords[1], coords[2], coords[3]])#.compressed()\n \n predP = ma.masked_equal(predP, predND)\n trainP = ma.masked_equal(trainP, trainND)\n trainP = ma.masked_equal(trainP, 0)\n\n combMask = np.logical_not(np.logical_not(predP.mask) * np.logical_not(trainP.mask))\n predP[combMask] = ma.masked\n trainP[combMask] = ma.masked\n predP = predP.compressed()\n trainP = trainP.compressed()\n if (predP.shape[0] == 0) | (trainP.shape[0] == 0) | (predP==0).all() | (trainP==0).all():\n predP = np.array([0,0,1,1], dtype='float64')\n trainP = np.array([0,0,1,1], dtype='float64')\n mae = round(np.mean(np.absolute(np.subtract(predP, trainP))),1)\n rmse = round(np.sqrt(np.mean((predP-trainP)**2)),1)\n \n\n totPixs = trainP.shape[0]\n sampSize = round(totPixs*1)\n pickFrom = range(sampSize)\n #sampIndex = np.random.choice(pickFrom, size=sampSize)\n sampIndex = pickFrom\n\n r = round(np.corrcoef(trainP[sampIndex], predP[sampIndex])[0][1], 2)\n if (mae == 0) & (r == 1):\n r = 0.0\n rColor = f2hex(f2rgb, r)\n p = sns.jointplot(trainP[sampIndex], predP[sampIndex], kind=\"hex\", color='blue', xlim=(0,xyLim[0]), ylim=(0,xyLim[1]), size=5)\n p.ax_joint.set_xlabel(xLab)\n p.ax_joint.set_ylabel(yLab)\n p.ax_joint.annotate('r: '+str(r)+'\\nrmse: '+str(rmse)+'\\nmae: '+str(mae), annoXY)\n plt.tight_layout()\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n p.savefig(outFig)\n \n df = pd.DataFrame({'id':name, 'descr':descr, 'r':r, 'rmse':rmse, 'mae':mae, 'color':rColor, 'img':os.path.basename(outFig)}, index=[0])\n outCSV = outFig.replace('.png','.csv')\n df.to_csv(outCSV, ',', index=False)", "def main():\n ref_seq = {}\n ent_spe_sero = {}\n tag_dict = {\"Contigs_with_VP1\":\"contigs\", \"P1_sequences\":\"p1\",\n \"VP1_sequences\":\"vp1\", \"5UTR_sequences\":\"5utr\", \"3D_sequences\":\"3d\"}\n args = get_arguments()\n # Load query elements\n print(\"Load resume file\")\n (query_dict, classify_list,\n classify_specie_list, serotype_list) = get_query(args.resume_file,\n args.tag,\n args.incomplete)\n print(\"{} descriptions loaded\".format(len(query_dict)))\n # Load specie association\n if args.ent_serotype_file and args.template_seq_file:\n # Load enterovirus serotype\n print(\"Load enterovirus serotype association\")\n ent_spe_sero = load_spe_sero(args.ent_serotype_file)\n # Load template sequence\n print(\"Load template sequence\")\n ref_seq = get_template_sequence(args.template_seq_file, ent_spe_sero)\n # Grab query sequence in the database\n print(\"Load database sequence\")\n sequence_data = get_sequence(query_dict, 
args.fasta_file)\n print(\"{} sequences loaded\".format(len(sequence_data)))\n # Write the new fasta file\n print(\"Write the new fasta\")\n write_sequence(args.results, sequence_data, query_dict, classify_list,\n tag_dict[args.tag], ref_seq, ent_spe_sero)\n #print(save_association)\n print(\"Write the itol label\")\n write_itol_label(args.itol_dir, sequence_data, query_dict, classify_list,\n tag_dict[args.tag])\n print(\"Write the itol tree color\")\n write_itol_tree_color(args.itol_dir, sequence_data, query_dict, classify_specie_list, serotype_list,\n tag_dict[args.tag])\n print(\"Done\")", "def pre_process(db):\n conn = sqlite3.connect(db)\n data = pd.read_sql_query(\"Select Delta_T, V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V20, V21, V22, V23, V24, V25, V26, V27, V28, Amount , Class from transactions;\", conn)\n train_split = int(0.8*len(data))\n train = data[0:train_split]\n test = data[train_split:len(data)]\n train_x = train.loc[:, ['Delta_T', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V10', 'V11', 'V12', 'V13', 'V14', 'V15', 'V16', 'V17', 'V18', 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27', 'V28', 'Amount']]\n train_y = train.loc[:, ['Class']]\n test_x = test.loc[:, ['Delta_T', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'V7', 'V8', 'V9', 'V10', 'V11', 'V12', 'V13', 'V14', 'V15', 'V16', 'V17', 'V18', 'V19', 'V20', 'V21', 'V22', 'V23', 'V24', 'V25', 'V26', 'V27', 'V28', 'Amount']]\n test_y = test.loc[:, ['Class']]\n train_x = train_x.to_numpy()\n train_y = np.squeeze(train_y.to_numpy(),axis=1)\n test_x = test_x.to_numpy()\n test_y = np.squeeze(test_y.to_numpy(),axis=1)\n return train_x,train_y,test_x,test_y", "def Create_AlignmentDBFile_From_Gear(gearfile=None, truthdbfilename=None): \n\n if gearfile == None:\n return None\n\n if truthdbfilename == None:\n return None\n\n # Open db file\n dbfile = TFile( truthdbfilename, 'RECREATE', 'alignment parameters from ' + gearfile )\n\n # Define lists of alignment parameters\n id_list = []\n xpos_list = []\n ypos_list = []\n zpos_list = []\n xrot_list = []\n yrot_list = []\n zrot_list = []\n\n tree = xml.etree.ElementTree.parse(gearfile)\n root = tree.getroot()\n\n # Read out the alignment parameters\n for detectors in root.findall('detectors'): \n for detector in detectors.findall('detector'):\n for layers in detector.findall('layers'):\n for layer in layers.findall('layer'):\n\n for sensitive in layer.findall('sensitive'):\n xpos_list.append(float(sensitive.get('positionX')))\n ypos_list.append(float(sensitive.get('positionY')))\n zpos_list.append(float(sensitive.get('positionZ')))\n xrot_list.append(float(sensitive.get('alpha')))\n yrot_list.append(float(sensitive.get('beta')))\n zrot_list.append(float(sensitive.get('gamma')))\n id_list.append(int(sensitive.get('ID')))\n\n # Sort z position list and the corresponding sensor id list\n zpos_list2, id_list2 = (list(t) for t in zip(*sorted(zip(zpos_list, id_list))))\n\n # get number of planes\n nentries=len(id_list2)\n\n # ID histogram\n hSensorID = TH1F(\"hSensorID\",\"\",nentries,0,nentries)\n hSensorID.SetTitle(\"\")\n hSensorID.GetXaxis().SetTitle(\"plane\")\n hSensorID.GetYaxis().SetTitle(\"Sebsor ID\") \n\n # X position histogram\n hPositionX = TH1F(\"hPositionX\",\"\",nentries,0,nentries)\n hPositionX.SetTitle(\"\")\n hPositionX.GetXaxis().SetTitle(\"plane\")\n hPositionX.GetYaxis().SetTitle(\"position x [mm]\") \n\n # X position histogram\n hPositionY = TH1F(\"hPositionY\",\"\",nentries,0,nentries)\n 
hPositionY.SetTitle(\"\")\n hPositionY.GetXaxis().SetTitle(\"plane\")\n hPositionY.GetYaxis().SetTitle(\"position y [mm]\")\n\n # Z position histogram\n hPositionZ = TH1F(\"hPositionZ\",\"\",nentries,0,nentries)\n hPositionZ.SetTitle(\"\")\n hPositionZ.GetXaxis().SetTitle(\"plane\")\n hPositionZ.GetYaxis().SetTitle(\"position z [mm]\")\n\n # alpha rotation histogram\n hRotationAlpha = TH1F(\"hRotationAlpha\",\"\",nentries,0,nentries)\n hRotationAlpha.SetTitle(\"\")\n hRotationAlpha.GetXaxis().SetTitle(\"plane\")\n hRotationAlpha.GetYaxis().SetTitle(\"rotation alpha [rad]\") \n\n # beta rotation histogram\n hRotationBeta = TH1F(\"hRotationBeta\",\"\",nentries,0,nentries)\n hRotationBeta.SetTitle(\"\")\n hRotationBeta.GetXaxis().SetTitle(\"plane\")\n hRotationBeta.GetYaxis().SetTitle(\"rotation beta [rad]\")\n\n # gamma rotation histogram\n hRotationGamma = TH1F(\"hRotationGamma\",\"\",nentries,0,nentries)\n hRotationGamma.SetTitle(\"\")\n hRotationGamma.GetXaxis().SetTitle(\"plane\")\n hRotationGamma.GetYaxis().SetTitle(\"rotation gamma [rad]\")\n\n # Loop over sensor ids\n for bin,sensid in enumerate(id_list2):\n\n # Find list index for this sensor id\n index = id_list.index(sensid)\n \n # Fill histograms\n hSensorID.SetBinContent(bin+1,id_list[index])\n hPositionX.SetBinContent(bin+1,xpos_list[index])\n hPositionY.SetBinContent(bin+1,ypos_list[index])\n hPositionZ.SetBinContent(bin+1,zpos_list[index])\n hRotationAlpha.SetBinContent(bin+1,xrot_list[index]/180*3.1415) # angles in gear file are given in degree -> change to rad\n hRotationBeta.SetBinContent(bin+1,yrot_list[index]/180*3.1415) # angles in gear file are given in degree -> change to rad\n hRotationGamma.SetBinContent(bin+1,zrot_list[index]/180*3.1415) # angles in gear file are given in degree -> change to rad\n \n dbfile.Write()\n dbfile.Close()", "def __init__(self, csv = None, wp_key = None, effs = None):\n #set_trace()\n parsed_csv = reshuffle_sf_dict(\n convert_btag_csv_file(csv),\n algorithm = wp_key[0]\n )\n self.sf_ = recursive_compile(parsed_csv[wp_key[0]][wp_key[1]][wp_key[2]])\n #set_trace()\n # FIXME: move to correlated/uncorrelated\n # Define, by hand, the proper correlation among taggers, \n # somewhere unfortunately needs to be hardcoded by hand\n # tuple of names for UDSG, B, C\n self.schema_ = { \n \"central\" : (\"UDSG_central\", \"C_central\", \"B_central\"),\n \"bc_up\" : (\"UDSG_central\", \"C_up\", \"B_up\"),\n \"bc_up_correlated\" : (\"UDSG_central\", \"C_up_correlated\", \"B_up_correlated\"),\n \"bc_up_uncorrelated\" : (\"UDSG_central\", \"C_up_uncorrelated\", \"B_up_uncorrelated\"),\n \"bc_down\" : (\"UDSG_central\", \"C_down\", \"B_down\"),\n \"bc_down_correlated\" : (\"UDSG_central\", \"C_down_correlated\", \"B_down_correlated\"),\n \"bc_down_uncorrelated\" : (\"UDSG_central\", \"C_down_uncorrelated\", \"B_down_uncorrelated\"),\n \"l_up\" : (\"UDSG_up\", \"C_central\", \"B_central\"),\n \"l_up_correlated\" : (\"UDSG_up_correlated\", \"C_central\", \"B_central\"),\n \"l_up_uncorrelated\" : (\"UDSG_up_uncorrelated\", \"C_central\", \"B_central\"),\n \"l_down\" : (\"UDSG_down\", \"C_central\", \"B_central\"),\n \"l_down_correlated\" : (\"UDSG_down_correlated\", \"C_central\", \"B_central\"),\n \"l_down_uncorrelated\" : (\"UDSG_down_uncorrelated\", \"C_central\", \"B_central\"),\n #\"udsg_up\" : (\"UDSG_up\", \"C_central\", \"B_central\"),\n #\"udsg_up_correlated\" : (\"UDSG_up_correlated\", \"C_central\", \"B_central\"),\n #\"udsg_up_uncorrelated\" : (\"UDSG_up_uncorrelated\", \"C_central\", 
\"B_central\"),\n #\"udsg_down\" : (\"UDSG_down\", \"C_central\", \"B_central\"),\n #\"udsg_down_correlated\" : (\"UDSG_down_correlated\", \"C_central\", \"B_central\"),\n #\"udsg_down_uncorrelated\" : (\"UDSG_down_uncorrelated\", \"C_central\", \"B_central\"),\n \"bc_jes_up\" : (\"UDSG_central\", \"C_up_jes\", \"B_up_jes\"),\n \"bc_pileup_up\" : (\"UDSG_central\", \"C_up_pileup\", \"B_up_pileup\"),\n \"bc_statistic_up\" : (\"UDSG_central\", \"C_up_statistic\", \"B_up_statistic\"),\n \"bc_bfragmentation_up\" : (\"UDSG_central\", \"C_up_bfragmentation\", \"B_up_bfragmentation\"),\n \"bc_btempcorr_up\" : (\"UDSG_central\", \"C_up_btempcorr\", \"B_up_btempcorr\"),\n \"bc_cb_up\" : (\"UDSG_central\", \"C_up_cb\", \"B_up_cb\"),\n \"bc_cfragmentation_up\" : (\"UDSG_central\", \"C_up_cfragmentation\", \"B_up_cfragmentation\"),\n \"bc_cjets_up\" : (\"UDSG_central\", \"C_up_cjets\", \"B_up_cjets\"),\n \"bc_dmux_up\" : (\"UDSG_central\", \"C_up_dmux\", \"B_up_dmux\"),\n \"bc_gluonsplitting_up\" : (\"UDSG_central\", \"C_up_gluonsplitting\", \"B_up_gluonsplitting\"),\n \"bc_jetaway_up\" : (\"UDSG_central\", \"C_up_jetaway\", \"B_up_jetaway\"),\n \"bc_ksl_up\" : (\"UDSG_central\", \"C_up_ksl\", \"B_up_ksl\"),\n \"bc_l2c_up\" : (\"UDSG_central\", \"C_up_l2c\", \"B_up_l2c\"),\n \"bc_ltothers_up\" : (\"UDSG_central\", \"C_up_ltothers\", \"B_up_ltothers\"),\n \"bc_mudr_up\" : (\"UDSG_central\", \"C_up_mudr\", \"B_up_mudr\"),\n \"bc_mupt_up\" : (\"UDSG_central\", \"C_up_mupt\", \"B_up_mupt\"),\n \"bc_ptrel_up\" : (\"UDSG_central\", \"C_up_ptrel\", \"B_up_ptrel\"),\n #\"bc_type3_up\" : (\"UDSG_central\", \"C_up_type3\", \"B_up_type3\"),\n\n \"bc_jes_down\" : (\"UDSG_central\", \"C_down_jes\", \"B_down_jes\"),\n \"bc_pileup_down\" : (\"UDSG_central\", \"C_down_pileup\", \"B_down_pileup\"),\n \"bc_statistic_down\" : (\"UDSG_central\", \"C_down_statistic\", \"B_down_statistic\"),\n \"bc_bfragmentation_down\" : (\"UDSG_central\", \"C_down_bfragmentation\", \"B_down_bfragmentation\"),\n \"bc_btempcorr_down\" : (\"UDSG_central\", \"C_down_btempcorr\", \"B_down_btempcorr\"),\n \"bc_cb_down\" : (\"UDSG_central\", \"C_down_cb\", \"B_down_cb\"),\n \"bc_cfragmentation_down\" : (\"UDSG_central\", \"C_down_cfragmentation\", \"B_down_cfragmentation\"),\n \"bc_cjets_down\" : (\"UDSG_central\", \"C_down_cjets\", \"B_down_cjets\"),\n \"bc_dmux_down\" : (\"UDSG_central\", \"C_down_dmux\", \"B_down_dmux\"),\n \"bc_gluonsplitting_down\" : (\"UDSG_central\", \"C_down_gluonsplitting\", \"B_down_gluonsplitting\"),\n \"bc_jetaway_down\" : (\"UDSG_central\", \"C_down_jetaway\", \"B_down_jetaway\"),\n \"bc_ksl_down\" : (\"UDSG_central\", \"C_down_ksl\", \"B_down_ksl\"),\n \"bc_l2c_down\" : (\"UDSG_central\", \"C_down_l2c\", \"B_down_l2c\"),\n \"bc_ltothers_down\" : (\"UDSG_central\", \"C_down_ltothers\", \"B_down_ltothers\"),\n \"bc_mudr_down\" : (\"UDSG_central\", \"C_down_mudr\", \"B_down_mudr\"),\n \"bc_mupt_down\" : (\"UDSG_central\", \"C_down_mupt\", \"B_down_mupt\"),\n \"bc_ptrel_down\" : (\"UDSG_central\", \"C_down_ptrel\", \"B_down_ptrel\"),\n #\"bc_type3_down\" : (\"UDSG_central\", \"C_down_type3\", \"B_down_type3\"),\n }\n\n self.eff_ = {\n \"B\" : effs[\"bottom\"],\n \"C\" : effs[\"charm\" ],\n \"UDSG\" : effs[\"light\" ],\n }", "def __init__(self):\n # File settings and locations.\n self.DATA_DIR = 'data'\n self.DATA_COL_DIR = 'data_collated'\n\n self.FIRE_DATABASE = 'FPA_FOD_20170508.sqlite'\n self.CLIMATE_DATA = 'GlobalLandTemperaturesByCity.csv'\n self.STOCK_DATA = 'historical_stock_prices.csv'\n self.COMBINED_DATA = 
'combined_data.db'\n\n self.MODEL_PATH = 'models/dnn_wildfires.ckpt'\n\n # Setting to use reduced data for prototyping purposes.\n self.prototyping = False\n self.sample_size = 80000\n\n # Start date of data\n self.start = pd.to_datetime('1992-01-01')\n\n # Stocks in stock data to keep for analysis.\n self.stocks = ['MSFT', 'AAPL', 'GE', 'JNJ', 'JPM', 'PG']\n\n # Settings for validation and test set partitioning.\n self.val_set_ratio = 0.15\n self.test_set_ratio = 0.15\n\n # Separation of features for pipeline preparation \n self.cat_attribs = ['STATE', 'FIRE_SIZE_CLASS', 'OWNER_CODE', 'City']\n self.num_attribs = ['FIRE_YEAR', 'LATITUDE', 'LONGITUDE', 'FIRE_SIZE', \n 'FIRE_LENGTH', 'DIST_TO_MAJOR_CITY', 'AverageTemperature',\n 'AverageTemperatureUncertainty', 'AAPL', 'GE', 'JNJ', \n 'JPM', 'MSFT', 'PG']\n self.cycle_cols = ['DISC_MONTH', 'DISC_DAY_OF_WEEK', 'DISCOVERY_TIME', \n 'DISCOVERY_DOY', 'CONT_MONTH', 'CONT_DAY_OF_WEEK',\n 'CONT_TIME']\n\n # Define the ranges of the cycles in cycle_cols and whether any offset for\n # zero-indexing is needed (i.e., 'DISC_MONTH' cycles over a 12 month period\n # and the months need an offset of one to start the indicies at 0 for Jan.).\n self.cycle_ranges = [12, 7, 2400, 365, 12, 7, 2400]\n self.cycle_offsets = [1, 0, 0, 1, 1, 0, 0]\n\n # Parameters for deep learning model determined from randomized \n # hyperparameter search.\n self.n_hidden_layers = 4\n self.n_neurons = 200\n self.batch_size = 500\n self.batch_norm_momentum = 0.999\n self.dropout_rate = 0.4\n self.learning_rate = 0.01\n self.activation = tf.nn.elu\n\n # Hyperparameter settings .\n self.hp_search = False", "def genPrimerPairs_5Ext(primer_length=20, anneal_length=10, GC_low=40, GC_high=60):\n\n print('Primers for 5\\' extension half-asstemers')\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n \"\"\"re.match checks if the first 2 Nuc are GC in the forward and backwards direction\"\"\"\n while not (re.match(\"[GC]{2}\",str(forwTemplate5_3)) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[::-1])) and\n re.match(\"[GC]{2}\", str(forwTemplate5_3[10:12]))):\n\n forwTemplate5_3 = GenOligoGC(primer_length,GC_low, GC_high)\n\n forwTemp3_5 = forwTemplate5_3[::-1]\n forwPrimer5_3 = forwTemp3_5.complement()\n print(f\"Template Seq 3\\' - > 5\\': {forwTemp3_5}\")\n print(f\"ForwPrimer Seq 5\\' - > 3\\': {forwPrimer5_3}\")\n\n forwPrimer_f10 = forwPrimer5_3[:10]\n print(f\"First 10 Nucleotides of forward primer: {forwPrimer_f10}\")\n\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n while not re.match(\"[GC]{2}\",str(revPrimer_f10)):\n revPrimer_f10 = GenOligoGC(10,GC_low, GC_high)\n\n revPrimer5_3 = revPrimer_f10 + forwPrimer_f10\n\n print(f\"RevPrimer Seq 5\\' - > 3\\': {revPrimer5_3}\")\n\n return forwPrimer5_3, revPrimer5_3", "def main(inFilepath, outFilepath):\n\n gff_df=read_gff(inFilepath, additional_lst=[\"ID\"])\n attribute_lst=[]\n for _, row in gff_df.iterrows():\n orfId = \"{}_{}\".format(row[\"seqname\"], row[\"ID\"].split(\"_\")[-1])\n att = \"{};orf_id={}\".format(row[\"attribute\"], orfId)\n attribute_lst.append(att)\n gff_df[\"attribute\"]=attribute_lst\n write_gff(gff_df, outFilepath)\n print(\"DONE: output {}\".format(outFilepath))", "def parameterize(param_directory,pdb_file,topology_file,polymer_code,polymer_length):\n\n terphenyl_top = get_terphenyl_top_directory()\n if not os.path.exists(param_directory):\n os.mkdir(param_directory)\n param_topology = str(str(param_directory)+\"/topol.top\")\n copyfile(topology_file,param_topology)\n cwd = os.getcwd()\n if 
cwd != param_directory:\n os.chdir(param_directory)\n param_pdb = str(str(param_directory)+\"/\"+str(polymer_length)+\".pdb\")\n copyfile(pdb_file,param_pdb)\n\n # Parameterize our polymer using 'antechamber', from AmberTools.\n#\n # We parameterize the PDB structure using the param.sh BASH script written by Ben Coscia as a template: \"https://github.com/shirtsgroup/useful-scripts/blob/master/Paramaterization/GAFF/param.sh\"\n gaff_directory = str(str(terphenyl_top)+\"/setup_files/gaff\")\n replace(param_topology,'$TERPHENYL_TOP',terphenyl_top)\n replace(param_topology,'$RUN_DIRECTORY',param_directory)\n replace(param_topology,'$POLYMER_CODE ',str(\"{:<15}\".format(polymer_code)))\n replace(param_topology,'$POLYMER_CODE ',str(\"{:<3}\".format(polymer_code)))\n copyfile(str(str(gaff_directory)+\"/acpype.py\"),str(str(param_directory)+\"/acpype.py\"))\n copyfile(str(str(gaff_directory)+\"/insertmol2charges.py\"),str(str(param_directory)+\"/insertmol2charges.py\"))\n# copyfile(str(str(gaff_directory)+\"/anneal.mdp\"),str(run_directory+\"/anneal.mdp\"))\n # Replace the variable keyword '$NAME' in param.sh with the name of the current polymer length\n copyfile(str(str(gaff_directory)+\"/param.sh\"),str(str(param_directory)+\"/param.sh\"))\n replace(str(param_directory+\"/param.sh\"),'$NAME',polymer_length)\n replace(str(param_directory+\"/param.sh\"),'$RES',polymer_code)\n # Place the residue name in the input PDB file residue name columns\n with open(pdb_file, \"rt\") as fin:\n\n new_pdb_file = param_pdb\n with open(new_pdb_file, \"wt\") as fout:\n for line in fin:\n line_list = [char for char in line]\n line_start = ''.join(line_list[0:6])\n residue_code = ''.join(line_list[17:20])\n if line_start == 'HETATM' or line_start == 'ATOM ':\n if residue_code == ' ':\n line_list[17:20] = str(\"{:<3}\".format(polymer_code)).split()\n #del line_list[29]\n line = ''.join(line_list)\n fout.write(line)\n subprocess.run([\"chmod\",\"+x\",str(str(param_directory)+\"/param.sh\")])\n os.chdir(param_directory)\n subprocess.run([str(str(param_directory)+\"/param.sh\")])\n solute_gro_file = str(str(param_directory)+\"/\"+str(polymer_length)+\".gro\")\n solute_topology_file = str(str(param_directory)+\"/\"+str(polymer_code)+\".top\")\n if cwd != param_directory:\n os.chdir(cwd)\n return(solute_gro_file,solute_topology_file)", "def getPoolFileCatalogDQ2(baseURL, guids):\n\n xml_source = \"DQ2\"\n\n # get PoolFileCatalog\n iGUID = 0\n xml_from_PFC ='' \n\n # In LCG land use dq2_poolFCjobO\n region = readpar('region')\n if region != 'US' and not os.environ.has_key('Nordugrid_pilot'):\n tolog(\"!!FAILED!!2999!! Can not get PFC with LRC method for region %s\" % (region))\n return '', xml_source\n\n strGUIDs = 'guids='\n # if no site service\n if baseURL == '':\n tolog('!!FAILED!!2999!! DQ2 URL not set')\n return xml_from_PFC, xml_source\n tolog(\"Number of GUIDs: %d\" % len(guids))\n for guid in guids:\n iGUID += 1\n # make argument\n strGUIDs += '%s+' % guid\n if iGUID % 40 == 0 or iGUID == len(guids):\n # get PoolFileCatalog\n strGUIDs = strGUIDs[:-1]\n try:\n f = urllib.urlopen( baseURL+'/lrc/PoolFileCatalog?'+strGUIDs )\n except:\n tolog('!!FAILED!!2999!! Error opening DDM URL (%s)' % (get_exc_short()))\n return xml_from_PFC, xml_source\n ret = f.read()\n if ret.find('POOLFILECATALOG') == -1:\n tolog('!!FAILED!!2999!! 
Getting POOL FileCatalog failed: could not find the file/s in LRC!')\n tolog('Error returned from LRC: %s' % (ret))\n continue\n # append\n xml_from_PFC += ret\n strGUIDs = 'guids='\n # remove redundant trailer and header\n th = \\\n\"\"\"\n</POOLFILECATALOG>\n<\\?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"no\" \\?>\n<!-- Edited By POOL -->\n<!DOCTYPE POOLFILECATALOG SYSTEM \"InMemory\">\n<POOLFILECATALOG>\n\n\\s*<META name=\"fsize\" type=\"string\"/>\n\n\\s*<META name=\"md5sum\" type=\"string\"/>\n\n\\s*<META name=\"lastmodified\" type=\"string\"/>\n\n\\s*<META name=\"archival\" type=\"string\"/>\n\"\"\"\n xml_from_PFC = re.sub(th, '', xml_from_PFC)\n # return XML\n return xml_from_PFC, xml_source", "def add_AA_sequences_to_db(db_cursor):\n db_cursor.execute(\"SELECT rowid,residue,position,uniprotid FROM phosphositetb\")\n results = db_cursor.fetchall()\n #print results\n\n for rowid, residue, position, uniprotid in results:\n AA_sequence = get_AA_sequence_around_mod(residue,position,uniprotid)\n #print AA_sequence\n #db_cursor.execute(\"SELECT rowid, AA_sequence FROM phosphositetb\")\n #print db_cursor.fetchall()\n db_cursor.execute(\"UPDATE phosphositetb SET AA_sequence=? where rowid=?;\"\\\n ,(AA_sequence,rowid))", "def load_BindingDB_kd():\n affinity = pd.read_csv('./dataset/regression/BindingDB/BindingDB_Kd.txt', header=None)\n target = pd.read_csv('./dataset/regression/BindingDB/BindingDB_Target_Sequence_new.txt', header=None)\n drug = pd.read_csv('./dataset/regression/BindingDB/BindingDB_SMILES_new.txt', header=None)\n \n SMILES=[]\n Target=[]\n y=[]\n drugcnt=[]\n \n for i in range(len(target)):\n Target.append(target[0][i])\n y.append(affinity[0][i])\n SMILES.append(drug[0][i])\n\n aff=[]\n total=[]\n for i in range(len(target)):\n aff.insert(i, y[i].split(\" \"))\n for i in aff:\n total += i\n for i in range(len(SMILES)):\n drugcnt.insert(i, len(SMILES[i].split()))\n\n smile = []\n for segments in SMILES:\n for x in segments.split():\n smile.extend(x)\n #smile = [x for segments in SMILES for x in segments.split()]\n smiles_res=[]\n y_tmp=[]\n target_res=[]\n tmp=[]\n\n for i in range(len(drugcnt)):\n tmp.extend(repeat(Target[i], drugcnt[i]))\n for i in range(len(total)):\n if total[i] != '-1':\n y_tmp.append(total[i])\n smiles_res.append(smile[i])\n target_res.append(tmp[i])\n\n y_res = [float(i) for i in y_tmp]\n y_res = convert_y_unit(np.array(y_res), 'nM', 'p')\n return np.array(smiles_res), np.array(target_res), np.array(y_res)" ]
[ "0.6308902", "0.58502406", "0.5834245", "0.5675954", "0.5508745", "0.5472608", "0.53253484", "0.53151536", "0.5244673", "0.5226974", "0.519828", "0.5179325", "0.5178727", "0.5168326", "0.5158303", "0.51469505", "0.5117985", "0.5113623", "0.51080567", "0.5100592", "0.5092436", "0.50654453", "0.5050797", "0.50439185", "0.5042429", "0.50366724", "0.50294477", "0.50253063", "0.5023172", "0.5015647" ]
0.6131606
1
Attach this plugin to the PostProvision trigger to automatically set the expiration date for each server in the given job
def run(job, logger=None): one_day = datetime.datetime.now() + datetime.timedelta(days=int('{{ number_of_days }}')) date_string = "{:%m/%d/%Y}".format(one_day) job.set_progress("Setting expiration date for servers in this job to: {}".format(date_string)) for server in job.server_set.all(): server.set_value_for_custom_field("expiration_date", date_string) return "", "", ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postdeploy_plan(plan_name):\n pass", "def postdeploy(verbose_level=1):\n check_arg(verbose_level, u._('Verbose level'), int)\n\n action = KollaAction(verbose_level=verbose_level,\n playbook_name='post-deploy.yml')\n ansible_job = action.postdeploy()\n return Job(ansible_job)", "def post_deploy(self) -> Any:\n raise NotImplementedError", "def schedule_deploy():\n\n logger.info(\"Scheduling deploy\")\n scheduler.schedule_job(\"op_deploy\", {}, \"#general\", 60)", "def jobPostRun(job):\n\tif 'e' in job.proc.config._notify.when['pipeline']:\n\t\tlogger.debug('Notifying job ends')\n\t\tEMAIL.send('job', job, 'end')", "def register_publisher(self, hostname, expire=-1):", "def task_instance_pre_save_handler(instance, **_):\n if instance.state in (SUCCESSFUL, FAILED):\n instance.datetime_finished = timezone.now()", "def job_post_save(sender, instance, created, **kwargs):\n\n if created:\n jp = JobPermission.objects.create(\n job=instance,\n content_object=instance.user,\n permission=JobPermissionLevel.ADMIN.value,\n )\n jp.save()", "def on_expire(self, *args):\n\t\traise NotImplementedError", "def on_expire(self):\n pass", "def save(self, *args, **kwargs):\n super(News, self).save(*args, **kwargs)\n pigeonpost_queue.send(sender=self, defer_for=6*60*60)", "def post_provider_attachment_create(self, resource_dict):\n pass", "def setup_periodic_tasks(sender, **kwargs):\n sender.add_periodic_task(60, clean_empty_entity_attrs, name='Clean Entity Attributes')", "def task_instance_post_save_handler(instance, created, **_):\n # Only start the job if the instance was just created\n if created:\n # Use the specified queue else the default queue\n kwargs = {\n 'uuid': instance.uuid,\n 'container_image': instance.task_type.container_image,\n 'container_type': instance.task_type.container_type,\n 'script_path': instance.task_type.script_path,\n 'logs_path': instance.task_type.logs_path,\n 'args_dict': instance.arguments,}\n\n run_task.apply_async(\n kwargs=kwargs,\n queue=instance.task_queue.name,\n task_id=str(instance.uuid),)", "def trigger_expiration_notices(at_time=None, nb_days=15, dry_run=False):\n\n def _handle_organization_notices(organization):\n if organization.processor_card_key:\n card = organization.retrieve_card()\n try:\n exp_month, exp_year = card['exp_date'].split('/')\n exp_date = datetime(year=int(exp_year),\n month=int(exp_month), day=1, tzinfo=at_time.tzinfo)\n if lower >= exp_date:\n LOGGER.info(\"payment method expires soon for %s\",\n organization)\n if not dry_run:\n signals.card_expires_soon.send(\n sender=__name__, organization=organization,\n nb_days=nb_days)\n except (KeyError, ValueError):\n # exp info is missing or the format is incorrect\n pass\n else:\n LOGGER.info(\"%s doesn't have a payment method attached\",\n organization)\n if not dry_run:\n signals.payment_method_absent.send(sender=__name__,\n organization=organization)\n\n at_time = datetime_or_now(at_time)\n lower = at_time + relativedelta(days=nb_days)\n upper = at_time + relativedelta(days=nb_days + 1)\n LOGGER.info(\n \"trigger notifications for subscription expiring within [%s,%s[ ...\",\n lower, upper)\n prev_organization = None\n subscription = None\n for subscription in Subscription.objects.valid_for(ends_at__gte=lower,\n ends_at__lt=upper).order_by('organization'):\n org = subscription.organization\n plan = subscription.plan\n\n try:\n if subscription.auto_renew:\n if plan.renewal_type == plan.AUTO_RENEW:\n if org.id != prev_organization:\n _handle_organization_notices(org)\n\n prev_organization = org.id\n 
else:\n if plan.renewal_type == plan.ONE_TIME:\n LOGGER.info(\"trigger upgrade soon for %s\", subscription)\n if not dry_run:\n signals.subscription_upgrade.send(sender=__name__,\n subscription=subscription, nb_days=nb_days)\n\n elif plan.renewal_type == plan.REPEAT:\n LOGGER.info(\"trigger expires soon for %s\", subscription)\n if not dry_run:\n signals.expires_soon.send(sender=__name__,\n subscription=subscription, nb_days=nb_days)\n\n except Exception as err: #pylint:disable=broad-except\n # We use `Exception` because the email server might be\n # unavailable but ConnectionRefusedError is not a subclass\n # of RuntimeError.\n LOGGER.exception(\"error: %s\", err)\n\n # flushing the last organization\n if subscription and subscription.organization.id != prev_organization:\n if subscription.auto_renew:\n if subscription.plan.renewal_type == subscription.plan.AUTO_RENEW:\n _handle_organization_notices(subscription.organization)", "def provision_server(self, body):\n if not body:\n raise AssertionError(\"Payload cannot be empty\")\n\n self.nodes = len(body.get('nodes')) if body.get('os') else 1\n\n _cmd = 'mktemp -d'\n workspace = self._remote_cmd(_cmd).get('output')\n xml = self._pre_tasks(body, workspace)\n log = workspace + '/' + 'rg_cpt_deploy.log'\n\n _bin = '/usr/bin/nohup /usr/bin/l2add'\n _cmd = '{} -f {} -c y -r > {} 2>&1 &'.format(_bin, xml, log)\n\n if self._remote_cmd(_cmd, block=False).get('output') is None:\n raise AssertionError(\"Error encountered during provisioning\")\n\n return log", "def setDeactivationTime(*argv):", "def apply(self):\n changed = False\n job_schedule_exists = False\n results = netapp_utils.get_cserver(self.server)\n cserver = netapp_utils.setup_ontap_zapi(\n module=self.module, vserver=results)\n netapp_utils.ems_log_event(\"na_ontap_job_schedule\", cserver)\n job_details = self.get_job_schedule()\n if job_details:\n job_schedule_exists = True\n if self.state == 'absent': # delete\n changed = True\n elif self.state == 'present': # modify\n if job_details['job_minutes'] != str(self.job_minutes):\n changed = True\n else:\n if self.state == 'present': # create\n changed = True\n if changed:\n if self.module.check_mode:\n pass\n else:\n if self.state == 'present': # execute create\n if not job_schedule_exists:\n self.create_job_schedule()\n else: # execute modify minute\n self.modify_minute_job_schedule()\n elif self.state == 'absent': # execute delete\n self.delete_job_schedule()\n self.module.exit_json(changed=changed)", "def run_post_publishers():\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n\n version = m_env.get_current_version()\n\n # check if we have a proper version\n if not version:\n return\n\n # check if it is a Representation\n from anima.repr import Representation\n if Representation.repr_separator in version.take_name:\n return\n\n if version.is_published:\n from anima.publish import (run_publishers, staging, PRE_PUBLISHER_TYPE,\n POST_PUBLISHER_TYPE)\n # before doing anything run all publishers\n type_name = ''\n if version.task.type:\n type_name = version.task.type.name\n\n # before running use the staging area to store the current version\n staging['version'] = version\n run_publishers(type_name, publisher_type=POST_PUBLISHER_TYPE)\n # do not forget to clean up the staging area\n staging.clear()", "def set_autoscaled_instances(self, instance_count: int) -> None:\n set_instances_for_marathon_service(\n service=self.service,\n instance=self.instance,\n instance_count=instance_count,\n )", "def on_post(self, req, resp, account, 
container):\n _handle_script_upload(req, resp, account, container)", "def post_provider_attachment_update(self, resource_id, resource_dict):\n pass", "def auto_expire_after(self, auto_expire_after):\n\n self._auto_expire_after = auto_expire_after", "def uses_after_args(self, args):\n self.pod_args['uses_after'] = args", "def handle_wps_update(self, data):\n\n self.jobs = data", "def on_expiration_time(self, alarm) -> None:\r\n return", "def attach(self, tasklet):\n self.runtime.tasklet = tasklet\n self.runtime.update(dict(timeout=self.timeout, cnt=0))", "def post_backup(self, backup, manifest_file):\n pass", "def set_time_based_auto_scaling(InstanceId=None, AutoScalingSchedule=None):\n pass", "def post_routine(self, instance):\n super(OptionsRoutine, self).post_routine(instance)\n instance.do_validate()\n instance.do_post_process()" ]
[ "0.5408284", "0.5041925", "0.48933625", "0.48644656", "0.4844124", "0.48364186", "0.48095712", "0.47300762", "0.47154123", "0.47019136", "0.46962067", "0.46852458", "0.46838868", "0.46812868", "0.4620007", "0.46096227", "0.46080133", "0.45921993", "0.4573173", "0.45462424", "0.45404747", "0.4530696", "0.45298362", "0.45256054", "0.4520394", "0.45182237", "0.45024765", "0.4501874", "0.44871292", "0.4477599" ]
0.66426796
0
return commits that contain the file.
def get_commits_contains(self, file):
    full_path = self.files_parser.get_full(file)
    commits = self.__commit_contains_file_hash.get(full_path)
    return commits
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_first_commit_contains(self, file):\n\n commits = self.get_commits_contains(file)\n return commits[-1] if commits else None", "def get_last_commit_contains(self, file):\n\n commits = self.get_commits_contains(file)\n return commits[0] if commits else None", "def get_modified_files(repo, args):\n commit = repo.commit(args.commit)\n return commit.stats.files", "def parseCommit() -> str:\n cmd_tag = f\"git --no-pager diff --diff-filter=ACMR --name-only HEAD~1 HEAD\"\n print(f\"COMMAND: {cmd_tag}\")\n print(\"\", flush=True)\n fileList = subprocess.check_output(cmd_tag, shell=True)\n return fileList.decode('utf-8').splitlines()", "def _get_relevant_files(self, local_repo_path):\n r = GitRepo(local_repo_path)\n all_commits = r.git.log('--name-only', '--pretty=format:').split()\n counted_commits = Counter(all_commits)\n # Sort the files according to the number of commits they appear in\n sorted_commits = sorted(counted_commits.items(),\n key=lambda x: x[1],\n reverse=True)\n # Return the file names sorted per commits number\n return list(zip(*sorted_commits))[0]", "def get_filenames_in_commit(git_reference: str = \"\"):\n c = cmd.run(f\"git show --name-only --pretty=format: {git_reference}\")\n if c.return_code == 0:\n return c.out.strip().split(\"\\n\")\n else:\n raise GitCommandError(c.err)", "def get_files_to_be_committed():\n current_staging_hashes = get_all_path_hashes(staging_path)\n head_path = get_wit_path(keyword=get_current_commit_id())\n head_hashes = get_all_path_hashes(path=head_path)\n new_file_hashes = []\n files_to_be_committed = []\n for staging_hash in current_staging_hashes:\n if staging_hash not in head_hashes:\n new_file_hashes.append(staging_hash)\n files_to_be_committed = [staging_hash_decoder(h) for h in new_file_hashes]\n return files_to_be_committed", "def grepCommits(query):\n with SHELL.execute(\n 'git', 'rev-list', 'HEAD', '--grep', query,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE) as commits:\n return commits.stdout.read()", "def get_files_changed():\n files_list = []\n test = os.popen('git show --name-only')\n repo_location = os.popen('git rev-parse --show-toplevel')\n repo_location = repo_location.readlines()\n repo_location = repo_location[0]\n repo_location = repo_location.replace('\\n', '')\n if \"Not a git repository\" in repo_location:\n files_list.append(\"Not a git repository\")\n return files_list\n files_list.append(repo_location.split('/')[-1])\n output = test.readlines()\n for a in range(6, len(output)):\n files_list.append(output[a].replace('\\n', ''))\n return files_list", "def get_relevant_files(self):\n relevant_files = []\n\n if self.tree_cache is None:\n return relevant_files\n\n tree = json.loads(self.tree_cache)\n if \"commit\" in tree:\n commit_data = tree[\"commit\"]\n master_commit_files = commit_data[\"files\"]\n\n for patched_files in master_commit_files:\n relevant_file_path = \"./\" + patched_files[\"path\"]\n relevant_files.append(relevant_file_path)\n\n return relevant_files", "def commits(self):\r\n return repocommits.RepoCommits(self)", "def commit_names(self, commit):\n return []", "def get_list_of_comitted_files():\n files = []\n output = []\n try:\n output = subprocess.check_output(['git','diff-index', '--name-status', '--cached','HEAD']\n ).decode(\"utf-8\")\n except subprocess.CalledProcessError:\n print(\"Error diff files get: trace %s\" % subprocess.CalledProcessError)\n return files\n\n for result in output.split(\"\\n\"):\n logging.info(result)\n if result != '':\n match = modified.match(result)\n if match:\n 
files.append(match.group('name'))\n\n return files", "def commits(self) -> Sequence['outputs.GetBranchCommitResult']:\n return pulumi.get(self, \"commits\")", "def get_changed_files_from(old_commit_sha, new_commit_sha):\n return check_output(\n \"git diff-tree --no-commit-id --name-only -r {0}..{1}\".format(\n old_commit_sha,\n new_commit_sha\n ).split(\" \")\n ).decode('utf-8').strip()", "def commits(self):\n p = Popen(['git', 'rev-list', '--all', '--timestamp', '--parents'], \n cwd=self.path, stdout=PIPE)\n for line in p.stdout:\n commit_info = line.split()\n if len(commit_info) < 2:\n print >> sys.stderr, \"error: bad line: %r\" % line\n continue\n timestamp = int(commit_info.pop(0))\n commit_info = map(CommitId, commit_info)\n commit_id = commit_info.pop(0)\n yield (timestamp, commit_id, commit_info)", "def contributions_by_file(self, owner, repo, start=None, end=None):\n df = []\n for commit in self.__api.get_repo((owner + \"/\" + repo)).get_commits(since=start,until=end):\n for file in commit.files:\n try:\n df.append({'user': commit.author.login, 'file': file.filename, 'additions': file.additions, 'deletions': file.deletions, 'total': file.changes})\n except AttributeError:\n pass\n\n df = pd.DataFrame(df)\n\n df.groupby([\"file\" ,\"user\"]).sum()\n\n return df", "def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)", "def commits(self):\r\n url = '{0}/commits'.format(self.get_url())\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_commits(git_path):\n\n proc = subprocess.Popen(\n [\"git\", \"--git-dir=%s\" % git_path, \"log\", \"--full-history\",\n \"--format=NEW COMMIT%n%ct%n%aN%n%aE\", \"--numstat\"],\n stdout=subprocess.PIPE)\n line_stack = []\n\n def peek_line():\n if not line_stack:\n line_stack.append(proc.stdout.readline())\n return line_stack[-1]\n\n def pop_line():\n if line_stack:\n return line_stack.pop()\n return proc.stdout.readline()\n\n def push_line(line):\n line_stack.append(line)\n\n def read_commit():\n while peek_line() and not peek_line().strip():\n pop_line()\n if not peek_line(): return None\n assert peek_line().strip() == \"NEW COMMIT\"\n pop_line()\n\n date = int(pop_line())\n name = pop_line().strip()\n email = pop_line().strip()\n author = sanitize_author(name, email)\n\n if peek_line().strip() == \"NEW COMMIT\":\n return date, author, 0, 0, 0\n\n pop_line()\n insertion_count = 0\n deletion_count = 0\n file_count = 0\n while peek_line().strip() and peek_line().strip() != \"NEW COMMIT\":\n insertions, deletions, path = pop_line().strip().split(None, 2)\n if insertions == \"-\": insertions = 0\n if deletions == \"-\": deletions = 0\n insertion_count += int(insertions)\n deletion_count += int(deletions)\n file_count += 1\n\n return date, author, insertion_count, deletion_count, file_count\n\n while True:\n commit = read_commit()\n if commit is None:\n break\n yield commit", "def get_files_from_git() -> Sequence[Path]:\n\n def get_files(cmd: str) -> Sequence[str]:\n output = subprocess.check_output(cmd, 
shell=True)\n return [os.fsdecode(x) for x in output.splitlines()]\n\n root = os.fsdecode(subprocess.check_output(\"git rev-parse --show-toplevel\", shell=True).strip())\n result: Set[str] = set()\n result.update(get_files(\"git diff --name-only --diff-filter=ACM --staged\"))\n result.update(get_files(\"git diff --name-only --diff-filter=ACM\"))\n result.update(get_files(\"git ls-files -o --full-name --exclude-standard\"))\n return sorted(Path(root, x) for x in result)", "def all_commits(change_id):\n commits = []\n manifest = ET.ElementTree(file='.repo/manifest.xml')\n url = (GERRIT_ROOT + 'changes/?o=CURRENT_REVISION&q=status:open+' +\n change_id)\n changes = request.urlopen(url)\n for change in parse_response(changes):\n project = change['project']\n fetch = change['revisions'][change['current_revision']]['fetch']\n # The `ref` is the same for every download scheme, hence we can use\n # the first one that is there\n ref = fetch.values()[0]['ref']\n path = project_path(manifest, project)\n commits.append((project, path, ref))\n return commits", "def _get_file_contributors_from_revisions(self, repo_id, file_path):\n commits = []\n try:\n commits = seafile_api.get_file_revisions(repo_id, file_path, -1, -1)\n except SearpcError, e:\n return [], 0, ''\n\n if not commits:\n return [], 0, ''\n\n # Commits are already sorted by date, so the user list is also sorted.\n users = [ commit.creator_name for commit in commits if commit.creator_name ]\n\n # Remove duplicate elements in a list\n email_list = []\n for user in users:\n if user not in email_list:\n email_list.append(user)\n\n return email_list, commits[0].ctime, commits[0].id", "async def __last_commit(self, file_path: str) -> SourceResponses:\n files_api_url = await self._gitlab_api_url(\n f\"repository/files/{file_path}?ref={self._parameter('branch', quote=True)}\"\n )\n response = await self._session.head(files_api_url, headers=self._headers())\n last_commit_id = response.headers[\"X-Gitlab-Last-Commit-Id\"]\n commit_api_url = await self._gitlab_api_url(f\"repository/commits/{last_commit_id}\")\n return await super()._get_source_responses(commit_api_url)", "def dump_commit_diff(commit):\n\n for file in commit:\n if file[4] == \"\" or \".\" not in file[4]:\n sys.stdout.flush()\n print((\"Index: \" + file[3] + \" deleted\\r\"))\n sys.stdout.flush()\n else:\n subprocess.call([\n \"cvs\",\n \"-d\",\n file[8],\n \"rdiff\",\n \"-u\",\n \"-r\",\n PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),\n \"-r\",\n file[4],\n file[3]])", "def commits_between(repo_path, start, end):\n \n git = subprocess.Popen([\"git\", \"log\", \"%s..%s\" % (start, end)], stdout=subprocess.PIPE, cwd=repo_path)\n log = git.stdout.read().decode(\"utf-8\")\n \n cur = None\n commits = []\n \n for line in log.splitlines():\n cm = re.match(r'commit ([a-f0-9]{40})', line)\n if cm is not None:\n if cur:\n commits.append(cur)\n cur = Commit(cm.group(1))\n \n if cur is not None and cm is None:\n if cur.message is None:\n if line.startswith(\"Author:\"):\n cur.author = line[len(\"Author: \"):]\n elif line.startswith(\"Date:\"):\n cur.date = line[len(\"Date: \"):]\n else:\n cur.message = \"\"\n else:\n cur.message += line.strip() + \"\\n\"\n \n if cur is not None:\n commits.append(cur)\n \n return commits", "def get_compares_by_commit(commit_url):\n compare_sql = \"SELECT * from git_compare where commit_url=?\"\n return dbutils.execute_query(compare_sql, (commit_url,), DATABASE_FILE)", "def get_commits_in_branch(branch_name):\n output = subprocess.check_output(\"git 
log --pretty=format:'{}' {} {}\".format(git_format, branch_name, args.extra_args), shell=True)\n lines = output.decode(\"utf-8\").split(\"\\n\")\n out = []\n for line in lines:\n if len(line) <= 1: break\n [sha, author, message] = line.split(\"\t\", 2)\n out.append((sha, author, message))\n out.reverse()\n return out", "def get_sha_commit(self):\n self.get_meta()\n filename = 'lastshacommit'\n # For unittest read from localfile\n if app.config['TEST']:\n filename = 'lastshacommittest'\n app.logger.debug(\"App config set to TEST. Reading shacommit from file \" + filename)\n\n try:\n handle = open(filename, \"r\")\n except Exception as e:\n app.logger.error(\"Error occurred when opening file \" + filename)\n app.logger.error(e)\n raise\n l_shacommit = handle.read().rstrip()\n handle.close()\n return l_shacommit", "def _get_changes_to_be_committed(wit_path, current_id):\n\n if current_id != 'None':\n files = {os.path.relpath(file, os.path.join(wit_path, '.wit', 'images', current_id)): file\n for file in _get_all_files_names(\n wit_path, dir_name=os.path.join(wit_path, '.wit', 'images', current_id))}\n\n for file in _get_staging_area_files(wit_path, plus_root=True):\n if os.path.relpath(file, wit_path) in files:\n yield {os.path.basename(file): _compare_file(os.path.join(wit_path, '.wit', 'staging_area', file),\n files[os.path.relpath(file, wit_path)])}\n else:\n yield ''" ]
[ "0.71101016", "0.70415473", "0.66927785", "0.66129005", "0.6595046", "0.6557422", "0.6439894", "0.6426685", "0.6374402", "0.6330902", "0.6290782", "0.6146598", "0.61373854", "0.61027384", "0.6098769", "0.60661334", "0.60144174", "0.60071295", "0.5986308", "0.5985794", "0.5971924", "0.59635997", "0.5900705", "0.58743894", "0.5815377", "0.5808839", "0.5795003", "0.57904357", "0.57827574", "0.5768938" ]
0.83925456
0
return the commit in which the file was first added.
def get_last_commit_contains(self, file):
    commits = self.get_commits_contains(file)
    return commits[0] if commits else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_first_commit_contains(self, file):\n\n commits = self.get_commits_contains(file)\n return commits[-1] if commits else None", "def next_commit(self, commit):\n return RainerCommit(\n {\"version\" : int(commit.meta[\"version\"]) + 1,\n \"author\" : os.getlogin(),\n \"comment\" : \"\"},\n commit.value if commit.value is not None else \"\"\n )", "def add(self, filename, top_repo_path):\n my_output = subprocess.check_output([\"git\", \"add\", filename], cwd=top_repo_path)\n return my_output", "def commit(self):\n return settings.GIT_COMMIT", "def cur_commit():\n result = run(\n [\"git\", \"rev-parse\", \"HEAD\"], stdout=PIPE, stderr=PIPE, encoding=\"utf-8\",\n )\n result.check_returncode()\n return result.stdout.strip()", "def __first_commit_date(self):\n return utils.run('git',\n ['log', '--all', '--format=%cI', '--first-parent',\n '--reverse', '--max-parents=0'],\n self.__project.location).splitlines()[0].rstrip()", "def add_commit(self, commit):\n sha1 = commit.hex\n if sha1 in self._commits:\n return self._commits[sha1]\n title, separator, body = commit.message.partition(\"\\n\")\n commit = {\n 'explored': False,\n 'sha1': sha1,\n 'name': GitUtils.abbreviate_sha1(sha1),\n 'describe': GitUtils.describe(sha1),\n 'refs': GitUtils.refs_to(sha1, self.repo()),\n 'author_name': commit.author.name,\n 'author_mail': commit.author.email,\n 'author_time': commit.author.time,\n 'author_offset': commit.author.offset,\n 'committer_name': commit.committer.name,\n 'committer_mail': commit.committer.email,\n 'committer_time': commit.committer.time,\n 'committer_offset': commit.committer.offset,\n # 'message': commit.message,\n 'title': title,\n 'separator': separator,\n 'body': body.lstrip(\"\\n\"),\n }\n self._json['commits'].append(commit)\n self._commits[sha1] = len(self._json['commits']) - 1\n return self._commits[sha1]", "def _path_added(self, path, fecommit):\n # Because git-fast-export includes the entire tree in its output,\n # regardless of whether the requested commit is the first in the\n # branch or not, we need to check the repo itself to be certain if\n # this path was truly introduced in this commit, or simply existed\n # in the tree prior to the \"first\" commit.\n commit = self.ctx.repo.get(fecommit['sha1'])\n if commit is None:\n # empty repository?\n LOG.debug2(\"_path_added() commit {} is missing\".format(fecommit['sha1']))\n return True\n for parent in commit.parents:\n if p4gf_git.exists_in_tree(self.ctx.repo, path, parent.tree):\n LOG.debug2(\"_path_added() {} exists in parent tree {}\".format(\n path, p4gf_util.abbrev(p4gf_pygit2.object_to_sha1(parent))))\n return False\n return True", "def commit(self):\n return", "def prepare_for_commit(self):", "def _do_commit(self):", "def maybe_commit(job):", "def _ensure_commit(git_sha1):\n cmd = [\"git\", \"cat-file\", \"-e\", git_sha1 + \"^{commit}\"]\n p = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)\n if p.returncode == 0:\n # we have the commit locally\n return\n # we don't have the commit, must fetch\n cmd = [\"git\", \"fetch\", \"https://github.com/pytorch/pytorch.git\", git_sha1]\n p = subprocess.run(cmd, check=True)", "def test_commit_message_default(repository: Repository) -> None:\n (repository.path / \"a\").touch()\n\n repository.commit()\n\n head = repository.head.commit\n assert \"\" == head.message", "def __add_recent_file(self, fname):\r\n if fname is None:\r\n return\r\n if not fname in self.recent_files:\r\n self.recent_files.insert(0, fname)\r\n if len(self.recent_files) > 9:\r\n 
self.recent_files.pop(-1)", "def main():\n smart_commit_msg_filename = SMART_COMMIT_MSG_FILENAME\n paths = get_staged_paths()\n if not len(paths):\n raise Exception(\"did you even add anything to staging\")\n paths += [smart_commit_msg_filename]\n mr_edited_file = max(paths, key=lambda k: os.path.getmtime(k))\n if mr_edited_file == smart_commit_msg_filename:\n print(git_commit())\n else:\n print(\"Update the patch notes!\")", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self):\n pass", "def commit(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"commit\")", "def backup(self):\n\n\t\twith temp_dir(self.path):\n\t\t\t# only if changes made\n\t\t\tcheck = sp.check_output(['git', 'status', '--porcelain'])\n\t\t\t# check if untracked files\n\t\t\tuntracked = sp.check_output(['git', 'ls-files', '--others', '--exclude-standard'])\n\n\t\t\tif check:\n\t\t\t\tif untracked:\n\t\t\t\t\t# just add them all ... probably a better/safer/more direct way to do this\n\t\t\t\t\t_ = sp.check_output(['git', 'add', '.'])\n\t\t\t\t_ = sp.check_output([\n\t\t\t\t\t\t\"git\", \"commit\", \"-am\", f\"AUTO update on {dt.date.today().isoformat()}\"])\n\n\t\t\t# presumes that there is a remote!\n\t\t\toutput = sp.check_output([\n\t\t\t\t\t\"git\", \"push\"],\n\t\t\t\t\tstderr=sp.STDOUT\n\t\t\t\t\t)\n\n\t\t\treturn output.decode()\n\t\t\t# else:\n\t\t\t# \treturn 'No changes to commit'", "def get_auto_commit(self):\n return self.__aceQLHttpApi.get_auto_commit()", "def commit(self) -> None:\n pass", "def commit(self):\n # PEP 249\n pass", "def test_two_commits(self, tmpgitdir):\n with tmpgitdir.join('file_a.txt').open('w') as handle:\n handle.write('first file')\n\n subprocess.check_call(['git', 'add', '.'])\n subprocess.check_call(['git', 'commit', '-m', 'first'])\n first_hash = subprocess.check_output(\n ['git', 'show', '-s', '--format=format:%H']).decode()\n\n with tmpgitdir.join('file_b.txt').open('w') as handle:\n handle.write('second file')\n\n subprocess.check_call(['git', 'add', '.'])\n subprocess.check_call(['git', 'commit', '-m', 'second'])\n second_hash = subprocess.check_output(\n ['git', 'show', '-s', '--format=format:%H']).decode()\n\n assert is_git_ancestor(tmpgitdir, first_hash, second_hash)\n assert not is_git_ancestor(tmpgitdir, second_hash, first_hash)", "def add_commit( self\n , cl\n , mark_number\n , parent_commit_list\n , first_branch_from_branch_id\n , first_branch_from_change_number\n , dest_branch\n , branch_name):\n with Timer(OVERALL):\n with Timer(BUILD):\n self.__append(NTR('commit refs/heads/{0}\\n').format(branch_name))\n self.__append(NTR('mark : {0}\\n').format(mark_number))\n desc_info = DescInfo.from_text(cl.description)\n committer_added = False\n if desc_info:\n for key in ('author', 'committer'):\n v = desc_info[key]\n if v:\n self.__append(NTR('{key} {fullname} {email} {time} {timezone}\\n').\n format( key = key\n , fullname = v['fullname']\n , email = v['email' ]\n , time = v['time' ]\n , timezone = v['timezone']))\n committer_added = True\n desc = desc_info.clean_desc\n\n # Convoluted logic gates but avoids duplicating code. 
The point\n # is that we add the best possible committer data _before_\n # adding the description.\n if not committer_added:\n if desc_info:\n # old change description that lacked detailed author info,\n # deserves a warning, but otherwise push onward even if the\n # commit checksums will likely differ from the originals\n LOG.warn('commit description did not match committer regex: @{} => {}'.\n format(cl.change, desc_info.suffix))\n timezone = self.__get_timezone_offset(cl.time)\n self.__append(NTR('committer {fullname} {email} {time} {timezone}\\n').\n format(fullname=self.__full_name_for_user(cl.user),\n email=self.__email_for_user(cl.user),\n time=cl.time,\n timezone=timezone))\n desc = cl.description\n self.__add_data(desc)\n\n self._add_commit_parent_list(parent_commit_list)\n if first_branch_from_branch_id \\\n and first_branch_from_change_number:\n self.__branch_from( dest_branch\n , cl\n , first_branch_from_branch_id\n , first_branch_from_change_number)\n self.__add_files(cl.files)\n if desc_info and desc_info.gitlinks:\n self.__add_gitlinks(desc_info.gitlinks)", "def commit(self):", "def amend_commit_with_file(tmp_file_name):\n command = f\"git commit --amend --allow-empty -F {tmp_file_name}\"\n logging.debug(f\"Executing command: {command}\")\n p = subprocess.Popen(command, shell=True)\n p.communicate()" ]
[ "0.65479827", "0.573802", "0.5700632", "0.5668144", "0.5611786", "0.55983216", "0.5593553", "0.5582935", "0.55622244", "0.55585754", "0.5541501", "0.5509702", "0.5496934", "0.5496763", "0.54933196", "0.5491435", "0.54855376", "0.54855376", "0.54855376", "0.54855376", "0.54855376", "0.54791933", "0.5476992", "0.54214984", "0.5414182", "0.5410499", "0.5408741", "0.5380162", "0.53653485", "0.5359458" ]
0.5772377
1
Return 401 info, include login_url to PAASLOGINPLATFORM, width & height for adjusting the iframe window, login_url as
def _build_ajax_401_response(self, request):
    _next = request.build_absolute_uri(reverse('account:login_success'))
    if self._conf.ADD_CROSS_PREFIX:
        _next = self._conf.CROSS_PREFIX + _next
    _login_url = build_redirect_url(_next, self._conf.LOGIN_PLAIN_URL, self._conf.C_URL, extra_args=self._build_extra_args())
    context = {
        'login_url': _login_url,
        'width': self._conf.IFRAME_WIDTH,
        'height': self._conf.IFRAME_HEIGHT,
        'has_plain': True
    }
    return JsonResponse(context, status=401)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_page_401_response_to_platform(self, request):\n _next = request.build_absolute_uri()\n if self._conf.ADD_CROSS_PREFIX:\n _next = self._conf.CROSS_PREFIX + _next\n\n _login_url = build_redirect_url(_next,\n self._conf.LOGIN_URL,\n self._conf.C_URL,\n extra_args=self._build_extra_args())\n return HttpResponseRedirect(_login_url)", "def get_login_info(opener):\n request = urllib.request.Request(URL)\n response = opener.open(request)\n html = response.read().decode()\n # print(html)\n\n params_base64 = re.search(\"window.atob\\('(.+?)'\\)\", html).group(1)\n params = json.loads(base64.b64decode(params_base64).decode())\n # print(params)\n\n return params", "def _show_login_page(self, ticket: SSOLoginData, requested_authn_context: Optional[str], redirect_uri) -> bytes:\n argv = eduid_idp.mischttp.get_default_template_arguments(self.context.config)\n argv.update(\n {\n \"action\": \"/verify\",\n \"username\": \"\",\n \"password\": \"\",\n \"key\": ticket.key,\n \"authn_reference\": requested_authn_context,\n \"redirect_uri\": redirect_uri,\n \"alert_msg\": \"\",\n \"sp_entity_id\": \"\",\n \"failcount\": ticket.FailCount,\n # SAMLRequest, RelayState and binding are used to re-create the ticket state if not found using `key'\n \"SAMLRequest\": escape(ticket.SAMLRequest, quote=True),\n \"RelayState\": escape(ticket.RelayState, quote=True),\n \"binding\": escape(ticket.binding, quote=True),\n }\n )\n\n # Set alert msg if FailCount is greater than zero\n if ticket.FailCount:\n argv[\"alert_msg\"] = \"INCORRECT\" # \"Incorrect username or password ({!s} attempts)\".format(ticket.FailCount)\n\n try:\n argv[\"sp_entity_id\"] = ticket.saml_req.sp_entity_id\n except KeyError:\n pass\n\n self.logger.debug(\"Login page HTML substitution arguments :\\n{!s}\".format(pprint.pformat(argv)))\n\n # Look for login page in user preferred language\n content = eduid_idp.mischttp.localized_resource(self.start_response, 'login.html', self.config, self.logger)\n if not content:\n raise eduid_idp.error.NotFound()\n\n # apply simplistic HTML formatting to template in 'res'\n return content.format(**argv).encode('utf-8')", "def view_login(self):\n with self.client.get(\"/login\", catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.status_code > 200 and r_hist.status_code < 400:\n response.failure(\"Logged on: Got redirect to /home\")", "def authenticate():\n\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n get_auth_headers())", "def sign_in_failure(self, urlrequest, failure_data):\n # self.hide_loading_screen()\n # self.email_not_found = False # Triggers hiding the sign in button\n print(failure_data)\n # msg = failure_data['error']['message'].replace(\"_\", \" \").capitalize()\n # # Check if the error msg is the same as the last one\n # if msg == self.sign_in_msg:\n # # Need to modify it somehow to make the error popup display\n # msg = \" \" + msg + \" \"\n # self.sign_in_msg = msg\n # if msg == \"Email not found\":\n # self.email_not_found = True\n # if self.debug:\n # print(\"Couldn't sign the user in: \", failure_data)", "def login_page():\n text = '<a href=\"%s\">Authenticate with Okta</a>'\n return text % create_auth_url()", "def authenticate():\n return Response('Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response(\n 'Could not verify your 
access level for that URL.\\n'\n 'You have to login with Web Manager credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def log_into_overdrive():\n\tprint \"in log_into_overdrive function \\n\"\n\n\toverdrive_client_app_fields = {}\n\n\tkeys = '%s:%s' % (OD_API_CLIENT_KEY, OD_API_SECRET_KEY)\n\tencoded_keys = base64.b64encode(keys)\n\tod_oauth_url = 'https://oauth.overdrive.com/token'\n\theaders = {'Host' : 'oauth.overdrive.com',\n\t\t\t\t'Authorization' : 'Basic %s' % encoded_keys,\n\t\t\t\t'Content-Type' : 'application/x-www-form-urlencoded;charset=UTF-8'}\n\tpayload ={}\n\tpayload = {'grant_type' : 'client_credentials'}\n\tresponse = requests.post(od_oauth_url, data=payload, headers=headers)\n\t\"\"\" add logic to handle error > 200 or 201 \n\n print response.status_code, \" == \", response.reason\n if response.status_code > 201:\n flash((\"Action was not successful. %s == %s\\n\") % \n (response.status_code, response.reason))\n return render_template('login.html')\n elif response.status_code == 200:\n return redirect('/main')\n client_credentials = response.content\n elif response.status_code == 201:\n print \"Post to get access token was successful\"\n return redirect('/main')\n\n\t\"\"\"\n\n\tresponse_data = json.loads(response.content)\n\tprint \" overdrive access token = \", response_data['access_token'], \"\\n\"\n\n\toverdrive_client_app_fields['url'] = response.url\n\toverdrive_client_app_fields['access_token'] = response_data['access_token']\n\n\treturn overdrive_client_app_fields, response", "def authenticate():\r\n return Response(\r\n 'Could not verify your access level for that URL.\\n'\r\n 'You have to login with proper credentials', 401,\r\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login 
with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})" ]
[ "0.5833281", "0.5807944", "0.57253546", "0.5551472", "0.55454785", "0.55313236", "0.5505527", "0.54835105", "0.54654", "0.54211766", "0.5410585", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261", "0.5391261" ]
0.6192411
0
Turn the encrypted s field into a working signature
def _decrypt_signature(self, s):
    if len(s) == 92:
        return s[25] + s[3:25] + s[0] + s[26:42] + s[79] + s[43:79] + s[91] + s[80:83]
    elif len(s) == 90:
        return s[25] + s[3:25] + s[2] + s[26:40] + s[77] + s[41:77] + s[89] + s[78:81]
    elif len(s) == 88:
        return s[48] + s[81:67:-1] + s[82] + s[66:62:-1] + s[85] + s[61:48:-1] + s[67] + s[47:12:-1] + s[3] + s[11:3:-1] + s[2] + s[12]
    elif len(s) == 87:
        return s[4:23] + s[86] + s[24:85]
    elif len(s) == 86:
        return s[83:85] + s[26] + s[79:46:-1] + s[85] + s[45:36:-1] + s[30] + s[35:30:-1] + s[46] + s[29:26:-1] + s[82] + s[25:1:-1]
    elif len(s) == 85:
        return s[2:8] + s[0] + s[9:21] + s[65] + s[22:65] + s[84] + s[66:82] + s[21]
    elif len(s) == 84:
        return s[83:36:-1] + s[2] + s[35:26:-1] + s[3] + s[25:3:-1] + s[26]
    elif len(s) == 83:
        return s[6] + s[3:6] + s[33] + s[7:24] + s[0] + s[25:33] + s[53] + s[34:53] + s[24] + s[54:]
    elif len(s) == 82:
        return s[36] + s[79:67:-1] + s[81] + s[66:40:-1] + s[33] + s[39:36:-1] + s[40] + s[35] + s[0] + s[67] + s[32:0:-1] + s[34]
    elif len(s) == 81:
        return s[56] + s[79:56:-1] + s[41] + s[55:41:-1] + s[80] + s[40:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
    elif len(s) == 79:
        return s[54] + s[77:54:-1] + s[39] + s[53:39:-1] + s[78] + s[38:34:-1] + s[0] + s[33:29:-1] + s[34] + s[28:9:-1] + s[29] + s[8:0:-1] + s[9]
    else:
        self.logger.warning("Unable to decrypt signature, key length {0} not supported; retrying might work", len(s))
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_signature(self):\n sig_contents = \\\n self.payload + \".\" + \\\n b64encode(b\"application/xml\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"base64url\").decode(\"ascii\") + \".\" + \\\n b64encode(b\"RSA-SHA256\").decode(\"ascii\")\n sig_hash = SHA256.new(sig_contents.encode(\"ascii\"))\n cipher = PKCS1_v1_5.new(self.private_key)\n sig = urlsafe_b64encode(cipher.sign(sig_hash))\n key_id = urlsafe_b64encode(bytes(self.author_handle, encoding=\"utf-8\"))\n return sig, key_id", "def sign(self, message):\n\n # if not already a byte string turn it to making sure\n if not isinstance(message, (bytes, str)):\n return None\n elif isinstance(message, str):\n message = message.encode()\n\n hash_of_message = SHA256.new(message)\n\n signer = DSS.new(self.privkey, mode=\"fips-186-3\")\n\n digital_signature = signer.sign(hash_of_message)\n digital_signature = base64.b85encode(digital_signature).decode()\n\n return digital_signature", "def get_signed(self, sig_str):\n sig_str = base64.b64encode(sig_str)\n signature = base64.b64encode(hmac.new(self.secret, sig_str, digestmod=hashlib.sha1).digest())\n return signature", "def __sign(self, text):\n signature = HMAC.new(self.sign_key, text.encode('utf-8'), SHA256).digest()\n return base64.standard_b64encode(signature)", "def sign(self, msg):\n z = int.from_bytes(helper.hash256(msg), \"big\")\n k = self.deterministic_k(z)\n k_inv = pow(k, N-2, N)\n r = (k*G).x.num\n s = (z + r * self.secret) * k_inv % N\n if s > N/2:\n s = N - s\n\n return Signature(r, s)", "def signature(self, params):\n string = ''.join(key + params[key] for key in sorted(params.keys()))\n return md5(string + self.cfg('secret'))", "def create_signature(self, string_to_sign: str) -> str:\n begin_signature = hmac.new(key=base64.b64decode(self.secret),\n msg=string_to_sign.encode(),\n digestmod=hashlib.sha1)\n end_signature = begin_signature.digest()\n final_signature = base64.b64encode(end_signature).decode()\n return final_signature", "def generate_cybersource_sa_signature(payload):\n # This is documented in certain CyberSource sample applications:\n # http://apps.cybersource.com/library/documentation/dev_guides/Secure_Acceptance_SOP/html/wwhelp/wwhimpl/js/html/wwhelp.htm#href=creating_profile.05.6.html\n keys = payload[\"signed_field_names\"].split(\",\")\n message = \",\".join(f\"{key}={payload[key]}\" for key in keys)\n\n digest = hmac.new(\n settings.CYBERSOURCE_SECURITY_KEY.encode(\"utf-8\"),\n msg=message.encode(\"utf-8\"),\n digestmod=hashlib.sha256,\n ).digest()\n\n return b64encode(digest).decode(\"utf-8\")", "def Sign(self, msg):\n # Need to chose a random k per-message, SystemRandom() is available\n # since Python 2.4.\n k = random.SystemRandom().randint(2, self.key.q-1)\n (r, s) = self.key.sign(util.Hash(msg), k)\n return util.MakeDsaSig(r, s)", "def _sign(self, data, salt):\r\n strBuffer = \"\"\r\n # print data.keys()\r\n for k in sorted(data.iterkeys()):\r\n\r\n # Handle the BOOL special case\r\n v = data[k]\r\n if type(v) == bool:\r\n if v:\r\n v = 1\r\n else:\r\n v = 0\r\n data[k] = v\r\n\r\n # Update buffer\r\n strBuffer += \"%s=%s\\n\" % (str(k).lower(), vmcp.myquote(str(v)))\r\n\r\n # Append salt\r\n strBuffer += salt\r\n return strBuffer", "def _generate_signature(self):\n self.logger.debug(f'body payload {self.body_payload}')\n return hmac.new(self.__decrypted_secret, self.body_payload, hashlib.sha1).hexdigest()", "def signed(self, encoded):\n signature = self.sign(encoded)\n return encoded + signature", "def RSA_SIGNATURE_HASH() :\n return \"SHA-256\"", 
"def _generate_signature(self, key, msg):\n key = to_bytes(key)\n msg = to_bytes(msg)\n\n hash_obj = hmac.new(key, msg=msg, digestmod=hashlib.sha256)\n digest = hash_obj.digest() # abstract\n\n signature = base64.b64encode(digest) # Signature\n return to_unicode(signature)", "def rsa_sha1_signature(base_string, rsa_private_key):\n from .rsa import sign_sha1\n base_string = to_bytes(base_string)\n s = sign_sha1(to_bytes(base_string), rsa_private_key)\n sig = binascii.b2a_base64(s)[:-1]\n return to_unicode(sig)", "def _get_signature(value):\n mySha = hashlib.sha256()\n mySha.update(value)\n # print mySha.hexdigest()\n return mySha.hexdigest()", "def signature_unsafe(m: bytes, sk: bytes, pk: bytes) -> bytes:\n h = H(sk)\n a = decodecoord(h)\n r = Hint(h[b // 8 : b // 4] + m)\n R = scalarmult_B(r)\n S = (r + Hint(encodepoint(R) + pk + m) * a) % l\n return encodepoint(R) + encodeint(S)", "def ecdsa_sign(G, priv_sign, message):\n plaintext = message.encode(\"utf8\")\n digest = sha256(plaintext).digest()\n sig = do_ecdsa_sign(G,priv_sign,digest)\n\n return sig", "def sign(self, message, randombytes=urandom):\r\n int_header = 0x30 + logn[self.n]\r\n header = int_header.to_bytes(1, \"little\")\r\n\r\n salt = randombytes(SALT_LEN)\r\n hashed = self.hash_to_point(message, salt)\r\n\r\n # We repeat the signing procedure until we find a signature that is\r\n # short enough (both the Euclidean norm and the bytelength)\r\n '''\r\n print(\"---------Inside sign----------\")\r\n '''\r\n while(1):\r\n if (randombytes == urandom):\r\n s = self.sample_preimage(hashed)\r\n '''\r\n print(\"s: \", s)\r\n '''\r\n else:\r\n seed = randombytes(SEED_LEN)\r\n s = self.sample_preimage(hashed, seed=seed)\r\n norm_sign = sum(coef ** 2 for coef in s[0])\r\n norm_sign += sum(coef ** 2 for coef in s[1])\r\n # Check the Euclidean norm\r\n if norm_sign <= self.signature_bound:\r\n\r\n enc_s = compress(s[1], self.sig_bytelen - HEAD_LEN - SALT_LEN)\r\n # Check that the encoding is valid (sometimes it fails)\r\n if (enc_s is not False):\r\n return header + salt + enc_s\r\n '''\r\n else:\r\n print(\"-------------INVALID encoding---------------\")\r\n\r\n else:\r\n print(\"-------------NOT within signature bound---------------\")\r\n '''", "def serialize(self):\n # Curve order and halforder, used to tame ECDSA malleability (see BIP-0062)\n order = Curve.N\n halforder = order >> 1\n # low 'S' malleability breaker\n sigS = self.s\n if sigS > halforder:\n sigS = order - sigS\n # Ensure the encoded bytes for the r and s values are canonical and\n # thus suitable for DER encoding.\n rb = canonicalizeInt(self.r)\n sb = canonicalizeInt(sigS)\n\n # total length of returned signature is 1 byte for each magic and\n # length (6 total), plus lengths of r and s\n length = 6 + len(rb) + len(sb)\n b = ByteArray(0, length=length)\n\n b[0] = 0x30\n b[1] = ByteArray(length - 2, length=1)\n b[2] = 0x02\n b[3] = ByteArray(len(rb), length=1)\n offset = 4\n b[offset] = rb\n offset += len(rb)\n b[offset] = 0x02\n offset += 1\n b[offset] = ByteArray(len(sb), length=1)\n offset += 1\n b[offset] = sb\n return b", "def sign(self, msg: Dict) -> Dict:\n ser = serialize_msg_for_signing(msg, topLevelKeysToIgnore=[f.SIG.nm,\n f.SIGS.nm])\n bsig = self.naclSigner.signature(ser)\n sig = base58.b58encode(bsig).decode(\"utf-8\")\n return sig", "def Sign(self, msg):\n return hmac.new(self.key_bytes, msg, sha1).digest()", "def sign_plaintext(client, request):\n return plaintext_signature(client.client_secret, client.token_secret)", "def sign_data(data):\n\n rv = 
\"\"\n\n for i in signing_keys:\n sk = ecdsa.SigningKey.from_der(i)\n\n if sk is not None and sk.verifying_key is not None:\n sig = sk.sign(data)\n rv += encode_line(\"signature\", sk.verifying_key.to_der(), sig)\n\n return rv", "def Sign(self, msg):\n # Need to chose a random k per-message, SystemRandom() is available\n # since Python 2.4.\n k = random.SystemRandom().randint(2, self.key.q - 1)\n (r, s) = self.key.sign(util.Hash(msg), k)\n return util.MakeDsaSig(r, s)", "def _buildSignatureString(self):\n self.params=self.kargs\n \n try: method_details=self.MMAP[self.method]\n except: \n raise RuntimeError(\"unsupported method\")\n \n api_key_required=method_details[\"api_key_required\"]\n if api_key_required:\n self.params.update({\"api_key\": self.API_KEY, \"method\":self.method})\n \n signature_required=method_details[\"signature_required\"]\n if not signature_required:\n self.signature_string=\"\"\n return\n \n sorted_keys=self.params.keys().sort()\n \n str=\"\"\n try:\n for key in sorted_keys:\n if key not in self.PARAMS_TO_EXCLUDE_FROM_SIGNATURE:\n \n ## assume the parameter's value is valid\n try: \n if key not in self.PARAMS_TO_EXCLUDE_FROM_UTF8_ENCODING:\n value=self.params[key].encode(\"utf-8\")\n else:\n value=self.params[key]\n except: value=self.params[key]\n str=\"%s%s\" % (key, value)\n except:\n pass\n \n str += self.API_SECRET\n m=hashlib.md5()\n m.update(str)\n self.signature_string=m.hexdigest()\n \n self.kargs.update({\"api_sig\": self.signature_string})", "def get_signed(self, **payload):\n param = ''\n for k in payload:\n param += '&' + k + '=' + str(payload[k])\n param = param.lstrip('&')\n signature = hmac.new(self.secret, param, digestmod=hashlib.sha256).hexdigest()\n\n return signature", "def GenSampleSignature(text):\r\n demo_keypair = ('RSA.mVgY8RN6URBTstndvmUUPb4UZTdwvwmddSKE5z_jvKUEK6yk1'\r\n 'u3rrC9yN8k6FilGj9K0eeUPe2hf4Pj-5CmHww=='\r\n '.AQAB'\r\n '.Lgy_yL3hsLBngkFdDw1Jy9TmSRMiH6yihYetQ8jy-jZXdsZXd8V5'\r\n 'ub3kuBHHk4M39i3TduIkcrjcsiWQb77D8Q==')\r\n\r\n signer = SignatureAlgRsaSha256(demo_keypair)\r\n return signer.Sign(text)", "def parse_signature(data: bytes):\n return base58_encode(data, b'sig').decode()", "def sign(self, message):\n\n assert len(message) == 32\n assert self.sec is not None\n r, s = do_ecdsa_sign(self.G, self.sec, message, self.optim)\n r0, s0 = r.binary(), s.binary()\n assert len(r0) <= 32 and len(s0) <= 32\n sig = pack(\"H32sH32s\", len(r0), r0, len(s0), s0)\n return sig" ]
[ "0.65453", "0.63674563", "0.6332326", "0.6304266", "0.6247069", "0.62329787", "0.6162837", "0.6110081", "0.6079472", "0.6066191", "0.6039318", "0.6027544", "0.60205823", "0.60150886", "0.5985069", "0.598436", "0.59802085", "0.59756666", "0.59607977", "0.59597117", "0.59544945", "0.59407336", "0.59333175", "0.5924568", "0.59221184", "0.5912011", "0.5887666", "0.58725667", "0.58606815", "0.58533144" ]
0.69796395
0
Returns a list of message ids that match a given query string.
def get_message_ids_by_query(query_string, service):
    message_results = service.users().messages().list(userId='me', q=query_string).execute()
    if(message_results['resultSizeEstimate'] == 0):
        return []
    return message_results['messages'] if message_results['messages'] is not None else []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_email_ids(conn, query='ALL'):\n if conn.state != \"SELECTED\":\n raise imaplib.IMAP4.error(\"Cannot search without selecting a folder\")\n\n rv, data = conn.uid('search', None, query)\n if rv != 'OK':\n print (\"Could not fetch email ids\") # for some reason...\n return []\n\n return data[0].split()", "def search_message(service, user_id, search_string):\n try:\n # initiate the list for returning\n list_ids = []\n\n # get the id of all messages that are in the search string\n search_ids = service.users().messages().list(userId=user_id, q=search_string).execute()\n # if there were no results, print warning and return empty string\n try:\n ids = search_ids['messages']\n except KeyError:\n print(\"WARNING: the search queried returned 0 results\")\n print(\"returning an empty string\")\n return \"\"\n\n if len(ids)>1:\n for msg_id in ids:\n list_ids.append(msg_id['id'])\n return(list_ids)\n\n else:\n list_ids.append(ids[0]['id'])\n return list_ids\n \n except errors.HttpError as error:\n print(\"An error occured: %s\") % error", "def getMessagesMatchingQuery(self, search_query_dict: Dict[str, str]) -> List[Dict[str, str]]:\n\n valid_operators = [\n 'from',\n 'to',\n 'subject',\n 'label',\n 'after',\n 'before',\n 'phrase',\n ]\n\n search_query = ''\n for operator in search_query_dict:\n if operator in valid_operators:\n search_query += '{query_type}:{query}'.format(\n query_type=operator, \n query=search_query_dict[operator]\n )\n search_query += ' '\n \n results = self.service.users().messages().list(userId='me', q=search_query).execute()\n messages_ids = results.get('messages', [])\n error = 'No messages found.'\n if not messages_ids:\n print(error)\n # return [error]\n \n return messages_ids", "def search(self, query, labels=[]):\n qstring = query + ' ' + self.opts.query\n if labels:\n query += ' (' + ' OR '.join(['label:' + l for l in labels]) + ')'\n print(query)\n cmd = self.service.users().messages()\n try:\n results = cmd.list(userId='me', q=query,\n includeSpamTrash=True).execute()\n if 'messages' not in results:\n return []\n gids = [m['id'] for m in results['messages']]\n \n while 'nextPageToken' in results:\n page_token = results['nextPageToken']\n results = cmd.list(userId='me', q=query,\n pageToken=page_token,\n includeSpamTrash=True).execute()\n gids.extend([m['id'] for m in results['messages']])\n return gids\n except errors.HttpError as ex:\n print('An error occurred: %s' % ex)\n return []", "def search(self, query_string):\n terms = query_string.lower().split()\n result = set(self.wordDict[terms[0]])\n if len(result) == 0:\n return list()\n else:\n for t in terms[2:]:\n records_containing_t = self.wordDict[t]\n result = result.intersection(records_containing_t)\n return [self.get_record_dict(id).getTuple() for id in result]", "def listMessagesMatchingQuery(service, user_id, query = ''):\n try:\n response = service.users().messages().list(userId = user_id, q = query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId = user_id, q = query, pageToken = page_token).execute()\n messages.extend(response['messages'])\n return messages\n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def ListMessagesMatchingQuery(service, user_id, query=''):\n try:\n response = service.users().messages().list(userId=user_id,\n q=query).execute()\n messages = []\n if 'messages' in response:\n 
messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id, q=query,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return messages\n except errors.HttpError:\n print(\"An error occurred\")", "def ListMessagesMatchingQuery(service, user_id, query=''):\n try:\n response = service.users().messages().list(userId=user_id, q=query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id, q=query, pageToken=page_token).execute()\n messages.extend(response['messages'])\n return messages\n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def ListMessagesMatchingQuery(service, user_id, query=''):\n try:\n response = service.users().messages().list(userId=user_id,\n q=query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id, q=query,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return messages\n except errors.HttpError as error:\n print ('An error occurred: %s' % error)", "def ListMessagesMatchingQuery(service, user_id, query=''):\n try:\n response = service.users().messages().list(userId=user_id,\n q=query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id, q=query,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return messages\n except errors.HttpError as error:\n print('An error occurred: %s' % error)", "def ListMessagesMatchingQuery(service, user_id, query=''):\n try:\n response = service.users().messages().list(userId=user_id,\n q=query).execute()\n messages = []\n if 'messages' in response:\n messages.extend(response['messages'])\n\n while 'nextPageToken' in response:\n page_token = response['nextPageToken']\n response = service.users().messages().list(userId=user_id, q=query,\n pageToken=page_token).execute()\n messages.extend(response['messages'])\n\n return messages\n\n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def findIds(self, query):\t\t\t\t\t\t\t## Multiple Elements\n\t\ttry:\n\t\t\tassert(type(query)) == str or Pattern\n\t\t\treturn self.driver.find_elements_by_id(query)\n\t\texcept Exception as e:\n\t\t\tprint(\"Could not find ID: {}\\n\\n{}\".format(query, e))\n\t\t\treturn -1", "def list_emails_ids(self, *criteria, mailbox=None) -> List[str]:\n ids = self.get_raw_emails_ids(*criteria, mailbox=mailbox)\n if ids is None:\n raise Exception(f'Cannot get the list of email in the mailbox {self._selected_mailbox}! 
No list of IDs is returned!')\n return self._search(ids)\n # ListEmailIds.reset()\n # ListEmailIds.parse(ids[0].decode())\n # return ListEmailIds.get_tokens_values()", "def _search(emails_ids: List[bytes]) -> Union[None, List[str]]:\n if len(emails_ids) != 1:\n return None\n ListEmailIds.reset()\n ListEmailIds.parse(emails_ids[0].decode())\n return ListEmailIds.get_tokens_values()", "def get_ids(self, sentence):\n return [self.get_id(word) for word in sentence.strip().split(' ')]", "def get_ids(self) -> List[str]:", "def fetch_process_queries(self):\n url = \"/api/investigate/v1/orgs/{}/processes/search_jobs\".format(\n self.credentials.org_key\n )\n ids = self.get_object(url)\n return ids.get(\"query_ids\", [])", "def messages_search_match(token, channel_id, query_str):\n verify_token(token)\n # Empty result\n if query_str == \"\":\n return {}\n \n user = get_user_from_token(token)\n # Searches all messages and compares query_str\n search_results = []\n\n channel = Channel.query.filter_by(id=channel_id).first()\n all_messages = channel.messages_sent\n\n for message_obj in all_messages:\n curr_message = message_obj.message\n print(curr_message)\n print(message_obj.time_created)\n # Case-insensitive matching\n if curr_message.lower().find(query_str.lower()) != -1:\n print(\"{} matches {}!\".format(curr_message, query_str))\n search_results.append({\n \"message_id\": message_obj.id,\n \"user_id\": message_obj.user_id,\n \"message\": message_obj.message,\n \"time_created\": message_obj.time_created.timestamp()\n })\n \n sorted_messages = sorted(search_results, key=lambda k: k['time_created'])\n \n # Returns messages that contain query_str\n # Contains all info on message (message_id, user_id, message, time_created)\n return {\n 'messages': sorted_messages\n }", "def __get_issue_ids(self, string):\n import re\n matches = re.findall(r\"#(\\d{1,20})\", string, re.MULTILINE)\n return matches", "def search(folderName):\n\n result, data = mailBox.select(folderName, True)\n\n if TESTING:\n searchResult, uid = mailBox.uid('SEARCH', None, 'UNSEEN')\n else:\n searchResult, uid = mailBox.uid('SEARCH', None, 'ALL')\n\n number_messages = len(uid[0].split(' ')) if uid[0] != \"\" else 0\n if number_messages == 0:\n print \"\\nERROR: No messages found in %s\\n\" % folderName\n print \"\\n* Exiting... 
*\\n\"\n sys.exit(0)\n print \"\\nNumber of messages in %s: %d\" % (folderName, number_messages)\n\n uidList = \"\"\n for i in uid[0].split(' '):\n if i.isdigit():\n uidList += i + \",\"\n uidList = uidList[:-1]\n\n return uidList", "def lookup(root: dict, query: str, exact: bool = False) -> List[Set[int]]:\n if not query:\n return set()\n\n word_ids: List[Set[int]] = [] # ids of items that correspond to query\n for word in preprocess_words(query):\n node = root\n for c in word:\n node: Optional[dict] = node.get(c) # type: ignore\n if not node:\n # dead-end for this word\n word_ids.append(set())\n break\n else:\n word_ids.append(collect(node, exact))\n\n return word_ids", "def test_searchMessageSetWithList(self):\n # 6 is bigger than the biggest message sequence number, but that's\n # okay, because N:* includes the biggest message sequence number even\n # if N is bigger than that (read the rfc nub).\n return self._messageSetSearchTest('(6:*)', [5])", "def get_all_matching_location_ids(location_name):\n locations_list = Location.objects.filter(\n city__icontains=location_name\n ).all()\n return [location.id for location in locations_list]", "def get_camp_ids_containing_str(marketer_id, string):\n all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0])\n return [x.get(\"id\") for x in all_campaigns if string in x[\"name\"]]", "def get_ids(**metafilter):\n\n metafilter = _clean(metafilter)\n search = _build(metafilter).source(False)\n\n for hit in search.scan():\n yield hit.meta.id", "def _get_matching_node_ids(self, node_name):\n try:\n with closing(self.connection) as con:\n with con:\n with closing(con.cursor()) as cursor:\n cursor.execute(\"\"\"\n SELECT id\n FROM nodes\n WHERE name LIKE (?)\n \"\"\", (node_name,))\n res = cursor.fetchall()\n\n except sqlite3.OperationalError as e:\n print(\"ERROR: An error occurred when retrieving node ids: {}\".format(e))\n\n if len(res) == 0:\n print(\"ERROR: Could not find node ID for name '{0}'.\".format(node_name))\n return []\n\n elif len(res) > 1:\n print(\"Found multiple node IDs for name '{0}', returning first result.\".format(node_name))\n\n # e.g. 
[(10,), (11,)] => [10, 11]\n return [x[0] for x in res]", "def test_queryWithMesssageSet(self):\n query = imap4.Query(messages=imap4.MessageSet(1, None))\n self.assertEqual(query, '(MESSAGES \"1:*\")')", "def job_ids(config):\n errcode, output = queue(config)\n parse_line = False\n current_sched = None\n ids = []\n if errcode != 0:\n logger.debug('queue command issued return code: %s', errcode)\n return ids\n\n for line in output.splitlines():\n line = line.strip()\n parse_line = parse_line and bool(line)\n if parse_line:\n assert current_sched\n ids.append( (current_sched, line.split()[0]) )\n continue\n\n if line.startswith('--'):\n current_sched = line.split()[2].strip()\n\n if line.startswith('ID'):\n parse_line = True\n\n logger.debug('found the following jobs in Condor queue: %s', ids)\n return ids", "def get_messages(self,honeypotids,expect_dict):\n if type(honeypotids) == str:\n honeypotids = [honeypotids]\n if \"ALL\" in honeypotids:\n msg_list = self.network.wait_for_messages()\n if msg_list:\n msg_list = self.extract_messages(msg_list)\n msg_list = self.check_answer(msg_list,honeypotids,expect_dict)\n else:\n msg_count = len(honeypotids)\n msg_list = []\n while(msg_count > 0):\n msgs = self.network.get_message()\n if msgs:\n msgs = self.extract_messages(msgs)\n msgs = self.check_answer(msgs,honeypotids,expect_dict)\n if msgs:\n msg_list = msg_list + msgs\n msg_count -= len(msgs)\n else:\n msg_count = 0\n return msg_list", "def search(query_string):" ]
[ "0.6726846", "0.6582666", "0.6582302", "0.63143134", "0.61436373", "0.609995", "0.60605496", "0.6045243", "0.6023286", "0.6019033", "0.59572846", "0.5849049", "0.5801364", "0.5611535", "0.54922795", "0.54554754", "0.5429563", "0.5421312", "0.5379711", "0.53616273", "0.53476137", "0.52299273", "0.5208551", "0.5203468", "0.5202202", "0.51922905", "0.51914126", "0.5190782", "0.5188342", "0.51443654" ]
0.7509429
0
Returns a string representation of a decoded email message part.
def decode_message_part(message_part): return base64.urlsafe_b64decode(message_part['body']['data']).decode().strip()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode_message(self, raw):\n return raw.decode('utf-8')", "def get_decoded_email_body(message_body):\n\n msg = email.message_from_string(message_body)\n\n text = \"\"\n if msg.is_multipart():\n html = None\n for part in msg.walk():\n\n # print \"%s, %s\" % (part.get_content_type(), part.get_content_charset())\n\n if part.get_content_charset() is None:\n # We cannot know the character set, so return decoded \"something\"\n text = part.get_payload(decode=True)\n continue\n\n charset = part.get_content_charset()\n\n if part.get_content_type() == 'text/plain':\n text = unicode(part.get_payload(decode=True), str(charset), \"ignore\").encode('utf8', 'replace')\n\n if part.get_content_type() == 'text/html':\n html = unicode(part.get_payload(decode=True), str(charset), \"ignore\").encode('utf8', 'replace')\n\n else:\n continue\n\n if text is not None:\n return text.strip()\n else:\n return html.strip()\n else:\n text = unicode(msg.get_payload(decode=True), msg.get_content_charset(), 'ignore').encode('utf8', 'replace')\n return text.strip()", "def decode(b64_msg: str) -> str:\n\n b64_bytes = b64_msg.encode(\"ascii\")\n b64_bytes = base64.b64decode(b64_bytes)\n return b64_bytes.decode(\"ascii\")", "def message(self):\n if not hasattr(self, '_message'):\n self._message = email.message_from_string(self.data)\n return self._message", "def get_text_from_email(msg):\n parts = []\n for part in msg.walk():\n if part.get_content_type() == 'text/plain':\n parts.append(part.get_payload())\n return ''.join(parts)", "def decode_field(field):\r\n field = field.replace('\\r\\n','')\r\n field = field.replace('\\n','')\r\n\r\n list = email.Header.decode_header (field)\r\n\r\n decoded = \" \".join([\"%s\" % k for (k,v) in list])\r\n\r\n #print \"Decoding [%s] to [%s]\" % (field, decoded)\r\n\r\n return decoded", "def decode(encoded_message: str, rails: int) -> str:\n message: str = ''\n decoded: list = get_rails(encoded_message, rails)\n\n i = 0\n for row, row_decoded in enumerate(decoded):\n for c, col_decoded in enumerate(row_decoded):\n if col_decoded:\n decoded[row][c] = encoded_message[i]\n i += 1\n\n for c in range(len(decoded[0])):\n for row in range(len(decoded)):\n if decoded[row][c]:\n message += decoded[row][c]\n\n return message", "def decode_email(email):\n return", "def to_string(msg):\n if type(msg) is bytes:\n msg = str(msg)\n msg = msg[2:]\n return msg[:-1]\n else:\n return msg", "def message(self) -> str:\n return self.fields.get('msg', self.raw_string)", "def de_base64(msg):\n try:\n msg_ascii = msg.encode('ascii')\n msg_bytes = base64.b64decode(msg_ascii)\n msg_decoded = msg_bytes.decode('ascii')\n return msg_decoded\n except:\n print('Invalid base64-encoded string')", "def getMessage(self, msg_id: str) -> str:\n message = self.service.users().messages().get(userId='me', id=msg_id, format='raw').execute()\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n mime_msg = email.message_from_bytes(msg_str)\n message_main_type = mime_msg.get_content_maintype()\n \n if message_main_type == 'multipart':\n for part in mime_msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif message_main_type == 'text':\n return mime_msg.get_payload()", "def _decode_text(self):\n\n print(f\"Hex decode; received message is {self.message}\")\n return bytes.fromhex(self.message).decode('utf-8')", "def formatted_message(self):\n message = MIMEMultipart()\n message['From'] = self.sender\n message['To'] = self.receiver\n message['Subject'] = 
self.subject.format(**self.params)\n message.attach(MIMEText(self.body.format(**self.params), 'plain'))\n return message.as_string()", "def get_message(obj):\n if isinstance(obj, email.Message.Message):\n return obj\n if hasattr(obj, \"read\"):\n obj = obj.read()\n try:\n msg = email.message_from_string(obj)\n except email.Errors.MessageParseError:\n msg = None\n return msg", "def decode_message(self, key):\n\n decoded_message = ''\n for char in self.message:\n if char.isalpha():\n decoded_char = self.convert_char(char, key)\n decoded_message = decoded_message + decoded_char\n else:\n decoded_message = decoded_message + char\n return decoded_message", "def get_message(self, email):\n\n message = MIMEText(self.message, 'html')\n\n message['Subject'] = self.subject\n message['From'] = self.from_\n message['To'] = email\n\n return message", "def getMimeMessage(service, userID, msgID):\n try:\n message = service.users().messages().get(userId = userID, id = msgID, format = 'raw').execute()\n msgStr = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n return msgStr\n\n except errors.HttpError, error:\n print ('An error occurred: %s' % error)", "def DecodeCodedMessage(codedmessage):\n message = CODE.GetMessage(codedmessage)\n return message", "def get_message(self):\n context = self.context\n\n charset = str(context.charset)\n contentType = context.content_type\n\n mail_body = context.render()\n maintype, subtype = contentType.split('/')\n\n return MIMEText(mail_body, subtype, charset)", "def GetMimeMessage(service, user_id, msg_id):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id,\n format='raw').execute()\n\n #print('Message snippet: %s' % message['snippet'])\n \n\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n\n \n\n mime_msg = email.message_from_string(msg_str)\n\n return mime_msg\n \n except errors.HttpError, error:\n print('An error occurred: %s' % error)", "def _encoded_string_to_string(encoded_blob):\n try:\n return encoded_blob.decode(\"base64\")\n except Exception:\n raise InvalidDeckDataException(\"Cannot decode deck data into anything readable.\")", "def get_message_content(self):\n body = self.doc.find(\n \".//{http://salmon-protocol.org/ns/magic-env}data\").text\n\n body = urlsafe_b64decode(body.encode(\"ascii\"))\n\n logger.debug(\"diaspora.protocol.get_message_content: %s\", body)\n return body", "def base64url_decode(msg):\n bmsg = to_bytes(msg)\n pad = len(bmsg) % 4\n if pad > 0:\n bmsg += b'=' * (4 - pad)\n\n return base64.urlsafe_b64decode(bmsg)", "def unsigned(self, encoded):\n message, _ = self.split(encoded)\n return message", "def encode(msg: str) -> str:\n\n msg_bytes = msg.encode(\"ascii\")\n b64_bytes = base64.b64encode(msg_bytes)\n return b64_bytes.decode(\"ascii\")", "def get_message(service, user_id, msg_id):\n try:\n # grab the message instance\n message = service.users().messages().get(userId=user_id, id=msg_id,format='raw').execute()\n\n # decode the raw string, ASCII works pretty well here\n msg_str = base64.urlsafe_b64decode(message['raw'].encode('ASCII'))\n\n # grab the string from the byte object\n mime_msg = email.message_from_bytes(msg_str)\n\n # check if the content is multipart (it usually is)\n content_type = mime_msg.get_content_maintype()\n if content_type == 'multipart':\n # there will usually be 2 parts the first will be the body in text\n # the second will be the text in html\n parts = mime_msg.get_payload()\n\n # return the encoded text\n final_content = parts[0].get_payload()\n #return 
final_content\n return final_content\n\n elif content_type == 'text':\n return mime_msg.get_payload()\n #return mime_msg.get_payload()\n\n else:\n return \"\"\n print(\"\\nMessage is not text or multipart, returned an empty string\")\n # unsure why the usual exception doesn't work in this case, but \n # having a standard Exception seems to do the trick\n except Exception as error:\n print(\"An error occured: {}\".format(error))", "def decode_email_address(address, charset=\"utf8\"):\r\n name = decode_email_header(address[0])\r\n addr = address[1]\r\n addr = \"<\" + addr + \">\"\r\n if not name:\r\n return addr\r\n return name + \" \" + addr", "def _get_plain_message (self) :\n return self._message", "def msg(self):\n if \"msg\" in self._json:\n return self._json[\"msg\"]\n elif \"detail\" in self._json:\n return self._json[\"detail\"]\n else:\n return self._json" ]
[ "0.71435714", "0.6644162", "0.6545203", "0.6496529", "0.64006305", "0.63261145", "0.63129026", "0.6311837", "0.62922305", "0.6237837", "0.62142855", "0.6155156", "0.6114391", "0.6081689", "0.6068862", "0.60137045", "0.5987597", "0.5945252", "0.59316266", "0.5877409", "0.58527523", "0.5799404", "0.57752156", "0.57737887", "0.5760689", "0.57591116", "0.5757573", "0.5756236", "0.57561195", "0.5687197" ]
0.75111616
0
Returns a trimmed dictionary representation of an email's headers.
def trim_headers(all_headers, relevant_headers=["From", "To", "Subject", "Date"]): data = {} for header in all_headers: if header['name'] in relevant_headers: data[header['name']] = header['value'] return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _unpack_headers(self, headers):\n return dict((k,v[0]) for (k,v) in headers.getAllRawHeaders())", "def GetResponseHeadersDict(self):\n headers = collections.defaultdict(list)\n for (key, value) in self._wpr_response.original_headers:\n headers[key.lower()].append(value)\n return {k: ','.join(v) for (k, v) in headers.items()}", "def _headers(self) -> Mapping[str, str]:\n return {}", "def _parse_headers(headers):\n try:\n return dict(header.split(\":\") for header in headers)\n except:\n raise ValueError(\"Invalid headers %s\" % headers)", "def headers(self):\r\n return dict(**self._get_headers())", "def scrub_headers(headers):\n if isinstance(headers, dict):\n headers = headers.items()\n headers = [\n (parse_header_string(key), parse_header_string(val))\n for (key, val) in headers\n ]\n if not logger_settings.get('redact_sensitive_headers', True):\n return dict(headers)\n if logger_settings.get('reveal_sensitive_prefix', 16) < 0:\n logger_settings['reveal_sensitive_prefix'] = 16\n return {key: safe_value(key, val) for (key, val) in headers}", "def _headers(self) -> Mapping[str, str]:\n return self.auth.headers() if self.auth else {}", "def headers_raw_to_dict(headers_raw):\n\n if headers_raw is None:\n return None\n headers = headers_raw.splitlines()\n headers_tuples = [header.split(':', 1) for header in headers]\n\n result_dict = {}\n for header_item in headers_tuples:\n if not len(header_item) == 2:\n continue\n\n item_key = header_item[0].strip()\n item_value = header_item[1].strip()\n result_dict[item_key] = item_value\n\n return result_dict", "def headers(self):\n return Dict(**self._get_headers())", "def __get_headers(self):\n\n return {}", "def _parse_headers(raw_headers: List[str]) -> Dict[str, str]:\n headers: Dict[str, str] = {}\n for header in raw_headers:\n name = header[: header.find(\":\")].strip()\n value = header[header.find(\":\") + 1 :].strip()\n headers[name.lower()] = value\n\n return headers", "def get_email_details(header: str) -> dict:\r\n try:\r\n m = re.match(\r\n r\"\"\"\r\n ([\\w\\W]* # remove lines \r\n (\r\n ^Date: \\s*(?P<date>[\\w\\W]{25}) # obtain date (\"date\")\r\n |^From: \\s*(?P<from>[\\w\\W]*?$) # obtain sender (\"from\")\r\n |^To: \\s*(?P<to>[\\w\\W]*?$) # obtain receiver (\"to\")\r\n |^Subject: \\s*(?P<subject>[\\w\\W]*?$) # obtain subject (\"subject\")\r\n )){4}\r\n \"\"\",\r\n header,\r\n re.VERBOSE | re.MULTILINE,\r\n )\r\n\r\n return m.groupdict()\r\n\r\n except:\r\n return None", "def headers(self) -> Mapping[str, str]:\n return pulumi.get(self, \"headers\")", "def get_email_details(header: str) -> dict:\n # this is one way to solve the exercise\n # result_keys = [\"from\", \"to\", \"subject\", \"date\"]\n # search_strings = [\n # r\"From\\:\\s(.*)\",\n # r\"To\\:\\s(.*)\",\n # r\"Subject\\:\\s(.*)\",\n # r\"Date\\:\\s(.*)\\s[+-]\",\n # ]\n # result_values = [re.search(s, EMAIL_HEADER).group(1) for s in search_strings]\n # print(dict(zip(result_keys, result_values)))\n\n # or we could use groupdict as suggested\n m = re.search(\n r\"From\\:\\s(?P<from>.*)\\n.*To\\:\\s(?P<to>.*)\\n.*Subject\\:\\s(?P<subject>.+?)\\n.*Date\\:\\s(?P<date>.*)\\s[+-]\",\n header,\n re.MULTILINE | re.DOTALL,\n )\n return m.groupdict() if m else None", "def getAllHeaders(self, req):\n headers = {}\n for k, v in req.requestHeaders.getAllRawHeaders():\n headers[k.lower()] = v[-1]\n return headers", "def get_headers(headers: HTTPHeaders) -> Mapping[str, List[str]]:\r\n return {header.lower(): headers.get_list(header) for header in headers.keys()}", "def headers(self) 
-> dict:\n raise NotImplementedError # pragma: no cover", "def scrub_headers(self, header_dict):\n return self.__headers_scrubber(header_dict)", "def get_headers(s, sep=': ', strip_cookie=False, strip_cl=True, strip_headers: list = []) -> dict():\n d = dict()\n for kv in s.split('\\n'):\n kv = kv.strip()\n if kv and sep in kv:\n v=''\n k = kv.split(sep)[0]\n if len(kv.split(sep)) == 1:\n v = ''\n else:\n v = kv.split(sep)[1]\n if v == '\\'\\'':\n v =''\n # v = kv.split(sep)[1]\n if strip_cookie and k.lower() == 'cookie': continue\n if strip_cl and k.lower() == 'content-length': continue\n if k in strip_headers: continue\n d[k] = v\n return d", "def get_headers(s, sep=': ', strip_cookie=False, strip_cl=True, strip_headers: list = []) -> dict():\n d = dict()\n for kv in s.split('\\n'):\n kv = kv.strip()\n if kv and sep in kv:\n v=''\n k = kv.split(sep)[0]\n if len(kv.split(sep)) == 1:\n v = ''\n else:\n v = kv.split(sep)[1]\n if v == '\\'\\'':\n v =''\n # v = kv.split(sep)[1]\n if strip_cookie and k.lower() == 'cookie': continue\n if strip_cl and k.lower() == 'content-length': continue\n if k in strip_headers: continue\n d[k] = v\n return d", "def to_header(self) -> Dict[str, str]:\n tracer_from_context = self.get_current_tracer()\n temp_headers: Dict[str, str] = {}\n if tracer_from_context is not None:\n ctx = tracer_from_context.span_context\n try:\n temp_headers = tracer_from_context.propagator.to_headers(ctx)\n except AttributeError:\n pass\n return temp_headers", "def webhook_headers(self) -> \"Dict[str, List[str]]\":\n return self._attrs.get(\"webhookHeaders\")", "def webhook_headers(self) -> \"Dict[str, List[str]]\":\n return self._attrs.get(\"webhookHeaders\")", "def _parse_wsgi_headers(wsgi_environ):\n prefix = 'HTTP_'\n p_len = len(prefix)\n # use .items() despite suspected memory pressure bc GC occasionally\n # collects wsgi_environ.iteritems() during iteration.\n headers = {\n key[p_len:].replace('_', '-').lower():\n val for (key, val) in wsgi_environ.items()\n if key.startswith(prefix)}\n return headers", "def spamHeaders(self) -> Tuple[List[str], Dict[str, str]]:\n sections = [\"STATUS\", \"TITLE\", \"PROJECT\", \"FILE\", \"SITE\", \"CHANNAME\", \"DATA\"]\n sectionHeaders = {}\n sectionHeaders[\"STATUS\"] = [\"STATUS\"]\n sectionHeaders[\"TITLE\"] = [\"AUTHOR\", \"VERSION\", \"DATE\", \"COMMENT\"]\n sectionHeaders[\"FILE\"] = [\"NAME\", \"FREQBAND\", \"DATE\"]\n sectionHeaders[\"CHANNAME\"] = [\"ITEMS\", \"NAME\"]\n sectionHeaders[\"DATA\"] = [\"ITEMS\", \"CHAN\"]\n return sections, sectionHeaders", "def _get_cleaned_headers(headers):\r\n cleaned_headers = []\r\n for header in headers:\r\n # Google strips special characters, whitespace, and underscores first,\r\n # and then strips any *leading* digits. This order is extremely\r\n # important!\r\n sanitized = sub(r'^\\d+', '', sub(r'[\\W_]', '', header.lower()))\r\n if len(sanitized) > 0:\r\n cleaned_headers.append(sanitized)\r\n else:\r\n raise GoogleSpreadsheetError(\"Encountered a header '%s' that was \"\r\n \"either blank or consisted only of special characters. \"\r\n \"Could not map the header to the internal representation \"\r\n \"used by the Google Spreadsheet. 
Please change the header \"\r\n \"to consist of at least one alphanumeric character.\"\r\n % header)\r\n\r\n # When the same sanitized header appears multiple times in the first row\r\n # of a spreadsheet, _n is appended to the name to make it unique.\r\n header_count = defaultdict(int)\r\n results = []\r\n\r\n for header, cleaned_header in zip(headers, cleaned_headers):\r\n new_header = cleaned_header\r\n\r\n if header_count[cleaned_header] > 0:\r\n # Google's numbering starts from _2, hence the +1.\r\n new_header = '%s_%d' % (cleaned_header,\r\n header_count[cleaned_header] + 1)\r\n\r\n header_count[cleaned_header] += 1\r\n results.append(new_header)\r\n\r\n return results", "def _split_headers(headers):\n amz_headers = {}\n reg_headers = {}\n for cur in headers:\n if cur.lower().startswith('x-amz-'):\n amz_headers[cur] = headers[cur]\n else:\n reg_headers[cur] = headers[cur]\n return (amz_headers, reg_headers)", "def getHeaders(self):\n hd = {}\n line = self.conn.readline()\n while line != \"\\r\\n\":\n print \":\"+line+\":\"+\" len = \",len(line)\n key,value = line.split(':',1)\n hd[key] = value.rstrip()\n line = self.conn.readline()\n return hd", "def generate_header_dic(self, header_strings):\n headers = dict()\n\n for header_values in header_strings:\n header_list = header_values.split(':')\n headers[header_list[0]] = header_list[1]\n return headers", "def fitsio_header_to_dict(hdr):\n d = {}\n for key in hdr.keys():\n if key != 'HISTORY' or key != \"COMMENT\":\n d[key.lower()] = hdr.get(key)\n return d" ]
[ "0.70060724", "0.67045885", "0.66727537", "0.6565173", "0.6542235", "0.64860284", "0.64273584", "0.63836926", "0.6382047", "0.6355729", "0.63376004", "0.62960345", "0.6279265", "0.6251078", "0.6250599", "0.6228501", "0.6137159", "0.61062986", "0.60977423", "0.60977423", "0.603498", "0.6026937", "0.6026937", "0.5927466", "0.589736", "0.58906204", "0.5877665", "0.587681", "0.5867362", "0.5847488" ]
0.73881096
0
REANA workflow status publisher object creation.
def __new__(cls, instance=None): if instance: REANAWorkflowStatusPublisher.__instance = instance elif REANAWorkflowStatusPublisher.__instance is None: REANAWorkflowStatusPublisher.__instance = WorkflowStatusPublisher() return REANAWorkflowStatusPublisher.__instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\n super(Publisher, self).__init__(*args, **kwargs)\n self._original = {\n 'status': self.status\n }", "def _create_pub(name, rostype, *args, **kwargs):\n # counting publisher instance per topic name\n if name in TopicBack.pub_instance_count.keys():\n TopicBack.pub_instance_count[name] += 1\n else:\n TopicBack.pub_instance_count[name] = 1\n\n return rospy.Publisher(name, rostype, *args, **kwargs)", "def __init__(self, workflow):\n self.workflow = workflow", "def createStatusObject(self):\n if self.config_filepath is None:\n return False\n\n self.status = GARunStatus(self.config_filepath)\n return True", "async def createStatus(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"createStatus\"], *args, **kwargs)", "def _publish_status(self, status, parent=None):\n self.session.send(\n self.iopub_socket,\n \"status\",\n {\"execution_state\": status},\n parent=parent or self._parent_header,\n ident=self._topic(\"status\"),\n metadata={\"picky\": True},\n )", "def workflow_status(self, workflow_status):\n self._workflow_status = workflow_status", "def _publish_status(self, state='complete'):\n self.logger.debug('Recording catalog status: \"{}\"'.format(state))\n self.status_table.update_item(\n {'api_version': self.api_version},\n {\n 'state': state,\n 'timestamp': time.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'catalog_url': '{0}/v{1}/catalog.json'.format(self.api_url, self.api_version)\n }\n )", "def publish_status(client):\n client.publish(config.topic_get, payload=getlight())", "def test_create_status(self):\n self.basic_login()\n cassette_name = self.cassette_name('create_status')\n with self.recorder.use_cassette(cassette_name):\n repository = self.gh.repository('sigmavirus24', 'github3.py')\n assert repository is not None\n deployment = find(lambda d: d.id == 801,\n repository.iter_deployments())\n assert deployment is not None\n status = deployment.create_status('success')\n\n assert isinstance(status, github3.repos.deployment.DeploymentStatus)", "def _create_status(self):\n if self.headers['Accept'] != CONTENT_TYPE_STATUS:\n raise NotAcceptable()\n\n body = self.server.status()\n self._write_response(\n 200, body,\n content_type='application/se.novafaen.smrt.status.v1+json'\n )\n self.server.successful_response()", "def broadcast_publisher_info(sender, instance, created, **kwargs):\n if created:\n # notify creation to generic channel\n broadcast('pulse', {\n 'status': instance.status,\n 'pk': instance.pk,\n 'slug': instance.slug,\n 'date_last_crawled': '{0}'.format(instance.date_last_crawled)\n }, event_type=settings.OBER_EVENTS_CREATE_PUBLISHER)\n else:\n # notify updates, with basic serialized instance\n broadcast('pulse', {\n 'status': instance.status,\n 'status_changed': instance.status != instance._original.get('status'),\n 'pk': instance.pk,\n 'slug': instance.slug,\n 'date_last_crawled': '{0}'.format(instance.date_last_crawled)\n }, event_type=settings.OBER_EVENTS_UPDATE_PUBLISHER)", "def __init__(__self__, *,\n status: pulumi.Input[str],\n type: pulumi.Input[str],\n last_transition_time: Optional[pulumi.Input[str]] = None,\n message: Optional[pulumi.Input[str]] = None,\n reason: Optional[pulumi.Input[str]] = None,\n severity: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"status\", status)\n pulumi.set(__self__, \"type\", type)\n if last_transition_time is not None:\n pulumi.set(__self__, \"last_transition_time\", last_transition_time)\n if message is not None:\n pulumi.set(__self__, \"message\", message)\n 
if reason is not None:\n pulumi.set(__self__, \"reason\", reason)\n if severity is not None:\n pulumi.set(__self__, \"severity\", severity)", "def __init__(self):\n self.label = \"Stream Network to RAPID\"\n self.description = (\"Processes stream network data into files for RAPID\")\n self.canRunInBackground = False\n self.category = \"Workflows\"", "def __init__( self, title, description, status, notes=None, type=None ):\n self.title = title\n self.description = description\n self.notes = notes\n expected_status = [ 'to-run', 'incomplete', 'finished', 'dropped' ]\n if status not in expected_status:\n raise ValueError( status )\n self.status = status\n expected_types = [ 'single-point', 'neb' ]\n if type:\n if type not in expected_types:\n raise ValueError( type )\n self.type = type \n else:\n self.type = None", "def __init__(self, status, message, **kwargs):\r\n kwargs['status'] = status\r\n kwargs['message'] = message\r\n\r\n # we need to hash down the payload if there is one\r\n if 'payload' in kwargs and kwargs['payload'] is not None:\r\n kwargs['payload'] = unicode(\r\n json.dumps(dict(kwargs.get('payload')))\r\n )\r\n\r\n AppLogMgr.store(**kwargs)", "def __init__(self, status, message, **kwargs):\r\n kwargs['status'] = status\r\n kwargs['message'] = message\r\n\r\n # we need to hash down the payload if there is one\r\n if 'payload' in kwargs and kwargs['payload'] is not None:\r\n kwargs['payload'] = json.dumps(dict(kwargs.get('payload')))\r\n\r\n AppLogMgr.store(**kwargs)", "def adc_api_workflow_create():\n workflow_json = request.get_json(force=True)\n\n return jsonify(adc.workflow_create(workflow_json=workflow_json))", "def __init__(__self__, *,\n status: Optional[str] = None,\n type: Optional[str] = None):\n if status is None:\n status = 'disabled'\n if status is not None:\n pulumi.set(__self__, \"status\", status)\n if type is None:\n type = 'Notary'\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def publication_status(self):\n content = self.context.get_silva_object()\n status = None\n unapproved_version = content.get_unapproved_version()\n approved_version = content.get_approved_version()\n public_version = content.get_public_version()\n previous_versions = content.get_previous_versions()\n if unapproved_version and unapproved_version == self.context.id:\n status = \"unapproved\"\n elif approved_version and approved_version == self.context.id:\n status = \"approved\"\n elif public_version and public_version == self.context.id:\n status = \"public\"\n else:\n if previous_versions and previous_versions[-1] == self.context.id:\n status = \"last_closed\"\n elif self.context.id in previous_versions:\n status = \"closed\"\n else:\n # this is a completely new version not even registered\n # with the machinery yet\n status = 'unapproved'\n return status", "def __init__(self):\n self.messages = {\n \"updated\": \"Incident status successfully updated\",\n \"read\": \"Incident(s) successfully retrieved\"\n }\n\n self.status_types = {\n \"DRAFT\": \"draft\",\n \"RESOLVED\": \"resolved\",\n \"REJECTED\": \"rejected\",\n \"UNDER_INVESTIGATION\": \"under investigation\",\n }", "def main(workflow):\n \n if SHOW_UPDATES and workflow.update_available:\n workflow.add_item('A new version is available',\n 'Action this item to install the update',\n autocomplete='workflow:update',\n icon=ICON_SYNC)\n\n LOGGER.debug('Started create workflow')\n query = workflow.args[0]\n LOGGER.debug(query)\n\n core.autocompleteTags(workflow, LOGGER, query)\n\n # construct result\n title, tags = 
core.separateTags(query)\n\n tags_string = ', '.join(tags)\n query_string = constructCreateQuery(title, tags)\n\n LOGGER.debug('title: {!r}'.format(title))\n LOGGER.debug('query_string: {!r}'.format(query_string))\n if tags:\n workflow.add_item(title=\"Create note with title '{}' \".format(title),\n subtitle='Tags: ' + tags_string, arg=query_string, valid=True)\n else:\n workflow.add_item(title=\"Create note with title '{}'\".format(title),\n arg=query_string, valid=True)\n\n workflow.send_feedback()", "def __init__(self):\n\n # Application handle\n self.application = None\n\n # Workflow name\n self.name = None\n\n # Workflow data\n self.data = None", "def create_resultado(self, data):\n return Status(**data)", "def publisher(self, iTag, msgType, addr):\r\n return ROSPublisher(self, iTag, msgType, addr)", "def __init__(__self__, *,\n config: 'outputs.ServingConfigResponse',\n create_time: str,\n create_user: 'outputs.ActingUserResponse',\n delete_time: str,\n delete_user: 'outputs.ActingUserResponse',\n file_count: str,\n finalize_time: str,\n finalize_user: 'outputs.ActingUserResponse',\n labels: Mapping[str, str],\n name: str,\n status: str,\n version_bytes: str):\n pulumi.set(__self__, \"config\", config)\n pulumi.set(__self__, \"create_time\", create_time)\n pulumi.set(__self__, \"create_user\", create_user)\n pulumi.set(__self__, \"delete_time\", delete_time)\n pulumi.set(__self__, \"delete_user\", delete_user)\n pulumi.set(__self__, \"file_count\", file_count)\n pulumi.set(__self__, \"finalize_time\", finalize_time)\n pulumi.set(__self__, \"finalize_user\", finalize_user)\n pulumi.set(__self__, \"labels\", labels)\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"status\", status)\n pulumi.set(__self__, \"version_bytes\", version_bytes)", "def status(self, status: dict):\n pass", "def test_setup_succeeds(self):\n assert self.add_statestream(base_topic='pub')", "def __init__(\n self,\n publication: Publication,\n author: Identity,\n source_name: str,\n object_markings: List[MarkingDefinition],\n create_observables: bool,\n create_indicators: bool,\n confidence_level: int,\n report_type: str,\n report_status: int,\n excluded_ioc_indicator_types: Set[str],\n opencti_regions: Set[str],\n ) -> None:\n self.publication = publication\n self.author = author\n self.source_name = source_name\n self.object_markings = object_markings\n self.create_observables = create_observables\n self.create_indicators = create_indicators\n self.confidence_level = confidence_level\n self.report_type = report_type\n self.report_status = report_status\n self.excluded_ioc_indicator_types = excluded_ioc_indicator_types\n self.opencti_regions = opencti_regions", "def publish(self):\n return" ]
[ "0.7170993", "0.5967973", "0.59654903", "0.5892892", "0.587929", "0.5851673", "0.56647253", "0.5647875", "0.5630605", "0.5596893", "0.55781054", "0.5548198", "0.5542202", "0.5540318", "0.54856986", "0.54379517", "0.54240894", "0.54150003", "0.53984153", "0.538884", "0.5378284", "0.5349785", "0.5335247", "0.5329787", "0.5328025", "0.52872586", "0.52709705", "0.5263571", "0.52564895", "0.5246737" ]
0.73234653
0
An abstract Method A
def method_a(self):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fA(self):\n pass", "def a(self):\n pass", "def a(self):\n pass", "def fun_a(self):\n pass", "def method_b(self):", "def __call__(self):\n raise NotImplementedError", "def method(self, a):", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self):\r\n raise NotImplementedError('override me')", "def __call__(self):\n raise NotImplementedError()", "def partial_a(self):\n # STUDENT CODE HERE\n raise NotImplementedError", "def partial_a(self):\n # STUDENT CODE HERE\n raise NotImplementedError", "def partial_a(self):\n # STUDENT CODE HERE\n raise NotImplementedError", "def method(self):", "def function(self):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def method_a(self):\n return self.AdapterB.method_b()", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, **kwargs):\n raise NotImplementedError", "def __call__(self) -> None:", "def act(self):\n raise NotImplementedError", "def logic(self):\r\n raise NotImplementedError", "def __call__( self ):\n pass", "def __call__(self, f):\n raise NotImplementedError()", "def __call__(self):\n pass" ]
[ "0.7112726", "0.7052506", "0.7052506", "0.70522475", "0.7051604", "0.6883051", "0.67497706", "0.6745835", "0.6745835", "0.6745835", "0.6690918", "0.66639715", "0.6636465", "0.6636465", "0.6636465", "0.65145385", "0.6477189", "0.63304025", "0.63304025", "0.63304025", "0.63279426", "0.6238483", "0.6238483", "0.62035537", "0.6187171", "0.6185397", "0.61852026", "0.61160505", "0.6108821", "0.6086208" ]
0.732056
0
An abstract Method B
def method_b(self):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self):\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self):\r\n raise NotImplementedError('override me')", "def __call__(self):\n raise NotImplementedError()", "def partial_b(self):\n # STUDENT CODE HERE\n raise NotImplementedError", "def partial_b(self):\n # STUDENT CODE HERE\n raise NotImplementedError", "def partial_b(self):\n # STUDENT CODE HERE\n raise NotImplementedError", "def method_a(self):", "def b(self):\n pass", "def b(self):\n pass", "def fA(self):\n pass", "def apply_to(self, b):\n raise NotImplementedError(\"base class called\")", "def __call__(self) -> None:", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def method_a(self):\n return self.AdapterB.method_b()", "def __call__(self, **kwargs):\n raise NotImplementedError", "def B(arg1, arg2):\n # +++your code here+++\n print('implement me')", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def method(self):", "def __call__(self):\n pass", "def __call__(self):\n pass", "def logic(self):\r\n raise NotImplementedError", "def base():", "def partial_a(self):\n # STUDENT CODE HERE\n raise NotImplementedError", "def partial_a(self):\n # STUDENT CODE HERE\n raise NotImplementedError" ]
[ "0.70421946", "0.70402884", "0.70402884", "0.70402884", "0.6849722", "0.6817127", "0.67778647", "0.67778647", "0.67778647", "0.668075", "0.6639233", "0.6639233", "0.6492132", "0.6420923", "0.6414457", "0.6367774", "0.6367774", "0.6367774", "0.6337042", "0.63304216", "0.63296384", "0.63123137", "0.63123137", "0.6309976", "0.62735295", "0.62735295", "0.6253695", "0.62507725", "0.6248984", "0.6248984" ]
0.74599755
0
We are calling method_a from Class B using an adapter
def method_a(self): return self.AdapterB.method_b()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def method_b(self):", "def method_a(self):", "def another_method(self):\n pass", "def method(self, a):", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def call(self):", "def _call_method(self, call, method):\n raise Exception(\"_call_method must be implemented by subclasses.\")", "def method(self):", "def __call__(self):\r\n raise NotImplementedError('override me')", "def __call__(self, *args, **kwargs):\n return self.method(*args, **kwargs)", "def fun_a(self):\n pass", "def __call__(self):\n raise NotImplementedError", "def __call__(self, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def apply_to(self, b):\n raise NotImplementedError(\"base class called\")", "def add_adapter(self, func):\n self.adapter = func", "def someMethod (self):\n pass", "def __call__(self):\n raise NotImplementedError()", "def __call__(self):\n pass", "def __call__(self):\n pass", "def __call__(self, *args, **kwargs):\n return self.method(self.receiver, *args, **kwargs)", "def __call__( self ):\n pass", "def call(self, *args, **kwargs):", "def partial_a(self):\n # STUDENT CODE HERE\n raise NotImplementedError" ]
[ "0.75815064", "0.73203146", "0.64877784", "0.6271177", "0.61200273", "0.61200273", "0.61200273", "0.59982276", "0.5979013", "0.5966935", "0.59444237", "0.58661395", "0.58052456", "0.57878673", "0.5777832", "0.57048213", "0.57048213", "0.5666838", "0.5666838", "0.5666838", "0.56614023", "0.5655089", "0.5615885", "0.55886257", "0.55840325", "0.55840325", "0.5580886", "0.5571724", "0.54815125", "0.5474734" ]
0.8095476
0
Create balanced binary tree from ordered collection
def balanced_tree(ordered): bt = BinaryTree() add_range(bt, ordered, 0, len(ordered)-1) return bt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def binaryTree(r):\r\n return [r, [], []]", "def _sorted_list_to_bst(cls, items=[], start=None, end=None, parent=None):\n if start > end:\n return None\n mid = start + (end - start) // 2\n node = Node(items[mid], parent)\n node.left = cls._sorted_list_to_bst(items, start, mid - 1, node)\n node.right = cls._sorted_list_to_bst(items, mid + 1, end, node)\n return node", "def binarizetree(tree):\n queue = [tree]\n while queue:\n node = queue.pop(0)\n queue += node.nodelist\n # Construct binary tree\n if len(node.nodelist) == 2:\n node.lnode = node.nodelist[0]\n node.rnode = node.nodelist[1]\n # Parent node\n node.lnode.pnode = node\n node.rnode.pnode = node\n elif len(node.nodelist) > 2:\n # Remove one node from the nodelist\n node.lnode = node.nodelist.pop(0)\n newnode = SpanNode(node.nodelist[0].prop)\n newnode.nodelist += node.nodelist\n # Right-branching\n node.rnode = newnode\n # Parent node\n node.lnode.pnode = node\n node.rnode.pnode = node\n # Add to the head of the queue\n # So the code will keep branching\n # until the nodelist size is 2\n queue.insert(0, newnode)\n # Clear nodelist for the current node\n node.nodelist = []\n return tree", "def make_tree(arr):\n\n for i in range(len(arr)):\n arr, val = mid(arr)\n\n if i == 0: \n binary = BinaryNode(val)\n\n else:\n binary.insert(val)\n\n return binary", "def list_to_binary_tree(list_input):\n pass", "def build_tree(preorder, inorder):\n\n # Base case\n if preorder == [] or inorder == []:\n return\n\n root = preorder[0]\n\n # Breaks the lists by root, left side, and right side\n in_index = inorder.index(root)\n in_left = inorder[:in_index]\n in_right = inorder[in_index + 1:]\n pre_left = preorder[1 : len(in_left) + 1]\n pre_right = preorder[len(in_left) + 1 :]\n\n # Recursively creates smaller binary trees to make a big binary tree\n tree = BinaryTree(root)\n tree.set_left(build_tree(pre_left, in_left))\n tree.set_right(build_tree(pre_right, in_right))\n\n return tree", "def _treeify(values):\n if len(values) == 1: # this case causes problems later\n return values\n tree = np.empty_like(values)\n # Tree indices work as follows:\n # 0 is the root\n # 2n+1 is the left child of n\n # 2n+2 is the right child of n\n # So we now rearrange `values` into that format...\n\n # The first step is to remove the bottom row of leaves, which might not be exactly full\n last_full_row = int(np.log2(len(values) + 1) - 1)\n len_ragged_row = len(values) - (2 ** (last_full_row + 1) - 1)\n if len_ragged_row > 0:\n bottom_row_ix = np.s_[:2 * len_ragged_row:2]\n tree[-len_ragged_row:] = values[bottom_row_ix]\n values = np.delete(values, bottom_row_ix)\n\n # Now `values` is length 2**n - 1, so can be packed efficiently into a tree\n # Last row of nodes is indices 0, 2, ..., 2**n - 2\n # Second-last row is indices 1, 5, ..., 2**n - 3\n # nth-last row is indices (2**n - 1)::(2**(n+1))\n values_start = 0\n values_space = 2\n values_len = 2 ** last_full_row\n while values_start < len(values):\n tree[values_len - 1:2 * values_len - 1] = values[values_start::values_space]\n values_start += int(values_space / 2)\n values_space *= 2\n values_len = int(values_len / 2)\n return tree", "def test_binarytree_instantiate_list():\n input = [13, 42, 7]\n c = BinaryTree(input)\n assert isinstance(c, BinaryTree)", "def test_tree_binary_tree() -> None:\n t = generate_binary_tree_resources(4, 3)\n field(t, (\"root\", \"ds\", \"f1\")).identity = \"email\"\n field(t, (\"root.0.1.0\", \"ds.0.1.0\", \"f1\")).identity = \"ssn\"\n field(t, (\"root.1.1\", \"ds.1.1\", 
\"f1\")).identity = \"user_id\"\n assert generate_traversal({\"email\": \"X\"}, *t)\n assert generate_traversal({\"ssn\": \"X\"}, *t)\n assert generate_traversal({\"user_id\": \"X\"}, *t)", "def binary_search_tree_run():\n\n # no need for Tree object as the Tree itself is a concept; its made of connected nodes\n # nodes are the object; connections are self contained\n\n def binary_insert(root, node):\n if root is None:\n root = node\n else:\n if root.data > node.data:\n if root.l_child is None:\n root.l_child = node\n else:\n binary_insert(root.l_child, node)\n else:\n if root.r_child is None:\n root.r_child = node\n else:\n binary_insert(root.r_child, node)\n\n def in_order_print(root):\n if not root:\n return\n in_order_print(root.l_child)\n print(root.data)\n in_order_print(root.r_child)", "def build():\n # root = TreeNode(5)\n # root.left = TreeNode(2)\n # root.right = TreeNode(7)\n # return root\n\n \"\"\"\n 5\n / \\\n 2 6\n / \\\n 1 3\n [5,2,1,3,6]\n \"\"\"\n _5 = TreeNode(5)\n _2 = TreeNode(2)\n _6 = TreeNode(6)\n _1 = TreeNode(1)\n _3 = TreeNode(3)\n _5.left = _2\n _5.right = _6\n _2.left = _1\n _2.right = _3\n return _5", "def build_tree_from_ordered(input, tree=None):\n\n if len(input) == 0:\n return tree\n if len(input) == 1:\n tree = BSTNode(input[0])\n return tree\n\n middle_pos = int(len(input) / 2)\n right_tree = input[:middle_pos - 1]\n left_tree = input[middle_pos + 1:]\n actual_node = input[middle_pos - 1:middle_pos][0]\n\n # insert tree node\n if tree is None:\n tree = BSTNode(actual_node)\n else:\n tree.value = actual_node\n tree.right = build_tree_from_ordered(right_tree, tree=tree.right)\n tree.left = build_tree_from_ordered(left_tree, tree=tree.left)\n\n return tree", "def generate_tree_postorder(node_lst, root_index):", "def binarize(tree):\n if isinstance(tree, str):\n return Tree('0',[tree])\n elif len(tree) == 1:\n# print(tree)\n# print('\\n')\n return binarize(tree[0])\n else:\n label = tree.label()\n# print(type(label))\n return reduce(lambda x, y: Tree(label, (binarize(x), binarize(y))), tree)", "def build_tree(data, B, serializer):\n # root will have a block position of 0\n if serializer.read_mode:\n # if we're already in read mode, then the file's already been built, so\n # return the last node in file\n # TODO: allow negative indexing for root node in serializer\n return serializer.loads(-1)\n\n # sort the fieldnames in each data item before doing field checks\n # last element of each data item is now a unique id for the record\n [data_item.sort(key=lambda field: field[0]) for data_item in data]\n data.sort(key=lambda data_item: data_item[0][1])\n data = [data_item + [i] for i, data_item in enumerate(data)]\n\n # If the sequence of keys is not the same in every other data item\n # as well, you fucked up. We make it immutable just to be safe.\n seq = tuple(d[0] for d in data[0][:-1])\n for item in data:\n if tuple(d[0] for d in item[:-1]) != seq:\n raise Exception('You fucked up. 
The sequence of keys is \\\n not the same in every data item.')\n\n # Now that we've got leaves, let's build their parents, recursively.\n build_upwards(data, 0, B, RangeLeaf, serializer)\n\n # should be done serializing\n serializer.flush()\n # and output the root\n return serializer.loads(-1)", "def construct_binary_tree(preorder, inorder):\n if len(preorder) == 0 or preorder == None or inorder == None:\n return None\n\n val = preorder[0]\n root = BinaryTreeNode(val)\n\n if len(preorder) > 1:\n inorder_root_index = inorder.index(val)\n inorder_left_sub_tree = inorder[:inorder_root_index]\n inorder_right_sub_tree = inorder[inorder_root_index+1:]\n preorder_left_sub_tree = preorder[1:len(inorder_left_sub_tree)+1]\n preorder_right_sub_tree = preorder[len(inorder_left_sub_tree) + 1:]\n root.left = construct_binary_tree(preorder_left_sub_tree, inorder_left_sub_tree)\n root.right = construct_binary_tree(preorder_right_sub_tree, inorder_right_sub_tree)\n return root", "def _gen_test_tree_4():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(10)\n tree.right = BinaryNode(9)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n return tree", "def _treeify(values):\n if len(values) == 1: # this case causes problems later\n return values\n tree = np.empty_like(values)\n\n # The first step is to remove the bottom row of leaves, which might not be exactly full\n last_full_row = int(np.log2(len(values) + 1) - 1)\n len_ragged_row = len(values) - (2 ** (last_full_row + 1) - 1)\n if len_ragged_row > 0:\n bottom_row_ix = np.s_[: 2 * len_ragged_row : 2]\n tree[-len_ragged_row:] = values[bottom_row_ix]\n values = np.delete(values, bottom_row_ix)\n\n # Now `values` is length 2**n - 1, so can be packed efficiently into a tree\n # Last row of nodes is indices 0, 2, ..., 2**n - 2\n # Second-last row is indices 1, 5, ..., 2**n - 3\n # nth-last row is indices (2**n - 1)::(2**(n+1))\n values_start = 0\n values_space = 2\n values_len = 2 ** last_full_row\n while values_start < len(values):\n tree[values_len - 1 : 2 * values_len - 1] = values[values_start::values_space]\n values_start += int(values_space / 2)\n values_space *= 2\n values_len = int(values_len / 2)\n return tree", "def test_left_rotation_four_node_tree():\n from bbst import Bst\n tree = Bst([1, 2, 3, 4, 5])\n assert tuple(tree.in_order()) == (1, 2, 3, 4, 5)\n assert tuple(tree.breadth_first()) == (2, 1, 4, 3, 5)\n assert tuple(tree.pre_order()) == (2, 1, 4, 3, 5)\n assert tuple(tree.post_order()) == (1, 3, 5, 4, 2)\n assert tree.depth() == 3\n assert tree.balance() == 1", "def test_list_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100", "def BinaryTree(root):\n return [root, [], []]", "def build(cls, m, data):\n # TODO\n data = sorted(data, key=lambda x: x[0]) # Sort pairs by key.\n nodes = {} # Holds nodes and they governing value as key.\n\n while True:\n # Split into chunks of size m\n chunks = [data[i:i+m] for i in range(0, len(data), m)]\n data = []\n for chunk in chunks:\n parent = chunk.pop()\n data.append(parent)\n node = BTreeNode(m)\n node.keys = map(lambda i: i[0], chunk)\n node.values = map(lambda i: i[0], chunk)\n nodes[parent[0]] = node", "def __init__(self, container=[]):\n # Initialize empty tree.\n self.root = None\n # Insert every item from container.\n for item in container:\n self.insert(item)", "def construct_tree():\n root = TreeNode(5)\n 
root.left = TreeNode(3)\n root.right = TreeNode(8)\n root.left.left = TreeNode(2)\n root.left.right = TreeNode(4)\n root.right.left = TreeNode(7)\n return root", "def test_binarytree_pre_order_on_given(given_list, capsys):\n expected = [20, 18, 12, 11, 14, 19, 40, 31, 22, 33]\n given_list.pre_order()\n out, err = capsys.readouterr()\n actual = [int(i) for i in out.split('\\n') if i != '']\n assert expected == actual", "def test_bin_tree():\n n1 = BinTreeNode(1)\n n2 = BinTreeNode(2)\n n3 = BinTreeNode(3)\n n4 = BinTreeNode(4)\n n5 = BinTreeNode(5)\n n1.left = n2\n n1.right = n3\n n2.left = n4\n n3.right = n5\n t = BinTree(n1)\n print('pre order')\n preorder_trav(t.root)\n print('in order')\n inorder_trav(t.root)\n print('post order')\n postorder_trav(t.root)", "def minimal_tree(array: list):\n bst = BST()\n def build(l, r):\n if l == r: bst.insert(array[l]); return\n m = (l+r)//2\n # insert into the tree\n bst.insert(array[m])\n # build recursively\n build(l, m)\n build(m+1, r)\n build(0, len(array)-1)\n return bst", "def _gen_test_tree_3():\n tree = BinaryNode(5)\n tree.left = BinaryNode(1)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(3)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(8)\n tree.right.right = BinaryNode(9)\n return tree", "def make_tree(self, l):\n\t\tfor el in l:\n\t\t\tself.insert(el)", "def create_bst(lst, start, end):\n if end < start:\n return None\n mid = (start + end) // 2\n root = BinaryTree(lst[mid])\n root.left_child = create_bst(lst, start, mid - 1)\n root.right_child = create_bst(lst, mid + 1, end)\n # post-order traversal\n print(root.get_root_val())\n return root" ]
[ "0.6875716", "0.666865", "0.6641243", "0.66342694", "0.6591895", "0.63320524", "0.6254881", "0.62543494", "0.6193552", "0.61894536", "0.6180937", "0.61638397", "0.61598194", "0.61437345", "0.6138858", "0.61327463", "0.61318773", "0.613097", "0.6128159", "0.61017716", "0.60840523", "0.6075775", "0.60455763", "0.6035379", "0.60331684", "0.60319084", "0.6001901", "0.59890133", "0.5978657", "0.5971916" ]
0.802619
0
Add range to the bt in a way that the bt remains balanced
def add_range(bt, ordered, low, high): if low <= high: mid = (low+high)//2 bt.add(ordered[mid]) add_range(bt, ordered, low, mid-1) add_range(bt, ordered, mid+1, high)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tentative_add_range(self, memrange):\n # optimize the easy case. makes rest of code simpler\n if not len(self.ranges):\n bisect.insort(self.ranges, memrange)\n return\n\n probeaddr = memrange.startaddr\n while probeaddr < memrange.endaddr:\n index = bisect.bisect(self.ranges, MemoryRange.from_addr(probeaddr))\n\n # before first\n if index == 0:\n nxt = self.ranges[index]\n lastaddr = nxt.startaddr\n bisect.insort(self.ranges, MemoryRange.part_of(memrange, probeaddr, lastaddr - probeaddr))\n probeaddr = nxt.endaddr\n continue\n\n # after last\n if index >= len(self.ranges):\n prev = self.ranges[index-1]\n startaddr = prev.endaddr\n if startaddr <= probeaddr:\n bisect.insort(self.ranges, MemoryRange.part_of(memrange, probeaddr, memrange.endaddr - probeaddr))\n probeaddr = memrange.endaddr\n else:\n probeaddr = startaddr\n continue\n\n # in between 2\n prev = self.ranges[index-1]\n if probeaddr in prev:\n probeaddr = prev.endaddr\n continue\n\n nxt = self.ranges[index]\n if nxt.startaddr in memrange:\n bisect.insort(self.ranges, MemoryRange.part_of(memrange, probeaddr, nxt.startaddr - probeaddr))\n probeaddr = nxt.endaddr\n else:\n bisect.insort(self.ranges, MemoryRange.part_of(memrange, probeaddr, memrange.endaddr - probeaddr))\n probeaddr = memrange.endaddr", "def test_backwards_100_balance_remains_between_1_and_negative_1():\n from bbst import Bst\n tree = Bst([x for x in range(100)][::-1])\n assert tree.balance() in range(-1, 2)", "def rangeB(self):\r\n if self._range_B is not None:\r\n return round(self._range_B,2)\r\n else:\r\n return self._range_B", "def calcBRange(c,n=10):\n \n bMin = -abs(c)/2.0 \n bMax = abs(c)/2.0 \n return np.linspace(bMin,bMax,n)", "def bcRange(self):\n\t\treturn fabs(self.Upper - self.Lower)", "def add(self, rng: Rangelike) -> None:\n # if it's a RangeSet, then do extend instead\n if isinstance(rng, RangeSet):\n self.extend(rng)\n return\n elif _is_iterable_non_string(rng):\n raise ValueError(\"argument is iterable and not Range-like; use .extend() instead\")\n # otherwise, convert Range to a list at first\n rng = Range(rng)\n # change the error message if necessary\n try:\n temp_ranges = self._ranges.copy()\n # if the list of ranges is empty, then add the node at the beginning\n if len(temp_ranges) == 0:\n temp_ranges.append(rng)\n inserted_node = temp_ranges.first\n # otherwise, if our range would fit at the end, then put it there\n elif rng > temp_ranges.last.value:\n temp_ranges.append(rng)\n inserted_node = temp_ranges.last\n # otherwise, find the node *before which* our range fits\n else:\n node = temp_ranges.first\n while rng > node.value:\n node = node.next\n temp_ranges.insert_before(node, rng)\n inserted_node = node.prev\n # now, merge this range with the previous range(s):\n if inserted_node.prev:\n prev_union = inserted_node.value.union(inserted_node.prev.value)\n while prev_union and inserted_node.prev:\n inserted_node.value = prev_union\n temp_ranges.pop_before(inserted_node)\n prev_union = inserted_node.value.union(inserted_node.prev.value) if inserted_node.prev else None\n # merge this range with the next range(s)\n if inserted_node.next:\n next_union = inserted_node.value.union(inserted_node.next.value)\n while next_union and inserted_node.next:\n inserted_node.value = next_union\n temp_ranges.pop_after(inserted_node)\n next_union = inserted_node.value.union(inserted_node.next.value) if inserted_node.next else None\n except TypeError:\n raise TypeError(f\"Range '{rng}' is not comparable with the other Ranges in this RangeSet\")\n # apply 
changes\n self._ranges = temp_ranges\n # TODO python 3.8 update - use an assignment operator (see the following code):\n # while inserted_node.prev and (prev_union := inserted_node.value.union(inserted_node.prev.value)):\n # inserted_node.value = prev_union\n # self._ranges.pop_before(inserted_node)\n # while inserted_node.next and (next_union := inserted_node.value.union(inserted_node.next.value)):\n # inserted_node.value = next_union\n # self._ranges.pop_after(inserted_node)", "def bst_right_balance():\n from bbst import Bst\n return Bst([5, 8, 6, 9, 2, 7])", "def add_user_range(self, queue_list: VectorQueue):", "def glitr_range_to_bed(in_range, out_bed):\n summit_size = cfg.get('peaks', 'peak_summit_size')\n with open(in_range) as infile:\n with open(out_bed, 'w') as outfile:\n with open(out_bed + '_summits.%s_around' % summit_size, 'w') \\\n as outfile_summits:\n for i, line in enumerate(infile):\n fields = line.strip('\\n').split('\\t')\n chrom, start, stop = parse_ucsc_range(fields[0])\n start = max(0, start)\n foldchange = fields[3]\n outfile.write('\\t'.join([chrom, str(start), str(stop),\n 'GLITR_peak_%s'%(i+1),\n str(int(float(foldchange))),'+'])\n + '\\n')\n # take bases around center as summit\n center = start + (stop - start) / 2\n center_start = center - summit_size / 2\n center_stop = center + summit_size / 2\n outfile_summits.write('\\t'.join([chrom, str(center_start),\n str(center_stop), 'GLITR_peak_%s'%(i+1),\n str(int(float(foldchange))),'+']) + '\\n')", "def test_straight_100_balance_remains_between_1and_negative_1():\n from bbst import Bst\n tree = Bst(x for x in range(100))\n assert tree.balance() in range(-1, 2)", "def merge_ranges():", "def balanced_tree(ordered):\n bt = BinaryTree()\n\n add_range(bt, ordered, 0, len(ordered)-1)\n\n return bt", "def update_upper_bounds(self, B):\n for arc in self.arcs():\n if self.arc_info[arc[0]]['upper_bound'] == -1:\n self.arc_info[arc[0]]['upper_bound'] = B", "def _bi_range(start, end):\n if start == end:\n return (start,)\n\n elif end < start:\n return reversed(range(end, start + 1))\n\n else:\n return range(start, end + 1)", "def __add__(self, other):\n return Base40Interval(self.base40 + other.base40)", "def test_create_one_end(check_ranges, accounts, nft):\n nft.transferRange(accounts[4], 19000, 20000, {\"from\": accounts[2]})\n check_ranges([(1, 10001)], [(10001, 19000), (20000, 20001)], [(20001, 30001)], [(19000, 20000)])", "def sum_range(lower, upper):\n\n def copies(pmin, pmax):\n if lower <= pmin and pmax <= upper:\n return True\n elif pmax > upper:\n return False\n return copies(pmin+50, pmax+60)\n\n return copies(0, 0)", "def prepare_arbitrage(min, max):\n min = min or BALANCE_BOTTOM_MARGIN\n max = max or BALANCE_TOP_MARGIN\n loop.run_until_complete(_prepare_arbitrage(min, max))", "def fit_to_range(val: float, a: float, b: float, a1: float, b1: float) -> float:\n new_value = ((val - a) / (b - a)) * (b1 - a1) + a1\n return new_value", "def range_(self):\n return self.bset.range_", "def bst_balanced():\n from bbst import Bst\n return Bst([5, 6, 2, 3, 1, 7])", "def test_create_one_end_abs(check_ranges, accounts, nft):\n nft.transferRange(accounts[4], 29000, 30000, {\"from\": accounts[3]})\n check_ranges([(1, 10001)], [(10001, 20001)], [(20001, 29000), (30000, 30001)], [(29000, 30000)])", "def l457_ts_bt_band_func(dn, lmin, lmax, qcalmin, qcalmax, k1, k2):\n ts_bt = np.copy(dn).astype(np.float64)\n ts_bt -= qcalmin\n ts_bt *= ((lmax - lmin) / (qcalmax - qcalmin))\n ts_bt += lmin\n return ts_bt_func(ts_bt, k1, 
k2).astype(np.float32)", "def upper_bound(self) -> float:\n ...", "def range(self) -> ty.Tuple[float, float]:\r\n ...", "def cal_bl(self, offset, size):\n blbegin = offset / conf.blsize\n blend = (offset + size - 1) / conf.blsize + 1\n blnum = range(blbegin, blend)\n\n blfrom = [offset % conf.blsize, ]\n blfrom.extend([0 for i in range(len(blnum) - 1)])\n\n blto = [conf.blsize for i in range(len(blnum) - 1)]\n least = (offset + size) % conf.blsize\n\n if least == 0:\n least = conf.blsize\n blto.append(least)\n\n return zip(blnum, blfrom, blto)", "def _adjustRange(self, start, end):\n adjusted_start = start\n if self._start:\n if end < self._start:\n return None\n adjusted_start = max(self._start, start)\n \n adjusted_end = end\n if self._end:\n if self._end < start:\n return None\n adjusted_end = min(self._end, end)\n \n return (adjusted_start, adjusted_end)", "def test_nested_subranges(self):\n br1 = ByteRange(0, 100)\n self.check_partitions(br1, True)\n\n # Add 2nd layer\n br11 = br1.add_subrange(0, 20)\n self.check_partitions(br1, False, br11, True)\n\n br12 = br1.add_subrange(20, 50)\n self.check_partitions(br1, False, br12, True)\n\n br13 = br1.add_subrange(70, 30)\n self.check_partitions(br1, True, br13, True)\n\n # Add 3rd layer\n br121 = br12.add_subrange(0, 30)\n self.check_partitions(br1, False, br12, False, br121, True)\n\n br122 = br12.add_subrange(30, 20)\n self.check_partitions(br1, True, br12, True, br122, True)\n\n # Add 4th layer\n br1211 = br121.add_subrange(0, 15)\n self.check_partitions(br1, False, br12, False, br121, False, br1211, True)\n\n br1212 = br121.add_subrange(15, 10)\n self.check_partitions(br1, False, br12, False, br121, False, br1212, True)\n\n br1213 = br121.add_subrange(25, 5)\n self.check_partitions(br1, True, br12, True, br121, True, br1213, True)\n\n # Verify all the absolute offsets\n self.assertEqual((0, 20), br11.abs_range())\n self.assertEqual((20, 35), br1211.abs_range())\n self.assertEqual((35, 45), br1212.abs_range())\n self.assertEqual((45, 50), br1213.abs_range())\n self.assertEqual((50, 70), br122.abs_range())\n self.assertEqual((70, 100), br13.abs_range())", "def sum_range(lower, upper):\n\n def copies(pmin, pmax):\n if lower <= pmin and pmax <= upper:\n return True\n elif pmax > upper:\n return False\n return copies(pmin + 50, pmax + 60) or copies(pmin + 130, pmax + 140)\n\n return copies(0, 0)", "def lower_bound(self) -> float:\n ..." ]
[ "0.5963314", "0.57457644", "0.57033575", "0.5670653", "0.5641155", "0.56268615", "0.558927", "0.5557739", "0.5524271", "0.55229443", "0.54947835", "0.5489055", "0.548258", "0.544948", "0.5430252", "0.5424991", "0.5393245", "0.5370487", "0.5359171", "0.53217083", "0.531139", "0.5287733", "0.52739626", "0.52595276", "0.52558935", "0.521242", "0.52091455", "0.5190977", "0.5187472", "0.5182873" ]
0.7508206
0
Returns VPN Connection status
def xqsystem_vpn_status(self) -> models.VPNStatusResponse: return apply_model( models.VPNStatusResponse, self.do_get_request("/xqsystem/vpn_status") )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status_openvpn():\n p = Popen(\"service openvpn status\", shell=True, stdout=PIPE, stderr=PIPE)\n (so, se) = p.communicate()\n return True if p.returncode == 0 else False", "def getConnectionStatus(self): \n return getI1ProConnectionStatus()", "def status(self) -> 'outputs.ConnectionStatusResponse':\n return pulumi.get(self, \"status\")", "def connection_status(self):\n return self._connection_status", "def get_status(self):\n return self._conn_state", "def status(self) -> Optional[pulumi.Input[Union[str, 'ConnectionStatus']]]:\n return pulumi.get(self, \"status\")", "def get_connection_status(self):\n\n if not self.pk:\n status = self.NOT_SAVED\n msg = \"<span class='errors'>No connection. Please save and reload to connect to dropbox </span>\"\n\n # if no access keys have been set validation still needs to occur\n elif self.access_token_key == '' or self.access_token_secret == '':\n try: #check whether keys for COMIC itself are present.\n self.get_dropbox_app_keys()\n except AttributeError as e:\n status = self.ERROR\n msg = \"ERROR: A key required for this app to connect to dropbox could not be found in settings..\\\n Has this been forgotten?. Original error: \"+ str(e)\n\n status = self.READY_FOR_AUTH\n msg = \"Ready for authorization.\"\n\n else: #if access keys have been filled, Try to get dropbox info to test connection\n try:\n\n info = self.get_info()\n status = self.CONNECTED\n msg = \"Connected to dropbox '\" + info[\"display_name\"] + \"', owned by '\"+info[\"email\"]+\"'\"\n except ErrorResponse as e:\n status = self.ERROR\n msg = str(e)\n\n if self.pk:\n DropboxFolder.objects.filter(pk=self.pk).update(last_status_msg=msg)\n\n return status, msg", "def status(ctx):\n return show_network_status()", "def remote_status():", "def vpn():\n num_ipsec = _get_ipsec_stats()\n num_openvpn = _get_openvpn_stats()\n num_wireguard = _get_wireguard_stats()\n\n return {\n \"stats_type\": \"vpn\",\n \"vpn\": {\n \"ipsec_connections\": num_ipsec,\n \"openvpn_connections\": num_openvpn,\n \"wireguard_connections\": num_wireguard,\n }\n }", "def check_vpn_interface():\n return validate_vpn_interface(call_command('netstat -i')[0].split('\\n'))", "def get_vpn_status():\n html = requests.get('https://freevpn.me/').text\n soup = BeautifulSoup(html, 'lxml')\n\n table = soup.find_all(\n 'table', attrs={'class': \"table table-striped table-bordered dataTable\"})\n if table:\n table = table[0]\n tbody = table.tbody\n trows = tbody.find_all('tr')\n for row in trows:\n name = None\n loc = None\n online = False\n load = None\n tcols = row.find_all('td')\n for i, tcol in enumerate(tcols):\n if i == 1:\n name = tcol.text.strip().lower()\n elif i == 2:\n loc = tcol.text.strip()\n elif i == 4:\n if tcol.text.strip() == TEXT_ONLINE:\n online = True\n elif i == 8:\n load = tcol.text.strip()[:-1]\n if name:\n yield {'name': name, 'loc': loc,\n 'online': online, 'load': load}", "def get_vpnssl_status(iface):\n if iface in netifaces.interfaces():\n addr = netifaces.ifaddresses(iface)\n if len(addr) > 0: # vpn0 remains in the array even when gone, for whatever reason. 
So check if there is anything in there.\n return True\n\n return False", "def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-able\n self.vm.wait_ready()\n status = self.vm.run_command(\"ctool status\", indent=0, prefix='')\n return status.strip()", "def is_connected(self):\n\t\treturn call_sdk_function('PrlSrv_IsConnected', self.handle)", "def state(self):\n return pn_connection_state(self._impl)", "def Bg_get_ping_result():\r\n return BgPing.analyse_result()", "def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:\n return pulumi.get(self, \"status\")", "def check_connection_status(status):\n\n if status.status_code == 200:\n return True\n else:\n return False", "def __CheckConnectStatus(self):\r\n if not self.tn:\r\n print \"Connection is down!\"\r\n return False\r\n else:\r\n print \"Connection is alive!\"\r\n return True", "def status(self, *args):\n st = dict()\n st[\"num_sockets\"] = 0 # TODO: when sockets implemented\n st[\"mem_free\"] = gc.mem_free()\n st[\"wlan_connected\"] = network.WLAN(network.STA_IF).isconnected()\n return True, json.dumps(st).encode()", "def nat_waitforconn_alive():\r\n return NAT_STATE_DATA[\"mux\"] != None and NAT_STATE_DATA[\"mux\"].isAlive()", "def get_connection_state(self):\n return self.connection_state", "def status(self):\n return self._select_interface(self._rc_status, self._http_status)", "def getStatus(self, request, context):\n \n statusDrone = str(self.vehicle.system_status).rpartition(':')[2]\n\t \n return droneconnect_pb2.Status(status = statusDrone)", "def status(self):\n try:\n capabilities = []\n with manager.connect(host=netconf_server_ip,\n port=int(netconf_server_port),\n username= netconf_server_username,\n password=netconf_server_password,\n hostkey_verify=False) as m:\n\n for c in m.server_capabilities:\n capabilities.append(c)\n return capabilities\n\n except:\n return \"Can not establish connection with the server, something went wrong\"", "def rtt_get_status(self):\n status = structs.JLinkRTTerminalStatus()\n res = self.rtt_control(enums.JLinkRTTCommand.GETSTAT, status)\n return status" ]
[ "0.72203964", "0.7181919", "0.70604736", "0.68924755", "0.6871677", "0.6732765", "0.6496315", "0.6462414", "0.64421576", "0.6429181", "0.6426909", "0.64139163", "0.6348037", "0.6317235", "0.6228951", "0.6224312", "0.6174834", "0.6111198", "0.6111198", "0.6111198", "0.6111198", "0.60923195", "0.6058914", "0.6022127", "0.6008264", "0.595136", "0.5948854", "0.5920316", "0.59144384", "0.59072214" ]
0.7532402
0
The method to create all the objects of the commands, registered in register.json file.
def register_commands(self): with open(self._full_register_name, 'r') as file_to_read: command_register = json.loads(file_to_read.read()) commands = command_register.get("commands") if commands is None: logging.error("Command register is incorrect") return [] command_objects = [] for command in commands: module_name = command.get("module") class_name = command.get("class_name") if (module_name is None) or (class_name is None): logging.error("Commands in the register are described in incorrect way.") raise KeyError() try: command_module = importlib.import_module(module_name) command_class = getattr(command_module, class_name) command_object = command_class() command_objects.append(command_object) except ModuleNotFoundError as e: logging.error("Command modules specified in the register are not found!") raise e return command_objects
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_command_objects(self):\n super().init_command_objects()\n device_data = DeviceData.get_instance()\n args = (device_data, self.state_model, self.logger)\n self.register_command_object(\"TelescopeOn\", TelescopeOn(*args))\n self.register_command_object(\"TelescopeOff\", TelescopeOff(*args))\n self.register_command_object(\"Disable\", Disable(*args))\n self.register_command_object(\n \"TelescopeStandby\", TelescopeStandby(*args)\n )", "def init_command_objects(self):\n super().init_command_objects()\n device_args = (self, self.state_model, self.logger)\n # resource_args = (self.resource_manager, self.state_model, self.logger) \n # only use resource_args if we want to have separate resource_manager object\n\n self.register_command_object(\n \"Configure\",\n self.ConfigureCommand(*device_args)\n ) \n self.register_command_object(\n \"AddReceptors\",\n self.AddReceptorsCommand(*device_args)\n )\n self.register_command_object(\n \"RemoveReceptors\",\n self.RemoveReceptorsCommand(*device_args)\n )\n self.register_command_object(\n \"RemoveAllReceptors\",\n self.RemoveAllReceptorsCommand(*device_args)\n )\n self.register_command_object(\n \"ConfigureScan\",\n self.ConfigureScanCommand(*device_args)\n )\n self.register_command_object(\n \"StartScan\",\n self.ScanCommand(*device_args)\n )\n self.register_command_object(\n \"GoToIdle\",\n self.GoToIdleCommand(*device_args)\n )", "def init_command_objects(self):\n super().init_command_objects()\n device_data = DeviceData.get_instance()\n\n args = (device_data, self.state_model, self.logger)\n\n self.register_command_object(\"SetStowMode\", SetStowMode(*args))\n self.register_command_object(\n \"SetStandbyLPMode\", SetStandbyLPMode(*args)\n )\n self.register_command_object(\"SetOperateMode\", SetOperateMode(*args))\n self.register_command_object(\"Scan\", Scan(*args))\n self.register_command_object(\"EndScan\", EndScan(*args))\n self.register_command_object(\"Configure\", Configure(*args))\n self.register_command_object(\"StartCapture\", StartCapture(*args))\n self.register_command_object(\"StopCapture\", StopCapture(*args))\n self.register_command_object(\n \"SetStandbyFPMode\", SetStandbyFPMode(*args)\n )\n self.register_command_object(\"Slew\", Slew(*args))\n self.register_command_object(\"Track\", Track(*args))\n self.register_command_object(\"StopTrack\", StopTrack(*args))\n self.register_command_object(\"Abort\", Abort(*args))\n self.register_command_object(\"Restart\", Restart(*args))\n self.register_command_object(\"ObsReset\", ObsReset(*args))", "def commands():", "def __init__(self, commands=None):\n self.commands = {}\n self.context = None", "def load_commands():\n register_plugin(configure_client_details)\n register_plugin(search_venues)", "def _register_commands(self):\n cmds = []\n cmd_help = CommandParser(\"help\", \"Show help for a command.\")\n cmd_help.add_argument(\n \"command\",\n nargs=\"*\",\n help=\"The command to get help for. 
Specify multiple names to get help for subcommands.\",\n )\n cmd_help.add_argument(\"-m\", \"--module\", help=\"List all commands from the given module\")\n cmd_help.add_argument(\n \"-f\",\n \"--full\",\n action=\"store_true\",\n help='Include descriptions in the \"all\" help output.',\n )\n cmds.append(cmd_help)\n\n target_mod = CommandParser()\n target_mod.add_argument(\"module\", nargs=\"+\", help=\"Target module(s)\")\n target_mod.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=\"protocol\",\n default=\"feature\",\n dest=\"mtype\",\n help=\"Target is a protocol module\",\n )\n cmd_module = CommandParser(\"module\", \"Manage and query ZeroBot modules\")\n add_subcmd = cmd_module.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"load\", description=\"Load a module\", parents=[target_mod])\n add_subcmd(\"reload\", description=\"Reload a module\", parents=[target_mod])\n subcmd_list = add_subcmd(\"list\", description=\"List available modules\")\n subcmd_list.add_argument(\"-l\", \"--loaded\", action=\"store_true\", help=\"Only loaded modules\")\n list_group = subcmd_list.add_mutually_exclusive_group()\n default_categories = [\"protocol\", \"feature\"]\n list_group.add_argument(\n \"-f\",\n \"--feature\",\n action=\"store_const\",\n const=[\"feature\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only feature modules\",\n )\n list_group.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=[\"protocol\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only protocol modules\",\n )\n add_subcmd(\"info\", description=\"Show module information\", parents=[target_mod])\n cmds.append(cmd_module)\n\n save_reload_args = CommandParser()\n save_reload_args.add_argument(\n \"config_file\",\n nargs=\"*\",\n help=\"Name of config file (without .toml extension). Omit to affect all loaded config files.\",\n )\n set_reset_args = CommandParser()\n set_reset_args.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n cmd_config = CommandParser(\"config\", \"Manage configuration\")\n add_subcmd = cmd_config.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"save\", description=\"Save config files to disk\", parents=[save_reload_args])\n subcmd_savenew = add_subcmd(\"savenew\", description=\"Save config file to a new path\")\n subcmd_savenew.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n subcmd_savenew.add_argument(\"new_path\", help=\"The path to save the config file to\")\n add_subcmd(\n \"reload\",\n description=\"Reload config files from disk\",\n parents=[save_reload_args],\n )\n subcmd_set = add_subcmd(\"set\", description=\"Modify config settings\", parents=[set_reset_args])\n subcmd_set.add_argument(\n \"key_path\",\n help=\"The config key to set. Subkeys are separated by dots, e.g. 'Core.Backup.Filename'\",\n )\n subcmd_set.add_argument(\"value\", nargs=\"?\", help=\"The new value. Omit to show the current value.\")\n subcmd_reset = add_subcmd(\n \"reset\",\n description=\"Reset config settings to last loaded value\",\n parents=[set_reset_args],\n )\n subcmd_reset.add_argument(\n \"key_path\",\n nargs=\"?\",\n help=(\n \"The config key to set. Subkeys are separated by dots, \"\n \"e.g. 'Core.Backup.Filename'. 
If omitted, the entire \"\n \"config will be reset.\"\n ),\n )\n subcmd_reset.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Set the key to its default value instead. Effectively unsets a config key.\",\n )\n cmds.append(cmd_config)\n\n cmd_version = CommandParser(\"version\", \"Show version information\")\n cmds.append(cmd_version)\n\n cmd_restart = CommandParser(\"restart\", \"Restart ZeroBot.\")\n cmd_restart.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_restart)\n\n cmd_quit = CommandParser(\"quit\", \"Shut down ZeroBot.\")\n cmd_quit.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_quit)\n\n cmd_wait = CommandParser(\"wait\", \"Execute a command after a delay\")\n cmd_wait.add_argument(\n \"delay\",\n help=\"Amount of time to delay. Accepts the following modifier suffixes: 'ms', 's' (default), 'm', 'h'.\",\n )\n cmd_wait.add_argument(\"command\", help=\"Command to delay\")\n cmd_wait.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"Command arguments\")\n cmds.append(cmd_wait)\n\n cmd_cancel = CommandParser(\"cancel\", \"Cancel a waiting command\")\n cancel_group = cmd_cancel.add_mutually_exclusive_group()\n cancel_group.add_argument(\"id\", type=int, nargs=\"?\", help=\"The ID of a waiting command\")\n cancel_group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List currently waiting commands\")\n cmds.append(cmd_cancel)\n\n cmd_backup = CommandParser(\"backup\", \"Create a database backup\")\n cmd_backup.add_argument(\"name\", type=Path, help=\"Backup filename\")\n cmds.append(cmd_backup)\n\n self.command_register(\"core\", *cmds)", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def init_command_objects(self):\n super().init_command_objects()\n\n device_args = (\n self.component_manager,\n self.op_state_model,\n self.obs_state_model,\n self.logger,\n )\n self.register_command_object(\n \"ConfigureScan\", self.ConfigureScanCommand(*device_args)\n )\n self.register_command_object(\"GoToIdle\", self.GoToIdleCommand(*device_args))", "def create_command_list(device):\n command = XmlApiObject({})\n command.name = \"test\"\n device.commands[command.name] = command", "def __init__(self, *commands):\n \n self.cmds = dict()\n \n for nm, attr in commands:\n self[nm] = attr", "async def __add_commands(self):\r\n commands_to_add: List[ClientCommandStructure] = [\r\n cmd for cmd in ChatCommandHandler.register.values()\r\n if cmd.app not in self._api_commands\r\n ]\r\n\r\n if commands_to_add:\r\n for cmd in commands_to_add:\r\n endpoint = f\"applications/{self.client.bot.id}\"\r\n\r\n if cmd.app.guild_id is not MISSING:\r\n endpoint += f\"/guilds/{cmd.app.guild_id}\"\r\n\r\n await self.client.http.post(\r\n endpoint + \"/commands\",\r\n cmd.app.to_dict()\r\n )", "def get_commands(self, component_loads):\n return {}", "def _register(cls):\r\n command_name = cls.__dict__.get('__command__', None)\r\n if command_name:\r\n Command._commands[command_name] = cls", "def __init__(self, command_list: list = None) -> None:\n if command_list is None:\n command_list = implemented_commands\n for command in command_list:\n setattr(self, command.get(\"name\").replace(\" \", \"_\"), self._SingleCommand(command))", "async def adding_command_list(self):\n command_aliases=['anime','fun','mod','nekogif'] #This includes the aliases and the cog names\n #NOTE: fun command added\n for i in 
self.bot.commands:\n self.commands.append(i.name)\n \n for i in command_aliases:\n self.commands.append(i)", "def __init__(self, command_list, ):\n self.command_list = [] # all addition via function below\n self.add_command( command_list )", "def _init_commands(self):\n\t\tself.commands = {}\n\t\tself.log.info(\"Initializing commands...\")\n\t\t# Get all the commands and iterate over them\n\t\tfor command in self.conf_commands:\n\t\t\t\n\t\t\t# Verify the necessary config elements exist at all\n\t\t\tdisabled = command.get('disabled', False) # Disabled is optional, defaults to False\n\t\t\tif(disabled == True):\n\t\t\t\tcontinue;\n\t\t\tcommand_name = command.get('name', \"unknown\").lower()\n\t\t\tdescription = command.get('description', \"\")\n\t\t\tpermission_str = command.get('permission', None)\n\t\t\taction = command.get('action', None)\n\t\t\tmin_votes = command.get('min_votes', None)\n\t\t\targs = command.get('args', None)\n\t\t\taliases = command.get('aliases', None)\n\t\t\tif(command_name is None \n\t\t\t\tor permission_str is None \n\t\t\t\tor action is None \n\t\t\t\tor min_votes is None \n\t\t\t\tor args is None):\n\t\t\t\tself.log.warn(\"Command '{}': Error, missing 'permission', 'action', 'min_votes', or 'args' elements for command \".format(command_name))\n\t\t\t\tcontinue\n\n\t\t\t# Verify the votes and permission string are valid\n\t\t\tif(min_votes < 0):\n\t\t\t\tself.log.warn(\"Command '{}': Error, min_votes cannot be less than zero for command {}\".format(command_name, min_votes))\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tself.log.debug(\"Command '{}': minimum votes is {}\".format(command_name, min_votes))\n\n\t\t\ttry:\n\t\t\t\tpermission = Permission[permission_str]\n\t\t\t\tself.log.debug(\"Command '{}': permission is {}\".format(command_name, permission))\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.warn(\"Command '{}': Error, permission string '{}' is invalid, must be one of: {}\".format(command_name, permission_str, Permission.__members__))\n\t\t\t\tcontinue\n\n\t\t\t# Try to get the corresponding action class\n\t\t\ttry:\n\t\t\t\tmodule = import_module(\"obs.actions.\"+action)\n\t\t\t\tclass_ = getattr(module, action)\n\t\t\t\tself.log.debug(\"Command {}: action is {}\".format(command_name, class_))\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.warn(\"Command '{}': Error, no such action {} is defined. 
Full error: {}\".format(command_name, action, e))\n\t\t\t\tcontinue\n\n\t\t\t# Try to instantiate the action class\n\t\t\ttry:\n\t\t\t\tself.log.debug(\"Command {}: args are: {}\".format(command_name, args))\n\t\t\t\tcommand_obj = class_(self, command_name, aliases, description, permission, min_votes, args)\n\t\t\texcept ValueError as e:\n\t\t\t\tself.log.warn(e)\n\t\t\t\tcontinue\n\n\t\t\t# Add command_obj to internal reference\n\t\t\tself.commands[command_name] = command_obj\n\n\t\t\t# If there are aliases, add them too\n\t\t\t\n\t\t\tif(not aliases is None and isinstance(aliases, (list,) )):\n\t\t\t\tself.log.debug(\"Command '{}': Found aliases {}\".format(command_name, aliases))\n\t\t\t\tfor alias in aliases:\n\t\t\t\t\tself.commands[alias] = command_obj\n\t\t\telse:\n\t\t\t\tself.log.debug(\"Command '{}': No aliases\".format(command_name, aliases))\n\n\t\t# Finally after all commands have been initialized then add the help command\n\t\t#self.commands['help'] = Help(self)\n\n\t\t# Done initializing\n\t\tself.log.info(\"...Commands initialized: {}\".format(\n\t\t\t\tlist( self.commands.keys()) \n\t\t\t)\n\t\t)", "def load(self):\n\n self.commands = {\n # Usual text commands (e.g. \"/echo 123\")\n 'user': {},\n 'owner': {\n 'load': self.load,\n 'modprobe': self.modprobe,\n 'rmmod': self.rmmod\n },\n # Modules for bot's reaction to a different message types\n 'text': {},\n 'photo': {},\n 'audio': {},\n 'video': {},\n 'sticker': {},\n 'voice': {}\n }\n\n for file in os.listdir('modules'):\n if file.endswith('.py'):\n command_type, command = file.split('_', 1)\n self.modprobe(self, command[:-3])", "async def _register_command(self) -> JSON:\n loop = asyncio.get_event_loop()\n async with aiohttp.ClientSession() as session:\n async with session.post(\n url=InteractionRoute().application(self._application_id).commands(self._id).url,\n json=self._data\n ) as response:\n interaction: JSON = await response.json(encoding='utf-8')\n return interaction", "def commands(self, commands):\n\n self._commands = commands", "def load_commands():\n return [AddBook, FindBook, FindBooks, EditBook, RemoveBook, ReviewBook]", "def register_cli(cls):\n for cmd in cls.SUB_GROUP_COMMANDS:\n getattr(cls, cls.SUB_GROUP_NAME).add_command(getattr(cls, cmd))", "def at_cmdset_creation(self):\n self.add(Command())", "def getCommands(self):", "def loadStdCommands(self, player):\n player.addCommand('spawn', self.commands['spawn']())\n player.addCommand('edit', self.commands['edit']())\n player.addCommand('search', self.commands['search']())\n player.addCommand('warp', self.commands['warp']())\n player.addCommand('addstat', self.commands['addstat']())\n player.addCommand('delstat', self.commands['delstat']())\n player.addCommand('savezone', self.commands['savezone']())\n player.addCommand('obliterate', self.commands['obliterate']())", "def at_cmdset_creation(self):\n self.add(power.CmdPower())\n self.add(CmdCursedBone())\n # self.add(CmdDeathSpike())\n \"\"\"\n self.add(CmdAnchor())\n self.add(CmdBloodCloak())\n self.add(CmdBloodShield())\n self.add(CmdBloodWard())\n self.add(CmdBodyToMind())\n self.add(CmdBoneScythe())\n self.add(CmdCircleDeath())\n self.add(CmdCorpseBurst())\n self.add(CmdCorpseDrain())\n self.add(CmdCreateBloodGem())\n self.add(CmdCurseDeathLink())\n self.add(CmdDeathRain())\n self.add(CmdDeathWard())\n self.add(CmdDisease())\n self.add(CmdBoneDust())\n self.add(CmdGloom())\n self.add(CmdImbueBlood())\n self.add(CmdImbueDeath())\n self.add(CmdMassSilence())\n self.add(CmdMassSleep())\n 
self.add(CmdMassAnchor())\n self.add(CmdMassWeakness())\n self.add(CmdPlague())\n self.add(CmdPoison())\n self.add(CmdPoisonCloud())\n self.add(CmdSilence())\n self.add(CmdSleep())\n self.add(CmdSpectralHunter())\n self.add(CmdSummon())\n self.add(CmdSummonCorruptedMan())\n self.add(CmdSummonCursedArmy())\n self.add(CmdSummonCursedMan())\n self.add(CmdSummonReanimatedMan())\n self.add(CmdTeleport())\n self.add(CmdTeleportOther())\n self.add(CmdTransferPain())\n self.add(CmdVampiricClaw())\n self.add(CmdVampiricTouch())\n self.add(CmdWeakness())\n \"\"\"" ]
[ "0.693104", "0.69148487", "0.68428314", "0.6794718", "0.67374337", "0.6665606", "0.6656452", "0.66422254", "0.66422254", "0.66422254", "0.66422254", "0.66411626", "0.6629551", "0.6580605", "0.6538936", "0.6527246", "0.65155095", "0.64533067", "0.6446554", "0.6403348", "0.6400683", "0.6277905", "0.6251568", "0.6199836", "0.61210585", "0.61151886", "0.6099425", "0.6067446", "0.60435516", "0.60411686" ]
0.77935547
0
Retrieve UserRoles object. This is a very important and useful API call, it returns a complete list of privileges the currently authorized user has within all the teams and companies they have access to.
def get_user_roles(self): url = 'userroles' result = self.get(url) return result.get('userroles', result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def roles(self):\n params = {\n \"f\" : \"json\"\n }\n uURL = self._url + \"/roles\"\n return self._con.get(path=uURL, params=params)", "def listRoles(self):\n return self._client.listRoles()", "def get_roles(self) -> requests.models.Response:\n return self.get('v1/roles')", "def list(self, **kwargs):\n # TODO(adriant): Look up user by name/id\n url = '/openstack/users/%s/roles' % kwargs['user']\n return self._list(url, 'roles')", "def user_roles():\n access_token = _request_ctx_stack.top.current_user_token\n message_log(\"Got access token for user roles\")\n user_roles = get_user_roles(access_token)\n return json.dumps(list(user_roles))", "def list_roles(self):\n resp, body = self.get(\"roles\")\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBodyList(resp, body['roles'])", "def get_roles(self, include_remote=True):\n rbac_service = get_rbac_backend().get_service_class()\n result = rbac_service.get_roles_for_user(\n user_db=self, include_remote=include_remote\n )\n return result", "def _get_roles(self):\n return api.tuskar.OvercloudRole.list(self.request)", "def list(self):\n return self.client.find_all_roles()", "def token_auth_get_user_roles(user):\n print(user)\n return user.get_roles()", "def get_user_roles(user_id: str) -> list:\n response = api.get_user_roles(user_id)\n\n if not response.ok:\n print(response.data)\n sys.exit(1)\n\n return response.data.get(\"items\")", "async def get_user_roles(request):\n\n user_id = request.match_info[\"user_id\"]\n try:\n user_id = int(user_id)\n except (ValueError, TypeError):\n return web.Response(status=400, text=\"Incorrect user_id\")\n\n user = request.cirrina.db_session.query(User).filter_by(id=user_id).first()\n if not user:\n return web.Response(status=404, text=\"User not found\")\n\n data = {\n \"username\": user.username,\n \"user_id\": user.id,\n \"roles\": {\"owner\": [], \"member\": [], \"manager\": []}, # FIXME : use USER_ROLES\n }\n\n roles = (\n request.cirrina.db_session.query(UserRole)\n .filter_by(user_id=user_id)\n .join(Project)\n .filter(UserRole.project_id == Project.id)\n .order_by(Project.name)\n .values(UserRole.role, Project.id, Project.name)\n )\n\n for role in roles:\n data[\"roles\"][role.role].append({\"id\": role.id, \"name\": role.name})\n\n return web.json_response(data)", "def getAllRoles(self):\n\n # Learn URL of AllRoles service\n url = self.config.get(\"Authorization\",\"allroles\") # http://erra.ccss.cz/g4i-portlet/service/list/roles/en\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] AllRoles url: %s\"% url)\n \n # Request all roles from LifeRay\n import httplib2\n h = httplib2.Http()\n header, content = h.request(url, \"GET\")\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] response header: %s\"% header)\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] response content: %s\"% content)\n\n # Parse the response\n try:\n allRolesJson = json.loads(content)\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] AllRoles reply succesfully parsed\")\n except ValueError,e:\n logging.error(\"[LaymanAuthLiferay][getAllRoles] Cannot parse AllRoles reply: '%s'\"% content)\n raise AuthError(500, \"Cannot parse GET All Roles response [%s] as JSON:%s\"% (content,e)) \n \n roles = allRolesJson[\"roles\"]\n\n # lower() and spaces\n for rr in roles:\n rr[\"roleName\"] = rr[\"roleName\"].lower()\n rr[\"roleName\"] = \"_\".join(rr[\"roleName\"].split(' '))\n\n # Return roles\n logging.debug(\"[LaymanAuthLiferay][getAllRoles] Return roles: %s\"% str(roles))\n return 
roles", "def list_roles():\n\tsession = get_session()\n\tresponse = session.get(\"{url}/api/roles\".format(url=get_registry_url()))\n\treturn response.json()[\"results\"]", "def get_roles():\n check_admin()\n roles = Role.query.all()\n\n return render_template('admin/roles/roles.html', roles=roles, title=\"Roles\")", "def get_roles(self):\n path = \"%s/services/impala/roles\" % self.__base_path\n response = self.__session.get(path)\n self.__check_status_code(response.status_code)\n return response.json()", "def list_available_roles(self, uid):\n uid = self._check_uid(uid)\n role_data = self._router_request(\n self._make_request_data(\n 'getRolesList',\n data=dict(\n uid=uid\n )\n )\n )\n\n return role_data['data']", "def list_user_roles_on_project(self, project_id, user_id):\n resp, body = self.get('projects/%s/users/%s/roles' %\n (project_id, user_id))\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBodyList(resp, body['roles'])", "def get_roles():\n\n # Get instance of RolesOperations Class\n roles_operations = RolesOperations()\n\n # Call get_roles method\n response = roles_operations.get_roles()\n\n if response is not None:\n\n # Get the status code from response\n print('Status Code: ' + str(response.get_status_code()))\n\n if response.get_status_code() in [204, 304]:\n print('No Content' if response.get_status_code() == 204 else 'Not Modified')\n return\n\n # Get object from response\n response_object = response.get_object()\n\n if response_object is not None:\n\n # Check if expected ResponseWrapper instance is received.\n if isinstance(response_object, ResponseWrapper):\n\n # Get the list of obtained Role instances\n roles_list = response_object.get_roles()\n\n for role in roles_list:\n # Get the DisplayLabel of each Role\n print(\"Role DisplayLabel: \" + str(role.get_display_label()))\n\n # Get the forecastManager User instance of each Role\n forecast_manager = role.get_forecast_manager()\n\n # Check if forecastManager is not None\n if forecast_manager is not None:\n\n # Get the ID of the forecast Manager\n print(\"Role Forecast Manager User-ID: \" + str(forecast_manager.get_id()))\n\n # Get the name of the forecast Manager\n print(\"Role Forecast Manager User-Name: \" + str(forecast_manager.get_name()))\n\n # Get the ShareWithPeers of each Role\n print(\"Role ShareWithPeers: \" + str(role.get_share_with_peers()))\n\n # Get the Name of each Role\n print(\"Role Name: \" + role.get_name())\n\n # Get the Description of each Role\n print(\"Role Description: \" + str(role.get_description()))\n\n # Get the Id of each Role\n print(\"Role ID: \" + str(role.get_id()))\n\n # Get the reporting_to User instance of each Role\n reporting_to = role.get_reporting_to()\n\n # Check if reporting_to is not None\n if reporting_to is not None:\n # Get the ID of the reporting_to User\n print(\"Role ReportingTo User-ID: \" + str(reporting_to.get_id()))\n\n # Get the name of the reporting_to User\n print(\"Role ReportingTo User-Name: \" + str(reporting_to.get_name()))\n\n # Get the AdminUser of each Role\n print(\"Role AdminUser: \" + str(role.get_admin_user()))\n\n # Check if the request returned an exception\n elif isinstance(response_object, APIException):\n # Get the Status\n print(\"Status: \" + response_object.get_status().get_value())\n\n # Get the Code\n print(\"Code: \" + response_object.get_code().get_value())\n\n print(\"Details\")\n\n # Get the details dict\n details = response_object.get_details()\n\n for key, value in details.items():\n 
print(key + ' : ' + str(value))\n\n # Get the Message\n print(\"Message: \" + response_object.get_message().get_value())", "def list(self, **kwargs):\n params = {}\n url = '/openstack/roles?%(params)s' % {\n 'params': parse.urlencode(params, True)\n }\n return self._list(url, 'roles')", "def get_roles(self):\n\t\tif not self.roles:\n\t\t\tself.roles = get_roles(self.name)\n\t\treturn self.roles", "def get_user_roles(user=None):\n if user is None:\n user = g.user\n return user.roles", "def get_all_roles(self):\n token = self.get_token_auth_header()\n unverified_claims = jwt.get_unverified_claims(token)\n return self.jwt_role_callback(unverified_claims)", "def getRoles():\n return jsonify(listRoles(ROLES_DIR))", "def getRoles(context):\n\n pmemb = getToolByName(getSite(), 'portal_membership')\n roles = [role for role in pmemb.getPortalRoles() if role != 'Owner']\n return SimpleVocabulary.fromValues(roles)", "def listUserRoles(self, name):\n return self._client.listUserRoles(name)", "def get_roles_list(self):\n try:\n roles = self.db_handler.get_roles_list()\n self.logger.write_to_log('roles got', 'model')\n return roles\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def roles(self):\n return self._roles", "def user_roles(request):\n logger.debug('user roles')\n if not hasattr(request, 'user') or not request.user.is_authenticated:\n return {\n 'admin': False,\n 'super user': False,\n 'roles': {},\n }\n # here you get in db or ldap the user's authorizations\n # in this skeleton I did not share it for now\n admin = False\n super_user = False\n roles = {'api_file': {'GET': True, 'POST': False}}\n return {\n 'admin': admin,\n 'super user': super_user,\n 'roles': roles,\n }", "def all_organization_member_roles(\n self,\n id: str,\n user_id: str,\n page: int | None = None,\n per_page: int | None = None,\n ) -> list[dict[str, Any]]:\n params = {\"page\": page, \"per_page\": per_page}\n return self.client.get(\n self._url(id, \"members\", user_id, \"roles\"), params=params\n )" ]
[ "0.7616649", "0.74757826", "0.74659115", "0.7441553", "0.7416953", "0.7209505", "0.720258", "0.71936095", "0.7161377", "0.7090873", "0.7051462", "0.7028158", "0.6980865", "0.6955454", "0.6946875", "0.6920613", "0.69160676", "0.6897977", "0.683362", "0.6829747", "0.68292475", "0.68265396", "0.67393553", "0.67325294", "0.6718806", "0.6706972", "0.66471225", "0.6634886", "0.66161215", "0.6600932" ]
0.8221138
0
Retrieve the user object from the user reference.
def get_user(self, user_reference): url = 'users/{0}'.format(user_reference) result = self.get(url) return result.get('user', result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_reference_user(self):\n try:\n ref = User.objects.get(\n associated_emails__email__iexact=self.reference_email,\n associated_emails__is_verified=True)\n return ref\n except ObjectDoesNotExist:\n return None", "def get_user(self) -> User:\n return self.__user", "def user(self):\n if self._user is None:\n pk, full_name = self.owner.split(',')\n pk = int(pk)\n self._user = User.objects.get(pk=pk)\n return self._user", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def get_user(self, object_id):\n return self.get_object(\"user\", object_id)", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def get_user(self):\n if \"user\" not in self._data:\n self._data[\"user\"] = User.objects.get(pk=self.kwargs[\"user_id\"])\n return self._data[\"user\"]", "def user(self):\n return self.getattr('user')", "def get_user(self, user, instance=None):\n instance = self._get_resource(_instance.Instance, instance)\n return self._get(_user.User, user)", "def get_user(user_ref):\n UserModel = get_user_model()\n try:\n return UserModel.objects.get(username=user_ref)\n except UserModel.DoesNotExist:\n return UserModel.objects.get(email=user_ref)", "def get_user(self):\n return self.user", "def get_user(self):\n return self.user", "def get_object(self):\n return User.objects.get(username=self.request.user.username)", "def get_user(self):\n\n user_session = self.get()\n if not user_session:\n return None\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n return us.single(user_session.login)", "def get(self, user_id):\n return User.get(user_id)", "def get_user(self, instance, name):\n return instance.get_user(name)", "def get_user(self, user_id=None):\n raise NotImplementedError", "def user(self):\r\n return resource.User(self)", "def get_user(self):\n raise NotImplementedError", "def user(self):\n if \"user\" in self._prop_dict:\n if isinstance(self._prop_dict[\"user\"], OneDriveObjectBase):\n return self._prop_dict[\"user\"]\n else :\n self._prop_dict[\"user\"] = User(self._prop_dict[\"user\"])\n return self._prop_dict[\"user\"]\n\n return None", "def get_user(self):\n return self.get('users/self')", "def user(self):\r\n return users.User(self)", "def get(user):\n if user:\n return Member.get_by_key_name(user.user_id())", "def get_user(self, user_id):\n return UserModel._default_manager.get(pk=user_id)", "def get_user(self, user_id):\n return None # noqa: WPS324", "def user(self):\n return self._user", "def user(self):\n return self._user", "def user(self):\n return self._user", "def user(self):\n return self._user", "def user(self, user_id):\r\n return users.User(self, user_id)" ]
[ "0.78336525", "0.7715592", "0.7700846", "0.7665564", "0.7653078", "0.76358473", "0.7595922", "0.7539751", "0.75345474", "0.7522908", "0.75210303", "0.75210303", "0.74338484", "0.73960936", "0.73945874", "0.73929757", "0.73423374", "0.72848636", "0.72711205", "0.72705686", "0.7268451", "0.7217297", "0.7207012", "0.7183442", "0.7180589", "0.71605617", "0.71605617", "0.71605617", "0.71605617", "0.7159645" ]
0.79839873
0
Retrieve the list of companies to which the current authorized user \ has access.
def get_companies(self): url = 'companies' result = self.get(url) return result['companies']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_queryset(self):\n return self.request.user.setting_set.get().companies", "def get_companies(self, obj):\n userCompanies = get_objects_for_user(\n obj, \"view_company\", klass=models.Company)\n return [x.id for x in userCompanies]", "def get_companies(self, **kwargs):\n return self.get('companies.json', **kwargs)", "def companies(self):\n from hubspot3.companies import CompaniesClient\n\n return CompaniesClient(**self.auth, **self.options)", "def get_companies(request):\n companies = Company.objects.all()\n context={'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n return Response(serializer.data)", "def get_companies(self):\n response = self.do_request('/undertaking/list')\n if response:\n return response.json()", "def company_lists(self):\n return self.client.get('company/named-lists')", "def get_all_companies_and_people():", "def get_companies(self, obj):\n groupCompanies = get_objects_for_group(\n obj, \"view_company\", klass=models.Company)\n return [x.id for x in groupCompanies]", "def get_companies(self):\n response = self.do_request('/management/companies/export/json')\n if response:\n return response.json()", "def get_companies():\n all_companies = storage.all(Company).values()\n list_companies = []\n for company in all_companies:\n list_companies.append(company.to_dict())\n return jsonify(list_companies)", "def companies():\n res = requests.get('http://0.0.0.0:5002/companies')\n return jsonify(res.json())", "def get_queryset(self):\n qs = super().get_queryset()\n qs.filter(company=self.request.user.company)\n return qs", "def all_companies(login_details):\n output = None\n sql = u'SELECT client_company_ID ' \\\n u'FROM client_company_TBL;'\n\n c, conn = connection(login_details)\n try:\n c.execute(sql)\n values = c.fetchall()\n if values is not None:\n output = values\n finally:\n conn_close(c, conn)\n\n return output", "def read_companies():\n list_of_companies = data_manager.get_data_from_file(filename=\"company/company_data.csv\")\n return list_of_companies", "def get_available_companies(team):", "def company(self):\n\n x = 0\n my_company = self.data[\"Company Name\"]\n my_account = self.data[\"Account\"]\n result = []\n for i in my_company:\n my_string = i + \" -- \" + my_account[x]\n x += 1\n result.append(my_string)\n\n return result", "def get_isAdminOf(self, obj):\n userCompanies = get_objects_for_user(\n obj, \"change_company\", klass=models.Company, accept_global_perms=False)\n return [x.id for x in userCompanies]", "def get_queryset(self):\n user = self.request.user\n collabLists = ListObject.objects.filter(collaborators__id=user.id)\n return collabLists", "def get_available_companies_and_people(team):", "def test_get_all_companies(self):\n create_company()\n res = self.client.get(ALL_COMPANIES_LIST)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def for_company(cls, company_id):\n return cls.objects.filter(vacancy__company__id=company_id)", "def company(self):\n return self._company", "def company(self):\n return self._company", "def get_companies(request):\n try:\n companies = []\n for company in Company.objects.all():\n companies.append(company.dump_to_dict())\n\n return format_ajax_response(True, \"Companies list retrieved successfully.\", {'companies': 
companies})\n except Exception as ex:\n logging.error(\"failed to get_companies: %s\" % ex)\n return format_ajax_response(False, \"There was a problem retrieving the companies listing.\")", "def list_all(request):\n companies = Company.objects.order_by('-created')\n context = dict(companies=companies)\n return render(request, 'companies/all.html', context)", "def get_company_users(self, company_referece, active=True):\n url = 'companies/{0}/users'.format(company_referece)\n if active:\n data = {'status_in_company': 'active'}\n else:\n data = {'status_in_company': 'inactive'}\n result = self.get(url, data)\n return result.get('users', result)" ]
[ "0.78642094", "0.78642094", "0.78642094", "0.78642094", "0.7753397", "0.73986655", "0.738589", "0.73140615", "0.7313578", "0.7297904", "0.7194016", "0.71020436", "0.7099391", "0.6925291", "0.68956596", "0.6662987", "0.66623086", "0.6641846", "0.6559034", "0.6555118", "0.6300432", "0.6262147", "0.62536037", "0.62259656", "0.6208141", "0.61857396", "0.61857396", "0.6172328", "0.6100705", "0.61004084" ]
0.79357696
0
Retrieve the company object from the company reference.
def get_company(self, company_referece): url = 'companies/{0}'.format(company_referece) result = self.get(url) return result.get('company', result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def company(self):\n return self._company", "def company(self):\n return self._company", "def get_company(self, name):\n return self.instance.company.id", "def get_company(self, name):\n return self.store.company.id", "def get_company(self, name):\n return self.website.company.id", "def get_company(company_id):\n company = storage.get(Company, company_id)\n if not company:\n abort(404)\n\n return jsonify(company.to_dict())", "def get_one(self, company_id, *args, **kw):\n log.debug('company_id {} {}'.format(company_id, type(company_id)))\n\n company = M.Company.query.get(index=company_id)\n log.debug('company {}'.format(company))\n if(company):\n kw['_id'] = company._id\n return super(CompanyAPIController, self).get_one(*args, **kw)", "def get_companies(self, **kwargs):\n return self.get('companies.json', **kwargs)", "def design_company(self):\n return self._design_company", "def get_company(self, cmp):\n if cmp in self.cnames:\n return self.cnames[cmp]\n else:\n return None", "def _company(self, uid=1):\r\n company = self.env['res.company'].browse(uid)\r\n return {\r\n 'journal': company.pledge_journal.id,\r\n 'product': company.pledge_product,\r\n 'account': company.pledge_receipt_account.id,\r\n 'property_account_income': company.pledge_product.property_account_income_id,\r\n }", "def get_companies(self):\n url = 'companies'\n result = self.get(url)\n return result['companies']", "def companies(self):\n from hubspot3.companies import CompaniesClient\n\n return CompaniesClient(**self.auth, **self.options)", "def get_company_to_contacts(self, company_id: str):\n return self.get(object_id=company_id, definition=Definitions.COMPANY_TO_CONTACT)", "def get_company(self, from_email, to_email):\n to_email = self._extract_email_address(to_email)\n from_email = self._extract_email_address(from_email)\n # use from and to email addresses combination as a primary key\n _id = base64.b64encode(bytes(from_email+\"-\"+to_email, encoding='utf-8'))\n res = self._client.get_item(\n TableName='Company',\n Key={\n 'id':{\n 'S':_id.decode('utf-8')\n }\n }\n )\n if 'Item' in res:\n return res['Item']['company']['S']\n else:\n return 'unknown'", "def get_active_company(request):\n from project.models import get_user_profile_ex\n profile = get_user_profile_ex(request.user)\n try:\n company = profile.active_company\n except:\n company = None\n if company is None:\n raise Exception('Please select active company in user\\'s profile')\n return company", "def companies_properties(self):\n from hubspot3.companies_properties import CompaniesPropertiesClient\n\n return CompaniesPropertiesClient(**self.auth, **self.options)", "def test_get_company_props_by_company_id_using_get(self):\n pass", "def company_name(self):\n if \"companyName\" in self._prop_dict:\n return self._prop_dict[\"companyName\"]\n else:\n return None", "def default_company():\n return Transaction().context.get('company')", "def get_company_to_deals(self, company_id: str):\n return self.get(object_id=company_id, definition=Definitions.COMPANY_TO_DEAL)", "def get_single_company(self, code_type, company_code) -> pd.DataFrame:\n validate_type(code_type, str, 'Unexpected value: code_type must be str')\n validate_type(company_code, str, 'Unexpected value: company must be str')\n\n headers_dict = {\n 'user-key': self.user_key.key\n }\n\n endpoint = f'{const.API_HOST}{const.API_SNAPSHOTS_COMPANIES_BASEPATH}/{code_type}/{company_code}'\n\n response = req.api_send_request(method='GET', endpoint_url=endpoint, headers=headers_dict)\n\n if response.status_code 
== 200:\n response_data = response.json()\n return pd.DataFrame.from_records([response_data['data']['attributes']])\n\n raise RuntimeError('API Request returned an unexpected HTTP status')", "def company_name(self) -> Optional[str]:\n return pulumi.get(self, \"company_name\")", "def get_company(self, code_type, company_codes) -> pd.DataFrame:\n if type(company_codes) is str:\n return self.get_single_company(code_type, company_codes)\n elif type(company_codes) is list:\n return self.get_multiple_companies(code_type, company_codes)\n else:\n raise ValueError('company_codes must be a string or a list')", "def get_company_founded(self):\n return self.company_founded", "def company(self, company):\n self._company = company", "def get_company_id_value(self):\n return self.company_id_value", "def get_company_name(self):\n\t\treturn call_sdk_function('PrlLic_GetCompanyName', self.handle)", "def update_company(cls, **kwargs):\n return cls._do_call(\n 'PUT', cls.api_endpoint + 'companies', params=kwargs)", "def company():\n\n company = Company.objects.create(name='Tre G.M.B.H.', country='Germany')\n return company" ]
[ "0.75074327", "0.75074327", "0.72663546", "0.7220919", "0.7126427", "0.6900781", "0.670451", "0.6574739", "0.6413427", "0.6407828", "0.63729036", "0.6328418", "0.60932", "0.6011323", "0.5989269", "0.59663576", "0.5930345", "0.5922437", "0.58928245", "0.5855893", "0.58174187", "0.5777587", "0.5765768", "0.5759101", "0.57032144", "0.567092", "0.56641734", "0.5661111", "0.56545514", "0.56535125" ]
0.83995926
0
Retrieve a list of teams within the company being referenced. User has to have access to the referenced company.
def get_company_teams(self, company_referece): url = 'companies/{0}/teams'.format(company_referece) result = self.get(url) return result.get('teams', result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_teams(self):\n url = 'teams'\n result = self.get(url)\n return result.get('teams', result)", "def teams(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'teams')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_teams(self, account_id):\n endpoint = '/accounts/{}/teams'.format(account_id)\n return self._api_call('get', endpoint)", "def get_teams(self, *args, **kwargs):\n\n teams_data = api.get_teams(\n *args,\n api_key=self.__creds.api_key_v2,\n **kwargs)\n return [en.Team(creds=self.__creds, **team_data) for team_data in teams_data]", "def get_teams(self):\n url = '{}/api/v4/teams'.format(self.server_url)\n response = self._request(self._session.get, url)\n return json.loads(response.content)", "def teams(self):\n return self._get_by_class(Team)", "def teams(self) -> 'outputs.OfficeDataConnectorDataTypesResponseTeams':\n return pulumi.get(self, \"teams\")", "def getAllTeams(self):\n return []", "def get_teams():", "def get_teams():\n teams = []\n for teamId in range(1, 68):\n t = requests.get(TEAMS_URL.format(teamId)).json()\n team_list = t.get('teams')\n if team_list is None or len(team_list) == 0:\n continue\n teams.append(Team.from_json(team_list[0]))\n return teams", "def get_team_list(self):\n result = dict\n managers = User.get_users([UserRole.ProjectManager])\n for manager in managers:\n result.update({manager.get_username(): manager.get_team_members()})\n return build_team_list(result)", "def get_all_teams(self):\n return self._db.Teams.find({})", "async def get_teams(\n self,\n payload: Union[dict, List[dict]],\n concurrent_tasks: Optional[int] = 10,\n sort: Optional[str] = None,\n ) -> Union[List[Dict[str, Any]], List[List[Dict[str, Any]]]]:\n\n return await self._get(\"/teams\", payload, concurrent_tasks, sort)", "def teams():\n print 'Getting Teams'\n\n substring = \"%\" + request.args.get('t') + \"%\"\n\n team_list = datastore.get_teams_typeahead(engine, substring, max_teams=10)\n\n print 'Teams:', team_list\n return jsonify(team_list)", "def get_user_teams(user_id):\n teams = []\n response = Litmos.get_user_teams(user_id)\n for res in response:\n try:\n team = Team.objects.get(team_id=res['Id'])\n teams.append(team)\n except Team.DoesNotExist:\n continue\n return teams", "def get_teams(self, *teams):\n teams = [convert_name(team, how='abbr') for team in teams]\n return self._db.Teams.find({'Tm' : {'$in' : teams}})", "def get_companies_and_people(team):", "def test_user_get_teams():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/teams')\n assert r.status_code == 200\n destroy_ctfd(app)", "def test_get_all_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users'.format(team.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def list_teams():\n name = request.args.get(\"name\", None)\n\n # Search team by name\n if name:\n team = TeamController.get(filters={\"Team\": {\"name\": name}})\n return jsonify(format_team(team)), 200\n\n # Otherwise list of the teams\n teams = TeamController.list()\n return jsonify({\"teams\": [format_team(s) for s in teams]}), 200", "def teams(self):\n t = [e[0] for e in self.pick_set.all().values_list(\"team\")]\n return Team.objects.filter(id__in=set(t))", 
"def list_teams(self, user_id=None):\n # Create initial query with placeholder for the table that the teams\n # are selected from.\n sql = 'SELECT t.id, t.name, t.owner_id, COUNT(*) as member '\n sql += 'FROM {} t, team_member m WHERE t.id = m.team_id '\n sql += 'GROUP BY t.id, t.name, t.owner_id'\n # Depending on whether the user id is given the teams are either\n # taken directly from the teams table of a sub-query that filters\n # teams that the user is member of.\n if not user_id is None:\n team_table = 'SELECT id, name, owner_id FROM team t1, team_member m1 '\n team_table += 'WHERE t1.id = m1.team_id AND m1.user_id = ?'\n team_table = '(' + team_table + ')'\n bindings = (user_id,)\n else:\n team_table = 'team'\n bindings = ()\n sql = sql.format(team_table)\n result = list()\n for team in self.con.execute(sql, bindings).fetchall():\n result.append(\n TeamDescriptor(\n identifier=team['id'],\n name=team['name'],\n owner_id=team['owner_id'],\n member_count=team['member']\n )\n )\n return result", "def get_available_companies(team):", "def list(self, request):\n teams = self.controller.retrieve_all_teams()\n serializer = data_serializers.PresentTeamSerializer(teams, many=True)\n return Response(serializer.data)", "def test_teams_get_teams_v2(self):\n pass", "def test_teams_get_teams_v1(self):\n pass", "def get_all_teams():\n # Try to get all teams from database\n query = Team.query\n\n try:\n teams = query.all()\n\n # If query returns no teams, return erorr\n if len(teams) == 0:\n return jsonify({'error': 'No results found!'}), 404\n\n # If no result found, return error\n except NoResultFound:\n return jsonify({'error': 'No result found!'}), 404\n\n # If some other sqlalchemy error is thrown, return error\n except SQLAlchemyError:\n return jsonify({'error': 'Some problem occurred!'}), 400\n\n # Serialize array of teams\n team_schema = TeamSchema(many=True)\n output = team_schema.dump(teams).data\n\n # Return json response\n return jsonify(\n {\n 'num_results': str(len(output)),\n 'success': 'Successfully retrieved teams!',\n 'teams': output,\n }\n ), 200", "def teams(request):\n locales = Locale.objects.available().prefetch_related(\"latest_translation__user\")\n\n form = LocaleRequestForm()\n\n if not locales:\n return render(request, \"no_projects.html\", {\"title\": \"Teams\"})\n\n return render(\n request,\n \"teams/teams.html\",\n {\n \"locales\": locales,\n \"form\": form,\n \"top_instances\": locales.get_top_instances(),\n },\n )", "def collect_teams(year: int = 2005) -> None:\n\n\twith open('../resources/config.json') as config_file, open('../resources/secrets.json') as secrets_file:\n\t\tconfig_json = json.load(config_file)\n\t\tsecrets_json = json.load(secrets_file)\n\n\t\turl = '/'.join(['http:', '', config_json['base_url'], config_json['fbs_teams_endpoint']])\n\t\tapi_key = secrets_json['api_key']\n\n\theaders = {'Authorization': api_key}\n\tparams = {'year': year}\n\n\tresponse = requests.get(url, headers = headers, params = params).json()\n\n\t# dict of one array for json dump\n\tteam_names = {'teamNames': list(map(lambda r: r['school'], response))}\n\n\twith open('../resources/teams.json', 'w') as teams_file:\n\t\tjson.dump(team_names, teams_file)", "def teams(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"teams\")" ]
[ "0.7466345", "0.73378897", "0.7215378", "0.7142608", "0.70981383", "0.69372785", "0.69193894", "0.6851884", "0.67941964", "0.6701132", "0.6668236", "0.66499996", "0.65841126", "0.65534383", "0.65458757", "0.64566386", "0.63817936", "0.6355966", "0.6346351", "0.6331452", "0.63274854", "0.6316139", "0.6292312", "0.62741166", "0.6254246", "0.62327003", "0.6225111", "0.6196994", "0.6154381", "0.6145168" ]
0.8338206
0
Retrieve a list of all users within the referenced company. Only available for users with hiring privileges for the company.
def get_company_users(self, company_referece, active=True): url = 'companies/{0}/users'.format(company_referece) if active: data = {'status_in_company': 'active'} else: data = {'status_in_company': 'inactive'} result = self.get(url, data) return result.get('users', result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def fetch_all_users():\n url = \"{}/workspace/{}/users\".format(V1_API_URL, WORKSPACE_ID)\n responses = requests.get(url, headers=HEADERS)\n return [\n {\n \"acronym\": user[\"name\"].lower(),\n \"clockify_id\": user[\"id\"],\n \"email\": user[\"email\"].lower(),\n }\n for user in responses.json()\n ]", "def get_users(self):\n users = []\n page = 1\n while not len(users) % 100:\n users += self._get('/users?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not users:\n break\n page += 1\n return users", "def get_all_users():", "def user_list(ctx):\n data = ctx.obj.get_all_users()\n output_json_data(data)", "def get_users(self):\n url = \"%s/api/v1/users\" % self.subdomain\n req = request.get(url, headers=self.api_headers)\n if request.ok(req):\n response_json = req.json()\n return response_json[\"users\"]\n else:\n return None", "def get_users(self):\n fields = ['name', ]\n return self.get_data(\"myUsers\", fields)", "def get_users(self):\n return self.execute(TABELLE['users']['select']['all'])", "def users(self,org_id=None):\n if org_id is None:\n org_id = self.org_id\n return self.get('{}/orgs/{}/users'.format(ApiVersion.A1.value,org_id))", "def get_users(self, *, Range=None, filter=None, fields=None, **kwargs):\n headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})\n function_endpoint = urljoin(self._baseurl, 'users')\n return self._call('GET', function_endpoint, headers=headers, **kwargs)", "def get_users():\n return db.fetch_users()", "def get_all_users_for_admin_purposes(connection):\r\n with connection:\r\n return connection.execute(GET_ALL_USERS).fetchall()[1]", "def get_users(self):\n return self.get_all_dbusers()", "def get_all_users():\n return jsonify(admin.get_all_users(current_app.scoped_session()))", "def get_user_list():\r\n session = tables.get_session()\r\n if session is None:\r\n return {'success': False, 'reason': 'failed'}\r\n try:\r\n user_account = UserAccount()\r\n user_account.find_all_user(session)\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Get user details failed: %s', err)\r\n return {'success': False, 'reason': 'failed'}\r\n finally:\r\n session.close()\r\n return {'success': True}", "def get_all_users(self):\n \n sql = \"select * from users\"\n return self._query_all(sql)", "def get_users():\n coll = data_access.get_user_collection()\n users = [User(**u) for u in coll.find()]\n return users", "def list_users(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/users\"\n _body = None\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \"get user list Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"users List : %s\")\n return output[\"users\"]", "def get_user_list():\n\tudb = UserPageDB()\n\ttry:\n\t\tusers = udb.user_list()\n\t\treturn UserList([_transform_user(u) for u in users])\n\tfinally:\n\t\tudb.close()", "def listUsers(self):\n return self._client.listUsers()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get(self):\n return get_all_users()", "def get_users(self, 
*args, **kwargs):\n\n users_data = api.get_users(\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return [en.User(creds=self.__creds, **user_data) for user_data in users_data]", "def get_users(self, query_args={}):\n endpoint = '/v3/educator/users'\n result = self.request(endpoint, query_args)\n\n users = []\n for data in result.response:\n user = User(data)\n users.append(user)\n\n return users", "def get_user_list(self, connection):\n http = get_web_service(connection)\n try:\n req = http.request('GET', connection[\"url\"] + '/users/?_format=json')\n data = json.loads(req.data.decode('utf-8'))\n # print(json.dumps(data, indent=4, sort_keys=True))\n return data\n except urllib3.exceptions.HTTPError as e:\n print(\"Connection error\")\n print(e)", "def getOkcupidUsers(self):\n\t\tself.logger.info(\"Get all OkCupid users\")\n\t\tusers = self.session.query(Models.Okcupid).all()\n\t\treturn users", "def get_users(self):\n return get_users(self['__store'].db, self)", "def users(self, site = None):\r\n uids = self.user_ids()\r\n if uids:\r\n users = Account._byID(uids, True, return_dict = False)\r\n return [self.ajax_user(u) for u in users]\r\n else:\r\n return ()" ]
[ "0.71246296", "0.7009031", "0.6966797", "0.6915568", "0.6887449", "0.6832766", "0.6830223", "0.6819139", "0.6808806", "0.679949", "0.6799372", "0.6789007", "0.67806226", "0.67742866", "0.67446244", "0.671317", "0.6694751", "0.66875106", "0.666239", "0.6648931", "0.6621379", "0.6621379", "0.6621379", "0.6621379", "0.6620884", "0.66177773", "0.6605091", "0.6596719", "0.6596649", "0.6589796" ]
0.817946
0
Retrieve a list of all the teams that a user has access to. This will return teams across all companies the current user has access to.
def get_teams(self):
    url = 'teams'
    result = self.get(url)
    return result.get('teams', result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_company_teams(self, company_referece):\n url = 'companies/{0}/teams'.format(company_referece)\n result = self.get(url)\n return result.get('teams', result)", "def teams(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'teams')\r\n\r\n return http.Request('GET', url), parsers.parse_json", "def get_teams(self, account_id):\n endpoint = '/accounts/{}/teams'.format(account_id)\n return self._api_call('get', endpoint)", "def get_teams(self):\n url = '{}/api/v4/teams'.format(self.server_url)\n response = self._request(self._session.get, url)\n return json.loads(response.content)", "def get_teams(self, *args, **kwargs):\n\n teams_data = api.get_teams(\n *args,\n api_key=self.__creds.api_key_v2,\n **kwargs)\n return [en.Team(creds=self.__creds, **team_data) for team_data in teams_data]", "def get_all_teams(self):\n return self._db.Teams.find({})", "def get_user_teams(user_id):\n teams = []\n response = Litmos.get_user_teams(user_id)\n for res in response:\n try:\n team = Team.objects.get(team_id=res['Id'])\n teams.append(team)\n except Team.DoesNotExist:\n continue\n return teams", "def getAllTeams(self):\n return []", "def get_team_list(self):\n result = dict\n managers = User.get_users([UserRole.ProjectManager])\n for manager in managers:\n result.update({manager.get_username(): manager.get_team_members()})\n return build_team_list(result)", "def teams(self):\n return self._get_by_class(Team)", "def restricted_teams(self, user):\n return []", "def list(self, request):\n teams = self.controller.retrieve_all_teams()\n serializer = data_serializers.PresentTeamSerializer(teams, many=True)\n return Response(serializer.data)", "def list_teams(self, user_id=None):\n # Create initial query with placeholder for the table that the teams\n # are selected from.\n sql = 'SELECT t.id, t.name, t.owner_id, COUNT(*) as member '\n sql += 'FROM {} t, team_member m WHERE t.id = m.team_id '\n sql += 'GROUP BY t.id, t.name, t.owner_id'\n # Depending on whether the user id is given the teams are either\n # taken directly from the teams table of a sub-query that filters\n # teams that the user is member of.\n if not user_id is None:\n team_table = 'SELECT id, name, owner_id FROM team t1, team_member m1 '\n team_table += 'WHERE t1.id = m1.team_id AND m1.user_id = ?'\n team_table = '(' + team_table + ')'\n bindings = (user_id,)\n else:\n team_table = 'team'\n bindings = ()\n sql = sql.format(team_table)\n result = list()\n for team in self.con.execute(sql, bindings).fetchall():\n result.append(\n TeamDescriptor(\n identifier=team['id'],\n name=team['name'],\n owner_id=team['owner_id'],\n member_count=team['member']\n )\n )\n return result", "def teams(self) -> 'outputs.OfficeDataConnectorDataTypesResponseTeams':\n return pulumi.get(self, \"teams\")", "def test_get_all_for_team(self):\n team = Team.create(name='foo', captain_id='User_cap',\n program_id=self.program.uid)\n team.put()\n user = User.create(name='foo', email='[email protected]',\n owned_teams=[team.uid])\n user.put()\n response = self.testapp.get(\n '/api/teams/{}/users'.format(team.uid),\n headers=self.login_headers(user),\n )\n response_list = json.loads(response.body)\n self.assertEqual(len(response_list), 1)", "def test_user_get_teams():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/teams')\n assert r.status_code == 200\n destroy_ctfd(app)", "def get_teams():\n teams = []\n for teamId in range(1, 68):\n t = requests.get(TEAMS_URL.format(teamId)).json()\n team_list = 
t.get('teams')\n if team_list is None or len(team_list) == 0:\n continue\n teams.append(Team.from_json(team_list[0]))\n return teams", "def teams():\n print 'Getting Teams'\n\n substring = \"%\" + request.args.get('t') + \"%\"\n\n team_list = datastore.get_teams_typeahead(engine, substring, max_teams=10)\n\n print 'Teams:', team_list\n return jsonify(team_list)", "def list_teams():\n name = request.args.get(\"name\", None)\n\n # Search team by name\n if name:\n team = TeamController.get(filters={\"Team\": {\"name\": name}})\n return jsonify(format_team(team)), 200\n\n # Otherwise list of the teams\n teams = TeamController.list()\n return jsonify({\"teams\": [format_team(s) for s in teams]}), 200", "def get_teams():", "def get(self, user_id):\n\n # Users can fetch only their own teams\n if current_user.id != int(user_id):\n abort(403)\n\n user = User.get_if_exists(user_id)\n\n if 'role' not in request.args:\n abort(400)\n\n role = request.args['role']\n if role not in ['manager', 'member']:\n abort(400)\n req_role = True if role == 'manager' else False\n\n team_links = [l for l in user.teams if l.manager is req_role]\n team_links.sort(key=lambda l: l.team.name)\n\n response = jsonify([l.team.serialize() for l in team_links])\n response.status_code = 200\n return response", "def getAllTeams(self):\n if self._teams is None:\n self._teams = []\n for match in self._matches.items():\n self._teams.append(match.homeTeam)\n return self._teams", "def team_list(request):\n template = loader.get_template('team/team_list.html')\n teams_list = Team.objects.all().order_by('name')\n\n if not request.user.is_authenticated:\n team = None\n else:\n team = request.user.profile.team\n\n paginator = Paginator(teams_list, 6)\n\n page = request.GET.get('page')\n try:\n teams = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n teams = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), deliver last page of results.\n teams = paginator.page(paginator.num_pages)\n\n context = {\n 'teams': teams,\n 'team': team\n }\n\n return CustomHttpResponse.send(template, context, request)", "def get_available_teams(self):\n teams = self.request.user.team_set.filter(competition__is_open=True)\n if not teams.exists():\n msg = \"Can't send invites at this time. 
You're not\"\n msg += \" registered for any open competitions\"\n messages.error(self.request, msg)\n raise Http404(msg)\n return teams", "def get_all_teams():\n # Try to get all teams from database\n query = Team.query\n\n try:\n teams = query.all()\n\n # If query returns no teams, return erorr\n if len(teams) == 0:\n return jsonify({'error': 'No results found!'}), 404\n\n # If no result found, return error\n except NoResultFound:\n return jsonify({'error': 'No result found!'}), 404\n\n # If some other sqlalchemy error is thrown, return error\n except SQLAlchemyError:\n return jsonify({'error': 'Some problem occurred!'}), 400\n\n # Serialize array of teams\n team_schema = TeamSchema(many=True)\n output = team_schema.dump(teams).data\n\n # Return json response\n return jsonify(\n {\n 'num_results': str(len(output)),\n 'success': 'Successfully retrieved teams!',\n 'teams': output,\n }\n ), 200", "def get_teams(self, *teams):\n teams = [convert_name(team, how='abbr') for team in teams]\n return self._db.Teams.find({'Tm' : {'$in' : teams}})", "def test_teams_get_users_teams_v1(self):\n pass", "async def get_teams(\n self,\n payload: Union[dict, List[dict]],\n concurrent_tasks: Optional[int] = 10,\n sort: Optional[str] = None,\n ) -> Union[List[Dict[str, Any]], List[List[Dict[str, Any]]]]:\n\n return await self._get(\"/teams\", payload, concurrent_tasks, sort)", "def test_teams_get_users_teams_v2(self):\n pass", "def teams(self):\n t = [e[0] for e in self.pick_set.all().values_list(\"team\")]\n return Team.objects.filter(id__in=set(t))" ]
[ "0.7489081", "0.72419095", "0.7240157", "0.72171396", "0.72042984", "0.71952933", "0.7183357", "0.71334547", "0.6981296", "0.6936945", "0.67850655", "0.6764911", "0.67639595", "0.67597723", "0.6750933", "0.67025095", "0.6670462", "0.6529327", "0.6526705", "0.6499297", "0.6498879", "0.64009035", "0.6390917", "0.6373748", "0.635701", "0.6351616", "0.6334832", "0.6290947", "0.6277655", "0.62026405" ]
0.7426243
1
Retrieve the complete job object for the referenced job. This is only available to users with manage_recruiting permissions within the team that the job is posted in.
def get_job(self, job_reference):
    url = 'jobs/{0}'.format(job_reference)
    result = self.get(url)
    return result.get('job', result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_object(self) -> Job:\n project = ProjectPermissionsMixin.get_object(self)\n return project.jobs.get(id=self.kwargs[\"job\"])", "def get_job(self) -> CustomJob:\n return self._client.get_custom_job(name=self._job_name)", "def get_job(self, job_id):\n\n try:\n exposure = Job.objects.filter(id=job_id)\n except:\n exposure = None\n\n return exposure", "def get_job(self, user, job_id):\n calling_user = User.get_user_by_username(user)\n job = Job.get_job_by_id(job_id)\n for_user = job.get_user()\n self.access_handler.check_read_rights(for_user, calling_user)\n return build_job(job)", "def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()", "def get_job(self) -> Job:\n return self.jobs_list[self.sel_idx]", "def job(self):\n return self.batch[self.job_id]", "def getJob(uniq):\n return Job(Cuebot.getStub('job').GetJob(\n job_pb2.JobGetJobRequest(id=uniq), timeout=Cuebot.Timeout).job)", "def get_job(self, _id):\n data = {\n 'class': 'Job',\n 'id': _id,\n 'attrs': {},\n }\n job = self.db_client.send_request('list', json.dumps(data))\n\n return Job(\n _id=job['id'],\n _type=job['type'],\n task=job['task'],\n command=job['command'],\n input_parameters=job['inputParameters'],\n status=job['status'],\n runner_id=job['runner'],\n )", "def retrieve(received_job_id: str) -> Union[Job, None]:\n # todo: add error handling\n found_job = db.Jobs().get_by_id(received_job_id)\n if not found_job:\n return\n return found_job", "def get_job_by_id(self, job_id):\n return self.get_resource(category=SYSTEM, resource_level=JOB,\n resource_level_id=job_id)", "def retrieve_job(self, job_id) -> AzureQuantumJob:\n return self._provider.get_job(job_id)", "def job(self):\n\n if self.current_bead is None:\n return None\n\n if self.jobs is None:\n RosProxy().notify(\"Can not find jobs.\", STATE.ERROR)\n return None\n\n _job = None\n for job in self.jobs.configurations:\n if job.job_number == self.current_bead.wps_job_number:\n return job\n\n return None", "def _get_job(self, uid):\n try:\n return self._jobs[uid]\n except KeyError:\n raise JobNotFoundError('job \\'%s\\' is not found' % (uid,))", "def get_job(\n self, job_id: Union[str, int], *, params: Optional[dict] = None\n ) -> \"resource_types.Job\":\n\n return communicator.Job(self.__requester).from_id(\n job_id=job_id, parameters=params\n )", "def retrieve_job(self, job_id):\n job = {}\n with self._lock:\n if job_id not in self._jobs:\n return None\n job = self._jobs[job_id]\n return job", "def get_a_job(job_id):\n job = JobModel.get_one_job(job_id)\n if not job:\n return custom_response({'Error': 'Job Not Found'}, 404)\n\n job_message = job_schema.dump(job, many=True)\n return custom_response(job_message, 200)", "def get_object(self, queryset=None):\n # 404 if job doesn't exist\n try:\n job = Job.objects.select_related().get(pk=self.kwargs['pk'])\n except Job.DoesNotExist:\n raise Http404(\"No Job with PK#{} found.\".format(self.kwargs['pk']))\n\n # Staff can see all jobs\n if self.request.user.is_staff:\n return job\n\n # Creator can see their own jobs no matter the status\n if job.creator == self.request.user:\n return job\n\n # For everyone else the job needs to be visible\n if job.visible:\n return job\n\n # Return None to signal 401 unauthorized\n return None", "def get_job(self, identifier: str):\n self._log_operation('Getting job {i}'.format(i=identifier))\n return self._job_queue.get_job_details(identifier)", "def get_job(self, job_name):\n try:\n return self.json_dict['job 
definitions'][job_name]\n except KeyError:\n print('No job \"%s\" in %s' % (job_name, self.filepath))\n return None", "def get_job(self) -> Union[Dict[Text, Text], CustomJob]:\n pass", "def get_job(jid=None):\n if not jid:\n raise CommandExecutionError(\"ID option must not be none.\")\n\n query = {\"type\": \"op\", \"cmd\": \"<show><jobs><id>{}</id></jobs></show>\".format(jid)}\n\n return __proxy__[\"panos.call\"](query)", "def getJob(appName, jobId):\n jobs = db.getJobs(jobId=jobId)\n job = None if len(jobs) == 0 else jobs[0]\n k3job = dispatcher.getJob(int(jobId))\n\n if job == None:\n return returnError(\"Job ID, %s, does not exist\" % jobId, 404)\n\n thisjob = dict(job, url=dispatcher.getSandboxURL(jobId))\n if k3job != None:\n thisjob['master'] = k3job.master\n local = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, str(jobId)).encode(encoding='utf8', errors='ignore')\n path = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, str(jobId),'role.yaml').encode(encoding='utf8', errors='ignore')\n if os.path.exists(local) and os.path.exists(path):\n with open(path, 'r') as role:\n thisjob['roles'] = role.read()\n else:\n return returnError(\"Job Data no longer exists\", 400)\n\n thisjob['sandbox'] = sorted (os.listdir(local))\n\n if 'application/json' in request.headers['Accept']:\n return jsonify(thisjob)\n else:\n return render_template(\"last.html\", appName=appName, lastjob=thisjob)", "def get_job_detail():\n\n return JobDetail.query.all()", "def get_job(self, job_id) -> AzureQuantumJob:\n azure_job = self._workspace.get_job(job_id)\n backend = self.get_backend(azure_job.details.target)\n return AzureQuantumJob(backend, azure_job)", "def get_job_owner(self, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.get_job_owner',\n [job], self._service_ver, context)", "def get_job(job_name: str):\n\n job_details = redis_controller.get_job_details(job_name=job_name)\n return job_details", "def job_id(self):\n return self._properties.get(\"jobReference\", {}).get(\"jobId\")", "def getJob(self, name=None):\n if name == None: \n name = self.jobstable.get_selectedRecordNames()[0]\n if name == None:\n return None, name\n jobid = self.DB.meta.peatsa_jobs[name]\n try:\n job = PEATSA.WebApp.Data.Job(jobid, self.connection)\n except:\n #print 'job not in database'\n return None,name\n return job, name", "async def job_detail(request, job_id=None):\n current_jobs = dagobah._serialize().get('jobs', {})\n jobs = [job for job in current_jobs if str(job['job_id']) == job_id]\n if not jobs:\n raise ValueError('not find any jobs')\n return template('job_detail.html', job=jobs[0], hosts=dagobah.get_hosts())" ]
[ "0.81940305", "0.763955", "0.72616756", "0.72534204", "0.7205309", "0.7107446", "0.70887804", "0.70690084", "0.6927024", "0.689124", "0.688404", "0.6878481", "0.68731385", "0.68664885", "0.68400425", "0.6831167", "0.67426634", "0.6740695", "0.6673949", "0.66355276", "0.6575131", "0.6534467", "0.6511777", "0.64456713", "0.6400381", "0.639183", "0.639031", "0.6336522", "0.6311414", "0.61653525" ]
0.77152336
1
Retrieve a list of all the offers on a specific job or within a specific team.
def get_offers(self, buyer_team_reference, include_sub_teams=None,
               provider_ref=None, profile_key=None, job_ref=None,
               agency_ref=None, status=None, created_time_from=None,
               created_time_to=None, page_offset=0, page_size=20,
               order_by=None):
    url = 'offers'
    data = {}
    data['buyer_team__reference'] = buyer_team_reference
    if include_sub_teams:
        data['include_sub_teams'] = include_sub_teams
    if provider_ref:
        data['provider__reference'] = provider_ref
    if profile_key:
        data['profile_key'] = profile_key
    if job_ref:
        data['job__reference'] = job_ref
    if agency_ref:
        data['agency_team__reference'] = agency_ref
    if status:
        data['status'] = status
    if created_time_from:
        data['created_time_from'] = created_time_from
    if created_time_to:
        data['created_time_to'] = created_time_to
    data['page'] = '{0};{1}'.format(page_offset, page_size)
    if order_by is not None:
        data['order_by'] = order_by
    result = self.get(url, data)
    return result.get('offers', result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_offers(self):\n pass", "def get_league_listing(self):\n url = self.__build_url(urls.GET_LEAGUE_LISTING)\n req = self.executor(url)\n if self.logger:\n self.logger.info('URL: {0}'.format(url))\n if not self.__check_http_err(req.status_code):\n return response.build(req, url, self.raw_mode)", "def list_offerings(nextToken=None):\n pass", "def get_available_companies(team):", "def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def get_offer(self, offer_reference):\n url = 'offers/{0}'.format(offer_reference)\n result = self.get(url)\n return result.get('offer', result)", "def get_teams():", "def scrap_data_companies(self):\n list_job_offers = self.driver.find_elements_by_class_name(\n \"jobContainer\")\n jobs = []\n if len(list_job_offers) == 0:\n print(\"There is nothing to scrap for \", conf.URL_TO_SCRAPE,\n \"that was requested\")\n return\n\n for i, elt in enumerate(list_job_offers):\n\n self.remove_sign_up_prompt()\n self.remove_recommended_jobs()\n html_job_container = elt.get_attribute('innerHTML')\n time.sleep(2)\n name_company = get_name_company(elt.text)\n city_job = get_city_job(html_job_container)\n job_id = get_job_id(html_job_container)\n position_job = get_position(html_job_container)\n job_description = get_summary_job(position_job)\n\n if job_id is not None and name_company is not None:\n company = Company.Company(name_company)\n company_and_id_job = name_company + \"-\" + job_id\n self.current_path = os.path.join(self.date_path,\n company_and_id_job)\n os.mkdir(self.current_path)\n\n if i != 0:\n click_on_job_offer(\n elt) # link since we are already seeing it\n\n self.scrape_data_company(elt, company)\n company_id = company.insert_to_db(self.db_connection)\n job = JobOffer.JobOffer(job_id, company=company, city=city_job,\n position=position_job,\n description=job_description)\n job.insert_to_db(company_id, self.db_connection)\n jobs.append(job)\n print(job)\n else:\n logger.error(\"Job Id not found\")\n JobOffer.print_jobs(jobs)", "def get(self):\n for team in api.team.get_all_teams():\n team_id = team[\"tid\"]\n team_members = api.team.get_team_members(tid=team_id, show_disabled=False)\n all_scoreboards = api.scoreboards.get_all_scoreboards()\n member_eligibilities = dict()\n for member in team_members:\n member_eligibilities[member[\"uid\"]] = {\n scoreboard[\"sid\"]\n for scoreboard in all_scoreboards\n if api.scoreboards.is_eligible(member, scoreboard)\n }\n\n team_eligibilities = list(set.intersection(*member_eligibilities.values()))\n db = api.db.get_conn()\n db.teams.find_one_and_update(\n {\"tid\": team_id}, {\"$set\": {\"eligibilities\": team_eligibilities}}\n )\n return jsonify({\"success\": True})", "def get_available_companies_and_people(team):", "def offers_list(update, context):\n chat = Chat.get(update.message.chat_id)\n\n if not chat.cart.subscriptions:\n text = 'ℹ️ Du får ingen tilbud, hvis du ikke har nogen søgninger.'\n update.message.reply_text(text)\n\n for sub in chat.cart:\n if not sub.offers:\n text = f'ℹ️ Søgningen efter \"{sub.query}\" har ingen tilbud.'\n update.message.reply_text(text)\n continue\n\n lines = [\n f'ℹ️ Søgningen efter \"{sub.query}\" har {len(sub.offers)} tilbud:',\n ''\n ]\n for offer in sub.offers:\n print(offer, offer.timeleft(), offer.run_till)\n lines.append(offer_text(offer))\n\n update.message.reply_text('\\n'.join(lines))", "def base_offers_list():\n 
offers_list = Offers()\n offers_list.add_new_offer('Offer1', 'Baked Beans', 3, 1)\n offers_list.add_new_offer('Offer2', 'Sardines', 1, 0.25)\n return offers_list", "def get_teams(self):\n url = 'teams'\n result = self.get(url)\n return result.get('teams', result)", "def get_job_query(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n mt = getToolByName(self, 'portal_membership') \n currentUser = mt.getAuthenticatedMember() \n \n if \"Site Administrators\" not in currentUser.getGroups():\n\treturn catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job', \t\t\t\t Creator = currentUser.getUserName())\n else: \n return catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job')", "def get_offers(street_id: int, house_number: str) -> List[Dict[str, Any]]:\n\n url = 'https://api.n1.ru/api/v1/offers/'\n params = _offers_params.copy()\n params['filter_or[addresses][0][street_id]'] = street_id\n params['filter_or[addresses][0][house_number]'] = house_number\n offset, count, offers = 0, 1, []\n\n while offset < count: # while do\n try:\n r = requests.get(url, params=params, headers=_headers)\n response = r.json()\n count = response['metadata']['resultset']['count']\n except requests.RequestException as e:\n raise ParserException(\n f'Fail make request. street_id: {street_id}, house_number: {house_number}'\n ) from e\n except KeyError as e:\n raise ParserException('It was not possible to get the number of offers') from e\n \n offers.extend(response.get('result', []))\n offset += 25\n time.sleep(0.5)\n \n return offers", "def get_offer(request, offer_id):\n offer = get_object_or_404(Offers, pk=offer_id)\n serializer = OffersSerializer(offer)\n return JsonResponse(serializer.data, safe=False)", "def get_employees(self):\n return self.employees", "def get_stored_offers():\n return deserialize(read_from_db())", "def get_team_list(sport_type, exclude=[]):\n url = ESPN_API_PREFIX + Sport.get_resource_url(sport_type) + \"/teams\"\n params = {\"region\": \"us\",\n \"lang\": \"en\",\n \"contentorigin\": \"espn\",\n \"limit\": \"99\"}\n r = requests.get(url=url, params=params)\n data = r.json()\n team_list = [team[\"team\"] for team in data[\"sports\"][0][\"leagues\"][0][\"teams\"]]\n team_objects_list = []\n excluded_teams = [team.id for team in exclude if team.sport == sport_type]\n for team in team_list:\n if team[\"id\"] not in excluded_teams:\n team_objects_list.append(Team(team[\"id\"], team[\"displayName\"], team[\"abbreviation\"],\n sport_type, team[\"logos\"][0][\"href\"]))\n return team_objects_list", "def get_live_league_games(self):\n url = self.__build_url(urls.GET_LIVE_LEAGUE_GAMES)\n req = self.executor(url)\n if self.logger:\n self.logger.info('URL: {0}'.format(url))\n if not self.__check_http_err(req.status_code):\n return response.build(req, url, self.raw_mode)", "def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)", "def get_jobs(self, buyer_team_reference,\n include_sub_teams=False,\n status=None, created_by=None, created_time_from=None,\n created_time_to=None, page_offset=0, page_size=20,\n order_by=None):\n url = 'jobs'\n\n data = {}\n data['buyer_team__reference'] = buyer_team_reference\n\n data['include_sub_teams'] = False\n if include_sub_teams:\n data['include_sub_teams'] = include_sub_teams\n\n if status:\n data['status'] = status\n\n if created_by:\n data['created_by'] = created_by\n\n if created_time_from:\n data['created_time_from'] = created_time_from\n\n if created_time_to:\n 
data['created_time_to'] = created_time_to\n\n data['page'] = '{0};{1}'.format(page_offset, page_size)\n\n if order_by is not None:\n data['order_by'] = order_by\n\n result = self.get(url, data)\n return result.get('jobs', result)", "def get(self, team_id):\n team = api.team.get_team(team_id)\n if not team:\n raise PicoException(\"Team not found\", 404)\n\n team_members = api.team.get_team_members(tid=team_id, show_disabled=False)\n all_scoreboards = api.scoreboards.get_all_scoreboards()\n member_eligibilities = dict()\n for member in team_members:\n member_eligibilities[member[\"uid\"]] = {\n scoreboard[\"sid\"]\n for scoreboard in all_scoreboards\n if api.scoreboards.is_eligible(member, scoreboard)\n }\n\n team_eligibilities = list(set.intersection(*member_eligibilities.values()))\n db = api.db.get_conn()\n db.teams.find_one_and_update(\n {\"tid\": team_id}, {\"$set\": {\"eligibilities\": team_eligibilities}}\n )\n\n return jsonify({\"success\": True, \"eligibilities\": team_eligibilities})", "def get_all_jobs_for_day(self, worker: str = \"*\", **kwargs) -> requests.Response:\n\n data = {\"worker\": worker}\n\n if \"day\" in kwargs:\n data[\"day\"] = kwargs[\"day\"]\n if \"team\" in kwargs:\n data[\"team\"] = kwargs[\"team\"]\n if \"order_id\" in kwargs:\n data[\"orderId\"] = kwargs[\"order_id\"]\n if \"order_ids\" in kwargs:\n data[\"orderIds\"] = []\n data[\"orderIds\"].extend(kwargs[\"order_ids\"])\n if \"progress_success\" in kwargs:\n data[\"progressSuccess\"] = kwargs[\"progress_success\"]\n\n response = self._api_call(method=\"POST\", endpoint=GET_JOBS_ENDPOINT, data=data)\n return response", "def list(self, jobguid=\"\", executionparams=None):", "def get_job_detail():\n\n return JobDetail.query.all()", "def get_companies_and_people(team):", "def test_offers_show_every(self):\n result = self.client.get('/offers_show_every')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Offers', result.data)", "def task_offers_collection(request, task_id):\n try:\n task = Task.objects.get(id=task_id)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n offers = Offer.objects.filter(task=task).all().order_by(\"-timestamp\")\n serializer = OfferSerializer(offers, many=True)\n return Response(serializer.data)", "def get_orders(self, event_ids=None, market_ids=None, runner_ids=None, offer_id=None, offset=0, per_page=500,\n interval=None, side=Side.Default, status=Status.Default, session=None):\n params = clean_locals(locals())\n params['exchange-type'] = self.client.exchange_type\n method = 'offers'\n date_time_sent = datetime.datetime.utcnow()\n if offer_id:\n method = 'offers/{0}'.format(offer_id)\n params = {'odds-type': self.client.odds_type}\n response = self.request(\"GET\", self.client.urn_edge, method, params=params, session=session).json()\n else:\n response = self.request(\n \"GET\", self.client.urn_edge, method, params=params, target='offers', session=session\n )\n date_time_received = datetime.datetime.utcnow()\n return self.process_response(response, resources.Order, date_time_sent, date_time_received)" ]
[ "0.57628614", "0.56692207", "0.5559179", "0.55402625", "0.5539658", "0.55073285", "0.547301", "0.5470004", "0.54166543", "0.537629", "0.5374227", "0.53590316", "0.53125197", "0.5308537", "0.5307341", "0.5237498", "0.51619124", "0.51416373", "0.512965", "0.5113388", "0.5079461", "0.5078868", "0.5056176", "0.5014904", "0.5009402", "0.5000487", "0.49504527", "0.494942", "0.49487573", "0.49449527" ]
0.68964773
0
Retrieve the referenced offer.
def get_offer(self, offer_reference):
    url = 'offers/{0}'.format(offer_reference)
    result = self.get(url)
    return result.get('offer', result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_offer(request, offer_id):\n offer = get_object_or_404(Offers, pk=offer_id)\n serializer = OffersSerializer(offer)\n return JsonResponse(serializer.data, safe=False)", "def get_offer(\n self,\n service_name,\n ):\n import urllib.parse\n\n if self.index is None:\n self.index = self.urlgetter.get(self.start).json()\n offer_code = self.aliases.get(service_name, service_name)\n offer_ref = self.index['offers'][offer_code]\n offer = OfferFile(\n url=urllib.parse.urljoin(self.start, offer_ref['currentVersionUrl']),\n code=offer_code,\n urlgetter=self.urlgetter\n )\n return offer", "def get(self, price, way):\n for offer in self.book[way]:\n if offer.get_price() == price:\n return offer\n return None", "def makes_offer(self) -> object:\n return self._makes_offer", "def offer(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"offer\")", "def get_stored_offers():\n return deserialize(read_from_db())", "def get_master_offer(self):\n return Offer.objects.get(is_master=True)", "def value_offers(self):\n return OrderBookUtils.book_value(self.offer)", "def getReference(self):\n return _libsbml.Association_getReference(self)", "def offer_type(self):\n return self.proto.offer[0].offerType", "def mid(self):\n if self.bid and self.offer:\n return (self.bid[-1].price + self.offer[0].price) / 2.0\n\n raise Exception(\"No bids / offers!\")", "def get_amount(self, offer_id):\r\n row = self._find_row(offer_id)\r\n if row:\r\n return row.getAmount()", "def get_reference(self):\t\t\n\t\treturn self._reference", "def getItem(self) -> Optional[items.Item]:\n return None if self.__itemRef is None else self.__itemRef()", "def get_resource(self):\n return self._stores", "def related_entity(self):\n return self._related_entity", "def get_best_offer(self,way):\n if way==\"BUY\":\n return self.book[Trade.WAY_BUY][0].get_price()\n elif way==\"SELL\":\n return self.book[Trade.WAY_SELL][len(self.book[Trade.WAY_SELL])-1].get_price()", "def getItem(self):\n return self.getItem(0)", "def has_offer_catalog(self) -> object:\n return self._has_offer_catalog", "def get(self):\n return self._get()", "def get_reference(self):\n return self.resource.url", "def get_poll(self, poll_key):\n if poll_key[:5] != 'poll_':\n raise Exception('Incorrect key passed to get_poll(): ' + poll_key)\n poll_data = self.client.get(poll_key)\n if poll_data is None:\n return None\n else:\n return loads(poll_data)", "def get(self, id):\n return self.__get_object(super(PullRequests, self).get(id))", "async def receive_offer(\n self, cred_ex_record: V20CredExRecord, cred_offer_message: V20CredOffer\n ) -> None:", "def reference(self):\n \n return self._reference", "def GetDetailsItem(self):\r\n if self.details: return self.details.GetDetailsItem()\r\n return None", "def get_spot_datafeed_subscription(self):\r\n return self.get_object('DescribeSpotDatafeedSubscription',\r\n None, SpotDatafeedSubscription, verb='POST')", "def reference(self):\n return self._reference", "def reference(self):\n return self._reference", "def test_show_offer(self, mock_find):\n mock_find.return_value = sample_offer\n\n result = self.client.get(f'/offers/{sample_offer_id}')\n self.assertEqual(result.status, '200 OK')\n self.assertIn(b'Description', result.data)" ]
[ "0.6480831", "0.62062526", "0.6197832", "0.61778504", "0.5886585", "0.5828745", "0.5711066", "0.5514177", "0.5442751", "0.5413679", "0.5136411", "0.5119012", "0.5102649", "0.5079585", "0.5053517", "0.4994421", "0.49824262", "0.49734816", "0.49673218", "0.49591538", "0.4944525", "0.4916293", "0.4907343", "0.48960295", "0.48895642", "0.48610026", "0.48603103", "0.4859117", "0.4859117", "0.48546717" ]
0.7965513
0
Retrieve referenced engagement object.
def get_engagement(self, engagement_reference):
    url = 'engagements/{0}'.format(engagement_reference)
    result = self.get(url)
    return result.get('engagement', result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_object(self):\n return self._object", "def lookup_obj(self,):\n return self._lookup_obj", "def get_object(self):\n account = Account.get_account_with_admins(account.id)\n\n return account[0] if account else None", "def get_object(self):\n if getattr(self, 'current_instance', None):\n ret = self.current_instance\n else:\n ret = super().get_object()\n return ret", "def get(cls):\n return cls.instance", "def GetEntity(self):\n\t\treturn self.acad.ActiveDocument.Utility.GetEntity()", "def get_reference(self):\t\t\n\t\treturn self._reference", "def obj(self):\n if not self._obj:\n self._get()\n return self._obj", "def get_offer(self, offer_reference):\n url = 'offers/{0}'.format(offer_reference)\n result = self.get(url)\n return result.get('offer', result)", "def getObject(self):\n return self.base.get(\"object\", [])", "def getObject(self):\n # try getting the remote object by unique id\n remote_obj = self._getObjectByUid()\n if remote_obj is not None:\n return remote_obj\n\n utool = getUtility(IURLTool)\n return utool.getPortalObject().restrictedTraverse(self.remote_url)", "def get_article(self):\n return self.article", "def get_object(id):", "def object(self):\n return self._object", "def __current_object__(self):\n return self.__lookup()", "def get_managed_object(self):\n return self.key", "def get_object(self):\n queryset = self.get_queryset() # acquire queryset\n for key in self.lookup_args:\n if self.kwargs.get(key):\n id = self.kwargs[key]\n try:\n instance = queryset.get(id=id) # acquire current instance\n return instance \n except models.ObjectDoesNotExist:\n raise Http404('NO object found.')\n \n raise Http404('No object found.')", "def get_object(self, id_):\n return self._objects.get(id_, None)", "def get_object(self, name):\n return self._internal.objects[name]", "def _viewer_by_reference(self, reference):\n viewer_item = self._viewer_item_by_reference(reference)\n\n return self._viewer_store[viewer_item['id']]", "def engagements(self):\n from hubspot3.engagements import EngagementsClient\n\n return EngagementsClient(**self.auth, **self.options)", "def get(self):\n return self.__expedition", "def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n obj = get_object_or_404(queryset, question=self.kwargs['pk'])\n self.check_object_permissions(self.request, obj)\n return obj", "def obj(self):\r\n return self._obj", "def get_object(self):\n obj = get_object_or_404(Article, slug=self.kwargs[\"slug\"])\n self.check_object_permissions(self.request, obj)\n return obj", "def obj(self):\n return self._obj", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def get(self, obj):", "def get_episode(self, object_id):\n return self.get_object(\"episode\", object_id)", "def get(self, email):\n adm = Administration()\n pers = adm.get_person_by_google_mail(email)\n return pers" ]
[ "0.5783282", "0.56287766", "0.5541179", "0.550063", "0.54797107", "0.54678154", "0.54535735", "0.54510117", "0.5434497", "0.5410832", "0.5401811", "0.5345003", "0.5332453", "0.5307219", "0.53062886", "0.5299677", "0.5265588", "0.52421933", "0.52373636", "0.5230046", "0.52231437", "0.5200054", "0.5161994", "0.51438147", "0.51422423", "0.5138027", "0.5124444", "0.5115886", "0.511103", "0.5110449" ]
0.7453287
0
List job applications as a client.
def list_client_applications(self, buyer_team__reference, job_key,
                             status=None, profile_key=None,
                             agency_team__reference=None,
                             order_by=None, page_offset=None,
                             page_size=None):
    data = {}
    data['buyer_team__reference'] = buyer_team__reference
    data['job_key'] = job_key
    if status:
        data['status'] = status
    if profile_key:
        data['profile_key'] = profile_key
    if agency_team__reference:
        data['agency_team__reference'] = agency_team__reference
    if order_by:
        data['order_by'] = order_by
    data['page'] = '{0};{1}'.format(page_offset, page_size)
    url = 'clients/applications'
    return self.get(url, data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def listapps(self):\n return jsoncall.do_call(\"listapps\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password},\n self.connection)", "def clientlist(self) -> None:\n from bacommon.servermanager import ClientListCommand\n self._enqueue_server_command(ClientListCommand())\n self._block_for_command_completion()", "async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()", "def list(ctx):\n # pylint: disable=redefined-builtin\n _list_apps(ctx.obj['config'], ctx.obj['client'])", "def _list_apps(config, client):\n logger.info(\"Listing all the published apps by {}: \".format(config.username), fg=\"green\")\n current_page = 0\n total_pages = get_search_results(config, client, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = click.prompt(uxstring.UxString.pagination,\n type=str)\n\n next_page = get_next_page(prompt_resp, current_page)\n\n if next_page == -1:\n model_id = prompt_resp\n display_app_info(config, client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n else:\n get_search_results(config, client, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return", "def ListApps(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def list(self):\n\n config = self.get_config()\n client = config['client']\n default_config = config[client]\n\n msg.run('Saved options for client %s' % client)\n msg.inf('Default application (%s)' % default_config.get('defapp'))\n msg.inf('environment (%s)' % default_config['environment'])\n msg.inf('databases prod (%s) test (%s)' %\n (default_config['database'],\n default_config['test_database']))\n msg.inf('Image (%s)' % default_config['image'])\n msg.inf('Nginx (%s) Debug (%s) Verbose (%s)' %\n (default_config['nginx'],\n default_config['debug'],\n default_config['verbose'])\n )\n msg.run('\\nOther clients in this environment')\n clients = [item for item in config if item != 'client']\n\n msg.inf(', '.join(clients))", "def ListApps(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def get_integrations_clientapps(self, **kwargs):\n\n all_params = ['page_size', 'page_number', 'sort_by', 'expand', 'next_page', 'previous_page']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_integrations_clientapps\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/integrations/clientapps'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n if 'page_number' in params:\n query_params['pageNumber'] = params['page_number']\n if 'sort_by' in params:\n query_params['sortBy'] = params['sort_by']\n if 'expand' in params:\n query_params['expand'] = params['expand']\n if 'next_page' in params:\n query_params['nextPage'] = params['next_page']\n if 'previous_page' in params:\n query_params['previousPage'] = params['previous_page']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n 
header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ClientAppEntityListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def list_apps(self):\n with hide(\"output\", \"running\"):\n result = local((\"redis-cli -h {host} -p 6379 -n {db} keys \\\"*\\\"\"\n .format(host=self.host,\n db=REDIS_APPLICATION_DB_NUM)),\n capture=True)\n\n if len(result.stdout) > 0:\n return result.stdout\n else:\n print(\"Clipper has no applications registered\")\n return \"\"", "def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def jobs():\n result = []\n out = subprocess.check_output([\"/bin/launchctl\", \"list\"]).decode()\n for row in out.splitlines()[1:]:\n result.append(Job(row))\n return result", "def applications(self) -> List[ApplicationRequestResponse]:\n return self._applications", "def all_jobs_for_client(ClientID):\n\n client = Client.get(ClientID)\n\n jobs = Job.get_all_for_client(ClientID)\n\n oneoffs = OneOff.get_from_client_id_between_dates(ClientID)\n\n invoices = MonthlyInvoice.get_from_client_id_between_dates(ClientID)\n\n job = JobView(client, jobs, oneoffs, False, Job.get_count_for_client(ClientID) > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job, invoices=invoices)", "def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)", "def get_applications(self):\n status_code_dict = {\n codes.ok: ApplicationListResponse,\n codes.bad_request: ErrorResponse,\n }\n return self.get_request(APPLICATION_URL,\n status_code_response_class_dict=status_code_dict,\n )", "def get_applications(rest, sessionsArg, option):\n applications = []\n if option == 'heartbeat':\n appsString = rest.get_environment_applications(sessionsArg).strip();\n else:\n appsString = rest.get_all_applications().strip();\n rawList = appsString.split('\\n<\\n')\n for raw in rawList:\n if printtrace: print '_' * 20\n if applicationdataok(raw):\n attributes = [a.split(': ')[1] for a in raw.split('\\n')]\n if printtrace: print attributes\n\n a = Application()\n a.sessionId = attributes[0]\n a.nameInEnvironmentView = attributes[1]\n a.fileName = attributes[2]\n a.processString = attributes[3]\n a.discoveryChecks = attributes[4:]\n a.isgeneric = a.nameInEnvironmentView == 'generic application' or a.fileName.find('generic-application') > 0\n if not a.isgeneric:\n applications.append(a)\n return applications", "def list_service(request):\n builder = http.ResponseBuilder()\n master_addr = request.GET.get('master',None)\n if not master_addr:\n return builder.error('master is required').build_json()\n\n client = wrapper.Galaxy(master_addr,settings.GALAXY_CLIENT_BIN)\n status,jobs = client.list_jobs()\n LOG.info(status)\n if not status:\n return builder.error('fail to list jobs').build_json()\n ret = []\n for job in jobs:\n ret.append(job.__dict__)\n return 
builder.ok(data=ret).build_json()", "def redis_client_list(self):\n def func(server):\n return server.server.client_list()\n self.__run_redis_cmd(func)", "def list_apps(self) -> list:\n apps = self.app.list_apps()\n app_list = [app[\"title\"] for app in apps]\n return app_list", "def view_all(options, client):\n if options.show_events:\n return display_events(client.events())\n\n return \"\".join([\n display.DisplayServices().format(client.services()),\n '\\n',\n display.DisplayJobs(options).format(client.jobs())\n ])", "def list_clients(self):\n\n return self.clients_info", "def get_applications(status):\n return status['applications']", "def app_list():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n appls_query = Applic.query(ancestor = base_key).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n userid = user.user_id()\n #return userid\n appls_query = Applic.query(Applic.user==userid).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n redirect('/')", "def applications(self):\r\n return applications.Applications(self)", "def get_job_list(self):\n return self.job_list", "def get_job_list(self):\n return self.job_list", "def app_list(self, third_only=False):\n return self.adb.app_list(third_only)", "def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')" ]
[ "0.6776876", "0.6732049", "0.6693379", "0.6689234", "0.6620565", "0.6502964", "0.6432655", "0.63388366", "0.630927", "0.62085605", "0.6156658", "0.6153231", "0.61485225", "0.6116445", "0.6095281", "0.6093142", "0.6039287", "0.6038911", "0.6036074", "0.6015291", "0.5975665", "0.5954315", "0.59470713", "0.59213305", "0.59082013", "0.5902246", "0.58911127", "0.58911127", "0.5890866", "0.5884211" ]
0.73799616
0
Get specific job application as a client.
def get_client_application(self, application_id, buyer_team__reference):
    data = {}
    data['buyer_team__reference'] = buyer_team__reference
    url = 'clients/applications/{0}'.format(application_id)
    return self.get(url, data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_client(self, name):\n return self.get_clients(as_dict=True).get(name)", "def job(job_name):\n ClientID = Job.get_client_id(job_name)\n return tasks_for_client_job(ClientID, job_name)", "def client(self) -> str:\n return pulumi.get(self, \"client\")", "def _get_client(self, requester_name: str) -> Any:\n return self.datastore.get_client_for_requester(requester_name)", "def get_client(self):\n return self.client", "def get_job(self) -> CustomJob:\n return self._client.get_custom_job(name=self._job_name)", "def client(self):\n response = requests.get(self._url(self._CLIENT_PATH), headers=self._headers)\n return response.json()", "def client_app_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"client_app_id\")", "def get_client(self, clientname):\n client = self.dbsession.query(Client).filter_by(clientname=clientname).all()\n if not client:\n return self.create_client({'clientname': clientname})\n else:\n return client[0]", "def list_client_applications(self, buyer_team__reference, job_key,\n status=None, profile_key=None,\n agency_team__reference=None,\n order_by=None, page_offset=None,\n page_size=None):\n data = {}\n\n data['buyer_team__reference'] = buyer_team__reference\n data['job_key'] = job_key\n\n if status:\n data['status'] = status\n\n if profile_key:\n data['profile_key'] = profile_key\n\n if agency_team__reference:\n data['agency_team__reference'] = agency_team__reference\n\n if order_by:\n data['order_by'] = order_by\n\n data['page'] = '{0};{1}'.format(page_offset, page_size)\n\n url = 'clients/applications'\n return self.get(url, data)", "def public_client(self) -> Optional[pulumi.Input['ApplicationPublicClientArgs']]:\n return pulumi.get(self, \"public_client\")", "def public_client(self) -> Optional[pulumi.Input['ApplicationPublicClientArgs']]:\n return pulumi.get(self, \"public_client\")", "def _getClient(self, app_token=None):\n if app_token is None:\n from . 
import models\n app_token = models.Aplicacion.objects.get(app_id=self.app_id).app_token\n return api.OAuthAppClient(settings.CLIENT_ID, settings.CLIENT_SECRET, self.app_id, app_token)", "def get_job_by_id(self, job_id):\n return self.get_resource(category=SYSTEM, resource_level=JOB,\n resource_level_id=job_id)", "def getApp(self):\n return self.serviceClass.app", "def get_app(self, app_id):\n return req(self.logger, self.access_token, 'GET', '/apps/'+app_id, {})", "def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()", "def get_job_client(\n enable_ucaip: Optional[bool] = False,\n ucaip_region: Optional[Text] = None\n) -> Union[CAIPJobClient, UCAIPJobClient]:\n if enable_ucaip:\n return UCAIPJobClient(ucaip_region)\n return CAIPJobClient()", "def client(self):\n\t\t# pylint: disable=invalid-name\n\t\treturn self._client", "def _get_login_oauth_client():\n login_client_id = settings.JWT_AUTH['JWT_LOGIN_CLIENT_ID']\n try:\n return Application.objects.get(client_id=login_client_id)\n except Application.DoesNotExist:\n raise AuthFailedError( # lint-amnesty, pylint: disable=raise-missing-from\n f\"OAuth Client for the Login service, '{login_client_id}', is not configured.\"\n )", "def _get_client_impl(self):\n api_version = self._get_api_version(None)\n if api_version not in self._client_impls:\n self._create_client_impl(api_version)\n return self._client_impls[api_version]", "def get_application(handle):\n\n if handle in applications:\n return applications.get(handle)\n raise KeyError(\"Application with handle '%s' not registered\" % handle)", "def get_client(self, user_id: int, client_name: str) -> Client:\n return self.clients[user_id][client_name]", "def get_client(self, server_name=None, server_address=None):\n if server_name:\n for name, address in self.registry.servers.items():\n if name == server_name:\n return Client(address)\n return None\n elif server_address:\n return Client(server_address)", "def client(self):\n return self._thread._client", "def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client", "def api_client(api_app):\n return api_app.test_client()", "def client(app=None):\n fs_client = _utils.get_app_service(app, _FIRESTORE_ATTRIBUTE, _FirestoreClient.from_app)\n return fs_client.get()", "def _get_app(flask_app):\n flask_app.test_client_class = TestClient\n return flask_app.test_client()", "def client(self):\n return self._client" ]
[ "0.63978887", "0.62402445", "0.6198", "0.61576414", "0.6136321", "0.6130897", "0.61121416", "0.60793394", "0.5979147", "0.5958431", "0.59523165", "0.59523165", "0.59153277", "0.58909774", "0.5878597", "0.58340085", "0.582294", "0.5817454", "0.5794533", "0.57913315", "0.5790265", "0.5765234", "0.57364804", "0.57345104", "0.5730022", "0.57228565", "0.5687308", "0.56782365", "0.5675098", "0.563467" ]
0.650442
0
List job applications as a freelancer.
def list_freelancer_applications(self, status=None):
    data = {}
    if status:
        data['status'] = status
    url = 'contractors/applications'
    return self.get(url, data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_client_applications(self, buyer_team__reference, job_key,\n status=None, profile_key=None,\n agency_team__reference=None,\n order_by=None, page_offset=None,\n page_size=None):\n data = {}\n\n data['buyer_team__reference'] = buyer_team__reference\n data['job_key'] = job_key\n\n if status:\n data['status'] = status\n\n if profile_key:\n data['profile_key'] = profile_key\n\n if agency_team__reference:\n data['agency_team__reference'] = agency_team__reference\n\n if order_by:\n data['order_by'] = order_by\n\n data['page'] = '{0};{1}'.format(page_offset, page_size)\n\n url = 'clients/applications'\n return self.get(url, data)", "def ListApps(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def _list_apps(config, client):\n logger.info(\"Listing all the published apps by {}: \".format(config.username), fg=\"green\")\n current_page = 0\n total_pages = get_search_results(config, client, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = click.prompt(uxstring.UxString.pagination,\n type=str)\n\n next_page = get_next_page(prompt_resp, current_page)\n\n if next_page == -1:\n model_id = prompt_resp\n display_app_info(config, client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n else:\n get_search_results(config, client, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return", "def list_apps(self) -> list:\n apps = self.app.list_apps()\n app_list = [app[\"title\"] for app in apps]\n return app_list", "def listapps(self):\n return jsoncall.do_call(\"listapps\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password},\n self.connection)", "def all_jobs():\n\n jobs = Job.get_all()\n\n oneoffs = OneOff.get_all()\n\n job = JobView(None, jobs, oneoffs, False, Job.count() > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job)", "def program_list():\n items = []\n\n soup = abcradionational.get_soup(URL + \"/podcasts/program\")\n \n program_heading = abcradionational.get_podcast_heading(soup)\n\n for program in program_heading:\n items.append({\n 'label': program['title'],\n 'path': plugin.url_for('program_item', url=program['url']),\n })\n\n return items", "def apps():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Applications', level=1)\r\n apps = get_qlik_sense.get_apps()\r\n num_of_apps = len(apps)\r\n table = document.add_table(rows=num_of_apps+1, cols=7)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'App name'\r\n row.cells[1].text = 'App description'\r\n row.cells[2].text = 'Publish time'\r\n row.cells[3].text = 'Stream'\r\n row.cells[4].text = 'File size'\r\n row.cells[5].text = 'Owner userId'\r\n row.cells[6].text = 'Owner userName'\r\n for app in range(num_of_apps):\r\n row = table.rows[app+1]\r\n row.cells[0].text = str(apps[app][0])\r\n row.cells[1].text = str(apps[app][1])\r\n row.cells[2].text = str(apps[app][2])\r\n row.cells[3].text = str(apps[app][3])\r\n row.cells[4].text = str(apps[app][4])\r\n row.cells[5].text = str(apps[app][5])\r\n row.cells[6].text = str(apps[app][6])\r\n document.add_page_break()", "def application_list(p_engine, p_username, format, appname):\n\n ret = 0\n\n enginelist = 
get_list_of_engines(p_engine, p_username)\n\n if enginelist is None:\n return 1\n\n data = DataFormatter()\n data_header = [\n (\"Engine name\", 30),\n (\"Application name\", 30),\n ]\n data.create_header(data_header)\n data.format_type = format\n for engine_tuple in enginelist:\n engine_obj = DxMaskingEngine(engine_tuple)\n if engine_obj.get_session():\n continue\n applist = DxApplicationList()\n # load all objects\n applist.LoadApplications()\n\n if appname is None:\n applications = applist.get_allref()\n else:\n applications = applist.get_applicationId_by_name(appname)\n if len(applications) == 0:\n ret = ret + 1\n\n for appref in applications:\n appobj = applist.get_by_ref(appref)\n data.data_insert(\n engine_tuple[0],\n appobj.application_name\n )\n\n print(\"\")\n print (data.data_output(False))\n print(\"\")\n \n \n return ret", "def list_apps(self):\n with hide(\"output\", \"running\"):\n result = local((\"redis-cli -h {host} -p 6379 -n {db} keys \\\"*\\\"\"\n .format(host=self.host,\n db=REDIS_APPLICATION_DB_NUM)),\n capture=True)\n\n if len(result.stdout) > 0:\n return result.stdout\n else:\n print(\"Clipper has no applications registered\")\n return \"\"", "def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')", "def view_job(options, job_name, client):\n if options.show_events:\n return display_events(client.job_events(job_name))\n\n job_content = client.job(job_name)\n return display.DisplayJobs(options).format_job(job_content)", "def view_all(options, client):\n if options.show_events:\n return display_events(client.events())\n\n return \"\".join([\n display.DisplayServices().format(client.services()),\n '\\n',\n display.DisplayJobs(options).format(client.jobs())\n ])", "def list_apps(request, pk=0):\n context = {'items': [], 'resource_type': 'App'}\n\n if pk == 0:\n context['h2'] = \"Managed Applications\"\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n refresh_managed_software_status()\n apps = MacOSApp.objects.filter(merged_into__isnull=True).reverse()\n if not request.user.has_perm('devices.manage_apps'):\n apps = apps.filter(managed=True).exclude(installed__isnull=True, pending_install__isnull=True)\n for app in apps:\n assignment_count = app.pending_install.count()\n installed_on = app.installed.all()\n data = {'meta': app, 'assignment_count': assignment_count, 'installed': installed_on}\n context['items'].append(data)\n else:\n if not request.user.has_perm('devices.manage_apps'):\n raise PermissionDenied\n\n device = get_object_or_404(Laptop, pk=pk)\n context['h2'] = \"Applications on {}\".format(device.name)\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n context['device_view'] = True\n context['device_id'] = pk\n apps = MacOSApp.objects.filter(pending_install__in=[device])\n apps |= MacOSApp.objects.filter(installed__in=[device])\n for app in apps:\n status = 'Not assigned'\n for entry in app.installed.all():\n if entry == device:\n status = 'Installed'\n for entry in app.pending_install.all():\n if entry == device:\n status = 'Assigned'\n data = {'meta': app, 'status': status}\n context['items'].append(data)\n\n return render(request, 'mdm/resource_list.html', context)", "def listJobs():\n logger.debug('[FLASKWEB /jobs] Request for job listing')\n jobs = db.getJobs(numdays=2)\n for job in jobs:\n job['time'] = datetime.datetime.strptime(job['time'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if job['complete']:\n job['complete'] = 
datetime.datetime.strptime(job['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n\n # Garbage Collect Orpahened jobs\n compiles = db.getCompiles()\n for compile in compiles:\n if compile['submit']:\n compile['submit'] = datetime.datetime.strptime(compile['submit'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if compile['complete']:\n compile['complete'] = datetime.datetime.strptime(compile['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n # for c in compiles:\n # if c['uid'] not in compile_tasks.keys():\n # db.updateCompile(c['uid'], status='KILLED', done=True)\n # compiles = db.getCompiles()\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(LaunchJobs=jobs, CompilingJobs=compiles)), 200\n else:\n return render_template(\"jobs.html\", joblist=jobs, compilelist=compiles)", "def get_applications(status):\n return status['applications']", "def ls():\n cfgmgr = ConfigManager()\n apps = cfgmgr['apps']\n for i in apps:\n print(fc(\"- {g}{appname}{rst}\", appname=i))", "def app_list():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n appls_query = Applic.query(ancestor = base_key).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n userid = user.user_id()\n #return userid\n appls_query = Applic.query(Applic.user==userid).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n redirect('/')", "def active_jobs():\n\n jobs = Job.get_all_active()\n oneoffs = OneOff.get_all()\n\n job = JobView(None, jobs, oneoffs, True, Job.count() > 0)\n\n add_trello_task_links_to_g()\n\n return render_template(\"jobs.template.html\", page_title=\"Jobs\", job_info=job)", "async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()", "def applications():\n storeapps = APP.config[\"storage\"]\n base_url = request.host_url + \"application/\"\n\n response = {\"applications\": []}\n for application in nativeapps.io.ls(storeapps, r\".*\\.(apk|ipa)$\"):\n tokens = application.decode(\"utf-8\").split(os.path.sep)\n directory = tokens[-2]\n name, version = os.path.basename(directory).split(\"-\", 1)\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n\n link = base_url + \"/\".join(tokens[-3:])\n if application.endswith(\".ipa\"):\n link = \"itms-services://?action=download-manifest&url=\" + \\\n base_url + \"/\".join(tokens[-3:-1]) + \"/\" + \"manifest.plist\"\n\n response[\"applications\"].append({\n \"url\": base_url + \"/\".join(tokens[-3:]),\n \"name\": name,\n \"version\": version,\n \"metadata\": nativeapps.io.readfile(meta_path),\n \"link\": link,\n \"type\": application.split(\".\")[-1],\n })\n return flask.jsonify(response)", "def app_list(request):\n return render(request, 'mdm/app_list.html', {})", "def app_list(self, third_only=False):\n return self.adb.app_list(third_only)", "def view(args):\n print(\"List of all available phonebooks:\")\n for file in glob.glob(\"*.ph\"):\n print(file)", "def get_applications(rest, sessionsArg, option):\n applications = []\n if option == 'heartbeat':\n appsString = rest.get_environment_applications(sessionsArg).strip();\n else:\n appsString = 
rest.get_all_applications().strip();\n rawList = appsString.split('\\n<\\n')\n for raw in rawList:\n if printtrace: print '_' * 20\n if applicationdataok(raw):\n attributes = [a.split(': ')[1] for a in raw.split('\\n')]\n if printtrace: print attributes\n\n a = Application()\n a.sessionId = attributes[0]\n a.nameInEnvironmentView = attributes[1]\n a.fileName = attributes[2]\n a.processString = attributes[3]\n a.discoveryChecks = attributes[4:]\n a.isgeneric = a.nameInEnvironmentView == 'generic application' or a.fileName.find('generic-application') > 0\n if not a.isgeneric:\n applications.append(a)\n return applications", "def listapps(parser):\n\n print('Function List')\n subparsers_actions = [\n # pylint: disable=protected-access\n action for action in parser._actions\n # pylint: disable=W0212\n if isinstance(action, argparse._SubParsersAction)]\n # there will probably only be one subparser_action,\n # but better safe than sorry\n for subparsers_action in subparsers_actions:\n # get all subparsers and print help\n for choice, subparser in subparsers_action.choices.items():\n print(\"Function: '{}'\".format(choice))\n print(subparser.format_help())\n # print(parser.format_help())", "def list(ctx):\n # pylint: disable=redefined-builtin\n _list_apps(ctx.obj['config'], ctx.obj['client'])", "def scrape_recruitment(self):\n d = self.driver\n recruitment_page = self.guildwork_url + '/recruitment'\n d.get(recruitment_page)\n soup = BeautifulSoup(d.page_source, 'lxml')\n apps = soup.find('table', {'id': 'applications'})\n\n all_apps = []\n for row in tqdm(apps.find_all('tr')):\n if not (row.find('th', {'class':'header'})):\n name_field = row.find('a', href=True)\n app_url = self.guildwork_url + name_field.get('href')\n app_name = name_field.text\n app_status = row.find('span',{'class':'label'}).text\n\n # Note that this is only returning information on accepted applications\n if (app_status == 'Accepted'):\n d.get(app_url)\n soup = BeautifulSoup(d.page_source, 'lxml')\n timestamp = soup.find('span', attrs={'data-timestamp': True})['data-timestamp']\n\n app_data = {\n 'url' : app_url,\n 'name' : app_name,\n 'joined' : datetime.datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S'),\n 'lodestone_link' : soup.find('label',text='Lodestone Link').find_next('div').text.strip()\n\n }\n all_apps.append(app_data)\n d.close()\n return all_apps", "def show_applications_toc():\n if not cache.get(APPLICATIONS_TOC_CACHE_KEY):\n from django.utils.importlib import import_module\n from sveedocuments.models import Page\n \n apps_infos = []\n for appname, apptitle, appdesc, appkwargs in settings.PUBLISHED_APPS:\n title = apptitle or appname\n desc = appdesc\n doc_link = appkwargs.get('doc_link', None)\n demo_link = appkwargs.get('demo_link', None)\n download_link = appkwargs.get('download_link', None)\n github_link = None\n \n # Links can be tuple, that is assumed to be passed by a reverse url with first \n # element as url name and second argument as args list\n if doc_link and not isinstance(doc_link, basestring):\n doc_link = reverse(doc_link[0], args=doc_link[1])\n \n if demo_link and not isinstance(demo_link, basestring):\n demo_link = reverse(demo_link[0], args=demo_link[1])\n \n if download_link and not isinstance(download_link, basestring):\n download_link = reverse(download_link[0], args=download_link[1])\n \n # Determine some optionnals urls from a schema where we insert the appname\n if not download_link and appkwargs.get('pypi', False):\n download_link = 
\"http://pypi.python.org/pypi/{0}\".format(appname)\n \n if appkwargs.get('github', False):\n github_link = \"https://github.com/sveetch/{0}\".format(appname)\n if not download_link:\n download_link = \"{0}/tags\".format(github_link)\n \n # Try to get introduction from the module __doc__ attribute\n if not desc:\n try:\n mod = import_module(appname)\n except ImportError:\n pass\n else:\n if mod.__doc__.strip():\n desc = mod.__doc__.strip()\n \n # Try to get some informations from the document Page if it exists\n try:\n page_instance = Page.objects.get(slug=appname)\n except Page.DoesNotExist:\n pass\n else:\n title = page_instance.title\n doc_link = page_instance.get_absolute_url() or doc_link\n \n apps_infos.append({\n 'title': title,\n 'desc': desc,\n 'doc_link': doc_link,\n 'demo_link': demo_link,\n 'download_link': download_link,\n 'github_link': github_link,\n })\n \n cache.set(APPLICATIONS_TOC_CACHE_KEY, {'application_toc': tuple(apps_infos)})\n \n return cache.get(APPLICATIONS_TOC_CACHE_KEY)", "def show(ctx, appeui):\n if '.' in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))" ]
[ "0.6346794", "0.62193936", "0.60555565", "0.59836745", "0.58695585", "0.57646716", "0.57538605", "0.5735169", "0.5733384", "0.5714936", "0.5675769", "0.5643017", "0.56337637", "0.56111324", "0.56018645", "0.5600278", "0.5583996", "0.5577506", "0.5548305", "0.5516825", "0.5475642", "0.5472265", "0.54481107", "0.543254", "0.542418", "0.5401586", "0.53784895", "0.53679264", "0.53317225", "0.5326645" ]
0.6721879
0
Get specific job application as a freelancer.
def get_freelancer_application(self, application_id):
    data = {}

    url = 'contractors/applications/{0}'.format(application_id)
    return self.get(url, data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_freelancer_applications(self, status=None):\n data = {}\n\n if status:\n data['status'] = status\n\n url = 'contractors/applications'\n return self.get(url, data)", "def view_job(options, job_name, client):\n if options.show_events:\n return display_events(client.job_events(job_name))\n\n job_content = client.job(job_name)\n return display.DisplayJobs(options).format_job(job_content)", "def get(self):\n app_info = {\n 'developedBy': 'This app was developed by the Melbourne eResearch Group (www.eresearch.unimelb.edu.au) within the School of Computing and Information Systems (https://cis.unimelb.edu.au) at The University of Melbourne (www.unimelb.edu.au). ',\n 'description': 'The app uses artificial intelligence (convolutional neural networks) to identify the age, gender, and emotion of the people.',\n 'contact': 'https://eresearch.unimelb.edu.au',\n 'developedByHTML': '<p>This app was developed by the Melbourne eResearch Group (<a href=\\\"www.eresearch.unimelb.edu.au\\\" target=\\\"_blank\\\">www.eresearch.unimelb.edu.au</a>) within the School of Computing and Information Systems (<a href=\\\"https://cis.unimelb.edu.au\\\" target=\\\"_blank\\\">https://cis.unimelb.edu.au</a>) at The University of Melbourne (<a href=\\\"www.unimelb.edu.au\\\" target=\\\"_blank\\\">www.unimelb.edu.au</a>).</p>',\n 'descriptionHTML': '<p>The app uses artificial intelligence (convolutional neural networks) to identify the age, gender, and emotion of the people.</p>',\n 'contactHTML': '<p>Please contact us at: <a href=\\\"eresearch.unimelb.edu.au\\\" target=\\\"_blank\\\">eresearch.unimelb.edu.au</a></p>'\n }\n\n return send_json_response(app_info, 200)", "def simplyapply(request, job, resume, mobile=False):\n apply_info = get_apply_info(request)\n if not apply_info['email']:\n if resume.contact and resume.contact.email:\n apply_info['email'] = resume.contact.email\n else:\n apply_info['email'] = 'Not Provided'\n\n apply_info['job_company'] = job.company\n apply_info['job_title'] = job.title\n apply_info['job_location'] = job.location\n apply_info['source'] = job.source if hasattr(job, '_jobpost') else 'Simply Hired' # JBB/Publishers get a different source in the email.\n\n if resume.source == 'Linkedin':\n attachment = get_pdf_resume(resume)\n else:\n # TODO: handle the case where the resume has no content entry.\n content = models.Content.objects.get(resume=resume.id)\n attachment = {}\n mimetypes.init()\n attachment['mimetype'] = mimetypes.guess_type(content.file_name)\n try:\n attachment['raw_resume'] = content.raw_resume.decode('utf-8').encode('latin-1')\n except UnicodeDecodeError:\n attachment['raw_resume'] = content.raw_resume\n attachment['filename'] = content.file_name\n\n subject = u\"Application for {0} at {1}\".format(job.title, job.company)\n send_email('Simply Hired <[email protected]>', job.apply_email, subject, EMAIL_BODY.format(**apply_info), attachment,\n reply_to=resume.contact.email if resume.contact.email else None)\n\n try:\n # JBB job.\n if hasattr(job, '_jobpost'):\n jbb.JobPostMetrics.objects.filter(jobpostid=job._jobpost.jobpostid).update(count_apply_email=F('count_apply_email')+1)\n\n # Log for generic tracking.\n log_apply(request, job, apply_info, attachment, resume, mobile)\n except Exception, msg:\n logger.exception('Error in writing to tracking: %s %s' % (Exception, msg))\n\n if resume.contact.email:\n send_confirmation(resume.contact.email, apply_info)\n\n return", "def get_glitter_app(self, glitter_app_name):\n if not self.discovered:\n 
self.discover_glitter_apps()\n\n try:\n glitter_app = self.glitter_apps[glitter_app_name]\n return glitter_app\n except KeyError:\n return None", "def list_client_applications(self, buyer_team__reference, job_key,\n status=None, profile_key=None,\n agency_team__reference=None,\n order_by=None, page_offset=None,\n page_size=None):\n data = {}\n\n data['buyer_team__reference'] = buyer_team__reference\n data['job_key'] = job_key\n\n if status:\n data['status'] = status\n\n if profile_key:\n data['profile_key'] = profile_key\n\n if agency_team__reference:\n data['agency_team__reference'] = agency_team__reference\n\n if order_by:\n data['order_by'] = order_by\n\n data['page'] = '{0};{1}'.format(page_offset, page_size)\n\n url = 'clients/applications'\n return self.get(url, data)", "def createJobLatest(appName):\n logger.debug('[FLASKWEB /jobs/<appName>] Redirect to current version of /jobs/%s' % appName)\n app = db.getApp(appName)\n if app:\n return createJob(appName, app['uid'])\n else:\n return returnError(\"Application %s does not exist\" % appName, 404)", "def AppGetApp(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def get_job(self, job_id):\n\n try:\n exposure = Job.objects.filter(id=job_id)\n except:\n exposure = None\n\n return exposure", "def get_client_application(self, application_id, buyer_team__reference):\n data = {}\n\n data['buyer_team__reference'] = buyer_team__reference\n\n url = 'clients/applications/{0}'.format(application_id)\n return self.get(url, data)", "def audience(self):\n return \"HealthProfessional\"", "async def get_app(self, name: str) -> Callable:\n return await self.AD.app_management.get_app(name)", "def get_viewjob_url(jk):\n\treturn 'https://www.indeed.co.uk/restaurant-jobs-in-England' + jk", "def follow_gae_application(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"follow_gae_application\")", "def product(self):\n return self.appName", "def getJobName():\n return os.environ['LCATR_JOB']", "def current_app(self) -> str:\n app_id = self.app.get_current() # Returns the application ID (string) of the\n foreground_app = [x for x in self.app.list_apps() if app_id == x[\"id\"]][0]\n return foreground_app['title']", "def get_job(self) -> Dict[Text, Text]:\n request = self._client.projects().jobs().get(name=self._job_name)\n return request.execute()", "def getApp(appName):\n logger.debug('[FLASKWEB /apps/<appName>] GET request for app, `%s`' % appName)\n applist = [a['name'] for a in db.getAllApps()]\n if appName in applist:\n versionList = db.getVersions(appName)\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(name=appName, versions=versionList)), 200\n else:\n return render_template(\"apps.html\", name=appName, versionList=versionList)\n else:\n return returnError(\"Application %s does not exist\" % appName, 404)", "def getJob(appName, jobId):\n jobs = db.getJobs(jobId=jobId)\n job = None if len(jobs) == 0 else jobs[0]\n k3job = dispatcher.getJob(int(jobId))\n\n if job == None:\n return returnError(\"Job ID, %s, does not exist\" % jobId, 404)\n\n thisjob = dict(job, url=dispatcher.getSandboxURL(jobId))\n if k3job != None:\n thisjob['master'] = k3job.master\n local = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, str(jobId)).encode(encoding='utf8', errors='ignore')\n path = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], appName, str(jobId),'role.yaml').encode(encoding='utf8', errors='ignore')\n if os.path.exists(local) and os.path.exists(path):\n with open(path, 'r') 
as role:\n thisjob['roles'] = role.read()\n else:\n return returnError(\"Job Data no longer exists\", 400)\n\n thisjob['sandbox'] = sorted (os.listdir(local))\n\n if 'application/json' in request.headers['Accept']:\n return jsonify(thisjob)\n else:\n return render_template(\"last.html\", appName=appName, lastjob=thisjob)", "def get_job(self) -> CustomJob:\n return self._client.get_custom_job(name=self._job_name)", "def get_job(self, job_reference):\n url = 'jobs/{0}'.format(job_reference)\n result = self.get(url)\n return result.get('job', result)", "def get_job(job_name: str):\n\n job_details = redis_controller.get_job_details(job_name=job_name)\n return job_details", "def getApp(self):\n return self.serviceClass.app", "def get(self):\n api_url = 'https://api.line.me/liff/v1/apps'\n result = requests.get(api_url, headers={\"Authorization\": self._headers[\"Authorization\"]})\n if result.status_code == 401:\n raise ErrorResponse(\"[401 Error] Certification failed.\")\n elif result.status_code == 404:\n raise ErrorResponse(\"[404 Error] There is no LIFF application on the channel.\")\n return json.loads(result.content)['apps']", "def get_job_query(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n mt = getToolByName(self, 'portal_membership') \n currentUser = mt.getAuthenticatedMember() \n \n if \"Site Administrators\" not in currentUser.getGroups():\n\treturn catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job', \t\t\t\t Creator = currentUser.getUserName())\n else: \n return catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job')", "def get(self, email, application_category):", "def get_job_by_id(self, job_id):\n return self.get_resource(category=SYSTEM, resource_level=JOB,\n resource_level_id=job_id)", "def find_app(self, app_name):\n self._app = None\n for p in self.policy_list.response:\n apps = [app for app in p.resource.applications if app.appName == app_name]\n if len(apps) > 0:\n return apps[0]", "def aboutThermAP():\n import webbrowser\n url = os.path.join(progpath, \"ThermAP_presentation.pdf\")\n if platform.system() == \"Darwin\":\n webbrowser._browsers['safari'][1].open(url)\n else:\n webbrowser.open(url)" ]
[ "0.55221313", "0.55001366", "0.5176622", "0.5111231", "0.5085925", "0.5068883", "0.50630915", "0.5051905", "0.5005737", "0.49769104", "0.49479914", "0.4939961", "0.49382383", "0.4904667", "0.48747703", "0.485379", "0.48349792", "0.48300624", "0.48130533", "0.4791802", "0.47848734", "0.47821972", "0.47292155", "0.47095212", "0.46859372", "0.46844262", "0.46838355", "0.4672355", "0.46655983", "0.46625003" ]
0.64061135
0
Edit an existing milestone.
def edit_milestone(self, milestone_id, milestone_description=None, deposit_amount=None, due_date=None, message=None):
    data = {}

    data['milestone_id'] = milestone_id

    if milestone_description:
        data['milestone_description'] = milestone_description

    if deposit_amount:
        data['deposit_amount'] = deposit_amount

    if due_date:
        data['due_date'] = due_date

    if message:
        data['message'] = message

    url = 'fp/milestones/{0}'.format(milestone_id)
    return self.put(url, data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_issue_edit_milestone(self):\n pass", "def update(self):\n\n params = {\n \"title\": self.title,\n \"body\": self.body,\n \"state\": self.state,\n \"labels\": self.labels,\n \"assignees\": self.assignees,\n }\n\n if self.milestone:\n params[\"milestone\"] = self.milestone\n\n resp = yield self.client.request(\n \"{}/issues/{}\".format(self.repo.base_path, self.num),\n params=params, method=\"PATCH\")\n self.c = resp.data\n self.after_sync()\n raise gen.Return(self)", "def edit_task(task_id, form):\n db.task.replace_one(\n {\n '_id': ObjectId(task_id)\n },\n {\n 'title': form.title.data,\n 'description': form.description.data,\n 'status': form.status.data,\n 'priority': form.priority.data,\n 'date_added': datetime.datetime.now().timestamp()\n }\n )", "def milestone(self, milestone_id):\r\n return IssueMilestone(self, milestone_id)", "def edit(self, **kwargs):\n ...", "def edit(tid, tag, description, title, priority, end):\n try:\n manager = Actions()\n if tid is None:\n raise ValueError(\"Input tid!\")\n manager.edit_task(tid,\n title=title,\n tag=tag,\n priority=priority,\n description=description,\n end=end)\n except Exception as e:\n click.echo(e)", "def _get_milestone(self, req):\n\n milestone_id = req.args['id']\n try:\n milestone = Milestone(self.env, milestone_id)\n except ResourceNotFound:\n milestone = None\n\n return milestone", "def test_issue_get_milestone(self):\n pass", "def process_milestone_on_project(self,\n project,\n old_milestone_name,\n new_milestone):\n self.logging.debug(\n 'Retrieving closed milestone %s..',\n old_milestone_name\n )\n old_milestone = project.getMilestone(name=old_milestone_name)\n\n if old_milestone:\n self.logging.debug(\n 'Retrieving bugs for closed milestone %s..',\n old_milestone.name\n )\n\n old_bugs = old_milestone.searchTasks(\n status=self.statuses,\n importance=self.bugs_importance\n )\n\n bugs_num = len(old_bugs)\n self.logging.debug('Got %s bugs..', bugs_num)\n\n self.get_stats()[project.name][old_milestone_name] = {\n 'total': bugs_num,\n 'migrated': 0\n }\n\n for bug in old_bugs:\n if self.is_limit_achived():\n break\n self.logging.debug(\"Bug #%s %s [%s]\",\n bug.bug.id,\n bug.bug.title[0:80] + ('' if len(bug.bug.title) < 80 else '...'),\n bug.web_link)\n if self.is_targeted_for_maintenance(bug):\n self.process_mtn_bug(\n bug, project.name, old_milestone_name, new_milestone\n )\n else:\n self.process_not_mtn_bug(\n bug, project.name, old_milestone_name, new_milestone\n )\n self.logging.debug(\"\")\n else:\n self.logging.debug(\n \"Closed milestone %s wasn't found. 
Skipped..\",\n old_milestone_name\n )", "def test_issue_create_milestone(self):\n pass", "def edit():", "def milestone(self, milestone_id):\r\n return milestones.Milestone(self, milestone_id)", "def test_issue_delete_milestone(self):\n pass", "def edit(self):\n\n pass", "def edit_task(self,tid, **kwargs):\n self.task_controller.edit(tid, **kwargs)", "def test_delete_milestone(self):\n milestone1_startdate = timezone.datetime(2020, 9, 1).date()\n milestone1_targetdate = timezone.datetime(2020, 9, 20).date()\n kippomilestone_1 = KippoMilestone(\n project=self.project,\n title=\"test milestone 1\",\n start_date=milestone1_startdate,\n target_date=milestone1_targetdate,\n )\n kippomilestone_1.save()\n\n # assign milestone to tasks\n self.task1.milestone = kippomilestone_1\n self.task1.save()\n task1_id = self.task1.id\n\n # delete milestone\n kippomilestone_1.delete()\n\n # confirm task still exists\n self.assertTrue(KippoTask.objects.filter(id=task1_id).exists())", "def get_updates_milestone_for(self, milestone_name, project_name):\n updates_milestone_name = milestone_name + '-updates'\n\n project = self.get_lp_client().projects[project_name]\n milestone = project.getMilestone(name=updates_milestone_name)\n\n if not milestone:\n self.logging.error(\n \"Can't find the milestone '%s' on project '%s'.\",\n updates_milestone_name,\n project_name\n )\n\n return None\n\n return milestone", "def edit_project(project_id):\n \n if 'username' in session: \n project = mongo.db.projects.find_one_or_404(\n {'_id': ObjectId(project_id)})\n form=ProjectForm()\n form.title.data = project['title']\n form.status.data = project['status']\n form.deadline.data = project['deadline'].strftime('%d/%m/%Y')\n form.brief.data = project['brief']\n form.note.data = project['note']\n return render_template('pages/editproject.html', form=form, project=project, legend='Edit your project')", "def addOne(self):\n m = self.request.get('milestone')\n self._createMilestone(m)\n return self.request.response.redirect(self.context.absolute_url())", "def test_issue_edit_issue(self):\n pass", "def edit_task():\n # get task label from user\n responses = accept_inputs([\"Task label\"])\n label = responses[\"Task label\"]\n # check for existence of task\n results = query_with_results(\"select * from task where label = ?\", [label])\n if len(results) == 0:\n print(\"No task found with label '%s'.\" % label)\n return\n # the task exists, so ask the user for the new description\n responses = accept_inputs([\"New description\"])\n # update db\n query_no_results(\"update task set description = ? 
where label = ?\", [responses[\"New description\"], label])\n print(\"Task with label '%s' updated.\" % label)", "def backlog_milestone():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"project\", help=\"name of the project\")\n parser.add_argument(\"milestone\", help=\"name of the milestone\")\n\n args = parser.parse_args()\n\n session = GithubSession()\n\n project_data = session.get_project(args.project)\n\n milestone_data = session.get_milestone(args.milestone)\n milestone_title = milestone_data[\"title\"]\n\n backlog_data = session.get_column(project_data, \"backlog\")\n icebox_data = session.get_column(project_data, \"icebox\")\n\n results = session.search(f'repo:openslate/openslate milestone:\"{milestone_title}\"')\n for search_data in results[\"items\"]:\n issue_data = get_issue(search_data[\"number\"]).issue\n issue_card = session.get_card(project_data, issue_data)\n\n if issue_card[\"column_url\"] == icebox_data[\"url\"]:\n session.move_card(issue_card, backlog_data)\n\n print(\".\", end=\"\")", "def edit(self):\n if not self.context.model.is_editable():\n raise Unauthorized(\"Editing is not allowed\")\n\n title = self.request.get('title')\n if not title:\n return JSONResponse(self.request).error(\n _('agenda_item_update_empty_string',\n default=u\"Agenda Item title must not be empty.\")).proceed().dump()\n\n title = title.decode('utf-8')\n if self.agenda_item.has_proposal:\n if len(title) > ISubmittedProposal['title'].max_length:\n return JSONResponse(self.request).error(\n _('agenda_item_update_too_long_title',\n default=u\"Agenda Item title is too long.\")\n ).proceed().dump()\n\n self.agenda_item.set_title(title)\n return JSONResponse(self.request).info(\n _('agenda_item_updated',\n default=u\"Agenda Item updated.\")).proceed().dump()", "def add_target_to_bug(self, bug, project_name, new_milestone):\n ms_name = self.get_new_milestone_name()\n\n tasks = bug.related_tasks\n # check if already targeted\n if any(self.bug_milestone_name(task) == ms_name for task in tasks):\n return False\n\n print(ms_name, [self.bug_milestone_name(task) for task in tasks])\n old_status = bug.status\n old_importance = bug.importance\n old_assignee = bug.assignee\n self.logging.debug(\"Add milestone %s, status %s, importance %s, assignee %s\",\n new_milestone.name, old_status, old_importance,\n old_assignee.name if old_assignee else 'Unassigned')\n if self.is_debug():\n return False\n try:\n target = bug.bug.addTask(target=new_milestone.series_target)\n\n target.milestone = new_milestone\n target.status = old_status\n target.importance = old_importance\n target.assignee = old_assignee\n\n target.lp_save()\n except Exception as exc: # pylint: disable=W0703\n self.logging.error(\n \"Can't save target milestone '%s' for bug #%s : %s\",\n self.get_new_milestone_name(),\n bug.bug.id,\n exc\n )\n # self.logging.exception(exc)\n raise\n return True\n return False", "def process_mtn_bug(self,\n bug,\n project_name,\n old_milestone_name,\n new_milestone):\n updates_milestone = self.get_updates_milestone_for(\n old_milestone_name,\n project_name\n )\n\n if not updates_milestone:\n return None\n\n errors = self.add_target_to_bug(bug, project_name, new_milestone)\n\n self.logging.debug(\"Set milestone %s\", updates_milestone.name, )\n if not self.is_debug() and not errors:\n bug.milestone = updates_milestone\n\n try:\n bug.lp_save()\n except Exception as exc: # pylint: disable=W0703\n errors = True\n self.logging.error(\n \"Can't target bug for maintenance '%s' for bug #%s : %s\",\n 
old_milestone_name + '-updates',\n bug.bug.id,\n exc\n )\n self.logging.exception(exc)\n\n if not errors:\n self.get_stats()[project_name][old_milestone_name]['migrated'] += 1\n self.increase_proccessed_issues()\n else:\n self.logging.error(\"Can't reassign the bug #%s.\", bug.bug.id)", "def on_milestone(self, payload):\n pass", "def edit_time_spent(entry):\n entry.time_spent = get_minutes()\n entry.save()\n input(\"Edit successful. \")\n return entry", "def edit(request):\n issue = request.issue\n base = issue.base\n\n if request.method != 'POST':\n reviewers = [models.Account.get_nickname_for_email(reviewer,\n default=reviewer)\n for reviewer in issue.reviewers]\n ccs = [models.Account.get_nickname_for_email(cc, default=cc)\n for cc in issue.cc]\n form = EditLocalBaseForm(initial={'subject': issue.subject,\n 'description': issue.description,\n 'base': base,\n 'reviewers': ', '.join(reviewers),\n 'cc': ', '.join(ccs),\n 'closed': issue.closed,\n 'private': issue.private,\n })\n return respond(request, 'edit.html', {\n 'issue': issue,\n 'form': form,\n 'offer_delete': (issue.owner == request.user\n or auth_utils.is_current_user_admin())\n })\n\n form = EditLocalBaseForm(request.POST)\n\n if form.is_valid():\n reviewers = _get_emails(form, 'reviewers')\n\n if form.is_valid():\n cc = _get_emails(form, 'cc')\n\n if not form.is_valid():\n return respond(request, 'edit.html', {'issue': issue, 'form': form})\n cleaned_data = form.cleaned_data\n\n was_closed = issue.closed\n issue.subject = cleaned_data['subject']\n issue.description = cleaned_data['description']\n issue.closed = cleaned_data['closed']\n issue.private = cleaned_data.get('private', False)\n base_changed = (issue.base != base)\n issue.base = base\n issue.reviewers = reviewers\n issue.cc = cc\n if base_changed:\n for patchset in issue.patchsets:\n ndb.transaction(lambda: _delete_cached_contents(list(patchset.patches)))\n issue.calculate_updates_for()\n issue.put()\n\n return HttpResponseRedirect(reverse(show, args=[issue.key.id()]))", "def edit(request, observation_id, summary_id):\n\n if request.method == 'POST':\n if observation_id and summary_id:\n o = get_object_or_404(models.Observations, pk=observation_id)\n o.summary_id = summary_id\n form = Observation(request.POST,instance=o)\n else:\n form = Observation(request.POST)\n if form.is_valid():\n form.save()\n return render_to_response(\"obsform_form.html\",\n {'form': form,\n 'success' : 'Your observation was saved'},\n context_instance=RequestContext(request))\n else:\n o = get_object_or_404(models.Observations, pk=observation_id)\n o.summary_id = summary_id\n\n form = Observation(instance=o)\n\n return render_to_response('obsform_form.html', {'form' : form},\n context_instance=RequestContext(request))", "def edit_announcement():\n # Implement me!\n\n announcement = get_announcement(request.vars.announcement_id, auth.user.email)\n\n announcement.description = request.vars.description\n announcement.name = request.vars.name\n announcement.updated_on = datetime.datetime.utcnow()\n announcement.update_record()\n return response.json(announcement)" ]
[ "0.7369814", "0.6184584", "0.60975057", "0.60197014", "0.592604", "0.5918857", "0.5915418", "0.5914129", "0.5884606", "0.58582073", "0.5761563", "0.5744777", "0.572514", "0.5671504", "0.56410825", "0.56336033", "0.5633136", "0.5629405", "0.56153303", "0.5606772", "0.5602144", "0.55984503", "0.556675", "0.55516744", "0.5519743", "0.5496862", "0.54797214", "0.5474188", "0.54471827", "0.5439245" ]
0.76799893
0
Activates a milestone that has not been funded.
def activate_milestone(self, milestone_id, message=None):
    data = {}

    data['milestone_id'] = milestone_id

    if message:
        data['message'] = message

    url = 'fp/milestones/{0}/activate'.format(milestone_id)
    return self.put(url, data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_milestone(self, payload):\n pass", "def set_milestone(self):\n\t\te = Event()\n\t\tself.queue.put(e)\n\t\treturn e", "def add_target_to_bug(self, bug, project_name, new_milestone):\n ms_name = self.get_new_milestone_name()\n\n tasks = bug.related_tasks\n # check if already targeted\n if any(self.bug_milestone_name(task) == ms_name for task in tasks):\n return False\n\n print(ms_name, [self.bug_milestone_name(task) for task in tasks])\n old_status = bug.status\n old_importance = bug.importance\n old_assignee = bug.assignee\n self.logging.debug(\"Add milestone %s, status %s, importance %s, assignee %s\",\n new_milestone.name, old_status, old_importance,\n old_assignee.name if old_assignee else 'Unassigned')\n if self.is_debug():\n return False\n try:\n target = bug.bug.addTask(target=new_milestone.series_target)\n\n target.milestone = new_milestone\n target.status = old_status\n target.importance = old_importance\n target.assignee = old_assignee\n\n target.lp_save()\n except Exception as exc: # pylint: disable=W0703\n self.logging.error(\n \"Can't save target milestone '%s' for bug #%s : %s\",\n self.get_new_milestone_name(),\n bug.bug.id,\n exc\n )\n # self.logging.exception(exc)\n raise\n return True\n return False", "def test_issue_edit_milestone(self):\n pass", "def milestones(self, milestones):\n\n self._milestones = milestones", "def begin_not_undoable_action(self):\n self.not_undoable_action = True", "def reactivate(self):\n self.write({'active': True, 'state': 'running'})\n STAGE = self.env['anytracker.stage']\n for ticket in self:\n starts = STAGE.search([('method_id', '=', ticket.method_id.id),\n ('progress', '=', 0)])\n if len(starts) != 1:\n raise except_orm(\n _('Configuration error !'),\n _('One and only one stage should have a 0% progress'))\n # write stage in a separate line to recompute progress & risk\n ticket.write({'stage_id': starts[0].id})\n self.recompute_parents()", "def test_issue_create_milestone(self):\n pass", "def test_milestone_due_unset_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('milestone due milestone2 \"%s\"' % self._test_date)\n self._execute('milestone due milestone2 \"\"')\n rv, output = self._execute('milestone list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def deny(self):\n self.quest_node['completed_by'] = ''\n self.completed_by = None\n self.active = True\n self.quest_node['active'] = True\n graph.push(self.quest_node)", "def set_task_not_started(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 2)\n\n # Refresh the table\n self.write_tasks_table()", "def setIsNotNewInstallation(self):\n assert self.notify.debugStateCall(self, 'loginFSM', 'gameFSM')\n launcher.setIsNotNewInstallation()", "def pending(self):\n self.state = Step.State.PENDING", "def open_mstone(request):\n if (request.method == \"POST\" and\n 'ms' in request.POST and\n request.user.has_perm('shipping.can_open')):\n try:\n mstone = Milestone.objects.get(code=request.POST['ms'])\n mstone.status = 1\n # XXX create event\n mstone.save()\n except:\n pass\n return HttpResponseRedirect(reverse('shipping.views.milestones'))", "def attempt_to_acquire_leader(self, permanent=False):", "def addOne(self):\n m = self.request.get('milestone')\n self._createMilestone(m)\n return self.request.response.redirect(self.context.absolute_url())", "def test_delete_milestone(self):\n milestone1_startdate = timezone.datetime(2020, 9, 
1).date()\n milestone1_targetdate = timezone.datetime(2020, 9, 20).date()\n kippomilestone_1 = KippoMilestone(\n project=self.project,\n title=\"test milestone 1\",\n start_date=milestone1_startdate,\n target_date=milestone1_targetdate,\n )\n kippomilestone_1.save()\n\n # assign milestone to tasks\n self.task1.milestone = kippomilestone_1\n self.task1.save()\n task1_id = self.task1.id\n\n # delete milestone\n kippomilestone_1.delete()\n\n # confirm task still exists\n self.assertTrue(KippoTask.objects.filter(id=task1_id).exists())", "async def async_turn_on_when_active(self, **kwargs: Any) -> None:\n await self._data.controller.programs.start(self.entity_description.uid)\n self._update_activities()", "def reset_continued(self): \n self._recent_goal_continued = False\n self._update_action = False\n self._update_action_without_pause = False", "def test_milestone_due_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('milestone due milestone2 \"%s\"' % self._test_date)\n rv, output = self._execute('milestone list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def update_activation(self) -> None:\n if self.forced:\n return\n self.units.update_activation()", "def test_issue_delete_milestone(self):\n pass", "def applyLock(self, pkmn):\n pkmn.actionLock = ActionLock(pkmn, \\\n pkmn.lastAction, self.turns-1)", "def start(self):\n\t\tpm = PendingMission(mission_id=self.mission_id, kingdom_id=self.kingdom_id)\n\t\tpm.save()\n\t\t\n\t\treturn pm", "def change_abandoned(self, event):\n pass", "def mark_as_not_done(self):\n grade_event = {'value': 0, 'max_value': self.points}\n self.runtime.publish(self, 'grade', grade_event)", "async def async_turn_on(self, **kwargs: Any) -> None:\n if not self.coordinator.data[self.entity_description.uid][\"active\"]:\n self._attr_is_on = False\n self.async_write_ha_state()\n raise HomeAssistantError(\n f\"Cannot turn on an inactive program/zone: {self.name}\"\n )\n\n await self.async_turn_on_when_active(**kwargs)", "def action_lock(self):\n self.state = 'locked'", "def test_milestone_remove_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('milestone remove milestone3')\n rv, output = self._execute('milestone list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_issue_get_milestone(self):\n pass" ]
[ "0.57851166", "0.5676139", "0.55553406", "0.54831004", "0.5425825", "0.53606015", "0.5351348", "0.53409654", "0.53227174", "0.5304007", "0.53013045", "0.5250818", "0.5250497", "0.5195492", "0.5162674", "0.5126358", "0.5120708", "0.51070344", "0.5075711", "0.50711673", "0.5066265", "0.50573087", "0.50059164", "0.5004741", "0.5000212", "0.49924666", "0.49615481", "0.4955564", "0.49479225", "0.49373507" ]
0.6922214
0