Dataset columns:
query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
metadata: dict
negatives: sequence (length 30)
negative_scores: sequence (length 30)
document_score: string (lengths 4 to 10)
document_rank: string (2 classes)
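Each record pairs one query with a positive document and thirty hard negatives with their scores; document_score and document_rank presumably describe the positive document itself, and the metadata field marks the (query, document, negatives) triplet objective. The sketch below is a hypothetical illustration of how records with this schema could be flattened into training triples; the helper name iter_triplets and the abbreviated demo record are assumptions for illustration, not part of the dataset.

def iter_triplets(records):
    # Yield (query, positive_document, hard_negative, negative_score) tuples.
    # Assumes negatives and negative_scores are aligned index by index.
    for rec in records:
        for neg, score in zip(rec["negatives"], rec["negative_scores"]):
            yield rec["query"], rec["document"], neg, float(score)

# Abbreviated demo record mirroring the columns above (values shortened from
# the first record listed below).
demo = [{
    "query": "Gets the screenshots of this Listing.",
    "document": "def screenshots(self): return self._screenshots",
    "negatives": ["def get_snapshots(self) -> SnapshotListing: return self.snapshots"],
    "negative_scores": ["0.6174978"],
    "document_score": "0.7933375",
    "document_rank": "0",
}]

for triple in iter_triplets(demo):
    print(triple)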
query: Gets the screenshots of this Listing. Screenshots of the listing.
document: def screenshots(self): return self._screenshots
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_snapshots(self) -> SnapshotListing:\n return self.snapshots", "def get_screenshot(self):\n method_name = self._testMethodName\n class_name = type(self).__name__\n time_now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n folder = os.path.dirname(os.getcwd())\n directory = \"\".join([folder, \"/test-results/\", class_name])\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n file_name = \"%s/%s - %s.png\" % (directory, time_now, method_name)\n\n self.driver.get_screenshot_as_file(file_name)\n print \"[[ATTACHMENT|%s]]\" % file_name\n print \"current url - %s\" % self.driver.current_url", "def getScreenList(self, verbose = False):\n return execCmd(\"%s -list\" % self._screenPath, verbose)", "def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res", "def image_list(self):\n return self._image_list", "def measure_screen(self):\n outputs = self._get_images()\n\n if self.save_image_flag:\n self.save_images(outputs)\n return outputs", "def imageList(self):\n return self.__imageList", "def get_screenshots(miscobj):\n\n imagedir = misctools.get_screenshots_dir(miscobj)\n if imagedir:\n return mark_safe(htmltools.get_screenshots(imagedir))\n else:\n return None", "def GetImageList(self):\r\n\r\n return self._imageList", "def thumbnails(self):\n return self._thumbnails", "def get_images(self):\n \n return self.img_lst", "def list(self):\n r = self.target.ttbd_iface_call(\"images\", \"list\", method = \"GET\")\n return r['result']", "def get_snapshot_list(self, base, snappref=\"SPECTRA_\"):\n #print('Looking for spectra in', base)\n powerspectra = FluxPower(maxk=self.max_k)\n for snap in range(30):\n snapdir = os.path.join(base,snappref+str(snap).rjust(3,'0'))\n #We ran out of snapshots\n if not os.path.exists(snapdir):\n snapdir = os.path.join(base,\"PART_\"+str(snap).rjust(3,'0'))\n if not os.path.exists(snapdir):\n snapdir = os.path.join(base, \"snap_\"+str(snap).rjust(3,'0'))\n if not os.path.exists(snapdir):\n continue\n #We have all we need\n if powerspectra.len() == np.size(self.zout):\n break\n try:\n ss = self._get_spectra_snap(snap, base)\n# print('Found spectra in', ss)\n if ss is not None:\n powerspectra.add_snapshot(snap,ss)\n except IOError:\n print(\"Didn't find any spectra because of IOError\")\n continue\n #Make sure we have enough outputs\n if powerspectra.len() != np.size(self.zout):\n raise ValueError(\"Found only\",powerspectra.len(),\"of\",np.size(self.zout),\"from snaps:\",powerspectra.snaps)\n return powerspectra", "def get_shots(self, project_id=None):\n\n project_id = project_id if project_id is not None else self.project_id\n shots = self.client.GET(\"/projects/{}/shots\".format(project_id))\n return shots.json()", "def getDisplaysAsImages():\n\ttry:\n\t\trects = getDisplayRects()\n\texcept RectFailed as e:\n\t\traise GrabFailed(\"Error during getDisplayRects: \" + str(e))\n\t# im has an origin at (0, 0) in the top-left corner of the virtual screen,\n\t# but our `rect`s have a (0, 0) origin in the top-left corner of the main\n\t# display. 
So we normalize all coordinates in the rects to be >= 0.\n\tnormalizedRects = normalizeRects(rects)\n\tim = getScreenAsImage()\n\n\treturn list(im.crop(rect) for rect in normalizedRects)", "def list_images(self):\n raise NotImplementedError()", "def grab_hp_images(screenshot):\n return [screenshot.crop((230, 208, 400, 254)),\n screenshot.crop((230, 316, 400, 362)),\n screenshot.crop((230, 424, 400, 470)),\n screenshot.crop((230, 532, 400, 578)),\n screenshot.crop((230, 640, 400, 686)),\n screenshot.crop((230, 748, 400, 794)),\n screenshot.crop((230, 856, 400, 902)),\n screenshot.crop((230, 964, 400, 1010))]", "def getImageList(self):\n ps = getToolByName(self.context, 'portal_skins')\n folder = self.context.unrestrictedTraverse('/'.join(ps.getPhysicalPath()) + '/custom-logos')\n return folder.values()", "def screenshots(self, screenshots):\n self._screenshots = screenshots", "def getSnapshots(self):\n snapshots = []\n for x in self.root.goto('CommonDataObjects/Attachments'):\n for y in x.getList():\n if y['name'] == 'Video Snapshot':\n self.f.seek(y['bidx'])\n blk = Block(self.f)\n sx = blk.goto('res_x').getLong()\n sy = blk.goto('res_y').getLong()\n raw = blk.goto(\"imagedata\").value\n data = zlib.decompress(raw)\n I = np.flipud(np.array(struct.unpack(\"<\" + str(3 * sx * sy) + \"B\", data)).reshape((sy, sx, 3)))\n snapshots.append(I)\n del blk\n return snapshots", "def get_list(self ):\n headers = { 'Authorization' : self.client.authorization_header }\n response = requests.get(\n self.client.url + '/media', \n headers = headers\n )\n\n return json.loads(response.text)", "def images(self):\n return self.gameimage_set.all()", "def getScreenshot(self):\n cmdId = self.executeCommand(Command.SCREENSHOT)\n return cmdId", "def find_screenshots():\n # Inside SCREENSHOT_DIR, there should be 1 folder with a\n # random name which contains the user's puzzles. Just\n # attempt to modify a screenshot in each of the directories\n # in the folder.\n for folder in os.listdir(SCREENSHOT_DIR):\n full_path = os.path.join(SCREENSHOT_DIR, folder)\n if os.path.isdir(full_path):\n # The screenshot to modify is untitled.jpg\n screenshot = os.path.join(full_path, 'untitled.jpg')\n if os.path.isfile(screenshot):\n yield screenshot", "def GetImageList(self):\r\n\r\n return self._imageListNormal", "def showSnapshots(self):\n from .utils import sp\n s = self.getSnapshots()\n ax = sp(len(s))\n for i, S in enumerate(s):\n ax[i].imshow(S)", "def list(self, detailed=True, search_opts=None, marker=None, limit=None,\n sort=None):\n resource_type = \"snapshots\"\n url = self._build_list_url(resource_type, detailed=detailed,\n search_opts=search_opts, marker=marker,\n limit=limit, sort=sort)\n return self._list(url, resource_type, limit=limit)", "def grab_big_hp_images(screenshot):\n return [screenshot.crop((123, 160, 400, 254)),\n screenshot.crop((123, 268, 400, 362)),\n screenshot.crop((123, 376, 400, 470)),\n screenshot.crop((123, 484, 400, 578)),\n screenshot.crop((123, 592, 400, 686)),\n screenshot.crop((123, 700, 400, 794)),\n screenshot.crop((123, 808, 400, 902)),\n screenshot.crop((123, 916, 400, 1010))]", "def list_images():\n return json_response(list_manifests())", "def get_image_links(data):\n painting_links = []\n\n print(data)\n\n for painting in data:\n painting_links.append(painting['image'])\n\n return painting_links" ]
[ "0.6174978", "0.6116951", "0.6091908", "0.6024215", "0.6005619", "0.59941226", "0.59864277", "0.5935582", "0.59189415", "0.59039044", "0.58653", "0.5848835", "0.58187425", "0.5804207", "0.57708025", "0.576877", "0.5758277", "0.57075787", "0.5696212", "0.569035", "0.5667706", "0.5664595", "0.56131214", "0.5601428", "0.5597842", "0.55875075", "0.55192447", "0.55149627", "0.5508581", "0.54976845" ]
document_score: 0.7933375
document_rank: 0
query: Sets the screenshots of this Listing. Screenshots of the listing.
document: def screenshots(self, screenshots): self._screenshots = screenshots
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def screenshots(self):\n return self._screenshots", "def configure_screenshots(scenario):\r\n world.auto_capture_screenshots = False", "def SetImageList(self, imageList):\r\n\r\n self._imageList = imageList", "def set_screens(screen_list):\n global screens, screen_manager\n screens = screen_list\n for s in screen_list:\n screen_manager.add_widget(s)", "def thumbnails(self, value):\n self._thumbnails = value", "def AssignImageList(self, imageList):\r\n\r\n self.SetImageList(imageList)", "def deviceScreenshot(self, event):\n\n self.screenshotbutton.Disable()\n\n deviceModel, deviceID = self.getDevices()\n deviceIDModel = []\n\n if not deviceModel or not deviceID:\n self.SetStatusText(\"No Android devices detected\")\n self.screenshotbutton.Enable()\n return\n\n for everyi in deviceModel:\n for everym in deviceID:\n deviceIDModel = [everym + \" \" + everyi]\n\n try:\n dialog = wx.MultiChoiceDialog(self, \"Pick your devices\", \"caption\", deviceIDModel, wx.OK | wx.CANCEL)\n except UnboundLocalError:\n self.SetStatusText(f\"No Devices Found\")\n self.screenshotbutton.Enable()\n return\n\n instance = dialog.ShowModal()\n devices = dialog.GetSelections()\n\n listLength = len(devices)\n dialog.Destroy()\n\n if instance == wx.ID_OK:\n for i in range(listLength):\n self.SetStatusText(f\"Screenshotting {deviceModel[i]} {i+1}/{listLength}\")\n subprocess.call(f\"adb -s {deviceID[i]} shell screencap /sdcard/{deviceModel[i]}.png\", creationflags=self.createNoWindow)\n subprocess.call(fr\"adb -s {deviceID[i]} pull /sdcard/{deviceModel[i]}.png C:\\ADBscripts\\PhoneScreenshots\", creationflags=self.createNoWindow)\n\n if listLength > 1:\n self.SetStatusText(f\"Took {listLength} Screenshots\")\n else:\n self.SetStatusText(f\"Took {listLength} Screenshot\")\n\n self.screenshotbutton.Enable()", "def mod_screenshots():\n mod_type = CONF['screenshot_type', 'PETI'].lower()\n\n if mod_type == 'cust':\n LOGGER.info('Using custom screenshot!')\n scr_loc = CONF['screenshot', '']\n elif mod_type == 'auto':\n LOGGER.info('Using automatic screenshot!')\n scr_loc = None\n # The automatic screenshots are found at this location:\n auto_path = os.path.join(\n '..',\n GAME_FOLDER.get(CONF['game_id', ''], 'portal2'),\n 'screenshots'\n )\n # We need to find the most recent one. If it's named\n # \"previewcomplete\", we want to ignore it - it's a flag\n # to indicate the map was playtested correctly.\n try:\n screens = [\n os.path.join(auto_path, path)\n for path in\n os.listdir(auto_path)\n ]\n except FileNotFoundError:\n # The screenshot folder doesn't exist!\n screens = []\n screens.sort(\n key=os.path.getmtime,\n reverse=True,\n # Go from most recent to least\n )\n playtested = False\n for scr_shot in screens:\n filename = os.path.basename(scr_shot)\n if filename.startswith('bee2_playtest_flag'):\n # Previewcomplete is a flag to indicate the map's\n # been playtested. It must be newer than the screenshot\n playtested = True\n continue\n elif filename.startswith('bee2_screenshot'):\n continue # Ignore other screenshots\n\n # We have a screenshot. Check to see if it's\n # not too old. 
(Old is > 2 hours)\n date = datetime.fromtimestamp(\n os.path.getmtime(scr_shot)\n )\n diff = datetime.now() - date\n if diff.total_seconds() > 2 * 3600:\n LOGGER.info(\n 'Screenshot \"{scr}\" too old ({diff!s})',\n scr=scr_shot,\n diff=diff,\n )\n continue\n\n # If we got here, it's a good screenshot!\n LOGGER.info('Chosen \"{}\"', scr_shot)\n LOGGER.info('Map Playtested: {}', playtested)\n scr_loc = scr_shot\n break\n else:\n # If we get to the end, we failed to find an automatic\n # screenshot!\n LOGGER.info('No Auto Screenshot found!')\n mod_type = 'peti' # Suppress the \"None not found\" error\n\n if srctools.conv_bool(CONF['clean_screenshots', '0']):\n LOGGER.info('Cleaning up screenshots...')\n # Clean up this folder - otherwise users will get thousands of\n # pics in there!\n for screen in screens:\n if screen != scr_loc:\n os.remove(screen)\n LOGGER.info('Done!')\n else:\n # PeTI type, or something else\n scr_loc = None\n\n if scr_loc is not None and os.path.isfile(scr_loc):\n # We should use a screenshot!\n for screen in find_screenshots():\n LOGGER.info('Replacing \"{}\"...', screen)\n # Allow us to edit the file...\n utils.unset_readonly(screen)\n shutil.copy(scr_loc, screen)\n # Make the screenshot readonly, so P2 can't replace it.\n # Then it'll use our own\n utils.set_readonly(screen)\n\n else:\n if mod_type != 'peti':\n # Error if we were looking for a screenshot\n LOGGER.warning('\"{}\" not found!', scr_loc)\n LOGGER.info('Using PeTI screenshot!')\n for screen in find_screenshots():\n # Make the screenshot writeable, so P2 will replace it\n LOGGER.info('Making \"{}\" replaceable...', screen)\n utils.unset_readonly(screen)", "def _set_screenshot_dir(self) -> None:\r\n self.screenshot_dir = Path(\r\n f\"distbot/screenshots_{self.start_time.strftime('%Y-%m-%d_%H:%M:%S')}\")\r\n self.screenshot_dir.mkdir(exist_ok=True)", "def showSnapshots(self):\n from .utils import sp\n s = self.getSnapshots()\n ax = sp(len(s))\n for i, S in enumerate(s):\n ax[i].imshow(S)", "def screenshot(self):\n self.context.draw.window.screenshot(self.filename)", "def set_local_screenshot_folder(self, folder):\n self.use_local_screenshot = True\n self.local_screenshot_iter = iter(os.scandir(folder))", "def populateImagesList(self):\n \n self._gui_server.getImagesList(self._populateImagesList)", "def manifestations_shoots(self, manifestations_shoots):\n\n self._manifestations_shoots = manifestations_shoots", "def SetImageList(self, imageList):\r\n\r\n if self._ownsImageListNormal:\r\n del self._imageListNormal\r\n \r\n self._imageListNormal = imageList\r\n self._ownsImageListNormal = False\r\n self._dirty = True\r\n \r\n # Don't do any drawing if we're setting the list to NULL,\r\n # since we may be in the process of deleting the tree control.\r\n if imageList:\r\n self.CalculateLineHeight()\r\n\r\n # We gray out the image list to use the grayed icons with disabled items\r\n sz = imageList.GetSize(0)\r\n self._grayedImageList = wx.ImageList(sz[0], sz[1], True, 0)\r\n\r\n for ii in xrange(imageList.GetImageCount()):\r\n bmp = imageList.GetBitmap(ii)\r\n newbmp = MakeDisabledBitmap(bmp)\r\n self._grayedImageList.Add(newbmp)", "def AssignImageList(self, imageList):\r\n\r\n self.SetImageList(imageList)\r\n self._ownsImageListNormal = True", "def grabScreenshot(self):\n\n self.griddButton.setVisible(True)\n self.mirrorButton.setVisible(True)\n self.blurButton.setVisible(True)\n self.display1Button.setVisible(True)\n self.display2Button.setVisible(True)\n self.tutorialLabel.setVisible(False)\n\n print 
(\"Grabbing Screenshot\")\n print (\"Showing Buttons now\")\n\n with mss() as sct:\n monitor = sct.monitors[1]\n sct_img = sct.grab(monitor)\n # Convert to PIL/Pillow Image\n screenshots = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')\n screenshots.save(self.firstScreen, \"PNG\")\n\n # 2nd Display Screen shot\n\n monitor = sct.monitors[2]\n sct_img = sct.grab(monitor)\n # Convert to PIL/Pillow Image\n screenshots = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')\n screenshots.save(self.secondScreen, \"PNG\")\n self.photo.setPixmap(QtGui.QPixmap(self.firstScreen))\n self.statustext.setText(\"Added display 1 as work display for now\")\n self.ActivePhoto = \"Screenshot1.png\" # Set Photo as display 1 so we dont get callstack error when mirrroring", "def populateShots(current, *args):\n cmds.textScrollList(widgets[\"shotListTSL\"], e=True, ra=True)\n clearAll()\n\n shotList = cFuncs.getProjectShotList(current)\n shotExclude = [\"master\", \"prepro\", \"launchExample\", \".mayaSwatches\", \".directory\", \"houdiniDev\"]\n\n if shotList:\n shotList.sort()\n for shot in shotList:\n if shot not in shotExclude:\n shotFolder = cFuncs.fixPath(os.path.join(current, \"shots\", shot))\n cmds.textScrollList(widgets[\"shotListTSL\"], e=True, a=shot, sc = updateShotInfo, dcc=openShotFolder)\n\n populateMasteredAssets()", "def images(self, **kwargs):\n\n raise NotImplementedError", "def face_snaps(self, face_snaps):\n\n self._face_snaps = face_snaps", "def export_screenshot(self):\n\n if self.vis_type is None or len(self.vis_type) < 1:\n vis_type_suffix = ''\n else:\n vis_type_suffix = self.vis_type\n\n print(\"exporting screenshot for {}\".format(self.current_unit_id))\n ss_out_file = self.screenshot_dir / \"{}_{}_{}.{}\".format(\n self.current_unit_id, vis_type_suffix,\n cfg.screenshot_suffix, cfg.screenshot_format_ext)\n self.fig.savefig(ss_out_file, bbox_inches='tight', dpi=cfg.dpi_export_fig)", "def screen_shot(self):\n screen_size = '{}x{}@{}x{}/0'.format(self.screen[0], self.screen[1], self.screen[0], self.screen[1])\n subprocess.check_call([\n ADB_EXECUTOR, '-s', self.device_id, 'shell',\n 'LD_LIBRARY_PATH=/data/local/tmp', '/data/local/tmp/minicap', '-s', '-P', screen_size,\n '>', TEMP_PIC_ANDROID_PATH\n ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n logger.info('screen shot saved in {}'.format(TEMP_PIC_ANDROID_PATH))", "def screenshot_disabled(self, screenshot_disabled):\n\n self._screenshot_disabled = screenshot_disabled", "def updateShotInfo(*args):\n shot = cmds.textScrollList(widgets[\"shotListTSL\"], q=True, si=True)[0]\n\n #clear all text fields\n clearFields()\n\n pi.currentShotFolder = cFuncs.fixPath(os.path.join(pi.currentProject, \"shots\", shot))\n pi.currentVariant = \"\" \n######---------reset the pi variables for the shot stuff\n\n lists = [\"anmVariationsTSL\", \"lgtVariationsTSL\", \"fxVariationsTSL\"]\n types = [\"anm\", \"lgt\", \"fx\"]\n\n #loop through types of files in shot - anm, lgt, fx\n for x in range(3):\n shotTypeFolder = \"{0}/{1}\".format(pi.currentShotFolder, types[x])\n #clear the list\n cmds.textScrollList(widgets[lists[x]], e=True, ra=True)\n cmds.image(widgets[\"shotInfoPic\"], e=True, image = \"{0}/defaultAssetImage.jpg\".format(pi.images))\n vars = cFuncs.getShotVariantList(shotTypeFolder)\n if vars:\n for var in vars:\n cmds.textScrollList(widgets[lists[x]], e=True, a=var, sc=partial(updateVariantInfo, var, shotTypeFolder))", "def images(self, images):\n\n self._images = images", "def images(self, images):\n\n 
self._images = images", "def images(self, images):\n\n self._images = images", "def images(self, images):\n\n self._images = images", "def configure_screenshots_for_all_steps(_step, action):\r\n action=action.strip()\r\n if action == 'enable':\r\n world.auto_capture_screenshots = True\r\n elif action == 'disable':\r\n world.auto_capture_screenshots = False\r\n else:\r\n raise ValueError('Parameter `action` should be one of \"enable\" or \"disable\".')", "def measure_screen(self):\n outputs = self._get_images()\n\n if self.save_image_flag:\n self.save_images(outputs)\n return outputs" ]
[ "0.6365965", "0.5973052", "0.57192713", "0.5689033", "0.5593626", "0.5548558", "0.5500021", "0.5379296", "0.537902", "0.53294593", "0.5321024", "0.53020626", "0.528143", "0.5255104", "0.5241511", "0.5227716", "0.5225529", "0.52027875", "0.5186099", "0.5182037", "0.5140037", "0.50770146", "0.5076677", "0.50710934", "0.5028807", "0.5028807", "0.5028807", "0.5028807", "0.502181", "0.5015417" ]
document_score: 0.77826786
document_rank: 0
query: Gets the videos of this Listing. Videos of the listing.
document: def videos(self): return self._videos
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_videos(self) -> APIReturn:\n return await self._request(\"GET\", \"/getVideos\")", "def video_list(self) -> list:\n return self._video_list", "def get_videos(self):\n return list(self._videos.values())", "def get_videos(self, **kwargs):\n return self.get('videos', **kwargs)", "def videos(self):\r\n return v3.Videos(self)", "def videos(self) -> List[AbstractVideoLoader]:\n return self._videos", "def get_all_videos(self):\n\n return list(self._videos.values())", "def fetch_video_list(self, params):\n list_id = params.get('list_id', [''])[0]\n start = int(params.get('list_from', [0])[0])\n end = int(params.get('list_to', [26])[0])\n raw_video_list = self.netflix_session.fetch_video_list(\n list_id=list_id,\n list_from=start,\n list_to=end)\n if 'error' in raw_video_list:\n return raw_video_list\n # parse the video list ids\n if 'videos' in raw_video_list.get('value', {}).keys():\n video_list = self.netflix_session.parse_video_list(\n response_data=raw_video_list)\n return video_list\n return []", "def list(self):\n\n query = \"\"\"\n SELECT id, uri, filename, description\n FROM videos\n \"\"\"\n\n result = Model.execute(query)\n\n return result.fetchall()", "def get_videos_in_playlist(self):\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for video in (self. result['entries']):\n video_id = video['id']\n self. url = f'https://www.youtube.com/watch?v={video_id}'\n self. show_formats()", "def fetch_videos():\n channels = get_channels_from_file()\n\n channels_request = service.channels().list(\n part='id, contentDetails',\n forUsername=channels[0]['channelUsername'] # first channel for now\n )\n\n video_list = []\n\n channels_response = channels_request.execute()\n for channel in channels_response['items']:\n uploads_list_id = channel['contentDetails']['relatedPlaylists']['uploads']\n\n next_page_token = ''\n while next_page_token is not None:\n playlistitems_response = service.playlistItems().list(\n playlistId=uploads_list_id,\n part='snippet',\n maxResults=50,\n pageToken=next_page_token\n ).execute()\n\n for playlist_item in playlistitems_response['items']:\n title = playlist_item['snippet']['title']\n video_id = playlist_item['snippet']['resourceId']['videoId']\n print(f'{title}, {video_id}')\n video_list.append({'title': title, 'video_id': video_id})\n\n next_page_token = playlistitems_response.get('nextPageToken')\n\n return video_list", "def getvideolist():\n safeprint(\"Getting video list...\")\n response = getfile(\"http://openings.moe/api/list.php\")\n lstjson = response.read().decode(\"utf-8\", \"ignore\")\n videolist = json.loads(lstjson)\n return videolist", "def parse_video_list (self, response_data):\n video_list = {};\n raw_video_list = response_data['value']\n netflix_list_id = self.parse_netflix_list_id(video_list=raw_video_list);\n for video_id in raw_video_list['videos']:\n if self._is_size_key(key=video_id) == False:\n video_list.update(self.parse_video_list_entry(id=video_id, list_id=netflix_list_id, video=raw_video_list['videos'][video_id], persons=raw_video_list['person'], genres=raw_video_list['genres']))\n return video_list", "def videos(self, **kwargs):\n\n path = self._get_movie_id_path('translations')\n resp = self._get_method(path, kwargs)\n return resp", "def get_videos(self):\n\n videos = []\n with open(self.filename, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ', 
quotechar='|')\n for row in reader:\n for col in row:\n videos.append(col)\n videos = list(filter(None, list(set(videos))))\n return videos", "def download_video_data(self):\n\n def scrape_url(url):\n \"\"\"Scrape the video list, youtube_dl does all the heavy lifting\"\"\"\n ydl_opts = {\n \"ignoreerrors\": True, # Skip private and unavaliable videos\n }\n\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result_ydl = ydl.extract_info(\n url,\n download=False # No download needed, only the info\n )\n\n logger.debug('Url scraped {}', url)\n if 'entries' in result_ydl:\n # It's a playlist or a list of videos\n return result_ydl['entries']\n # Just a video\n return [result_ydl]\n\n youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])\n for youtube_video_data in youtube_list:\n if youtube_video_data: # Valid video\n self.youtube_videos.append(\n Video.from_youtube(\n video_data=youtube_video_data, event=self))\n else:\n logger.warning('Null youtube video')", "def get_queryset(self):\n\n videos = []\n\n # If query argument is provided\n if 'q' in self.request.GET:\n # Get the query provided by user\n query = self.request.GET.get('q')\n # Filter all records with title and description containing query\n videos = VideoData.objects.filter(title__icontains=query) \n else:\n videos = VideoData.objects.all()\n \n # If sortby argument is provided\n if 'sortby' in self.request.GET:\n # Get the sortby query provided by user\n sort_by = self.request.GET.get('sortby')\n\n # Check if it is valid property by which we can sort\n if sort_by in SORT_PROPERTIES:\n videos = videos.order_by(sort_by)\n return videos\n \n # If invalid/no sortby argument provided, sort with default property\n videos = videos.order_by(DEFAULT_SORT_PROPERTY)\n\n return videos", "def load_videos(self):\n logging.debug(\"Loading videos data...\")\n\n # loading videos\n data=requests.get(self.__URL_VIDEOS)\n self.__dataframe_videos=pd.DataFrame(data.json())\n # XXX transposing as the API returns a pre index list of videos\n self.__dataframe_videos = self.__dataframe_videos.transpose()\n if self.__save_raw_data_to_csv:\n logging.debug(\"Saving raw data to CSV [%s...\" % self.__RAW_DATA_FILENAME)\n self.__dataframe_videos.to_csv(self.__RAW_DATA_FILENAME, encoding='utf-8', sep=',', index=False)\n self.__dataframe_videos['video_contents'] = self.__dataframe_videos[['video_title', 'video_desc']].\\\n apply(lambda x: \" \".join(x), axis=1)\n\n logging.debug(\"Informative videos data loaded! 
n=%s\" % self.__dataframe_videos.shape[0])\n\n return self.__dataframe_videos", "def get_videos_of_folder(folder):\n\n Settings.dev_print(\"getting videos of folder: {}\".format(folder.get_title()))\n if not folder: return []\n videos = []\n files = []\n valid_videos = [\".mp4\",\".mov\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_videos:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"video path: {}\".format(os.path.join(folder.get_path(),f)))\n return files", "def get_video_games(self, **kwargs):\n return self.get('video_games.json', **kwargs)", "def list_videos(movie,thumb):\n\n videos = get_videos(movie)\n listing = []\n for video in videos:\n list_item = xbmcgui.ListItem(label=video[0])\n list_item.setArt({'thumb': thumb,\n 'icon': thumb,\n 'fanart': thumb})\n list_item.setInfo('video', {'title': video[0]})\n list_item.setProperty('IsPlayable', 'true')\n url = '{0}?action=play&video={1}'.format(_url, video[1])\n is_folder = False\n listing.append((url, list_item, is_folder))\n\n xbmcplugin.addDirectoryItems(_handle, listing, len(listing))\n xbmcplugin.endOfDirectory(_handle)", "def fetch_video_list_ids(self, params):\n guid = self.netflix_session.user_data.get('guid')\n cached_list = self.video_list_cache.get(guid, None)\n if cached_list is not None:\n self.kodi_helper.log(msg='Serving cached list for user: ' + guid)\n return cached_list\n video_list_ids_raw = self.netflix_session.fetch_video_list_ids()\n\n if 'error' in video_list_ids_raw:\n return video_list_ids_raw\n video_list = self.netflix_session.parse_video_list_ids(\n response_data=video_list_ids_raw)\n return video_list", "def get_videos(town):\n\n entries = get_town_videos(town)\n\n print entries\n\n if entries:\n return render_template('videos.html', videos=entries, town=town)\n else:\n flash('No se encontraron videos.')\n return render_template('videos.html', town=town)", "def get_video_sources(self):\n return self.camera_media.GetVideoSources()", "def videos(self, videos):\n self._videos = videos", "def get_vr_videos(self, count = 30, page = 1):\n uri = 'videos/vr'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def fetch_video_list_information (self, video_ids):\n paths = []\n for video_id in video_ids:\n paths.append(['videos', video_id, ['summary', 'title', 'synopsis', 'regularSynopsis', 'evidence', 'queue', 'episodeCount', 'info', 'maturity', 'runtime', 'seasonCount', 'releaseYear', 'userRating', 'numSeasonsLabel', 'bookmarkPosition', 'watched', 'videoQuality']])\n paths.append(['videos', video_id, 'cast', {'from': 0, 'to': 15}, ['id', 'name']])\n paths.append(['videos', video_id, 'cast', 'summary'])\n paths.append(['videos', video_id, 'genres', {'from': 0, 'to': 5}, ['id', 'name']])\n paths.append(['videos', video_id, 'genres', 'summary'])\n paths.append(['videos', video_id, 'tags', {'from': 0, 'to': 9}, ['id', 'name']])\n paths.append(['videos', video_id, 'tags', 'summary'])\n paths.append(['videos', video_id, ['creators', 'directors'], {'from': 0, 'to': 49}, ['id', 'name']])\n paths.append(['videos', video_id, ['creators', 'directors'], 'summary'])\n paths.append(['videos', video_id, 'bb2OGLogo', '_400x90', 'png'])\n paths.append(['videos', video_id, 'boxarts', '_342x192', 'jpg'])\n paths.append(['videos', video_id, 'boxarts', '_1280x720', 'jpg'])\n paths.append(['videos', video_id, 'storyarts', '_1632x873', 'jpg'])\n 
paths.append(['videos', video_id, 'interestingMoment', '_665x375', 'jpg'])\n paths.append(['videos', video_id, 'artWorkByType', 'BILLBOARD', '_1280x720', 'jpg'])\n\n response = self._path_request(paths=paths)\n return self._process_response(response=response, component='fetch_video_list_information')", "def get_video_data(id, fetch_all_videos=True):\n youtube_data = _youtube_feed('videos', id)['entry']\n return Playlist(None, 1, [_get_video_data(youtube_data)], None)", "def get(self):\r\n self.response.headers['Content-Type'] = 'application/json; charset=utf-8'\r\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=DEVELOPER_KEY)\r\n params = {\"part\": SEARCH_PART,\r\n \"type\": SEARCH_TYPE,\r\n \"channelId\":CHANNEL_ID,\r\n \"maxResults\":MAX_RESULTS}\r\n searchText = self.request.get('q')\r\n if ( searchText != \"\" ):\r\n params[\"q\"] = searchText\r\n search_response = youtube.search().list(**params).execute()\r\n \r\n search_videos = []\r\n for search_result in search_response.get(\"items\", []): \r\n search_videos.append(\r\n {\"thumbnail\":search_result[\"snippet\"][\"thumbnails\"][\"default\"],\r\n \"description\":search_result[\"snippet\"][\"description\"],\r\n \"title\":search_result[\"snippet\"][\"title\"],\r\n \"videoId\":search_result[\"id\"][\"videoId\"]\r\n }\r\n )\r\n page_info = search_response.get(\"pageInfo\")\r\n response = {\"videos\":search_videos,\"total_results\":page_info[\"totalResults\"]}\r\n self.response.out.write( json.dumps(response) )", "def get_videos(url):\n videos = []\n if 'cinebix.com' in url:\n resolve_media(url,videos)\n return videos\n \n html = requests.get(url, headers=mozhdr).text\n mlink = SoupStrainer('div', {'class':re.compile('^singcont')})\n videoclass = BeautifulSoup(html, parseOnlyThese=mlink)\n try:\n links = videoclass.findAll('iframe')\n for link in links:\n url = link.get('src')\n resolve_media(url,videos)\n except:\n pass\n\n mlink = SoupStrainer('div', {'class':'entry-excerpt'})\n videoclass = BeautifulSoup(html, parseOnlyThese=mlink)\n try:\n links = videoclass.findAll('iframe')\n for link in links:\n if 'http' in str(link):\n url = link.get('src')\n resolve_media(url,videos)\n except:\n pass\n\n try:\n url = videoclass.p.a.get('href')\n resolve_media(url,videos)\n except:\n pass \n \n return videos" ]
[ "0.791812", "0.78357685", "0.7770867", "0.772594", "0.76119924", "0.74094355", "0.7159197", "0.7119709", "0.7093945", "0.6897495", "0.68349713", "0.6766724", "0.67164904", "0.66286135", "0.64525926", "0.64008445", "0.6372604", "0.6356699", "0.63502234", "0.6330106", "0.6250551", "0.6196666", "0.6134653", "0.6104255", "0.6089899", "0.60722446", "0.6062339", "0.60575", "0.6052034", "0.6019966" ]
document_score: 0.79238147
document_rank: 0
query: Sets the videos of this Listing. Videos of the listing.
document: def videos(self, videos): self._videos = videos
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def videos(self):\n return self._videos", "def get_videos(self, **kwargs):\n return self.get('videos', **kwargs)", "def video_list(self) -> list:\n return self._video_list", "def videos(self):\r\n return v3.Videos(self)", "def videos(self) -> List[AbstractVideoLoader]:\n return self._videos", "def get_videos(self):\n return list(self._videos.values())", "async def get_videos(self) -> APIReturn:\n return await self._request(\"GET\", \"/getVideos\")", "def parse_video_list (self, response_data):\n video_list = {};\n raw_video_list = response_data['value']\n netflix_list_id = self.parse_netflix_list_id(video_list=raw_video_list);\n for video_id in raw_video_list['videos']:\n if self._is_size_key(key=video_id) == False:\n video_list.update(self.parse_video_list_entry(id=video_id, list_id=netflix_list_id, video=raw_video_list['videos'][video_id], persons=raw_video_list['person'], genres=raw_video_list['genres']))\n return video_list", "def get_videos_in_playlist(self):\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for video in (self. result['entries']):\n video_id = video['id']\n self. url = f'https://www.youtube.com/watch?v={video_id}'\n self. show_formats()", "def video_set(self):\n # Just proxies the User's video_set\n if self._video_set is None:\n self._video_set = self.userprofile.user.video_set\n return self._video_set", "def load_video_data(self):\n self.file_videos = [\n Video.from_file(path, self)\n for path in self.video_dir.glob('*.json')\n ]", "def get_all_videos(self):\n\n return list(self._videos.values())", "def load_videos(self):\n logging.debug(\"Loading videos data...\")\n\n # loading videos\n data=requests.get(self.__URL_VIDEOS)\n self.__dataframe_videos=pd.DataFrame(data.json())\n # XXX transposing as the API returns a pre index list of videos\n self.__dataframe_videos = self.__dataframe_videos.transpose()\n if self.__save_raw_data_to_csv:\n logging.debug(\"Saving raw data to CSV [%s...\" % self.__RAW_DATA_FILENAME)\n self.__dataframe_videos.to_csv(self.__RAW_DATA_FILENAME, encoding='utf-8', sep=',', index=False)\n self.__dataframe_videos['video_contents'] = self.__dataframe_videos[['video_title', 'video_desc']].\\\n apply(lambda x: \" \".join(x), axis=1)\n\n logging.debug(\"Informative videos data loaded! 
n=%s\" % self.__dataframe_videos.shape[0])\n\n return self.__dataframe_videos", "def fetch_video_list(self, params):\n list_id = params.get('list_id', [''])[0]\n start = int(params.get('list_from', [0])[0])\n end = int(params.get('list_to', [26])[0])\n raw_video_list = self.netflix_session.fetch_video_list(\n list_id=list_id,\n list_from=start,\n list_to=end)\n if 'error' in raw_video_list:\n return raw_video_list\n # parse the video list ids\n if 'videos' in raw_video_list.get('value', {}).keys():\n video_list = self.netflix_session.parse_video_list(\n response_data=raw_video_list)\n return video_list\n return []", "def download_video_data(self):\n\n def scrape_url(url):\n \"\"\"Scrape the video list, youtube_dl does all the heavy lifting\"\"\"\n ydl_opts = {\n \"ignoreerrors\": True, # Skip private and unavaliable videos\n }\n\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result_ydl = ydl.extract_info(\n url,\n download=False # No download needed, only the info\n )\n\n logger.debug('Url scraped {}', url)\n if 'entries' in result_ydl:\n # It's a playlist or a list of videos\n return result_ydl['entries']\n # Just a video\n return [result_ydl]\n\n youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])\n for youtube_video_data in youtube_list:\n if youtube_video_data: # Valid video\n self.youtube_videos.append(\n Video.from_youtube(\n video_data=youtube_video_data, event=self))\n else:\n logger.warning('Null youtube video')", "def merge_video_data(self):\n if self.overwrite:\n if self.wipe:\n self.videos = self.youtube_videos\n elif self.add_new_files or self.overwrite_fields:\n old_videos = {\n video.filename: video\n for video in self.file_videos\n }\n old_videos_url = {\n video.metadata['videos'][0]['url']: video\n for video in self.file_videos\n }\n new_videos = {}\n for video in self.youtube_videos:\n new_video_url = video.metadata['videos'][0]['url']\n if new_video_url in old_videos_url:\n new_video_filename = old_videos_url[new_video_url].filename\n else:\n new_video_filename = video.filename\n new_videos[new_video_filename] = video\n\n if self.overwrite_fields:\n forgotten = set(old_videos) - set(new_videos)\n for name in forgotten:\n logger.warning('Missing video: {} {}',\n old_videos[name].filename,\n old_videos[name].metadata['videos'][0]['url'],\n )\n\n changes = set(new_videos).intersection(set(old_videos))\n for path in changes:\n merged_video = old_videos[path].merge(\n new_videos[path], self.overwrite_fields)\n self.videos.append(merged_video)\n else:\n self.videos = self.file_videos\n if self.add_new_files:\n adds = set(new_videos) - set(old_videos)\n self.videos.extend([new_videos[path] for path in adds])\n else: # not self.overwrite\n self.videos = self.youtube_videos", "def videos(self, **kwargs):\n\n path = self._get_movie_id_path('translations')\n resp = self._get_method(path, kwargs)\n return resp", "def list_videos(movie,thumb):\n\n videos = get_videos(movie)\n listing = []\n for video in videos:\n list_item = xbmcgui.ListItem(label=video[0])\n list_item.setArt({'thumb': thumb,\n 'icon': thumb,\n 'fanart': thumb})\n list_item.setInfo('video', {'title': video[0]})\n list_item.setProperty('IsPlayable', 'true')\n url = '{0}?action=play&video={1}'.format(_url, video[1])\n is_folder = False\n listing.append((url, list_item, is_folder))\n\n xbmcplugin.addDirectoryItems(_handle, listing, len(listing))\n xbmcplugin.endOfDirectory(_handle)", "async def videos(self, *, key: str, part: List[str], video_ids: List[str],\n max_results: int = 50, 
page_token: str = None,\n **kwargs):\n params = {\n 'key': key,\n 'id': ','.join(video_ids),\n 'part': ','.join(part),\n 'maxResults': max_results,\n }\n if page_token:\n params['pageToken'] = page_token\n\n return await self._request(kwargs.get('name'), params=params)", "def get_videos(self):\n\n videos = []\n with open(self.filename, newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in reader:\n for col in row:\n videos.append(col)\n videos = list(filter(None, list(set(videos))))\n return videos", "def list(self):\n\n query = \"\"\"\n SELECT id, uri, filename, description\n FROM videos\n \"\"\"\n\n result = Model.execute(query)\n\n return result.fetchall()", "def __init__(self, *args, **kwargs):\r\n super(VideoDescriptor, self).__init__(*args, **kwargs)\r\n # For backwards compatibility -- if we've got XML data, parse it out and set the metadata fields\r\n if self.data:\r\n field_data = self._parse_video_xml(self.data)\r\n self._field_data.set_many(self, field_data)\r\n del self.data\r\n\r\n editable_fields = super(VideoDescriptor, self).editable_metadata_fields\r\n\r\n self.source_visible = False\r\n if self.source:\r\n # If `source` field value exist in the `html5_sources` field values,\r\n # then delete `source` field value and use value from `html5_sources` field.\r\n if self.source in self.html5_sources:\r\n self.source = '' # Delete source field value.\r\n self.download_video = True\r\n else: # Otherwise, `source` field value will be used.\r\n self.source_visible = True\r\n download_video = editable_fields['download_video']\r\n if not download_video['explicitly_set']:\r\n self.download_video = True\r\n\r\n # for backward compatibility.\r\n # If course was existed and was not re-imported by the moment of adding `download_track` field,\r\n # we should enable `download_track` if following is true:\r\n download_track = editable_fields['download_track']\r\n if not download_track['explicitly_set'] and self.track:\r\n self.download_track = True", "def fetch_videos():\n channels = get_channels_from_file()\n\n channels_request = service.channels().list(\n part='id, contentDetails',\n forUsername=channels[0]['channelUsername'] # first channel for now\n )\n\n video_list = []\n\n channels_response = channels_request.execute()\n for channel in channels_response['items']:\n uploads_list_id = channel['contentDetails']['relatedPlaylists']['uploads']\n\n next_page_token = ''\n while next_page_token is not None:\n playlistitems_response = service.playlistItems().list(\n playlistId=uploads_list_id,\n part='snippet',\n maxResults=50,\n pageToken=next_page_token\n ).execute()\n\n for playlist_item in playlistitems_response['items']:\n title = playlist_item['snippet']['title']\n video_id = playlist_item['snippet']['resourceId']['videoId']\n print(f'{title}, {video_id}')\n video_list.append({'title': title, 'video_id': video_id})\n\n next_page_token = playlistitems_response.get('nextPageToken')\n\n return video_list", "def __append_videos(_all_videos, _query, _start_index, _return_fields, _ignore_exceptions):\n _videos = core.get_paginated_results(_query, 'video', _start_index, filter_info=('type', 'video'),\n return_fields=_return_fields, ignore_exceptions=_ignore_exceptions)\n _all_videos = core_utils.add_to_master_list(_videos, _all_videos)\n return _all_videos, len(_videos)", "def add_videos(playlist):\n surl = playlist['link']\n # 작은 playlist의 url을 surl에 저장\n soup = get_soup(surl)\n # 작은 플레이리스트의 html 파싱하여 soup에 저장\n print(f\" getting videos for playlist: 
{playlist['title']}\")\n\n videos = []\n\n # items are list of video a links from list\n items = soup('a', class_='yt-uix-tile-link')\n # a 태그의 class가 'yt-uix-tile-link'인 태그 items에 저장\n # items는 작은 플레이리스트의 동영상 목록들임\n\n # note first part of look get info from playlist page item,\n # and the the last part opens the video and gets more details\n if len(items) > 0:\n for i in items:\n # 각각의 items i에 하나씩 저장\n d = dict()\n vurl = fix_url(i['href'])\n # 동영상 url을 vurl에 저장\n t = i.find_next('span', {'aria-label': True})\n # 동영상의 span 태그 중 aria=label값이 존재하는 것 t에 저장\n # t는 동영상의 재생 시간임\n d['time'] = t.text if t else 'NA'\n # d 딕셔너리에 t저장\n\n d.update(parse_video(vurl))\n videos.append(d)\n # videos에 d를 append\n\n else: # must be only one video\n d = {'time': 'NA'}\n d.update(parse_video(surl))\n videos.append(d)\n\n # add new key to this playlist of list of video infos\n playlist['videos'] = videos\n print()", "def fetch_video_list_information (self, video_ids):\n paths = []\n for video_id in video_ids:\n paths.append(['videos', video_id, ['summary', 'title', 'synopsis', 'regularSynopsis', 'evidence', 'queue', 'episodeCount', 'info', 'maturity', 'runtime', 'seasonCount', 'releaseYear', 'userRating', 'numSeasonsLabel', 'bookmarkPosition', 'watched', 'videoQuality']])\n paths.append(['videos', video_id, 'cast', {'from': 0, 'to': 15}, ['id', 'name']])\n paths.append(['videos', video_id, 'cast', 'summary'])\n paths.append(['videos', video_id, 'genres', {'from': 0, 'to': 5}, ['id', 'name']])\n paths.append(['videos', video_id, 'genres', 'summary'])\n paths.append(['videos', video_id, 'tags', {'from': 0, 'to': 9}, ['id', 'name']])\n paths.append(['videos', video_id, 'tags', 'summary'])\n paths.append(['videos', video_id, ['creators', 'directors'], {'from': 0, 'to': 49}, ['id', 'name']])\n paths.append(['videos', video_id, ['creators', 'directors'], 'summary'])\n paths.append(['videos', video_id, 'bb2OGLogo', '_400x90', 'png'])\n paths.append(['videos', video_id, 'boxarts', '_342x192', 'jpg'])\n paths.append(['videos', video_id, 'boxarts', '_1280x720', 'jpg'])\n paths.append(['videos', video_id, 'storyarts', '_1632x873', 'jpg'])\n paths.append(['videos', video_id, 'interestingMoment', '_665x375', 'jpg'])\n paths.append(['videos', video_id, 'artWorkByType', 'BILLBOARD', '_1280x720', 'jpg'])\n\n response = self._path_request(paths=paths)\n return self._process_response(response=response, component='fetch_video_list_information')", "def save_video_data(self):\n if self.overwrite:\n # Erase old event videos\n for path in self.video_dir.glob('*.json'):\n path.unlink()\n for video in self.videos:\n video.save()", "def fetch_video_list_ids(self, params):\n guid = self.netflix_session.user_data.get('guid')\n cached_list = self.video_list_cache.get(guid, None)\n if cached_list is not None:\n self.kodi_helper.log(msg='Serving cached list for user: ' + guid)\n return cached_list\n video_list_ids_raw = self.netflix_session.fetch_video_list_ids()\n\n if 'error' in video_list_ids_raw:\n return video_list_ids_raw\n video_list = self.netflix_session.parse_video_list_ids(\n response_data=video_list_ids_raw)\n return video_list", "def video_categories(self):\r\n return v3.VideoCategories(self)", "def get_videos_of_folder(folder):\n\n Settings.dev_print(\"getting videos of folder: {}\".format(folder.get_title()))\n if not folder: return []\n videos = []\n files = []\n valid_videos = [\".mp4\",\".mov\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_videos:\n continue\n 
file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"video path: {}\".format(os.path.join(folder.get_path(),f)))\n return files" ]
[ "0.68391746", "0.6699461", "0.6632075", "0.6552472", "0.65026706", "0.63689184", "0.6289307", "0.612901", "0.6074", "0.607086", "0.6037618", "0.6023897", "0.59083235", "0.589968", "0.5862606", "0.58311903", "0.5796718", "0.57352465", "0.5617791", "0.5506406", "0.5484566", "0.5416645", "0.537387", "0.534179", "0.53183013", "0.5307187", "0.52785885", "0.5262048", "0.52607626", "0.52513874" ]
document_score: 0.79657626
document_rank: 0
query: Gets the support_contacts of this Listing. Contact information to use to get support from the publisher for the listing.
document: def support_contacts(self): return self._support_contacts
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contact_list(self):\n return self._contact_list", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def list_contacts(self):\n return self.contacts", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def contact_lists(self):\n from hubspot3.contact_lists import ContactListsClient\n\n return ContactListsClient(**self.auth, **self.options)", "def contacts(self):\n return ContactCollection(self.request)", "def contacts(self):\r\n return contacts.Contacts(self)", "def contacts(self):\n from hubspot3.contacts import ContactsClient\n\n return ContactsClient(**self.auth, **self.options)", "def GetContactList(self):\n\t\tfeeds = []\n\t\tfeed = self.client.GetContacts()\n\t\tfeeds.append(feed)\n\t\tnext = feed.GetNextLink()\n\t\twhile next:\n\t\t\tfeed = self.client.GetContacts(uri=next.href)\n\t\t\tfeeds.append(feed)\n\t\t\tnext = feed.GetNextLink()\n\t\t\n\t\tcontacts = []\n\t\tfor feed in feeds:\n\t\t\tif not feed.entry:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tfor i, entry in enumerate(feed.entry):\n\t\t\t\t\tcontacts.append(entry)\n\t\treturn contacts", "def support_contacts(self, support_contacts):\n self._support_contacts = support_contacts", "def get_contacts_list(self):\n return [(id + 1, contact) for id, contact in enumerate(self.contact_list)]", "async def get_contacts(self, **kwargs) -> List[CertificateContact]:\n contacts = await self._client.get_certificate_contacts(\n vault_base_url=self._vault_url, **kwargs\n )\n return [CertificateContact._from_certificate_contacts_item(contact_item=item) for item in contacts.contact_list]", "def update_contacts(self):\n self.contacts = self.db.list_contacts()\n return self.list_contacts()", "def contact_info(self):\n return self._contact_info", "def contacts(self):\n if \"contacts\" in self._prop_dict:\n return ContactsCollectionPage(self._prop_dict[\"contacts\"])\n else:\n return None", "def contact_information(self) -> ContactInformation:\n return self._contact_information", "def contacts(self):\n service_root = self._get_webservice_url(\"contacts\")\n return ContactsService(service_root, self.session, self.params)", "def contact(self):\n return self._contact", "def contact(self):\n return self._contact", "def get_queryset(self):\n return self.request.user.contacts.all()", "def _get_receivers_list(self):\n\n # TODO: document what this plugin expects to be in Dockerfile/where it gets info from\n global_component = self._get_component_label()\n # this relies on bump_release plugin configuring source.git_commit to actually be\n # branch name, not a commit\n if not isinstance(self.workflow.source, GitSource):\n raise PluginFailedException('Source is not of type \"GitSource\", panic!')\n git_branch = self.workflow.source.git_commit\n try:\n r = requests.get(urljoin(self.pdc_url, 'rest_api/v1/release-component-contacts/'),\n headers={'Authorization': 'Token %s' % self._get_pdc_token()},\n params={'global_component': global_component,\n 'dist_git_branch': git_branch,\n 'role': self.pdc_contact_role},\n verify=self.pdc_verify_cert)\n except requests.RequestException as e:\n self.log.error('failed to connect to PDC: %s', str(e))\n raise RuntimeError(e)\n\n if r.status_code != 200:\n self.log.error('PDC returned status code %s, full response: %s',\n r.status_code, r.text)\n raise RuntimeError('PDC returned non-200 status code (%s), see referenced build log' %\n r.status_code)\n\n contacts = r.json()\n\n if contacts['count'] == 0:\n 
self.log.error('no %s role for the component', self.pdc_contact_role)\n raise RuntimeError('no %s role for the component' % self.pdc_contact_role)\n\n send_to = []\n for contact in contacts['results']:\n send_to.append(contact['contact']['email'])\n\n return send_to", "def contact_info(self):\n return [\n {\n 'contact_info': c.get('contactInfo'),\n 'type': c.get('type'),\n 'primary': c.get('primary'),\n 'verified': c.get('verified'),\n }\n for c in self.entity_payload.get('contactInfo')]", "def Contact(self):\n return self.__contact", "def get_cached_contacts(self):\n return list(self._replacement_cache)", "def ListAllContacts(self):\n feed = self.gd_client.GetContacts()\n self.contacts = self.CleanPhoneNumbers(self.GetContactsInfo(feed))\n return self.contacts", "def support_tickets(self):\n return self._support_tickets", "def contact_info(self, sensitive=True):\n account_id = self.account_id()\n retry_count = 5\n\n req_url = self.get(\"/accounts/{}/contacts\".format(account_id))['ResultUrl']\n resp = self.get(req_url)\n tries = 0\n while 'Contacts' not in resp and tries < retry_count:\n resp = self.get(req_url)\n tries += 1\n time.sleep(1)\n contacts = resp['Contacts']\n\n contact_data = list()\n for contact in contacts:\n row_data = {\n 'ContactId': contact['Id'],\n 'Email': \"*****@****.***\" if sensitive else contact['Email'],\n 'FirstName': \"*****\" if sensitive else contact['FirstName'],\n 'LastName': \"*****\" if sensitive else contact['LastName'],\n 'Status': contact.get('Status'),\n 'MembeshipEnabled': contact.get('MembershipEnabled'),\n 'TermsOfUseAccepted': contact['TermsOfUseAccepted'],\n }\n\n if 'MembershipLevel' in contact:\n row_data['MembershipLevel'] = contact['MembershipLevel']['Name']\n\n # Map all field values into a dict for convenience\n field_values = {val['FieldName']: val['Value']\n for val in contact['FieldValues']}\n\n # Get list of authorizations\n if 'Managed Authorizations' in field_values:\n authorizations = [i['Label']\n for i in field_values['Managed Authorizations']]\n row_data['Authorizations'] = authorizations\n\n contact_data.append(row_data)\n self.__contact_df = pd.DataFrame(contact_data).set_index('ContactId')\n return self.__contact_df", "def getListContacts(self, LibraryID, ListID, EmbeddedData=None, ContactHistory=None, LastRecipientID=None, NumberOfRecords=None,\n ExportLanguage=None, Unsubscribed=None, Subscribed=None, **kwargs):\n if not self.request(\"getListContacts\",\n Product='TA',\n LibraryID=LibraryID,\n ListID=ListID,\n EmbeddedData=EmbeddedData,\n ContactHistory=ContactHistory,\n LastRecipientID=LastRecipientID,\n NumberOfRecords=NumberOfRecords,\n ExportLanguage=ExportLanguage,\n Unsubscribed=Unsubscribed,\n Subscribed=Subscribed,\n **kwargs):\n print(self.last_error_message)\n return None\n return self.json_response", "def get_contacts(self):\n feet = [\"REAR_RIGHT_FOOT\", \"REAR_LEFT_FOOT\",\n \"FRONT_RIGHT_FOOT\", \"FRONT_LEFT_FOOT\"]\n contacts = np.zeros(4, dtype=np.float32)\n for i, foot in enumerate(feet):\n if self.supervisor.getFromDef(foot).getNumberOfContactPoints() > 0:\n contacts[i] = 1.0\n return contacts", "def contact_points(self) -> object:\n return self._contact_points" ]
[ "0.71102977", "0.70214784", "0.70081", "0.6748876", "0.67099714", "0.66819096", "0.6501014", "0.6308295", "0.6275049", "0.6235824", "0.6175169", "0.5944758", "0.5910778", "0.58919144", "0.5812473", "0.5797119", "0.57762897", "0.57244956", "0.57244956", "0.5658239", "0.56238693", "0.5598746", "0.5596889", "0.55957955", "0.5593124", "0.552597", "0.55088156", "0.5446359", "0.5365642", "0.53594327" ]
document_score: 0.79084134
document_rank: 0
query: Sets the support_contacts of this Listing. Contact information to use to get support from the publisher for the listing.
document: def support_contacts(self, support_contacts): self._support_contacts = support_contacts
metadata: { "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contact_list(self, contact_list):\n \n self._contact_list = contact_list", "def support_contacts(self):\n return self._support_contacts", "def set_contacts(self, contacts):\n\n\t\tif contacts is not None and not isinstance(contacts, list):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: contacts EXPECTED TYPE: list', None, None)\n\t\t\n\t\tself.__contacts = contacts\n\t\tself.__key_modified['Contacts'] = 1", "def contacts(self, contacts):\n\n self._contacts = contacts", "def contacts(self, contacts):\n\n self._contacts = contacts", "def support_tickets(self, support_tickets):\n\n self._support_tickets = support_tickets", "def support_links(self, support_links):\n self._support_links = support_links", "def contact_lists(self):\n from hubspot3.contact_lists import ContactListsClient\n\n return ContactListsClient(**self.auth, **self.options)", "async def set_contacts(self, contacts: List[CertificateContact], **kwargs) -> List[CertificateContact]:\n new_contacts = await self._client.set_certificate_contacts(\n vault_base_url=self.vault_url,\n contacts=self._models.Contacts(contact_list=[c._to_certificate_contacts_item() for c in contacts]),\n **kwargs\n )\n return [\n CertificateContact._from_certificate_contacts_item(contact_item=item) for item in new_contacts.contact_list\n ]", "def contact_points(self, contact_points: object):\n\n self._contact_points = contact_points", "def update_contacts(self):\n self.contacts = self.db.list_contacts()\n return self.list_contacts()", "def contact_list(self):\n return self._contact_list", "def set_contact_mechanisms(cls, records, name, value=None):\n Party = Pool().get('party.party')\n\n for record in records:\n Party.write([record.party], {'contact_mechanisms': value})", "def update_contacts(self, contacts):\n\n if contacts.time.size != 1:\n raise IndexError(\"Contacts should be from one frame only\")\n if contacts.channel.size != self.contacts.channel.size:\n self.new_contact_set(contacts)\n return # Prevent calling update_contacts recursively\n self.contacts = contacts\n contacts = np.array(contacts)\n\n for i, actor in enumerate(self.contacts_actors):\n # mapper = actors.GetNextActor().GetMapper()\n mapper = actor.GetMapper()\n self.contacts_actors[i].GetProperty().SetColor(self.contacts_color)\n self.contacts_actors[i].GetProperty().SetOpacity(self.contacts_opacity)\n source = vtkSphereSource()\n source.SetCenter(contacts[0:3, i])\n source.SetRadius(self.contacts_size)\n mapper.SetInputConnection(source.GetOutputPort())", "def list_contacts(self):\n return self.contacts", "def contacts(self):\n return ContactCollection(self.request)", "def update_soft_contacts(self, soft_contacts):\n\n if soft_contacts.time.size != 1:\n raise IndexError(\"soft_contacts should be from one frame only\")\n if soft_contacts.channel.size != self.soft_contacts.channel.size:\n self.new_soft_contacts_set(soft_contacts)\n return # Prevent calling update_soft_contacts recursively\n self.soft_contacts = soft_contacts\n soft_contacts = np.array(soft_contacts)\n\n for i, actor in enumerate(self.soft_contacts_actors):\n # mapper = actors.GetNextActor().GetMapper()\n mapper = actor.GetMapper()\n self.soft_contacts_actors[i].GetProperty().SetColor(self.soft_contacts_color)\n self.soft_contacts_actors[i].GetProperty().SetOpacity(self.soft_contacts_opacity)\n source = vtkSphereSource()\n source.SetCenter(soft_contacts[0:3, i])\n source.SetRadius(self.soft_contacts_size[i])\n mapper.SetInputConnection(source.GetOutputPort())", "def contact_info(self, 
contact_info):\n\n self._contact_info = contact_info", "def contacts(self):\n from hubspot3.contacts import ContactsClient\n\n return ContactsClient(**self.auth, **self.options)", "def contact_reference(self, contact_reference):\n\n self._contact_reference = contact_reference", "def new_soft_contacts_set(self, soft_contacts):\n if soft_contacts.time.size != 1:\n raise IndexError(\"soft_contacts should be from one frame only\")\n self.soft_contacts = soft_contacts\n\n # Remove previous actors from the scene\n for actor in self.soft_contacts_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.soft_contacts_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(soft_contacts.channel.size):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.soft_contacts_actors.append(vtkActor())\n self.soft_contacts_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.soft_contacts_actors[i])\n # Update marker position\n self.update_soft_contacts(self.soft_contacts)", "def update_contacts(self, contact_list):\n updated_contacts = 0\n request_list = list()\n\n # stale_contacts contains all old contacts at first, all current\n # contacts get then removed so that the remaining can get deleted\n stale_contacts = set(self.contacts)\n\n for contact in contact_list:\n c = Persona.query.get(contact[\"id\"])\n\n if c is None:\n c = Persona(id=contact[\"id\"], _stub=True)\n\n if c._stub is True:\n request_list.append(contact[\"id\"])\n\n try:\n # Old and new contact; remove from stale list\n stale_contacts.remove(c)\n except KeyError:\n # New contact\n self.contacts.append(c)\n updated_contacts += 1\n\n # Remove old contacts that are not new contacts\n for contact in stale_contacts:\n self.contacts.remove(contact)\n\n app.logger.info(\"Updated {}'s contacts: {} added, {} removed, {} requested\".format(\n self.username, updated_contacts, len(stale_contacts), len(request_list)))\n\n return request_list", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def new_contact_set(self, contacts):\n if contacts.time.size != 1:\n raise IndexError(\"Contacts should be from one frame only\")\n self.contacts = contacts\n\n # Remove previous actors from the scene\n for actor in self.contacts_actors:\n self.parent_window.ren.RemoveActor(actor)\n self.contacts_actors = list()\n\n # Create the geometry of a point (the coordinate) points = vtk.vtkPoints()\n for i in range(contacts.channel.size):\n # Create a mapper\n mapper = vtkPolyDataMapper()\n\n # Create an actor\n self.contacts_actors.append(vtkActor())\n self.contacts_actors[i].SetMapper(mapper)\n\n self.parent_window.ren.AddActor(self.contacts_actors[i])\n\n # Update marker position\n self.update_contacts(self.contacts)", "def contact(self, contact):\n\n self.logger.debug(\"In 'contact' setter.\")\n\n self._contact = contact", "def contacts_list_update(self):\n\t\tself.database.contacts_clear()\n\t\tclient_log.debug(f'Запрос контакт листа для пользователся {self.name}')\n\t\treq = {\n\t\t\tACTION: GET_CONTACTS,\n\t\t\tTIME: time.time(),\n\t\t\tUSER: self.username\n\t\t}\n\t\tclient_log.debug(f'Сформирован запрос {req}')\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, req)\n\t\t\tans = get_message(self.transport)\n\t\tclient_log.debug(f'Получен ответ {ans}')\n\t\tif RESPONSE in ans and ans[RESPONSE] == 202:\n\t\t\tfor contact in ans[LIST_INFO]:\n\t\t\t\tself.database.add_contact(contact)\n\t\telse:\n\t\t\tclient_log.error('Не удалось обновить список 
контактов.')", "def AddConfigureContactsSettingsFlagsToParser(parser):\n _AddContactSettingsFlagsToParser(parser, mutation_op=MutationOp.UPDATE)\n\n messages = apis.GetMessagesModule('domains', API_VERSION_FOR_FLAGS)\n base.Argument( # This is not a go/gcloud-style#commonly-used-flags.\n '--notices',\n help='Notices about special properties of contacts.',\n metavar='NOTICE',\n type=arg_parsers.ArgList(\n element_type=str, choices=ContactNoticeEnumMapper(\n messages).choices)).AddToParser(parser)", "def contact_information(self, contact_information: ContactInformation):\n\n self._contact_information = contact_information", "def support_url(self, support_url: str):\n\n self._support_url = support_url", "def contact(self, contact):\n\n self._contact = contact" ]
[ "0.6686726", "0.6543302", "0.64893526", "0.6251802", "0.6251802", "0.60224813", "0.5890734", "0.58105534", "0.5732187", "0.5686169", "0.55359674", "0.5383936", "0.5290222", "0.52160406", "0.51834357", "0.51061726", "0.50627476", "0.50477874", "0.50451815", "0.5015341", "0.499517", "0.49644834", "0.4942122", "0.49358296", "0.49148571", "0.49105018", "0.47959515", "0.47942322", "0.47799084", "0.47664782" ]
0.8408827
0
Gets the support_links of this Listing. Links to support resources for the listing.
def support_links(self): return self._support_links
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def support_links(self, support_links):\n self._support_links = support_links", "def getLinks(self):\n\n return self.links", "def documentation_links(self):\n return self._documentation_links", "def get_links(self) -> List[str]:\n return self.__links", "def support_url(self) -> str:\n return self._support_url", "def links(self):\n\t\treturn self.list_of_links", "def getLinks(self):\n return self.pageLinks", "def get_links(self):\r\n return self.__links", "def get_links(self):\r\n return self.links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self) -> Sequence[Link]:\n return self._links", "def support_ref(self):\n\n return self._support_ref", "def get_links(self):\n return self.__data['links']", "def links(self):\n\n links = []\n for foreign_key in self.__table__.foreign_keys:\n column = foreign_key.column.name\n column_value = getattr(self, column, None)\n if column_value:\n table = foreign_key.column.table.name\n with app.app_context():\n endpoint = current_app.class_references[table]\n links.append({'rel': 'related', 'uri': '/{}/{}'.format(\n endpoint.__name__, column_value)})\n links.append({'rel': 'self', 'uri': self.resource_uri()})\n return links", "def links(self):\n if not hasattr(self, '_links'):\n self._links = self.resource.links()\n values = self._response.headers.get('link')\n self._links.update([link for link in Links.parse(values)])\n return self._links", "def support_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"support_url\")", "def support_tickets(self):\n return self._support_tickets", "def links(self):\n return self._links_tpl.expand(self._identity, self._record)", "def links(self):\n return self.container['links']", "def links(self):\r\n return links.RepoLinks(self)", "def links(self):\n return self._link_reg", "def support_contacts(self):\n return self._support_contacts", "def get_links(self):\n return (link for link in self.links)", "def getLinkDefs(self): #$NON-NLS-1$\r\n if self.linkDefs is None:\r\n resouceReg = self.extensionPoint.getPlugin().getResourceRegistry()\r\n self.linkDefs = []\r\n linkElems = self._getExtensionDefChildNodes(u\"plg:links/plg:link\") #$NON-NLS-1$\r\n for linkElem in linkElems:\r\n linkDef = ZLinkDef(linkElem, resouceReg)\r\n self.linkDefs.append(linkDef)\r\n return self.linkDefs", "def links(self) -> str:\n return pulumi.get(self, \"links\")", "def listing(self):\r\n listing = LinkListing(self.builder_obj, show_nums = self.show_nums)\r\n return listing.listing()" ]
[ "0.63963586", "0.6201267", "0.61815244", "0.61208", "0.60785246", "0.6051551", "0.59929067", "0.59713036", "0.5868064", "0.58464664", "0.58464664", "0.58464664", "0.58464664", "0.58464664", "0.57972467", "0.5769803", "0.57259023", "0.56893015", "0.5656609", "0.55859476", "0.5584506", "0.55783856", "0.55672854", "0.5556627", "0.55400634", "0.5522024", "0.5495691", "0.5482983", "0.54344773", "0.54299754" ]
0.7983624
0
Sets the support_links of this Listing. Links to support resources for the listing.
def support_links(self, support_links): self._support_links = support_links
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def support_links(self):\n return self._support_links", "def support_url(self, support_url: str):\n\n self._support_url = support_url", "def documentation_links(self, documentation_links):\n self._documentation_links = documentation_links", "def support_contacts(self, support_contacts):\n self._support_contacts = support_contacts", "def links(self, links):\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def support_tickets(self, support_tickets):\n\n self._support_tickets = support_tickets", "def links(self, links):\n\n self.container['links'] = links", "def links(self, links):\n if links is None:\n raise ValueError(\"Invalid value for `links`, must not be `None`\")\n\n self._links = links", "def support_attachments(self, support_attachments: ConfigNodePropertyBoolean):\n\n self._support_attachments = support_attachments", "def set_support(self, support):\n self.support = round(support, 3)", "def support_url(self) -> str:\n return self._support_url", "def update_links(self, new_link):\r\n self.__links = new_link", "def setAddLinks(self,value):\n self.PDFreactorConfiguration.in1[\"addLinks\"] = value", "def documentation_links(self):\n return self._documentation_links", "def entities_links_wikidata(self, entities_links_wikidata):\n\n self._entities_links_wikidata = entities_links_wikidata", "def hyperlinks(self, hyperlinks):\n\n self.container['hyperlinks'] = hyperlinks", "def links_permalink(self, links_permalink):\n\n self._links_permalink = links_permalink", "def support_ref(self):\n\n return self._support_ref", "def links(self):\n\t\treturn self.list_of_links" ]
[ "0.69751316", "0.62614554", "0.6255054", "0.61495894", "0.59559757", "0.5915402", "0.5915402", "0.5915402", "0.5915402", "0.5915402", "0.5915402", "0.5915402", "0.5915402", "0.5915402", "0.5915402", "0.5915402", "0.5883081", "0.5737501", "0.5633349", "0.5605854", "0.56031364", "0.52699953", "0.51708966", "0.5139222", "0.50789213", "0.49955404", "0.49843812", "0.49563172", "0.49233067", "0.4794417" ]
0.84397984
0
Gets the documentation_links of this Listing. Links to additional documentation provided by the publisher specifically for the listing.
def documentation_links(self): return self._documentation_links
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def documentation_links(self, documentation_links):\n self._documentation_links = documentation_links", "def get_links(self) -> List[str]:\n return self.__links", "def getLinks(self):\n\n return self.links", "def get_links(self):\r\n return self.__links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def links(self):\n return self._links", "def get_links(self):\r\n return self.links", "def documentation_url(self) -> str:\n return self._documentation_url", "def getLinks(self):\n return self.pageLinks", "def getDocsList(self):\n return self.docs_list", "def links(self):\n\t\treturn self.list_of_links", "def get_links(self):\n return self.__data['links']", "def listing(self):\r\n listing = LinkListing(self.builder_obj, show_nums = self.show_nums)\r\n return listing.listing()", "def with_docs(self):\r\n self._configurations.append('javadoc')\r\n return self", "def links(self):\n return self.container['links']", "def get_links(self):\n links = \"\"\n if self.title != \"\":\n links += html_link_to_tag(\n plain_to_html(self.title), self.title, self.proc\n )\n return links + \\\n html_unordered_list([x.get_links() for x in self.subsections])", "def links(self) -> Sequence[Link]:\n return self._links", "def support_links(self):\n return self._support_links", "def links(self) -> str:\n return pulumi.get(self, \"links\")", "def links(self):\r\n return links.RepoLinks(self)", "def _extract_links(self, publication, feed_self_url):\n self._logger.debug(\n \"Started extracting links from {0}\".format(encode(publication.links))\n )\n\n links = []\n\n for link in publication.links:\n link_metadata = self._extract_link(link, feed_self_url)\n links.append(link_metadata)\n\n description_link = self._extract_description_link(publication)\n if description_link:\n links.append(description_link)\n\n image_links = self._extract_image_links(publication, feed_self_url)\n if image_links:\n links.extend(image_links)\n\n self._logger.debug(\n \"Finished extracting links from {0}: {1}\".format(\n encode(publication.links), encode(links)\n )\n )\n\n return links", "def links(self):\n if not hasattr(self, '_links'):\n self._links = self.resource.links()\n values = self._response.headers.get('link')\n self._links.update([link for link in Links.parse(values)])\n return self._links", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def get_links(self):\n return (link for link in self.links)", "def docs(self):\n self._doc_info = DocumentationURL()\n self._doc_info.show()" ]
[ "0.65572166", "0.6341007", "0.630448", "0.6262078", "0.61744523", "0.61744523", "0.61744523", "0.61744523", "0.61744523", "0.61526346", "0.61452794", "0.6133612", "0.61207443", "0.61080486", "0.608221", "0.60754454", "0.5957445", "0.5935076", "0.59187263", "0.59180135", "0.58904487", "0.58708483", "0.58363545", "0.5738851", "0.5687977", "0.56847715", "0.56847715", "0.56847715", "0.5666536", "0.55751115" ]
0.7930184
0
Sets the documentation_links of this Listing. Links to additional documentation provided by the publisher specifically for the listing.
def documentation_links(self, documentation_links): self._documentation_links = documentation_links
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def documentation_links(self):\n return self._documentation_links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n self._links = links", "def links(self, links):\n if links is None:\n raise ValueError(\"Invalid value for `links`, must not be `None`\")\n\n self._links = links", "def support_links(self, support_links):\n self._support_links = support_links", "def links(self, links):\n\n self.container['links'] = links", "def with_docs(self):\r\n self._configurations.append('javadoc')\r\n return self", "def documentation_url(self, documentation_url: str):\n\n self._documentation_url = documentation_url", "def links_permalink(self, links_permalink):\n\n self._links_permalink = links_permalink", "def hyperlinks(self, hyperlinks):\n\n self.container['hyperlinks'] = hyperlinks", "def setAddLinks(self,value):\n self.PDFreactorConfiguration.in1[\"addLinks\"] = value", "def update_links(self, new_link):\r\n self.__links = new_link", "def show_documentation(self):\n self.docs = documentation.Documentation()", "def docs(self):\n self._doc_info = DocumentationURL()\n self._doc_info.show()", "def documentation_url(self) -> str:\n return self._documentation_url", "def entities_links_wikidata(self, entities_links_wikidata):\n\n self._entities_links_wikidata = entities_links_wikidata", "def __init__(self, links: List[Link]=None, next_page: str=None):\n self.openapi_types = {\n 'links': List[Link],\n 'next_page': str\n }\n\n self.attribute_map = {\n 'links': 'links',\n 'next_page': 'next_page'\n }\n\n self._links = links\n self._next_page = next_page", "def add_documentation(cls, documentation):\n cls.__doc__ = documentation.CBAMLibrary\n methods = list(filter(lambda x: not x.startswith(\"_\"), dir(cls)))\n for method_name in methods:\n method = getattr(cls, method_name)\n if callable(method):\n name = method.__name__\n if hasattr(documentation, name):\n getattr(cls, name).__doc__ = getattr(documentation, name)", "def entities_links_wikipedia(self, entities_links_wikipedia):\n\n self._entities_links_wikipedia = entities_links_wikipedia", "def setListDoc(self, doc):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n libxml2mod.xmlSetListDoc(self._o, doc__o)" ]
[ "0.69023544", "0.66297585", "0.66297585", "0.66297585", "0.66297585", "0.66297585", "0.66297585", "0.66297585", "0.66297585", "0.66297585", "0.66297585", "0.66297585", "0.6629672", "0.6383643", "0.6354247", "0.6296145", "0.604916", "0.6047695", "0.58300346", "0.5776611", "0.5711294", "0.56549853", "0.5424349", "0.5317481", "0.5294163", "0.5287634", "0.5265138", "0.52506065", "0.5197335", "0.5175074" ]
0.8419598
0
Sets the icon of this Listing.
def icon(self, icon): self._icon = icon
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_icon(self, val):\n self._icon = val", "def icon(self, value: str | None) -> None:\n self._icon = value", "def setIcon(self, icon):\n if icon:\n self._icon = QIcon(icon)\n else:\n self._icon = None", "def setIcon(self,icon,index=0):\n self.rb[index].setIcon(icon)", "def icon(self, new_icon):\r\n self.set({\"icon\": new_icon})", "def set_icon(self, icon):\n icon = icon.title()\n if icon in self.rewards:\n self.icon = icon", "def icon(self, icon: str):\n if icon is None:\n raise ValueError(\"Invalid value for `icon`, must not be `None`\") # noqa: E501\n \n self._icon = icon", "def set_icons(self, value):\n self._icons = value", "def Icon(self, icon):\r\n\r\n if icon is None:\r\n icon = wx.NullIcon\r\n \r\n self.icon = icon\r\n return self", "def setDatastoreIcon(self, iconName):\r\n\r\n pixmap = getPixmapForImageName(iconName, False)\r\n self.selectedIconLabel.setPixmap(pixmap)", "def _set_icon(self):\n if self.current_status == 0:\n icon = WORK_ICON\n else:\n if self.break_count == 0:\n icon = LONG_REST_ICON\n else:\n icon = REST_ICON\n self.status_icon.set_title(icon.split('/')[-1])\n self.status_icon.set_from_file(icon)", "def icon(self):\n return self.ICON", "def icon(self):\n return self.ICON", "def icon_image(self, icon_image):\n\n self._icon_image = icon_image", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def setBtnIcon(self):\n self.setIcon(QtGui.QIcon(self.movie.currentPixmap()))\n self.setIconSize(QtCore.QSize(self.size[0], self.size[1]))", "def icon(self):\n return self.__icon", "def icon(self):\n return DEFAULT_ICON", "def icon(self) -> Icon:\n return self._icon", "def setIcon(self, icon, alpha=False):\n try:\n pygame.display.set_icon(icon)\n except TypeError:\n icon = self.newObject(icon, alpha)\n pygame.display.set_icon(icon)", "def icon(self):" ]
[ "0.8383824", "0.7790951", "0.7645983", "0.7563144", "0.7521918", "0.73360395", "0.7282719", "0.726055", "0.7193644", "0.7176555", "0.7172777", "0.6954962", "0.6954962", "0.6927028", "0.69077444", "0.69077444", "0.69077444", "0.69077444", "0.69077444", "0.69077444", "0.69077444", "0.69077444", "0.69077444", "0.69077444", "0.68883026", "0.68847966", "0.688386", "0.6855355", "0.6853696", "0.68380666" ]
0.8216263
1
Gets the banner of this Listing.
def banner(self): return self._banner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_banner(self,context,request):\n ba = queryMultiAdapter((context,request), interfaces.IBanner)\n if not ba:\n return ''\n return ba()", "def banner_url(self) -> typing.Optional[files.URL]:\n return self.make_banner_url()", "def get_banner(conn) -> str:\n banner_data = conn.recv(1024)\n banner = banner_data.decode().strip()\n print('Banner: {}'.format(banner))\n return banner", "def get_banner_image(self, tvdb_id: int) -> Optional[str]:\n if not tvdb_id:\n return None\n if not self.fanart_api_key:\n raise ValueError(\"Need Fanart.tv api key for TV titles!\")\n\n r = self.session.get(f\"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}\")\n if r.status_code == 404:\n return None\n res = r.json()\n\n error = res.get(\"error message\")\n if error:\n if error == \"Not found\":\n return None\n raise ValueError(f\"An unexpected error occurred while calling Fanart.tv, {res}\")\n\n banner = next((\n x[\"url\"] for x in (res.get(\"tvbanner\") or [])\n if x[\"lang\"] == sorted(self.audio, key=lambda x: x.streamorder)[0].language\n ), None)\n\n return banner", "def render_banner(self, width=300, height=85):\n img_path = IMG_PATH + os.sep + CARD_BANNER\n banner_img = Image.open(img_path)\n banner_img = banner_img.resize((width, height))\n return banner_img", "def getBanner(outputScan):\n try:\n return str(outputScan.split(\", Banner: \", 1)[1][:12])\n #banner = re.search(r\"[0-9A-F]{12}\",outputScan, re.MULTILINE).group()\n #return str(banner)\n except Exception as e:\n print '\\033[91m'+\"ERROR_BANNER\"\n return \"BANNER_ERROR\"", "def getBannerHeight(self):\n return int(self.bannerHeight)", "def get_last_banner(self, ip):\n return self.last_banners.get(ip)", "def create_banner_list():\n template_vars = {\n 'title' : 'Banners - ' + sitesettings.SITE_NAME,\n 'siteurl' : sitesettings.SITE_URL,\n 'sitename' : sitesettings.SITE_NAME,\n 'meta_desc' : 'List of step-up banners in Final Fantasy Brave Exvius (FFBE)',\n 'last_four_banners' : nav.get_last_four_banners('all'),\n 'all_banner_info' : get_all_banner_info(),\n }\n\n bn_path = os.path.join(sitesettings.LOCAL_FILE_PATH, 'banner')\n\n if not os.path.exists(bn_path):\n os.makedirs(bn_path)\n\n template_file = 'bannerlist.html'\n html_file_loc = os.path.join(bn_path, 'index.html')\n generatehtml.generate_html(\n html_file_loc, template_file, template_vars, os.path.join(os.getcwd(), 'templates'))", "def banner_wrapper(banner_url):\n # so simple\n return '{url}<img src=\"{url}\" alt=\"{alt}\">'.format(\n url=banner_url,\n alt='Banner'\n )", "def choose_banner(banners):\n # simple random\n n = random.randint(0, len(banners)-1)\n return banners[n]", "def get_shares_banners():\n try:\n shares_banners = SharesBanner.objects.all()\n except SliderBanner.DoesNotExist:\n shares_banners = None\n return shares_banners", "def get_banners():\n banners = cache.get('banners')\n if not banners:\n banners = Banner.objects.filter(general=True)\n cache.set('banners', banners)\n return {'banners': banners}", "def banner_hash(self) -> typing.Optional[str]:", "def banner(self, banner):\n self._banner = banner", "def get_banner_layout(app):\n banner_layout = html.Div(className='row', id=\"banner\",\n children=[html.Div(\n html.Img(src=app.get_asset_url(\"252px-Rheem_logo.svg.png\"), style={\"width\": \"30%\",\n \"vertical-align\": \"middle\"}),\n className='two columns'),\n html.Div(html.H3(\"Odin Project: Heat Pump Water Heater Gen V Field Test\",\n className='header', id=\"title\", style={\"letter-spacing\": \"-1.6px\"}),\n className=\"ten 
columns\")],\n )\n return banner_layout", "def get_slider_banners():\n try:\n slider_banners = SliderBanner.objects.all()\n except SliderBanner.DoesNotExist:\n slider_banners = None\n return slider_banners", "def get_balancer_info(self):\n try:\n response = self.client.describe_load_balancers(\n Names=[self.get_balancer_name()],\n )\n assert response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n vpc_id = self.get_vpc_id()\n balancers = [balancer for balancer in response['LoadBalancers'] if balancer['VpcId'] == vpc_id]\n\n return balancers[0]\n except ClientError:\n self.logger.debug('Unable to find load balancer {}.'.format(self.get_balancer_name()))\n return None", "def get_album(self):\n return self._album", "def billing(self):\n return self._billing", "def content(self):\r\n return self.listing_obj", "def download_banner(self, banner_path):\n serie = self._root.find('Series')\n banner = unicode(serie.find('banner').text)\n if banner != '' and not os.path.isfile(banner_path):\n urllib.urlretrieve(self.URL_BANNER + banner, banner_path)", "def getBid(self):\r\n\t\treturn self.data['bid']", "def billing_info(self):\r\n return BillingInfo(self)", "def present_banner():\n writer(BANNER, FORMAT[\"BANNER\"])\n writer(\" \" * 30 + f\"version {VERSION}\")", "def advertise_id(self):\n return self._advertise_id", "def banner_hash(self) -> undefined.UndefinedNoneOr[str]:", "def pre_authentication_login_banner(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pre_authentication_login_banner\")", "def pre_authentication_login_banner(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pre_authentication_login_banner\")", "def get_balancer_arn(self):\n return self.get_balancer_info()['LoadBalancerArn']" ]
[ "0.71143806", "0.6777335", "0.65265286", "0.6073808", "0.6052043", "0.58798075", "0.5859273", "0.58299464", "0.5820458", "0.5805905", "0.5702834", "0.5584134", "0.55791813", "0.55625266", "0.5438825", "0.53803754", "0.5352796", "0.5327807", "0.52649236", "0.52467376", "0.5245156", "0.52177185", "0.5184273", "0.5166478", "0.51295793", "0.5118609", "0.5104291", "0.50712615", "0.50712615", "0.505944" ]
0.8368317
0
Sets the banner of this Listing.
def banner(self, banner): self._banner = banner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def banner(self):\n return self._banner", "def set_last_banner(self, ip, banner_url):\n self.last_banners[ip] = banner_url", "def present_banner():\n writer(BANNER, FORMAT[\"BANNER\"])\n writer(\" \" * 30 + f\"version {VERSION}\")", "def download_banner(self, banner_path):\n serie = self._root.find('Series')\n banner = unicode(serie.find('banner').text)\n if banner != '' and not os.path.isfile(banner_path):\n urllib.urlretrieve(self.URL_BANNER + banner, banner_path)", "def create_banner_list():\n template_vars = {\n 'title' : 'Banners - ' + sitesettings.SITE_NAME,\n 'siteurl' : sitesettings.SITE_URL,\n 'sitename' : sitesettings.SITE_NAME,\n 'meta_desc' : 'List of step-up banners in Final Fantasy Brave Exvius (FFBE)',\n 'last_four_banners' : nav.get_last_four_banners('all'),\n 'all_banner_info' : get_all_banner_info(),\n }\n\n bn_path = os.path.join(sitesettings.LOCAL_FILE_PATH, 'banner')\n\n if not os.path.exists(bn_path):\n os.makedirs(bn_path)\n\n template_file = 'bannerlist.html'\n html_file_loc = os.path.join(bn_path, 'index.html')\n generatehtml.generate_html(\n html_file_loc, template_file, template_vars, os.path.join(os.getcwd(), 'templates'))", "def set_bid(self, bid):\n self.__bid = bid", "def banner_url(self) -> typing.Optional[files.URL]:\n return self.make_banner_url()", "def set_metadata(self, loadbalancer, metadata):\n return loadbalancer.set_metadata(metadata)", "def render_banner(self, width=300, height=85):\n img_path = IMG_PATH + os.sep + CARD_BANNER\n banner_img = Image.open(img_path)\n banner_img = banner_img.resize((width, height))\n return banner_img", "def edit_clan_banner(self, groupId):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/{groupId}/EditClanBanner/\"))", "def get_banner(self,context,request):\n ba = queryMultiAdapter((context,request), interfaces.IBanner)\n if not ba:\n return ''\n return ba()", "def banner_wrapper(banner_url):\n # so simple\n return '{url}<img src=\"{url}\" alt=\"{alt}\">'.format(\n url=banner_url,\n alt='Banner'\n )", "def billing(self, billing):\n\n self._billing = billing", "def set_album(self, album: str) -> None:\n self.album = album", "def brand(self, brand):\n\n self._brand = brand", "def brand(self, brand):\n\n self._brand = brand", "def brand(self, brand):\n\n self._brand = brand", "def _print_banner(out_file, banner_text):\n banner_separator = \"\".ljust(len(banner_text), \"=\")\n\n out_file.write(\"\\n{}\\n{}\\n{}\\n\".format(\n banner_separator,\n banner_text,\n banner_separator))", "def brand(self, brand: object):\n\n self._brand = brand", "def bbs_show_banner(tn, short = True):\n lines = cmd.lban(tn, short_banner = short)\n for line in lines:\n print(filter_tags(line))", "def set_object_description(self, agent, Description):\n\n self.send_ObjectDescription(agent, agent.agent_id, agent.session_id, {1:[self.LocalID, Description]})", "def capacidad(self, capacidad: int):\n\n self._capacidad = capacidad", "def bold(self, bold):\n\n self._bold = bold", "def plan(self, plan):\n\n self._plan = plan", "def load_banner_module(self):\n self.banner_module = None\n try:\n if self.force_json:\n banner_module_name = \"BannerJson\"\n else:\n banner_module_name = self.config_dict[\"BannerModule\"]\n \n except KeyError:\n print(\"***********************************\")\n print(\"* No BannerModule defined! 
EXITING \")\n print(\"***********************************\")\n sys.exit(1)\n\n try:\n banner_module = importlib.import_module(banner_module_name)\n except Exception as e:\n print(f\"******************************************\")\n print(f\"* Exception loading {banner_module_name} \")\n print(f\"* {e} \")\n print(f\"* in: {sys.path} \")\n print(f\"******************************************\")\n print(f\"Script-Path: {os.path.dirname(os.path.realpath(__file__))}\")\n print(f\"Working-Path: {os.getcwd()}\")\n sys.exit(1)\n\n if hasattr(banner_module, \"create_instance\"):\n self.banner_module = banner_module\n # print(f\"Successfully registered banner plugin '{banner_module_name}'\")\n else:\n print(\"************************************************\")\n print(f\"* BannerModule {banner_context_name}\")\n print(\"* is missing create_instance function; EXITING \")\n print(\"************************************************\")\n sys.exit(1)\n\n # TODO The banner is probably more a property of the router\n self.banner = self.banner_module.create_instance()", "def Banner():\n main_banner = pyfiglet.figlet_format(\" UTM NAT\", font = \"slant\")\n sub_banner1 = pyfiglet.figlet_format(\"tool\", font = \"isometric1\")\n sub_banner2 = \" -Generate a CSV file of Sophos UTM NAT statements-\"\n sub_banner3 = \" via REST API using the power of Python\"\n\n print()\n print('=' * 62)\n print(main_banner)\n print(sub_banner1)\n print()\n print(sub_banner2)\n print(sub_banner3)\n print()\n print('=' * 62)\n print()", "def clan(self, clan):\n\n self._clan = clan", "def __init__(self, snakeInterface, controller, bannerColor, bannerFontColor, bannerWidth, bannerHeight):\n self.snakeInterface = snakeInterface\n self.controller = controller\n self.bannerColor = bannerColor\n self.bannerFontColor = bannerFontColor\n self.bannerWidth = bannerWidth\n self.bannerHeight = bannerHeight\n self.scoreBannerRect = self.setScoreBannerRect(1, 0, self.bannerWidth, self.bannerHeight)\n\n self.createScoreBanner(self.scoreBannerRect)", "def ad(self, ad):\n # type: (int) -> None\n\n if ad is not None:\n if not isinstance(ad, int):\n raise TypeError(\"Invalid type for `ad`, type has to be `int`\")\n\n self._ad = ad", "def billing_info(self, billing_info):\n\n self._billing_info = billing_info" ]
[ "0.6423644", "0.5612951", "0.5583017", "0.5382535", "0.5339387", "0.5252789", "0.5187555", "0.5170073", "0.5092044", "0.50683033", "0.50154686", "0.50043285", "0.49510226", "0.4919356", "0.49002323", "0.49002323", "0.49002323", "0.48007524", "0.47906277", "0.47864276", "0.47459042", "0.47317383", "0.47300252", "0.47083822", "0.4696756", "0.46965647", "0.46557292", "0.4642372", "0.46411505", "0.4638682" ]
0.82278925
0
Gets the regions of this Listing. The regions where the listing is available.
def regions(self): return self._regions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_regions(self):\n return self._regions", "def regions(self) -> Sequence[str]:\n return pulumi.get(self, \"regions\")", "def regions(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"regions\")", "def regions(self):\n\n class RegionIter(object):\n def __init__(self, region_based):\n self._region_based = region_based\n\n def __len__(self):\n return self._region_based._region_len()\n\n def __iter__(self):\n return self()\n\n def _fix_chromosome(self, regions):\n for r in regions:\n r.fix_chromosome(copy=True)\n\n def __call__(self, key=None, *args, **kwargs):\n fix_chromosome = kwargs.pop('fix_chromosome', False)\n\n if key is None:\n iterator = self._region_based._region_iter(*args, **kwargs)\n else:\n if isinstance(key, string_types) or isinstance(key, GenomicRegion):\n iterator = self._region_based.region_subset(key, *args, **kwargs)\n else:\n iterator = self._region_based._get_regions(key, *args, **kwargs)\n\n if fix_chromosome:\n return self._fix_chromosome(iterator)\n else:\n return iterator\n\n def __getitem__(self, item):\n if isinstance(item, string_types) or isinstance(item, GenomicRegion):\n return self._region_based.region_subset(item)\n return self._region_based._get_regions(item)\n\n return RegionIter(self)", "def RegionList(self):\n command = \"\"\"\n IPython.notebook.kernel.execute(\"RegionList=\" + JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})));\n \"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)", "def regions(self, member_state):\n rates = self._get_rates(member_state)\n return list(rates.regions.keys())", "def regions(self):\n regions = set()\n for report in self._reports:\n region = report.model.region\n if region is None or region in regions:\n continue\n yield region", "def list_regions(self, **kwargs):\n resource_path = \"/regions\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_regions got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")", "def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"regions\")", "def regions(self) -> Optional[Sequence['outputs.GetTrafficPolicyDocumentRuleRegionResult']]:\n return pulumi.get(self, \"regions\")", "def get_regions(self):\n if self.initiated is False:\n raise RuntimeError(\"Initiate first\")\n\n return self.R", "def region(self):\n return [node.region for node in self]", "def scope(self) -> List[Region]:\n return [self]", "def filter_regions(self):\n return self.filter_nodes('/DistrictBuilder/Regions/Region')", "def ListRegions(self):\n project = properties.VALUES.core.project.GetOrFail()\n request = self.messages.CloudfunctionsProjectsLocationsListRequest(\n name='projects/' + project\n )\n return list_pager.YieldFromList(\n service=self.client.projects_locations,\n 
request=request,\n field='locations',\n batch_size_attribute='pageSize',\n )", "def get_regions(self,online=False):\n clients = HWIOS.pb_server.get_clients()\n regions = []\n for client in clients:\n for service in client.region_services:\n if online: \n if service['status'] == 'ON':\n for region in service['regions']:\n regions.append(region)\n else:\n for region in service['regions']:\n region['status'] = service['status']\n regions.append(region)\n return regions", "def region(self):\n return regions.lookup(self.state)", "def get_regions(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_regions\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result", "def _get_available_regions():\n session = boto3.session.Session()\n\n return session.get_available_regions(service_name='s3')", "def scope(self) -> List[Region]:\n return self._scope", "def listPredefinedRegions(self):\n\n res = self._Client__proxy.listPredefinedRegions(\n self._Client__session)\n\n self.checkResult(res)\n return res[\"predefinedRegions\"]", "def api_get_regions():\n db_session = DBSession()\n\n rows = []\n criteria = '%'\n if request.args and request.args.get('q'):\n criteria += request.args.get('q') + '%'\n else:\n criteria += '%'\n\n regions = db_session.query(Region).filter(Region.name.like(criteria)).order_by(Region.name.asc()).all()\n if len(regions) > 0:\n if request.args.get('show_all'):\n rows.append({'id': 0, 'text': 'ALL'})\n for region in regions:\n rows.append({'id': region.id, 'text': region.name})\n\n return jsonify(**{'data': rows})", "def list_regions():\n regions_areas = (\n db.session.query(\n models.Region.code.label(\"region_code\"),\n models.Region.name.label(\"region_name\"),\n db.case([(models.District.code.is_(None),\n db.literal_column(\"'admin_area'\"))],\n else_=db.literal_column(\"'district'\")).label(\"area_type\"),\n db.case([(models.District.code.is_(None), models.AdminArea.code)],\n else_=models.District.code).label(\"area_code\"),\n db.case([(models.District.code.is_(None), models.AdminArea.name)],\n else_=models.District.name).label(\"area_name\")\n ).select_from(models.Region)\n .join(models.Region.areas)\n .outerjoin(models.AdminArea.districts)\n .filter(models.Region.code != \"GB\")\n .order_by(\"region_name\", \"area_name\")\n .all()\n )\n regions = {}\n areas = {}\n for row in regions_areas:\n regions[row.region_code] = row.region_name\n areas.setdefault(row.region_code, []).append(row)\n\n return render_template(\"regions.html\", regions=regions, areas=areas)", "def get_valid_regions(self):\n conn = self._boto.ec2.connect_to_region(self.cli_region)\n\n regions = []\n for region in conn.get_all_regions():\n if getattr(RegionCode.Region, region.name, None) is not None:\n regions.append(RegionCode.Region[region.name])\n else:\n regions.append(region.name)\n\n return regions", "def get_valid_regions(self):\n pass", "def selected(self):\n\t\treturn [self.regions[int(i)]\n\t\t\tfor i in self.regionListing.hlist.info_selection()]", "def get_valid_regions(self):\n client = self._boto.client('ec2')\n\n regions = []\n for region in client.describe_regions().get('Regions', []):\n if getattr(RegionCode.Region, region.get('RegionName'), None) is not None:\n regions.append(RegionCode.Region[region.get('RegionName')])\n else:\n regions.append(region.get('RegionName'))\n\n return regions", "def get_regions(ec2_client=None):\n if not ec2_client:\n ec2_client = boto3.client('ec2')\n resp = 
ec2_client.describe_regions()\n return [region['RegionName'] for region in resp.get('Regions', [])]", "def getregion(self, *args, **kwargs):\n return _image.image_getregion(self, *args, **kwargs)", "def getRegions(self, polygon: Polygon, epsg: int) -> list:\n self.output_epsg = epsg\n polygon_df = gpd.GeoDataFrame([polygon], columns=['geometry'])\n\n polygon_df.set_crs(epsg=self.output_epsg, inplace=True)\n polygon_df['geometry'] = polygon_df['geometry'].to_crs(epsg=self.input_epsg)\n minx, miny, maxx, maxy = polygon_df['geometry'][0].bounds\n\n cond_xmin = self.metadata.xmin <= minx\n cond_xmax = self.metadata.xmax >= maxx\n cond_ymin = self.metadata.ymin <= miny\n cond_ymax = self.metadata.ymax >= maxy\n\n df = self.metadata[cond_xmin & cond_xmax & cond_ymin & cond_ymax]\n sort_df = df.sort_values(by=['year'])\n regions = sort_df['filename'].to_list()\n return regions" ]
[ "0.80393547", "0.7805494", "0.7723249", "0.72262144", "0.7184458", "0.7143942", "0.7119229", "0.7099365", "0.706028", "0.70048416", "0.70002544", "0.6929981", "0.68779176", "0.6825818", "0.6808158", "0.6807868", "0.6655617", "0.6641144", "0.66303986", "0.657817", "0.657202", "0.6562342", "0.6524341", "0.63982695", "0.63940656", "0.63760483", "0.6362827", "0.6351244", "0.6319498", "0.6292091" ]
0.7981078
1
Sets the regions of this Listing. The regions where the listing is available.
def regions(self, regions): self._regions = regions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_regions(self, **kwargs):\n resource_path = \"/regions\"\n method = \"GET\"\n\n expected_kwargs = [\"retry_strategy\"]\n extra_kwargs = [_key for _key in six.iterkeys(kwargs) if _key not in expected_kwargs]\n if extra_kwargs:\n raise ValueError(\n \"list_regions got unknown kwargs: {!r}\".format(extra_kwargs))\n\n header_params = {\n \"accept\": \"application/json\",\n \"content-type\": \"application/json\"\n }\n\n retry_strategy = self.retry_strategy\n if kwargs.get('retry_strategy'):\n retry_strategy = kwargs.get('retry_strategy')\n\n if retry_strategy:\n return retry_strategy.make_retrying_call(\n self.base_client.call_api,\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")\n else:\n return self.base_client.call_api(\n resource_path=resource_path,\n method=method,\n header_params=header_params,\n response_type=\"list[Region]\")", "def RegionList(self):\n command = \"\"\"\n IPython.notebook.kernel.execute(\"RegionList=\" + JSON.stringify(JS9.GetShapes(\"regions\", {{display: '{wid}JS9'}})));\n \"\"\".format(wid=self.wid)\n get_ipython().run_cell_magic('javascript', '', command)", "def region_sets(self,listA,listB):\n self.setA = GenomicRegionSet('for Unit Test')\n for i in range(len(listA)):\n self.setA.add(GenomicRegion(chrom=listA[i][0], initial=listA[i][1], final=listA[i][2]))\n \n self.setB = GenomicRegionSet('for Unit Test')\n for i in range(len(listB)):\n self.setB.add(GenomicRegion(chrom=listB[i][0], initial=listB[i][1], final=listB[i][2]))", "def regions(self) -> Sequence[str]:\n return pulumi.get(self, \"regions\")", "def copy_with_regions(self, regions):\n\n new = copy.deepcopy(self)\n new.regions = []\n for region in regions:\n new.regions.append(self._parse_region(region))\n\n return new", "def regions(self):\n\n class RegionIter(object):\n def __init__(self, region_based):\n self._region_based = region_based\n\n def __len__(self):\n return self._region_based._region_len()\n\n def __iter__(self):\n return self()\n\n def _fix_chromosome(self, regions):\n for r in regions:\n r.fix_chromosome(copy=True)\n\n def __call__(self, key=None, *args, **kwargs):\n fix_chromosome = kwargs.pop('fix_chromosome', False)\n\n if key is None:\n iterator = self._region_based._region_iter(*args, **kwargs)\n else:\n if isinstance(key, string_types) or isinstance(key, GenomicRegion):\n iterator = self._region_based.region_subset(key, *args, **kwargs)\n else:\n iterator = self._region_based._get_regions(key, *args, **kwargs)\n\n if fix_chromosome:\n return self._fix_chromosome(iterator)\n else:\n return iterator\n\n def __getitem__(self, item):\n if isinstance(item, string_types) or isinstance(item, GenomicRegion):\n return self._region_based.region_subset(item)\n return self._region_based._get_regions(item)\n\n return RegionIter(self)", "def regions(self):\n return self._regions", "def _choose_regions(self, display_regions=False):\n dstl = Load_DSTL()\n if self.class_type == 1:\n # Select regions where there are buildings (with red roofs)\n test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n train_image, train_mask = dstl.extract_region_pos(1900, 3100, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(950, 1450, cutout_size=[200, 200], object_class=self.class_type)\n elif self.class_type == 5:\n train_image, train_mask = dstl.extract_region_pos(1150, 2150, cutout_size=[400, 400], object_class=self.class_type)\n 
test_image, test_mask = dstl.extract_region_pos(2300, 3000, cutout_size=[400, 400], object_class=self.class_type)\n cv_image, cv_mask = dstl.extract_region_pos(1900, 1950, cutout_size=[400, 400], object_class=self.class_type)\n else:\n pass\n self.images = {'train': train_image, 'cv': cv_image, 'test': test_image}\n self.masks = {'train': train_mask, 'cv': cv_mask, 'test': test_mask}\n if display_regions:\n for key in self.images.keys():\n display_three_band(self.images[key], self.masks[key], colors='green', title='{:} region'.format(key))", "def get_regions(self):\n return self._regions", "def region_setup(self, slices, ipa_regions):\n self.ipa_regions = ipa_regions\n self.slices = slices", "def add_regions(self, regions, **options):\n \n options.setdefault(\"col\", color(0,0,1))\n options.setdefault(\"style\", \"box\")\n options.setdefault(\"height\", 0.5)\n \n return self.add_track(RegionTrack, -.5, regions, **options)", "def __init__(__self__, *,\n regions: Optional[Sequence['outputs.RegionSettingResponse']] = None,\n routing_method: Optional[str] = None):\n if regions is not None:\n pulumi.set(__self__, \"regions\", regions)\n if routing_method is not None:\n pulumi.set(__self__, \"routing_method\", routing_method)", "def region(self, region):\n \n self._region = region", "def get_regions(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_regions\n result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result", "def regions(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"regions\")", "def __init__(self):\n self.regions = []", "def region(self, region):\n\n self._region = region", "def region(self, region):\n\n self._region = region", "def region(self, region):\n\n self._region = region", "def regions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"regions\")", "def set_region(sender, instance, *args, **kwargs):\n if instance.geocity and not instance.georegion:\n instance.georegion = instance.geocity.region", "def SetRegion(self,stateAbbrev):\n if not stateAbbrev in self.VectorData:\n print \"Error - No Data for %s available\" % stateAbbrev\n print \"Valid state abbreviations are:\", self.StateAbbrevList\n else:\n self.SelectedRegion = stateAbbrev", "def filter_regions(self):\n return self.filter_nodes('/DistrictBuilder/Regions/Region')", "def setBoundingRegion(self):\n\n sx, sy, sz = self.min_point\n bx, by, bz = self.max_point\n\n point1 = [sx, sy, sz]\n point2 = [bx, by, bz]\n point3 = [bx, sy, sz]\n point4 = [bx, by, sz]\n point5 = [sx, by, sz]\n point6 = [sx, by, bz]\n point7 = [sx, sy, bz]\n point8 = [bx, sy, bz]\n\n points = [point1, point2, point3, point4, point5, point6, point7, point8]\n\n lines = [[0, 2], [0, 6], [0, 4], [2, 3], [2, 7], [6, 7], \n [6, 5], [4, 5], [4, 3], [3, 1], [1, 7], [5, 1]]\n\n colors = [self.colour for i in range(len(lines))]\n bounding_region = o3d.geometry.LineSet(\n points=o3d.utility.Vector3dVector(points),\n lines=o3d.utility.Vector2iVector(lines),\n )\n bounding_region.colors = o3d.utility.Vector3dVector(colors)\n self.bounding = bounding_region", "def AddRegions(self, **kwargs):\n # Addregions use pixel coordinates. 
listRegions and SaveRegions use RA and Dec.\n n_objs = 0\n objs = []\n # default shape is circle\n if not 'shape' in kwargs:\n kwargs['shape'] = ['circle']\n for k in kwargs.keys():\n n_objs = max(n_objs, len(kwargs[k]))\n for j in range(n_objs):\n temp = {}\n for k in kwargs.keys():\n try:\n temp[k] = kwargs[k][j]\n except IndexError:\n if k == 'shape': \n temp[k] = 'circle'\n objs.append(temp)\n self.all_objs = json.dumps(objs)\n command = \"JS9.AddRegions({objs}, {{display:'{wid}{suffix}'}})\".format(objs=self.all_objs, wid=self.wid, suffix=self.suffix)\n get_ipython().run_cell_magic('javascript', '', command)", "def get_regions():\n\n # Also known as the 'climbing directory'\n route_guide = urlopen('https://www.mountainproject.com/route-guide',\n context=ctx)\n # Opens HTML\n region_html = route_guide.read()\n # Parses HTML with BS package\n region_soup = BeautifulSoup(region_html, 'html.parser')\n # Finds regions area of the page\n regions = region_soup.find('div', id='route-guide')\\\n .find_all('div', class_='mb-half')\n\n for region in regions:\n # Link to region area guide\n url = region.find('a')['href']\n # English name of region\n region_name = region.find('a').get_text()\n # Writes region name and url to Areas DB. This gives the region a\n # unique id automatically\n cursor.execute('''\n INSERT INTO Areas(url, name)\n VALUES ('%s', '%s')\n ON CONFLICT DO NOTHING\n ''' % (url, region_name))\n # Commits to DB\n conn.commit()", "def regions(self):\n regions = set()\n for report in self._reports:\n region = report.model.region\n if region is None or region in regions:\n continue\n yield region", "def ListRegions(self):\n project = properties.VALUES.core.project.GetOrFail()\n request = self.messages.CloudfunctionsProjectsLocationsListRequest(\n name='projects/' + project\n )\n return list_pager.YieldFromList(\n service=self.client.projects_locations,\n request=request,\n field='locations',\n batch_size_attribute='pageSize',\n )", "def test_assign_to_regions(self):\n \n tool = pybedtools.BedTool(clipper.test_file(\"FOX2Brain-05.15.09.polyATrim.adapterTrim.rmRep.sorted.rmDup.peaks.bed\"))\n \n assign_to_regions(tool=tool, \n clusters=\"test\", \n speciesFA= clipper.test_file(\"mm9.fa\"), \n regions_dir=os.path.join(clipper.test_dir(), \"regions\"), \n regions={\"exons\" : \"Exon\", \"utr3\" : \"3' UTR\", \n \"utr5\" : \"5' UTR\", \"proxintron500\" : \"Proximal Intron\", \n \"distintron500\" : \"Distal Intron\"} ,\n assigned_dir = clipper.test_dir(),\n fasta_dir = clipper.test_dir(),\n species=\"mm9\", \n nrand = 3, \n getseq=False)", "def ranges(self, ranges):\n \n self._ranges = ranges" ]
[ "0.6481046", "0.64200956", "0.6331844", "0.62551534", "0.62191546", "0.61702317", "0.60755", "0.58735037", "0.5828514", "0.5823908", "0.58030134", "0.5796802", "0.57923263", "0.57890207", "0.5787557", "0.57725894", "0.5729864", "0.5729864", "0.5729864", "0.5685778", "0.5671174", "0.56067777", "0.5546608", "0.5522584", "0.5515762", "0.55047405", "0.54731447", "0.5470826", "0.54082733", "0.53872585" ]
0.7949664
0
Gets the package_type of this Listing. The listing's package type.
def package_type(self): return self._package_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_type (self):\n return self._stype", "def type(self):\n return 'package'", "def get_type(self):\n return self._type", "def get_type(self):\n return self._type", "def type(self):\n\n return self.manifest[\"type\"]", "def get_type(self):\n return self.type", "def get_type(self):\n return self.type", "def get_type(self):\n return self._TYPE", "def get_type(self) -> str:\n return self.type", "def type(self):\n return self._getValue('type')", "def get_type(self):\n return self._sType", "def getType(self):\n return self.type_", "def item_type(self):\n return self._item_type", "def get_type(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetType', self.handle)", "def item_type(self) -> str:\n return self.__item_type", "def get_type(self):\n return self._type_obj", "def getType(self):\n return self.type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type", "def type(self):\n return self._type" ]
[ "0.6629074", "0.66246414", "0.65971076", "0.65971076", "0.6529473", "0.65072304", "0.65072304", "0.6460079", "0.6435091", "0.6430391", "0.6405827", "0.6404363", "0.64036995", "0.63900316", "0.6379892", "0.6343936", "0.63309747", "0.63023746", "0.63023746", "0.63023746", "0.63023746", "0.63023746", "0.63023746", "0.63023746", "0.63023746", "0.63023746", "0.63023746", "0.63023746", "0.63023746", "0.63023746" ]
0.78860635
1
Sets the package_type of this Listing. The listing's package type.
def package_type(self, package_type): allowed_values = ["ORCHESTRATION", "IMAGE"] if not value_allowed_none_or_none_sentinel(package_type, allowed_values): package_type = 'UNKNOWN_ENUM_VALUE' self._package_type = package_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def package_type(self, package_type):\n\n self._package_type = package_type", "def setType(self, newType):\n self._itemType = newType", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self.type = type", "def set_type(self, type):\n self._type = type", "def item_group_type(self, item_group_type):\n\n self._item_group_type = item_group_type", "def package_type(self):\n return self._package_type", "def package_type(self):\n return self._package_type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type", "def type(self, type):\n\n self._type = type" ]
[ "0.8191449", "0.6388714", "0.6379617", "0.6379617", "0.6370877", "0.6280505", "0.62801737", "0.62801737", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625", "0.61183625" ]
0.6474645
1
Gets the default_package_version of this Listing. The default package version.
def default_package_version(self): return self._default_package_version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDefaultPackageVersion():\n return _libsbml.CompExtension_getDefaultPackageVersion()", "def getDefaultPackageVersion():\n return _libsbml.MultiExtension_getDefaultPackageVersion()", "def getDefaultPackageVersion():\n return _libsbml.FbcExtension_getDefaultPackageVersion()", "def getDefaultPackageVersion():\n return _libsbml.LayoutExtension_getDefaultPackageVersion()", "def getDefaultPackageVersion():\n return _libsbml.GroupsExtension_getDefaultPackageVersion()", "def getDefaultPackageVersion():\n return _libsbml.QualExtension_getDefaultPackageVersion()", "def FbcExtension_getDefaultPackageVersion():\n return _libsbml.FbcExtension_getDefaultPackageVersion()", "def CompExtension_getDefaultPackageVersion():\n return _libsbml.CompExtension_getDefaultPackageVersion()", "def getDefaultVersion():\n return _libsbml.LayoutExtension_getDefaultVersion()", "def LayoutExtension_getDefaultPackageVersion():\n return _libsbml.LayoutExtension_getDefaultPackageVersion()", "def getDefaultVersion():\n return _libsbml.CompExtension_getDefaultVersion()", "def getDefaultVersion():\n return _libsbml.MultiExtension_getDefaultVersion()", "def default_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"default_version_number\")", "def MultiExtension_getDefaultPackageVersion():\n return _libsbml.MultiExtension_getDefaultPackageVersion()", "def getDefaultVersion():\n return _libsbml.FbcExtension_getDefaultVersion()", "def GroupsExtension_getDefaultPackageVersion():\n return _libsbml.GroupsExtension_getDefaultPackageVersion()", "def getDefaultVersion():\n return _libsbml.GroupsExtension_getDefaultVersion()", "def default_package_version(self, default_package_version):\n self._default_package_version = default_package_version", "def get_default_version(self):\n # latest is a special case where we don't have to check if it exists\n if self.default_version == 'latest':\n return self.default_version\n # check if the default_version exists\n version_qs = self.versions.filter(\n slug=self.default_version,\n active=True\n )\n if version_qs.exists():\n return self.default_version\n return 'latest'", "def getDefaultVersion():\n return _libsbml.SBMLDocument_getDefaultVersion()", "def QualExtension_getDefaultPackageVersion():\n return _libsbml.QualExtension_getDefaultPackageVersion()", "def LayoutExtension_getDefaultVersion():\n return _libsbml.LayoutExtension_getDefaultVersion()", "def getDefaultVersion():\n return _libsbml.QualExtension_getDefaultVersion()", "def FbcExtension_getDefaultVersion():\n return _libsbml.FbcExtension_getDefaultVersion()", "def CompExtension_getDefaultVersion():\n return _libsbml.CompExtension_getDefaultVersion()", "def get_default(self):\n\n\t\treturn self.__default", "def GroupsExtension_getDefaultVersion():\n return _libsbml.GroupsExtension_getDefaultVersion()", "def MultiExtension_getDefaultVersion():\n return _libsbml.MultiExtension_getDefaultVersion()", "def getPackageVersion(self):\n return _libsbml.SBase_getPackageVersion(self)", "def version(self):\n if not hasattr(self, '_version'):\n self._version = self._get_package_version()\n return self._version" ]
[ "0.83390003", "0.8327047", "0.826186", "0.8256483", "0.80970424", "0.8055716", "0.76140296", "0.7595864", "0.75897837", "0.7588637", "0.7577043", "0.75349975", "0.75328416", "0.75281364", "0.7481759", "0.7434644", "0.7396291", "0.73511976", "0.73455566", "0.725068", "0.72183937", "0.6946564", "0.6933997", "0.68579173", "0.6829785", "0.67616254", "0.67614126", "0.6721485", "0.6691934", "0.66838133" ]
0.8867176
0
Sets the default_package_version of this Listing. The default package version.
def default_package_version(self, default_package_version): self._default_package_version = default_package_version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default_package_version(self):\n return self._default_package_version", "def getDefaultPackageVersion():\n return _libsbml.LayoutExtension_getDefaultPackageVersion()", "def getDefaultPackageVersion():\n return _libsbml.MultiExtension_getDefaultPackageVersion()", "def getDefaultPackageVersion():\n return _libsbml.CompExtension_getDefaultPackageVersion()", "def getDefaultPackageVersion():\n return _libsbml.FbcExtension_getDefaultPackageVersion()", "def getDefaultPackageVersion():\n return _libsbml.GroupsExtension_getDefaultPackageVersion()", "def getDefaultPackageVersion():\n return _libsbml.QualExtension_getDefaultPackageVersion()", "def set_default_version(self) -> Optional[bool]:\n return pulumi.get(self, \"set_default_version\")", "def SetDefaultVersion(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def getDefaultVersion():\n return _libsbml.LayoutExtension_getDefaultVersion()", "def getDefaultVersion():\n return _libsbml.MultiExtension_getDefaultVersion()", "def getDefaultVersion():\n return _libsbml.CompExtension_getDefaultVersion()", "def getDefaultVersion():\n return _libsbml.GroupsExtension_getDefaultVersion()", "def getDefaultVersion():\n return _libsbml.FbcExtension_getDefaultVersion()", "def getDefaultVersion():\n return _libsbml.SBMLDocument_getDefaultVersion()", "def LayoutExtension_getDefaultPackageVersion():\n return _libsbml.LayoutExtension_getDefaultPackageVersion()", "def default_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"default_version_number\")", "def setPackageVersion(self, *args):\n return _libsbml.ISBMLExtensionNamespaces_setPackageVersion(self, *args)", "def FbcExtension_getDefaultPackageVersion():\n return _libsbml.FbcExtension_getDefaultPackageVersion()", "def GroupsExtension_getDefaultPackageVersion():\n return _libsbml.GroupsExtension_getDefaultPackageVersion()", "def MultiExtension_getDefaultPackageVersion():\n return _libsbml.MultiExtension_getDefaultPackageVersion()", "def CompExtension_getDefaultPackageVersion():\n return _libsbml.CompExtension_getDefaultPackageVersion()", "def SetDefaultVersion(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def fusion_api_set_default_api_version(self, api=None):\n return self.version.set(api=api)", "def default(self, default):\n\n self._default = default", "def getDefaultVersion():\n return _libsbml.QualExtension_getDefaultVersion()", "def QualExtension_getDefaultPackageVersion():\n return _libsbml.QualExtension_getDefaultPackageVersion()", "def default(self, default):\n self._default = default\n return self", "def SetDefaultVersion(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def LayoutExtension_getDefaultVersion():\n return _libsbml.LayoutExtension_getDefaultVersion()" ]
[ "0.7756795", "0.72961414", "0.7260456", "0.7226229", "0.7168602", "0.7086842", "0.70237195", "0.689463", "0.6791142", "0.67155576", "0.6561462", "0.6515818", "0.6482345", "0.64594007", "0.63910294", "0.6364691", "0.63292134", "0.6326202", "0.62639976", "0.62392294", "0.62035227", "0.6180608", "0.61059225", "0.6092272", "0.6057623", "0.6018941", "0.59563744", "0.59501475", "0.5860992", "0.5768238" ]
0.8982981
0
Sets the links of this Listing. Links to reference material.
def links(self, links): self._links = links
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self._links = links", "def links(self, links):\n\n self.container['links'] = links", "def links(self, links):\n if links is None:\n raise ValueError(\"Invalid value for `links`, must not be `None`\")\n\n self._links = links", "def update_links(self, new_link):\r\n self.__links = new_link", "def documentation_links(self, documentation_links):\n self._documentation_links = documentation_links", "def support_links(self, support_links):\n self._support_links = support_links", "def setAddLinks(self,value):\n self.PDFreactorConfiguration.in1[\"addLinks\"] = value", "def links_permalink(self, links_permalink):\n\n self._links_permalink = links_permalink", "def hyperlinks(self, hyperlinks):\n\n self.container['hyperlinks'] = hyperlinks", "def references(self, references):\n\n self._references = references", "def links(self) -> Sequence[Link]:\n return self._links", "def links(self):\n\t\treturn self.list_of_links", "def link(self, link):\n\n self._set_field(\"link\", link)", "def link(self, link):\n\n self.container['link'] = link", "def link(self, link):\n\n self._link = link", "def link(self, link):\n\n self._link = link", "def link(self, link):\n\n self._link = link", "def link(self, link):\n\n self._link = link", "def link(self, link):\n\n self._link = link", "def link(self, link):\n\n self._link = link" ]
[ "0.76569706", "0.76569706", "0.76569706", "0.76569706", "0.76569706", "0.76569706", "0.76569706", "0.76569706", "0.76569706", "0.76569706", "0.76569706", "0.7470207", "0.73794794", "0.7082816", "0.6771199", "0.65618646", "0.64178824", "0.61817366", "0.6178725", "0.6121204", "0.6026034", "0.60238534", "0.59433687", "0.5934583", "0.5921616", "0.5921616", "0.5921616", "0.5921616", "0.5921616", "0.5921616" ]
0.77638394
0
Gets the is_featured of this Listing. Indicates whether the listing is included in Featured Listings.
def is_featured(self): return self._is_featured
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_featured(self, is_featured):\n self._is_featured = is_featured", "def test_get_featured_front_page_only_returns_featured(self):\r\n\r\n featured_app = self.create_app(None)\r\n non_featured_app = self.create_app(None)\r\n non_featured_app.name = 'other_app'\r\n non_featured_app.short_name = 'other_app'\r\n featured_app.owner = self.user\r\n non_featured_app.owner = self.user\r\n db.session.add(featured_app)\r\n db.session.add(non_featured_app)\r\n featured = Featured(app=featured_app)\r\n db.session.add(featured)\r\n db.session.commit()\r\n\r\n featured = cached_apps.get_featured_front_page()\r\n\r\n assert len(featured) is 1, featured", "def test_get_featured_front_page(self):\r\n\r\n app = self.create_app(None)\r\n app.owner = self.user\r\n db.session.add(app)\r\n featured = Featured(app=app)\r\n db.session.add(featured)\r\n db.session.commit()\r\n\r\n featured = cached_apps.get_featured_front_page()\r\n\r\n assert len(featured) is 1, featured", "def featured_news(self):\n return self.split(self.settings.featured_news)", "def is_flagged(self):\n return self._is_flagged", "def isFlagged(self):\n return self.flag", "def featured_services(self):\n return self.split(self.settings.featured_services)", "def get_featured_content():\n\n return FeatureHistory.objects.filter(featured=True).order_by('updated_at')[:3]", "def featured():\n try:\n where = []\n for badval in [\"Unlisted\", \"Archived\", \"Deleted\"]:\n where.append(\"featured != \\\"\" + badval + \"\\\"\")\n where = \"WHERE \" + \" AND \".join(where)\n tls = dbacc.query_entity(\"Timeline\", where)\n except ValueError as e:\n return util.serve_value_error(e)\n return util.respJSON(tls)", "def feature(self):\n return self._feature", "def feature(self):\n return self._feature", "def get_queryset(self, *args, **kwargs):\n return Product.objects.featured()", "def isEnabled(self):\n return self.enabled", "def exposes_features(self):\n return self._features_op is not None", "def isEnabled(self):\n return self.__enabled", "def is_on(self):\n return self._data[\"enabled\"]", "def test_toggle_featured(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published',\n on_homepage=True)\n story2 = create_story(title=\"Test Related Story\", \n summary=\"Test Related Story Summary\",\n byline=\"Test Related Story Byline\",\n status='published',\n on_homepage=False)\n self.assertEqual(story.on_homepage, True)\n self.assertEqual(story2.on_homepage, False)\n toggle_featured(None, None,\n Story.objects.filter(pk__in=[story.pk, story2.pk]))\n story = Story.objects.get(pk=story.pk)\n story2 = Story.objects.get(pk=story2.pk)\n self.assertEqual(story.on_homepage, False)\n self.assertEqual(story2.on_homepage, True)", "def is_enabled(self, feature):\n if feature in self._disabled:\n return False\n if feature in self._enabled:\n return True\n return self.default_enabled is True", "def test_get_featured_front_page_returns_required_fields(self):\r\n\r\n app = self.create_app(None)\r\n app.owner = self.user\r\n db.session.add(app)\r\n featured = Featured(app=app)\r\n db.session.add(featured)\r\n db.session.commit()\r\n fields = ('id', 'name', 'short_name', 'info', 'n_volunteers', 'n_completed_tasks')\r\n\r\n featured = cached_apps.get_featured_front_page()[0]\r\n\r\n for field in fields:\r\n assert featured.has_key(field), \"%s not in app info\" % field", "def enabled(self):\n return self._get('enabled')", "def supported_features(self):\n return self._support_flags", "def 
supported_features(self):\n return self._support_flags", "def supported_features(self):\n return self._support_flags", "def supported_features(self):\n return self._support_flags", "def features(self) -> Optional[pulumi.Input['DevToolPortalFeatureSettingsArgs']]:\n return pulumi.get(self, \"features\")", "def feature_flags(self):\r\n return self.env_tokens.get('FEATURES', dict())", "def is_on(self):\n return self._light_on", "def get_isenabled(self):\n return self.isenabled", "def is_on(self):\n return self.heater.is_on", "def lfs_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"lfs_enabled\")" ]
[ "0.72854847", "0.5756443", "0.54711634", "0.537165", "0.5351628", "0.53243643", "0.5320827", "0.52955836", "0.528026", "0.5253999", "0.5253999", "0.5250387", "0.5224164", "0.52167726", "0.5172859", "0.5123695", "0.50961107", "0.50507337", "0.4989964", "0.49887696", "0.49846926", "0.49846926", "0.49846926", "0.49846926", "0.4978222", "0.49729684", "0.49597973", "0.49574527", "0.49527556", "0.49469918" ]
0.83817303
0
Sets the is_featured of this Listing. Indicates whether the listing is included in Featured Listings.
def is_featured(self, is_featured): self._is_featured = is_featured
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_featured(self):\n return self._is_featured", "def test_toggle_featured(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published',\n on_homepage=True)\n story2 = create_story(title=\"Test Related Story\", \n summary=\"Test Related Story Summary\",\n byline=\"Test Related Story Byline\",\n status='published',\n on_homepage=False)\n self.assertEqual(story.on_homepage, True)\n self.assertEqual(story2.on_homepage, False)\n toggle_featured(None, None,\n Story.objects.filter(pk__in=[story.pk, story2.pk]))\n story = Story.objects.get(pk=story.pk)\n story2 = Story.objects.get(pk=story2.pk)\n self.assertEqual(story.on_homepage, False)\n self.assertEqual(story2.on_homepage, True)", "def is_flagged(self, is_flagged):\n \n self._is_flagged = is_flagged", "def one_election_set_featured(request, election):\n\n user = get_user(request)\n if not user_can_feature_election(user, election):\n raise PermissionDenied()\n\n featured_p = bool(int(request.GET['featured_p']))\n election.featured_p = featured_p\n election.save()\n \n return HttpResponseRedirect(settings.SECURE_URL_HOST + reverse(url_names.election.ELECTION_VIEW, args=[election.uuid]))", "def test_get_featured_front_page_only_returns_featured(self):\r\n\r\n featured_app = self.create_app(None)\r\n non_featured_app = self.create_app(None)\r\n non_featured_app.name = 'other_app'\r\n non_featured_app.short_name = 'other_app'\r\n featured_app.owner = self.user\r\n non_featured_app.owner = self.user\r\n db.session.add(featured_app)\r\n db.session.add(non_featured_app)\r\n featured = Featured(app=featured_app)\r\n db.session.add(featured)\r\n db.session.commit()\r\n\r\n featured = cached_apps.get_featured_front_page()\r\n\r\n assert len(featured) is 1, featured", "def featured():\n try:\n where = []\n for badval in [\"Unlisted\", \"Archived\", \"Deleted\"]:\n where.append(\"featured != \\\"\" + badval + \"\\\"\")\n where = \"WHERE \" + \" AND \".join(where)\n tls = dbacc.query_entity(\"Timeline\", where)\n except ValueError as e:\n return util.serve_value_error(e)\n return util.respJSON(tls)", "def test_get_featured_front_page(self):\r\n\r\n app = self.create_app(None)\r\n app.owner = self.user\r\n db.session.add(app)\r\n featured = Featured(app=app)\r\n db.session.add(featured)\r\n db.session.commit()\r\n\r\n featured = cached_apps.get_featured_front_page()\r\n\r\n assert len(featured) is 1, featured", "def get_queryset(self, *args, **kwargs):\n return Product.objects.featured()", "def set_fed_station(self, dpg_list):\n if self.fed_station_id:\n self.fed_station = dpg_list.by_id[self.fed_station_id]\n if self.fed_station:\n self.fed_station.add_supply_dpg(self)", "def set_floatable(self, floatable):\n self.widget.SetFloatable(floatable)", "def set_previewable(self, previewable):\n self._is_previewable = bool(previewable)", "def featured_news(self):\n return self.split(self.settings.featured_news)", "def __init__(__self__, *,\n state: Optional[pulumi.Input[Union[str, 'DevToolPortalFeatureState']]] = None):\n if state is None:\n state = 'Enabled'\n if state is not None:\n pulumi.set(__self__, \"state\", state)", "def SetFloatable(self, floatable):\n if self._floatable != floatable:\n self._floatable = floatable\n def closure(pane):\n pane.Floatable(floatable)\n self._PaneInfoOperation(closure)", "def featured(self):\n \n featured_content = []\n \n def rnd_content_items(return_items=1, **kwargs):\n if 'limit' not in kwargs:\n kwargs['limit'] = 3\n # searching due_date limits results 
to only assignments; if we want\n # all articles, then we don't want to limit to assignments\n #if 'due_date' not in kwargs and kwargs.get('type') == \"assignment\":\n # kwargs['due_date'] = \">now\"\n \n # AllanC - Because we were not getting new content on the site - if one of these returns gives back nothing, redo the query with a longer timeframe\n # This shouldnt be to much of a problem as this is cached server side every 10 mins .,.. once we get popular it will always break on the first try\n for days_jump in [i*7 for i in range(1,5)]:\n kwargs['update_date'] = \">\"+str(now() - datetime.timedelta(days=days_jump)) \n kwargs['exclude_content'] = \",\".join([str(content['id']) for content in featured_content])\n content_list = content_search(**kwargs)['data']['list']\n if len(content_list['items']) >= return_items:\n break\n log.info('Too few content retured for %s - attempting query again with day range of %d' % (kwargs, days_jump))\n \n random.shuffle( content_list['items'] )\n content_list['items'] = content_list['items'][:return_items]\n #content_list['count'] = len(content_list['items'])\n for content_item in content_list['items']:\n featured_content.append(content_item)\n \n return content_list\n \n \n #return to_apilist(featured_content, obj_type='contents') # AllanC - a liniear list of featured contebt\n \n # Sponsored content dictionary\n #s = {}\n #s['sponsored_responded' ] = rnd_content_items(return_items=1, sort='-num_responses', limit=3 )\n #s['sponsored_assignment' ] = rnd_content_items(return_items=1, sort='-views', type='assignment', limit=3 )\n \n # Featured content dictionary\n f = {}\n ##f['top_viewed_assignments' ] = rnd_content_items(return_items=2, sort='-views' , type='assignment', limit=5)\n f['recent' ] = rnd_content_items(return_items=3, sort='-update_date' , limit=5)\n f['most_responses' ] = rnd_content_items(return_items=3, sort='-num_responses', limit=5)\n if request.GET.get(\"location\"):\n f['near_me' ] = rnd_content_items(return_items=3, sort='distance' , limit=5, location=request.GET.get(\"location\"))\n f['recent_assignments' ] = rnd_content_items(return_items=3, sort='-update_date' , limit=5, list='assignments_active')\n \n # New members\n m ={}\n m['new_members'] = member_search(sort='-join_date', type='user' , limit=3)['data']['list']\n m['new_groups' ] = member_search(sort='-join_date', default_content_visibility='public', limit=3)['data']['list']\n \n # AllanC - HACK HACK!!!\n # The count from the query using the default_content_visibility='public' is wrong .. the content is correct .. 
the count is broken\n # Set the count FOR THIS LIST ONLY to match the items returned\n m['new_groups' ]['count'] = len(m['new_groups' ]['items'])\n \n return action_ok(\n data={\n #'sponsored' : s,\n 'featured' : f,\n 'members' : m,\n }\n )", "def featured_services(self):\n return self.split(self.settings.featured_services)", "def set_has_fan(self, value: bool = True):\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"has_fan\", value))\r\n self._has_fan = value", "def feature(self, feature):\n\n self._feature = feature", "def feature(self, feature):\n\n self._feature = feature", "def set_features(self, features):\n self.features_ = list(features)", "def set_show_stockfish(self, show_stockfish):\n self.show_stockfish = show_stockfish\n logger.debug(\"Stockfish output is now {0}\".format(\n \"enabled\" if self.show_stockfish else \"disabled\"))\n for halfmove, tag in self.tags.items():\n if self.show_stockfish:\n self.update_info(halfmove)\n else:\n tag.set_property(\n \"foreground-gdk\", Gdk.Color(65535, 65535, 65535))", "def flavor(self, flavor):\n self._flavor = flavor", "def get_featured_photos(self, count = 30, page = 1):\n uri = 'photos/featured'\n options = { 'per_page': count, 'page': page }\n return self.make_request(uri, options)", "def featured_playlists(\n self, locale=None, country=None, timestamp=None, limit=20, offset=0, **kwargs\n ):\n return self._get(\n API.FEATURED_PLAYLISTS.value,\n locale=locale,\n country=country,\n timestamp=timestamp,\n limit=limit,\n offset=offset,\n **kwargs,\n )", "def update(self):\n self._listings = None\n super(FeaturedListingsViewlet, self).update()\n self.context_state = queryMultiAdapter(\n (self.context, self.request), name='plone_context_state',\n )\n self.limit = self.config.get('limit', 25)\n self._get_listings()", "def set_features(self, features: list):\n self._features = features", "def is_flagged(self):\n return self._is_flagged", "def is_selected(self, is_selected):\n\n self.container['is_selected'] = is_selected", "def featured(request):\n\n artist = Artist.objects.all()\n\n \n\n context = {\n 'artist': artist,\n }\n\n return render(request, 'artist/featured.html', context)", "def is_started(self, is_started):\n\n self._is_started = is_started" ]
[ "0.7416678", "0.56418395", "0.5471387", "0.5297682", "0.5234299", "0.50609577", "0.49377993", "0.49298215", "0.4823086", "0.47666132", "0.47003838", "0.4657619", "0.46520665", "0.46120566", "0.45910445", "0.4579018", "0.45746464", "0.45710957", "0.45710957", "0.4534249", "0.4508544", "0.449845", "0.44761133", "0.44461098", "0.44362092", "0.44294757", "0.44204155", "0.44166186", "0.44089827", "0.43827268" ]
0.85117894
0
Reads the text file of affine transformations as it is returned by the Affine_transformations.py code
def read_affine(file): data = open(file, 'r').read() data = data.split('\n') for i in range(1, 5): data[i] = data[i].split(':') int_lon = np.fromstring(data[1][1], dtype='float', sep=',') int_lat = np.fromstring(data[2][1], dtype='float', sep=',') Nlon = len(int_lon) - 1 Nlat = len(int_lat) - 1 data[3][1] = data[3][1].split(',') data[4][1] = data[4][1].split(',') lon_transform = np.zeros((Nlon, 2)) lat_transform = np.zeros((Nlat, 2)) for i in range(Nlon): data[3][1][i] = data[3][1][i].split(' ') lon_transform[i] = [data[3][1][i][0], data[3][1][i][1]] for i in range(Nlat): data[4][1][i] = data[4][1][i].split(' ') lat_transform[i] = [data[4][1][i][0], data[4][1][i][1]] lon_transform = np.array(lon_transform).astype('float') lat_transform = np.array(lat_transform).astype('float') return int_lon, int_lat, lon_transform, lat_transform
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_transform(filename, dimension=3, precision='float'):\n filename = os.path.expanduser(filename)\n read_transform_fn = _read_transform_dict[precision][dimension]\n itk_tx = read_transform_fn(filename, dimension, precision)\n return tio.ants_transform(itk_tx)", "def _read_txt(file_path):\n translation_pairs = []\n with file_path.open() as f:\n for line in f:\n translation_pairs.append(\n evaluation.TranslationPair(source=None, translation=line.strip())\n )\n return translation_pairs", "def decodeAffine(self, m, n):\n\n decode = Affine(m, n)\n planeText = decode.decode(self.cipherText)\n\n if (self.verbose == 1):\n print(planeText)\n \n return(planeText)", "def transformAffine(self, path=None, src=None, dst=None):\n if path is not None:\n landmarks = pd.read_csv(path, skiprows=1,engine=\"c\", na_filter=False, header=None, delim_whitespace=True, dtype=np.float32).as_matrix()\n dst = landmarks[:,3:5]\n src = landmarks[:,1:3]\n affine = transform.estimate_transform(\"affine\",src,dst)\n data = self.stormData[0][:,0:2]\n data = affine(data)\n self.stormData[0][:,0:2] = data", "def _parse_transformations(lines):\n # Each transformation requires 3 lines for the (x,y,z) components\n if len(lines) % 3 != 0:\n raise InvalidFileError(\"Invalid number of transformation vectors\")\n n_transformations = len(lines) // 3\n\n rotations = np.zeros((n_transformations, 3, 3), dtype=float)\n translations = np.zeros((n_transformations, 3), dtype=float)\n\n transformation_i = 0\n component_i = 0\n for line in lines:\n # The first two elements (component and\n # transformation index) are not used\n transformations = [float(e) for e in line.split()[2:]]\n if len(transformations) != 4:\n raise InvalidFileError(\n \"Invalid number of transformation vector elements\"\n )\n rotations[transformation_i, component_i, :] = transformations[:3]\n translations[transformation_i, component_i] = transformations[3]\n\n component_i += 1\n if component_i == 3:\n # All (x,y,z) components were parsed\n # -> head to the next transformation \n transformation_i += 1\n component_i = 0\n \n return rotations, translations", "def read_affine(df):\n SliceThickness = [df.SliceThickness]\n PixelSpacing = _string_to_list_of_floats(df.PixelSpacing)\n ImageOrientationPatient = _string_to_list_of_floats(df.ImageOrientationPatient)\n ImagePositionPatient = _string_to_list_of_floats(df.ImagePositionPatient)\n\n Zooms = np.array(PixelSpacing+SliceThickness, dtype=float)\n ImageOrientationPatient = np.array(ImageOrientationPatient, dtype=float)\n ImagePositionPatient = np.array(ImagePositionPatient, dtype=float)\n \n ijk2ras = extract_cosines(ImageOrientationPatient)\n\n ijk2ras = (ijk2ras*np.array([-1,-1,1])).T\n ImagePositionPatient = ImagePositionPatient*np.array([-1,-1,1])\n\n affine = np.stack((ijk2ras[:,0]*Zooms[0],\n ijk2ras[:,1]*Zooms[1],\n ijk2ras[:,2]*Zooms[2],\n ImagePositionPatient), axis=1)\n\n return np.vstack((affine,[[0,0,0,1]]))", "def get_affine(nii_file):\n\timg = nib.load(nii_file)\n\treturn img.get_affine()", "def read_activity_mappings(self):\n with open('act.translate', \"r\") as file:\n for line in file:\n x = str(str(line).strip()).split(' ', 3)\n self.amappings[x[0]] = x[1]", "def read_file(path: str) -> Iterator[Problem]:\n with open(path) as f:\n txt = f.read()\n\n for encoded_problem in txt.split('\\n\\n'):\n yield parse_alpha_encoding(encoded_problem)", "def get_affine_orig_v2():\n root_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/\"\n seq_dir = root_dir + \"sequences/\"\n 
annotations_dir = root_dir + 'annotations/'\n affine_dir = root_dir + \"affine_orig_v2/\"\n if not os.path.exists(affine_dir):\n os.makedirs(affine_dir)\n MIN_MATCH_COUNT = 10\n # 1088 is more accurate\n seqs_sample = '''\n uav0000249_00001_v\n uav0000249_02688_v\n '''\n seqs_str = seqs_sample\n # seqs = [seq.strip() for seq in seqs_str.split()]\n seqs = os.listdir(seq_dir)\n for seq in seqs:\n print(seq)\n # sort the seq files\n seq_files = os.listdir(os.path.join(seq_dir, seq))\n seq_files = sorted(seq_files, key=lambda x: int(x[:-4]))\n image0 = cv2.imread(os.path.join(seq_dir, seq, seq_files[0]))\n height, width = image0.shape[0], image0.shape[1]\n print(\"height: {}, width: {}\".format(height, width))\n # first load the bbox annotations\n frame_mask = get_frame_mask(annotations_dir, seq+'.txt', width=width, height=height)\n affine_dict = {}\n for i in range(len(seq_files)-1):\n print(i)\n image0 = cv2.imread(os.path.join(seq_dir, seq, seq_files[i]))\n image1 = cv2.imread(os.path.join(seq_dir, seq, seq_files[i+1]))\n image0 = cv2.cvtColor(image0, cv2.COLOR_BGR2GRAY)\n image1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)\n surf = cv2.xfeatures2d.SURF_create()\n kp0, des0 = surf.detectAndCompute(image0, None)\n kp1, des1 = surf.detectAndCompute(image1, None)\n # filter the kp0 and des0, kp1 and des1 by mask0 and mask1\n mask0 = frame_mask[i]\n mask1 = frame_mask[i+1]\n FLANN_INDEX_KDTREE = 0\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)\n search_params = dict(checks=10)\n point_mask0 = [1 if mask0[int(i.pt[1]), int(i.pt[0])] == 1 else 0 for i in kp0]\n point_mask1 = [1 if mask1[int(i.pt[1]), int(i.pt[0])] == 1 else 0 for i in kp1]\n kp0 = [i for idx, i in enumerate(kp0) if point_mask0[idx] == 1]\n des0 = [i for idx, i in enumerate(des0) if point_mask0[idx] == 1]\n des0 = np.array(des0)\n kp1 = [i for idx, i in enumerate(kp1) if point_mask1[idx] == 1]\n des1 = [i for idx, i in enumerate(des1) if point_mask1[idx] == 1]\n des1 = np.array(des1)\n flann = cv2.FlannBasedMatcher(index_params, search_params)\n matchs = flann.knnMatch(des0, des1, k=2)\n\n # store all the good matchs as per Lowe's ratio test\n good = []\n for m, n in matchs:\n if m.distance < 0.7 * n.distance:\n good.append(m)\n if len(good) > MIN_MATCH_COUNT:\n src_pts = np.float32([kp0[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n dst_pts = np.float32([kp1[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)\n else:\n M = np.eye(3, 3)\n affine_dict[seq_files[i]] = M\n with open(os.path.join(seq_dir, affine_dir, seq+'.pickle'), 'wb') as fout:\n pickle.dump(affine_dict, fout)", "def testTranslateAffine(self):\n affineClass = xyTransformRegistry[\"affine\"]\n affineConfig = affineClass.ConfigClass()\n affineConfig.translation = (1.2, -3.4)\n with lsst.utils.tests.getTempFilePath(\".py\") as filePath:\n self.checkConfig(affineClass, affineConfig, filePath)\n affine = affineClass(affineConfig)\n for fromPoint in self.fromIter():\n toPoint = affine.forwardTransform(fromPoint)\n predToPoint = fromPoint + Extent2D(*affineConfig.translation)\n for i in range(2):\n self.assertAlmostEqual(toPoint[i], predToPoint[i])", "def extract_affine(self):\n self.affine = self.ecat.affine.tolist()", "def loadGeoTransform(filepath):\n \n from osgeo import gdal\n \n ds = gdal.Open(filepath, 0)\n \n return ds.GetGeoTransform()", "def show_affine(self):\n for row in self.affine:\n print(row)", "def read_input_file(file_name):\n matrix = np.asmatrix(np.loadtxt(file_name))\n 
matrix = matrix[:, :-1]\n\n (rows, attribute_count) = np.shape(matrix)\n\n # convert data into an list of Examples\n examples = [\n Example(matrix[i, :])\n for i in range(0, rows)\n ]\n\n return (examples, attribute_count)", "def read(fileName,filetransform):\n\n with open(fileName) as f:\n for li in f:\n li = li.strip()\n print(\"\")\n choice = input(\"You want to 'ENCRYPT' or 'DECRYPT : ' \" + li )\n print(\"\")\n with open(filetransform) as f:\n for line in f:\n line = line.strip()\n if \";\" in line:\n simulate(li,choice,line)\n else:\n userchoice(li,choice,line)", "def eval_pos_affine():\n root_dir = \"/home/sdb/wangshentao/myspace/thesis/data/VisDrone2019-MOT-test-dev/\"\n seq_dir = root_dir + \"sequences/\"\n annotations_dir = root_dir + 'annotations/'\n affine_dir = root_dir + \"affine_orig/\"\n all_iou = []\n seqs_sample = '''\n uav0000249_00001_v\n uav0000249_02688_v\n '''\n seqs_str = seqs_sample\n seqs = [seq.strip() for seq in seqs_str.split()]\n for seq in seqs:\n image_file = os.listdir(os.path.join(seq_dir, seq))[0]\n image = cv2.imread(os.path.join(seq_dir, seq, image_file))\n orig_h, orig_w = image.shape[:2]\n\n with open(os.path.join(affine_dir, seq+'.pickle'), 'rb') as fin:\n affine_dict = pickle.load(fin)\n\n bbox, frame_id = get_frame_bbox(annotations_dir, seq + '.txt')\n predict_bbox = []\n for i in range(len(bbox)):\n # convert to std resolution\n bbox[i][:, 0] = bbox[i][:, 0]\n bbox[i][:, 1] = bbox[i][:, 1]\n bbox[i][:, 2] = bbox[i][:, 2]\n bbox[i][:, 3] = bbox[i][:, 3]\n\n # for j in range(bbox[i].shape[0]):\n # bbox[i][j] = tlwh_to_tlbr(bbox[i][j])\n for idx in range(len(bbox)):\n kalman_filter = KalmanFilter()\n trace_bbox = bbox[idx]\n trace_predict_bbox = []\n mean, covariance = kalman_filter.initiate(tlwh_to_xyah(trace_bbox[0]))\n for i in range(1, trace_bbox.shape[0]):\n # i-1 to i M\n frame_name = \"{:07d}.jpg\".format(int(frame_id[idx][i-1]))\n M = affine_dict[frame_name]\n bbox_infer = tlwh(mean)\n bbox_infer = tlwh_to_tlbr(bbox_infer)\n bbox_expand = np.ones((3, 4))\n bbox_expand[:2, 0] = bbox_infer[:2]\n bbox_expand[:2, 1] = bbox_infer[2:]\n # tr\n bbox_expand[:2, 2] = bbox_infer[2], bbox_infer[1]\n # bl\n bbox_expand[:2, 3] = bbox_infer[0], bbox_infer[3]\n bbox_expand = np.dot(M, bbox_expand)\n for t in range(bbox_expand.shape[1]):\n bbox_expand[:2, t] /= bbox_expand[2, t]\n # bbox_infer[:2] = bbox_expand[:2, 0]\n # bbox_infer[2:] = bbox_expand[:2, 1]\n # get the out bounding bbox\n bbox_infer[0] = min(bbox_expand[0, :])\n bbox_infer[1] = min(bbox_expand[1, :])\n bbox_infer[2] = max(bbox_expand[0, :])\n bbox_infer[3] = max(bbox_expand[1, :])\n bbox_infer = tlbr_to_tlwh(bbox_infer)\n # print(bbox_infer)\n trace_predict_bbox.append(bbox_infer)\n # move = mean[:4] - tlwh_to_xyah(bbox_infer)\n # if np.sum(np.square(move)[:2]) > 32*32:\n # print(move)\n # print(idx, frame_name)\n # print(mean)\n mean[:4] = tlwh_to_xyah(bbox_infer)\n # print(mean)\n mean, covariance = kalman_filter.predict(mean, covariance)\n mean, covariance = kalman_filter.update(mean, covariance, tlwh_to_xyah(trace_bbox[i]))\n\n trace_predict_bbox = np.array(trace_predict_bbox)\n for i in range(trace_predict_bbox.shape[0]):\n trace_predict_bbox[i] = tlwh_to_tlbr(trace_predict_bbox[i])\n for i in range(trace_bbox.shape[0]):\n trace_bbox[i] = tlwh_to_tlbr(trace_bbox[i])\n\n predict_bbox.append(trace_predict_bbox)\n bbox[idx] = bbox[idx][1:]\n frame_id[idx] = frame_id[idx][1:]\n assert bbox[idx].shape[0] == predict_bbox[idx].shape[0]\n iou = []\n for i in range(len(bbox)):\n trace_iou = []\n 
trace_bbox = bbox[i]\n trace_predict_bbx = predict_bbox[i]\n for j in range(trace_bbox.shape[0]):\n iou_val = bbox_ious(np.ascontiguousarray(trace_bbox[j][np.newaxis, :], dtype=np.float),\n np.ascontiguousarray(trace_predict_bbx[j][np.newaxis, :], dtype=np.float))\n trace_iou.append(iou_val)\n iou.append(np.array(trace_iou))\n iou = [int(np.mean(i) * 100) for i in iou]\n all_iou += iou\n bins = np.zeros(101)\n for i in all_iou:\n bins[i] += 1\n plt.bar(np.arange(101), bins)\n plt.ylabel('num')\n plt.xlabel('iou(*100)')\n plt.show()", "def read_filename(self, filename):\r\n self.text_lines = task3.read_text_file(filename)", "def readFastaFile(filename):", "def anglor(infile, sequence):\n return np.loadtxt(infile, usecols=1).clip(min=-180, max=180).reshape((1, -1, 1))", "def readfile(filename):\n infile = open(filename,\"r\")\n seq = infile.read()\n return re.sub('[^acgtACGT]', '', seq).upper()", "def convert_affine(ref, t, out):\n args = [\n transform_exe,\n '-d', '3',\n '-r', ref,\n '-t', '[{},0]'.format(t),\n '-o', '[{},1]'.format(out),\n '--float'\n ]\n subprocess.check_call(args)", "def _read_eeg(eeg_file):\r\n pass", "def read_from(self, filename):\n self.x, self.y = np.loadtxt(filename, unpack=True, usecols=(0, 1))", "def read_transcript_data(fn):\n\n def _read_lines(fn):\n # NC_000007.13\tRefSeq\tcDNA_match\t50344265\t50344518\t254\t+\t.\tID=aln58042;Target=NM_001220765.2 1 254 +;gap_count=0;identity=0.0691326;idty=1;num_ident=428;num_mismatch=0;pct_coverage=6.91326;pct_identity_gap=100;pct_identity_ungap=100;score=254\n # NC_000002.11 RefSeq cDNA_match 179671939 179672150 212 - . ID=ed951d46-194c-477a-a480-4bc64530c5ba;Target=NM_001267550.2 1 212 +;gap_count=0;identity=0.999991;idty=1;num_ident=109223;num_mismatch=1;pct_coverage=100;pct_identity_gap=99.9991;pct_identity_ungap=99.9991\n line_re = re.compile(\n \"(?P<ref_ac>\\S+)\\s+(?P<origin>\\S+)\\s+(?P<match_type>\\S+)\\s+\"\n \"(?P<g_start>\\d+)\\s+(?P<g_end>\\d+)\\s+(?P<score>\\S+)\\s+\"\n \"(?P<strand>[-+])\\s+\\.\\s+ID=(?P<aln>[^;]+);Target=(?P<tx_ac>\\S+)\"\n \"\\s+(?P<tx_start>\\d+)\\s+(?P<tx_end>\\d+).+?\"\n \"pct_coverage=(?P<pct_coverage>[^;]+);\"\n \"pct_identity_gap=(?P<pct_identity_gap>[^;]+);\"\n \"pct_identity_ungap=(?P<pct_identity_ungap>[^;]+)\"\n )\n fh = io.open(fn, \"rb\")\n while fh.peek(1)[0] == \"#\":\n fh.readline()\n while fh.peek(3)[0:3] != \"###\":\n line = fh.readline()\n try:\n yield line_re.match(line).groupdict()\n except AttributeError:\n raise Exception(\"Failed at\", line)\n raise StopIteration\n def _key(e):\n return (e[\"tx_ac\"], not e[\"ref_ac\"].startswith(\"NC_\"), e[\"ref_ac\"], e[\"aln\"])\n return itertools.groupby(sorted(_read_lines(fn), key=_key), key=_key)", "def parse_txt_file(txtfile):\n array = np.genfromtxt(txtfile)\n return array", "def read_alignment(file):\n alignments = list()\n with open(file, 'r') as f:\n for line in f:\n line_lst = line.strip().split()\n align_lst = list()\n for pair in line_lst:\n src_idx, tgt_idx = pair.split('-')\n align_lst.append((int(src_idx),int(tgt_idx)))\n # print(align_lst)\n alignments.append(align_lst)\n return alignments", "def read_input():\n return Path(__file__).with_name('input.txt').read_text().splitlines()", "def load_trans(self, fname):\n info = read_trans(fname)\n head_mri_trans = info['trans']\n self.set_trans(head_mri_trans)", "def readascii(file_name):\n data = np.loadtxt(file_name)\n z = data[0,1:]\n nuInu = data[1:,1:]\n lmu = data[1:,0]\n return EBL(z, lmu, nuInu)" ]
[ "0.6390113", "0.6214353", "0.5905246", "0.58053416", "0.5798058", "0.5783118", "0.5749797", "0.5664429", "0.5572933", "0.5517509", "0.5496632", "0.5440119", "0.54245305", "0.54138404", "0.5407004", "0.53825873", "0.5361961", "0.5361539", "0.5356766", "0.53254974", "0.5318438", "0.53174675", "0.5297356", "0.5296254", "0.5268147", "0.52669096", "0.5264846", "0.52600884", "0.522458", "0.52195376" ]
0.67697954
0
Given two (nested) lists of tensors, return whether they are equal.
def tensor_lists_equal(t1, t2): if isinstance(t1, torch.Tensor) and isinstance(t2, torch.Tensor): ## round in case of floating errors t1 = np.round(t1.data.numpy(), decimals=5) t2 = np.round(t2.data.numpy(), decimals=5) return np.array_equal(t1, t2) assert isinstance(t1, list) assert isinstance(t2, list) assert len(t1) == len(t2) for t1_, t2_ in zip(t1, t2): if not tensor_lists_equal(t1_, t2_): return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tensors_equal(self, x, y):\n if isinstance(x, PackedSequence):\n return self.tensors_equal(x[0], y[0]) and self.tensors_equal(x[1], y[1])\n\n if isinstance(x, dict):\n return (\n (x.keys() == y.keys()) and\n all(self.tensors_equal(x[k], y[k]) for k in x)\n )\n\n if isinstance(x, (list, tuple)):\n return all(self.tensors_equal(xi, yi) for xi, yi in zip(x, y))\n\n if x.is_sparse is not y.is_sparse:\n return False\n\n if x.is_sparse:\n x, y = x.to_dense(), y.to_dense()\n\n return (x == y).all()", "def all_equal(list_a, list_b):\n if len(list_a) != len(list_b):\n return False\n a, b = np.array(list_a), np.array(list_b)\n return all(a == b)", "def _is_all_tensor_equal(self, input_shape_tuple, cache_shape_tuple):\n for i, elem in enumerate(cache_shape_tuple):\n res = self._is_tensor_equal(input_shape_tuple[i], elem)\n if not res:\n return False\n return True", "def _are_equal(grid: List[List[str]], other: List[List[str]]) -> bool:\n for row in range(len(grid)):\n for col in range(len(grid[row])):\n if grid[row][col] != other[row][col]:\n return False\n return True", "def list_same(lst1, lst2):\n return len(lst1) == len(lst2) and all([x == y for x, y in zip(lst1, lst2)])", "def lists_are_identical(list1, list2):\n if len(list1) != len(list2):\n return False\n for elem1, elem2 in zip(list1, list2):\n if elem1 != elem2:\n return False\n return True", "def _is_tensor_equal(input_tensor, cache_tensor):\n if input_tensor.dtype != cache_tensor.dtype:\n return False\n\n if input_tensor.shape != cache_tensor.shape:\n return False\n\n if len(input_tensor.shape) != len(cache_tensor.shape):\n return False\n\n return True", "def has_tensors(ls):\n # Note: at some point in time ragged tensors didn't count as tensors, so this\n # returned false for ragged tensors. 
Making this return true fails some tests\n # which would then require a steps_per_epoch argument.\n if isinstance(ls, (list, tuple)):\n return any(\n tensor_util.is_tf_type(v) and\n not isinstance(v, ragged_tensor.RaggedTensor) for v in ls)\n if isinstance(ls, dict):\n return any(\n tensor_util.is_tf_type(v) and\n not isinstance(v, ragged_tensor.RaggedTensor)\n for _, v in ls.items())\n return tensor_util.is_tf_type(ls) and not isinstance(\n ls, ragged_tensor.RaggedTensor)", "def same(d1: Sequence[Any], d2: Sequence[Any]) -> bool:\n if len(d1) != len(d2):\n return False\n for i in range(len(d1)):\n if d1[i] != d2[i]:\n return False\n return True", "def _verify_event_lists_have_same_tensor_values(self, expected, gotten):\n self.assertEqual(len(expected), len(gotten))\n\n # Compare the events one at a time.\n for expected_event, gotten_event in zip(expected, gotten):\n self.assertEqual(expected_event.summary.value[0].node_name,\n gotten_event.summary.value[0].node_name)\n self.assertAllClose(\n tensor_util.make_ndarray(expected_event.summary.value[0].tensor),\n tensor_util.make_ndarray(gotten_event.summary.value[0].tensor))\n self.assertEqual(expected_event.summary.value[0].tag,\n gotten_event.summary.value[0].tag)", "def check_lists_equal(list_1, list_2):\n if len(list_1) != len(list_2):\n return False\n return sorted(list_1) == sorted(list_2)", "def eq_elements(a, b):\r\n if ((isinstance(a, int) == True) or (isinstance(a, str) == True)):\r\n return a == b\r\n if ((isinstance(a[0], int) == True) or (isinstance(a[0], str) == True)):\r\n return (set() == set(a).difference(set(b)))\r\n else: \r\n for i in range(len(a)):\r\n test = False \r\n for j in range(len(b)):\r\n if (eq_elements(a[i],b[j]) == True):\r\n test = True\r\n if (test == False):\r\n return False\r\n else:\r\n return True", "def has_match(trajs_0, trajs_1):\n for i in range(len(trajs_0)):\n for j in range(len(trajs_1)):\n R = (trajs_0[i].get_slice()[:,:2] == trajs_1[j].get_slice()[:,:2])\n if isinstance(R, bool):\n if R:\n return True \n elif R.all():\n return True \n else:\n pass \n return False", "def is_equal(a: list[int], b: list[int]) -> bool:\n i: int = 0\n if len(a) != len(b):\n return False\n while i < len(a):\n if a[i] != b[i]:\n return False\n else:\n i = i + 1\n return True", "def is_equal(a: list[int], b: list[int]) -> bool:\n if len(a) == len(b):\n i: int = 0\n num: int = 0\n while i < len(a):\n if a[i] == b[i]:\n i = i + 1\n num = num + 1\n else:\n i = i + 1\n return (num == len(a))\n else:\n return False", "def _compare_nested_sequences(seq1, seq2):\n return all([(l == m).all() for l, m in zip(seq1, seq2)])", "def is_equal(a: list[int], b: list[int]) -> bool:\n a_length: int = len(a)\n b_length: int = len(b)\n if a_length == 0 and b_length == 0:\n return True\n else:\n i = 0\n if a_length == b_length:\n if a_length <= len(b):\n while i < a_length:\n if a[i] == b[i]:\n return True\n else:\n i += 1\n return False\n else:\n while i < b_length:\n if a[i] == b[i]:\n return True\n else:\n i += 1\n return False\n else:\n return False", "def _equals(a: Union[str, Iterable[str]], b: Union[str, Iterable[str]]) -> bool:\n if isinstance(a, str) and isinstance(b, str):\n return a.replace(' ', '') == b.replace(' ', '')\n elif isinstance(a, Iterable) and isinstance(b, Iterable):\n return all(_equals(a_, b_) for a_, b_ in zip(a, b))\n else:\n raise TypeError(f'arguments must be both strings or both lists, not {type(a)}, {type(b)}')", "def is_equal(a: list[int], b: list[int]) -> bool:\n i = 0 \n if len(a) == 0 and len(b) == 0:\n 
return True\n if len(a) == 0 and len(b) > 0:\n return False\n if len(a) > 0 and len(b) == 0:\n return False\n\n while i < len(a) and i < len(b):\n if a[i] == b[i]:\n if i == len(a) - 1 and i == len(b) - 1:\n if len(a) == len(b):\n if a[len(a) - 1] == b[len(b) - 1]:\n return True\n i += 1\n else:\n return False\n return False", "def test_type_equality(self):\r\n #list of matrices\r\n myType1 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of matrices\r\n myType2 = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n #list of scalars\r\n myType3 = TypedListType(T.TensorType(theano.config.floatX,\r\n ()))\r\n\r\n self.assertTrue(myType2 == myType1)\r\n self.assertFalse(myType3 == myType1)", "def check_equal(tensor_1, tensor_2):\n return tf.reduce_max(tf.abs(tensor_1 - tensor_2)).numpy() < 1e-6", "def allclose(tensor1: Tensor, tensor2: Tensor) ->bool:\n if tensor1.dtype != tensor2.dtype:\n tensor2 = tensor2\n return torch.allclose(tensor1, tensor2)", "def _chain_equal(a,b):\n for a_part, b_part in zip(a.parts, b.parts):\n for a_seg, b_seg in zip(a_part, b_part):\n if not np.array_equal(a_seg, b_seg):\n return False\n return True", "def check_form_match(\n cls,\n tensor1=None,\n tensor2=None,\n qhape1=None,\n shape1=None,\n dirs1=None,\n qhape2=None,\n shape2=None,\n dirs2=None,\n qodulus=None,\n ):\n if tensor1 is not None:\n qhape1 = tensor1.qhape\n shape1 = tensor1.shape\n dirs1 = tensor1.dirs\n if tensor2 is not None:\n qhape2 = tensor2.qhape\n shape2 = tensor2.shape\n dirs2 = tensor2.dirs\n if not (\n len(qhape1)\n == len(qhape2)\n == len(shape1)\n == len(shape2)\n == len(dirs1)\n == len(dirs2)\n ):\n return False\n # Loop over the indices of both tensors in tandem.\n for d1, qim1, dim1, d2, qim2, dim2 in zip(\n dirs1, qhape1, shape1, dirs2, qhape2, shape2\n ):\n # This is almost like compatible_indices, but for the missing minus\n # sign when building o_qim.\n qim2 = [d1 * d2 * q for q in qim2]\n if qodulus is not None:\n qim2 = [q % qodulus for q in qim2]\n qimdim1 = set(zip(qim1, dim1))\n qimdim2 = set(zip(qim2, dim2))\n if not qimdim1 == qimdim2:\n return False\n return True", "def _are_matrix_elements_equal(element, another_element):\n difference = sympy.N(sympy.expand(element) - sympy.expand(another_element))\n\n try:\n return np.allclose(\n float(sympy.re(difference)) + 1j * float(sympy.im(difference)), 0\n )\n except TypeError:\n return False", "def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = 
second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True", "def _buffer_list_equal(a, b):\n if len(a) != len(b):\n return False\n if a == b:\n return True\n for ia, ib in zip(a, b):\n # Check byte equality, since bytes are what is actually synced\n # NOTE: Simple ia != ib does not always work as intended, as\n # e.g. memoryview(np.frombuffer(ia, dtype='float32')) !=\n # memoryview(np.frombuffer(b)), since the format info differs.\n # Compare without copying.\n if memoryview(ia).cast('B') != memoryview(ib).cast('B'):\n return False\n return True", "def grid_equal(grid1, grid2):\r\n for i in range(len(grid1)):\r\n for j in range(len(grid1[i])):\r\n if grid1[i][j] != grid2[i][j]:\r\n return False\r\n return True", "def eq(a, b):\n if isinstance(a, (ast.Load, ast.Store)):\n return isinstance(b, type(a))\n if isinstance(a, list):\n return isinstance(b, list) and all(map(eq, a, b))\n return a == b or (\n isinstance(a, type(b)) and\n isinstance(b, type(a)) and\n hasattr(a, '_fields') and\n hasattr(b, '_fields') and\n a._fields == b._fields and\n all(eq(getattr(a, field), getattr(b, field)) for field in a._fields)\n )", "def validate_product_tensor_lists(conn_graph: ConnectedGraph):\n for product in conn_graph.get_all_products().values():\n # products going to branch ops will not have tensors associated with them\n if product.consumers[0].type != 'branch':\n if len(product.consumers) != len(product.tensor_dict.keys()):\n return False\n return True" ]
[ "0.788319", "0.7249695", "0.7223901", "0.6960324", "0.6813348", "0.6810137", "0.67806697", "0.67561144", "0.6703272", "0.66944456", "0.66410375", "0.6631499", "0.65602744", "0.6520822", "0.6506562", "0.6469257", "0.6439419", "0.6430223", "0.6427981", "0.6399093", "0.6396647", "0.63844025", "0.63635015", "0.63337576", "0.63109946", "0.6309243", "0.62902534", "0.6221476", "0.6202028", "0.6176923" ]
0.8785997
0
Check whether we can create a folder. We don't verify folder name, so returned_match = None
async def check_one_foldername(provider: providers.BaseProvider, scenario: typing.Tuple[str, str]) -> report.Report: prose, fn = scenario # TODO: Some providers may have a problem with nested folders; check print(f'Checking: {provider.provider_name} for foldername {fn}') folder_id, code = await provider.create_folder(fn) allowed_creation = (code < 400) return report.Report( description=prose, our_fn=fn, their_fn=folder_id, upload_status_code=code, returned_match=None )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_create_folder(self, output, *folder_names):\n path = self.video_file\n\n # if none then create diectory on same level as video directory with the folder_name and video name\n if output is None:\n output = os.path.abspath(os.path.join(os.path.dirname(path), os.pardir, *folder_names))\n else:\n output = os.path.join(output, self.video_name)\n\n # if directory not existing create directory\n if not os.path.exists(output):\n print('created new directory: ', output)\n os.makedirs(output)\n\n return output", "def _folderCheck(self, folder):\n logger.debug(\"Func: _folderCheck\")\n\n if not os.path.isdir(os.path.normpath(folder)):\n os.makedirs(os.path.normpath(folder))", "def test_create_folder_getters_are_correct(self):\n my_folder_id = '123456'\n my_folder_name = 'My folder name'\n f1 = folder.Folder(my_folder_id, display_name=my_folder_name,\n lifecycle_state=folder.FolderLifecycleState.ACTIVE)\n self.assertEqual(my_folder_id, f1.id)\n self.assertEqual(\n folder.Folder.RESOURCE_NAME_FMT % my_folder_id, f1.name)\n self.assertEqual(my_folder_name, f1.display_name)\n self.assertEqual(resource.ResourceType.FOLDER, f1.type)\n self.assertEqual(None, f1.parent)\n self.assertEqual(folder.FolderLifecycleState.ACTIVE,\n f1.lifecycle_state)", "def check_make(folder_check):\n if not os.path.isdir(folder_check):\n os.mkdir(folder_check)", "def _create_folder(self, unsupported_file: File) -> str:\n if not self.possibilities:\n print(\n f\"----\\nNo folders found in directory. Please enter directory name for \"\n f\"{unsupported_file} file:\\n\"\n )\n else:\n print(\"Please enter directory name:\")\n\n while True:\n folder_name = input()\n checker = [True if char.isalnum() else False for char in folder_name]\n if False not in checker and folder_name not in self.possibilities.keys():\n os.makedirs(folder_name)\n temp_folder = Folder(folder_name)\n self.folders.append(temp_folder)\n if unsupported_file.get_extension():\n temp_folder.files.append(PlaceHolderFile(unsupported_file.name))\n return folder_name\n else:\n print(\"Invalid input\")", "def check_folder(filepath):\n if not os.path.exists(filepath):\n os.mkdir(filepath)\n return filepath", "def verify_folder_name(folder):\n regex = re.compile(\"\\/([0-9]{8})_([0-9]{6})_(\\w+)$\")\n find = regex.search(folder)\n if find:\n date = f\"{find.group(1)}_{find.group(2)}\"\n name = find.group(3)\n folder = f\"{date}_{name}\"\n try:\n ctime = datetime.datetime.strptime(date, \"%Y%m%d_%H%M%S\")\n return (folder, name, ctime)\n except:\n return False", "def mkdir(folder_name: str) -> None:\n if exist(folder_name):\n print(\"The folder is already exist\")\n return \n\n os.mkdir(folder_name)", "def test_create_folder(self):\n test = Server()\n inputs = [['create_folder','oook'],['create_folder','oook']]\n response = ['folder created','Folder already exists. 
Try with another folder name']\n res = []\n for val in inputs:\n res.append(test.create_folder(val))\n self.assertListEqual(res, response)", "def _check_directory(my_folder):\n if not os.path.exists(my_folder):\n os.makedirs(my_folder)", "def create_folder(target_folder):\n try:\n os.makedirs(target_folder)\n except OSError as e:\n pass\n return os.path.exists(target_folder)", "def create_folder(target_folder):\n try:\n os.makedirs(target_folder)\n except OSError as e:\n pass\n return os.path.exists(target_folder)", "def check_folder(directory):\n global path_checked\n if not os.path.exists(directory):\n os.makedirs(directory)\n else:\n path_checked = True", "def check_dest_root(self):\n dest_root = self.view.folder_line.text()\n if not os.path.isdir(dest_root):\n try:\n os.makedirs(dest_root)\n except (WindowsError, TypeError):\n self.view.message.setText('Please input a valid folder path.')\n return False\n return True", "def create_folder(folder):\n flag = True\n if not os.path.exists(folder):\n try:\n os.makedirs(folder)\n initlog('Folder path:%s created by me; ' % folder) \n except Exception, e:\n initlog('failed to create Folder path; %s' % str(e))\n flag = False\n return flag", "def create_folder(folder):\n\n try:\n os.mkdir(folder, 0740)\n except OSError:\n return False\n else:\n return True", "def create_folder(self, req, folder_path, new_folder_name):\n\t\tdirectory_path = os.path.join(self.get_selected_root(req), folder_path)\n\t\t\n\t\t#prevent shenanigans\n\t\tnew_folder_name = new_folder_name.split('/').pop()\n\t\t\n\t\tnew_path = os.path.join(directory_path, new_folder_name)\n\t\tif(os.access(new_path, os.F_OK)):\n\t\t\tcontent = tags.Tag('Error')(number=FLD_EXISTS)\n\t\telse:\n\t\t\ttry:\n\t\t\t\tos.mkdir(new_path)\n\t\t\t\tcontent = tags.Tag('Error')(number=SUCCESS)\n\t\t\texcept:\n\t\t\t\tcontent = tags.Tag('Error')(number=FLD_UNKNOWN_ERROR)\n\t\t\n\t\treturn content", "def create_folder(self):\n path = os.path.expanduser('~') + \"/.ip_enrich/\"\n # Does it exist already?\n if os.path.isdir(path):\n return True\n try:\n os.mkdir(path)\n return True\n except Exception as e:\n print (f\"Creation of the directory {path} failed\")\n print (f\"Error {e}\")\n return False", "def create_folder(self, foldername: str) -> int:\n raise NotImplementedError", "def ensure_folder(*arg):\n if len(arg) == 0:\n raise Exception(\"No input to ensure_folder\")\n path = get_dir(Path(*arg))\n path.mkdir(parents=True, exist_ok=True)", "def create_folder(path):\n folder_missing = not os.path.exists(path)\n\n if folder_missing:\n # Using makedirs since the path hierarchy might not fully exist.\n try:\n os.makedirs(path)\n except OSError as e:\n if (e.errno, e.strerror) == (17, 'File exists'):\n print(e)\n else:\n raise\n\n print('Created folder {0}'.format(path))\n\n return folder_missing", "def test_make_output_folder_exists_no_timestamp(self, mock_makedirs,\n mock_logger):\n mock_makedirs.side_effect = [OSError, True]\n test_object = Maic()\n path = \"my_path\"\n sep = os.sep\n if os.sep == '\\\\':\n # we've got a backslash which causes havoc in a regex so we need\n # to escape the backslash twice\n sep = '\\\\\\\\'\n result = test_object.make_output_folder(output_folder=path)\n match_string = r'^my_path-\\d{4}(-\\d{2}){2}-(-\\d{2}){2}' + sep + '$'\n self.assertTrue(\n re.search(match_string, result,\n re.S),\n \"Should have got a path with a Timestamp attached\")\n mock_logger.assert_called_with(\n \"Specified folder (my_path) already exists - trying to create \"\n \"one with a timestamp\")", 
"def create_folder(path: str):\n try:\n Path(path).mkdir(parents=True, exist_ok=True)\n return True\n except:\n print(\"An error occured.\")", "def check_folder_exists(location: str) -> bool:\n if os.path.isdir(location):\n return True\n else:\n return False", "def checkfolder(paths):\n\tpaths = paths if isinstance(paths, list) else [paths]\n\t\n\tdef creat_dir(x):\n\t\tx = Path(x)\n\t\tif x.is_dir():\n\t\t\tprint(f\"Dir {x} already exists\")\n\t\telse:\n\t\t\tPath.mkdir(x)\n\t\t\tprint(f\"Created new dir {x}\")\n\t\n\tlist(map(creat_dir, paths))", "def validate_isfolder(value):\n return os.path.isdir(value)", "def ensure_folder_exists(folder_path: str) -> None:\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)", "def createFolder(self):\n raise NotImplementedError", "def validate_path_or_name(path_or_name: str, is_folder: bool=False) -> str:\n\n if is_folder:\n assert path_or_name.endswith('/')\n else:\n assert not path_or_name.endswith('/')\n\n return path_or_name", "def create_folder(name_folder: str):\n try:\n # Create a new direcctory\n os.mkdir(name_folder)\n except FileExistsError:\n # If the direcctory already exits print.\n print(f\"The directory {name_folder} already exists.\")" ]
[ "0.6989845", "0.6891283", "0.683919", "0.6803831", "0.6788181", "0.6731252", "0.67084515", "0.67037123", "0.66955405", "0.6634677", "0.6596907", "0.6596907", "0.6592645", "0.65525657", "0.6545261", "0.6474729", "0.6458068", "0.6358634", "0.6331798", "0.6315", "0.6301309", "0.6286801", "0.6261464", "0.6235317", "0.6186679", "0.61795545", "0.61606807", "0.6160546", "0.6158134", "0.6155082" ]
0.6929287
1
Creates the phi version of the training and test datasets.
def _create_phi_data(training_data, test_data): _METRICS = ['vmsram', 'tasks', 't_rscthnetno', 't_rscthhfsrb', 'c_ucpupct'] phi_training_data = {} phi_test_data = {} # Iterate and compute arccos of each time series in training and test data for key in training_data.keys(): if key in _METRICS: phi_training_data[key] = np.arccos(training_data[key]) phi_test_data[key] = np.arccos(test_data[key]) else: phi_training_data[key] = training_data[key] phi_test_data[key] = test_data[key] return phi_training_data, phi_test_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_data(self):\n # Set up the path\n self.path_target_train = os.path.join(self.data_dir, self.train_path_file_target + \".pkl\")\n self.path_target_test = os.path.join(self.data_dir, self.test_path_file_target + \".pkl\")\n\n if not os.path.exists(self.path_target_train) or not os.path.exists(self.path_target_test):\n # Create vocabularies of the appropriate sizes.\n self.create_vocabulary(self.train_path_file)\n\n # Create token ids for the training data.\n input_train_path = self.train_path_file\n target_train_path = self.train_path_file_target\n train_input, train_input_length, train_labels = self.data_to_token_ids(input_train_path, target_train_path)\n\n # Create token ids for the validation data.\n input_test_path = self.test_path_file\n target_test_path = self.test_path_file_target\n test_input, test_input_length, _ = self.data_to_token_ids(input_test_path, target_test_path, train=False)\n\n # Collect data into a list\n training_data = [train_input, train_input_length, train_labels]\n test_data = [test_input, test_input_length]\n\n # Save all the data\n with open(self.path_target_train, 'wb') as f:\n pickle.dump(training_data,f)\n with open(self.path_target_test, 'wb') as f:\n pickle.dump(test_data, f)\n else:\n # Load data\n with open(self.path_target_train, 'rb') as f:\n training_data = pickle.load(f)\n with open(self.path_target_test, 'rb') as f:\n test_data = pickle.load(f)\n\n # Initialize vocabulary\n self.initialize_vocabulary()\n\n # Convert list into a numpy array - train data\n train_input = pd.DataFrame(training_data[0]).fillna(value=0).astype(int).values\n train_length_input = np.array(training_data[1], dtype=int)\n train_labels = np.array(training_data[2], dtype=int)\n\n # Convert list into a numpy array - test data\n test_input = pd.DataFrame(test_data[0]).fillna(value=0).astype(int).values\n test_length_input = pd.DataFrame(test_data[1]).fillna(value=0).astype(int).values\n\n # Printing maximum length\n print(\"Shape of the input training matrix {}\".format(str(train_input.shape)))\n print(\"Shape of the input test matrix {}\".format(str(test_input.shape)))\n\n # Copy the files\n self.copy_files()\n\n # Return output\n return train_input, train_length_input, train_labels, test_input, test_length_input", "def _create_data():\n tf.logging.info(\"Create records..\")\n train, val, test = util.load_data(data_dir, FLAGS[\"is_aug\"])\n tf.logging.info(\"Dataset size: Train-{} Test-{} Val-{}\".format(len(train), len(test), len(val)))\n return train, val, test", "def generate_train_test(self):\n x, y = self.read_data()\n x_train, y_train, x_test, y_test = self.sample_data(x, y)\n self.train = (x_train, y_train)\n self.test = (x_test, y_test)", "def create_data(self):\n\n print (f'Using {self.n_s} simulations for the training data to estimate cov')\n print (f'Using {self.n_p} simulations for the upper/lower training data')\n print (f'Number of splits, to increase number simulations: {self.n_train}')\n print (f'Adding noise to the derivative: {np.invert(self.noiseless_deriv)}')\n\n # Number of upper and lower simulations\n n_p = int(self.n_s * self.derivative_fraction)\n\n # set a seed to surpress the sample variance (EVEN FOR CENTRAL SIMULATIONS)\n seed = np.random.randint(1e6) \n # We should double-check to see if the sample variance if being surpressed\n\n # Perturb lower \n np.random.seed(seed)\n t_m = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_p)])\n ,train = -self.delta_theta, flatten = self.flatten\n ,noiseless_deriv = 
self.noiseless_deriv) \n # Perturb higher \n np.random.seed(seed)\n t_p = self.generate_data(np.array([theta_fid for i in \n range(self.n_train * self.n_p)])\n ,train = self.delta_theta, flatten = self.flatten\n , noiseless_deriv = self.noiseless_deriv)\n\n # Central\n np.random.seed(seed)\n t = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_s)])\n ,train = None, flatten = self.flatten)\n\n\n # derivative data\n t_d = (t_p - t_m) / (2. * self.delta_theta)\n\n # Save in a dict that the network takes\n data = {\"data\": t, \"data_d\": t_d}\n # for plotting purposes we save the upper/lower separately as well\n data[\"x_m\"], data[\"x_p\"] = t_m, t_p \n\n\n # Repeat the same story to generate test data\n print ('\\n')\n print (f'Using {self.n_s} simulations for the test data to estimate cov')\n print (f'Using {self.n_p_val} simulations for the upper/lower test data')\n print (f'Number of splits, to increase number simulations: {self.n_train_val}')\n print (f'Adding noise to the derivative: {np.invert(self.noiseless_deriv)}')\n print ('\\n')\n\n seed = np.random.randint(1e6)\n # Perturb lower \n np.random.seed(seed)\n tt_m = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_p)])\n , train = -self.delta_theta, flatten = self.flatten\n , noiseless_deriv = self.noiseless_deriv)\n # Perturb higher \n np.random.seed(seed)\n tt_p = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_p)])\n , train = self.delta_theta, flatten = self.flatten\n , noiseless_deriv = self.noiseless_deriv)\n # Central sim\n np.random.seed(seed)\n tt = self.generate_data(np.array([self.theta_fid for i in \n range(self.n_train * self.n_s)])\n , train = None, flatten = self.flatten)\n \n # np.random.seed()\n \n # derivative data\n tt_d = (tt_p - tt_m) / (2. 
* self.delta_theta)\n\n data[\"validation_data\"] = tt \n data[\"validation_data_d\"] = tt_d\n\n # for plotting purposes we save the upper/lower separately\n data[\"x_m_test\"], data[\"x_p_test\"] = tt_m, tt_p \n\n return data", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def generate(self):\n self.training_data.gen_x(self.x_func)\n self.training_data.gen_a(self.a_func)\n self.training_data.gen_y(self.y_func)\n \n self.testing_data.gen_x(self.x_func)\n self.testing_data.gen_ys(self.y_func)\n self.testing_data.gen_azero(self.ytotal_func)", "def run():\r\n \r\n LABEL = data.LabelField(use_vocab=True)\r\n TEXT = data.Field(sequential=True, tokenize=lambda x:x.split(), lower=True, fix_length=config.MAX_LENGTH)\r\n\r\n### 1/5\r\n dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n format='csv', \r\n fields=[('text', TEXT),('label', LABEL)], \r\n skip_header=True)\r\n # split the dataset, 8:2\r\n train_dataset, valid_dataset = dataset.split(split_ratio=[0.8,0.2], random_state=random.getstate())\r\n \r\n test_data = data.TabularDataset(path=config.TEST_DATASET_FNAME,\r\n format='csv', \r\n fields=[('text', TEXT),('label', LABEL)], \r\n skip_header=True)\r\n \r\n### 2\r\n# train_dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n# valid_dataset = data.TabularDataset(path=config.VAL_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n \r\n# test_data = data.TabularDataset(path=config.TEST_DATASET_FNAME,\r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True)\r\n \r\n### 3/4\r\n# train_dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n \r\n# dataset = data.TabularDataset(path=config.TEST_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True)\r\n# # split the dataset, 5:5\r\n# valid_dataset, test_data = dataset.split(split_ratio=[0.5,0.5], random_state=random.getstate())\r\n\r\n### 5\r\n\r\n\r\n\r\n # load embeddings\r\n vectors_data = load_vectors(config.EMBEDDING_FNAME)\r\n\r\n TEXT.build_vocab(train_dataset, vectors=vectors_data)\r\n LABEL.build_vocab(train_dataset)\r\n print ('vector size:',TEXT.vocab.vectors.size())\r\n embedding_pretrained_matrix = TEXT.vocab.vectors\r\n \r\n # create torch device\r\n print(\"To device...\")\r\n USE_CUDA = torch.cuda.is_available()\r\n device = torch.device(\"cuda\" if USE_CUDA else \"cpu\")\r\n\r\n train_it, valid_it = data.BucketIterator.splits((train_dataset, valid_dataset),\r\n batch_sizes=(config.TRAIN_BATCH_SIZE,config.VAL_BATCH_SIZE), \r\n device=device, \r\n sort_key=lambda x: len(x.text),\r\n sort_within_batch=False,\r\n 
shuffle=True,\r\n repeat=False)\r\n test_it = data.BucketIterator(test_data, \r\n batch_size=config.TEST_BATCH_SIZE, \r\n sort_key=lambda x: len(x.text), \r\n shuffle=False,\r\n device=device)\r\n \r\n \r\n # fetch model\r\n vocab_size = len(TEXT.vocab) # TEXT.vocab.vectors.size()\r\n# pretrained_vec = TEXT.vocab.vectors\r\n \r\n # selecte network \r\n x = import_module('networks.'+config.NETWORK)\r\n model = x.Model(vocab_size,embedding_pretrained=embedding_pretrained_matrix)\r\n \r\n # send model to device\r\n model.to(device)\r\n\r\n # initialize Adam optimizer\r\n optimizer = torch.optim.Adam(model.parameters(), lr=config.LEARNING_RATE)\r\n\r\n # if you have multiple GPUs, model model to DataParallel to use multiple GPUs\r\n if torch.cuda.device_count() > 1:\r\n model = nn.DataParallel(model)\r\n \r\n params_list = []\r\n # train and validate for all epochs\r\n for epoch in range(config.EPOCHS):\r\n epoch_start_time = time.time()\r\n\r\n ###----Train--------\r\n train_outputs, train_labels, train_loss = engine.train_fn(train_it, model, optimizer, device)\r\n train_outputs = torch.Tensor(train_outputs)\r\n _, train_predicted = torch.max(train_outputs, dim=1)\r\n train_parameters_dict = metrics_func.performance_evaluation_func(train_predicted,train_labels,epoch=str(epoch))\r\n # save train paremeters\r\n params_list.append(train_parameters_dict)\r\n train_f1 = train_parameters_dict['f1_score_macro']\r\n train_prec = train_parameters_dict['precision_macro']\r\n train_recall = train_parameters_dict['precision_macro']\r\n print('\\n')\r\n print(f\" Train Epoch: {epoch}, F1 = {train_f1},precision = {train_prec},recall = {train_recall}\")\r\n ###------------\r\n \r\n # validate\r\n val_outputs, val_labels, valid_loss = engine.evaluate_fn(valid_it, model, device)\r\n val_outputs = torch.Tensor(val_outputs)\r\n _, val_predicted = torch.max(val_outputs, dim=1) \r\n # calculate evaluation paremeters\r\n val_parameters_dict = metrics_func.performance_evaluation_func(val_predicted, val_labels, epoch=str(epoch),flag='val')\r\n # save evaluation paremeters\r\n params_list.append(val_parameters_dict)\r\n \r\n val_f1 = val_parameters_dict['f1_score_macro']\r\n val_prec = val_parameters_dict['precision_macro']\r\n val_recall = val_parameters_dict['recall_macro']\r\n print(f\"Val Epoch: {epoch},F1 = {val_f1},precision = {val_prec}, recall = {val_recall}\")\r\n \r\n ###-------Test-----------------------\r\n test_outputs, test_labels, test_loss = engine.evaluate_fn(test_it, model, device)\r\n test_outputs = torch.Tensor(test_outputs)\r\n _, test_predicted = torch.max(test_outputs, dim=1) \r\n # calculate evaluation paremeters\r\n test_parameters_dict = metrics_func.performance_evaluation_func(test_predicted, test_labels, epoch=str(epoch),flag='test')\r\n # save evaluation paremeters\r\n params_list.append(test_parameters_dict)\r\n \r\n test_f1 = test_parameters_dict['f1_score_macro']\r\n test_prec = test_parameters_dict['precision_macro']\r\n test_recall = test_parameters_dict['recall_macro']\r\n print(f\"test Epoch: {epoch},F1 = {test_f1},precision = {test_prec}, recall = {test_recall}\")\r\n \r\n lr_scheduler = LRScheduler(optimizer)\r\n lr_scheduler(valid_loss)\r\n \r\n \r\n # simple early stopping\r\n# val_f1 = float(val_f1)\r\n #f1 = (float(train_f1) + float(val_f1)) / 2\r\n val_loss = float(valid_loss)\r\n early_stopping(val_loss, model)\r\n if early_stopping.early_stop:\r\n print(\"Early stopping\")\r\n break\r\n # 获得 early stopping 时的模型参数\r\n# 
model.load_state_dict(torch.load('checkpoint.pt'))\r\n\r\n# save_model_func(model, epoch, path='outputs')\r\n \r\n metrics_func.save_parameters_txt(params_list)", "def test_training(self):\n\t\tpass", "def prepare_nfold_datasets(self): # i.e. split into different train/ground-truth(test) dataset\n for alpha in range(1, self.ALPHAs+1):\n if alpha != self.ALPHAs:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI], separator='-')\n else:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI+self.runoff_years], separator='-')\n new_cluster_dir = str(Path(self.tl_model.cluster_dir) / f'alpha_{alpha}_GT-{gt_years}')\n os.makedirs(new_cluster_dir, exist_ok=True)\n\n new_prepared_data_dir = str(Path(self.tl_model.prepared_data_dir) / f'alpha_{alpha}')\n os.makedirs(new_prepared_data_dir, exist_ok=True)\n \n if utils.find(f'*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir) and utils.find(f'*alpha_{alpha}_standardized_stacked_arr.pkl', new_prepared_data_dir):\n pass\n else:\n if not utils.find(f'*target*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No input datasets pre-processed for alpha of {alpha}\")\n prepare.cut_target_dataset(self, alpha, new_prepared_data_dir)\n\n if not utils.find(f'*rf*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No rainfall datasets pre-processed for alpha of {alpha}\")\n prepare.cut_rf_dataset(self, alpha, new_prepared_data_dir)\n \n print(f'Preprocessed pickles for alpha split {alpha} can be found @:\\n{new_prepared_data_dir}')", "def create_train_test(option, transform, params, split=0.2):\r\n clip_im_dir = option.clip_im_dir\r\n matting_dir = option.matting_dir\r\n csv_path = option.csv_path\r\n \r\n print(\"create datasets\")\r\n \r\n \r\n data_df = pd.read_csv(csv_path)\r\n # data_df = MergeDataframe(clip_im_dir, matting_dir)\r\n \r\n #separate data in training and test data (20/80)\r\n train_df, test_df = train_test_split(data_df, test_size=split)\r\n \r\n #search right Dataset class\r\n package_dir = Path(src.dataset.__file__).resolve().parent\r\n\r\n for (_, module_name, _) in iter_modules([package_dir]):\r\n # print(module_name, self.ComType)\r\n if option.dataset.lower() == module_name.lower() :\r\n modelModule = importlib.import_module(\".\"+module_name)\r\n break\r\n \r\n # train data\r\n training_set = modelModule(train_df, clip_im_dir, matting_dir, transform, transform)\r\n train_loader = DataLoader(training_set, **params)\r\n \r\n \r\n #test data\r\n testing_set = modelModule(test_df, clip_im_dir, matting_dir, transform, transform)\r\n test_loader = DataLoader(testing_set, **params)\r\n \r\n return train_loader, test_loader", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def data_manager_fixture():\n\n class DataManager:\n def __init__(self):\n self.gen = 1000\n self.cfg = get_cfg_defaults()\n mode = \"test_inference\"\n self.dataset = Dataset(None, self.cfg, mode)\n self.auto_anchors = AutoAnchors(self.dataset, self.cfg.model, self.gen)\n self.k_points = torch.ones((12, 2)) * 2.0\n self.wh = torch.ones((1000, 2)) * 2.0\n\n return DataManager()", "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n 
cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def setup():\n # change working directory to\n os.chdir(ROOT_DIR)\n # move to dataFiles\n with misc.cd('dataFiles'):\n print('Now in:', os.getcwd())\n # Load in data\n model_test = models.MlModel('rf', 'water-energy.csv', 'expt')\n # Get feature. I use rdkit2d as it is fast to generate\n df, num_feat, feat_time = features.featurize(model_test.data, model_test.algorithm, [0])\n # Split the data\n train_features, test_features, train_target, test_target, feature_list = features.targets_features(df, 'expt')\n return train_features, test_features, train_target, test_target", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def generatePredictorDataTemplate(self):\n self.__pdir = Predictor.directory\n self.__predictorData = PredictorData(None)\n self.save()", "def creates_data_loader():\n dataset_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=True)\n\n dataset_no_faces = FaceDataset(\n IMG_DIR, transform=transform_train, face=False)\n\n datasets_faces_split = train_val_test(dataset_faces, 0.2, 0.0)\n datasets_no_faces_split = train_val_test(dataset_no_faces, 0.2, 0.0)\n\n datasets = {}\n datasets[\"train\"] = datasets_faces_split[\"train\"] + \\\n datasets_no_faces_split[\"train\"]\n datasets[\"test\"] = datasets_no_faces_split[\"test\"]\n datasets[\"val\"] = datasets_faces_split[\"val\"] + \\\n datasets_no_faces_split[\"val\"]\n\n train_loader = DataLoader(dataset=datasets[\"train\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n\n val_loader = DataLoader(dataset=datasets[\"val\"], batch_size=BATCH_SIZE,\n num_workers=NUM_WORKERS, pin_memory=PIN_MEMORY, shuffle=True, drop_last=False)\n return train_loader, val_loader", "def phase_two_data():\n from pathlib import Path\n try:\n import cPickle as pickle\n except ImportError:\n import pickle\n \n from annotation import parse_fulltext\n from features import ALL_FEATURES\n \n from feature_template import apply_templates\n from feature_selection import filter_by_frequency\n from feature_encoding import encode\n\n # Feature templates considered if heading by 1:\n # ----------------------------\n # Position + Voice\n # Path length + Clause layer\n # 1 Predicate + Path\n # Path + Position + Voice\n # Path + Position + Voice + Predicate\n # 1 Head word stem + Predicate\n # 1 Head word stem + Predicate + Path\n # 1 Head word stem + Phrase\n # Clause layer + Position + Predicate\n templates = [tuple([f.name]) for f in ALL_FEATURES] + \\\n [('path_to_frame', 'frame'), ('head_stem', 'frame'), ('head_stem', 'frame', 'path_to_frame'), ('head_stem', 'phrase_type')]\n \n size = 40\n instances = []\n for i, p in enumerate(Path(\"/cs/fs2/home/hxiao/Downloads/fndata-1.5/fulltext/\").glob(\"*.xml\")):\n if i == size:\n break\n sys.stderr.write(\"Processing file: '%s'\\n\" %p.absolute())\n annotations = parse_fulltext(str(p.absolute()))\n instances += 
make_training_data(ALL_FEATURES, annotations)\n\n sys.stderr.write(\"Feature selection...\\n\")\n x, y = zip(*instances)\n x = apply_templates(x, templates)\n features = filter_by_frequency(x, 5)\n sys.stderr.write(\"Feature encoding...\\n\")\n x, feature_map = encode(x, features)\n \n sys.stderr.write(\"Dumping data...\\n\") \n pickle.dump((x, y, ALL_FEATURES, templates, feature_map), open('dump/test_data.pkl', 'w'))\n import pdb\n pdb.set_trace()\n print len(instances)", "def generate_data(project_data, config, split_method = RAW) :\n training_data = []\n testing_data = []\n \n # Flatten the data (collapse the project and session hierarchy into a list of session_data)\n for v in config.train_project_names:\n # Data in all sessions of one project\n project_session_data = random.sample(project_data[v], len(project_data[v]))\n\n training_data += project_session_data[int(config.session_training_percentage[0] * len(project_session_data)):\n int(config.session_training_percentage[1] * len(project_session_data))]\n\n if config.double_training:\n for i in xrange(int(config.session_training_percentage[0] * len(project_session_data)),\n int(config.session_training_percentage[1] * len(project_session_data))):\n session_data = project_session_data[i]\n\n reversed_session_data = {}\n reversed_session_data[SESSION_NAME] = session_data[SESSION_NAME] + \"_reversed\"\n reversed_session_data[SESSION_EVENTS] = []\n\n def reverse_point_data_qsr(point_data):\n reversed_point_data = point_data[:4]\n # Hands to objects feature swap\n reversed_point_data += point_data[8:12] \n reversed_point_data += point_data[4:8]\n\n # Centroid direction and distance difference is symmetric\n reversed_point_data += point_data[12:14]\n\n # Object corners swap\n reversed_point_data += point_data[16:18] \n reversed_point_data += point_data[14:16]\n\n reversed_point_data += point_data[18:19]\n reversed_point_data += point_data[20:21] \n reversed_point_data += point_data[19:20]\n\n # For QTCCS\n reversed_point_data += point_data[22:23]\n reversed_point_data += point_data[21:22]\n reversed_point_data += point_data[24:25]\n reversed_point_data += point_data[23:24]\n\n # # For difference of features\n # fl = 21\n # reversed_point_data += point_data[fl:fl + 4]\n # # Hands to objects feature swap\n # reversed_point_data += point_data[fl + 8:fl + 12] \n # reversed_point_data += point_data[fl + 4:fl + 8]\n\n # # Centroid direction and distance difference is symmetric\n # reversed_point_data += point_data[fl + 12:fl + 14]\n\n # # Object corners swap\n # reversed_point_data += point_data[fl + 16:fl + 18] \n # reversed_point_data += point_data[fl + 14:fl + 16]\n\n # reversed_point_data += point_data[fl + 18:fl + 19]\n # reversed_point_data += point_data[fl + 20:fl + 21] \n # reversed_point_data += point_data[fl + 19:fl + 20]\n\n return reversed_point_data\n\n def reverse_point_data_sparse_qsr(point_data):\n reversed_point_data = point_data[:2 * 56]\n # Hands to objects feature swap\n reversed_point_data += point_data[4 * 56:6 * 56] \n reversed_point_data += point_data[2 * 56:4 * 56]\n\n # Centroid direction and distance difference is symmetric\n reversed_point_data += point_data[6 * 56:7 * 56]\n\n # Object corners swap\n reversed_point_data += point_data[8 * 56:9 * 56] \n reversed_point_data += point_data[7 * 56:8 * 56]\n\n anchor = 9 * 56\n reversed_point_data += point_data[anchor:anchor + 2]\n reversed_point_data += point_data[anchor + 2 * 2:anchor + 3 * 2] \n reversed_point_data += point_data[anchor + 2:anchor + 2 * 2]\n\n anchor = 9 * 
56 + 3 * 2\n # For QTCCS\n reversed_point_data += point_data[anchor + 3:anchor + 2 * 3]\n reversed_point_data += point_data[anchor:anchor + 3]\n reversed_point_data += point_data[anchor + 3 * 3:anchor + 4 * 3]\n reversed_point_data += point_data[anchor + 2 * 3:anchor + 3 * 3]\n\n return reversed_point_data\n\n reversed_session_data[SESSION_DATA] = []\n for point_data in session_data[SESSION_DATA]:\n if split_method == RAW:\n reversed_point_data = point_data[:39]\n reversed_point_data += point_data[51:63]\n reversed_point_data += point_data[39:51]\n elif split_method == PCAS:\n reversed_point_data = point_data[:6]\n # Object centroid swap\n reversed_point_data += point_data[8:10] \n reversed_point_data += point_data[6:8]\n # Object corners swap\n reversed_point_data += point_data[14:18] \n reversed_point_data += point_data[10:14]\n elif split_method == QSR or split_method == EVENT:\n reversed_point_data = reverse_point_data_qsr(point_data)\n elif split_method == SPARSE_QSR:\n reversed_point_data = reverse_point_data_sparse_qsr(point_data)\n\n reversed_session_data[SESSION_DATA].append(reversed_point_data)\n\n for event_str in session_data[SESSION_EVENTS]:\n reversed_event_str = {}\n for key in event_str:\n reversed_event_str[key] = event_str[key]\n\n subj, obj, theme, event, prep = event_str['label']\n def swap_objects(value):\n if value == 2:\n return 3\n if value == 3:\n return 2\n return value\n\n reversed_event_str['label'] = (swap_objects(subj), swap_objects(obj), swap_objects(theme), event, prep)\n\n reversed_session_data[SESSION_EVENTS].append(reversed_event_str)\n\n training_data.append(reversed_session_data)\n\n\n testing_data += project_session_data[int(config.session_testing_percentage[0] * len(project_session_data)):\n int(config.session_testing_percentage[1] * len(project_session_data))]\n \n return (training_data, testing_data)", "def main():\n tpd_file_name = get_nonexisting_file(\"Enter name of new tpd file: \")\n tpd = TrainPredictData(tpd_file_name)\n\n print \"You can now enter the file paths of the the newly created tpd file.\"\n print \"If you want to skip a data set, just press enter without typing anything.\"\n\n train_raw_path = get_existing_file(\"Enter training raw path: \", skip=True)\n if train_raw_path is not None:\n train_raw_key = extract_h5_key(train_raw_path, \"Enter training raw h5 key: \")\n tpd.set_train_raw(train_raw_path, train_raw_key)\n\n train_gt_path = get_existing_file(\"Enter training gt path: \", skip=True)\n if train_gt_path is not None:\n train_gt_key = extract_h5_key(train_gt_path, \"Enter training gt h5 key: \")\n tpd.set_train_gt(train_gt_path, train_gt_key)\n\n train_pred_path = get_existing_file(\"Enter training pred path: \", skip=True)\n if train_pred_path is not None:\n train_pred_key = extract_h5_key(train_pred_path, \"Enter training pred h5 key: \")\n tpd.set_train_pred(train_pred_path, train_pred_key)\n\n train_feat_path = get_existing_file(\"Enter training feature path: \", skip=True)\n while train_feat_path is not None:\n train_feat_key = extract_h5_key(train_feat_path, \"Enter training feature path: \")\n tpd.add_train_feature(train_feat_path, train_feat_key)\n train_feat_path = get_existing_file(\"Enter training feature path: \", skip=True)\n\n test_raw_path = get_existing_file(\"Enter test raw path: \", skip=True)\n if test_raw_path is not None:\n test_raw_key = extract_h5_key(test_raw_path, \"Enter test raw h5 key: \")\n tpd.set_test_raw(test_raw_path, test_raw_key)\n\n test_gt_path = get_existing_file(\"Enter test gt path: \", 
skip=True)\n if test_gt_path is not None:\n test_gt_key = extract_h5_key(test_gt_path, \"Enter test gt h5 key: \")\n tpd.set_test_gt(test_gt_path, test_gt_key)\n\n test_pred_path = get_existing_file(\"Enter test pred path: \", skip=True)\n if test_pred_path is not None:\n test_pred_key = extract_h5_key(test_pred_path, \"Enter test pred h5 key: \")\n tpd.set_test_pred(test_pred_path, test_pred_key)\n\n test_feat_path = get_existing_file(\"Enter test feature path: \", skip=True)\n while test_feat_path is not None:\n test_feat_key = extract_h5_key(test_feat_path, \"Enter test feature path: \")\n tpd.add_test_feature(test_feat_path, test_feat_key)\n test_feat_path = get_existing_file(\"Enter test feature path: \", skip=True)\n\n return 0", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def get_phi(name, model_size='tiny'):\n\n def remove_classifier(model):\n \"\"\"\n Function that removes the last layer of a given model.\n\n Args:\n model: A pretrained model with layers.\n Returns:\n new_model: The same model, but with its last layer removed.\n \"\"\"\n return torch.nn.Sequential(*(list(model.children())[:-1]))\n\n # Whether a pretrained network is used. Defaults to None.\n pretrained = None\n\n if name == 'adult':\n # Define the output size of the layers.\n out_features = 80\n\n # Define the featurizer layers.\n layers = nn.Sequential(\n nn.Linear(INPUT_SIZE_ADULT, out_features),\n nn.SELU())\n\n # Make the featurizer module.\n phi = FeaturizerPhi(layers)\n\n elif name == 'celeba':\n out_features = 2048\n\n # Gather pytorch pretrained resnet50 model.\n layers = torch_models.resnet50(pretrained=True)\n\n # Remove the classifier, which is the last layer.\n layers = remove_classifier(layers)\n\n phi = FeaturizerPhi(layers)\n\n elif name == 'civil':\n # Raise error if unsupported model size is given.\n if model_size not in BERT_MODELS:\n raise ValueError(f\"Invalid input {model_size} for argument 'model_size'. 
\\nUse 'tiny', 'mini', 'small', 'medium' or 'base' instead.\")\n\n out_features = 80\n\n # Gather pytorch pretrained BERT model of specified size.\n pretrained = AutoModel.from_pretrained(f'prajjwal1/bert-{model_size}')\n\n layers = nn.Sequential(\n nn.Linear(BERT_MODELS[model_size], out_features),\n nn.SELU())\n\n phi = FeaturizerPhi(layers)\n\n else:\n # Exception for when a wrong name was given.\n raise Exception('An invalid dataset name was chosen.')\n\n return out_features, pretrained, phi", "def make_data(config, data, label):\n if not os.path.isdir(os.path.join(os.getcwd(), config.checkpoint_dir)):\n os.makedirs(os.path.join(os.getcwd(), config.checkpoint_dir))\n\n if config.is_train:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/train.h5')\n else:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/test.h5')\n\n with h5py.File(savepath, 'w') as hf:\n hf.create_dataset('data', data=data)\n hf.create_dataset('label', data=label)", "def prepare_data():\n #data, label = load_ta_data(), load_ta_target()\n data, label = load_own_data(), load_own_target()\n tra_x, tst_x = split_samples(data)\n tra_y, tst_y = split_samples(label)\n return (tra_x, tst_x, tra_y, tst_y)", "def generate_dataset():\n if not os.path.exists(\"../data/COVID-19/COVID-19.npy\"):\n print(\"Processing Training Data.\")\n training_data = get_training_data('../data/COVID-19/train')\n print(\"Processing Test Data.\")\n test_data = get_training_data('../data/COVID-19/test')\n\n x_train, y_train, x_test, y_test = [], [], [], []\n\n for feature, label in training_data:\n x_train.append(feature)\n y_train.append(label)\n\n for feature, label in test_data:\n x_test.append(feature)\n y_test.append(label)\n\n # Normalize the data\n x_train = np.array(x_train) / 255\n x_test = np.array(x_test) / 255\n\n # resize data for deep learning\n x_train = x_train.reshape(-1, 3, img_size, img_size)\n y_train = np.array(y_train)\n x_test = x_test.reshape(-1, 3, img_size, img_size)\n y_test = np.array(y_test)\n\n # With data augmentation to prevent overfitting and handling the imbalance in dataset\n dataset = {\"x_train\": x_train, \"y_train\": y_train, \"x_test\": x_test, \"y_test\": y_test}\n np.save(\"../data/COVID-19/COVID-19.npy\", dataset)\n else:\n dataset = np.load(\"../data/COVID-19/COVID-19.npy\", allow_pickle=True).item()\n x_train, y_train, x_test, y_test = dataset[\"x_train\"], dataset[\"y_train\"], dataset[\"x_test\"], dataset[\"y_test\"]\n\n x_train_tensor = torch.from_numpy(x_train)\n x_train_tensor = x_train_tensor.type(torch.FloatTensor)\n y_train_tensor = torch.from_numpy(y_train)\n y_train_tensor = y_train_tensor.type(torch.LongTensor)\n x_test_tensor = torch.from_numpy(x_test)\n x_test_tensor = x_test_tensor.type(torch.FloatTensor)\n y_test_tensor = torch.from_numpy(y_test)\n y_test_tensor = y_test_tensor.type(torch.LongTensor)\n\n train_dataset = TensorDataset(x_train_tensor, y_train_tensor)\n test_dataset = TensorDataset(x_test_tensor, y_test_tensor)\n\n return train_dataset, test_dataset", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". 
put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return", "def setUpTestData(cls):\n cls.user = UserFactory()\n cls.auth = AuthFactory()\n\n cls.device = TOTPDevice.objects.create(user=cls.user)\n cls.relate = TOTPDevice.challenge.objects.create(\n device=cls.device, token=cls.auth\n )\n\n cls.algorithm = TOTPAlgorithm()", "def new(self):\n self.labels = ((torch.empty((self.P, 1)).random_(0, 2) - .5) * 2)\n self.D = torch.empty((self.P, self.d)).normal_()\n\n if not self.big:\n self.labels = self.labels.cuda()\n self.D = self.D.cuda()\n\n torch.save(self.D, self.main_dir + '/data/D0.pt')\n torch.save(self.labels, self.main_dir + '/data/labels.pt')" ]
[ "0.62479264", "0.616918", "0.6110755", "0.6029446", "0.5991397", "0.5972835", "0.59226716", "0.58383435", "0.58339", "0.5822229", "0.57949585", "0.57768095", "0.575245", "0.57448965", "0.57334214", "0.5731233", "0.5727741", "0.5724", "0.5704525", "0.57044905", "0.5703933", "0.5692827", "0.56765336", "0.56642985", "0.56609404", "0.56538254", "0.5651217", "0.56359094", "0.5627497", "0.56126505" ]
0.71205443
0
Interpolate NAN elements in matrix.
def _interpolation(matrix): try: ok = ~np.isnan(matrix) xp = ok.ravel().nonzero()[0] fp = matrix[~np.isnan(matrix)] x = np.isnan(matrix).ravel().nonzero()[0] matrix[np.isnan(matrix)] = np.interp(x, xp, fp) return matrix except: return matrix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interpolate_nans(self):\n\n signal = self.signal\n\n # check for more than one nan in row\n for i in range(len(signal)-1) :\n if np.isnan(signal[i]) and np.isnan(signal[i+1]) :\n raise Exception('There are two nans in a row ask moritz what to do !')\n\n if np.isnan(signal[0]) :\n np.signal[0] = signal[1]\n if np.isnan(signal[-1]) :\n signal[-1] = signal[-2]\n\n for i in range(1,len(signal)-1) :\n if np.isnan(signal[i]):\n signal[i] = (signal[i-1] + signal[i+1])/2", "def fill_nan(A):\n\tinds = np.arange(A.shape[0])\n\tgood = np.where(np.isfinite(A))\n\tA[np.isnan(A)] = np.interp(inds[np.isnan(A)], inds[good], A[good])\n\treturn A", "def fill_nan(array):\n idx = np.arange(array.shape[0])\n good = np.where(np.isfinite(array))\n interp = interpolate.interp1d(idx[good], array[good], bounds_error=False)\n return np.where(np.isfinite(array), array, interp(idx))", "def test_linear_interpolation_nan_array(self):\n\n # Define pixel centers along each direction\n x = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]\n y = [4.0, 5.0, 7.0, 9.0, 11.0, 13.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n A[2, 3] = numpy.nan # (x=2.0, y=9.0): NaN\n\n # Then test that interpolated points can contain NaN\n xis = numpy.linspace(x[0], x[-1], 12)\n etas = numpy.linspace(y[0], y[-1], 10)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n\n # Set reference result with expected NaNs and compare\n for i, (xi, eta) in enumerate(points):\n if (1.0 < xi <= 3.0) and (7.0 < eta <= 11.0):\n refs[i] = numpy.nan\n\n assert nanallclose(vals, refs, rtol=1e-12, atol=1e-12)", "def interpolation_matrix(m):\n return np.nanmean(m,axis=1)", "def fill_nan(x):\n (n_rows, wdw) = x.shape\n new_x = np.zeros((n_rows,wdw)); new_x[:] = np.nan\n for i in range(n_rows):\n indMissing = np.where(np.isnan(x[i,:]))[0]\n l = len(x[i,indMissing]) #number of MVs\n if l < 4*wdw/5: #20% available values otherwise discarded\n new_x[i,:] = x[i,:]\n if l > 0 and indMissing[0] == 0: #missing value at index 0 \n c = 0\n while c + 1 < len(indMissing) and indMissing[c+1] == indMissing[c] + 1:\n c += 1\n new_x[i,:c+1] = x[i,c+1] #first nans replaced by first non nan value\n indMissing = np.where(np.isnan(new_x[i,:]))[0]\n l = len(new_x[i,indMissing])\n if l > 0 and indMissing[0] > 0:\n new_x[i,:] = interpolate1d(new_x[i,:]) #interpolate intermediate nans\n ind = np.where(~np.isnan(new_x).all(axis=1))[0]\n new_x = new_x[ind] #remove NaNs \n \n return new_x, ind", "def check_and_interpolate_nans(df):\n nan_count = df.isna().sum().sum()\n if nan_count > 0:\n df.interpolate(method='linear', inplace=True)\n return df", "def test_linear_interpolation_nan_points(self):\n\n # Define pixel centers along each direction\n x = [1.0, 2.0, 4.0]\n y = [5.0, 9.0]\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Then test that interpolated points can contain NaN\n xis = numpy.linspace(x[0], x[-1], 10)\n etas = numpy.linspace(y[0], y[-1], 10)\n xis[6:7] = numpy.nan\n etas[3] = numpy.nan\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = 
linear_function(points[:, 0], points[:, 1])\n assert nanallclose(vals, refs, rtol=1e-12, atol=1e-12)", "def extrapolate_nans(x, y, v):\n if numpy.ma.is_masked(v):\n nans = v.mask\n else:\n nans = numpy.isnan(v)\n notnans = numpy.logical_not(nans)\n v[nans] = scipy.interpolate.griddata((x[notnans], y[notnans]), v[notnans],\n (x[nans], y[nans]),\n method='nearest').ravel()\n return v", "def test_interpolation_random_array_and_nan(self):\n\n # Define pixel centers along each direction\n x = numpy.arange(20) * 1.0\n y = numpy.arange(25) * 1.0\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define arbitrary values for each x, y pair\n numpy.random.seed(17)\n A = numpy.random.random((len(x), len(y))) * 10\n\n # Create islands of NaN\n A[5, 13] = numpy.nan\n A[6, 14] = A[6, 18] = numpy.nan\n A[7, 14:18] = numpy.nan\n A[8, 13:18] = numpy.nan\n A[9, 12:19] = numpy.nan\n A[10, 14:17] = numpy.nan\n A[11, 15] = numpy.nan\n\n A[15, 5:6] = numpy.nan\n\n # Creat interpolation points\n xis = numpy.linspace(x[0], x[-1], 39) # Hit all mid points\n etas = numpy.linspace(y[0], y[-1], 73) # Hit thirds\n points = combine_coordinates(xis, etas)\n\n for mode in ['linear', 'constant']:\n vals = interpolate2d(x, y, A, points, mode=mode)\n\n # Calculate reference result with expected NaNs and compare\n i = j = 0\n for k, (xi, eta) in enumerate(points):\n\n # Find indices of nearest higher value in x and y\n i = numpy.searchsorted(x, xi)\n j = numpy.searchsorted(y, eta)\n\n if i > 0 and j > 0:\n\n # Get four neigbours\n A00 = A[i - 1, j - 1]\n A01 = A[i - 1, j]\n A10 = A[i, j - 1]\n A11 = A[i, j]\n\n if numpy.allclose(xi, x[i]):\n alpha = 1.0\n else:\n alpha = 0.5\n\n if numpy.allclose(eta, y[j]):\n beta = 1.0\n else:\n beta = eta - y[j - 1]\n\n if mode == 'linear':\n if numpy.any(numpy.isnan([A00, A01, A10, A11])):\n ref = numpy.nan\n else:\n ref = (A00 * (1 - alpha) * (1 - beta) +\n A01 * (1 - alpha) * beta +\n A10 * alpha * (1 - beta) +\n A11 * alpha * beta)\n elif mode == 'constant':\n assert alpha >= 0.5 # Only case in this test\n\n if beta < 0.5:\n ref = A10\n else:\n ref = A11\n else:\n msg = 'Unknown mode: %s' % mode\n raise Exception(msg)\n\n #print i, j, xi, eta, alpha, beta, vals[k], ref\n assert nanallclose(vals[k], ref, rtol=1e-12, atol=1e-12)", "def pad(input_data):\n # source : https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array \n data = input_data.copy()\n bad_indexes = np.isnan(data)\n good_indexes = np.logical_not(bad_indexes)\n good_data = data[good_indexes]\n interpolated = np.interp(bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data)\n data[bad_indexes] = interpolated\n return data", "def interpolate1d(X):\n ind = np.arange(X.shape[0])\n ind_not_nans = np.where(~np.isnan(X)) #fill last values by last non nan values\n last_non_nan = X[ind_not_nans[0][-1]]\n f = interpolate.interp1d(ind[ind_not_nans], X[ind_not_nans], bounds_error=False, fill_value=last_non_nan)\n X_int = np.where(np.isfinite(X), X, f(ind))\n \n return X_int", "def image_naninterp(data):\n if not isinstance(data, np.ndarray) or len(data.shape) != 2:\n log.error(\"data must be a 2D %s\" % np.ndarray)\n return\n mask = np.isnan(data)\n if not mask.any():\n return data\n if mask.all():\n log.error(\"data are all NaN\")\n return\n\n yy, xx = np.mgrid[:data.shape[0], :data.shape[1]]\n points = np.array([yy[~mask], xx[~mask]]).T\n interp = interpolate.CloughTocher2DInterpolator(points, data[~mask])\n result = data.copy()\n result[mask] = 
interp(np.array([yy[mask], xx[mask]]).T)\n return result", "def nan_interp(A):\n\tni,nj = np.shape(A)\n\t# extend edges of A by one\n\tA = np.concatenate((np.array([A[:,0]]).transpose(),A,np.array([A[:,-1]]).transpose()),axis=1)\n\tA = np.concatenate((np.array([A[0,:]]),A,np.array([A[-1,:]])),axis=0)\n\t\n\t#nit = 0\n\t#while np.sum(np.isnan(A)) != 0:\n\t#nit+=1\n\tnanp = np.isnan(A)\n\tfor i in range(1,ni+1):\n\t\tfor j in range(1,nj+1):\n\t\t\tif nanp[i,j]:\n\t\t\t\t#\t# edges\n\t\t\t\t#\tif (i==0) & (j!=0)& (j!=nj-1):\n\t\t\t\t#\t\tb = np.array([A[i+1,j],A[i,j-1],A[i,j+1]])\n\t\t\t\t#\tif (i==ni-1) & (j!=0)& (j!=nj-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i,j-1],A[i,j+1]])\n\t\t\t\t#\tif (j==0) & (i!=0)& (i!=ni-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i+1,j],A[i,j+1]])\n\t\t\t\t#\tif (j==nj-1) & (i!=0)& (i!=ni-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i+1,j],A[i,j-1]])\n\t\t\t\t#\t# corners\n\t\t\t\t#\tif (i==0) & (j==0):\n\t\t\t\t#\t\tb = np.array([A[i+1,j],A[i,j+1]])\n\t\t\t\t#\tif (i==ni-1) & (j==0):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i,j+1]])\n\t\t\t\t#\tif (i==0) & (j==nj-1):\n\t\t\t\t#\t\tb = np.array([A[i+1,j],A[i,j-1]])\n\t\t\t\t#\tif (i==ni-1) & (j==nj-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i,j-1]])\n\t\t\t\t#\t# core\n\t\t\t\t#\telse:\n\t\t\t\tb = np.array([A[i-1,j],A[i,j-1],A[i+1,j],A[i,j+1]])\n\t\t\t\tsnan = np.sum(np.isnan(b))\n\t\t\t\tsb = np.nansum(b)\n\t\t\t\tA[i,j] = sb/(len(b)-snan)\n\t\t\t\t#print(i,j)\n\t# only the core matters\n\tA = A[1:ni+1,1:nj+1]\n\treturn A", "def interpolate_matrix(matrix):", "def interpolate_missing(y):\n if y.isna().any():\n y = y.interpolate(method='linear', limit_direction='both')\n return y", "def nan(self, check_inf = True):\n return self.foreach(\n lambda k,v: (k, numpy.isnan(v) + (check_inf == True) * numpy.isinf(v)),\n dimensions = self.dims,\n shape = self.shape,\n )", "def ffill_1d_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n lastval = a[0]\n for i in range(a.shape[0]):\n if np.isnan(a[i]):\n out[i] = lastval\n else:\n lastval = out[i] = a[i]\n return out", "def interpolate_none(self):\n\n # Reset processed data\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan", "def replace_nan(data):\r\n lst_ind = np.array(['valence_intensity', 'anger_intensity',\r\n 'fear_intensity', 'sadness_intensity', 'joy_intensity'])\r\n for i in lst_ind:\r\n native = data[:][i]\r\n avg = np.nanmean(native)\r\n data[:][i] = np.where(np.isnan(native), avg, native)\r\n return data", "def interpolate(self, _val1h) :\n\n\t\tval1h = _val1h.reshape(reduce(operator.mul, _val1h.shape))\t# Convert to 1-D\n\t\tv1h = np.copy(val1h)\n\t\tv1h[np.isnan(val1h)] = 0\t# Prepare for multiply\n\t\tval2 = self.M.transpose() * v1h\n\t\tval2[np.logical_not(self.mask2)] = np.nan\n\t\treturn val2", "def interpolate_eleMean(model):\n # Get mean of columns (data at the same elevation) without taking int account NaNs\n el_mean = nanmean(model,axis=0)\n #print(el_mean) \n # Find indices for NaNs, and replace them by the column mean\n ind_nan = np.where(np.isnan(model))\n model[ind_nan] = np.take(el_mean,ind_nan[1])\n\n return model", "def replace_nan(arr, value):\n arr[np.isnan(arr)] = value\n return arr", "def _nan_cells(traces):\n # Find all cells with NaNs\n nancells = []\n ncells = -1\n for cs in traces:\n if len(traces[cs]) > 0:\n ncells = np.shape(traces[cs])[1]\n ns = 
np.sum(np.sum(np.invert(np.isfinite(\n traces[cs])), axis=2), axis=0)\n vals = np.arange(ncells)\n nancells.extend(vals[ns > 0])\n\n # Set _mask_cells if it hasn't been set\n out = np.zeros(ncells, dtype=bool)\n\n # Convert nancells to a list of good cells\n nancells = np.array(list(set(nancells)))\n if len(nancells) > 0:\n print('Warning: %i cells have NaNs'%len(nancells))\n out[nancells] = True\n\n return out", "def interpolate_na(self, method: str = \"nearest\", **kwargs):\n ds_out = xr.Dataset(attrs=self._obj.attrs)\n for var in self.vars:\n ds_out[var] = self._obj[var].raster.interpolate_na(method=method, **kwargs)\n return ds_out", "def replace_nan(data_jets):\n \n data_mean = np.empty_like(data_jets)\n data_median = np.empty_like(data_jets)\n data_null = np.empty_like(data_jets)\n \n for jet in range(4):\n # Replace Remaining undefined values by Mean, median or zero\n data_mean[jet] = np.where(np.isnan(data_jets[jet]), np.nanmean(data_jets[jet], axis=0), data_jets[jet])\n data_median[jet] = np.where(np.isnan(data_jets[jet]), np.nanmedian(data_jets[jet], axis=0), data_jets[jet])\n data_null[jet] = np.where(np.isnan(data_jets[jet]), np.float64(0), data_jets[jet])\n \n return data_mean, data_median, data_null", "def fill_missing_data_points(data):\n return data.interpolate()", "def fix_nan(image, replace=0.):\n h = pyfits.open(image, mode='update')\n imgdata = h[0].data\n imgdata = np.where(np.isnan(imgdata), replace, imgdata)\n h[0].data = imgdata\n h.flush()\n h.close()", "def replaces_nans_ma(series):\n series = series.replace([np.inf, -np.inf], np.nan)\n result = series.fillna(series.rolling(window=len(series), min_periods=0).mean())\n return result", "def _index_to_nan_fast(data, existing_nans, to_nan):\n index_nan = []\n randgen = (np.random.choice(len(data)) for _ in cnt(start=1))\n for i in range(to_nan):\n ix = next(filter(lambda x: x not in existing_nans and x not in index_nan, randgen))\n index_nan.append(ix)\n data_imp = data.copy()\n data_imp[index_nan] = np.nan\n return data_imp, index_nan" ]
[ "0.7854625", "0.77361774", "0.7462804", "0.703943", "0.6974706", "0.6939863", "0.6854845", "0.6793678", "0.6705853", "0.6682552", "0.65645033", "0.6499139", "0.64525956", "0.6435207", "0.6404386", "0.62853634", "0.6258439", "0.60894126", "0.60201454", "0.59831077", "0.5959317", "0.5899223", "0.58559066", "0.58344746", "0.5831069", "0.5822646", "0.57666534", "0.5761252", "0.5744687", "0.5732637" ]
0.7968063
0
Create a collection of images for each time series across all metrics
def _create_image(list_of_dicts, largest_dim): timer = datetime.now() # All possible metrics _METRICS = ['vmsram', 'tasks', 't_rscthnetno', 't_rscthhfsrb', 'c_ucpupct'] # Initialize the collection of all images concatenated across all metrics images = np.zeros(shape=(len(list_of_dicts), len(_METRICS) * largest_dim, largest_dim, 3)) # For each dictionary in the list of dictionaries (i.e., for each time series) for num_dict, dict in enumerate(list_of_dicts): # For each metric for metric_num, metric in enumerate(_METRICS): temp_sin = np.sin(list_of_dicts[num_dict][metric][0:list_of_dicts[num_dict]['actual_lengths'][metric]]) temp_sin = temp_sin.reshape((len(temp_sin), 1)) temp_cos = np.cos(list_of_dicts[num_dict][metric][0:list_of_dicts[num_dict]['actual_lengths'][metric]]) temp_cos = temp_cos.reshape((len(temp_cos), 1)) # Calculate the image for a specific metric and time series images[num_dict, (metric_num * largest_dim):(metric_num * largest_dim + list_of_dicts[num_dict]['actual_lengths'][metric]), 0:list_of_dicts[num_dict]['actual_lengths'][metric], 0] = _interpolation(np.dot(temp_sin,temp_cos.T) - np.dot(temp_cos,temp_sin.T)) # Scale the set of images to [0,255] interval required by CNN images += 1 images *= 127.5 print "Creating the images took {0}".format(datetime.now() - timer) return images
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def genImages(self, gen_ts):\n t1 = time.time()\n ngen = 0\n\n # determine how much logging is desired\n log_success = to_bool(search_up(self.image_dict, 'log_success', True))\n\n # Loop over each time span class (day, week, month, etc.):\n for timespan in self.image_dict.sections:\n\n # Now, loop over all plot names in this time span class:\n for plotname in self.image_dict[timespan].sections:\n\n # Accumulate all options from parent nodes:\n plot_options = accumulateLeaves(self.image_dict[timespan][plotname])\n\n plotgen_ts = gen_ts\n if not plotgen_ts:\n binding = plot_options['data_binding']\n db_manager = self.db_binder.get_manager(binding)\n plotgen_ts = db_manager.lastGoodStamp()\n if not plotgen_ts:\n plotgen_ts = time.time()\n\n image_root = os.path.join(self.config_dict['WEEWX_ROOT'],\n plot_options['HTML_ROOT'])\n # Get the path that the image is going to be saved to:\n img_file = os.path.join(image_root, '%s.png' % plotname)\n\n # Convert from string to an integer:\n ai = weeutil.weeutil.nominal_spans(plot_options.get('aggregate_interval'))\n # Check whether this plot needs to be done at all:\n if skipThisPlot(plotgen_ts, ai, img_file):\n continue\n\n # skip image files that are fresh, but only if staleness is defined\n stale = to_int(plot_options.get('stale_age'))\n if stale:\n t_now = time.time()\n try:\n last_mod = os.path.getmtime(img_file)\n if t_now - last_mod < stale:\n log.debug(\"Skip '%s': last_mod=%s age=%s stale=%s\",\n img_file, last_mod, t_now - last_mod, stale)\n continue\n except os.error:\n pass\n\n # Create the subdirectory that the image is to be put in. Wrap in a try block in\n # case it already exists.\n try:\n os.makedirs(os.path.dirname(img_file))\n except OSError:\n pass\n\n # Create a new instance of a time plot and start adding to it\n plot = weeplot.genplot.TimePlot(plot_options)\n\n # Calculate a suitable min, max time for the requested time.\n minstamp, maxstamp, timeinc = weeplot.utilities.scaletime(\n plotgen_ts - int(plot_options.get('time_length', 86400)), plotgen_ts)\n # Override the x interval if the user has given an explicit interval:\n timeinc_user = to_int(plot_options.get('x_interval'))\n if timeinc_user is not None:\n timeinc = timeinc_user\n plot.setXScaling((minstamp, maxstamp, timeinc))\n\n # Set the y-scaling, using any user-supplied hints:\n yscale = plot_options.get('yscale', ['None', 'None', 'None'])\n plot.setYScaling(weeutil.weeutil.convertToFloat(yscale))\n\n # Get a suitable bottom label:\n bottom_label_format = plot_options.get('bottom_label_format', '%m/%d/%y %H:%M')\n bottom_label = time.strftime(bottom_label_format, time.localtime(plotgen_ts))\n plot.setBottomLabel(bottom_label)\n\n # Set day/night display\n plot.setLocation(self.stn_info.latitude_f, self.stn_info.longitude_f)\n plot.setDayNight(to_bool(plot_options.get('show_daynight', False)),\n weeplot.utilities.tobgr(plot_options.get('daynight_day_color',\n '0xffffff')),\n weeplot.utilities.tobgr(plot_options.get('daynight_night_color',\n '0xf0f0f0')),\n weeplot.utilities.tobgr(plot_options.get('daynight_edge_color',\n '0xefefef')))\n\n # Loop over each line to be added to the plot.\n for line_name in self.image_dict[timespan][plotname].sections:\n\n # Accumulate options from parent nodes.\n line_options = accumulateLeaves(self.image_dict[timespan][plotname][line_name])\n\n # See what observation type to use for this line. 
By default, use the section\n # name.\n var_type = line_options.get('data_type', line_name)\n\n # Look for aggregation type:\n aggregate_type = line_options.get('aggregate_type')\n if aggregate_type in (None, '', 'None', 'none'):\n # No aggregation specified.\n aggregate_type = aggregate_interval = None\n else:\n try:\n # Aggregation specified. Get the interval.\n aggregate_interval = weeutil.weeutil.nominal_spans(\n line_options['aggregate_interval'])\n except KeyError:\n log.error(\"Aggregate interval required for aggregate type %s\",\n aggregate_type)\n log.error(\"Line type %s skipped\", var_type)\n continue\n\n # Now its time to find and hit the database:\n binding = line_options['data_binding']\n db_manager = self.db_binder.get_manager(binding)\n # we need to pass the line options and plotgen_ts to our xtype\n # first get a copy of line_options\n option_dict = dict(line_options)\n # but we need to pop off aggregate_type and\n # aggregate_interval as they are used as explicit arguments\n # in our xtypes call\n option_dict.pop('aggregate_type', None)\n option_dict.pop('aggregate_interval', None)\n # then add plotgen_ts\n option_dict['plotgen_ts'] = plotgen_ts\n start_vec_t, stop_vec_t ,data_vec_t = weewx.xtypes.get_series(\n var_type,\n TimeSpan(minstamp, maxstamp),\n db_manager,\n aggregate_type=aggregate_type,\n aggregate_interval=aggregate_interval,\n **option_dict)\n\n # Get the type of plot (\"bar', 'line', or 'vector')\n plot_type = line_options.get('plot_type', 'line').lower()\n\n if aggregate_type and plot_type != 'bar':\n # If aggregating, put the point in the middle of the interval\n start_vec_t = ValueTuple(\n [x - aggregate_interval / 2.0 for x in start_vec_t[0]], # Value\n start_vec_t[1], # Unit\n start_vec_t[2]) # Unit group\n stop_vec_t = ValueTuple(\n [x - aggregate_interval / 2.0 for x in stop_vec_t[0]], # Velue\n stop_vec_t[1], # Unit\n stop_vec_t[2]) # Unit group\n\n # Convert the data to the requested units\n new_data_vec_t = self.converter.convert(data_vec_t)\n\n # Add a unit label. NB: all will get overwritten except the last. Get the label\n # from the configuration dictionary.\n unit_label = line_options.get(\n 'y_label', self.formatter.get_label_string(new_data_vec_t[1]))\n # Strip off any leading and trailing whitespace so it's easy to center\n plot.setUnitLabel(unit_label.strip())\n\n # See if a line label has been explicitly requested:\n label = line_options.get('label')\n if label:\n # Yes. Get the text translation\n label = self.text_dict[label]\n else:\n # No explicit label. 
Look up a generic one.\n # NB: generic_dict is a KeyDict which will substitute the key\n # if the value is not in the dictionary.\n label = self.generic_dict[var_type]\n\n # See if a color has been explicitly requested.\n color = line_options.get('color')\n if color is not None: color = weeplot.utilities.tobgr(color)\n fill_color = line_options.get('fill_color')\n if fill_color is not None: fill_color = weeplot.utilities.tobgr(fill_color)\n\n # Get the line width, if explicitly requested.\n width = to_int(line_options.get('width'))\n\n interval_vec = None\n gap_fraction = None\n vector_rotate = None\n\n # Some plot types require special treatments:\n if plot_type == 'vector':\n vector_rotate_str = line_options.get('vector_rotate')\n vector_rotate = -float(vector_rotate_str) \\\n if vector_rotate_str is not None else None\n elif plot_type == 'bar':\n interval_vec = [x[1] - x[0] for x in\n zip(start_vec_t.value, stop_vec_t.value)]\n elif plot_type == 'line':\n gap_fraction = to_float(line_options.get('line_gap_fraction'))\n if gap_fraction is not None and not 0 < gap_fraction < 1:\n log.error(\"Gap fraction %5.3f outside range 0 to 1. Ignored.\",\n gap_fraction)\n gap_fraction = None\n else:\n log.error(\"Unknown plot type '%s'. Ignored\", plot_type)\n continue\n\n # Get the type of line (only 'solid' or 'none' for now)\n line_type = line_options.get('line_type', 'solid')\n if line_type.strip().lower() in ['', 'none']:\n line_type = None\n\n marker_type = line_options.get('marker_type')\n marker_size = to_int(line_options.get('marker_size', 8))\n \n # Add the line to the emerging plot:\n plot.addLine(weeplot.genplot.PlotLine(\n stop_vec_t[0], new_data_vec_t[0],\n label = label,\n color = color,\n fill_color = fill_color,\n width = width,\n plot_type = plot_type,\n line_type = line_type,\n marker_type = marker_type,\n marker_size = marker_size,\n bar_width = interval_vec,\n vector_rotate = vector_rotate,\n gap_fraction = gap_fraction))\n\n # OK, the plot is ready. 
Render it onto an image\n image = plot.render()\n\n try:\n # Now save the image\n image.save(img_file)\n ngen += 1\n except IOError as e:\n log.error(\"Unable to save to file '%s' %s:\", img_file, e)\n t2 = time.time()\n\n if log_success:\n log.info(\"Generated %d images for report %s in %.2f seconds\",\n ngen,\n self.skin_dict['REPORT_NAME'], t2 - t1)", "def process_images(images, cam, params):\n print cam, params\n groups = groupby(images, \"EXPTIME\")\n for time, ims in groups.items():\n func = {\"sbc\": make_sbc_flat_name, \"sky\": make_sky_flat_name}[cam]\n out = func(time, params)\n out = os.path.join(FLATPATH, out)\n print time, len(ims), out\n make_flat_avg(ims, out)", "def get_measurements_for_good_pipeline(nimages=1, group_numbers=None):\n import cellprofiler_core\n\n path = os.path.abspath(\n os.path.join(\n os.path.dirname(cellprofiler_core.__file__),\n \"..\",\n \"tests/data/ExampleSBSImages\",\n )\n )\n # path = os.path.join(tests.modules.example_images_directory(), \"ExampleSBSImages\")\n m = cellprofiler_core.measurement.Measurements()\n if group_numbers is None:\n group_numbers = [1] * nimages\n group_indexes = [1]\n last_group_number = group_numbers[0]\n group_index = 1\n for group_number in group_numbers:\n if group_number == last_group_number:\n group_index += 1\n else:\n group_index = 1\n group_indexes.append(group_index)\n for i in range(1, nimages + 1):\n filename = \"Channel2-%02d-%s-%02d.tif\" % (\n i,\n \"ABCDEFGH\"[int((i - 1) / 12)],\n ((i - 1) % 12) + 1,\n )\n url = cellprofiler_core.utilities.pathname.pathname2url(\n os.path.join(path, filename)\n )\n m[\n cellprofiler_core.constants.measurement.IMAGE,\n cellprofiler_core.constants.measurement.C_FILE_NAME + \"_DNA\",\n i,\n ] = filename\n m[\n cellprofiler_core.constants.measurement.IMAGE,\n cellprofiler_core.constants.measurement.C_PATH_NAME + \"_DNA\",\n i,\n ] = path\n m[\n cellprofiler_core.constants.measurement.IMAGE,\n cellprofiler_core.constants.measurement.C_URL + \"_DNA\",\n i,\n ] = url\n m[\n cellprofiler_core.constants.measurement.IMAGE,\n cellprofiler_core.constants.measurement.GROUP_NUMBER,\n i,\n ] = group_numbers[i - 1]\n m[\n cellprofiler_core.constants.measurement.IMAGE,\n cellprofiler_core.constants.measurement.GROUP_INDEX,\n i,\n ] = group_indexes[i - 1]\n jblob = javabridge.run_script(\n \"\"\"\n importPackage(Packages.org.cellprofiler.imageset);\n importPackage(Packages.org.cellprofiler.imageset.filter);\n var imageFile=new ImageFile(new java.net.URI(url));\n var imageFileDetails = new ImageFileDetails(imageFile);\n var imageSeries=new ImageSeries(imageFile, 0);\n var imageSeriesDetails = new ImageSeriesDetails(imageSeries, imageFileDetails);\n var imagePlane=new ImagePlane(imageSeries, 0, ImagePlane.ALWAYS_MONOCHROME);\n var ipd = new ImagePlaneDetails(imagePlane, imageSeriesDetails);\n var stack = ImagePlaneDetailsStack.makeMonochromeStack(ipd);\n var stacks = java.util.Collections.singletonList(stack);\n var keys = java.util.Collections.singletonList(imageNumber);\n var imageSet = new ImageSet(stacks, keys);\n imageSet.compress(java.util.Collections.singletonList(\"DNA\"), null);\n \"\"\",\n dict(url=url, imageNumber=str(i)),\n )\n blob = javabridge.get_env().get_byte_array_elements(jblob)\n m[\n cellprofiler_core.constants.measurement.IMAGE,\n cellprofiler_core.modules.namesandtypes.M_IMAGE_SET,\n i,\n blob.dtype,\n ] = blob\n pipeline = cellprofiler_core.pipeline.Pipeline()\n pipeline.loadtxt(six.moves.StringIO(GOOD_PIPELINE))\n pipeline.write_pipeline_measurement(m)\n return m", 
"def create_png_images(self):\n if self.subject is None:\n print Console.WARNING + 'You need to specify a subject first' + Console.ENDC\n return\n\n check_dir_of = self.locations.check_dir_of\n check_dir_of(self.locations.HISTO_PNG_U)\n check_dir_of(self.locations.HISTO_PNG)\n check_dir_of(self.locations.SOURCE_PNG)\n\n\n\n fmap_img = ImageUtils.load_nifti_image(self.locations.HIST_FMAP) #loading subject nifti files\n volumes = []\n try:\n for s in self.locations.SOURCES:\n volumes.append(ImageUtils.load_nifti_image(s))\n except IOError as e:\n print Console.FAIL + 'There are errors loading nifi files for subject %s'%self.subject + Console.ENDC\n return False\n \n\n num_slices = volumes[0].shape[2] #use first volume to check expected number of slices\n\n self.locations.create_empty_dir(self.locations.IMAGES_DIR)\n\n print 'Creating input PNGs for %s'%self.subject\n for k, vol in enumerate(volumes):\n for i in range(num_slices):\n imslice = ImageUtils.data_to_bytescale_rgb(vol[:, :, i])\n im = Image.fromarray(imslice)\n im.save(self.locations.SOURCE_PNG % (self.locations.LABELS[k],i))\n\n \n print 'Creating histology PNGs for %s'%self.subject\n for i in range(num_slices):\n\n im_unscaled = ImageUtils.data_to_unscaled_rgb(fmap_img[:, :, i]); #keeps the original values\n im_unscaled = Image.fromarray(im_unscaled)\n im_unscaled = im_unscaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_unscaled.save(self.locations.HISTO_PNG_U % i)\n\n im_scaled = ImageUtils.data_to_bytescale_rgb(fmap_img[:,:,i]); # bytescaled histology\n im_scaled = Image.fromarray(im_scaled)\n im_scaled = im_scaled.filter(ImageFilter.GaussianBlur(radius=2)) #Filter requested by Ali Khan\n im_scaled.save(self.locations.HISTO_PNG % i)\n\n print\n return True", "def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))", "def images(self, **kwargs):\n\n raise NotImplementedError", "def metrics_group():", "def getimgs():", "def get_images(self, start_at=None, count=None):\n start_at = 0 if start_at is None else start_at\n end_at = len(self.fps) if count is None else start_at+count\n for fp in self.fps[start_at:end_at]:\n try:\n image = ndimage.imread(fp, mode=\"RGB\")\n except IOError as exc:\n image = None\n yield image", "def get_images_to_build(fuzzers, benchmarks):\n images = {}\n templates = _get_image_type_templates()\n for fuzzer in fuzzers:\n for benchmark in benchmarks:\n for name_templ, obj_templ in templates.items():\n name, obj = _instantiate_image_obj(name_templ, obj_templ,\n fuzzer, benchmark)\n images[name] = obj\n return images", "def save_images(self, step, images):\n\n # Save\n with self.summary_writer.as_default():\n for name, batch in images.items():\n image = batch[0]\n image = tf.expand_dims(image, axis=0)\n tf.summary.image(name, image, step)", "def _get_images(self):\n raw_outputs = self.interface.get_data(self.target_charge,\n self.charge_deviation,\n n_samples=self.n_samples)\n\n # apply roi to images\n roi_images = []\n for i in range(self.n_samples):\n roi_images += [apply_roi(raw_outputs['raw_images'][i], raw_outputs['ROI'])]\n\n # process and identify blobs in image\n min_size = 
100\n outputs = {}\n for ele in self.output_keys:\n outputs[ele] = []\n\n for i in range(len(roi_images)):\n processed_image_data = image_processing.process_and_fit(roi_images[i],\n min_size)\n\n for ele in self.output_keys:\n if ele == 'image_check':\n outputs[ele] += [image_processing.check_image(processed_image_data['binary_image'],\n processed_image_data['smoothed_image'])]\n elif ele == 'processed_images':\n outputs[ele] += [processed_image_data['smoothed_image']]\n else:\n outputs[ele] += [processed_image_data[ele]]\n\n for ele in self.output_keys:\n outputs[ele] = np.array(outputs[ele])\n\n # add in raw data\n outputs.update(raw_outputs)\n\n # if we need to, get averaged results\n if self.average_measurements:\n avg_keys = ['rms_x', 'rms_y', 'CX', 'CY', 'n_blobs', 'FWHMX', 'FWHMY', 'centroid_offset']\n for key in avg_keys:\n outputs[key] = np.nanmean(outputs[key])\n\n return outputs", "def generate_images(self, image_idx, is_training, batch_size=16):\n \n # arrays to store our batched data\n images, ages, races, genders = [], [], [], []\n while True:\n for idx in image_idx:\n person = self.df.iloc[idx]\n \n age = person['age']\n race = person['race_id']\n gender = person['gender_id']\n file = person['file']\n \n im = self.preprocess_image(file)\n \n ages.append(age / self.max_age)\n races.append(to_categorical(race, len(dataset_dict['race_id'])))\n genders.append(to_categorical(gender, len(dataset_dict['gender_id'])))\n images.append(im)\n \n # yielding condition\n if len(images) >= batch_size:\n yield np.array(images), [np.array(ages), np.array(races), np.array(genders)]\n images, ages, races, genders = [], [], [], []\n \n if not is_training:\n break", "def save_test_images(images):\n for description, img in images.items():\n save_to_image(img, description)\n save_to_netcdf(img, description)", "def custom_data_generator(img_paths, final_height, final_width):\n for img_path in img_paths:\n image = Image.open(img_path)\n resized_image = image.resize((final_width, final_height), Image.ANTIALIAS) # Image.LANCZOS\n img = np.array(resized_image)\n img = tf.image.convert_image_dtype(img, tf.float32)\n yield img, tf.constant([[]], dtype=tf.float32), tf.constant([], dtype=tf.int32)", "def batch(img_path, gt_path,img_list, batch, total_size, label_list):\r\n\r\n image_list = [os.path.join(img_path, i) for i in img_list]\r\n gt_list = [os.path.join(gt_path,i) for i in img_list]\r\n\r\n \r\n for i in range(0, total_size, batch):\r\n yield image_load_resize(image_list[i:i+batch]), make_label_map(gt_list[i:i+batch], label_list)", "def generate_test_images():\n results = {}\n for antialias, aa_descriptor in antialias_options:\n for canvas, canvas_descriptor in canvas_options:\n for func in (generate_test_001,\n generate_test_002,\n generate_test_003,\n generate_test_004,\n generate_test_005,\n generate_test_007,\n ):\n points, name = func()\n aggregators = draw_lines(canvas, points, antialias)\n img = shade(aggregators, cmap=cmap01)\n description = \"{}_{}_{}\".format(\n name, aa_descriptor, canvas_descriptor)\n results[description] = img\n\n for func in (generate_test_006, ):\n points, name = func()\n aggregator = draw_multi_segment_line(canvas, points, antialias)\n img = shade(aggregator, cmap=cmap01)\n description = \"{}_{}_{}\".format(\n name, aa_descriptor, canvas_descriptor)\n results[description] = img\n return results", "def get_ticker_images(video, ticker, frame_numbers):\n \n images = []\n for frame_number in frame_numbers:\n frame = video.frame(frame_number)\n 
images.append(cut_window(frame, ticker))\n \n return images", "def get_template_series(self, nb_images):\n\n # Tab for the series of images\n self.template = []\n\n # Tab\n temp = []\n\n # Make current position the zero position\n self.arm.set_to_zero([0, 1, 2])\n self.microscope.set_to_zero([0, 1, 2])\n\n # Take imges only in the template zone\n template = self.template_zone()\n height, width = template.shape[:2]\n\n # Tab of weight to detect where the pipette is\n weight = []\n\n # Detecting the tip\n for i in range(3):\n for j in range(3):\n if (i != 1) & (j != 1):\n # divide template zone into 8 images\n temp = template[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n\n # Search the tip using the number of darkest pixel in the image\n bin_edge, _ = np.histogram(temp.flatten())\n weight += [bin_edge.min()]\n else:\n # image is the center of template zone, do not consider to have functional get_withdraw_sign method\n weight += [-1]\n\n # pipette is in the image with the most darkest pixels\n index = weight.index(max(weight))\n j = index % 3\n i = index // 3\n\n # Update the position of the tip in image\n self.template_loc = [temp.shape[1] * (1 - j / 2.), temp.shape[0] * (1 - i / 2.)]\n\n # Get the series of template images at different height\n for k in range(nb_images):\n self.microscope.absolute_move(k - (nb_images - 1) / 2, 2)\n self.microscope.wait_motor_stop(2)\n time.sleep(1)\n img = self.template_zone()\n height, width = img.shape[:2]\n img = img[i * height / 4:height / 2 + i * height / 4, j * width / 4:width / 2 + j * width / 4]\n self.template += [img]\n\n # reset position at the end\n self.go_to_zero()\n pass", "def store_sf_img_metrics(self):\n logger.info('Storing iso image metrics')\n rows = list(self._metrics_table_row_gen(self.job_id, self.sf_db_id,\n self.sf_metrics_df, self.sf_adduct_peaksn,\n self.metrics))\n self.db.insert(METRICS_INS, rows)", "def img_series_stats(image_ccd_lst,plots_path,obsdate):\n median_count = []\n mean_count = []\n \n source_hdu = CCDData(image_ccd_lst[0],unit='adu')\n source_image_data = source_hdu.data.astype(float) \n source_image_hdr = source_hdu.header\n target_name = source_image_hdr['FIELD'].strip(' ')\n exptime = source_image_hdr['EXPTIME']\n chip_num = source_image_hdr['CHIP']\n \n for a_file in image_ccd_lst:\n hdu = CCDData(a_file,unit='adu')\n image_data = hdu.data.astype(float) \n image_hdr = hdu.header\n \n median_count.append(np.median(a_file))\n mean_count.append(np.mean(a_file))\n \n min_count_for_median = np.min(median_count)\n min_count_for_mean = np.min(mean_count)\n max_count_for_median = np.max(median_count)\n max_count_for_mean = np.max(mean_count)\n \n plt.figure()\n plt.plot(mean_count, label='mean',color=\"palevioletred\")\n plt.axhline(y=min_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='min mean {:.2f}'.format(min_count_for_mean),alpha=1)\n plt.axhline(y=max_count_for_mean,linestyle='-',linewidth=0.5,color='blue',label='max mean {:.2f}'.format(max_count_for_mean),alpha=1)\n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Mean pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_mean.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()\n\n plt.figure()\n plt.plot(median_count, label='median',color=\"darkviolet\")\n plt.axhline(y=min_count_for_median,linestyle='-',linewidth=0.5,color='red',label='min median {:.2f}'.format(min_count_for_median),alpha=1)\n 
plt.axhline(y=max_count_for_median,linestyle='-',linewidth=0.5,color='red',label='max median {:.2f}'.format(max_count_for_median),alpha=1) \n plt.xlabel('Image number')\n plt.ylabel('Count (ADU)')\n plt.title('Median pixel value for aligned images')\n plt.legend()\n plt.grid()\n plt.savefig(plots_path/\"{}-{}-{}-aligned_stats_median.jpg\".format(obsdate,\n target_name,\n exptime,chip_num),\n dpi=900)\n plt.show()", "def _iter_images(self):\n for image in self._images:\n yield np.array(image.convert('RGB'))", "def create_new_images(x):\n \n datagen = ImageDataGenerator(width_shift_range=0.1,\n height_shift_range=0.1,\n shear_range=0.1,\n zoom_range=0.1,\n horizontal_flip=True,\n fill_mode='constant',\n cval=0) \n \n i = 0\n for batch in datagen.flow(x, batch_size=1,\n save_to_dir='data/Histology/new_benign',\n save_prefix='benign',\n save_format='jpeg'):\n i += 1 \n if i > 3:\n break\n \n return 0", "def getGrouppedRawImages():\n imagesGlob = ['**/*_timestamped.jpg', '**/*_timestamped.JPG']\n images = func.reduce(operator.add, [[path for path in path.Path(\n '.').glob(glob)] for glob in imagesGlob], [])\n labelled = sorted([{\n 'label': image.parent.parent.name,\n 'time': image.parent.name,\n 'path': image\n } for image in images], key=lambda label: label['label'])\n return iter.groupby(labelled, key=lambda label: label['label'])", "def load_images(self, tmx):\n for image_data in tmx.images:\n if image_data:\n image, _, _ = image_data\n self.load_image(image)", "def write_images(deployment_key, image_data):\n\n for image_data_dict in image_data:\n\n print \"------------------>>> \" + image_data_dict['longitude']+\" \"+image_data_dict['latitude']\n\n #save the image\n image = Image(deployment_id=deployment_key,\n image_name=image_data_dict['image_name'],\n date_time=image_data_dict['date_time'],\n position=\"SRID=4326;POINT(\"+image_data_dict['longitude']+\" \"+image_data_dict['latitude']+\")\",\n #depth=image_data_dict['depth'],\n #depth_uncertainty=image_data_dict['depth_uncertainty'],\n )\n image.save()\n\n write_measurement(image, 'depth', 'm', image_data_dict['depth'])\n write_measurement(image, 'depth_uncertainty', 'm', image_data_dict['depth_uncertainty'])\n write_measurement(image, 'temperature', 'cel', image_data_dict['temperature'])\n write_measurement(image, 'salinity', 'psu', image_data_dict['salinity'])\n write_measurement(image, 'pitch', 'rad', image_data_dict['pitch'])\n write_measurement(image, 'roll', 'rad', image_data_dict['roll'])\n write_measurement(image, 'yaw', 'rad', image_data_dict['yaw'])\n write_measurement(image, 'altitude', 'm', image_data_dict['altitude'])\n\n #link the camera to the image\n camera_data_dict = read_camera_data(image_data_dict)\n camera = Camera(**camera_data_dict)\n camera.image = image\n camera.save()\n\n return None", "def _generate_images(self, trace):\n images = []\n colors = []\n colors_by_shape = {}\n for board in trace:\n width = int(round((float(board.shape[1]) / board.shape[0]) * self._height))\n cellsize = width / board.shape[1] # cell size\n img = np.zeros((self._height, width, 3), dtype=np.uint8)\n\n tiles = {} # map from integer rep. 
of the tile to a shape\n for y in range(board.shape[0]):\n for x in range(board.shape[1]):\n cell = board[y,x]\n if cell not in tiles:\n tiles[cell] = (x, y, 1, 1) # x, y, w, h\n else:\n cur_x, cur_y, cur_w, cur_h = tiles[cell]\n if x >= cur_x + cur_w:\n cur_w = (x-cur_x) + 1\n if y >= cur_y + cur_h:\n cur_h = (y-cur_y) + 1\n tiles[cell] = (cur_x, cur_y, cur_w, cur_h)\n\n # Colors\n if len(colors_by_shape) == 0:\n for tid in tiles:\n shape = (tiles[tid][2], tiles[tid][3])\n if shape not in colors_by_shape:\n colors_by_shape[shape] = hex_to_rgb(random_unique_color(colors))\n colors.append(colors_by_shape[shape])\n\n for tid in tiles:\n x, y, w, h = tiles[tid]\n shape = (w,h)\n empty = board[y,x] == 0\n x, y, w, h = x*cellsize, y*cellsize, w*cellsize, h*cellsize\n # Draw a filled rectangle without color\n if not empty:\n cv2.rectangle(img, (x, y), (x+w, y+h), colors_by_shape[shape],-1)\n else:\n cv2.rectangle(img, (x, y), (x+w, y+h), [0,0,0], -1) #, 8)-\n # Draw a boundary\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 0), 2, 8)\n \n images.append(img)\n return images", "def generate_images(generator_model, output_dir, epoch):\n test_image_stack = generator_model.predict(np.random.normal(size=(10, 100)))\n test_image_stack = (test_image_stack * 255)\n test_image_stack = np.squeeze(np.round(test_image_stack).astype(np.uint8))\n tiled_output = tile_images(test_image_stack)\n tiled_output = Image.fromarray(tiled_output)\n outfile = os.path.join(output_dir, 'epoch_{}.png'.format(epoch))\n tiled_output.save(outfile)", "def test_Series():\n # create from hdr image files\n Series(hdf5, image_dir=data_dir,\n time_interval=time_interval, cachedir=cache)\n # loading from hdf5 file\n Series(hdf5, cachedir=cache)", "def visualize_MTL(**images):\r\n n = len(images)\r\n plt.figure(figsize=(16, 5))\r\n for i, (name, image) in enumerate(images.items()):\r\n if image==None:\r\n continue\r\n else:\r\n plt.subplot(1, n, i + 1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n plt.title(' '.join(name.split('_')).title())\r\n plt.imshow(image)\r\n plt.show()" ]
[ "0.6314589", "0.6075045", "0.5856271", "0.57666314", "0.5765845", "0.5717563", "0.57157075", "0.5714897", "0.57141167", "0.5712262", "0.5707124", "0.57003945", "0.5693764", "0.56889784", "0.56745064", "0.56572545", "0.5643271", "0.5632118", "0.5629158", "0.56117296", "0.5605957", "0.5604387", "0.5601933", "0.5593919", "0.5591321", "0.5589957", "0.55779535", "0.5572448", "0.5570743", "0.55642676" ]
0.724603
0
Extract all line start/length pairs from the hunk header. I.e. for "@@ -685,8 +686,14 @@ ..." extract `[(685, 8), (686, 14)]`. We do not extract the `-`/`+` signs. All leading segments have a `-` sign, and the last segment has a `+`.
def safely_parse_metadata(self):\n    # type: () -> List[Tuple[LineNo, int]]\n    return [\n        (int(start), int(length or "1"))\n        for start, length in SAFE_PARSE_HUNK_HEADER.findall(\n            self.text.lstrip("@").split("@", 1)[0]\n        )\n    ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_hunks(diff: str) -> list[Hunk]:\n diff_pattern = (\n r\"diff --git a/.* b/(.*)\\n\" # capture file name\n r\"(?:\\w+ file mode \\d+\\n)?\" # maybe 'new file mode 100644' or similar\n r\"index .*\\n\"\n r\"--- .*\\n\"\n r\"\\+\\+\\+ .*\\n\"\n )\n\n # capture line number and length from header\n hunk_header_pattern = r\"@@ -\\d+,\\d+ \\+(\\d+),(\\d+) @@.*\\n\"\n\n # ignore initial empty match\n raw_per_file_hunks = re.split(diff_pattern, diff)[1:]\n\n parsed_hunks = []\n\n for file, raw_hunks in batch(raw_per_file_hunks, 2):\n # ignore initial empty match\n hunks = re.split(hunk_header_pattern, raw_hunks, re.MULTILINE)[1:]\n for start, length, body in batch(hunks, 3):\n lines = body.split(\"\\n\")\n lines = lines if lines[-1] else lines[:-1] # trim empty line\n parsed_hunks.append(Hunk(file, int(start), int(length), lines))\n\n return parsed_hunks", "def extract_header(tgt_file):\n with open(tgt_file) as tf:\n h_lines = []\n for t_line in tf:\n s_line = t_line.strip().split()\n if len(s_line) < 2:\n h_lines.append(t_line)\n continue\n try:\n # If we have a timestep, this is not a header line\n int(s_line[0])\n break\n except ValueError:\n h_lines.append(t_line)\n return h_lines", "def parse_zhuyin(line, length):\n END = '˙ˊˇˋ-'\n line = re.findall('zhuyin: .*, origin: ', line)[0][8:-10]\n zhuyin = []\n idx = 0\n for num in length:\n now = ''\n now_length = 0\n for i in range(idx, len(line)):\n now += line[i]\n\n if line[i] in END:\n now_length += 1\n if now_length == num:\n zhuyin.append(now)\n idx = i + 1\n break\n return zhuyin", "def get_chunks(diff):\n diff = clean_diff(diff)\n chunk = []\n chunks = []\n for line in diff.split('\\n'):\n if not line:\n continue\n if line.startswith('@@ '):\n if chunk:\n chunks.append('\\n'.join(chunk) + '\\n')\n chunk = [line]\n else:\n chunk.append(line)\n if chunk:\n chunks.append('\\n'.join(chunk) + '\\n')\n return chunks", "def _hunks_from_diff(diff_output):\n \n # TARGETS is a list of files with an optional list of hunks, represented as\n # pair (start, end) of line numbers, 1 based. 
\n # element of TARGETS: (filename, None) or (filename, [(start,end)])\n target_files = []\n \n # hunks_current_list serves as a reference to the hunks list of the\n # last added file\n hunks_current_list = None\n\n for line in diff_output:\n file_match = DIFF_FILE_PATT.search(line)\n hunk_match = DIFF_HUNK_PATT.search(line)\n if file_match:\n file_path = os.path.abspath(os.path.join(PELOTON_DIR, \n file_match.group(1)))\n \n hunks_current_list = []\n if file_path.endswith(\".h\") or file_path.endswith(\".cpp\"):\n target_files.append((file_path, hunks_current_list))\n # If this file is not .cpp/.h the hunks_current_list reference\n # will point to an empty list which will be discarded later\n elif hunk_match:\n # add entry in the hunk list of the last file\n if hunk_match.group(4) is None:\n hunk = (int(hunk_match.group(2)), int(hunk_match.group(2)))\n else:\n hunk = (int(hunk_match.group(2)), int(hunk_match.group(2)) + \n int(hunk_match.group(4)))\n hunks_current_list.append(hunk)\n \n return target_files", "def splitTrackingNums(_pack):\n multi = [ i.strip() for i in _pack[1].split(';') ]\n splits_ = [ [_pack[0], m] for m in multi ]\n return splits_", "def extract(grammar, inputdata, fixed_start = False):\r\n if not inputdata:\r\n return []\r\n checker = checker_factory(grammar)\r\n\r\n if isinstance(inputdata[0], (Token, PositionToken)):\r\n inputdata = [x.content for x in inputdata]\r\n\r\n totallen = len(inputdata)\r\n try:\r\n maxl = grammar.maxsize or totallen\r\n except NotImplementedError:\r\n maxl = totallen\r\n try:\r\n #minl = grammar.minsize #FIXME: It won't work with incompatible alphabets\r\n minl = 1\r\n except NotImplementedError:\r\n minl = 1\r\n if fixed_start:\r\n max_start = 1\r\n else:\r\n max_start = totallen\r\n result = []\r\n for i in range(max_start):\r\n for j in range(i+minl, min(i+maxl, totallen) + 1):\r\n check = checker.check(inputdata[i:j])\r\n if check:\r\n result.append(PositionToken(inputdata[i:j], None, i, j))\r\n return result", "def extract(lines):\n prefix = 'Note: including file: '\n for line in lines:\n if line.startswith(prefix):\n line = os.path.normpath(line[len(prefix):])\n # Determine the depth by counting the number of spaces starting the line.\n depth = len(line) - len(line.lstrip()) + 1\n yield (depth, line.strip())", "def parse(self):\n result = list()\n for i, line in enumerate([x.strip() for x in self._input_file], 1):\n if not line:\n continue\n # There should be only 2 entries. 
Example:\n # kernel`0xffffffff8074d27e;kernel`_sx_xlock 1\n try:\n frames, value = line.split()\n frames = [trim_offset(n) for n in frames.split(';')]\n except ValueError:\n raise StackCollapserException('Unable to parse line {}'.format(i))\n result.append((frames, int(value)))\n return result", "def get_metadata_header_lines(input_file):\n # type: (str) -> List[str]\n if not FileSystems.exists(input_file):\n raise ValueError('{} does not exist'.format(input_file))\n return[line for line in _header_line_generator(input_file) if\n line.startswith('##')]", "def svn_diff_hunk_get_original_start(hunk):\n return _diff.svn_diff_hunk_get_original_start(hunk)", "def svn_diff_hunk_get_modified_start(hunk):\n return _diff.svn_diff_hunk_get_modified_start(hunk)", "def findPKHeaders(args, fh):\n\n def processchunk(o, chunk):\n n = -1\n while True:\n n = chunk.find(b'PK', n+1)\n if n == -1 or n+4 > len(chunk):\n break\n cls = getDecoderClass(chunk[n+2:n+4])\n if cls:\n hdrEnd = n+4+cls.HeaderSize\n if hdrEnd > len(chunk):\n continue\n\n # todo: skip entries entirely within repeated chunk\n # if n<64 and hdrEnd>64:\n # continue\n\n yield cls(o, chunk, n+4)\n\n prev = b''\n o = 0\n if args.offset:\n fh.seek(args.offset, os.SEEK_SET if args.offset >= 0 else os.SEEK_END)\n o = args.offset\n while args.length is None or o < args.length:\n want = args.chunksize\n if args.length is not None and want > args.length - o:\n want = args.length - o\n fh.seek(o)\n chunk = fh.read(want)\n if len(chunk) == 0:\n break\n for ch in processchunk(o-len(prev), prev+chunk):\n yield ch\n\n # 64 so all header types would fit, exclusive their variable size parts\n prev = chunk[-64:]\n o += len(chunk)", "def read_slithertxt(filename: os.PathLike) -> tuple:\n\n reg_statistics = list()\n consume = False\n with open(filename, \"r\") as f:\n for line in f:\n if not consume:\n if line.startswith(\" FromLine\"):\n reg_statistics.append(line)\n consume = True\n else:\n continue\n else:\n if line.isspace():\n consume = False\n break\n else:\n reg_statistics.append(line)\n\n matchline = list()\n lineoffset = list()\n sampoffset = list()\n\n dialect = csv.Dialect\n dialect.delimiter = \" \"\n dialect.skipinitialspace = True\n dialect.quoting = csv.QUOTE_NONE\n dialect.lineterminator = \"\\n\"\n\n reader = csv.DictReader(reg_statistics, dialect=dialect)\n for row in reader:\n matchline.append(float(row[\"MatchLine\"]))\n lineoffset.append(float(row[\"LineOffset\"]))\n sampoffset.append(float(row[\"SampOffset\"]))\n\n return (np.array(matchline), np.array(lineoffset), np.array(sampoffset))", "def file_fzp_start(filename):\n\n with open(filename) as in_f:\n c= 0\n cols = []\n #find start of VISSIM data\n line = in_f.readline()\n while 'VehNr;' not in line:\n line = in_f.readline()\n cols = [x.strip() for x in line.split(';')][:-1]\n c +=1\n\n return {'lines_to_skip' : c, 'header_cols' : cols}", "def commentsStartStopLineNmbr(data):\n begin = 0\n end = 0\n i = 0\n\n if data is None or len(data) < 1:\n return None\n\n while i < len(data):\n if \"<table class=\\\"CMheadingBar\\\"\" in data[i]:\n if begin is 0:\n begin = i\n else:\n end = i\n break\n i += 1\n return (int(begin), int(end))", "def split_file(hl7file):\n rv = []\n for line in hl7file.split(\"\\r\"):\n line = line.strip()\n if line[:3] in [\"FHS\", \"BHS\", \"FTS\", \"BTS\"]:\n continue\n if line[:3] == \"MSH\":\n newmsg = [line]\n rv.append(newmsg)\n else:\n if len(rv) == 0:\n logger.error(\"Segment received before message header [%s]\", line)\n continue\n rv[-1].append(line)\n rv 
= [\"\\r\".join(msg) for msg in rv]\n for i, msg in enumerate(rv):\n if not msg[-1] == \"\\r\":\n rv[i] = msg + \"\\r\"\n return rv", "def head_lines(k):\n first_lines = 'Radar:' + location + '\\n\\n'' ' + k + '-DATA DATE: ' + date + \\\n '\\n\\n Bin Height/km'\n h = h1\n m = m1\n while h * 60 + m <= h2 * 60 + m2:\n hour = str(h)\n if len(hour) == 1:\n hour = \"0\" + hour\n minute = str(m)\n if len(minute) == 1:\n minute = \"0\" + minute\n first_lines = first_lines + ' ' + hour + ':' + minute + ' '\n h, m = next_time(h, m)\n first_lines = first_lines + '\\n\\n'\n return first_lines", "def _extract_next_forc(self, lines):\r\n\r\n _h, _m, _hr, _T = [], [], [], []\r\n i = 0\r\n\r\n while lines[i][0] in ['+', '-']:\r\n split_line = lines[i].split(',')\r\n _h.append(float(split_line[0]))\r\n _hr.append(_h[0])\r\n _m.append(float(split_line[1]))\r\n if self.temperature is not None:\r\n _T.append(float(split_line[2]))\r\n i += 1\r\n\r\n self.h.append(_h)\r\n self.hr.append(_hr)\r\n self.m.append(_m)\r\n if self.temperature is not None:\r\n self.temperature.append(_T)\r\n\r\n return len(_h)", "def find_line_markers(source):\n markers = {}\n for lineno, line in enumerate(source.splitlines(), start=1):\n m = re.search(r\"#=(\\w+)\", line)\n if m:\n markers[lineno] = m.group(1)\n return markers", "def get_frag_lines(log_data):\n frag_line_list = []\n for line in log_data.split('\\n'):\n if 'killed' in line:\n frag_line_list.append(line)\n return frag_line_list", "def findlinestarts(code):\n byte_increments = [ord(c) for c in code.co_lnotab[0::2]]\n line_increments = [ord(c) for c in code.co_lnotab[1::2]]\n result = []\n lastlineno = None\n lineno = code.co_firstlineno\n addr = 0\n for byte_incr, line_incr in zip(byte_increments, line_increments):\n if byte_incr:\n if lineno != lastlineno:\n result.append((addr, lineno))\n lastlineno = lineno\n addr += byte_incr\n lineno += line_incr\n if lineno != lastlineno:\n result.append((addr, lineno))\n return result", "def _readline_ins(self):\n if self._ins_filehandle is None:\n if not os.path.exists(self._ins_filename):\n raise Exception(\n \"instruction file '{0}' not found\".format(self._ins_filename)\n )\n self._ins_filehandle = open(self._ins_filename, \"r\")\n line = self._ins_filehandle.readline()\n self._ins_linecount += 1\n if line == \"\":\n return None\n self._last_line = line\n # check for spaces in between the markers - this gets ugly\n line = line.lower()\n if self._marker is not None and self._marker in line:\n\n # def find_all(a_str, sub):\n # start = 0\n # while True:\n # start = a_str.find(sub, start)\n # if start == -1:\n # return\n # yield start\n # start += len(sub)\n # poss speedup using regex\n midx = [m.start() for m in re.finditer(re.escape(self._marker), line)]\n # midx = list(find_all(line, self._marker))\n midx.append(len(line))\n first = line[: midx[0]].strip()\n tokens = []\n if len(first) > 0:\n # tokens.append(first)\n tokens.extend([f.strip() for f in first.split()])\n for idx in range(1, len(midx) - 1, 2):\n mstr = line[midx[idx - 1] : midx[idx] + 1]\n ostr = line[midx[idx] + 1 : midx[idx + 1]]\n tokens.append(mstr)\n tokens.extend(ostr.split())\n else:\n tokens = line.strip().split()\n return tokens", "def _lines(filename):\n \n handle = gzip.open(filename, 'rt') if _gz(filename) else open(filename)\n for line in handle:\n if not line.startswith('#'):\n yield line.strip().split('\\t')", "def extract_fastq_info(fastq):\n f = gzip.open(fastq, 'rb')\n header_lines = [x.replace(\"\\n\",\"\") for x in f.readlines(10000) if 
x.startswith(\"@\")]\n\n for heading in header_lines:\n l = re.split(r'(\\:|#| )',heading)\n line = {}\n index_set = []\n if len(l) == 11:\n line[\"instrument\"] = l[0]\n line[\"flowcell_lane\"] = l[2]\n line[\"flowcell_tile\"] = l[4]\n try:\n line[\"pair\"] = l[10].split(\"/\")[1]\n index_set.append(l[10].split(\"/\")[0])\n except:\n pass\n elif len(l) == 21:\n line[\"instrument\"] = l[0]\n line[\"run_id\"] = l[2]\n line[\"flowcell_id\"] = l[4]\n line[\"flowcell_lane\"] = l[6]\n line[\"flowcell_tile\"] = l[8]\n line[\"pair\"] = l[14]\n line[\"filtered\"] = l[16]\n line[\"control_bits\"] = l[16]\n line[\"index\"] = l[20]\n index_set.append(l[20])\n else:\n print \"error\", l\n line[\"index\"] = most_common(index_set)\n return line", "def hersheyparse(dat):\n\n lines = []\n\n # individual lines are stored separated by <space>+R\n # starting at col 11\n\n for s in dat[10:].split(\" R\"):\n\n # each line is a list of pairs of coordinates\n # NB: origin is at centre(ish) of character\n # Y coordinates **increase** downwards\n\n assert len(s) % 2 == 0\n line = map(None, *[iter(map(char2val, list(s)))] * 2)\n coords = [char2val(c) for c in s]\n line = list(zip(coords[0::2], coords[1::2]))\n lines.append(line)\n glyph = { # character code in columns 1-6; it's not ASCII\n # indicative number of vertices in columns 6-9 ** NOT USED **\n # left side bearing encoded in column 9\n # right side bearing encoded in column 10\n \"charcode\": int(dat[0:5]),\n \"left\": char2val(dat[8]),\n \"right\": char2val(dat[9]),\n \"lines\": lines,\n }\n return (glyph[\"charcode\"], glyph)", "def lines():\n line_dict = {}\n #\n line_dict['ArI'] = 2**0\n line_dict['HgI'] = 2**1\n line_dict['KrI'] = 2**2\n line_dict['NeI'] = 2**3\n line_dict['XeI'] = 2**4\n line_dict['CdI'] = 2**5\n line_dict['ZnI'] = 2**6\n line_dict['HeI'] = 2**7\n line_dict['OH_R24000'] = 2**8\n line_dict['OH_triplespec'] = 2**9\n line_dict['CuI'] = 2**10\n line_dict['ArII'] = 2**11\n line_dict['OH_XSHOOTER'] = 2**12\n line_dict['OH_GNIRS'] = 2**13\n line_dict['OH_NIRES'] = 2**14\n line_dict['ThAr_XSHOOTER_VIS'] = 2**15\n line_dict['OH_GMOS'] = 2**16\n line_dict['OH_MODS'] = 2**17\n line_dict['ThAr_MagE'] = 2**18 # R=4100\n line_dict['OH_FIRE_Echelle'] = 2**19 # R=6000\n line_dict['Ar_IR_GNIRS'] = 2**20 # R=6000\n line_dict['FeI'] = 2**21\n line_dict['FeII'] = 2**22\n line_dict['UNKNWN'] = 2**23\n line_dict['Ar_IR_MOSFIRE'] = 2 ** 24\n line_dict['Ne_IR_MOSFIRE'] = 2 ** 25\n line_dict['OH_MOSFIRE_Y'] = 2 ** 26\n line_dict['OH_MOSFIRE_J'] = 2 ** 27\n line_dict['OH_MOSFIRE_H'] = 2 ** 28\n line_dict['OH_MOSFIRE_K'] = 2 ** 29\n line_dict['ThAr_XSHOOTER_UVB'] = 2**30\n #\n return line_dict", "def parse_line(line):\n return parse('#{id_:d} @ {x:d},{y:d}: {w:d}x{h:d}', line)", "def extractData():\n for line in src:\n line = str(line) # now each line is a string\n line = line.rstrip()\n if '%%' in line: #header\n yield line\n for desired in desStr:\n if desired in line:\n yield line", "def FormatDiffHunks(hunks):\n r = []\n last_header = None\n for hunk in hunks:\n this_header = hunk.header[0:2]\n if last_header != this_header:\n r.extend(hunk.header)\n last_header = this_header\n else:\n r.extend(hunk.header[2])\n r.extend(hunk.lines)\n r.append(\"\\n\")\n return \"\".join(r)" ]
[ "0.6223655", "0.5699987", "0.56324047", "0.5606089", "0.5450486", "0.54438394", "0.5421206", "0.54067975", "0.53794557", "0.5329467", "0.5280043", "0.5274307", "0.522464", "0.5224071", "0.5206199", "0.51972467", "0.5157682", "0.5144656", "0.5139263", "0.5135846", "0.5124622", "0.51003855", "0.5095914", "0.50939816", "0.5088587", "0.5070031", "0.5054861", "0.50539535", "0.503796", "0.5033613" ]
0.6282997
0
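A minimal, self-contained sketch of the hunk-header extraction described in the row above. The regex, the parse_hunk_header helper, and the sample headers are illustrative assumptions; they stand in for the SAFE_PARSE_HUNK_HEADER pattern used by the original code, which is not shown here.

import re

# Hypothetical stand-in for SAFE_PARSE_HUNK_HEADER: grabs "start,length"
# pairs such as "-685,8" or "+686,14" without keeping the sign.
HUNK_HEADER_PAIRS = re.compile(r"[-+](\d+)(?:,(\d+))?")

def parse_hunk_header(header):
    # "@@ -685,8 +686,14 @@ ..." -> [(685, 8), (686, 14)]
    # Only the part between the surrounding "@@" markers is inspected.
    metadata = header.lstrip("@").split("@", 1)[0]
    return [
        (int(start), int(length or "1"))  # a segment like "-1" means one line
        for start, length in HUNK_HEADER_PAIRS.findall(metadata)
    ]

print(parse_hunk_header("@@ -685,8 +686,14 @@ def foo():"))  # [(685, 8), (686, 14)]
print(parse_hunk_header("@@ -1 +1,2 @@"))                     # [(1, 1), (1, 2)]

The `length or "1"` fallback mirrors the diff convention that a segment written without an explicit length, such as "-1", spans exactly one line.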
Wraps together all actions needed to beautify a string, i.e. parse the string and then stringify the phrases (replace tags with formatting codes).
def beautify(self, string):\n    if not string:\n        return string\n\n    # string may differ because of escaped characters\n    string, phrases = self.parse(string)\n\n    if not phrases:\n        return string\n\n    if not self.positional and not self.always:\n        raise errors.ArgumentError("Found phrases, but no styles "\n                                   "were supplied!")\n\n    return self.stringify(string, phrases)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def beautify(string, *args, **kwargs):\n\n\tparser = Parser(args, kwargs)\n\treturn parser.beautify(string)", "def as_action_str(string: str) -> str:", "def stringify(self, string, phrases, parent=None):\n\n\t\tlast_tag = 0\n\n\t\tbeauty = \"\"\n\n\t\tfor phrase in phrases:\n\n\t\t\tbeauty += string[last_tag : phrase.opening]\n\n\t\t\tif phrase.string in self.always and not phrase.override:\n\t\t\t\tphrase.style = self.always[phrase.string]\n\n\t\t\tif phrase.arguments:\n\t\t\t\tcombination = 0\n\t\t\t\tfor i in phrase.arguments:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tcombination |= self.positional[i]\n\t\t\t\t\texcept IndexError:\n\t\t\t\t\t\traise errors.ArgumentError(\"Positional argument '{0}' \"\n\t\t\t\t\t\t\t \t\t\t\t\t \"is out of range\"\n\t\t\t\t\t\t\t \t\t\t\t\t \"!\".format(i))\n\n\t\t\t\tphrase.style |= combination\n\n\t\t\telif (phrase.string not in self.always or\n\t\t\t\t phrase.increment or phrase.override):\n\t\t\t\ttry:\n\t\t\t\t\tcombination = self.positional[self.counter]\n\n\t\t\t\t\tif phrase.increment or not phrase.override:\n\t\t\t\t\t\tself.counter += 1\n\t\t\t\texcept IndexError:\n\t\t\t\t\tself.raise_not_enough_arguments(phrase.string)\n\n\t\t\t\tphrase.style |= combination\n\n\t\t\tphrase.style = flags.codify(phrase.style)\n\n\t\t\tif phrase.nested:\n\t\t\t\tphrase.string = self.stringify(phrase.string,\n\t\t\t\t\t\t\t\t\t\t\t phrase.nested,\n\t\t\t\t\t\t\t\t\t\t\t phrase)\n\n\t\t\t# After a nested phrase is over, we reset the style to the\n\t\t\t# parent style, this gives the notion of nested styles.\n\t\t\treset = parent.style if parent else \"\"\n\n\t\t\t# \\033[ signifies the start of a command-line escape-sequence\n\t\t\tbeauty += \"\\033[{0}m{1}\\033[0;{2}m\".format(phrase.style,\n\t\t\t\t\t\t\t\t\t\t\t\t\t phrase,\n\t\t\t\t\t\t\t\t\t\t\t\t\t reset)\n\t\t\tlast_tag = phrase.closing + 1\n\n\t\tbeauty += string[last_tag:]\n\n\t\treturn beauty", "def tidy_string(s: str\n ) -> str:\n s = s.encode('ascii', errors='ignore').decode(FORMAT)\n s = s.replace(\"\\r\", \"\").replace(\"\\t\", \"\").replace('\\n', '') \n return s", "def wrap_string(input_str):\r\n return textwrap.wrap(input_str, 80)", "def _transform(func_name):\n\n def wrapped(self, *args, **kwargs):\n replacement_string = _query_super(func_name)(self, *args, **kwargs)\n to_string = []\n char_counter = 0\n for index in range(0, len(self._raw_string)):\n if index in self._code_indexes:\n to_string.append(self._raw_string[index])\n elif index in self._char_indexes:\n to_string.append(replacement_string[char_counter])\n char_counter += 1\n return ANSIString(\n \"\".join(to_string),\n decoded=True,\n code_indexes=self._code_indexes,\n char_indexes=self._char_indexes,\n clean_string=replacement_string,\n )\n\n return wrapped", "def get_processed_string(self, input_string):\n if input_string[:6] == '[sic]\"':\n return input_string[6: -1]\n else:\n return input_string.format(**self)", "def __str__(self) -> str:\r\n return self.process(self.string)", "def amend_str(text):\n text = text.lower()\n text = re.sub(r\"[^A-Za-z0-9^,!.\\/'+-=]\", \" \", text)\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"that's\", \"that is \", text)\n text = re.sub(r\"there's\", \"there is \", text)\n text = re.sub(r\"it's\", \"it is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"can not \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = 
re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\",\", \" \", text)\n text = re.sub(r\"\\.\", \" \", text)\n text = re.sub(r\"!\", \" ! \", text)\n text = re.sub(r\"\\/\", \" \", text)\n text = re.sub(r\"\\^\", \" ^ \", text)\n text = re.sub(r\"\\+\", \" + \", text)\n text = re.sub(r\"\\-\", \" - \", text)\n text = re.sub(r\"\\=\", \" = \", text)\n text = re.sub(r\"'\", \" \", text)\n text = re.sub(r\"(\\d+)(k)\", r\"\\g<1>000\", text)\n text = re.sub(r\":\", \" : \", text)\n text = re.sub(r\" e g \", \" eg \", text)\n text = re.sub(r\" b g \", \" bg \", text)\n text = re.sub(r\" u s \", \" american \", text)\n text = re.sub(r\"\\0s\", \"0\", text)\n text = re.sub(r\" 9 11 \", \"911\", text)\n text = re.sub(r\"e - mail\", \"email\", text)\n text = re.sub(r\"j k\", \"jk\", text)\n text = re.sub(r\"\\s{2,}\", \" \", text)\n\n return text.strip()", "def preprocessing(sample):\n\n content = ' '.join(sample)\n content = re.sub(r\"\\'ll\", \" will\", content)\n content = re.sub(r\"\\'d\", \" would\", content)\n content = re.sub(r\"\\'s\", \" is\", content)\n content = re.sub(r\"\\'m\", \" am\", content)\n content = re.sub(r\"\\'ve\", \" have\", content)\n content = re.sub(r\"\\'re\", \" are\", content)\n content = content.replace('&', 'and')\n content = content.replace('$', '')\n content = content.split()\n return content", "def to_string_wrap(s: str) -> str:\n return f\"to_string({s})\"", "def raw(string):\n string = string or \"\"\n return string.replace(\"{\", \"{{\").replace(\"|\", \"||\")", "def reformat(ctx):\n pass", "def htmlescape(s):\n if isinstance(s, htmltext):\n return s\n else:\n s = stringify(s)\n # inline _escape_string for speed\n s = s.replace(\"&\", \"&amp;\") # must be done first\n s = s.replace(\"<\", \"&lt;\")\n s = s.replace(\">\", \"&gt;\")\n s = s.replace('\"', \"&quot;\")\n return htmltext(s)", "def code(string: str) -> str:\n return f\"`{string}`\"", "def retag_string(self, string, tags):\r\n for (i, tag) in enumerate(tags):\r\n p = '<%s>' % i\r\n string = re.sub(p, tag, string, 1)\r\n return string", "def prettyPrintStringHelper_ (s, stream, indent, pretty_print=True, indent_additive=4):\r\n stream.write(repr(s))", "def fix_args(string):\n # Hide default values\n defs = re.compile('<span class=\"sig-paren\">\\(</span>(?P<args>[^\\)]*)<span class=\"sig-paren\">\\)</span>')\n opts = re.compile('<em class=\"sig-param\">(?P<var>[^=<]*)=(?P<val>[^<]*)</em>')\n \n prefix = ''\n remain = string\n \n match = defs.search(remain)\n while match:\n prefix += remain[:match.start(1)]\n prefargs = ''\n remnargs = remain[match.start(1):match.end(1)]\n optional = opts.search(remnargs)\n count = 0\n while optional:\n prefargs += remnargs[:optional.start(0)]+'<strong>[</strong>'\n prefargs += remnargs[optional.start(0):optional.end(1)]\n prefargs += remnargs[optional.end(2):optional.end(0)]\n remnargs = remnargs[optional.end(0):]\n optional = opts.search(remnargs)\n count += 1\n if count:\n prefargs += '<strong>'+']'*count+'</strong>'\n prefix += prefargs+remnargs\n prefix += remain[match.end(1):match.end(0)]\n remain = remain[match.end(0):]\n match = defs.search(remain)\n return prefix+remain", "def process_string(string: str) -> str:\n\n return string if string else Presenter.DEFAULT", "def _combineFragmentedString (cls, st : String) -> String:\n\n Logging.trace(\">>: %r\", st)\n\n ParseState_inLimbo = 0\n ParseState_inOther = 1\n ParseState_inString = 2\n ParseState_inLiteral = 3\n ParseState_inEscape = 4\n\n parseState = 
ParseState_inLimbo\n result = \"\"\n\n for ch in st:\n # process finite state automaton with three states based\n # on next character in string\n # Logging.trace(\"--: (%d) character: %r\", parseState, ch)\n\n if parseState == ParseState_inLimbo:\n if ch == cls._doubleQuoteCharacter:\n parseState = ParseState_inString\n elif not cls._whiteSpaceCharRegExp.search(ch):\n parseState = ParseState_inLiteral\n result += ch\n elif parseState == ParseState_inString:\n if ch == cls._doubleQuoteCharacter:\n parseState = ParseState_inLimbo\n else:\n result += ch\n parseState = iif(ch == cls._escapeCharacter,\n ParseState_inEscape, parseState)\n elif parseState == ParseState_inLiteral:\n result += ch\n if cls._whiteSpaceCharRegExp.search(ch):\n parseState = ParseState_inLimbo\n elif parseState == ParseState_inEscape:\n result += ch\n parseState = ParseState_inString\n else:\n Assertion.check(False,\n \"bad parse state - %s\" % parseState)\n\n Logging.trace(\"<<: %r\", result)\n return result", "def format_string(s, formatter='minimal'):\n if not callable(formatter):\n formatter = get_formatter_for_name(formatter)\n if formatter is None:\n output = s\n else:\n output = formatter(s)\n return output", "def prepare_input(self, extracted_str):\n\n # Remove withspace\n if self.options['remove_whitespace']:\n optimized_str = re.sub(' +', '', extracted_str)\n else:\n optimized_str = extracted_str\n \n # Remove accents\n if self.options['remove_accents']:\n optimized_str = unidecode(optimized_str)\n\n # specific replace\n for replace in self.options['replace']:\n assert len(replace) == 2, 'A replace should be a list of 2 items'\n optimized_str = optimized_str.replace(replace[0], replace[1])\n\n return optimized_str", "def strip_action_str(string: str) -> str:", "def rest2html(s):\n return core.publish_string(s, writer=html_fragment_writer)", "def texify_str(s):\n\n # the following replacements will get added to whenever needed\n tex_str = s.replace('_',' ')\n\n return tex_str", "def _on_raw(func_name):\n\n def wrapped(self, *args, **kwargs):\n args = list(args)\n try:\n string = args.pop(0)\n if hasattr(string, \"_raw_string\"):\n args.insert(0, string.raw())\n else:\n args.insert(0, string)\n except IndexError:\n # just skip out if there are no more strings\n pass\n result = getattr(self._raw_string, func_name)(*args, **kwargs)\n if isinstance(result, str):\n return ANSIString(result, decoded=True)\n return result\n\n return wrapped", "def wrap_with_span(string, arg):\n words = arg.split(' ')\n\n for word in words:\n if word[-1].lower() == 's':\n word = word[:-1]\n pattern = re.compile(r'\\b({0}[\\w\\d]*)\\b'.format(word), flags=re.I)\n\n for (match) in re.findall(pattern, string):\n string = re.sub(r'{0}'.format(match),\n '<span>{0}</span>'.format(match), string)\n break;\n\n return string.replace('&amp;#x', '&#x')", "def render_string(_str):\n\t\treturn str.encode(_str)", "def clean_str(string):\n\t# string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n\tstring = re.sub(r\"[^a-zA-Z0-9.]\", \" \", string)\n\tstring = re.sub(r\"\\'s\", \" is\", string)\n\tstring = re.sub(r\"\\'ve\", \" have\", string)\n\tstring = re.sub(r\"n\\'t\", \"n not\", string)\n\tstring = re.sub(r\"\\'re\", \" are\", string)\n\tstring = re.sub(r\"\\'d\", \" would\", string)\n\tstring = re.sub(r\"\\'ll\", \" will\", string)\n\tstring = re.sub(r\"\\'m\", \" am\", string)\n\t# string = re.sub(r\",\", \" , \", string)\n\t# string = re.sub(r\"!\", \" ! \", string)\n\t# string = re.sub(r\"\\.\", \" . 
\", string)\n\t# string = re.sub(r\"\\(\", \" ( \", string)\n\t# string = re.sub(r\"\\)\", \" ) \", string)\n\t# string = re.sub(r\"\\?\", \" ? \", string)\n\tstring = re.sub(r\"\\s{2,}\", \" \", string)\n\tstring = re.sub(r\"([A-Za-z0-9][a-z])([A-Z])\", lambda x: x.group(1) + \" \" + x.group(2), string)\n\tstring = re.sub(r\"([A-Za-z0-9][a-z])([A-Z])\", lambda x: x.group(1) + \" \" + x.group(2), string)\n\t# string = re.sub(r\"([A-Za-z0-9]+[a-z])([A-Z][a-z])\", lambda x: x.group(1) + \" \" + x.group(2), string)\n\treturn string.strip().lower()", "def embolden(string):\n return \"<b>\"+string+\"</b>\"" ]
[ "0.7252995", "0.61125165", "0.60799843", "0.5577729", "0.55171436", "0.54867864", "0.5460423", "0.5454511", "0.5451486", "0.5403064", "0.53998154", "0.5373028", "0.5360175", "0.53533274", "0.5329324", "0.53215384", "0.5302686", "0.52969646", "0.525726", "0.5251888", "0.5215348", "0.518868", "0.5188235", "0.5186128", "0.51820153", "0.51594937", "0.5146099", "0.5129173", "0.51255", "0.51251024" ]
0.69769526
1
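A toy sketch of the parse-then-stringify idea from the row above, assuming a deliberately simplified grammar: it replaces flat <...> tags with positional ANSI codes and ignores escaping, nesting, arguments, and the always/positional style tables of the real parser. The style list is invented for the example.

import re

# Invented style table: tag position -> ANSI SGR code (bold, red, underline).
STYLES = ["1", "31", "4"]

def beautify(string, styles=STYLES):
    # Replace each "<phrase>" with "\033[<code>m" + phrase + "\033[0m",
    # assigning styles positionally, much like the counter in the real stringify.
    counter = 0

    def replace(match):
        nonlocal counter
        code = styles[counter % len(styles)]
        counter += 1
        return "\033[{0}m{1}\033[0m".format(code, match.group(1))

    return re.sub(r"<([^<>]+)>", replace, string)

print(repr(beautify("make <this> bold and <that> red")))

Each match consumes the next positional style, which is roughly what the counter-based assignment in the original does when a phrase carries no explicit arguments.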
Parses a string to handle escaped tags and retrieve phrases. This method works recursively to parse nested tags. When escaped tags are found, those are removed from the string. Also argument sequences are removed from the string. The string returned can thus be quite different from the string passed.
def parse(self, string, root=None):\n    phrases = []\n\n    meta = self.meta.search(string)\n\n    while meta:\n        # Save some function calls\n        pos = meta.start()\n\n        if meta.group() == "<":\n            string, child, meta = self.open_phrase(string, pos)\n\n            if child and root:\n                root.nested.append(child)\n            elif child:\n                phrases.append(child)\n\n            # else it was escaped (+ new meta)\n            continue\n\n        elif root:\n            if meta.group() == "(":\n                meta = self.meta.search(string, pos + 1)\n\n                if meta.group() == ")":\n                    string, root, meta = self.handle_arguments(string, root, pos, meta.start())\n                    continue\n\n            elif meta.group() == ">":\n                string, phrase, meta = self.close_phrase(string, root, pos)\n\n                if phrase:\n                    return string, phrase\n\n                # else was escaped (+ new meta)\n                continue\n\n        string, meta = self.escape_meta(string, pos)\n\n    if not root:\n        return string, phrases\n\n    # If this is not the first stack-depth the function should\n    # have returned upon finding a closing tag,\n    # i.e. we should never have gotten here.\n    word = re.search(r"([\w\s]+)(?![\d]*>[\w\s]+>)", string)\n\n    what = "No closing tag found for opening tag"\n\n    if word:\n        what += " after expression '{0}'".format(word.group())\n\n    raise errors.ParseError(what + "!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_tags(s: str) -> List[str]:\n tags = []\n buf = []\n in_quoted = None\n\n for c in s:\n if in_quoted:\n if c == in_quoted:\n in_quoted = None\n else:\n buf.append(c)\n elif c == '\"' or c == '\\'':\n in_quoted = c\n elif c == ',':\n if buf:\n tag = ''.join(buf).strip()\n if tag:\n tags.append(tag)\n buf.clear()\n else:\n buf.append(c)\n\n if buf:\n tag = ''.join(buf).strip()\n if tag:\n tags.append(tag)\n\n return tags", "def parse_tags(tagstring):\n if not tagstring:\n return []\n\n tagstring = force_str(tagstring)\n\n words = []\n buffer = []\n # Defer splitting of non-quoted sections until we know if there are\n # any unquoted commas.\n to_be_split = []\n i = iter(tagstring)\n try:\n while True:\n c = six.next(i)\n if c == '\"':\n if buffer:\n to_be_split.append(''.join(buffer))\n buffer = []\n c = six.next(i)\n while c != '\"':\n buffer.append(c)\n c = six.next(i)\n if buffer:\n word = ''.join(buffer).strip()\n if word:\n words.append(word)\n buffer = []\n else:\n buffer.append(c)\n except StopIteration:\n # If we were parsing an open quote which was never closed treat\n # the buffer as unquoted.\n if buffer:\n to_be_split.append(''.join(buffer))\n if to_be_split:\n for chunk in to_be_split:\n words.extend(split_strip(chunk, settings.TAGGIT_SELECTIZE['DELIMITER']))\n words = list(set(words))\n words.sort()\n return words", "def detag_string(self, string):\r\n counter = itertools.count(0)\r\n count = lambda m: '<%s>' % counter.next()\r\n tags = self.tag_pattern.findall(string)\r\n tags = [''.join(tag) for tag in tags]\r\n (new, nfound) = self.tag_pattern.subn(count, string)\r\n if len(tags) != nfound:\r\n raise Exception('tags dont match:' + string)\r\n return (new, tags)", "def open_phrase(self, string, pos):\n\n\t\t# Check for escaping\n\t\tif string[pos - 1] == \"\\\\\":\n\t\t\t# Remove the escape character\n\t\t\tstring = string[:pos - 1] + string[pos:]\n\n\t\t\t# When removing the escape character, the\n\t\t\t# pos tag index is pushed one back\n\t\t\tpos -= 1\n\n\t\t\t# If the escape character was not itself (double)\n\t\t\t# escaped we can look for the next tag\n\t\t\tif pos == 0 or string[pos - 1] != \"\\\\\":\n\t\t\t\ttag = self.meta.search(string, pos + 1)\n\n\t\t\t\treturn string, None, tag\n\n\t\tchild = Phrase(pos)\n\n\t\tescaped, child = self.parse(string[pos + 1:], child)\n\n\t\tstring = string[:pos + 1] + escaped\n\n\t\ttag = self.meta.search(string, child.closing + 1)\n\n\t\treturn string, child, tag", "def retag_string(self, string, tags):\r\n for (i, tag) in enumerate(tags):\r\n p = '<%s>' % i\r\n string = re.sub(p, tag, string, 1)\r\n return string", "def close_phrase(self, string, root, pos):\n\n\t\t# Whatever is between the opening tag and this closing tag\n\t\tsubstring = string[:pos]\n\n\t\t# Escape-character to escape the closing tag (/>)\n\t\tif substring.endswith(\"\\\\\"):\n\n\t\t\t# Get rid of the escape character either way\n\t\t\tstring = string[:pos - 1] + string[pos:]\n\n\t\t\t# Check if not double-escaped\n\t\t\tif not substring[:-1].endswith(\"\\\\\"):\n\t\t\t\t# pos is now one index passed the closing tag\n\t\t\t\ttag = self.meta.search(string, pos)\n\n\t\t\t\treturn string, None, tag\n\n\t\t\t# Double-escape means this is really supposed to be a\n\t\t\t# closing tag and thus we can return the phrase.\n\t\t\telse:\n\t\t\t\t# The closing position should be in the same scope\n\t\t\t\t# as the scope of the opening position (scope in\n\t\t\t\t# the sense of to which phrase the positions are\n\t\t\t\t# relative to). 
-1 due to the escaped character but\n\t\t\t\t# + 1 because index 0 is phrase.opening + 1\n\t\t\t\troot.closing = root.opening + pos\n\t\t\t\troot.string = string[:pos - 1]\n\t\telse:\n\t\t\troot.closing = root.opening + 1 + pos\n\t\t\troot.string = string[:pos]\n\n\t\treturn string, root, None", "def parse_tags(self, in_str):\n self.projects = []\n self.contexts = []\n for match in TAG_REGEX.finditer(in_str):\n tag_str = match.group(1)\n if tag_str[0] == \"+\":\n self.projects.append(tag_str[1:])\n elif tag_str[0] == \"@\":\n self.contexts.append(tag_str[1:])\n else:\n err_str = \"Unable to parse tag: %s\" % tag_str\n raise ValueError(err_str)\n words = re.sub(TAG_REGEX, \"\", in_str).split()\n return \" \".join(words)", "def handle_arguments(self, string, root, opening, closing):\n\n\t\t# The actual argument string (ignore whitespace)\n\t\targs = string[opening + 1 : closing].replace(\" \", \"\")\n\n\t\t# The argument sequence must be at the start of the phrase\n\t\t# and must match the allowed argument regular expression\n\t\tif opening > 0 or not self.arguments.match(args):\n\n\t\t\tif opening == 0:\n\t\t\t\traise errors.ParseError(\"Invalid argument sequence!\")\n\n\t\t\t# If escape_meta does indeed escape a character and removes\n\t\t\t# a backward slash, the positions 'opening' and 'closing' are no\n\t\t\t# longer valid. escape_meta does a search for the next meta\n\t\t\t# character though, which is then the closing parantheses,\n\t\t\t# so we can use its index value (in the now escaped string)\n\t\t\tstring, meta = self.escape_meta(string, opening)\n\t\t\tstring, meta = self.escape_meta(string, meta.start())\n\n\t\t\treturn string, root, meta\n\n\t\tif \"!\" in args:\n\t\t\troot.override = True\n\t\t\targs = args.replace(\"!\", \"\")\n\n\t\tif \"+\" in args:\n\t\t\troot.increment = True\n\t\t\targs = args.replace(\"+\", \"\")\n\n\t\troot.arguments = [int(i) for i in args.split(\",\") if i]\n\n\t\t# Remove the argument string including parantheses\n\t\tstring = string[closing + 1:]\n\n\t\tmeta = self.meta.search(string)\n\n\t\treturn string, root, meta", "def parse(s):\n return s", "def parse_tags(source):\n unmatched_count = 0\n start_pos = 0\n opened = False\n open_pos = 0\n cur_pos = 0\n\n finished = []\n segments = []\n\n for character in source:\n #scan for mismatched parenthesis:\n if character == '(':\n unmatched_count += 1\n if not opened:\n open_pos = cur_pos\n opened = True\n\n if character == ')':\n unmatched_count -= 1\n\n if opened and unmatched_count == 0:\n clean = source[start_pos:open_pos]\n clean = clean.strip()\n if clean:\n finished.extend(clean.split())\n\n segment = source[open_pos:cur_pos+1]\n #segments.append(segment)\n \n #get rid of bounding parentheses:\n pruned = segment[1:-1]\n group = pruned.split()\n finished.append(group)\n\n opened = False\n start_pos = cur_pos+1\n \n cur_pos += 1\n\n assert unmatched_count == 0\n\n if start_pos != cur_pos:\n #get anything that was left over here\n remainder = source[start_pos:cur_pos].strip()\n finished.extend(remainder.split())\n \n ## #now check on recursion:\n ## for item in segments:\n ## #get rid of bounding parentheses:\n ## pruned = item[1:-1]\n ## if recurse:\n ## results = parse_tags(pruned, recurse)\n ## finished.expand(results)\n ## else:\n ## finished.append(pruned.strip())\n \n return finished", "def remove_tags(raw):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, ' ', raw)\n return cleantext", "def parse (self, phrase):\r\n\r\n if isinstance(phrase,str):\r\n #If the phrase is a string\r\n 
if self.is_simple(phrase):\r\n #EXITS the recursion\r\n if phrase[0:2] == '~~':\r\n return phrase[2:]\r\n #Eliminates negations that cancel each other\r\n return phrase\r\n elif self.bracketed(phrase):\r\n #Eliminate top-level parantheses\r\n return self.parse(phrase[1:-1])\r\n elif phrase[0] == '~':\r\n #If the phrase begins with a negating prefix...\r\n negations,phrase = self.heading_count(phrase)\r\n \r\n if self.bracketed(phrase):\r\n #If the negated phrase is bracketed\r\n if negations % 2 == 1:\r\n subphrase = self.split_into_phrases(phrase[1:-1])\r\n if subphrase[0] != '@': \r\n #De Morgan's Law \r\n return self.parse(['@']+['~'+x for x in subphrase])\r\n else:\r\n #De Morgan's Law\r\n return self.parse(['~'+x for x in subphrase[1:]])\r\n else:\r\n return self.parse(phrase[1:-1])\r\n return self.parse(self.split_into_phrases((negations%2)*'~'+phrase))\r\n \r\n else:\r\n return self.parse(self.split_into_phrases(phrase))\r\n # IF the phrase is a list\r\n if self.all_is_P(phrase,predicate_function=self.is_simple):\r\n #If every terms of the phrase list is simple...\r\n #This prepares for EXIT from recursion\r\n return [self.parse(x) for x in phrase]\r\n return self.parse([self.parse(x) for x in phrase])", "def strip_tags(initial_string):\n result = re.sub('<[^<]+?>', '', initial_string)\n return result", "def strip_tags(tagged_sentences):\n untagged_sentences = []\n for taggedsent in tagged_sentences:\n untaggedsent = ''\n\tfor taggedword in taggedsent.split():\n\t word = re.split('(?<!\\\\\\)\\/', taggedword)[0]\n untaggedsent += word + ' '\n #print untaggedsent\n untagged_sentences.append(untaggedsent)\n return untagged_sentences", "def sub_tag(str):\n bracketed_tag = r\"(\\${\" + tag_start_char + tag_char + r\"*})\"\n m = re.search(bracketed_tag, str)\n if m:\n tag = m.group(1)\n tag = tag[2:len(tag)-1]\n if tag not in tag_xpath:\n raise ConversionError(\"Undefined tag in ${} substitution\", tag)\n single_replace = str[:m.start()] + tag_xpath[tag] + str[m.end():]\n return sub_tag(single_replace)\n else:\n return str", "def removeHtmlTags(self, text):\n sb = []\n text = self.removeHtmlComments(text)\n bits = text.split(u'<')\n sb.append(bits.pop(0))\n tagstack = []\n tablestack = tagstack\n for x in bits:\n m = _tagPattern.match(x)\n if not m:\n continue\n slash, t, params, brace, rest = m.groups()\n t = t.lower()\n badtag = False\n if t in _htmlelements:\n # Check our stack\n if slash:\n # Closing a tag...\n if t in _htmlsingleonly or len(tagstack) == 0:\n badtag = True\n else:\n ot = tagstack.pop()\n if ot != t:\n if ot in _htmlsingleallowed:\n # Pop all elements with an optional close tag\n # and see if we find a match below them\n optstack = []\n optstack.append(ot)\n while True:\n if len(tagstack) == 0:\n break\n ot = tagstack.pop()\n if ot == t or ot not in _htmlsingleallowed:\n break\n optstack.append(ot)\n if t != ot:\n # No match. Push the optinal elements back again\n badtag = True\n tagstack += reversed(optstack)\n else:\n tagstack.append(ot)\n # <li> can be nested in <ul> or <ol>, skip those cases:\n if ot not in _htmllist and t in _listtags:\n badtag = True\n elif t == u'table':\n if len(tablestack) == 0:\n bagtag = True\n else:\n tagstack = tablestack.pop()\n newparams = u''\n else:\n # Keep track for later\n if t in _tabletags and u'table' not in tagstack:\n badtag = True\n elif t in tagstack and t not in _htmlnest:\n badtag = True\n # Is it a self-closed htmlpair? 
(bug 5487)\n elif brace == u'/>' and t in _htmlpairs:\n badTag = True\n elif t in _htmlsingleonly:\n # Hack to force empty tag for uncloseable elements\n brace = u'/>'\n elif t in _htmlsingle:\n # Hack to not close $htmlsingle tags\n brace = None\n else:\n if t == u'table':\n tablestack.append(tagstack)\n tagstack = []\n tagstack.append(t)\n newparams = self.fixTagAttributes(params, t)\n if not badtag:\n rest = rest.replace(u'>', u'&gt;')\n if brace == u'/>':\n close = u' /'\n else:\n close = u''\n sb.append(u'<')\n sb.append(slash)\n sb.append(t)\n sb.append(newparams)\n sb.append(close)\n sb.append(u'>')\n sb.append(rest)\n continue\n sb.append(u'&lt;')\n sb.append(x.replace(u'>', u'&gt;'))\n\n # Close off any remaining tags\n while tagstack:\n t = tagstack.pop()\n sb.append(u'</')\n sb.append(t)\n sb.append(u'>\\n')\n if t == u'table':\n if not tablestack:\n break\n tagstack = tablestack.pop()\n\n return u''.join(sb)", "def _transform_transcript_expected(self, s):\n regex = ''\n start = 0\n\n while True:\n (regex, first_slash_pos, start) = self._escaped_find(regex, s, start, False)\n if first_slash_pos == -1:\n # no more slashes, add the rest of the string and bail\n regex += re.escape(s[start:])\n break\n else:\n # there is a slash, add everything we have found so far\n # add stuff before the first slash as plain text\n regex += re.escape(s[start:first_slash_pos])\n start = first_slash_pos+1\n # and go find the next one\n (regex, second_slash_pos, start) = self._escaped_find(regex, s, start, True)\n if second_slash_pos > 0:\n # add everything between the slashes (but not the slashes)\n # as a regular expression\n regex += s[start:second_slash_pos]\n # and change where we start looking for slashed on the\n # turn through the loop\n start = second_slash_pos + 1\n else:\n # No closing slash, we have to add the first slash,\n # and the rest of the text\n regex += re.escape(s[start-1:])\n break\n return regex", "def parseString(self, s):\n return self.parser.parseString(s)", "def parse(string):\n doc = nlp(string)\n return [str(n) for n in doc.noun_chunks]", "def deserialize_text(text):\n\n # sometimes space after/before XML tags gets removed, we fix that here\n # after\n text = re.sub(r\"</([a-zA-Z\\-]+)>([^$\\,\\.\\;\\<\\n\\s\\(\\)\\]\\[])\", \"</\\\\1> \\\\2\", text)\n # before\n text = re.sub(r\"([\\,\\.\\;\\)a-zA-Z0-9])<([a-zA-Z])\", \"\\\\1 <\\\\2\", text)\n\n text = re.sub(r\"<tr-hint\\s+v=\\\"(.*?)\\\"\\s*>(.*?)</tr-hint>\", deserialize_tr_hint, text)\n text = re.sub(r\"<md-heading\\s+v=\\\"(.*?)\\\"\\s*>(.*?)</md-heading>\", \"\\\\1 \\\\2\", text)\n text = re.sub(r\"<md-list\\s+v=\\\"(.*?)\\\"\\s*>(.*?)</md-list>\", \"\\\\1 \\\\2\", text)\n text = re.sub(r\"<md-it>(.*?)</md-it>\", \"*\\\\1*\", text)\n text = re.sub(r\"<md-strong>(.*?)</md-strong>\", \"**\\\\1**\", text)\n text = re.sub(r\"<md-code>(.*?)</md-code>\", \"`\\\\1`\", text)\n text = re.sub(r\"<md-strong-it>(.*?)</md-strong-it>\", \"***\\\\1***\", text)\n text = re.sub(r\"<md-link\\s+href=\\\"(.*?)\\\"\\s*>(.*?)</md-link>\", deserialize_md_link, text)\n text = re.sub(r\"<md-link>(.*?)</md-link>\", deserialize_md_link, text)\n # ignore needs to be deserialized last, as it can occur encoded e.g. 
in a\n # markdown link that was converted itself...\n text = re.sub(r\"<ignore\\s+v=\\\"(.*?)\\\"\\s*/>\", deserialize_ignore, text)\n text = text.replace(\"&amp;\", \"&\")\n return text", "def parse_string(xml):\n string = \"\"\n # Traverse all the <sentence> elements in the XML.\n dom = XML(xml)\n for sentence in dom(XML_SENTENCE):\n _anchors.clear() # Populated by calling _parse_tokens().\n _attachments.clear() # Populated by calling _parse_tokens().\n # Parse the language from <sentence language=\"\">.\n language = sentence.get(XML_LANGUAGE, \"en\")\n # Parse the token tag format from <sentence token=\"\">.\n # This information is returned in TokenString.tags,\n # so the format and order of the token tags is retained when exporting/importing as XML.\n format = sentence.get(XML_TOKEN, [WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA])\n format = not isinstance(format, basestring) and format or format.replace(\" \",\"\").split(\",\")\n # Traverse all <chunk> and <chink> elements in the sentence.\n # Find the <word> elements inside and create tokens.\n tokens = []\n for chunk in sentence:\n tokens.extend(_parse_tokens(chunk, format))\n # Attach PNP's to their anchors.\n # Keys in _anchors have linked anchor chunks (each chunk is a list of tokens).\n # The keys correspond to the keys in _attachments, which have linked PNP chunks.\n if ANCHOR in format:\n A, P, a, i = _anchors, _attachments, 1, format.index(ANCHOR)\n for id in sorted(A.keys()):\n for token in A[id]:\n token[i] += \"-\"+\"-\".join([\"A\"+str(a+p) for p in range(len(P[id]))])\n token[i] = token[i].strip(\"O-\")\n for p, pnp in enumerate(P[id]):\n for token in pnp: \n token[i] += \"-\"+\"P\"+str(a+p)\n token[i] = token[i].strip(\"O-\")\n a += len(P[id])\n # Collapse the tokens to string.\n # Separate multiple sentences with a new line.\n tokens = [\"/\".join([tag for tag in token]) for token in tokens]\n tokens = \" \".join(tokens)\n string += tokens + \"\\n\"\n # Return a TokenString, which is a unicode string that transforms easily\n # into a plain str, a list of tokens, or a Sentence.\n try:\n if MBSP: from mbsp import TokenString\n return TokenString(string.strip(), tags=format, language=language)\n except:\n return TaggedString(string.strip(), tags=format, language=language)", "def parse_text(text):\n return str(str(text).encode(\"ascii\", \"ignore\")).replace(\"\\\\n\",\"\\n\").replace(\"b'\",\"\")", "def parse_string(cstr):\n ret = ''\n if _RUNNING_PYTHON3 and ULog._disable_str_exceptions:\n ret = _parse_string(cstr, 'ignore')\n else:\n ret = _parse_string(cstr)\n return ret", "def _parse_tags(tags: str):\n return dict(item.split(\":\") for item in shlex.split(tags)) # type: ignore", "def parse_raw(parent, text):\n path, fname = os.path.split(os.path.abspath(parent.filename))\n path = path.split(os.sep)\n parent_tags = []\n parent_stack = [parent]\n # Parent tags for raw/{graphics, objects} are handled later\n if path[-1] == 'init':\n parent_tags = init_filename_parents.get(fname, [])\n for kind, token in tokenize_raw(text):\n if kind == 'Tag':\n contents = token[1:-1]\n if ':' in contents:\n name, value = contents.split(':', 1)\n else:\n name, value = contents, token[0] == '['\n is_parent = False\n for g in parent_tags:\n if fnmatch(name, g):\n is_parent = True\n while (parent_stack[-1].name in final_level_tags or\n any([fnmatch(p.name, g) for p in parent_stack])):\n parent_stack.pop()\n node = DFRawTag(parent_stack[-1], name, value)\n if is_parent:\n parent_stack.append(node)\n if path[-2] == 'raw' and name == 'OBJECT':\n 
parent_tags = object_parents[value]\n elif kind == 'Comment':\n DFRawComment(parent_stack[-1], token)\n else:\n log.e('Unknown raw token while parsing: '+kind)\n raise Exception('Unknown raw token kind: '+kind)", "def _parseSequence(string, delimiter=','):\n if not isinstance(string, str):\n return string\n string = string.strip()\n if string.startswith('[') and string.endswith(']'):\n sequenceType = 'list'\n elif string.startswith('(') and string.endswith(')'):\n sequenceType = 'tuple'\n else:\n return _parseSingle(string)\n \n string = string[1:-1]\n \n tokens = []\n current = []\n \n plev = 0\n blev = 0\n sqopen = False\n dqopen = False\n \n for char in string:\n if char == '[':\n blev += 1\n current.append(char)\n elif char == ']':\n blev -= 1\n current.append(char)\n elif char == '(':\n plev += 1\n current.append(char)\n elif char == ')':\n plev -= 1\n current.append(char)\n elif char == '\"':\n dqopen = not dqopen\n current.append(char)\n elif char == \"'\":\n sqopen = not sqopen\n current.append(char)\n elif (char == delimiter and plev == 0 and blev == 0 and \n not sqopen and not dqopen):\n tokens.append(_parseSequence(''.join(current).strip()))\n current = []\n else:\n current.append(char)\n \n if len(current) > 0:\n tokens.append(_parseSequence(''.join(current)))\n \n if sequenceType == 'tuple':\n tokens = tuple(tokens) \n return tokens", "def refined_text(text):\n import re\n text = text.replace('<e1>','')\n text = text.replace('</e1>','')\n text = text.replace('<e2>','')\n text = text.replace('</e2>','')\n\n text = text[1:-1] # trim quotes\n # text = text.replace('\"','')\n # text = text.replace(',','')\n # text = text.replace('.','')\n # text = text.replace(';','')\n # text = text.replace('`','')\n # text = text.replace('\\'','')\n # text = text.replace('(','')\n # text = text.replace(')','')\n # text = text.replace('/','')\n\n return text", "def parse_interactive_shell_result(text):\n\n def parse_bracketed(i, s):\n '''Parse word features [abc=... def = ...]\n Also manages to parse out features that have XML within them\n '''\n word = None\n attrs = {}\n temp = {}\n # Substitute XML tags, to replace them later\n for i, tag in enumerate(re.findall(r\"(<[^<>]+>.*<\\/[^<>]+>)\", s)):\n temp[\"^^^%d^^^\" % i] = tag\n s = s.replace(tag, \"^^^%d^^^\" % i)\n # Load key-value pairs, substituting as necessary\n for attr, val in re.findall(r\"([^=\\s]*)=([^=\\s]*)\", s):\n if val in temp:\n val = temp[val]\n if attr == 'Text':\n word = val\n else:\n attrs[attr] = val\n return Token(i,\n word,\n attrs.get('PartOfSpeech'),\n attrs.get('Lemma'),\n attrs.get('NamedEntityTag'),\n attrs.get('CharacterOffsetBegin'),\n attrs.get('CharacterOffsetEnd'))\n\n state = STATE_START\n new_sentences = []\n\n if sys.version_info[0] < 3 and isinstance(text, str) or \\\n sys.version_info[0] >= 3 and isinstance(text, bytes):\n text = text.decode('utf-8')\n\n for line in text.split('\\n'):\n line = line.strip()\n\n if line.startswith(\"Sentence #\"):\n if state == STATE_START:\n new_tokens = []\n syntax_tree = []\n new_dependencies = []\n else:\n s = Sentence(new_tokens, syntax_tree, new_dependencies)\n new_sentences.append(s)\n new_tokens = []\n syntax_tree = []\n new_dependencies = []\n state = STATE_TEXT\n\n elif state == STATE_TEXT:\n state = STATE_WORDS\n\n elif state == STATE_WORDS:\n if not line.startswith(\"[Text=\"):\n state = STATE_TREE\n syntax_tree.append(line)\n #raise ParserError('Parse error. 
Could not find \"[Text=\" in: %s' % line)\n for i, s in enumerate(WORD_PATTERN.findall(line)):\n new_tokens.append(parse_bracketed(i, s))\n\n elif state == STATE_TREE:\n if len(line) == 0:\n state = STATE_DEPENDENCY\n syntax_tree = \" \".join(syntax_tree)\n else:\n #print syntax_tree\n syntax_tree.append(line)\n\n elif state == STATE_DEPENDENCY:\n if len(line) == 0:\n state = STATE_COREFERENCE\n else:\n split_entry = re.split(\"\\(|, \", line[:-1])\n if len(split_entry) == 3:\n label, head_hidx, dep_didx = tuple(split_entry)\n hh = head_hidx.rfind('-') # in case word has hyphen\n head, hidx = head_hidx[:hh], head_hidx[hh+1:]\n dh = dep_didx.rfind('-') # in case word has hyphen\n dep, didx = dep_didx[:dh], dep_didx[dh+1:]\n new_dependency = Dep(Head(hidx, head),\n label,\n Dependent(didx, dep))\n new_dependencies.append(new_dependency)\n\n elif state == STATE_COREFERENCE:\n pass\n\n s = Sentence(new_tokens, syntax_tree, new_dependencies)\n new_sentences.append(s)\n return new_sentences", "def Unquote(quoted_string):\n if not quoted_string[0] == '\"' or quoted_string[0] == \"'\":\n return quoted_string\n assert quoted_string[0] == quoted_string[-1]\n return_list = []\n i = 1 # skip initial char\n while i < len(quoted_string) - 1:\n char = quoted_string[i]\n if char == \"\\\\\":\n # quoted section\n assert quoted_string[i + 1] == \"x\"\n return_list.append(chr(int(quoted_string[i + 2:i + 4], 16)))\n i += 4\n else:\n return_list.append(char)\n i += 1\n return \"\".join(return_list)", "def parseStr(s):\n\n return _parseHelper(s.split(\"\\n\"))" ]
[ "0.60158527", "0.5769747", "0.5667436", "0.5622337", "0.5511267", "0.5468278", "0.53972864", "0.53505325", "0.53342503", "0.5252123", "0.5247568", "0.52472585", "0.5242414", "0.5227254", "0.5216621", "0.51727295", "0.5151064", "0.50991243", "0.50976443", "0.507275", "0.5064872", "0.5038158", "0.5032123", "0.502394", "0.50215083", "0.50151515", "0.5008555", "0.5006677", "0.49923044", "0.49762788" ]
0.69416046
0
Checks if a meta character is escaped or else warns about it. If the meta character has an escape character ('\') preceding it, the meta character is escaped. If it does not, a warning is emitted that the user should escape it.
def escape_meta(self, string, pos): # Replace escape character if pos > 0 and string[pos - 1] == "\\": string = string[:pos - 1] + string[pos:] else: warnings.warn("Un-escaped meta-character: '{0}' (Escape" " it with a '\\')".format(string[pos]), Warning) pos += 1 meta = self.meta.search(string, pos) return string, meta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_escape(self):\n bad_str = '''`~!@#$%^&*()_+-={}[]|\\\\;:'\",./<>?\\n\\r\\t '''\n self.run_escape_case(bad_str)", "def test_bogus_escape_not_raised(self):\r\n problem = self.build_problem(answer=u\"\\\\\", case_sensitive=False, regexp=True)\r\n\r\n self.assert_grade(problem, u\"\\\\\", \"incorrect\")\r\n\r\n # right way to search for \\\r\n problem = self.build_problem(answer=u\"\\\\\\\\\", case_sensitive=False, regexp=True)\r\n self.assert_grade(problem, u\"\\\\\", \"correct\")", "def test_regex_doublequotehandling(self):\n with pytest.raises(yaml.scanner.ScannerError) as excinfo:\n DwcaValidator(yaml.load(self.yaml_regexitdouble, Loader=yaml.FullLoader), error_handler=WhipErrorHandler)\n assert \"found unknown escape character 'd'\" in str(excinfo.value)", "def needs_escape(self, string, target_char, quote_count=1):\n\n skip = False\n count = 0\n needs_escape = False\n for c in string:\n if skip:\n skip = False\n continue\n if c == '\\\\':\n skip = True\n elif c == target_char:\n count += 1\n if count == quote_count:\n needs_escape = True\n break\n else:\n count = 0\n return needs_escape", "def quotemeta(text):\n return re.sub(\"(\\W)\", r\"\\\\\\1\", text)", "def test_get_context_dict_escaped_character(self):\n manifest = load_manifest(StringIO(manifest_escaped_parameters))\n context_dict = manifest.get_context_dict()\n assert \"section:escapeme|escaped\" in context_dict\n tools.eq_(\n context_dict[\"section:escapeme|escaped\"],\n \"\\!\\@\\#\\$\\%\\^\\&\\*\\(\\)\\\\\\\"\\\\'\\~\\`\\/\\?\\<\\>\",\n )", "def is_special_text(text):\n return len(text) > 5 and \\\n (text[0:5] == '\"VAR:' or text[0:5] == '\"TER:') and \\\n text[-1] == '\"'", "def special_character(raw_string, force_quote = False):\n if raw_string == \"\":\n return '\"\"'\n\n # Pass through other values, such as None:\n if type(raw_string) not in types.StringTypes:\n return raw_string\n\n # quick bypass if there are no characters to force escapeaping:\n if not force_quote and not _needs_escapeaping_re.search(raw_string):\n return raw_string\n \n if '\"' not in raw_string:\n return '\"%s\"' % (_avert_unallowable(raw_string),)\n\n if \"'\" not in raw_string:\n return \"'%s'\" % (_avert_unallowable(raw_string),)\n\n # If there are both single and double special_characters in the string, we\n # enclose the whole thing in double special_characters and escape double quotes\n # in the original string.\n return '\"%s\"' % (_avert_unallowable(raw_string, True),)", "def masked_by_quotechar(S, quotechar, escapechar, test_char):\n if test_char == \"\":\n return False\n escape_next = False\n in_quotes = False\n i = 0\n while i < len(S):\n s = S[i]\n if s == quotechar:\n if escape_next:\n i += 1\n continue\n if not in_quotes:\n in_quotes = True\n else:\n if i + 1 < len(S) and S[i + 1] == quotechar:\n i += 1\n else:\n in_quotes = False\n elif s == test_char and not in_quotes:\n return False\n elif s == escapechar:\n escape_next = True\n i += 1\n return True", "def test_special_characters(self):\n testString = sanitize('[-;]\\`{\\}')\n self.assertEqual(testString, '_________')", "def _requires_quotes(self, value):\n lc_value = value.lower()\n return (lc_value in self.reserved_words\n or self.illegal_initial_characters.match(value[0])\n or not self.legal_characters.match(unicode(value))\n or (lc_value != value))", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False # pragma: no cover\n cat = 
unicodedata.category(char)\n if cat in (\"Cc\", \"Cf\"):\n return True # pragma: no cover\n return False", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def _is_control(char):\n # These are technically control characters but we count them as whitespace\n # characters.\n if char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False", "def test_symlit_escape():\n return \"\\\"=\\\"\"", "def _ends_in_unescaped_quote(self, string):\n if not string or string[-1] != '\"':\n return False\n for index in range(-2, -len(string)-1, -1):\n if string[index] != '\\\\':\n return index % 2 == 0\n return False", "def test_backslash_and_unicode_regexps(self):\r\n problem = self.build_problem(answer=ur\"5\\\\æ\", case_sensitive=False, regexp=True)\r\n self.assert_grade(problem, u\"5\\æ\", \"correct\")\r\n\r\n problem = self.build_problem(answer=u\"5\\\\\\\\æ\", case_sensitive=False, regexp=True)\r\n self.assert_grade(problem, u\"5\\æ\", \"correct\")", "def __is_quote(cls, char):\n return char in (\"'\", '\"')", "def test_parse_simple_quote_with_carriage_return(self):\n with self.assertRaisesRegexp(Exception, re.escape(\"the quote included a carriage return (0x0d) character\")):\n api.parse_quote(\" Quote with \\r character - Author\", simple_format=True)", "def test_raw_unicode_escape_dashes(self):\n ok = True\n try:\n unicode(b'hey', 'raw_unicode-escape')\n except LookupError:\n ok = False\n\n self.assertTrue(ok, \"dashes and underscores should be interchangable\")", "def _is_control(char):\n if char == '\\t' or char == '\\n' or char == '\\r':\n return False\n cat = unicodedata.category(char)\n if cat.startswith('C'):\n return True\n return False", "def escape_character_in_string(self, a, text):\n logging.debug(\"in escape character \" + text)\n #self.just_read_char()\n self.read_char()\n self.produce(STRING, text)", "def escape(self):\n pass", "def _avert_unallowable(raw_string, escape_double_special_characters=False):\n output = []\n for c in raw_string:\n if c in _caret_escapes:\n output.append(_caret_escapes[c])\n elif escape_double_special_characters and c == '\"':\n output.append('^\"')\n else:\n output.append(c)\n return ''.join(output)", "def is_apostrophe(ch):\n if (ch == '\\'' or ch == '\\u2019' or ch == '\\u02bc'): return True\n return False", "def test_ampersand_properly_escaped(self):\n test_string = \"<p>This contains an ampersand right here '&'</p>\"\n cleaned = sanitizeFeedback(test_string)\n self.assertIn(\"&amp;\", cleaned)", "def isSpecial(ansiCode,string):\n if IS_TERMINAL and not IS_WIN32: return ansiCode+string+ANSI_END\n else: return string", "def _has_non_ascii_characters(data_string):\r\n try:\r\n data_string.encode('ascii')\r\n except UnicodeEncodeError:\r\n return True\r\n\r\n return False", "def test_parse_simple_quote_with_pipe_character(self):\n with self.assertRaisesRegexp(Exception, \"the quote included an embedded pipe character (|)\"):\n api.parse_quote(\" 
Quote with | character - Author\", simple_format=True)" ]
[ "0.6439335", "0.6249847", "0.5932821", "0.5907765", "0.5904062", "0.58322513", "0.5758821", "0.5672039", "0.56712896", "0.5663409", "0.56487757", "0.56399053", "0.5608284", "0.5608284", "0.55967253", "0.55713606", "0.55477995", "0.55150324", "0.54088163", "0.54066384", "0.53713304", "0.53684753", "0.5346597", "0.52638835", "0.5259085", "0.52456003", "0.5243563", "0.51840925", "0.5179421", "0.5179264" ]
0.68352515
0
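The record above describes an escaping check: a meta character counts as escaped when a backslash precedes it, in which case the backslash is dropped; otherwise a warning asks the user to escape it. A minimal standalone sketch of that behaviour is shown below; the `META` pattern and the example characters are illustrative assumptions, not values taken from the dataset.

```python
import re
import warnings

# Illustrative meta-character pattern; the real parser defines its own (assumption).
META = re.compile(r"[<>]")

def escape_meta(string, pos):
    """Remove the backslash escaping the meta character at `pos`, or warn about it."""
    if pos > 0 and string[pos - 1] == "\\":
        # Drop the escape character so only the literal meta character remains.
        string = string[:pos - 1] + string[pos:]
    else:
        warnings.warn("Un-escaped meta-character: {0!r} "
                      "(escape it with a '\\')".format(string[pos]), Warning)
        pos += 1
    # Continue searching for the next meta character after this position.
    return string, META.search(string, pos)

escaped, nxt = escape_meta(r"a \<b> c", 3)
# escaped == "a <b> c"; nxt matches the unescaped '>' that follows
```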
Handles phrase arguments. Sets the override and increment flags if found. Also makes sure that the argument sequence is at the start of the phrase, and otherwise warns about the unescaped meta characters. If the arguments are indeed at the start but do not match the arguments regular expression, an error is raised.
def handle_arguments(self, string, root, opening, closing): # The actual argument string (ignore whitespace) args = string[opening + 1 : closing].replace(" ", "") # The argument sequence must be at the start of the phrase # and must match the allowed argument regular expression if opening > 0 or not self.arguments.match(args): if opening == 0: raise errors.ParseError("Invalid argument sequence!") # If escape_meta does indeed escape a character and removes # a backward slash, the positions 'opening' and 'closing' are no # longer valid. escape_meta does a search for the next meta # character though, which is then the closing parantheses, # so we can use its index value (in the now escaped string) string, meta = self.escape_meta(string, opening) string, meta = self.escape_meta(string, meta.start()) return string, root, meta if "!" in args: root.override = True args = args.replace("!", "") if "+" in args: root.increment = True args = args.replace("+", "") root.arguments = [int(i) for i in args.split(",") if i] # Remove the argument string including parantheses string = string[closing + 1:] meta = self.meta.search(string) return string, root, meta
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pre_argument_parsing(self):\n pass", "def process_verb_arguments(self, verb, verb_args, verb_opts):\n # Add fixed arguments passed in through the decorator to the verb object.\n args = copy.copy(verb_args) + verb.command_arguments\n # Set attributes for required arguments.\n missing = []\n exceptions = []\n iarg = 0\n nargs = verb.get_argument_count()\n for arg in verb.iter_arguments():\n # It's missing if we've exhausted all the arguments before\n # exhausting all the argument specs, unless it's the last argument\n # spec and it's optional.\n if iarg > len(args) or (iarg == len(args) and arg.min_count > 0):\n missing.append((arg.name, arg.help))\n else:\n value = None\n # The last argument can have repeated arguments. If more than\n # one are allowed the values are put into a list.\n if iarg == nargs - 1 and arg.max_count > 1:\n if len(args) - iarg < arg.min_count:\n utility.abort('A minimum of %d %s arguments are required.'\n % (arg.min_count, arg.name.upper()))\n if len(args) - iarg > arg.max_count:\n utility.abort('A maximum of %d %s arguments are allowed.'\n % (arg.max_count, arg.name.upper()))\n # Pass through argument class get() for validation, conversion, etc..\n # Skip bad values and report on them at the end.\n value = []\n for v in args[iarg:]:\n try:\n value.append(arg.get(v))\n except ArgumentException, e:\n exceptions.append(e)\n iarg = len(args)\n elif len(args) > 0:\n # All other arguments are treated as scalars.\n # Pass through argument class get() for validation, conversion, etc..\n try:\n value = arg.get(args[iarg])\n except ArgumentException, e:\n exceptions.append(e)\n iarg += 1\n if value is not None or arg.min_count == 0:\n setattr(verb_opts, arg.name, value)\n # Run the gauntlet of error disclosure. Abort and display usage as appropriate.\n had_errors = 0\n show_usage = False\n if exceptions:\n msg = 'Argument value %s:' % utility.pluralize('error', len(exceptions))\n utility.error(msg, [str(e) for e in exceptions])\n had_errors += 1\n if iarg < len(args):\n self._abort('Extra arguments were provided:', args[iarg:])\n had_errors += 1\n show_usage = True\n if missing:\n fmt = '%%-%ds %%s' % max([len(o) for (o, h) in missing])\n msg = 'Missing required %s:' % utility.pluralize('argument', len(missing))\n utility.error(msg, [fmt % (o.upper(), h) for (o, h) in missing])\n had_errors += 1\n show_usage = True\n if had_errors > 0:\n if show_usage:\n self._abort()\n sys.exit(1)", "def parse(self):\n super(CmdArxSay, self).parse()\n if self.cmdstring == \"say\":\n self.args = \" %s\" % self.args.lstrip()", "def parse_arguments(args):", "def _parse_args(self, prepared_args):\n pass", "def _post_argument_parsing(self):\n pass", "def parse_arguments(self, args, words, start_word_index, scopes,\n arg_data, fields, actions, prefix_matches, command):\n\n if len(args) == 0:\n return [[0, [], scopes, arg_data, fields, actions]]\n\n parse_results = []\n\n arg = args[0]\n\n if _is_string(arg):\n arg = {'token': arg}\n\n remaining_args = args[1:]\n\n arg_scopes = [arg] + scopes\n arg_parse_results = []\n\n # Get the attributes we need from the arg\n # FIXME: Should possibly get rid of the 'data' mechanism\n # and handle it via a custom data handler\n choices = arg.get('choices')\n nested_args = arg.get('args')\n if nested_args:\n # Convert the nested argument list into a choices argument with\n # a single choice, so that we can leverage the code below that\n # handles choices\n if choices:\n raise error.CommandDescriptionError('An argument can\\'t have both '\n '\"choices\" 
and \"args\" attributes', command)\n choices = (nested_args,)\n\n # Determine whether or not this argument is optional.\n # Default to making arguments optional for no commands, except for if\n # it's a choices argument. In that case it will probably be ambiguous\n # about which fields should be reset to the default values, so we just\n # don't try to handle that.\n optional_name = 'optional-for-no' if self.is_no_command else 'optional'\n #optional_default_value = self.is_no_command\n #optional_name = 'optional'\n optional_default_value = False\n #optional = arg.get(optional_name, optional_default_value)\n # FIXME: Disabling the special handling of optional arguments for no\n # command. That's causing spurious completions to be included. Not sure\n # how to fix that right now. Do we really need the special optional\n # handling anyway? Does Cisco actually support that.\n # For example, being able to use \"no ip address\" rather than\n # \"no ip address 192.168.2.2 255.255.255.0\". I haven't actually tried\n # both forms on a Cisco switch to see what it does.\n optional = arg.get(optional_name, optional_default_value)\n\n # Check to see if this arg overrides either the command type or action\n # Note that we don't want to set the \"actions\" variable with the\n # updated actions yet until we know that the current argument\n # actually matched against the command words and wasn't an optional\n # argument that was skipped.\n arg_scopes, arg_actions = self.check_command_type_and_actions(\n arg, arg_scopes, actions)\n\n if choices:\n if not _is_list(choices):\n raise error.CommandDescriptionError('\"choices\" argument must be a list '\n 'or tuple of argument descriptions from which to choose',\n command)\n\n for choice in choices:\n choice_args = _get_choice_args(choice)\n choice_arg_scopes = arg_scopes\n choice_actions = list(arg_actions)\n choice_prefix_matches = list(prefix_matches)\n if isinstance(choice, collections.Mapping):\n choice_arg_scopes = [choice] + choice_arg_scopes\n choice_optional = choice.get(optional_name, False)\n if choice_optional:\n optional = True\n choice_arg_scopes, choice_actions = \\\n self.check_command_type_and_actions(\n choice, choice_arg_scopes, choice_actions)\n choice_arg_data = dict(arg_data)\n choice_fields = list(fields)\n\n choice_parse_results = self.parse_arguments(choice_args,\n words, start_word_index, choice_arg_scopes,\n choice_arg_data, choice_fields, choice_actions,\n choice_prefix_matches, command)\n for choice_parse_result in choice_parse_results:\n words_matched = choice_parse_result[0]\n new_arg_data = choice_parse_result[3]\n # FIXME: Not sure if the code below is the best way to\n # handle things, but the idea is that we want to detect\n # the case where any of the choices in a choice block\n # is composed of all optional arguments. In that case\n # the overall choice block thus becomes optional. The\n # reason we propagate the optional attribute is that if\n # there are multiple choices that consist entirely of\n # optional arguments then we'd get mlutiple redundant\n # matches with exactly the same arg_data and prefix_matches\n # which would lead to an ambiguous command when we \n # process the results at the end. So by not adding a\n # result for each of those cases and instead just adding\n # a single result for the overall choice block.\n # The other thing we need to do is distinguish between\n # optional args and default args which will both lead to\n # cases where words_matched == 0. 
For the default arg\n # case though we will add the match in the nested call\n # since it will have changes to the arg_data which are\n # significant in the processing of the command action.\n # Since we've already added a result, we don't want to\n # set the overall choice to be optional or else again\n # we'll get multiple amibuous results. The way we detect\n # that case is if the arg_data from the parse_result is\n # different than the arg_data that was passed in. So\n # that's why we use the following test.\n if words_matched == 0 and new_arg_data == arg_data:\n # FIXME: I don't think this will work correctly\n # if/when we support default values for args. In that\n # case the choice may have matched 0 words, but it\n # may have updated the arg_data with some default\n # argument values, which we'll if we don't add the\n # parse_result at this point. Need to think more\n # about this.\n optional = True\n else:\n arg_parse_results.append(choice_parse_result)\n else:\n token = arg.get('token')\n field = arg.get('field')\n arg_type = arg.get('type')\n tag = arg.get('tag')\n default = self.get_default_value(arg)\n \n tag_prefix_match = None\n parsed_tag = False\n is_match = True\n words_matched = 0\n results = None\n\n # First try to parse the tag if there is one\n if tag and len(words) > 0:\n word = words[0]\n if tag.lower().startswith(word.lower()):\n if tag.lower() != word.lower():\n tag_prefix_match = [start_word_index+words_matched, tag]\n words_matched += 1\n parsed_tag = True\n else:\n self.handle_parse_error(\"Unexpected argument at \\\"%s\\\"\" % word,\n start_word_index, CommandHandler.UNEXPECTED_TOKEN_PRIORITY, tag)\n is_match = False\n\n # Handle incomplete argument matching\n if is_match:\n if words_matched < len(words):\n word = words[words_matched]\n else:\n self.handle_incomplete_command(arg, arg_scopes,\n arg_data, fields, parsed_tag, command)\n if default:\n word = default\n else:\n self.handle_parse_error(\"Unexpected end of command\",\n start_word_index + words_matched,\n CommandHandler.UNEXPECTED_END_OF_ARGUMENTS_PRIORITY)\n is_match = False\n\n # Handle the argument value\n if is_match:\n if token:\n if token.lower().startswith(word.lower()):\n value = True if arg_type == 'boolean' else token\n results = [(value, token)]\n else:\n self.handle_parse_error(\n \"Unexpected argument at \\\"%s\\\"\" % word,\n start_word_index + words_matched,\n CommandHandler.UNEXPECTED_TOKEN_PRIORITY, token)\n is_match = False\n else:\n # Check that the argument is valid\n try:\n results = validate_argument(arg, word, arg_data, arg_scopes, command)\n except error.ArgumentValidationError, e:\n expected_tokens = e.expected_tokens\n if expected_tokens:\n if _is_string(expected_tokens):\n expected_tokens = (expected_tokens,)\n self.handle_parse_error(str(e),\n start_word_index + words_matched,\n CommandHandler.UNEXPECTED_TOKEN_PRIORITY,\n expected_tokens)\n else:\n self.handle_parse_error(str(e),\n start_word_index + words_matched,\n CommandHandler.VALIDATION_ERROR_PRIORITY)\n is_match = False\n\n if is_match:\n assert results is not None\n assert _is_list(results)\n assert len(results) > 0\n # If we reach here we've successfully matched the word. The word\n # may have come from the commands words or it may have come from\n # the default value for the argument. We only want to bump the\n # words_matched in the former case, which is why we need to check\n # against the length of the words array. 
Note that we don't want\n # to bump words_matched in the code above where we get it from \n # the command words, because then the word offset we pass to\n # handle_parse_error would be off by 1 if the validation fails.\n if words_matched < len(words):\n words_matched += 1\n data = arg.get('data')\n arg_data_handler = _lookup_in_scopes('data-handler', arg_scopes)\n self.handle_first_matched_result(command)\n\n for result in results:\n value, match_token = result\n new_arg_data = dict(arg_data)\n if data:\n new_arg_data.update(data)\n # XXX should the mode passed in here to the handler be\n # the mode of the command, or the current mode ?\n # (mode-of-the-command in case its a higher submode push)\n if arg_data_handler:\n invocation_scope = {\n # FIXME: The 'name' attribute is deprecated. Remove once\n # everything's been converted.\n 'name': field,\n 'field': field,\n 'value': value,\n 'data': new_arg_data,\n 'is-no-command': self.is_no_command,\n 'current-mode-path': bigsh.run.finder.mode_stack.get_current_mode_path(),\n 'current-mode-obj-id': bigsh.run.finder.mode_stack.get_current_mode_obj()\n }\n new_arg_scopes = [invocation_scope] + arg_scopes\n try:\n result = _call_proc(arg_data_handler,\n argument_data_handler_registry, new_arg_scopes,\n command)\n except Exception, e:\n # XXX ought to not manage parameter exceptions for _call_proc\n if debug.cli():\n print _line(), 'Backtrace'\n traceback.print_exc()\n self.handle_parse_error(str(e),\n start_word_index + words_matched,\n CommandHandler.VALIDATION_ERROR_PRIORITY)\n return parse_results\n elif field is not None:\n new_arg_data[field] = value\n\n self.handle_matched_result(command, result, arg_scopes)\n\n # FIXME: Do we still need the separate fields dict?\n # If so, I don't think this is actually correct, since\n # we want fields to not necessarily be kept exactly in\n # sync with arg_data. Need to think about this more.\n new_fields = new_arg_data.keys()\n new_prefix_matches = list(prefix_matches)\n if tag_prefix_match:\n new_prefix_matches.append(tag_prefix_match)\n if len(match_token) > len(word):\n new_prefix_matches.append(\n [start_word_index+words_matched-1, match_token])\n arg_parse_results.append([words_matched, new_prefix_matches,\n arg_scopes, new_arg_data, new_fields, arg_actions])\n\n if optional:\n arg_parse_results.append([0, prefix_matches, scopes,\n arg_data, fields, actions])\n\n for arg_parse_result in arg_parse_results:\n (words_matched, prefix_matches, arg_scopes, arg_data,\n fields, actions) = arg_parse_result\n remaining_words = words[words_matched:]\n remaining_parse_results = self.parse_arguments(\n remaining_args, remaining_words,\n start_word_index + words_matched, scopes, arg_data,\n fields, actions, prefix_matches, command)\n # The first item in each tuple is the words consumed, but\n # that's relative to the remaining args that we passed to\n # it. 
For the parse results from this invocation of\n # parse args we also need to include the counts of the args\n # that we've already parsed plus the args that were parsed\n # for the current choice.\n for parse_result in remaining_parse_results:\n parse_result[0] += words_matched\n# parse_prefix_matches = parse_result[1]\n# for match in parse_prefix_matches:\n# match[0] += words_matched\n parse_result[1] = prefix_matches + parse_result[1]\n parse_results.append(parse_result)\n\n return parse_results", "def add_arguments(self, parser):", "def ParseArguments():\n\t#TODO: check input variable types!\n\t# check for integers ans strings\n\t# check for distance and distance cutoff value: ONLY CERTAIN VALUES ALLOWED\n\targ_parser = argparse.ArgumentParser(description=\"Program to get background distribution matching user input SNPs on the following parameters {MAF, distance to nearest gene, gene density}\")\n\tsubparsers = arg_parser.add_subparsers(dest='subcommand',\n\t\t\t\t\t\t\t\t\t title='subcommands in this script',\n\t\t\t\t\t\t\t\t\t description='valid subcommands. set subcommand after main program required arguments',\n\t\t\t\t\t\t\t\t\t help='You can get additional help by writing <program-name> <subcommand> --help')\n\n\t## Subparsers\n\targ_parser_annotate = subparsers.add_parser('annotate')\n\t#arg_parser_annotate.set_defaults(func=run_annotate)\n\targ_parser_match = subparsers.add_parser('match')\n\t#arg_parser_annotate.set_defaults(func=run_match)\n\n\n\targ_parser.add_argument(\"--user_snps_file\", help=\"Path to file with user-defined SNPs\", required=True) # TODO: make the program read from STDIN via '-'\n\targ_parser.add_argument(\"--output_dir\", help=\"Directory in which output files, i.e. random SNPs will be written\", required=True)\n\t#arg_parser.add_argument(\"--output_dir\", type=ArgparseAdditionalUtils.check_if_writable, help=\"Directory in which output files, i.e. random SNPs will be written\", required=True)\n\targ_parser.add_argument(\"--distance_type\", help=\"ld or kb\", required=True)\n\targ_parser.add_argument(\"--distance_cutoff\", help=\"r2, or kb distance\", required=True)\n\t# NEW: options\n\t#arg_parser.add_argument(\"--status_file\", help=\"Bool (switch, takes no value after argument); if set then logging is ENABLED.\", action='store_true')\n\t#arg_parser.add_argument(\"--status_file\", help=\"If set, a json file will be written. Value should be the a filepath.\")\n\targ_parser.add_argument(\"--web\", help=\"If set, the program will run in web mode. VALUE should be the a filepath to output (temporary) file - usually this will be the session_id. The web mode activates: 1) creating a status_obj and writing it to json file; 2) ENABLE writing a json report file;\")\n\targ_parser.add_argument(\"--NoLogger\", help=\"Bool (switch, takes no value after argument); if set then logging is DISAPLED. 
Logfile will be placed in outputdir.\", action='store_true')\n\n\n\t### MATCH arguments\n\targ_parser_match.add_argument(\"--N_sample_sets\", type=int, help=\"Number of matched SNPs to retrieve\", required=True) # 1000 - \"Permutations?\" TODO: change name to --n_random_snp_sets or --N\n\t#TODO: add argument that describes if ABSOLUTE of PERCENTAGE deviation should be used\n\targ_parser_match.add_argument(\"--max_freq_deviation\", type=int,help=\"Maximal deviation of SNP MAF bin [MAF +/- deviation]\", default=5) # 5\n\targ_parser_match.add_argument(\"--max_distance_deviation\", type=int, help=\"Maximal PERCENTAGE POINT deviation of distance to nearest gene [distance +/- %%deviation])\", default=5) # 20000\n\t#TODO: CHECK THAT max_distance_deviation > 1 %\n\t#TODO: WHY IS max_genes_count_deviation type float!!!!????\n\targ_parser_match.add_argument(\"--max_genes_count_deviation\", type=float, help=\"Maximal PERCENTAGE POINT deviation of genes in locus [gene_density +/- %%deviation]\", default=5) # 0.2\n\targ_parser_match.add_argument(\"--set_file\", help=\"Bool (switch, takes no value after argument); if set then write out set files to rand_set..gz. Default is false\", action='store_true')\n\n\targs = arg_parser.parse_args()\n\n\treturn args", "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def __add_arguments__(cls, parser):", "def treat_options( opts, arg, n_arg, usage_string ) :\n global sent_handler\n global lower_attr\n global input_filetype_ext\n global output_filetype_ext\n\n ctxinfo = util.CmdlineContextInfo(opts)\n util.treat_options_simplest(opts, arg, n_arg, usage_string)\n sent_handler = LowercaserHandler.handle_sentence_simple # default\n \n for o, a in ctxinfo.iter(opts):\n if o == \"--from\":\n input_filetype_ext = a\n elif o == \"--to\":\n output_filetype_ext = a\n elif o in (\"-l\",\"--lemmas\" ) :\n lower_attr = \"lemma\"\n elif o in (\"-a\", \"--algorithm\"):\n algoname = a.lower()\n if algoname == \"simple\" : # Redundant, kept for clarity\n sent_handler = LowercaserHandler.handle_sentence_simple\n elif algoname == \"complex\" :\n sent_handler = LowercaserHandler.handle_sentence_complex\n elif algoname == \"aggressive\" : # Redundant, kept for clarity\n sent_handler = LowercaserHandler.handle_sentence_aggressive\n else :\n ctxinfo.error(\"Bad algorithm name `{name}`\", name=algoname)\n\n elif o == \"-m\":\n ctxinfo.error(\"Deprecated option. Use --from=Moses instead\" )\n elif o == \"-x\":\n ctxinfo.error(\"Deprecated option. \" \\\n \"Use --from=PlainCorpus instead\")\n else:\n raise Exception(\"Bad arg: \" + o)", "def override_if_not_in_args(flag, argument, args):\r\n if flag not in args:\r\n args.extend([flag, argument])", "def process_flags(self):\n self.parse_search_terms(self.search_terms)\n \n # If randomisation is explicitly set, we enable it outright.. 
if not\n # it depends on whether we've provided search terms or not\n if self.force_randomise:\n self.randomise = True\n elif self.search_terms:\n self.randomise = False\n \n if self.update_index:\n self._update_index()\n \n if self.list_only:\n self.music_client = \"echo\" # FIXME: unix-only!\n self.loop_songs = False", "def __init__(self, args, kwargs):\n\n\t\tself.always = kwargs\n\n\t\tself.positional = self.get_flags(args) if args else []\n\n\t\tself.meta = re.compile(r\"[()<>]\")\n\n\t\tself.arguments = re.compile(r\"^(-?\\d,?)+!?$|\"\n\t\t\t \t\t \t\t\t r\"^!?(-?\\d,?)+$|\"\n\t\t\t \t\t\t\t\t r\"^(!\\+?|\\+!?)$\")\n\n\t\t# Used in self.stringify to auto-increment\n\t\t# positional argument positions\n\t\tself.counter = 0", "def _repopulate_required_arguments(self, arguments):\n fixed_arguments = []\n original_arguments = self.arguments.split()\n\n original_argument_index = 0\n argument_index = 0\n\n while original_argument_index < len(original_arguments):\n original_argument = original_arguments[original_argument_index]\n if (argument_index < len(arguments) and\n original_argument == arguments[argument_index]):\n argument_index += 1\n fixed_arguments.append(original_argument)\n elif (original_argument in self.required_arguments or\n original_argument.split('=')[0] in self.required_arguments or\n '\"' in original_argument or \"'\" in original_argument):\n fixed_arguments.append(original_argument)\n\n original_argument_index += 1\n\n return fixed_arguments", "def fix_args(string):\n # Hide default values\n defs = re.compile('<span class=\"sig-paren\">\\(</span>(?P<args>[^\\)]*)<span class=\"sig-paren\">\\)</span>')\n opts = re.compile('<em class=\"sig-param\">(?P<var>[^=<]*)=(?P<val>[^<]*)</em>')\n \n prefix = ''\n remain = string\n \n match = defs.search(remain)\n while match:\n prefix += remain[:match.start(1)]\n prefargs = ''\n remnargs = remain[match.start(1):match.end(1)]\n optional = opts.search(remnargs)\n count = 0\n while optional:\n prefargs += remnargs[:optional.start(0)]+'<strong>[</strong>'\n prefargs += remnargs[optional.start(0):optional.end(1)]\n prefargs += remnargs[optional.end(2):optional.end(0)]\n remnargs = remnargs[optional.end(0):]\n optional = opts.search(remnargs)\n count += 1\n if count:\n prefargs += '<strong>'+']'*count+'</strong>'\n prefix += prefargs+remnargs\n prefix += remain[match.end(1):match.end(0)]\n remain = remain[match.end(0):]\n match = defs.search(remain)\n return prefix+remain", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! 
(\" + argument + \")\")", "def parseArguments(self):\n iterator = iter(sys.argv[1:]) # Skip file name\n for argument in iterator:\n if len(argument) < 2 or argument[:2] != '--':\n self.error('syntax error \"{}\"'.format(argument))\n else:\n def getValueOfArgument(): return next(iterator)\n self.parseArgument(argument[2:], getValueOfArgument)", "def _prepare(self):\n # Customize commandline arguments\n parser = argparse.ArgumentParser()\n self.initArgumentParser(parser, defaults=self.default_binding_overrides)\n self.__options = parser.parse_args()\n self.__bindings.update(args_util.parser_args_to_bindings(self.__options))\n\n self.start_logging()", "def _process_args(self, largs, rargs, values):\n while rargs:\n arg = rargs[0]\n try:\n if arg[0:2] == \"--\" and len(arg) > 2:\n # process a single long option (possibly with value(s))\n # the superclass code pops the arg off rargs\n self._process_long_opt(rargs, values)\n elif arg[:1] == \"-\" and len(arg) > 1:\n # process a cluster of short options (possibly with\n # value(s) for the last one only)\n # the superclass code pops the arg off rargs\n self._process_short_opts(rargs, values)\n else:\n # it's either a non-default option or an arg\n # either way, add it to the args list so we can keep\n # dealing with options\n del rargs[0]\n raise Exception\n except:\n largs.append(arg)", "def map_arguments():\n arguments = {\n '-c': 'ogg',\n '-d': 'no',\n '-q': '4'\n }\n args = sys.argv[:]\n args.pop(0)\n while len(args) > 1:\n if args[0] == '-c' and re.search('^mp3$|^ogg$', args[1]) or \\\n args[0] == '-d' and re.search('^y(es)?$', args[1]) or \\\n args[0] == '-q' and re.search('^[0-9]$', args[1]):\n arguments[args[0]] = args[1]\n args.pop(0)\n args.pop(0)\n else:\n print_help()\n if len(args) == 1:\n print_help()\n return arguments", "def get_args():\n\n # Make argparse object, add description\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawTextHelpFormatter,\n description=textwrap.dedent(\n '''\n summary:\n Takes a VCF file and parses the variants to produce a tab delimited \n variant report.\n '''\n ))\n\n\n # Version info\n parser.add_argument(\n '-v', '--version', action='version', \n version=\n '%(prog)s\\nversion:\\t{}\\nlast updated:\\t{}'.format(\n __version__, __updated__\n ))\n\n\n # Arguments (see help string for full descriptions):\n # REQUIRED: VCF file input\n parser.add_argument(\n 'input', action='store', \n help='Filepath to input VCF file. REQUIRED.'\n )\n\n\n # OPTIONAL: Output folder, defaults to current directory if empty\n parser.add_argument(\n '-O', '--output', action='store', \n help=textwrap.dedent(\n '''\n Filepath to folder where output reports will be saved. \n If missing, defaults to current directory.\n \\n'''\n ))\n\n\n # OPTIONAL: List of preferred transcripts\n parser.add_argument(\n '-t', '--transcripts', action='store', \n help=textwrap.dedent(\n '''\n Filepath to preferred transcripts file. \n\n Must be a tab seperated file with preferred transcripts in the second \n column. If missing, all entries in the preferred transcript column \n will be labelled as 'Unknown'.\n \\n'''\n ))\n\n\n # OPTIONAL: Preferred transcripts strictness\n parser.add_argument(\n '-T', '--transcript_strictness', action='store', default='low', \n help=textwrap.dedent(\n '''\n Strictness of matching while annotating preferred transcripts.\n Default setting is low.\n\n Options: \n\n high - Transcripts must be an exact match. \n e.g. 
NM_001007553.2 and NM_001007553.1 won't match,\n NM_001007553.1 and NM_001007553.1 will.\n\n low - Transcripts will match regardless of the version number. The \n version number is after the . at the end of a transcript \n e.g. NM_001007553.2 and NM_001007553.1 will match.\n \\n'''\n ))\n\n\n # OPTIONAL: either a single BED file or a folder containing BED \n # files, only one of these can be used\n bed_files = parser.add_mutually_exclusive_group()\n\n # Single BED file\n bed_files.add_argument(\n '-b', '--bed', action='store', \n help=textwrap.dedent(\n '''\n Filepath to a single BED file. \n\n The BED file will be applied to the variant report and a seperate\n report saved with the BED file applied. This report will be saved in \n the same output folder as the original variant report, with the BED \n file name added to it.\n Cannot be used together with -B flag.\n \\n'''\n ))\n\n # Multiple BED files\n bed_files.add_argument(\n '-B', '--bed_folder', action='store', \n help=textwrap.dedent(\n '''\n Filepath to folder containing BED files. \n\n Each BED file will be applied to the variant report and a seperate\n report saved with the BED file applied. These reports will be saved in\n a new folder within the output folder, named the same as the input BED\n folder. \n The file names will be the same as the original variant report, with \n the BED file name added to them.\n Cannot be used together with -b flag.\n \\n'''\n ))\n\n\n # OPTIONAL: File containing known variants\n parser.add_argument(\n '-k', '--known_variants', action='store', \n help=textwrap.dedent(\n '''\n Filepath to known variants file. \n\n This is a VCF file containing any known variants and an associated \n classification. The classification will be added to the variant \n report. The VCF must have an annotation named 'Classification' within \n the INFO field for each variant.\n\n Key:\n 0 - Artifact\n 1 - Benign\n 2 - Likely benign\n 3 - VUS\n 4 - Likely pathogenic\n 5 - Pathogenic\n \\n'''\n ))\n\n\n # OPTIONAL: File containing the headers for the report\n parser.add_argument(\n '-c', '--config', action='store', \n help=textwrap.dedent(\n '''\n Filepath to config file. \n\n This is a tab seperated text file containing a number of rows, where \n each row specifies an annotation to be included in the variant report.\n Only annotations included in the config file will be included in the\n variant report.\n The columns in the variant report will be in the same order as the \n order in which the annotations appear in the config file.\n\n Each row contains:\n\n Column 1 - Required. Annotation headers, these must match up with how\n they appear in the VCF (case sensitive).\n\n Column 2 - Required. Location where to find the data within the VCF, \n used to select the correct parsing function.\n options: info, format, vep, filter or pref.\n\n Column 3 - Optional. 
Alternative name for column header.\n\n To make a config file with all available options from a VCF, run:\n vcf_parse -l path_to_input_vcf > config.txt\n \\n'''\n ))\n\n\n # OPTIONAL: Lists all headers in a vcf then exits\n parser.add_argument(\n '-l', '--config_list', action='store_true', \n help=textwrap.dedent(\n '''\n Return a list of all availabile config to the screen, then exit.\n See CONFIG section for usage.\n \\n'''\n ))\n\n\n # OPTIONAL: Filter out any variants where FILTER column is not PASS\n parser.add_argument(\n '-F', '--filter_non_pass', action='store_true', \n help=textwrap.dedent(\n '''\n Filters out any variants where the FILTER annotation is not \n PASS. If missing then there will be no fitering based on the\n FILTER annotation.\n \\n'''\n ))\n\n return parser.parse_args()", "def prepare_arguments(self, parser):\n pass", "def parse_args(self, unknown_args):\n arg_list = list()\n for arg in unknown_args:\n if arg.startswith((\"-\", \"--\")):\n if \".\" not in arg:\n raise Exception(\"All arguments must have a '.' in their name, like 'Robot.setting'\")\n arg_list.append(arg[2:])\n parser.add_argument(arg, type=str)\n opt_args = parser.parse_args(unknown_args)\n for arg in arg_list:\n section, setting = arg.split(\".\")\n self.logger.debug(\"Adding %s, %s from cmd line\" % (section, setting))\n self._add_setting(section, setting, opt_args.__getattribute__(arg))", "def _process_args(self, largs, rargs, values):\n while rargs:\n arg = rargs[0]\n try:\n if arg[0:2] == \"--\" and len(arg) > 2:\n # process a single long option (possibly with value(s))\n # the superclass code pops the arg off rargs\n self._process_long_opt(rargs, values)\n elif arg[:1] == \"-\" and len(arg) > 1:\n # process a cluster of short options (possibly with\n # value(s) for the last one only)\n # the superclass code pops the arg off rargs\n self._process_short_opts(rargs, values)\n else:\n # it's either a non-default option or an arg\n # either way, add it to the args list so we can keep\n # dealing with options\n del rargs[0]\n raise Exception\n except:\n largs.append(arg) # pylint: disable-msg=W0702", "def parse_args(self, args):\n raise Exception(\"Not implemented\")", "def test_compiler_arguments_fist_line(patch, compiler, lines, tree):\n patch.init(StorySyntaxError)\n lines.last.return_value = None\n with raises(StorySyntaxError):\n compiler.arguments(tree, '0')\n error = 'arguments_noservice'\n StorySyntaxError.__init__.assert_called_with(error, tree=tree)", "def set_args() -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser( # type: argparse.ArgumentParser\n description=r'''\n -----------------------------------\n < Pull DNA barcodes from FASTQ files >\n -----------------------------------\n /\n \\ ______/ V`-, /\n } /~~\n /_)^ --,r'\n |b |b\n ''',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n add_help=False\n )\n # Arguments for verbosity and logging\n parser.add_argument( # Verbosity\n '-v',\n '--verbosity',\n dest='verbosity',\n type=str.lower,\n choices=_VERBOSITY_LEVELS,\n default=_VERBOSITY_DEFAULT,\n required=False,\n metavar='verbosity',\n help=\"Set the verbosity level, choose from '%s'; defaults to '%s'\" % (\"', '\".join(_VERBOSITY_LEVELS), _VERBOSITY_DEFAULT)\n )\n parser.add_argument( # Number of cores\n '--parallel',\n dest='num_cores',\n type=_num_cores,\n const=None,\n default=1,\n nargs='?',\n required=False,\n metavar='num jobs',\n help=\"Run %(prog)s in parallel; if passed, can optionally specify the number of jobs to run at once\"\n )\n 
parser.add_argument( # Output directory\n '-o',\n '--output-directory',\n dest='outdirectory',\n type=str,\n default=_OUTDIR_DEFAULT,\n required=False,\n metavar='output directory',\n help=\"Choose where all output files are to be stored; defaults to '%s'\" % _OUTDIR_DEFAULT\n )\n # Input arguments\n inputs = parser.add_argument_group(\n title='input arguments',\n description='Provide inputs for %(prog)s'\n )\n inputs.add_argument( # Forward FASTQ\n '-f',\n '--forward-fastq',\n dest='forward',\n type=str,\n default=None,\n required=True,\n metavar='FORWARD FASTQ',\n help=\"Provide a filepath for the forward/single FASTQ file\"\n )\n inputs.add_argument( # Reverse FASTQ\n '-r',\n '--reverse-fastq',\n dest='reverse',\n type=str,\n default=None,\n required=False,\n metavar='REVERSE FASTQ',\n help=\"Provide a filepath for the optional reverse FASTQ file\"\n )\n inputs.add_argument( # Sample sheet\n '-s',\n '--sample-sheet',\n dest='sample_sheet',\n type=str,\n default=None,\n required=True,\n metavar='SAMPLE SHEET',\n help=\"Provide a filepath for the sample sheet\"\n )\n inputs.add_argument( # Barcodes file\n '-b',\n '--barcodes',\n dest='barcodes',\n type=str,\n required=True,\n default=None,\n metavar='BARCODES',\n help=\"Provide a filepath for the barcodes CSV file\"\n )\n barcodes = parser.add_argument_group(\n title='barcode options',\n description=\"Set parameters for barcode demultiplexing\"\n )\n barcodes.add_argument( # Number of errors allowed\n '-e',\n '--error',\n dest='error',\n type=int,\n default=_ERROR_DEFAULT,\n required=False,\n metavar='ERROR',\n help=\"This is how many mismatches in the barcode we allowed before rejecting, defaults to %s\" % _ERROR_DEFAULT\n )\n return parser", "def _arguments(self, t):\n self.RaiseError(t, \"Arguments should already have been processed\")" ]
[ "0.59499556", "0.56571984", "0.55967647", "0.54486513", "0.5432549", "0.5422066", "0.53706396", "0.5264565", "0.51700866", "0.51585585", "0.5135055", "0.51217204", "0.5113001", "0.5107572", "0.50817484", "0.50805616", "0.50753933", "0.5071565", "0.5038211", "0.50260377", "0.50183463", "0.5011507", "0.49892822", "0.49826363", "0.49764872", "0.4966551", "0.49651086", "0.49477622", "0.49338627", "0.4907723" ]
0.68998414
0
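The record above walks through parsing a leading "(...)" argument group: strip whitespace, validate it against an arguments regular expression, pull out the "!" override and "+" increment flags, and read the remaining comma-separated integers as positional indices. A stripped-down sketch of that flow follows; the `ARGUMENTS` grammar and the `Args` container are assumptions made for the example, not the dataset's own definitions.

```python
import re
from dataclasses import dataclass, field

# Accepts forms such as "1,2", "0!", "!+", "+!" (assumed grammar for this sketch).
ARGUMENTS = re.compile(r"^(-?\d,?)*[!+]{0,2}$")

@dataclass
class Args:
    override: bool = False
    increment: bool = False
    positions: list = field(default_factory=list)

def parse_argument_group(group):
    """Parse the text found between '(' and ')' at the start of a phrase."""
    group = group.replace(" ", "")
    if not ARGUMENTS.match(group):
        raise ValueError("Invalid argument sequence: {0!r}".format(group))
    args = Args()
    if "!" in group:
        args.override = True
        group = group.replace("!", "")
    if "+" in group:
        args.increment = True
        group = group.replace("+", "")
    # Whatever remains is a comma-separated list of positional indices.
    args.positions = [int(i) for i in group.split(",") if i]
    return args

parse_argument_group("0, 2!")  # Args(override=True, increment=False, positions=[0, 2])
```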
Stringifies phrases. After parsing of the string via self.parse(), this method takes the escaped string and the list of phrases returned by self.parse() and replaces the original phrases (with tags) with the Phrase objects in the list and adds the appropriate flag combinations as determined by the string or the position of the phrase (the string if it's in self.always, i.e. an 'always' argument). This method also works recursively to handle nested phrases (and resetting of parent phrase styles).
def stringify(self, string, phrases, parent=None): last_tag = 0 beauty = "" for phrase in phrases: beauty += string[last_tag : phrase.opening] if phrase.string in self.always and not phrase.override: phrase.style = self.always[phrase.string] if phrase.arguments: combination = 0 for i in phrase.arguments: try: combination |= self.positional[i] except IndexError: raise errors.ArgumentError("Positional argument '{0}' " "is out of range" "!".format(i)) phrase.style |= combination elif (phrase.string not in self.always or phrase.increment or phrase.override): try: combination = self.positional[self.counter] if phrase.increment or not phrase.override: self.counter += 1 except IndexError: self.raise_not_enough_arguments(phrase.string) phrase.style |= combination phrase.style = flags.codify(phrase.style) if phrase.nested: phrase.string = self.stringify(phrase.string, phrase.nested, phrase) # After a nested phrase is over, we reset the style to the # parent style, this gives the notion of nested styles. reset = parent.style if parent else "" # \033[ signifies the start of a command-line escape-sequence beauty += "\033[{0}m{1}\033[0;{2}m".format(phrase.style, phrase, reset) last_tag = phrase.closing + 1 beauty += string[last_tag:] return beauty
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def beautify(self, string):\n\n\t\tif not string:\n\t\t\treturn string\n\n\t\t# string may differ because of escaped characters\n\t\tstring, phrases = self.parse(string)\n\n\t\tif not phrases:\n\t\t\treturn string\n\n\t\tif not self.positional and not self.always:\n\t\t\traise errors.ArgumentError(\"Found phrases, but no styles \"\n\t\t\t\t\t\t\t\t\t \"were supplied!\")\n\n\t\treturn self.stringify(string, phrases)", "def phraseMaker(self):\n phrase_lst = []\n phrase = str(self.phrase_ent.get())\n keyword = str(self.keyword_ent.get())\n for i in range(self.city_lbx.size()):\n city = str(self.city_lbx.get(i))\n new_phrase = re.sub(keyword, city, phrase)\n phrase_lst.append(new_phrase)\n return phrase_lst", "def split_into_phrases (self, phrase):\r\n\r\n if not self.contains(phrase,'()'):\r\n\r\n #For a phrase without parantheses\r\n \r\n\r\n if '|' in phrase:\r\n return ['@']+[x for x in phrase.split('|')]\r\n elif '&' in phrase:\r\n return [x for x in phrase.split('&')]\r\n\r\n #If the phrase contains parantheses.\r\n \r\n phrase = list (phrase)\r\n #convert string into a list of chars\r\n level = 0\r\n found = False # if one of the operators is found in the phrase \r\n\r\n for operator in ['#','>','|','&']:\r\n level = 0 # reset level\r\n if not found:\r\n \r\n \r\n for x,char in enumerate(phrase):\r\n if char == '(':\r\n level += 1\r\n if char == ')':\r\n level -=1\r\n # level indicates level within hierarchy established by parantheses\r\n\r\n if level == 0 and x+1 < len(phrase) and phrase[x+1] == operator:\r\n phrase[x+1] = '<<'+operator+'>>'\r\n found = True\r\n break\r\n \r\n \r\n\r\n if '<<&>>' in phrase:\r\n # For AND\r\n phrases = ''.join(phrase).split('<<&>>')\r\n elif '<<|>>' in phrase:\r\n # For OR \r\n phrases = ['@']+''.join(phrase).split('<<|>>')\r\n elif '<<>>>' in phrase:\r\n # For INFERENCE \r\n premise = ''.join(phrase).split('<<>>>')[0]\r\n conclusion = ''.join(phrase).split('<<>>>')[1]\r\n phrases = ['@','~'+premise,conclusion]\r\n # A => B translated as ~A OR B\r\n elif '<<#>>' in phrase:\r\n # FOR EQUIVALENCY \r\n premise = ''.join(phrase).split('<<#>>')[0]\r\n conclusion = ''.join(phrase).split('<<#>>')[1]\r\n \r\n phrase1 = '~'+'('+premise+'&'+'~'+conclusion+')'\r\n phrase2 = '~'+'('+conclusion+'&'+'~'+premise+')'\r\n phrases = [phrase1,phrase2]\r\n # A<>B translated as (~A or B) & (~B or A) \r\n \r\n return [x for x in phrases]", "def parse (self, phrase):\r\n\r\n if isinstance(phrase,str):\r\n #If the phrase is a string\r\n if self.is_simple(phrase):\r\n #EXITS the recursion\r\n if phrase[0:2] == '~~':\r\n return phrase[2:]\r\n #Eliminates negations that cancel each other\r\n return phrase\r\n elif self.bracketed(phrase):\r\n #Eliminate top-level parantheses\r\n return self.parse(phrase[1:-1])\r\n elif phrase[0] == '~':\r\n #If the phrase begins with a negating prefix...\r\n negations,phrase = self.heading_count(phrase)\r\n \r\n if self.bracketed(phrase):\r\n #If the negated phrase is bracketed\r\n if negations % 2 == 1:\r\n subphrase = self.split_into_phrases(phrase[1:-1])\r\n if subphrase[0] != '@': \r\n #De Morgan's Law \r\n return self.parse(['@']+['~'+x for x in subphrase])\r\n else:\r\n #De Morgan's Law\r\n return self.parse(['~'+x for x in subphrase[1:]])\r\n else:\r\n return self.parse(phrase[1:-1])\r\n return self.parse(self.split_into_phrases((negations%2)*'~'+phrase))\r\n \r\n else:\r\n return self.parse(self.split_into_phrases(phrase))\r\n # IF the phrase is a list\r\n if self.all_is_P(phrase,predicate_function=self.is_simple):\r\n #If every terms 
of the phrase list is simple...\r\n #This prepares for EXIT from recursion\r\n return [self.parse(x) for x in phrase]\r\n return self.parse([self.parse(x) for x in phrase])", "def textinterpret(self,\r\n phrase,\r\n depth=0,\r\n re_entering=False,\r\n newindex=Index(-1)):\r\n\r\n if len(phrase) > 3:\r\n if phrase[0] == LEFTNOTE and phrase[-1] == RIGHTNOTE and len(phrase) > 1:\r\n phrase = phrase[1:-1]\r\n #eliminate enclosing brackets\r\n keylist = self.pass_key_dict[depth][0]\r\n addedlist = self.pass_key_dict[depth][1]\r\n #list to keep track of new key words added on\r\n\r\n if phrase[0] == ATSIGN:\r\n # at sign signs enclose an index\r\n right_at = True\r\n as_child = False\r\n index_phrase = phrase.split(ATSIGN)[1]\r\n index = Index(index_phrase)\r\n\r\n phrase = phrase.replace(ATSIGN+index_phrase+ATSIGN, EMPTYCHAR)\r\n # eliminates index phrase\r\n\r\n elif phrase[0] == PERCENTAGE:\r\n # percentage signs enclose a child index\r\n right_at = True\r\n as_child = True\r\n index_phrase = phrase.split(PERCENTAGE)[1]\r\n index = Index(index_phrase)\r\n\r\n phrase = phrase.replace(PERCENTAGE+index_phrase+PERCENTAGE, EMPTYCHAR)\r\n #eliminates index phrase\r\n\r\n elif phrase[0] == '\"':\r\n #for a child note\r\n phrase = phrase[1:]\r\n\r\n right_at = False\r\n as_child = True\r\n as_next = False\r\n\r\n index = self.index_sort([Index(0)]\r\n +[a_temp for a_temp\r\n in self.find_within(Index(0),\r\n Index(1),\r\n orequal=False)],\r\n by_date=False,\r\n quick=False)[-1]\r\n\r\n elif phrase[0] == \"'\":\r\n #for a next note\r\n\r\n phrase = phrase[1:]\r\n as_next = True\r\n as_child = False\r\n right_at = True\r\n index = self.index_sort([Index(0)]+[a_temp for a_temp\r\n in self.find_within(Index(0),\r\n Index(1),\r\n orequal=False)],\r\n by_date=False,\r\n quick=False)[-1]\r\n\r\n elif phrase[0] == \";\":\r\n # to go back to the previous level and add a next note\r\n phrase = phrase[1:]\r\n as_next = True\r\n as_child = False\r\n right_at = True\r\n index = self.index_sort([Index(0)]\r\n +[a_temp for a_temp\r\n in self.find_within(Index(0),\r\n Index(1),\r\n orequal=False)],\r\n by_date=False,\r\n quick=False)[-1]\r\n index = Index(index)\r\n index = index.previous()\r\n# index = str(index)\r\n\r\n\r\n elif phrase[0] not in [DOLLAR, DASH, PLUS, STAR]:\r\n # for an ordinary note\r\n\r\n j_temp = Index(int(Index(self.indexes()[-1])))\r\n # Procedure for moving notes out of the ZERO range\r\n for i_temp in self.find_within(Index(0), Index(1)):\r\n # j_temp is the next integer index\r\n self.move(i_temp, j_temp+Index(i_temp))\r\n\r\n right_at = False\r\n as_child = False\r\n as_next = False\r\n index = Index(0)\r\n\r\n if phrase[0] == DOLLAR:\r\n #new keyword set\r\n keylist = []\r\n if len(phrase) > 1:\r\n keylist += phrase[1:].split(COMMA)\r\n elif phrase[0] == PLUS:\r\n #add keyword set to existing\r\n if len(phrase) > 1:\r\n for k_temp in phrase[1:].split(COMMA):\r\n keylist.append(k_temp)\r\n addedlist.append(len(phrase[1:].split(COMMA)))\r\n\r\n elif phrase[0] == DASH:\r\n #delete keyword\r\n if addedlist and len(keylist) > addedlist[-1]:\r\n for a_temp in range(1, addedlist[-1]+1):\r\n keylist.pop()\r\n addedlist.pop()\r\n\r\n elif phrase[0] == STAR:\r\n #adds a single note with new keys,\r\n #yet without erasing the old keyset.\r\n # NEED TO CHECK IF THIS FUNCTION WORKS\r\n\r\n ks_temp = set(phrase[1:].split(SEMICOLON)[0].split(COMMA))\r\n ks_temp.update(extract.extract(phrase.split(SEMICOLON, 1)[1],\r\n LEFTCURLY,\r\n RIGHTCURLY))\r\n newindex = self.addnew(ks_temp,\r\n 
phrase.split(SEMICOLON, 1)[1])\r\n else:\r\n\r\n if not flatten.isflat(keylist):\r\n keylist = flatten.flatten(keylist)\r\n ks_temp = set(keylist)\r\n meta = {}\r\n if LEFTCURLY in phrase:\r\n ks_temp.update(extract.extract(phrase,\r\n LEFTCURLY,\r\n RIGHTCURLY))\r\n # extracts keywords that are enclosed\r\n #in curly brackets within the text\r\n if '^:' in phrase:\r\n metadatalist = extract.extract(phrase, '^:', ':^')\r\n # extract metadata\r\n\r\n for md_temp in metadatalist:\r\n #assigns metadata\r\n if VERTLINE in md_temp and len(md_temp.split(VERTLINE)) >= 2:\r\n if md_temp.split(VERTLINE)[1] == 'S':\r\n meta[md_temp.split(VERTLINE)[0]] = str(md_temp.split(VERTLINE)[2])\\\r\n .replace('\"'+\"'\",\"'\")\\\r\n .replace(\"'\"+'\"',\"'\")\r\n if md_temp.split(VERTLINE)[1] == 'I':\r\n meta[md_temp.split(VERTLINE)[0]] = int(md_temp.split(VERTLINE)[2])\r\n if md_temp.split(VERTLINE)[1] == 'L':\r\n meta[md_temp.split(VERTLINE)[0]] = [x_temp.replace('\"'+\"'\",\"'\")\\\r\n .replace(\"'\"+'\"',\"'\") for x_temp in\r\n md_temp.split(VERTLINE)[2][1:-1].split(COMMA)]\r\n phrase = nformat.remove_between(phrase, '^:', ':^')\r\n newindex = self.enter(ks_temp,\r\n phrase,\r\n meta,\r\n query=False,\r\n not_parsing=False,\r\n right_at=right_at,\r\n as_child=as_child,\r\n ind=str(index),\r\n re_entering=re_entering)\r\n self.pass_key_dict[depth][0] = keylist\r\n self.pass_key_dict[depth][1] = addedlist\r\n return newindex", "def _combineFragmentedString (cls, st : String) -> String:\n\n Logging.trace(\">>: %r\", st)\n\n ParseState_inLimbo = 0\n ParseState_inOther = 1\n ParseState_inString = 2\n ParseState_inLiteral = 3\n ParseState_inEscape = 4\n\n parseState = ParseState_inLimbo\n result = \"\"\n\n for ch in st:\n # process finite state automaton with three states based\n # on next character in string\n # Logging.trace(\"--: (%d) character: %r\", parseState, ch)\n\n if parseState == ParseState_inLimbo:\n if ch == cls._doubleQuoteCharacter:\n parseState = ParseState_inString\n elif not cls._whiteSpaceCharRegExp.search(ch):\n parseState = ParseState_inLiteral\n result += ch\n elif parseState == ParseState_inString:\n if ch == cls._doubleQuoteCharacter:\n parseState = ParseState_inLimbo\n else:\n result += ch\n parseState = iif(ch == cls._escapeCharacter,\n ParseState_inEscape, parseState)\n elif parseState == ParseState_inLiteral:\n result += ch\n if cls._whiteSpaceCharRegExp.search(ch):\n parseState = ParseState_inLimbo\n elif parseState == ParseState_inEscape:\n result += ch\n parseState = ParseState_inString\n else:\n Assertion.check(False,\n \"bad parse state - %s\" % parseState)\n\n Logging.trace(\"<<: %r\", result)\n return result", "def retag_string(self, string, tags):\r\n for (i, tag) in enumerate(tags):\r\n p = '<%s>' % i\r\n string = re.sub(p, tag, string, 1)\r\n return string", "def substitute_words(self, string):\n condensed_string = '_'.join(string.split())\n\n for word in self.math_words['words']:\n condensed_string = re.sub(\n '_'.join(word.split(' ')),\n self.math_words['words'][word],\n condensed_string\n )\n\n for number in self.math_words['numbers']:\n condensed_string = re.sub(\n number,\n str(self.math_words['numbers'][number]),\n condensed_string\n )\n\n for scale in self.math_words['scales']:\n condensed_string = re.sub(\n '_' + scale,\n ' ' + self.math_words['scales'][scale],\n condensed_string\n )\n\n condensed_string = condensed_string.split('_')\n for chunk_index in range(0, len(condensed_string)):\n value = ''\n\n try:\n value = str(eval(condensed_string[chunk_index]))\n\n 
condensed_string[chunk_index] = value\n except:\n pass\n\n for chunk_index in range(0, len(condensed_string)):\n condensed_chunk = condensed_string[chunk_index]\n if self.is_integer(condensed_chunk) or self.is_float(condensed_chunk):\n i = 1\n start_index = chunk_index\n end_index = -1\n while (chunk_index + i < len(condensed_string) and (self.is_integer(condensed_string[chunk_index + i]) or self.is_float(condensed_string[chunk_index + i]))):\n end_index = chunk_index + i\n i += 1\n\n for sub_chunk in range(start_index, end_index):\n condensed_string[sub_chunk] += ' +'\n\n condensed_string[start_index] = '( ' + condensed_string[start_index]\n condensed_string[end_index] += ' )'\n\n return ' '.join(condensed_string)", "def multi_replace(stringlike, pettern_to_replacement_dict):\n string = str(stringlike)\n for pattern, replacement in pettern_to_replacement_dict.items():\n string = string.replace(pattern, replacement)\n return string", "def phrases(string):\n\n string = upper(string) # pass the string to the upper function to capitalize it\n string_list = string.split() # split the string by spaces into a list\n\n for i in range(len(string_list)): # Itterate over words in the list\n if len(string_list) >= 3: # Edgecase (if userinput is too short)\n if string_list[i] == 'BY' and string_list[i+1] == 'THE' and string_list[i+2] == 'WAY': # Check if the string \"BY THE WAY\" appears in the list\n string_list[i] = 'BTW' # Change the first occurence in list to 'BTW'\n string_list[i+1] = '' # Change the second occurence in list to an empty character\n string_list[i+2] = '' # Change the third occurence in list to an empty character\n\n if string_list[i] == 'YOU' and string_list[i+1] == 'ARE' and string_list[i+2] == 'WELCOME': # Check if the string \"YOU ARE WELCOME\" appears in the list\n string_list[i] = 'YW' # Change the first occurence in list to 'YW'\n string_list[i+1] = '' # Change the second occurence in list to an empty character\n string_list[i+2] = '' # Change the third occurence in list to an empty character\n\n if string_list[i] == 'ON' and string_list[i+1] == 'MY' and string_list[i+2] == 'WAY': # Check if the string \"ON MY WAY\" appears in the list\n string_list[i] = 'OMW' # Change the first occurence in list to 'OMW'\n string_list[i+1] = '' # Change the second occurence in list to an empty character\n string_list[i+2] = '' # Change the third occurence in list to an empty character\n\n elif len(string_list) >= 2: # Edgecase (if userinput is too short)\n if string_list[i] == 'NO' and string_list[i+1] == 'PROBLEM': # Check if the string \"NO PROBLEM\" appears in the list\n string_list[i] = 'NP' # Change the first occurence in list to 'NP'\n string_list[i+1] = '' # Change the second occurence in list to an empty character\n \n new_string_list = [] # New empty array to store words without the empty characters\n for word in string_list: # Itterate over words in the list\n if word != '': # if the word is not an empty character\n new_string_list.append(word) # append the word to the new list\n \n new_string = ' '.join(new_string_list) # Join the list to a string\n return new_string # Return the string", "def open_phrase(self, string, pos):\n\n\t\t# Check for escaping\n\t\tif string[pos - 1] == \"\\\\\":\n\t\t\t# Remove the escape character\n\t\t\tstring = string[:pos - 1] + string[pos:]\n\n\t\t\t# When removing the escape character, the\n\t\t\t# pos tag index is pushed one back\n\t\t\tpos -= 1\n\n\t\t\t# If the escape character was not itself (double)\n\t\t\t# escaped we can look for the next 
tag\n\t\t\tif pos == 0 or string[pos - 1] != \"\\\\\":\n\t\t\t\ttag = self.meta.search(string, pos + 1)\n\n\t\t\t\treturn string, None, tag\n\n\t\tchild = Phrase(pos)\n\n\t\tescaped, child = self.parse(string[pos + 1:], child)\n\n\t\tstring = string[:pos + 1] + escaped\n\n\t\ttag = self.meta.search(string, child.closing + 1)\n\n\t\treturn string, child, tag", "def _postprocess(\n self,\n tags: List[str],\n words: List[str],\n pos: bool = False,\n ):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)", "def parse(self, string, root=None):\n\n\t\tphrases = []\n\n\t\tmeta = self.meta.search(string)\n\n\t\twhile meta:\n\n\t\t\t# Save some function calls\n\t\t\tpos = meta.start()\n\n\t\t\tif meta.group() == \"<\":\n\t\t\t\tstring, child, meta = self.open_phrase(string, pos)\n\n\t\t\t\tif child and root:\n\t\t\t\t\troot.nested.append(child)\n\t\t\t\telif child:\n\t\t\t\t\tphrases.append(child)\n\n\t\t\t\t# else it was escaped (+ new meta)\n\t\t\t\tcontinue\n\n\t\t\telif root:\n\n\t\t\t\tif meta.group() == \"(\":\n\t\t\t\t\tmeta = self.meta.search(string, pos + 1)\n\t\t\t\t\tif meta.group() == \")\":\n\t\t\t\t\t\tstring, root, meta = self.handle_arguments(string,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t root,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t pos,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t meta.start())\n\t\t\t\t\t\tcontinue\n\n\t\t\t\telif meta.group() == \">\":\n\t\t\t\t\tstring, phrase, meta = self.close_phrase(string,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t root,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t pos)\n\t\t\t\t\tif phrase:\n\t\t\t\t\t\treturn string, phrase\n\n\t\t\t\t\t# else was escaped (+ new meta)\n\t\t\t\t\tcontinue\n\n\t\t\tstring, meta = self.escape_meta(string, pos)\n\n\t\tif not root:\n\t\t\treturn string, phrases\n\n\t\t# If this is not the first stack-depth the function should\n\t\t# have returned upon finding a closing tag,\n\t\t# i.e. we should never have gotten here.\n\t\tword = re.search(r\"([\\w\\s]+)(?![\\d]*>[\\w\\s]+>)\", string)\n\n\t\twhat = \"No closing tag found for opening tag\"\n\n\t\tif word:\n\t\t\twhat += \" after expression '{0}'\".format(word.group())\n\n\t\traise errors.ParseError(what + \"!\")", "def encode(self, text):\n if self.verbatim:\n return text\n # compile the regexps once. do it here so one can see them.\n #\n # first the braces.\n if not self.__dict__.has_key('encode_re_braces'):\n self.encode_re_braces = re.compile(r'([{}])')\n text = self.encode_re_braces.sub(r'{\\\\\\1}',text)\n if not self.__dict__.has_key('encode_re_bslash'):\n # find backslash: except in the form '{\\{}' or '{\\}}'.\n self.encode_re_bslash = re.compile(r'(?<!{)(\\\\)(?![{}]})')\n # then the backslash: except in the form from line above:\n # either '{\\{}' or '{\\}}'.\n text = self.encode_re_bslash.sub(r'{\\\\textbackslash}', text)\n\n # then dollar\n text = text.replace(\"$\", '{\\\\$}')\n if not ( self.literal_block or self.literal or self.mathmode ):\n # the vertical bar: in mathmode |,\\vert or \\mid\n # in textmode \\textbar\n text = text.replace(\"|\", '{\\\\textbar}')\n text = text.replace(\"<\", '{\\\\textless}')\n text = text.replace(\">\", '{\\\\textgreater}')\n # then\n text = text.replace(\"&\", '{\\\\&}')\n # the ^:\n # * verb|^| does not work in mbox.\n # * mathmode has wedge. 
hat{~} would also work.\n # text = text.replace(\"^\", '{\\\\ensuremath{^\\\\wedge}}')\n text = text.replace(\"^\", '{\\\\textasciicircum}')\n text = text.replace(\"%\", '{\\\\%}')\n text = text.replace(\"#\", '{\\\\#}')\n text = text.replace(\"~\", '{\\\\textasciitilde}')\n # Separate compound characters, e.g. \"--\" to \"-{}-\". (The\n # actual separation is done later; see below.)\n separate_chars = '-'\n if self.literal_block or self.literal:\n # In monospace-font, we also separate \",,\", \"``\" and \"''\"\n # and some other characters which can't occur in\n # non-literal text.\n separate_chars += ',`\\'\"<>'\n # pdflatex does not produce doublequotes for ngerman.\n text = self.babel.double_quotes_in_tt(text)\n if self.font_encoding == 'OT1':\n # We're using OT1 font-encoding and have to replace\n # underscore by underlined blank, because this has\n # correct width.\n text = text.replace('_', '{\\\\underline{ }}')\n # And the tt-backslash doesn't work in OT1, so we use\n # a mirrored slash.\n text = text.replace('\\\\textbackslash', '\\\\reflectbox{/}')\n else:\n text = text.replace('_', '{\\\\_}')\n else:\n text = self.babel.quote_quotes(text)\n text = text.replace(\"_\", '{\\\\_}')\n for char in separate_chars * 2:\n # Do it twice (\"* 2\") becaues otherwise we would replace\n # \"---\" by \"-{}--\".\n text = text.replace(char + char, char + '{}' + char)\n if self.insert_newline or self.literal_block:\n # Insert a blank before the newline, to avoid\n # ! LaTeX Error: There's no line here to end.\n text = text.replace(\"\\n\", '~\\\\\\\\\\n')\n elif self.mbox_newline:\n if self.literal_block:\n closings = \"}\" * len(self.literal_block_stack)\n openings = \"\".join(self.literal_block_stack)\n else:\n closings = \"\"\n openings = \"\"\n text = text.replace(\"\\n\", \"%s}\\\\\\\\\\n\\\\mbox{%s\" % (closings,openings))\n # lines starting with \"[\" give errors.\n text = text.replace('[', '{[}')\n if self.insert_none_breaking_blanks:\n text = text.replace(' ', '~')\n if self.latex_encoding != 'utf8':\n text = self.unicode_to_latex(text)\n return text", "def _postprocess(self, tags: List[str], words: List[str], pos: List[str]):\n result = list()\n\n i = 0\n for tag in tags:\n if (\"<\" not in tag) and (\">\" not in tag):\n if pos:\n result.append(f\"{words[i]}/{pos[i]}\")\n else:\n result.append(words[i])\n i += 1\n else:\n result.append(tag)\n\n return \" \".join(result)", "def pretty_str(self,print_option=PrintOption()):\n\n po = print_option.clone()\n po.is_canonical = self.is_canonical\n po.grammar = self\n\n token_rules = set()\n\n # Look for defined rules that look better as absorbed into their uses.\n for name, rule in self.rules.items():\n # Star-able is also optional-able, so starrable must come first.\n starred_phrase = rule.as_starred(name)\n if starred_phrase is not None:\n po.replace_with_starred[name] = starred_phrase\n continue\n optional_phrase = rule.as_optional()\n if optional_phrase is not None:\n po.replace_with_optional[name] = optional_phrase\n continue\n options = rule.as_container()\n if len(options)==1:\n phrase = options[0].as_container()\n if len(phrase)==1 and phrase[0].is_token():\n token_rules.add(name)\n\n # A rule that was generated to satisfy canonicalization is better\n # presented as absorbed in its original parent.\n for name, rule in self.rules.items():\n # We only care about rules generated during canonicalization\n if name.find('.') > 0 or name.find('/') > 0:\n options = rule.as_container()\n if len(options) != 2:\n continue\n if 
any([len(x.as_container())!=1 for x in options]):\n continue\n if any([(not x.as_container()[0].is_symbol_name()) for x in options]):\n continue\n # Rule looks like A -> X | Y\n po.replace_with_nested[name] = rule\n\n parts = []\n for key in sorted(self.rules):\n if key == LANGUAGE:\n # This is synthetic, for analysis\n continue\n rule_content = self.rules[key].pretty_str(po)\n if key in po.replace_with_optional:\n continue\n if key in po.replace_with_starred:\n continue\n if key in po.replace_with_nested:\n continue\n if (not po.print_terminals) and (key in token_rules):\n continue\n space = \"\" if po.multi_line_choice else \" \"\n if po.bikeshed:\n key_content = \" <dfn for='recursive descent syntax'>{}</dfn>\".format(key)\n content = \"<div class='syntax' noexport='true'>\\n{}:\\n{}\\n</div>\".format(key_content,rule_content)\n else:\n content = \"{}:{}{}\".format(key,space,rule_content)\n parts.append(content)\n content = (\"\\n\\n\" if po.more_newlines else \"\\n\").join(parts)\n return content", "def on_text(self):\n if self.get_state() == BOUNDED_STATE.DONE:\n # Stop if done\n self._complete_parse()\n return\n if self.text == \"\":\n # Nothing to parse\n return\n\n # TODO: Optimize loop to find in one pass?\n lpos = self.text.find(self.lbound)\n rpos = self.text.find(self.rbound)\n if self.string_char is None:\n # If no string is started, look for the earliest delimeter\n strd_pos = -1\n for delim in self.string_delims:\n dpos = self.text.find(delim)\n if strd_pos == -1:\n strd_pos = dpos\n elif (dpos >= 0) and (dpos < strd_pos):\n strd_pos = dpos\n else:\n # Use the previous delimeter if a string is started\n strd_pos = self.text.find(self.string_char)\n stre_pos = self.text.find(self.string_escape)\n self.debug(\"lpos={} rpos={} sdpos={} sepos={}\".format(lpos, rpos, strd_pos, stre_pos))\n\n # Check string logic first, since it overrides boundary logic\n if self.string_char is not None:\n # String has been started; end string before returning to boundary logic\n if strd_pos >= 0:\n # String delimiter is found\n self.debug(\"Append string\")\n if (stre_pos >= 0) and ((stre_pos + 1) == strd_pos):\n # Escaped delimiter, skip\n self._append_parsed_text(self.text[:strd_pos + 1])\n self.text = self.text[strd_pos + 1:]\n return\n else:\n self.debug(\"End string\")\n self.string_char = None\n if not self.string_is_bound:\n # If the string is not the boundary, consider it parsed here\n self._append_parsed_text(self.text[:strd_pos + 1])\n self.text = self.text[strd_pos + 1:]\n return\n else:\n # No need to check string_is_bound, since no boundary to process either way.\n self.debug(\"Append whole string\")\n self._append_parsed_text(self.text)\n self.text = \"\"\n return\n\n elif strd_pos >= 0:\n if (lpos >= 0) and (strd_pos > lpos):\n # String begins after the left boundary\n pass\n elif (rpos >= 0) and (strd_pos > rpos):\n # String begins after the right boundary\n pass\n else:\n # Not in a string; but one is being started\n self.string_char = self.text[strd_pos]\n self.debug(\"Enter string: {}\".format(self.string_char))\n if not self.string_is_bound:\n # If the string is not the boundary, consider it parsed here\n self._append_parsed_text(self.text[:strd_pos + 1])\n self.text = self.text[strd_pos + 1:]\n return\n\n # Check boundary logic\n if (lpos < 0) and (rpos < 0):\n # Neither boundary is present\n if (self.parsed_text == \"\") and (self.get_state() == BOUNDED_STATE.LBOUND):\n # A boundary should be the first thing we find\n self.error(\"Unexpected text '{}'\".format(self.text))\n 
return\n\n if self.get_state() == BOUNDED_STATE.LBOUND:\n if (rpos >= 0) and (rpos < lpos):\n # Exiting an inner level\n self.pop_stack(rpos, self.rbound, True)\n return\n elif lpos >= 0:\n # Entering new level, include boundary only for inner levels\n self.push_stack(lpos, self.lbound)\n self.set_state(BOUNDED_STATE.RBOUND)\n return\n else:\n # Input prior to the boundary is illegal\n self.error(\"Expected '{}'\".format(self.lbound))\n elif self.get_state() == BOUNDED_STATE.RBOUND:\n if (lpos >= 0) and (lpos < rpos):\n # Entering an inner level\n self.push_stack(lpos, self.lbound)\n return\n elif rpos >= 0:\n # Exiting level, include boundary only for inner levels\n self.pop_stack(rpos, self.rbound, (self.stack_level > 1))\n if self.stack_level == 0:\n self.set_state(BOUNDED_STATE.DONE)\n return\n else:\n # This should never happen\n self.error(\"Unexpected state {}\".format(self.get_state()))", "def _apply_filters(self, text, tag):\n\n # The order of the filters below is important\n # and should not be changed\n\n # intial_quotes needs to happen at this point so that\n # attribute values introduced later on do not get affected\n text = self.initial_quotes(text)\n text = self.smarty_pants(text)\n text = self.amp(text)\n text = self.caps(text)\n\n return text", "def applyRegularExpressions(strText, substitutionPatternList, languageId, debug=False):\n # print substitutionPatternList\n if debug:\n RegularExpressionFormula.logger.info(\n \"Applying regular expressions to transcript ...\")\n\n # For successive regular expressions\n strText = RegularExpressionFormula.normalizeSpaces(strText, True)\n\n if debug:\n RegularExpressionFormula.logger.info(\n \"Initial transcript: \" + strText)\n\n # For each known regular expression\n for regex, alternate, regexType, regexLanguageId in substitutionPatternList:\n regexLanguageId = int(regexLanguageId)\n\n # Does it match the text language\n if regexLanguageId != languageId and \\\n regexLanguageId != 0:\n continue\n\n # Convert from type\n regexListForType = \\\n RegexType.typeToRegularExpressions(\n regex, alternate, int(regexType))\n\n # Get regular expressions for the given type\n for regexForType in regexListForType:\n regexPattern = regexForType[0] # What to match\n regexSubstitution = regexForType[1] # What to substitute\n\n strLineOriginal = strText\n\n # Is it some python code\n if alternate.startswith(\"lambda\"):\n # Use alternate version\n strText = re.sub(regexPattern, eval(\n alternate), strText, flags=re.UNICODE | re.MULTILINE)\n else:\n # print regexPattern, regexSubstitution\n # No ignore case available\n # print regexPattern, \" --> \", strText\n strText = re.sub(regexPattern, regexSubstitution,\n strText, flags=re.UNICODE | re.MULTILINE)\n\n if debug:\n if strText.encode('utf-8') != strLineOriginal.encode('utf-8'):\n sys.stdout.write(\n \" --> Original string: >\" + strLineOriginal.encode('utf-8') + \"<\\n\")\n sys.stdout.write(\" Match pattern: >\" + regexPattern.encode('utf-8') + \"<\"\n \"\\n Substitution: >\" + regexSubstitution.encode('utf-8') + \"<\")\n sys.stdout.write(\n \"\\n >\" + strText.encode('utf-8') + \"<\\n\")\n\n strText = RegularExpressionFormula.normalizeSpaces(strText)\n\n if debug:\n sys.stdout.flush()\n RegularExpressionFormula.logger.info(\n \"Final transcript: \" + strText + \"\\n\")\n\n return strText", "def parse(text):\n # Sanitize text case to meet phonetic comparison standards\n #fixed_text = validate.fix_string_case(utf(text))\n # prepare output list\n fixed_text = text\n output = []\n # cursor end 
point\n cur_end = 0\n # iterate through input text\n for cur, i in enumerate(fixed_text):\n # Trap characters with unicode encoding errors\n try:\n i.encode('utf-8')\n except UnicodeDecodeError:\n uni_pass = False\n else:\n uni_pass = True\n # Default value for match\n match = {'matched': False}\n # Check cur is greater than or equals cur_end. If cursor is in\n # a position that has alread been processed/replaced, we don't\n # process anything at all\n if not uni_pass:\n cur_end = cur + 1\n output.append(i)\n elif cur >= cur_end and uni_pass:\n # Try looking in non rule patterns with current string portion\n match = match_non_rule_patterns(fixed_text, cur)\n # Check if non rule patterns have matched\n if match[\"matched\"]:\n output.append(match[\"replaced\"])\n cur_end = cur + len(match[\"found\"])\n else:\n # if non rule patterns have not matched, try rule patterns\n match = match_rule_patterns(fixed_text, cur)\n # Check if rule patterns have matched\n if match[\"matched\"]:\n # Update cur_end as cursor + length of match found\n cur_end = cur + len(match[\"found\"])\n # Process its rules\n replaced = process_rules(rules = match[\"rules\"],\n fixed_text = fixed_text,\n cur = cur, cur_end = cur_end)\n # If any rules match, output replacement from the\n # rule, else output it's default top-level/default\n # replacement\n if replaced is not None:\n # Rule has matched\n output.append(replaced)\n else:\n # No rules have matched\n # output common match\n output.append(match[\"replaced\"])\n\n # If none matched, append present cursor value\n if not match[\"matched\"]:\n cur_end = cur + 1\n output.append(i)\n\n # End looping through input text and produce output\n return ''.join(output)", "def set_doc_phrases(doc_phrases, docs, phrases):\n for doc in docs:\n if not doc in doc_phrases:\n doc_phrases[doc] = []\n doc_phrases[doc] = doc_phrases[doc] + phrases", "def _postprocess(\n self,\n result: List[str],\n eojeols: List[str],\n poses: List[str],\n ):\n token_indices = []\n temp_group = []\n for i, res in enumerate(result):\n if (\"<\" in res) or (\">\" in res):\n continue\n if not temp_group:\n temp_group.append(i)\n else:\n if i == (temp_group[-1] + 1):\n temp_group.append(i)\n else:\n token_indices.append(temp_group)\n temp_group = [i]\n token_indices.append(temp_group)\n\n lucrative = 0\n for i, li_index in enumerate(token_indices):\n if poses:\n eojeol = eojeols[i].split(\"+\")\n pos = poses[i].split(\"+\")\n tagged = []\n for e, p in zip(eojeol, pos):\n tagged.append(f\"{e}/{p}\")\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [\"+\".join(tagged)]\n else:\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [eojeols[i]]\n lucrative += len(li_index) - 1\n\n return result", "def encode(self, text):\n if self.verbatim:\n return text\n # compile the regexps once. 
do it here so one can see them.\n #\n # first the braces.\n if not self.__dict__.has_key('encode_re_braces'):\n self.encode_re_braces = re.compile(r'([{}])')\n text = self.encode_re_braces.sub(r'{\\\\\\1}',text)\n if not self.__dict__.has_key('encode_re_bslash'):\n # find backslash: except in the form '{\\{}' or '{\\}}'.\n self.encode_re_bslash = re.compile(r'(?<!{)(\\\\)(?![{}]})')\n # then the backslash: except in the form from line above:\n # either '{\\{}' or '{\\}}'.\n text = self.encode_re_bslash.sub(r'{\\\\textbackslash}', text)\n\n # then dollar\n text = text.replace(\"$\", '{\\\\$}')\n # then all that needs math mode\n text = text.replace(\"<\", '{$<$}')\n text = text.replace(\">\", '{$>$}')\n # then\n text = text.replace(\"&\", '{\\\\&}')\n text = text.replace(\"_\", '{\\\\_}')\n # the ^:\n # * verb|^| does not work in mbox.\n # * mathmode has wedge. hat{~} would also work.\n text = text.replace(\"^\", '{\\\\ensuremath{^\\\\wedge}}')\n text = text.replace(\"%\", '{\\\\%}')\n text = text.replace(\"#\", '{\\\\#}')\n text = text.replace(\"~\", '{\\\\~{}}')\n if self.insert_newline:\n # HACK: insert a blank before the newline, to avoid \n # ! LaTeX Error: There's no line here to end.\n text = text.replace(\"\\n\", '~\\\\\\\\\\n')\n elif self.mbox_newline:\n text = text.replace(\"\\n\", '}\\\\\\\\\\n\\\\mbox{')\n if self.insert_none_breaking_blanks:\n text = text.replace(' ', '~')\n # unicode !!! \n text = text.replace(u'\\u2020', '{$\\\\dagger$}')\n return text", "def strtr(text, items):\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, items.keys())))\n out = regex.sub(lambda mo: items[mo.string[mo.start():mo.end()]], text)\n return out", "def replace_strings(text: str, replacement_pair_list):\n\n new_text = text\n for pair in replacement_pair_list:\n old, new = pair\n new_text = new_text.replace(old, new)\n return new_text", "def __PerformSubstitutions(self, text):\n\n for substitution in self.substitutions:\n pattern, replacement = self.SplitValue(substitution)\n text = re.compile(pattern,re.M).sub(replacement, text)\n return text", "def replace_many(text: str, /, mapping: _Mapping[str, str], *, ignore_case: bool = False) -> str:\n if not mapping:\n return text\n\n if ignore_case:\n normalize: _Callable[[str], str] = lambda s: s.lower()\n re_mode = _re.IGNORECASE\n\n mapping = {normalize(key): val for key, val in mapping.items()}\n\n else:\n normalize: _Callable[[str], str] = lambda s: s\n re_mode = 0\n\n # Place longer ones first to keep shorter subtexts from matching where the longer ones should\n # take place. 
For instance given the replacements {'ab': 'AB', 'abc': 'ABC'} against the text\n # 'hey abc', it should produce 'hey ABC' and not 'hey ABc'\n rep_sorted = sorted(mapping, key=len, reverse=True)\n rep_escaped = map(_re.escape, rep_sorted)\n\n # Create a big OR regex that matches any of the subtexts to replace\n pattern = _re.compile(\"|\".join(rep_escaped), re_mode) # type: ignore\n\n return pattern.sub(lambda match: mapping[normalize(match.group(0))], text)", "def changeKeywords(madLibsString):\n for word in madLibsString.split():\n word = re.sub('[^A-Za-z0-9]+', '', word)\n if word == ADJECTIVE:\n madLibsString = madLibsString.replace(word, getWord(ADJECTIVE), 1)\n elif word == VERB:\n madLibsString = madLibsString.replace(word, getWord(VERB), 1)\n elif word == NOUN:\n madLibsString = madLibsString.replace(word, getWord(NOUN), 1)\n elif word == ADVERB:\n madLibsString = madLibsString.replace(word, getWord(ADVERB), 1)\n else:\n continue\n return madLibsString", "def add_string(self, s):\n \n words_list= clean_text(s)\n\n self.total=len(words_list)\n \n for w in words_list:\n if w not in self.words:\n self.words[w]= 1\n else:\n self.words[w]+= 1\n\n for w in words_list:\n if len(w) not in self.word_lengths:\n self.word_lengths[len(w)]= 1\n else:\n self.word_lengths[len(w)]+= 1\n \n for w in words_list:\n word_stem=stem(w)\n if word_stem not in self.stems:\n self.stems[word_stem]= 1\n else:\n self.stems[word_stem]+= 1\n\n for w in words_list:\n end=ending(w)\n if end != None: \n if end not in self.endings:\n self.endings[end]=1\n else:\n self.endings[end]+=1\n\n count=0\n for r in s:\n if r ==' ':\n count +=1\n if r in '.!?':\n count+=1\n if count not in self.sentence_lengths:\n self.sentence_lengths[count]=1\n else:\n self.sentence_lengths[count]+=1\n count=0", "def _setCleanString(self, words):\n \n if self.type=='DIAGNOSIS':\n excludes = ['DX']\n elif self.type == 'SECOND_LEVEL_DIAGNOSIS':\n #excludes = ['Assessment','Impression', 'Possible', 'ModifierCertainty']\n excludes = ['Assessment','Impression']\n elif self.type == 'DRUG':\n excludes = ['Route']\n elif self.type == 'MEDICAL_HISTORY':\n excludes = ['History', 'MedicalHistory']\n elif self.type == 'FAMILY_HISTORY':\n excludes = ['History', 'FamilyHistory', 'Family']\n else:\n return self.string\n \n s = ''\n pretk = ','\n for i, w in enumerate(words):\n if self.tags[i][1] in excludes:\n continue\n elif self.tags[i][1]=='COMMA':\n if pretk==',': \n continue\n else:\n s += w\n pretk = w\n continue\n elif s=='': \n s += w\n else:\n s += ' ' + w\n pretk = w\n \n return s" ]
[ "0.6217344", "0.5188642", "0.5167498", "0.5113326", "0.5067161", "0.49915335", "0.49479842", "0.4930011", "0.48945203", "0.4888225", "0.48151892", "0.47859526", "0.4769635", "0.47651082", "0.47107962", "0.46882594", "0.4659527", "0.46528217", "0.4637156", "0.463006", "0.45907682", "0.4582954", "0.45354956", "0.4526124", "0.4511113", "0.4500398", "0.44809476", "0.44710904", "0.44668615", "0.4439405" ]
0.6856967
0
Raises an errors.ArgumentError if not enough arguments were supplied. Takes care of formatting for detailed error messages.
def raise_not_enough_arguments(self, string):
    requested = errors.number(self.counter + 1)
    number = len(self.positional)
    verb = "was" if number == 1 else "were"
    what = "Requested {} formatting argument for "\
           "'{}' but only {} {} supplied!"
    what = what.format(requested, string, number, verb)
    raise errors.ArgumentError(what)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_args(args) -> None:\n if args.input_file is not None:\n assert args.num_entries_per_input_and_label is not None, \"If 'input_file' is set, 'num_entries_per_input_and_label' must be set\"\n assert args.num_entries_per_label is None, \"If 'input_file' is set, 'num_entries_per_label' must not be set\"\n assert args.batch_size is None, \"If 'input_file' is set, batch_size must not be set as 'num_entries_per_input_and_label' also \" \\\n \"serves as batch size in this case\"\n else:\n assert args.num_entries_per_input_and_label is None, \"If 'input_file' is not set, 'num_entries_per_input_and_label' must not be set\"\n assert args.num_entries_per_label is not None, \"If 'input_file' is not set, 'num_entries_per_label' must be set\"\n assert args.batch_size is not None, \"If 'input_file' is not set, 'batch_size' must be set\"", "def handle_invalid_arguments(e):\n errors = e.message\n return generic_errors(errors, code=400)", "def _raise_format_error(self, name: str, format_str: str, source_format: str):\n\n raise ValueError(f\"The '{ name }' should be { format_str }, rather than { source_format }\")", "def validate_arguments(self,args):\n\t\tif args.org == None:\n\t\t\tprint('Please specify Organization name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.repo == None:\n\t\t\tprint('Please specify Repositories name. Exiting.')\n\t\t\tsys.exit(0)\n\t\tif args.event_type == None:\n\t\t\tprint('Please specify type of the event. Exiting.')\n\t\t\tsys.exit(0)", "def __check_errors(self):\n if not(\"input\" in self.passedArgs or \"source\" in self.passedArgs):\n raise ArgError(\"Program did not receive any of mandatory arguments! (--source=file, --input=file)\")", "def subjectively_prettier_error(arg, message):\n try:\n raise argparse.ArgumentError(arg, message)\n except argparse.ArgumentError as err:\n print(f\"\\n{err}\")\n os._exit(1) # noqa", "def command_error(fmt, *args, **kwargs):\n raise CommandError(fmt.format(*args, **kwargs))", "def test_rule_create_command_when_blank_arguments_provided(err_msg, args, mock_client):\n with pytest.raises(Exception) as err:\n rule_create_command(mock_client, args)\n assert str(err.value) == err_msg", "def validate_args(args):\n\n if args.batch_size % args.batch_splits != 0:\n raise ValueError(BATCH_SIZE_SPLIT_ERR.format(args.batch_size, args.batch_splits))\n\n if args.data_parallel and args.model_parallel:\n raise ValueError(DATA_AND_MODEL_PARALLEL_ERR)\n\n if args.class_bal and args.year_weighted_class_bal:\n raise ValueError(CONFLICTING_WEIGHTED_SAMPLING_ERR)\n\n assert args.ten_fold_test_index in range(-1, 10)", "def test_argument_errors(self):\n method = self.Test.default_scope\n self.assertRaises(errors.ArgumentError,\n method,\n { 'where': 'foo' },\n where='bar')\n\n self.assertRaises(errors.ArgumentError, method, \"POOP\")", "def invalid_args(event):\n\n s.sendReply(\n event,\n f'Please provide the proper arguments. 
Use \"@{s.BOT_NAME} help\" for help.',\n )", "def test_missing_args(self):\n s = Square.create()\n self.assertEqual(str(s), '[Square] (5) 0/0 - 1')", "def _raise_argument_validation_exception(typedef, value, detail, expected_tokens=None):\n typedef_name = typedef.get('help-name')\n if typedef_name is None:\n typedef_name = typedef.get('name')\n if typedef_name is None:\n typedef_name = typedef.get('field')\n if typedef_name is None:\n typedef_name = '<unknown-type>'\n if detail is None:\n detail = ''\n validation_error_format = typedef.get('validation-error-format',\n 'Invalid %(typedef)s: %(value)s; %(detail)s')\n validation_error = (validation_error_format %\n {'typedef': typedef_name, 'value': str(value), 'detail': detail})\n raise error.ArgumentValidationError(validation_error, expected_tokens)", "def _validate_args(self, args):\r\n invalid_args = [k for k in self.required_params if args.get(k) is None]\r\n if invalid_args:\r\n raise ArgumentError('Missing required options: %s'\r\n % ','.join(invalid_args))", "def error(self, message):\n raise ArgumentParseError(message)", "def arg_err(self,func):\n print 'Error in arguments:'\n print inspect.getdoc(func)", "def _check_input(self, func, args, kwargs):\n fullargspec = inspect.getfullargspec(func)\n return_msg = ''\n if fullargspec.varkw is None:\n for key in kwargs:\n if not key in fullargspec.kwonlyargs:\n return_msg += f'[Error]: not support param `{key}`. \\n'\n if fullargspec.varargs is None:\n if len(fullargspec.args) == 0:\n max_args_len = 0\n else:\n max_args_len = len(fullargspec.args)-1 if fullargspec.args[0] == 'self' else len(fullargspec.args)\n defaults_nums = 0 if fullargspec.defaults is None else len(fullargspec.defaults)\n min_args_len = max_args_len - defaults_nums\n if len(args) < min_args_len:\n return_msg += f'[Error]: have min {min_args_len} input, but you input {len(args)} args. \\n'\n if max_args_len < len(args):\n return_msg += f'[Error]: have max {max_args_len} input, but you input {len(args)} args. 
\\n'\n return return_msg", "def test_missing_arg_repr():\n argspec = inspect.getfullargspec(exceptional.wrap)\n assert repr(argspec.kwonlydefaults[\"message\"]) == \"<MISSING>\"", "def error(self, *args, **kwargs):\n if len(args) == 3:\n print(f\"ERROR: {args[1]}\")\n else:\n print(f\"ERROR: {args[0]}\")", "def check_args():\n schema = Schema({\n 'FOLDREC': Use(open, error='FOLDREC file should be readable'),\n 'CLUSTAL': Use(open, error='CLUSTAL file should be readable'),\n 'CCMPRED': Use(open, error='CCMPRED file should be readable'),\n '--metafold': Use(open, error='METAFOLD_FILE should be readable'),\n '--nb_pdb': And(Use(int), lambda n: 1 <= n <= 405,\n error='--nb_pdb=NUM should be integer 1 <= N <= 405'),\n '--dssp': Use(open, error='dssp/mkdssp should be readable'),\n '--dope': Use(open, error='dope file should be readable'),\n '--benchmark': Use(open, error='BENCHMARK_FILE should be readable'),\n '--cpu': And(Use(int), lambda n: 0 <= n <= cpu_count(),\n error='--cpus=NUM should be integer 1 <= N <= ' + str(cpu_count())),\n # The output PATH is created (if not exists) at the end of the program\n # so we skip the check.\n object: object})\n try:\n schema.validate(ARGUMENTS)\n except SchemaError as err:\n exit(err)", "def test_enforcement_boundary_create_command_when_invalid_arguments_provided(\n err_msg, args, err_type, mock_client\n):\n with pytest.raises(err_type) as err:\n enforcement_boundary_create_command(mock_client, args)\n assert str(err.value) == err_msg", "def bad_args(args):\n PARSER.print_help()\n exit(0)", "def test_04_one_args(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(7)\n self.assertEqual(\"__init__() missing 1 required positional argument:\\\n 'height'\", str(x.exception))", "def validate_input(self, *args):\n return", "def make_error( title, *args, **kwargs ):\n blocks = list()\n blocks.append( '<h1>{}</h1>'.format( title ) )\n if args:\n blocks.append( '<h4>{}</h4>'.format( args[ 0 ] ) )\n for arg in args[ 1 : ]:\n blocks.append( '<p>{}</p>'.format( arg ) )\n if kwargs:\n dl = list()\n for key, value in kwargs.items():\n dl.append( '<dt>{}</dt><dd>{}</dd>'.format( key, value ) )\n blocks.append( '<dl>\\n{}\\n</dl>'.format( '\\n'.join( dl ) ) )\n return _html.format(\n title = title,\n head = '',\n body = '\\n'.join( blocks )\n )", "def test_errors_on_bad_argument(self):\n self.assertRaises(Exception, Scope, 'foo')\n self.assertRaises(Exception, Scope, 1)\n self.assertRaises(Exception, Scope, [])\n self.assertRaises(Exception, Scope, tuple())", "def test_empty_arguments(self):\n arg1 = {'keyAttributes': 'Cruiser',\n 'attributesDiff': 'Sail',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg1)\n\n arg2 = {'src': '.',\n 'attributesDiff': 'Sail',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg2)\n\n arg3 = {'src': '.',\n 'keyAttributes': 'Cruiser',\n 'target': '.'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg3)\n\n arg4 = {'src': '.',\n 'keyAttributes': 'Cruiser',\n 'attributesDiff': 'Sail'}\n\n with self.assertRaises(ValidationError):\n self.processing.validate(arg4)", "def _validate_create_args(self, args):\r\n invalid_args = [k for k in self.required_params if args.get(k) is None]\r\n if invalid_args:\r\n raise ArgumentError('Missing required options: %s'\r\n % ','.join(invalid_args))", "def raise_error(cls, *args):\n raise cls(cls.message)", "def test_create_service_binding_when_blank_arguments_provided(\n err_msg, args, 
mock_client\n):\n with pytest.raises(Exception) as err:\n service_binding_create_command(mock_client, args)\n\n assert str(err.value) == err_msg" ]
[ "0.68111414", "0.66778743", "0.65783757", "0.6564304", "0.6534988", "0.65230286", "0.6501002", "0.6431251", "0.6368672", "0.6361214", "0.6355897", "0.63518065", "0.6280557", "0.627626", "0.6258548", "0.6256336", "0.62545747", "0.6239572", "0.62368023", "0.6219729", "0.6205022", "0.6193158", "0.6154671", "0.6124328", "0.61135834", "0.61095566", "0.61023957", "0.61017233", "0.6087884", "0.60862756" ]
0.71657497
0
Save the GraphicsContext to a file. Output files are always saved in RGB or RGBA format; if this GC is not in one of these formats, it is automatically converted. If filename includes an extension, the image format is inferred from it. file_format is only required if the format can't be inferred from the filename (e.g. if you wanted to save a PNG file as a .dat or .bin). filename may also be a "file-like" object such as a StringIO, in which case a file_format must be supplied. pil_options is a dict of format-specific options that are passed down to the PIL image file writer. If a writer doesn't recognize an option, it is silently ignored. If the image has an alpha channel and the specified output file format does not support alpha, the image is saved in rgb24 format.
def save(gc, filename, file_format=None, pil_options=None):
    FmtsWithoutAlpha = ("jpg", "bmp", "eps", "jpeg")
    from PIL import Image as PilImage

    size = (gc.width(), gc.height())
    fmt = gc.format()

    # determine the output pixel format and PIL format
    if fmt.endswith("32"):
        pilformat = "RGBA"
        pixelformat = "rgba32"
        if (
            isinstance(filename, six.string_types)
            and filename[-3:].lower() in FmtsWithoutAlpha
        ) or (file_format is not None and file_format.lower() in FmtsWithoutAlpha):
            pilformat = "RGB"
            pixelformat = "rgb24"
    elif fmt.endswith("24"):
        pilformat = "RGB"
        pixelformat = "rgb24"

    # perform a conversion if necessary
    if fmt != pixelformat:
        newimg = GraphicsContextArray(size, fmt)
        newimg.draw_image(gc)
        newimg.convert_pixel_format(pixelformat, 1)
        bmp = newimg.bmp_array
    else:
        bmp = gc.bmp_array

    img = PilImage.frombytes(pilformat, size, bmp.tostring())
    img.save(filename, format=file_format, options=pil_options)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveFormatFile(self, filename, format):\n ret = libxml2mod.xmlSaveFormatFile(filename, self._o, format)\n return ret", "def save_figure(\n self,\n filename,\n format=\"png\",\n dpi=None,\n face_colour=\"w\",\n edge_colour=\"w\",\n orientation=\"portrait\",\n paper_type=\"letter\",\n transparent=False,\n pad_inches=0.1,\n overwrite=False,\n ):\n from menpo.io.output.base import _export\n\n save_fig_args = {\n \"dpi\": dpi,\n \"facecolour\": face_colour,\n \"edgecolour\": edge_colour,\n \"orientation\": orientation,\n \"papertype\": paper_type,\n \"format\": format,\n \"transparent\": transparent,\n \"pad_inches\": pad_inches,\n \"bbox_inches\": \"tight\",\n \"frameon\": None,\n }\n # Use the export code so that we have a consistent interface\n _export(\n save_fig_args, filename, self._extensions_map, format, overwrite=overwrite\n )", "def save_as(\n cls, figure_or_data, filename, format=None, width=None, height=None, scale=None\n ):\n # todo: format shadows built-in name\n (base, ext) = os.path.splitext(filename)\n if not ext and not format:\n filename += \".png\"\n elif ext and not format:\n format = ext[1:]\n elif not ext and format:\n filename += \".\" + format\n\n img = cls.get(figure_or_data, format, width, height, scale)\n\n f = open(filename, \"wb\")\n f.write(img)\n f.close()", "def save(self, fp, format=None, **params):\r\n if isinstance(fp, basstring):\r\n if fp.lower().endswith(\".gif\"):\r\n if numpy2gif_installed:\r\n if self.is_animated:\r\n numpy2gif.write_gif(self.frames, fp, fps=100//self.exts[0][['delay_time']])\r\n else:\r\n numpy2gif.write_gif(self._instance, fp)\r\n else:\r\n NotImplementedError(\"numpy2gif is not installed so cannot save gif images, install it with: pip install numpy2gif\")\r\n else:\r\n cv2.imwrite(fp, self._instance)\r\n return None\r\n if isinstance(fp, fil_object):\r\n fl = open(format, 'w')\r\n fl.write(fp.read())\r\n fl.close()\r\n return None\r\n return None", "def save(self,filename,format=None,double_precision=False):\n\n\t\tif format is None:\n\t\t\t\n\t\t\textension = filename.split(\".\")[-1]\n\t\t\tif extension in [\"fit\",\"fits\"]:\n\t\t\t\tformat=\"fits\"\n\t\t\telif extension in [\"npy\",\"npz\"]:\n\t\t\t\tformat=\"npz\"\n\t\t\telse:\n\t\t\t\traise IOError(\"File format not recognized from extension '{0}', please specify it manually\".format(extension))\n\n\t\t\n\t\tif format==\"fits\":\n\t\t\tsaveFITS(self,filename,double_precision)\n\t\telif format==\"npz\":\n\t\t\tsaveNPZ(self,filename)\n\t\telse:\n\t\t\traise ValueError(\"Format {0} not implemented yet!!\".format(format))", "def save(self, filename, format=None, verbose=True):\n from . 
import Formats\n Formats.save(self, filename, format=format, verbose=verbose)", "def save(self, file_path: str, format: str = None) -> None:\n if format is not None:\n self.get_image().save(file_path, format)\n else:\n self.get_image().save(file_path)", "def save_figure(self, filename, format='png', size=None,\n magnification='auto', overwrite=False):\n from menpo.io.output.base import _export\n savefig_args = {'size': size, 'figure': self.figure,\n 'magnification': magnification}\n # Use the export code so that we have a consistent interface\n _export(savefig_args, filename, self._extensions_map, format,\n overwrite=overwrite)", "def save_image(self, file_obj):\n manager = pyglet.image.get_buffer_manager()\n colorbuffer = manager.get_color_buffer()\n\n # if passed a string save by name\n if hasattr(file_obj, 'write'):\n colorbuffer.save(file=file_obj)\n else:\n colorbuffer.save(filename=file_obj)", "def save_to_image_file(self, filename, image_format='png', scale_x=1, scale_y=1):\n\n self.save_barcode_to_pillow(scale_x=scale_x, scale_y=scale_y).save(filename,\n format=image_format)", "def write(self, filename=DEFAULT_FILENAME):\n\n gdspy.write_gds('{0}.gds'.format(filename), unit=self.unit, precision=self.precision)", "def save(self, filename):\n self.image.save(filename, self.options.img_format)", "def save(self, file=None, filename=None):\n if file is None and filename is None:\n raise TypeError('expected an argument')\n elif file is not None and filename is not None:\n raise TypeError('expected only one argument; but two passed')\n elif file is not None:\n if isinstance(file, types.FileType) and hasattr(libc, 'fdopen'):\n fd = libc.fdopen(file.fileno(), file.mode)\n r = library.MagickWriteImageFile(self.wand, fd)\n if not r:\n self.raise_exception()\n else:\n if not callable(getattr(file, 'write', None)):\n raise TypeError('file must be a writable file object, '\n 'but it does not have write() method: ' +\n repr(file))\n file.write(self.make_blob())\n else:\n if not isinstance(filename, basestring):\n raise TypeError('filename must be a string, not ' +\n repr(filename))\n r = library.MagickWriteImage(self.wand, filename)\n if not r:\n self.raise_exception()", "def dump(self, filename, format=None, mode='wb'):\n data = dict([(k, getattr(self, k, None)) for k in self.datafields])\n format = infer_format(filename, format)\n if format == 'pkl.gz':\n f = gzip.open(filename, mode)\n f.write(pickle.dumps(data, pickle.HIGHEST_PROTOCOL))\n f.close()\n elif format == 'pkl':\n with io.open(filename, mode) as f:\n f.write(pickle.dumps(data, pickle.HIGHEST_PROTOCOL))", "def saveFormatFileTo(self, cur, encoding, format):\n if cur is None: cur__o = None\n else: cur__o = cur._o\n ret = libxml2mod.xmlSaveFormatFileTo(self._o, cur__o, encoding, format)\n return ret", "def write_file(self, filename, fileformat=\"json\"):\n if self.df_avg is None:\n self.collect_stats()\n if fileformat == \"json\":\n self.write_json(filename)\n elif fileformat == \"excel\":\n self.write_excel(filename)", "def save(self, filename, format=FORMAT_PEM):\n bio = BIO.openfile(filename, 'wb')\n if format == FORMAT_PEM:\n return m2.x509_write_pem(bio.bio_ptr(), self.x509)\n elif format == FORMAT_DER:\n return m2.i2d_x509_bio(bio.bio_ptr(), self.x509)\n else:\n raise ValueError(\"Unknown filetype. 
Must be either FORMAT_PEM or FORMAT_DER\")", "def save_as(self, filename):\n raise NotImplementedError(\n \"Saving ring buffers to other formats is not yet implemented.\")\n\n if filename[-3:] == 'zip':\n pass # TODO\n elif filename[-2:] == 'h5':\n pass # TODO\n elif filename[-4:] == 'fits':\n pass # TODO\n elif filename[-3:] == 'npz':\n self.save_as_numpy(filename)", "def saveFormatFileEnc(self, filename, encoding, format):\n ret = libxml2mod.xmlSaveFormatFileEnc(filename, self._o, encoding, format)\n return ret", "def save_plain_image_as_file(self, filepath, format='png', quality=90):\n img_w = self.get_plain_image_as_widget()\n # assumes that the image widget has some method for saving to\n # a file\n img_w.save(filepath, format=format, quality=quality)", "def SaveFile(*args, **kwargs):\n return _gdi_.Bitmap_SaveFile(*args, **kwargs)", "def save_file(filename, target, svmtd=\"pickle\"):\n with open(filename, \"wb\") as opfh:\n if hasattr(target, \"savefig\"):\n target.savefig(opfh)\n elif svmtd == \"pickle\":\n pickle.dump(target, opfh)\n else:\n joblib.dump(target, opfh)", "def save(self, filename):\n try:\n import PIL\n except ImportError:\n raise RuntimeError('Could not import PIL. PIL (pillow) is required to save fresnel images.')\n else:\n if self._output is None:\n self.render()\n image = PIL.Image.fromarray(self._output[:], mode='RGBA')\n image.save(filename)", "def save():\n file_name = filedialog.asksaveasfilename(\n filetypes=[\n (\"Scalable Vector Graphics\", \"*.svg\"),\n (\"Postscript\", \"*.ps\"),\n (\"Portable Network Graphics\", \"*.png\")\n ],\n initialdir=os.getcwd())\n if file_name: # save option not cancelled by user\n extension = re.search(r\"\\.[\\w]+$\", file_name)[0]\n if extension == '.png':\n self.parent_class.save_png(file_name)\n elif extension == \".ps\":\n self.parent_class.save_postscript(file_name)\n elif extension == \".svg\":\n self.parent_class.save_canvas_svg(file_name)\n else:\n raise TypeError(\"Unknown Filetype\")", "def write(filename, parameters, file_format=None, **kwargs):\n if not isinstance(filename, str):\n raise TypeError()\n if not isinstance(parameters, dict):\n raise TypeError()\n if not (file_format is None or file_format in {\"tough\", \"json\"}):\n raise ValueError()\n\n fmt = (\n file_format\n if file_format\n else filetype_from_filename(filename, _extension_to_filetype)\n )\n fmt = fmt if fmt else \"tough\"\n\n _writer_map[fmt](filename, parameters, **kwargs)", "def saveFormatFileTo(self, buf, encoding, format):\n if buf is None: buf__o = None\n else: buf__o = buf._o\n ret = libxml2mod.xmlSaveFormatFileTo(buf__o, self._o, encoding, format)\n return ret", "def write(self, filename=None, as_type='json'):\n if not filename:\n filename = self.uri\n self.create_output_dir(filename)\n if as_type == 'json':\n with open(filename, 'w') as outfile:\n outfile.write(self.transform_data(outformat=formats.JSON))\n elif as_type == 'shapefile':\n self.data.to_file(filename)\n else:\n raise NotImplementedError('{} not a valid type'.format(as_type))\n return self.uri", "def to_file(self, filename):\n\n img = self.to_image()\n img.save(filename, optimize=True)\n return self", "def save(self, filename, compression=True, transpose=False,\n sparse=False, support=False, compression_opts=1):\n write_ga_file(filename, self.value, self[0].layout.metric, self[0].layout.basis_names,\n compression=compression, transpose=transpose,\n sparse=sparse, support=support, compression_opts=compression_opts)", "def _save_mpl_figure(self, fig, filename, **kwargs):\n\n 
fig.savefig(filename, **kwargs)\n\n return filename" ]
[ "0.6349469", "0.6296424", "0.62946284", "0.6211686", "0.6203548", "0.6184435", "0.61366194", "0.6020945", "0.59857905", "0.58402926", "0.583672", "0.57930124", "0.57857025", "0.5732518", "0.57183915", "0.57100964", "0.57058245", "0.56932086", "0.56529623", "0.5650282", "0.562013", "0.5580621", "0.5531667", "0.5516235", "0.54913694", "0.54792315", "0.54432654", "0.5439838", "0.5407429", "0.5402457" ]
0.7718939
0
Returns a pandas DataFrame with (Open, High, Low, Close, Volume) columns for the specified symbol and time_interval. Returns None if an exception occurs.
def get_stock(symbol, interval):
    try:
        time_interval = TIME_INTERVALS[interval]
        if(time_interval == TIME_INTERVALS['Intraday']):
            json_data = requests.request('GET', 'https://www.alphavantage.co'+
                '/query?function=TIME_SERIES_INTRADAY&symbol='+symbol+
                '&interval=1min&apikey='+API_KEY).json()
            data_frame = pd.DataFrame.from_records(json_data['Time Series (1min)'])
        else:
            json_data = requests.request('GET', 'https://www.alphavantage.co'+
                '/query?function='+time_interval+'&symbol='+symbol+
                '&apikey='+API_KEY).json()
            data_key = ''
            if(time_interval == TIME_INTERVALS['Daily']):
                data_key = 'Time Series (Daily)'
            elif(time_interval == TIME_INTERVALS['Weekly']):
                data_key = 'Weekly Time Series'
            else:
                data_key = 'Monthly Time Series'
            data_frame = pd.DataFrame.from_records(json_data[data_key])
        data_frame = data_frame.transpose()
        data_frame.columns = ['Open', 'High', 'Low', 'Close', 'Volume']
        return data_frame
    except:
        print("Error while loading data")
        return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_stock_data_frame(time, stock):\n\n print(\"Getting\", time, \"stock data for\", stock)\n url = 'https://api.iextrading.com/1.0/stock/'+stock+'/chart/'+time\n req = requests.get(url)\n print(url)\n\n print(\"Parsing data.\")\n rjson = req.text\n\n rdata = json.loads(rjson)\n\n dates = []\n openprices = []\n highprices = []\n lowprices = []\n closeprices = []\n volumes = []\n\n for i in rdata:\n date = i['date']\n dates.append(date)\n openprices.append(float(i['open']))\n highprices.append(float(i['high']))\n lowprices.append(float(i['low']))\n closeprices.append(float(i['close']))\n volumes.append(float(i['volume']))\n\n index = pd.DatetimeIndex(dates, dtype='datetime64[ns]')\n _open = pd.Series(openprices, index=index)\n high = pd.Series(highprices, index=index)\n low = pd.Series(lowprices, index=index)\n close = pd.Series(closeprices, index=index)\n data_frame_data = {'Open' : _open, 'High' : high, 'Low' : low, 'Close' : close}\n\n return pd.DataFrame(data_frame_data)", "def time_series_intraday(symbol: str, interval: str = '60min',\n outputsize: str = 'compact') -> Tuple[pd.DataFrame, dict]:\n response = _fetch(symbol=symbol, function='TIME_SERIES_INTRADAY', interval=interval,\n outputsize=outputsize)\n\n response_dict = json.loads(response.content)\n\n df = pd.DataFrame.from_dict(response_dict[f'Time Series ({interval})'], orient='index', dtype=np.float64)\n df.index = pd.to_datetime(df.index)\n df = df.rename(columns=_string_map(df.columns))\n\n metadata = response_dict['Meta Data']\n _rename_dict_keys(metadata)\n\n metadata['begin_datetime'] = df.index.min()\n metadata['end_datetime'] = df.index.max()\n\n return df, metadata", "def GetHistoricalData(symbol : str, time_range : str) -> Optional[DataFrame]: \n\n time_format = \"%Y-%m-%d\"\n start_date = Equity.__time_range_to_date(time_range)\n end_date = dt.datetime.now()\n\n symbol_could_not_be_fixed = False\n while True:\n try:\n df = DataReader(symbol, data_source='yahoo', start=start_date, end=end_date)\n break\n except KeyError: # Yahoo does not recognize the inputted equity symbol\n if symbol_could_not_be_fixed:\n return None\n else:\n symbol = symbol.replace('.', '-')\n symbol_could_not_be_fixed = True\n except _utils.RemoteDataError: # Yahoo does not have trading data available for this equity\n return None\n except ConnectionError: # Most likely just a timeout. Retry the request\n continue\n\n start_dates_match = abs((dt.datetime.strptime(df.index[0]._date_repr, time_format) - start_date).days) < 5\n end_dates_match = abs((dt.datetime.strptime(df.index[-1]._date_repr, time_format) - end_date).days) < 5\n\n if not(start_dates_match or end_dates_match):\n return None\n \n return df", "def get_stock_price_df(info, symbols):\n\n df_l = []\n\n for num, i in enumerate(info):\n df = pd.DataFrame.from_dict(i, orient='index')\n df['Symbol'] = symbols[num]\n df_l.append(df)\n\n df_full = pd.concat(df_l)\n df_full = df_full.rename(columns={'1. open': 'Open',\n '2. high': 'High',\n '3. low': 'Low',\n '4. close': 'Close',\n '5. 
volume': 'Volume'})\n\n return df_full", "def fetchOHLC(ticker,interval,duration):\r\n instrument = instrumentLookup(instrument_df,ticker)\r\n data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))\r\n data.set_index(\"date\",inplace=True)\r\n return data", "def fetchOHLC(ticker,interval,duration):\n instrument = instrumentLookup(instrument_df,ticker)\n data = pd.DataFrame(kite.historical_data(instrument,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))\n data.set_index(\"date\",inplace=True)\n return data", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n \n if 'SPY' not in symbols: #add SPY for reference\n symbols.insert(0,'SPY')\n \n # This for loop will loop through all the desired\n # symbols in the symbol list and create a dataframe\n # containing all the required data\n for symbol in symbols:\n # Get path for .csv file for symbol\n sym_path = symbol_to_path(symbol)\n # Load data from csv and create dataframe\n if symbol == 'SPY':\n dfSPY = pd.read_csv(sym_path,\n index_col=\"Date\",\n parse_dates=True,\n usecols=[\"Date\",\"Adj Close\"],\n na_values=['nan'])\n \n dfSPY = dfSPY.rename(columns={\"Adj Close\": str(symbol)})\n df = df.join(dfSPY,how='inner') # inner joins and drops NaN\n else:\n df_temp = pd.read_csv(sym_path,\n index_col=\"Date\",\n parse_dates=True,\n usecols=[\"Date\",\"Adj Close\"],\n na_values=['nan'])\n df_temp = df_temp.rename(columns={\"Adj Close\": str(symbol)})\n df = df.join(df_temp,how='inner')\n df = df.sort_index(ascending=True)\n return df", "def fetchOHLC(ticker,interval = \"minute\",duration=4):\r\n data = pd.DataFrame(kite.historical_data(ticker,dt.date.today()-dt.timedelta(duration), dt.date.today(),interval))\r\n data.date =data.date.map(lambda t: t.strftime('%Y-%m-%d %H:%M'))\r\n return data", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n # TODO: Read and join data for each symbol\n if os.path.isfile(symbol_to_path(symbol)): \n df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date', \n parse_dates = True, usecols=['Date', 'Adj Close'], na_values=['nan'])\n df_temp = df_temp.rename(columns = {'Adj Close': symbol})\n df = df.join(df_temp)\n if symbol == 'SPY': #drop dates SPY did not trade\n df = df.dropna(subset=[\"SPY\"])\n# else:\n# download_symbol(symbol) \n return df", "def hist_data(symbols, timeframe=\"15Min\", limit=200, start=\"\", end=\"\", after=\"\", until=\"\"):\r\n df_data = {}\r\n bar_url = endpoint + \"/bars/{}\".format(timeframe)\r\n params = {\"symbols\" : symbols, \r\n \"limit\" : limit,\r\n \"start\" : start,\r\n \"end\" : end,\r\n \"after\" : after, \r\n \"until\" : until}\r\n r = requests.get(bar_url, headers=headers, params=params)\r\n json_dump = r.json()\r\n for symbol in json_dump:\r\n temp = pd.DataFrame(json_dump[symbol])\r\n temp.rename({\"t\": \"time\", \r\n \"o\": \"open\",\r\n \"h\": \"high\",\r\n \"l\": \"low\",\r\n \"c\": \"close\",\r\n \"v\": \"volume\"}, axis=1, inplace=True)\r\n temp[\"time\"] = pd.to_datetime(temp[\"time\"], unit=\"s\")\r\n temp.set_index(\"time\", inplace=True)\r\n temp.index = temp.index.tz_localize(\"UTC\").tz_convert(\"America/New_York\")\r\n temp.between_time(\"09:31\", \"16:00\")\r\n df_data[symbol] = temp\r\n \r\n return df_data", "def getData(symbol, dataKind):\n try:\n link = 
'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol='+symbol+'&outputsize=compact&apikey=ENTER_KEY'\n htmltext = urllib.request.urlopen(link)\n data = json.load(htmltext)\n myDict = {}\n print(type(data))\n price_data = data['Time Series (Daily)']\n for key, value in price_data.items():\n date_num = datetime.strptime(key,\"%Y-%m-%d\")\n price = value[dataKind]\n myDict[date_num] = float(price)\n masterDF = pd.DataFrame.from_dict(myDict, orient = 'index')\n masterDF.index.name = \"Time\"\n masterDF.columns = [symbol]\n return masterDF\n\n except:\n print('Error occured when fetching data.')\n exit(0)", "def get_data(symbols, dates, base_dir=\"../data/\"):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols:\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n temp_df = pd.read_csv(symbol_to_path(symbol, base_dir), index_col='Date',\n parse_dates=True, usecols=['Date', 'Close'],\n na_values=['nan'])\n temp_df = temp_df.rename(columns={'Close': symbol})\n df = df.join(temp_df, how='inner')\n return df", "def volumes(interval,symbol):\n\ttoday = datetime.utcnow()\n\tcurrent_time = today.time()\n\tdaily_start_time = dtime(3,45)\n\tdaily_end_time = dtime(10,15)\n\tvolume_indicator = {} \n\tstart_timestamp = 0\n\tend_timestamp = 0\n\tif current_time < daily_start_time:\n\t\tyesterday = today - timedelta(days=1)\n\t\tstart_timestamp = time.mktime(datetime(yesterday.year,yesterday.month,yesterday.day,\n\t\t\t\t\t\t\t\t\t\t\t\t\t9,15,0,0,tzinfo=pytz.UTC).timetuple())\n\t\tend_timestamp = time.mktime(datetime(yesterday.year,yesterday.month,yesterday.day,\n\t\t\t\t\t\t\t\t\t\t\t\t\t15,45,0,0,tzinfo=pytz.UTC).timetuple())\n\t\tintervals = Interval.get_intervals(start_timestamp,end_timestamp,interval)\n\t\tdata = Data.get_data(symbol)\n\t\tvolume_indicator = Volume.get_volume_indications(intervals,data)\n\n\telif current_time > daily_end_time:\n\t\tstart_timestamp = time.mktime(datetime(today.year,today.month,today.day,9,15,0,0,\n\t\t\t\t\t\t\t\t\t\ttzinfo=pytz.UTC).timetuple())\n\t\tend_timestamp = time.mktime(datetime(today.year,today.month,today.day,15,45,0,0,\n\t\t\t\t\t\t\t\t\t\ttzinfo=pytz.UTC).timetuple())\n\t\tintervals = Interval.get_intervals(start_timestamp,end_timestamp,interval)\n\t\tdata = Data.get_data(symbol)\n\t\tvolume_indicator = Volume.get_volume_indications(intervals,data)\n\n\telse:\n\n\t\tstart_timestamp = time.mktime(datetime(today.year,today.month,today.day,9,15,0,0,\n\t\t\t\t\t\t\t\t\t\t\ttzinfo=pytz.UTC).timetuple())\n\t\tcurrent_time = datetime.now()\n\t\tend_timestamp = time.mktime(datetime(today.year,today.month,today.day,current_time.hour,\n\t\t\t\t\t\t\t\t\t\t\t\tcurrent_time.minute,0,0,tzinfo=pytz.UTC).timetuple())\n\t\tintervals = Interval.get_intervals(start_timestamp,end_timestamp,interval)\n\t\tdata = Data.get_data(symbol)\n\t\tvolume_indicator = Volume.get_volume_indications(intervals,data)\n volume_indicator['symbol']=symbol\n\treturn json.dumps(volume_indicator,sort_keys=True,indent=4,separators=(',',': '))", "def get_stock_data(symbol):\n # Set current dates\n start = date(date.today().year, 1, 1) # first of current year\n end = date.today() # today\n\n # Get yahoo Yahoo data\n data = pdr.get_data_yahoo(symbol, start=start, end=end)\n\n # Rename columns\n data.columns = [\"Highest price (USD)\",\n \"Lowest price (USD)\",\n \"Opening price (USD)\",\n \"Closing price (USD)\",\n \"Volume\",\n \"Adjusted closing price (USD)\"]\n\n return data", "def get_data(symbols, dates):\r\n df = pd.DataFrame(index=dates)\r\n if 'SPY' not in symbols: # 
add SPY for reference, if absent\r\n symbols.insert(0, 'SPY')\r\n\r\n for symbol in symbols:\r\n df_temp = pd.read_csv(symbol_to_path(symbol), index_col=\"Date\", parse_dates=True,\r\n usecols=['Date', 'Adj Close'], na_values=['nan'])\r\n\r\n # rename to prevent clash\r\n df_temp = df_temp.rename(columns={'Adj Close': symbol})\r\n df = df.join(df_temp)\r\n if symbol == 'SPY': #drop dates SPY did not trade\r\n df = df.dropna(subset=[\"SPY\"])\r\n\r\n return df", "def getStock(symbol, start, end):\n df = data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % symbol].pct_change()\n\n return df", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n # TODO: Read and join data for each symbol\n df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date',\n parse_dates=True, usecols=['Date', 'Adj Close'],\n na_values=['nan'])\n df_temp = df_temp.rename(columns={'Adj Close' : symbol})\n df = df.join(df_temp)\n if symbol == 'SPY':\n df = df.dropna(subset=['SPY'])\n\n return df", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if \"SPY\" not in symbols:\n symbols.insert(0, \"SPY\")\n for symbol in symbols:\n temp = pd.read_csv(symbol_to_path(symbol, base_dir=\"data\"), \n index_col=\"Date\", \n parse_dates=True, \n usecols=[\"Date\", \"Adj Close\"])\n \n temp = temp.rename(columns={\"Adj Close\": symbol})\n \n df = df.join(temp, how=\"inner\")\n df = df.sort_index(axis=0, ascending=[1])\n \n return df", "def price_dataframe(symbols=('sne',),\n start=datetime.datetime(2008, 1, 1),\n end=datetime.datetime(2009, 12, 31),\n price_type='actual_close',\n cleaner=util.clean_dataframe,\n ):\n if isinstance(price_type, basestring):\n price_type = [price_type]\n start = nlp.util.normalize_date(start or datetime.date(2008, 1, 1))\n end = nlp.util.normalize_date(end or datetime.date(2009, 12, 31))\n symbols = util.make_symbols(symbols)\n df = get_dataframes(symbols)\n # t = du.getNYSEdays(start, end, datetime.timedelta(hours=16))\n # df = clean_dataframes(dataobj.get_data(t, symbols, price_type))\n if not df or len(df) > 1:\n return cleaner(df)\n else:\n return cleaner(df[0])", "def get_data(symbols, dates):\n \n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols:\n symbols.insert(0,'SPY')\n for symbol in symbols:\n \n df1 = pd.read_csv(symbol_to_path(symbol),usecols=['Date','Adj Close'],\n index_col='Date',na_values =['nan'] )\n df1 = df1.rename(columns ={\"Adj Close\": symbol}) \n #print df1\n df = df.join(df1,how='inner')\n \n return df.sort_index()", "def getStock(symbol, start, end):\n df = pd.io.data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % symbol].pct_change()\n\n return df", "def get_data(symbols, dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n # TODO: Read and join data for each symbol\n path = symbol_to_path(symbol)\n df_tmp = pd.read_csv(path, index_col=\"Date\", parse_dates=True,\n usecols=['Date', 'Adj Close'], na_values=['nan'])\n df_tmp = df_tmp.rename(columns={'Adj Close':symbol})\n # or use inner join to drop the NaN values\n df = df.join(df_tmp, how='inner')\n return df", "def get_data(symbols, 
dates):\n df = pd.DataFrame(index=dates)\n if 'SPY' not in symbols: # add SPY for reference, if absent\n symbols.insert(0, 'SPY')\n\n for symbol in symbols:\n # TODO: Read and join data for each symbol\n df_temp = pd.read_csv(symbol_to_path(symbol),\n index_col=\"Date\",\n parse_dates=True,\n usecols=['Date', 'Adj Close'],\n na_values=['nan'])\n df_temp = df_temp.rename(columns={'Adj Close': symbol})\n df = df.join(df_temp)\n\n df = df.dropna()\n return df", "def time_series_daily(symbol: str, outputsize: str = 'compact') -> Tuple[pd.DataFrame, dict]:\n response = _fetch(symbol=symbol, function='TIME_SERIES_DAILY', outputsize=outputsize)\n\n response_dict = json.loads(response.content)\n\n df = pd.DataFrame.from_dict(response_dict[f'Time Series (Daily)'], orient='index', dtype=np.float64)\n df.index = pd.to_datetime(df.index)\n df = df.rename(columns=_string_map(df.columns))\n\n metadata = response_dict['Meta Data']\n _rename_dict_keys(metadata)\n\n metadata['begin_datetime'] = df.index.min()\n metadata['end_datetime'] = df.index.max()\n\n return df, metadata", "def _get_data(self):\n\n data = self.get_data()\n\n required_data = ['open','close','open_date','high','low']\n if not np.isin(required_data, data.columns).all():\n raise ImplementationError(f'''\n Data must contain columns: {required_data}\n ''')\n\n data = data.sort_values('open_date')\n data.index = data.open_date\n\n temp_dates = pd.unique(data.open_date)\n self.total_candles = len(temp_dates)\n self.start_date, self.end_date = min(temp_dates), max(temp_dates)\n\n # Divide df based on symbol, create DataEngine object, add to dict.\n data_dict = {}\n for symbol in self.symbols.symbol:\n try:\n data_dict[symbol] = DataEngine(data[data.symbol == symbol])\n except DiscontinuousError as err:\n print(f'There are missing dates in data for {symbol}')\n raise err\n except ValueError as err:\n print(f'No data for provided for symbol: {symbol}')\n self.symbols = self.symbols.drop(symbol)\n\n return data_dict", "async def _get_stock_hist_bars(\n self,\n symbol: str,\n multiplier: int,\n timespan: str,\n from_date: str,\n to_date: str,\n ) -> Union[pd.DataFrame, type(NotImplemented)]:\n # https://polygon.io/docs/get_v2_aggs_ticker__stocksTicker__range__multiplier___timespan___from___to__anchor\n symbol = urllib.parse.quote(symbol)\n response = await self._get(\n f\"/v2/aggs/ticker/{symbol}/range/{multiplier}/{timespan}/{from_date}/{to_date}\",\n params={\n \"adjusted\": \"false\",\n \"limit\": \"50000\",\n },\n )\n if response[\"status\"] == \"DELAYED\":\n raise errors.DataPermissionError\n if \"results\" not in response:\n return pd.DataFrame()\n return pd.DataFrame(response[\"results\"])", "def get_data(ticker, interval, start_date, end_date):\r\n # Display indication\r\n print('[INFO] {} - Retrieving {}_{} historical data'.format(get_now(), ticker, interval))\r\n # Download ticker's ohlcv\r\n ohlcv = yf.download(tickers=ticker, start=start_date, end=end_date, interval=interval)\r\n # Modify dataframe\r\n ohlcv.drop(columns=['Adj Close'], inplace=True)\r\n ohlcv.sort_index(axis=0, ascending=False, inplace=True)\r\n ohlcv.reset_index(inplace=True)\r\n if \"Datetime\" in ohlcv.columns:\r\n ohlcv['Datetime'] = ohlcv['Datetime'].astype(str).str[:-9]\r\n return ohlcv", "def time_series_daily_adj(symbol: str, outputsize: str ='compact') -> Tuple[pd.DataFrame, dict]:\n response = _fetch(symbol=symbol, function='TIME_SERIES_DAILY_ADJUSTED', outputsize=outputsize)\n\n response_dict = json.loads(response.content)\n\n df = 
pd.DataFrame.from_dict(response_dict[f'Time Series (Daily)'], orient='index', dtype=np.float64)\n df.index = pd.to_datetime(df.index)\n df = df.rename(columns=_string_map(df.columns))\n\n metadata = response_dict['Meta Data']\n _rename_dict_keys(metadata)\n\n metadata['begin_datetime'] = df.index.min()\n metadata['end_datetime'] = df.index.max()\n\n return df, metadata", "def get_index_portfolio_value_data(game_id: int, symbol: str, start_time: float = None,\n end_time: float = None) -> pd.DataFrame:\n start_time, end_time = get_time_defaults(game_id, start_time, end_time)\n base_value = get_index_reference(game_id, symbol)\n\n with engine.connect() as conn:\n df = pd.read_sql(\"\"\"\n SELECT timestamp, `value` FROM indexes\n WHERE symbol = %s AND timestamp >= %s AND timestamp <= %s;\"\"\", conn, params=[symbol, start_time, end_time])\n index_info = query_to_dict(\"SELECT * FROM index_metadata WHERE symbol = %s\", symbol)[0]\n\n # normalizes index to the same starting scale as the user\n df[\"value\"] = STARTING_VIRTUAL_CASH * df[\"value\"] / base_value\n df[\"username\"] = index_info[\"name\"]\n\n # When a game kicks off, it will generally be that case that there won't be an index data point at exactly that\n # time. We solve this here, create a synthetic \"anchor\" data point that starts at the same time at the game\n trade_start = make_index_start_time(start_time)\n return pd.concat([pd.DataFrame(dict(username=index_info[\"name\"], timestamp=[trade_start],\n value=[STARTING_VIRTUAL_CASH])), df])", "def _get_symbols(self):\n\n symbols = self.get_symbols()\n\n if isinstance(symbols, dict):\n keys = ['symbol', 'from_symbol', 'to_symbol']\n correct_keys = np.isin(keys, list(symbols.keys())).all()\n\n if not correct_keys:\n raise ImplementationError('''\n Dict should be in the form:\n {'symbol':[], 'from_symbol':[], 'to_symbol':[]}\n ''')\n else:\n symbols = pd.DataFrame(symbols, index = [symbols['symbol']])\n\n symbols.index = symbols.symbol\n\n return symbols" ]
[ "0.680046", "0.64363235", "0.64280266", "0.63833207", "0.62522256", "0.6238682", "0.6104469", "0.61005485", "0.5904347", "0.5890219", "0.58884716", "0.5882514", "0.5852515", "0.5850313", "0.58449453", "0.58279145", "0.5820402", "0.58095807", "0.58015716", "0.580064", "0.58005494", "0.5771778", "0.5769041", "0.5758837", "0.5680841", "0.567855", "0.5669645", "0.56686485", "0.5667307", "0.5635213" ]
0.768913
0
Register a new user: 1. Flag the user as inactive. 2. Create the link for password creation. 3. Send the email.
def register_new_user(user):
    user.is_active = False
    user.set_unusable_password()
    user.save()
    url = generate_url_reset(user)
    # TODO: write a proper body for the email
    send_email(user.email, url, 'aMUX Registration Confirm')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_valid(self, form):\n # Switching between temporary registration and main registration is easy with the is_active attribute.\n # The withdrawal process will also improve if you only set is_active to False.\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # Send activation URL\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': 'https' if self.request.is_secure() else 'http',\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('register/mail_template/create/subject.txt', context)\n message = render_to_string('register/mail_template/create/message.txt', context)\n\n user.email_user(subject, message)\n return redirect('register:user_create_done')", "def create_inactive_user(self, form):\n new_user = form.save(commit=False)\n new_user.is_active = False\n new_user.save()\n\n self.send_activation_email(new_user)\n\n return new_user", "def signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n subject = 'Activate Your neighwatch Account'\n message = render_to_string('registration/activation_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user),\n })\n user.email_user(subject, message)\n return redirect('account_activation_sent')\n else:\n form = SignUpForm()\n return render(request, 'registration/registration_form.html', {'form': form})", "def create_inactive_user(self,request,\n username,password,email,\n send_email=True, profile_callback=None, **kwargs):\n #如果存在用户的话不必进行新建只需对权限表进行操作即可,否则新建用户\n if User.objects.filter(email=email).count() == 0:\n new_user = User.objects.create_user(username, email, password)\n new_user.is_active = False\n new_user.save()\n registration_profile = self.create_profile(new_user)\n registration_profile.save()\n current_site = Site.objects.get_current()\n site_domain=current_site.domain\n if send_email:\n from django.core.mail import send_mail\n subject = render_to_string('registration/activation_email_subject.txt',\n\t\t\t\t\t\t\t {'site':get_current_site(request),\n\t\t\t\t\t\t\t\t 'username':username,\n\t\t\t\t\t\t\t\t 'password':password})\n subject = ''.join(subject.splitlines())\n message = render_to_string('registration/activation_email.txt',\n\t\t\t\t\t\t {'activation_key':registration_profile.activation_key,\n\t\t\t\t\t\t\t 'expiration_days':settings.ACCOUNT_ACTIVATION_DAYS,\n\t\t\t\t\t\t\t 'site':site_domain,\n\t\t\t\t\t\t\t 'username':username,\n\t\t\t\t\t\t\t 'password':password})\n logger.error(message)\n send_mail(subject,message,settings.DEFAULT_FROM_EMAIL,[new_user.email])\n else:\n\t\t\tnew_user = User.objects.get(email=email)\n\n# 创建普通用户NORMALUSER Profile\n new_normalprofile = NormalProfile(userid = new_user)\n new_normalprofile.save()\n# 对用户权限写入数据库\n new_authority = UserIdentity.objects.get(identity=NORMAL_USER)\n new_authority.auth_groups.add(new_user)\n new_authority.save()\n\n if profile_callback is not None:\n profile_callback(user=new_user)\n return new_user", "def new_user_form_valid(self, form):\n new_user = form.save()\n new_user.set_password(form.cleaned_data[\"password\"])\n\n h = hashlib.sha1()\n h.update(str(random.random()).encode('utf-8'))\n salt = h.hexdigest()[:5]\n\n h = hashlib.sha1()\n text = salt+new_user.name\n 
h.update(text.encode('utf-8'))\n\n new_user.activation_key = h.hexdigest()\n new_user.save()\n\n subject = \"Your Work Schedule: Confirm registration\"\n text = (\n \"\"\"Hi {}, \\n please confirm Your registration by clicking or\n copy-past this link \\n {}/user_account/activate/{}/ \\n\n Please confirm with in 48 houers. Thank You for using our app.\n \\n Your Sandbox Team\n \"\"\".format(new_user.name, HOST_NAME, new_user.activation_key))\n\n send_mail(\n subject,\n text,\n EMAIL_HOST_USER,\n [new_user.email],\n fail_silently=False\n )\n return HttpResponseRedirect(self.get_success_url())", "def create_inactive_user(self, form):\n\t\tnew_user = form.save(commit=False)\n\t\tnew_user.is_active = False\n\t\tnew_user.save()\n\n\t\tself.send_activation_email(new_user)\n\n\t\treturn new_user", "def post(self):\n requestData = request.form\n\n # Grab username and password from request\n # Generate a hash from password so its not stored in plaintext\n username = requestData['username']\n pwdhash = generate_password_hash(requestData['password'])\n\n # Check if user with given username already exists\n user = User.query.filter_by(username=username).first()\n\n # If not, create a new user and redirect to login page\n if user is None:\n try:\n user = User(username=username, pwdhash=pwdhash)\n except AssertionError:\n flash('Forbidden character detected in username', 'warning')\n return redirect(url_for('page.RegisterView:index'))\n db.session.add(user)\n db.session.commit()\n print user\n print user.get_activation_link()\n flash(\"\"\"\n We\\'ve sent you an email. Please click the link in the\n email to complete the creation of your account.\n \"\"\", 'info')\n link = user.get_activation_link()\n body = render_template(\"email.html\", link=link)\n self.send_email('Account activation',\n '[email protected]',\n [username], body)\n return redirect(url_for('page.LoginView:index'))\n\n # Otherwise show error message\n flash('Username already taken', 'info')\n return redirect(url_for('page.RegisterView:index'))", "def register(request):#, success_url=reverse('registrationsuccess')):\n\tif request.method == 'POST':\n\t\tform = RegistrationForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tnew_user = RegistrationProfile.objects.create_inactive_user(username=form.cleaned_data['username'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tpassword=form.cleaned_data['password1'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\temail=form.cleaned_data['email'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tname=form.cleaned_data['first_name'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tsurname=form.cleaned_data['last_name'])\n\t\t\treturn HttpResponseRedirect(reverse('registrationsuccess'))\n\telse:\n\t\tform = RegistrationForm()\n\treturn render_to_response(request, 'registration/registration_form.html', {'form': form })", "def register_user():\n pass", "def test_user_creation_email(self):\n self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n self.assertEqual(len(mail.outbox), 1)", "def register_user():\n new_email = request.form.get('email')\n new_password = request.form.get('password')\n\n # if email exists, flash message to say you can't create an account \n # if it doens't, create new user flash message telling it created successfully\n\n user = crud.get_user_by_email(new_email)\n\n if user:\n flash(\"Can't create an account with that email. Try again.\")\n else: \n crud.create_user(new_email, new_password)\n flash('Account created. 
Please log in.')\n \n return redirect('/')", "def register_new_user():\n register_form = UserAddForm()\n login_form = LoginForm()\n\n if register_form.validate_on_submit():\n try:\n user = User.signup(\n email=register_form.new_email.data,\n password=register_form.new_password.data,\n username=register_form.new_username.data,\n first_name=register_form.first_name.data.capitalize(),\n last_name=register_form.last_name.data.capitalize(),\n image_url=register_form.image_url.data or User.image_url.default.arg,\n cover_url=register_form.cover_url.data or User.cover_url.default.arg\n )\n db.session.commit()\n\n do_login(user)\n return redirect('/')\n except IntegrityError:\n flash(\n \"Email or username already registered! Please log in or try again\", 'danger')\n return render_template('home_anon.html', register_form=register_form, login_form=login_form)\n\n else:\n return render_template('home_anon.html', register_form=register_form, login_form=login_form)", "async def user_signup(\n form: SignUp,\n task: BackgroundTasks,\n db: Session = Depends(db_session)):\n user = User()\n user.name = form.name\n user.email = form.login\n user.hashed_password = PWD_CONTEXT.hash(form.password)\n user.disabled = False\n db.add(user)\n try:\n db.commit()\n except exc.IntegrityError:\n db.rollback\n return {\"success\": False, \"msg\": \"Пользователь уже зарегистрирован\"}\n\n task.add_task(send_welcome_email, user.email)\n return {\"success\": True}", "def register():\n if current_user.is_authenticated:\n return redirect(url_for('main.index'))\n form = RegistrationForm()\n if form.validate_on_submit():\n user = User(username=form.username.data.lower(), email=form.email.data)\n user.set_password(form.password.data)\n user.email_confirmed = False\n db.session.add(user)\n db.session.commit()\n send_confirmation_email(user.email)\n flash('Thanks for Registering. 
Account Successfully got created, \\\n Please check your email to confirm',\n 'success')\n return redirect(url_for('auth.login'))\n return render_template('register.html', title='Register',\n form=form), 417", "def register_user():\n\n email = request.form.get('email')\n password = request.form.get('password')\n user = crud.get_user_by_email(email)\n \n if user:\n flash(\"can't create account with email\")\n else:\n crud.create_user(email, password)\n flash(\"account successfully created\")\n\n return redirect('/')", "def create_inactive_user(self, username, password, email, send_email=True, profile_callback=None):\n # Create the user.\n new_user = User.objects.create_user(username, email, password)\n new_user.is_active = False\n new_user.save()\n \n # And finally create the registration profile.\n registration_profile = self.create_profile(new_user)\n \n # Create site-specific profile, if specified.\n if profile_callback is not None:\n profile_callback(user=new_user)\n \n if send_email:\n from django.core.mail import send_mail\n current_domain = Site.objects.get_current().domain\n subject = \"Activate your new account at %s\" % current_domain\n message_template = loader.get_template('registration/activation_email.txt')\n message_context = Context({ 'site_url': 'http://%s/' % current_domain,\n 'activation_key': registration_profile.activation_key,\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS })\n message = message_template.render(message_context)\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [new_user.email])\n return new_user", "def register():\n\n if current_user.is_authenticated:\n return redirect(url_for('general.show_dash'))\n\n form = RegistrationForm()\n\n if form.validate_on_submit():\n\n #Continua con la creacion de un usuario\n hashed_password = user_manager.hash_password(form.password.data)\n new_user = User(\n username=form.username.data,\n email=form.email.data,\n password=hashed_password,\n confirmed_at=datetime.datetime.utcnow(),\n is_enabled=True,\n )\n\n\n role='User'\n role_default = Role.query.filter_by(name=role).first()\n\n if not role_default:\n new_role_default = Role(name = 'User')\n new_user.roles.add(new_role_default)\n else:\n new_user.roles.add(role_default)\n\n try:\n correct = True\n db.session.add(new_user)\n db.session.commit()\n\n except Exception as e:\n # Catch anything unknown\n print(e)\n correct = False\n\n finally:\n if not correct:\n # Cleanup and show error\n db.session.rollback()\n flash('Error creating user, make sure username and email are unique','error')\n\n else:\n flash('Congratulations, you are now a registered user!','success')\n return redirect(url_for('user.login'))\n return render_template('extensions/flask_user/register.html', title='Register', form=form)", "def register():\r\n form = RegisterForm(request.form)\r\n\r\n if request.method == 'POST' and form.validate():\r\n new_user = User(form.email.data, form.password.data)\r\n g.session.add(new_user)\r\n g.session.commit()\r\n\r\n new_profile = Profile(form.first_name.data, form.last_name.data, new_user.id)\r\n g.session.add(new_profile)\r\n g.session.commit()\r\n # TODO: make it async\r\n if current_app.config[\"REQUIRE_EMAIL_CONFIRMATION\"]:\r\n send_confirmation(new_user)\r\n new_user.init_folders()\r\n logout_user()\r\n return redirect(url_for(\".login\"))\r\n return render_template(\"account/register_user.pug\", form=form)", "def create_inactive_user(self, username, password, email,\r\n send_email=True, profile_callback=None):\r\n new_user = 
User.objects.create_user(username, email, password)\r\n new_user.is_active = False\r\n new_user.save()\r\n \r\n registration_profile = self.create_profile(new_user)\r\n \r\n if profile_callback is not None:\r\n profile_callback(user=new_user)\r\n \r\n if send_email:\r\n from django.core.mail import send_mail\r\n current_site = Site.objects.get_current()\r\n \r\n subject = render_to_string('registration/activation_email_subject.txt',\r\n { 'site': current_site })\r\n # Email subject *must not* contain newlines\r\n subject = ''.join(subject.splitlines())\r\n \r\n message = render_to_string('registration/activation_email.txt',\r\n { 'activation_key': registration_profile.activation_key,\r\n 'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,\r\n 'site': current_site })\r\n \r\n send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [new_user.email])\r\n return new_user", "def register_user():\n\n email = request.form.get('email')\n password = request.form.get('password')\n user = crud.get_user_by_email(email)\n\n if user == None:\n crud.create_user(email, password)\n flash('Account created! You can now log in.')\n else: \n flash('Email already exists. Try again.')\n\n return redirect('/')", "def register(self, form):\n new_user = self.create_inactive_user(form)\n signals.user_registered.send(\n sender=self.__class__, user=new_user, request=self.request\n )\n return new_user", "def create_inactive_user(self, username, email, password, first_name=None, last_name=None):\n\n new_user = User.objects.create_user(username, email, password)\n new_user.is_active = False\n new_user.first_name = first_name\n new_user.last_name = last_name\n new_user.save()\n\n registration_profile = self.create_registration_profile(new_user)\n registration_profile.send_activation_email()\n\n if not registration_profile:\n return None\n\n return new_user", "def user_activation(user):\n act_hash = random_password(32)\n user.set_hashword(act_hash)\n user.save()\n base_url = url_for('public.home', _external=True)\n act_url = url_for(\n 'auth.activate',\n userid=user.id,\n userhash=act_hash,\n _external=True)\n if not 'mailman' in current_app.extensions:\n logging.warning('E-mail extension has not been configured')\n return act_hash\n msg = EmailMessage()\n msg.subject = 'Your dribdat account'\n msg.body = \\\n \"Hello %s,\\n\" % user.username \\\n + \"Thanks for signing up at %s\\n\\n\" % base_url \\\n + \"Tap here to activate your account:\\n\\n%s\" % act_url\n msg.to = [user.email]\n logging.info('Sending activation mail to user %d' % user.id)\n logging.debug(act_url)\n msg.send(fail_silently=True)\n return act_hash", "def register_user():\n\n username = request.form.get('username')\n email = request.form.get('email')\n password = request.form.get('password')\n\n user = crud.get_user_by_email(email)\n if user:\n flash('Cannot create an account with that email. 
Try again.')\n return redirect('/')\n else: \n crud.create_user(username, email, password)\n flash('Account created!') \n return redirect('/login')", "def user_register(request):\n DEBUG = False\n form = Form(request, RegistrationSchema)\n #mailer = get_mailer(request)\n\n # create a random string for email verification procedure\n # http://stackoverflow.com/questions/2257441/\n # python-random-string-generation-with-upper-case-letters-and-digits\n N = 6\n randomstring = ''.join(random.choice(string.ascii_uppercase\n + string.digits) for x in range(N))\n #print \" -- the random string: \" + randomstring\n\n URL = \"localhost:6543\"\n # ToDo XXX change this to be more generic\n\n if 'form.submitted' in request.POST and not form.validate():\n # form didn't validate\n request.session.flash('form does not validate!')\n if DEBUG: # pragma: no cover\n print \"submitted, but not validated\"\n else: # pragma: NO COVER # just for debugging, RLY\n if DEBUG:\n print \"form.submitted was not seen\"\n pass\n\n if 'form.submitted' in request.POST and form.validate():\n # ready for registration!\n #request.session.flash('form validated!')\n username = unicode(form.data['username'])\n\n message = Message(\n subject=\"C3S: confirm your email address\",\n sender=\"[email protected]\",\n recipients=[form.data['email']],\n body=\"Hello, \" + form.data['surname'] + \", \\n\"\n \"Please confirm your email address by clicking this link: \\n\"\n \"http://\" + URL + \"/user/confirm/\" + randomstring + \"/\"\n + form.data['username'] + \" \\n\"\n \"Thanks!\")\n msg_accountants = Message(\n subject=\"[C3S] new member registration\",\n sender=\"[email protected]\",\n recipients=['[email protected]'],\n body=\"Hello \\n\"\n \"A new member has registered with your site: \\n\"\n \"Username: \" + form.data['username'] + \" \\n\"\n \"First name: \" + form.data['surname'] + \" \\n\"\n \"Last name: \" + form.data['lastname'] + \" \\n\"\n \"Email: \" + form.data['email'] + \" \\n\"\n \"Thanks!\")\n\n user = User(\n username=username,\n password=unicode(form.data['password']),\n surname=unicode(form.data['surname']),\n lastname=unicode(form.data['lastname']),\n email=unicode(form.data['email']),\n email_is_confirmed=False,\n email_confirm_code=unicode(randomstring),\n phone=unicode(form.data['phone']),\n fax=unicode(form.data['fax']),\n )\n user.set_address(street=unicode(form.data['street']),\n number=unicode(form.data['number']),\n postcode=unicode(form.data['postcode']),\n city=unicode(form.data['city']),\n country=unicode(form.data['country']),\n )\n\n user_group = Group.get_Users_group()\n user.groups = [user_group]\n\n # dbsession.add(user)\n dbsession.flush(user)\n\n #\n # boto stuff: creating a bucket for that user\n # don't do that -- we better have one bucket for all tracks...\n #\n # from boto.exception import S3CreateError, BotoServerError\n # try:\n # c3sI2Conn.create_named_bucket(username)\n # request.session.flash(u'created bucket for ' + username)\n # except BotoServerError, e:\n # print(\"There was an error: \" + str(e) )\n # except S3CreateError, e:\n # print(\"There was an error: \" + str(e) )\n #\n # send email\n try:\n if DEBUG: # pragma: no cover\n print(\"sending email........\")\n else:\n pass\n #mailer.send(message)\n #mailer.send(msg_accountants)\n\n # instead of sending mails, we inform in-browser\n request.session.flash(\n 'DEBUG: not sending email. 
to test email confirmation view, '\n 'append this to URL to confirm email: /user/confirm/'\n + randomstring + '/'\n + str(user.username) + '/' + str(form.data['email']))\n except: # pragma: no cover\n print \"could not send email. no mail configured?\"\n\n # remember who this was == sign in user == log her in\n headers = remember(request, username)\n\n redirect_url = route_url('home', request)\n\n return HTTPFound(location=redirect_url, headers=headers)\n\n return {'form': FormRenderer(form), }", "def register():\n form = RegistrationForm(request.form)\n if request.method == 'POST' and form.validate():\n hash_var = pbkdf2_sha256.encrypt(form.password.data, rounds=200000, salt_size=16)\n user = Users(form.email.data, hash_var)\n db.session.add(user)\n db.session.commit()\n login_user(user, remember=True)\n flash('User Registered')\n return redirect(url_for('home'))\n return render_template('register.html', form=form)", "def create_inactive_user(self, username, password, email,\n locale=settings.LANGUAGE_CODE,\n text_template=None, html_template=None,\n subject=None, email_data=None,\n volunteer_interest=False, **kwargs):\n new_user = User.objects.create_user(username, email, password)\n new_user.is_active = False\n new_user.save()\n Profile.objects.create(user=new_user, locale=locale)\n\n registration_profile = self.create_profile(new_user)\n\n self.send_confirmation_email(\n registration_profile,\n text_template,\n html_template,\n subject,\n email_data,\n **kwargs)\n\n if volunteer_interest:\n statsd.incr('user.registered-as-contributor')\n group = Group.objects.get(name=CONTRIBUTOR_GROUP)\n new_user.groups.add(group)\n\n return new_user", "def register_user():\n\n fname = request.form.get('fname')\n lname = request.form.get('lname')\n email = request.form.get('email')\n password = request.form.get('password')\n tel = request.form.get('tel')\n\n user = crud.get_user_by_email(email)\n # Check to see if user exists in db already\n\n if user:\n flash('This email already exists. Log in or try again.')\n return redirect('/user-registration')\n else:\n user = crud.create_user(fname, lname, email, password, tel)\n flash('Account created sucessfully! 
Please log in')\n\n return render_template('login.html')", "async def create_user_open(\n *,\n user_in: schemas.UnprivilegedUserCreate,\n db: Session = Depends(deps.get_db),\n redis: aioredis.Redis = Depends(deps.get_redis),\n) -> Any:\n if not settings.USERS_OPEN_REGISTRATION:\n raise HTTPException(\n status_code=403,\n detail=\"Open user registration is forbidden on this server\",\n )\n user = crud.user.get_by_email(db, email=user_in.email)\n if user is not None:\n raise HTTPException(\n status_code=400,\n detail=\"The user with this username already exists in the system\",\n )\n user_in = schemas.UserCreate(user_in.dict(exclude_unset=True))\n user = await crud.user_cachedb.create(db, redis, obj_in=user_in)\n if settings.EMAILS_ENABLED and user_in.email:\n send_new_account_email(\n email_to=user_in.email, username=user_in.email, password=user_in.password\n )", "def registration_view(request):\n json = request.json_body\n user_query = User.get_user_by_email(request, request.json['email'])\n nickname_query = User\\\n .get_user_by_nickname(request, request.json['nickname'])\n if user_query is None:\n if nickname_query is None:\n url_token_confirmation = generate_secret()\n if json['repeat_password'] == json['password']:\n User.add_user(request,\n email=request.json['email'],\n nickname=request.json['nickname'],\n password=pbkdf2_sha256\n .hash(request.json['password']),\n url_token=url_token_confirmation,\n status_id=UserStatus\n .get_user_by_status(request,\n status=\"Non_active\").id,\n create_date=datetime.now())\n mailer = request.mailer\n message = Message(subject=\"confirm email\",\n sender=\"[email protected]\",\n recipients=[json['email']],\n body='http://localhost:3000/#/email_confirm/{}'\n .format(url_token_confirmation)\n )\n mailer.send_immediately(message, fail_silently=False)\n\n return {\"msg\": \"We sent token to your email address\"}\n else:\n return {\n \"msg\": \"Invalid password, please try again\",\n \"error\": \"password\",\n }\n else:\n return {\n \"msg\": \"Your nickname is taken, please choose another\",\n \"error\": \"nickname\",\n }\n else:\n return {\n \"msg\": \"Your email address is already registered\",\n \"error\": \"email\",\n }" ]
[ "0.7495984", "0.7278033", "0.72545713", "0.7148108", "0.7121109", "0.71074307", "0.7102032", "0.6869794", "0.68437666", "0.68377155", "0.6825812", "0.68167186", "0.6780573", "0.67794126", "0.67672414", "0.66898435", "0.6678319", "0.6676729", "0.6669521", "0.66605", "0.6625422", "0.66161865", "0.660641", "0.6593484", "0.659045", "0.6579601", "0.65782803", "0.6539588", "0.65195453", "0.6513095" ]
0.8152107
0
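The registration record above calls generate_url_reset(user) and send_email(...) without showing them. As a purely illustrative, hedged sketch (these helpers are not defined in the record, and the original project may implement them differently), one way to build such a password-creation link with Django's standard token machinery could look like this:

    # Hypothetical sketch only -- not taken from the dataset record above.
    # Assumes a Django project; uses only standard Django APIs.
    from django.contrib.auth.tokens import default_token_generator
    from django.core.mail import send_mail
    from django.utils.encoding import force_bytes
    from django.utils.http import urlsafe_base64_encode

    def generate_url_reset(user, base_url="https://example.com"):
        # Encode the user's primary key and a one-time token into a link
        # the user can follow to choose their first password.
        uid = urlsafe_base64_encode(force_bytes(user.pk))
        token = default_token_generator.make_token(user)
        return f"{base_url}/reset/{uid}/{token}/"

    def send_email(to_address, url, subject):
        # Minimal plain-text mail; the TODO in the record above notes that
        # a proper body still needs to be written.
        body = f"Follow this link to choose your password:\n{url}"
        send_mail(subject, body, "no-reply@example.com", [to_address])

The base URL and sender address here are placeholders; only the use of Django's default_token_generator and urlsafe_base64_encode reflects common practice for this kind of activation link.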
Apply an ESN defined by esncell (as created from `sparse_esncell`) to each input in xs with the initial state h0. Each new input uses the updated state from the previous step.
def generate_states(esncell, xs, h0):
    (map_ih, (Whh, shape), bh) = esncell
    def _step(h, x):
        #h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(x) + bh)
        h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(x))
        return (h, h)
    (h, hs) = lax.scan(_step, h0, xs)
    return (h, hs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(esncell, states, labels):\n Who = lstsq_stable(states, labels)\n return esncell + (Who,)", "def feedESN(features, neurons, mask, mask_bias, scale, mem, func, f_arg):\n \n ESN = np.hstack((np.matmul(features, mask), np.ones((np.shape(features)[0],1), dtype=np.double)))\n print(np.shape(ESN))\n print(np.min(ESN), np.max(ESN))\n p = np.zeros((1,neurons),dtype=np.double)\n \n for i in range(np.shape(features)[0]):\n in_val = scale * (ESN[i,:-1] + mask_bias) + p * mem\n \n ## Apply transform\n ESN[i,:-1] = func(in_val, f_arg)\n \n ## Connect preceding neighbour \n p = np.copy(np.roll(ESN[i,:-1],1))\n return ESN", "def compute_updates(self, xs, gs, state=None):\n raise NotImplementedError()", "def switch_to_tuned_inputs(self):\n \n self.h_e=self.inputs_flat.T\n self.h=np.vstack([self.h_e,self.h_i])", "def set_sparse_signals(self):\n\t\n\t\tparams_dSs = [self.mu_dSs, self.sigma_dSs]\n\t\tparams_Ss0 = [self.mu_Ss0, self.sigma_Ss0]\n\t\tself.dSs, self.idxs = sparse_vector([self.Nn, self.Kk], \n\t\t\t\t\t\t\t\t\t\t\t\tparams_dSs,\tseed=self.seed_dSs)\n\t\t\n\t\t# Replace components with conflicting background odor \n\t\tif self.Kk_split is not None and self.Kk_split != 0:\n\t\t\tassert 0 <= self.Kk_split <= self.Kk, \\\n\t\t\t\t\"Splitting sparse signal into two levels requires Kk_split\" \\\n\t\t\t\t\" to be non-negative and less than or equal to Kk.\"\n\t\t\tassert self.mu_dSs_2 is not None \\\n\t\t\t\tand self.sigma_dSs_2 is not None, \\\n\t\t\t\t\"Splitting sparse signal into two levels requires that\" \\\n\t\t\t\t\" mu_dSs_2 and sigma_dSs_2 are set.\"\n\n\t\t\tsp.random.seed(self.seed_dSs)\n\t\t\tself.idxs_2 = sp.random.choice(self.idxs[0], self.Kk_split, \n\t\t\t\t\t\t\t\t\t\t\treplace=False)\n\t\t\tfor idx_2 in self.idxs_2:\n\t\t\t\tself.dSs[idx_2] = sp.random.normal(self.mu_dSs_2, \n\t\t\t\t\t\t\t\t\t\t\t\t\tself.sigma_dSs_2)\n\t\telse:\n\t\t\tself.idxs_2 = []\n\t\t\tself.Kk_split = 0\n\t\t\t\n\t\t# Ss0 is the ideal (learned) background stimulus without noise\n\t\tself.Ss0, self.Ss0_noisy = sparse_vector_bkgrnd([self.Nn, self.Kk], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.idxs, params_Ss0,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tseed=self.seed_Ss0)\n\t\t\n\t\tself.Ss = self.dSs + self.Ss0_noisy", "def train_imed(esncell, states, inputs, labels, sigma=1.):\n Who = imed_lstsq_stable(states, inputs, labels, sigma)\n return esncell + (Who,)", "def _cells_initialize_states(cells, batch_axis, **kwargs):\n\n return list(itertools.chain(\n *[c.initialize_states(batch_axis, **kwargs) for c in cells]))", "def ldos_finite(h,e=0.0,n=10,nwf=4,delta=0.0001):\n if h.dimensionality!=1: raise # if it is not one dimensional\n intra = csc(h.intra) # convert to sparse\n inter = csc(h.inter) # convert to sparse\n interH = inter.H # hermitian\n m = [[None for i in range(n)] for j in range(n)] # full matrix\n for i in range(n): # add intracell\n m[i][i] = intra\n for i in range(n-1): # add intercell\n m[i][i+1] = inter\n m[i+1][i] = interH\n m = bmat(m) # convert to matrix\n (ene,wfs) = slg.eigsh(m,k=nwf,which=\"LM\",sigma=0.0) # diagonalize\n wfs = wfs.transpose() # transpose wavefunctions\n dos = (wfs[0].real)*0.0 # calculate dos\n for (ie,f) in zip(ene,wfs): # loop over waves\n c = 1./(1.+((ie-e)/delta)**2) # calculate coefficient\n dos += np.abs(f)*c # add contribution\n odos = spatial_dos(h,dos) # get the spatial distribution\n go = h.geometry.supercell(n) # get the supercell\n write_ldos(go.x,go.y,odos) # write in a file\n return dos # return the dos", "def update(self, x_train_single, updated_h):\n x_row = 
x_train_single.toarray()\n for i in range(self.num_models):\n self.models[i].partial_fit(x_row, [updated_h[i]])", "def __call__(self, inputs, state, scope=None):\n\n if not tf.contrib.framework.nest.is_sequence(state):\n raise ValueError(\n \"Expected state to be a tuple of length %d, but received: %s\"\n % (len(self.state_size), state))\n\n with tf.variable_scope(scope or \"gnmt_attention_multi_cell\"):\n new_states = []\n\n with tf.variable_scope(\"cell_0_attention\"):\n attention_cell = self._cells[0]\n attention_state = state[0]\n cur_inp, new_attention_state = attention_cell(inputs, attention_state)\n new_states.append(new_attention_state)\n\n for i in range(1, len(self._cells)):\n with tf.variable_scope(\"cell_%d\" % i):\n cell = self._cells[i]\n cur_state = state[i]\n\n if not isinstance(cur_state, tf.contrib.rnn.LSTMStateTuple):\n raise TypeError(\"`state[{}]` must be a LSTMStateTuple\".format(i))\n\n cur_state = cur_state._replace(h=tf.concat(\n [cur_state.h, new_attention_state.attention], 1))\n\n cur_inp, new_state = cell(cur_inp, cur_state)\n new_states.append(new_state)\n\n return cur_inp, tuple(new_states)", "def _propagate(dim_indices, conf, cells, c_prev, m_prev, new_output, new_state,\n first_call):\n if len(dim_indices) == 0:\n return\n\n # Because of the way RNNCells are implemented, we take the last dimension\n # (H_{N-1}) out and feed it as the state of the RNN cell\n # (in `last_dim_output`).\n # The input of the cell (H_0 to H_{N-2}) are concatenated into `cell_inputs`\n if conf.num_dims > 1:\n ls_cell_inputs = [None] * (conf.num_dims - 1)\n for d in conf.dims[:-1]:\n if new_output[d.idx] is None:\n ls_cell_inputs[d.idx] = m_prev[d.idx]\n else:\n ls_cell_inputs[d.idx] = new_output[d.idx]\n cell_inputs = array_ops.concat(ls_cell_inputs, 1)\n else:\n cell_inputs = array_ops.zeros([m_prev[0].get_shape().as_list()[0], 0],\n m_prev[0].dtype)\n\n last_dim_output = (new_output[-1]\n if new_output[-1] is not None else m_prev[-1])\n\n for i in dim_indices:\n d = conf.dims[i]\n if d.non_recurrent_fn:\n if conf.num_dims > 1:\n linear_args = array_ops.concat([cell_inputs, last_dim_output], 1)\n else:\n linear_args = last_dim_output\n with vs.variable_scope('non_recurrent' if conf.tied else\n 'non_recurrent/cell_{}'.format(i)):\n if conf.tied and not (first_call and i == dim_indices[0]):\n vs.get_variable_scope().reuse_variables()\n\n new_output[d.idx] = layers.fully_connected(\n linear_args,\n num_outputs=conf.num_units,\n activation_fn=d.non_recurrent_fn,\n weights_initializer=(vs.get_variable_scope().initializer or\n layers.initializers.xavier_initializer),\n weights_regularizer=vs.get_variable_scope().regularizer)\n else:\n if c_prev[i] is not None:\n cell_state = (c_prev[i], last_dim_output)\n else:\n # for GRU/RNN, the state is just the previous output\n cell_state = last_dim_output\n\n with vs.variable_scope('recurrent' if conf.tied else\n 'recurrent/cell_{}'.format(i)):\n if conf.tied and not (first_call and i == dim_indices[0]):\n vs.get_variable_scope().reuse_variables()\n cell = cells[i]\n new_output[d.idx], new_state[d.idx] = cell(cell_inputs, cell_state)", "def update(self, x_train_single, updated_h):\n # x_row = cp.array(x_train_single.toarray())\n # cp.cuda.Stream.null.synchronize()\n updater(x_train_single,updated_h,self.weights,self.num_features,self.num_models,self.learning_rate)\n # self.biases += updated_h * self.learning_rate", "def lstmcell_grad_h(input, hx, cx, w_ih, w_hh, b_ih, b_hh, dh, dc, target=\"cce\"):\n # things from fwd\n batch, input_size = 
get_shape(input)\n _, hidden_size = get_shape(hx)\n xh = akg.topi.concatenate((hx, input), 1)\n whl = [w_ih, w_hh]\n W = Concat(whl, 1) # [4*hidden_size, input_size+hidden_size]\n\n gates = dense(input, w_ih, b_ih, True) + dense(hx, w_hh, b_hh, True)\n\n ingate_in, forgetgate_in, cellgate_in, outgate_in = Split(gates, 4, 1)\n\n ingate = sigmoid(ingate_in)\n forgetgate = sigmoid(forgetgate_in)\n cellgate = Tanh(cellgate_in)\n outgate = sigmoid(outgate_in)\n cy = (forgetgate * cx) + (ingate * cellgate)\n tanh_cy = Tanh(cy)\n #hy = outgate * tanh_cy\n\n # starts bwd\n # head * dh/do shape [n,]\n doutgate = dh * tanh_cy\n doutgate_in = outgate * (1 - outgate) * doutgate\n kk = akg.tvm.reduce_axis((0, batch))\n dWo = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk, j] * doutgate_in(kk, i), axis=kk), name=\"dWo\")\n\n dtanh_cy = dh * outgate\n dc = (1 - tanh_cy * tanh_cy) * dtanh_cy\n\n dingate = cellgate * dc\n dingate_in = ingate * (1 - ingate) * dingate\n kk3 = akg.tvm.reduce_axis((0, batch))\n dWi = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk3, j] * dingate_in(kk3, i), axis=kk3), name=\"dWi\")\n\n dforgetgate = dc * cx\n dforgetgate_in = forgetgate * (1 - forgetgate) * dforgetgate\n kk2 = akg.tvm.reduce_axis((0, batch))\n dWf = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk2, j] * dforgetgate_in(kk2, i), axis=kk2), name=\"dWf\")\n\n dcellgate = ingate * dc\n dcellgate_in = (1 - cellgate * cellgate) * dcellgate\n kk4 = akg.tvm.reduce_axis((0, batch))\n dWc = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk4, j] * dcellgate_in(kk4, i), axis=kk4), name=\"dWc\")\n\n dW = akg.topi.concatenate((dWi, dWf, dWc, dWo))\n\n db = akg.topi.concatenate((dingate_in, dforgetgate_in, dcellgate_in, doutgate_in), 1)\n\n kk5 = akg.tvm.reduce_axis((0, 4 * hidden_size))\n dxh = akg.tvm.compute((batch, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(W[kk5, j] * db[i, kk5], axis=kk5), name=\"dxh\")\n dhx = akg.tvm.compute((batch, hidden_size), lambda i, j: dxh[i, j], name=\"dhx\")\n dx = akg.tvm.compute((batch, input_size), lambda i, j: dxh[i, j + hidden_size], name=\"dx\")\n\n dcx = forgetgate * dc\n\n dw_ih = akg.tvm.compute(w_ih.shape, lambda i, j: dW[i, j])\n #dw_hh = akg.tvm.compute(w_hh.shape, lambda i, j: dW[i, j + input_size])\n\n bhr = akg.tvm.reduce_axis((0, batch))\n\n db_ih = akg.tvm.compute((4 * hidden_size,), lambda i: akg.tvm.sum(db[i, bhr], axis=bhr), name=\"dbih\")\n\n bir = akg.tvm.reduce_axis((0, batch))\n\n db_hh = akg.tvm.compute((4 * hidden_size,), lambda i: akg.tvm.sum(db[i, bir], axis=bir), name=\"dbhh\")\n\n return dw_ih, w_hh, db_ih, db_hh, dcx, dhx, dx", "def SA_run(seed, alpha, rho, x0=5, n0=100, iter_count=1000, mu_1=2, mu_2=5, sigma_1=1, sigma_2=1, SAA_seed=None):\n np.random.seed(seed)\n begin = datetime.datetime.now()\n val_list = []\n der_list = []\n x_list = [x0]\n for t in range(1, iter_count + 1):\n eps = eps_num / (eps_base + t) ** 0.8\n n = n0 + int(n_step * t)\n val, der = estimate(x_list[t - 1], n, alpha, rho, mu_1, mu_2, sigma_1, sigma_2, SAA_seed)\n x_next = np.array(x_list[t - 1]) - eps * np.array(der)\n x_list.append(x_next)\n val_list.append(val)\n der_list.append(der)\n now = datetime.datetime.now()\n print(rho + \"_\" + str(alpha) + \" t = \", t, \" x = \", x_list[t], \" val = \", val, \" der = \",\n der, \" time: \", now - begin)\n # np.save(\"sa_out/normal/\" + rho + \"_\" + str(alpha) + 
\"_iter_\" + str(iter_count) + \"_eps\" + str(\n # eps_num) + \"-\" + str(eps_base) + \"_x.npy\", x_list)\n return x_list[1:]", "def updateNodeStates (self,listAtoms):\r\n \r\n for i in range(len(listAtoms)):\r\n for j in range(len(listAtoms[i].nodeArray)):\r\n self.mol[i].nodeArray[j].state = listAtoms[i].nodeArray[j].state", "def call(self, inputs, state):\n _check_rnn_cell_input_dtypes([inputs, state])\n\n h = self._real_units\n s = self._slots + 1\n state, last = state[:, : s * h], state[:, s * h :]\n state = tf.reshape(state, [-1, s, h])\n\n att_logit_mat = tf.matmul(inputs, self.heads, transpose_b=True)\n\n att_weights = tf.nn.softmax(self._beta * att_logit_mat, axis=-1)\n att_weights = tf.expand_dims(att_weights, 2)\n\n h_hat = tf.reduce_sum(\n input_tensor=tf.multiply(state[:, : self._slots, :], att_weights), axis=1\n )\n h_hat = (h_hat + state[:, self._slots, :]) / 2\n\n n_a, n_b = tf.nn.l2_normalize(last, 1), tf.nn.l2_normalize(inputs, 1)\n dist = tf.expand_dims(tf.reduce_sum(input_tensor=n_a * n_b, axis=1), 1)\n dist = tf.math.pow(self._alpha, dist)\n\n att_weights = att_weights * tf.expand_dims(dist, 1)\n\n reset = tf.sigmoid(\n tf.compat.v1.nn.xw_plus_b(\n tf.concat([inputs, h_hat], axis=-1), self._reset_W, self._reset_b\n )\n )\n erase = tf.sigmoid(\n tf.compat.v1.nn.xw_plus_b(\n tf.concat([inputs, h_hat], axis=-1), self._erase_W, self._erase_b\n )\n )\n add = tf.tanh(\n tf.compat.v1.nn.xw_plus_b(\n tf.concat([inputs, reset * h_hat], axis=-1), self._add_W, self._add_b\n )\n )\n\n start_part01 = state[:, : self._slots, :]\n state01 = start_part01 * (\n tf.ones_like(start_part01) - att_weights * tf.expand_dims(erase, 1)\n )\n state01 = state01 + att_weights * tf.expand_dims(erase, 1) * tf.expand_dims(\n add, 1\n )\n state01 = tf.reshape(state01, [-1, self._slots * self._real_units])\n\n start_part02 = state[:, self._slots, :]\n state02 = start_part02 * (tf.ones_like(start_part02) - dist * erase)\n state02 = state02 + dist * erase * add\n state = tf.concat([state01, state02, inputs], axis=-1)\n return state, state", "def calculate_xs(self, xs_dict):\n self._xs = xs_dict[self.cell.fill].calculate_xs(self.e)", "def ndhess(f, delta=DELTA):\n def hess_f(*args, **kwargs):\n x = args[0]\n hess_val = numpy.zeros(x.shape + x.shape)\n it = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])\n for xi in it:\n i = it.multi_index\n jt = numpy.nditer(x, op_flags=['readwrite'], flags=['multi_index'])\n for xj in jt:\n j = jt.multi_index\n xi += delta/2\n xj += delta/2\n fpp = f(x)\n xj -= delta\n fpm = f(x)\n xi -= delta\n fmm = f(x)\n xj += delta\n fmp = f(x)\n xi += delta/2\n xj -= delta/2\n hess_val[i + j] = (fpp + fmm - fpm - fmp)/delta**2\n return hess_val\n return hess_f", "def neighbor_dE(self, state):\n\n dE = np.zeros(self.n)\n for i in range(self.n):\n dE[i] = 2*state[i]*self.hJ[i] +2*state[i]*(state*self.Jmat[i]).sum()\n return dE", "def energy_step(inputs, states):\n\n assert_msg = \"States must be a list. 
However states {} is of type {}\".format(states, type(states))\n assert isinstance(states, list) or isinstance(states, tuple), assert_msg\n\n \"\"\" Some parameters required for shaping tensors\"\"\"\n en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]\n de_hidden = inputs.shape[-1]\n\n \"\"\" Computing S.Wa where S=[s0, s1, ..., si]\"\"\"\n # <= batch_size*en_seq_len, latent_dim\n reshaped_enc_outputs = K.reshape(encoder_out_seq, (-1, en_hidden))\n # <= batch_size*en_seq_len, latent_dim\n W_a_dot_s = K.reshape(K.dot(reshaped_enc_outputs, self.W_a), (-1, en_seq_len, en_hidden))\n if verbose:\n print('wa.s>',W_a_dot_s.shape)\n\n \"\"\" Computing hj.Ua \"\"\"\n U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1) # <= batch_size, 1, latent_dim\n if verbose:\n print('Ua.h>',U_a_dot_h.shape)\n\n \"\"\" tanh(S.Wa + hj.Ua) \"\"\"\n # <= batch_size*en_seq_len, latent_dim\n reshaped_Ws_plus_Uh = K.tanh(K.reshape(W_a_dot_s + U_a_dot_h, (-1, en_hidden)))\n if verbose:\n print('Ws+Uh>', reshaped_Ws_plus_Uh.shape)\n\n \"\"\" softmax(va.tanh(S.Wa + hj.Ua)) \"\"\"\n # <= batch_size, en_seq_len\n e_i = K.reshape(K.dot(reshaped_Ws_plus_Uh, self.V_a), (-1, en_seq_len))\n # <= batch_size, en_seq_len\n e_i = K.softmax(e_i)\n\n if verbose:\n print('ei>', e_i.shape)\n\n return e_i, [e_i]", "def define_ising_helper_functions():\n\n @njit(cache=True)\n def fast_sum(J, s):\n \"\"\"Helper function for calculating energy in calc_e(). Iterates couplings J.\"\"\"\n e = np.zeros(s.shape[0])\n for n in range(s.shape[0]):\n k = 0\n for i in range(s.shape[1]-1):\n for j in range(i+1,s.shape[1]):\n e[n] += J[k]*s[n,i]*s[n,j]\n k += 1\n return e\n\n @njit(\"float64[:](int64[:,:],float64[:])\")\n def calc_e(s, params):\n \"\"\"\n Parameters\n ----------\n s : 2D ndarray of ints\n state either {0,1} or {+/-1}\n params : ndarray\n (h, J) vector\n\n Returns\n -------\n E : ndarray\n Energies of all given states.\n \"\"\"\n \n e = -fast_sum(params[s.shape[1]:],s)\n e -= np.sum(s*params[:s.shape[1]],1)\n return e\n \n def mch_approximation(samples, dlamda):\n \"\"\"Function for making MCH approximation step for Ising model.\"\"\"\n dE = calc_e(samples, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = pair_corr(samples, weights=np.exp(-dE)/len(dE), concat=True) * ZFraction \n assert not (np.any(predsisj < -1.00000001) or\n np.any(predsisj>1.000000001)),\"Predicted values are beyond limits, (%1.6f,%1.6f)\"%(predsisj.min(),\n predsisj.max())\n return predsisj\n \n @njit(cache=True)\n def calc_observables(samples):\n \"\"\"Observables for Ising model.\"\"\"\n n = samples.shape[1]\n obs = np.zeros((samples.shape[0], n+n*(n-1)//2))\n \n k = 0\n for i in range(n):\n obs[:,i] = samples[:,i]\n for j in range(i+1,n):\n obs[:,n+k] = samples[:,i] * samples[:,j]\n k += 1\n return obs\n return calc_e, calc_observables, mch_approximation", "def calculate_E(self):\n \n E = 0\n for i in xrange(self.size):\n Ei = self.h[i]\n Ei += 0.5*sum((1 if self.spins[j] else -1)*self.J[i,j] for j in self.adjacency[i])\n if not self.spins[i]:\n Ei *= -1\n E += Ei\n \n return E", "def iwae(cell,\n inputs,\n seq_lengths,\n float_type=tf.float32,\n num_samples=1, # if num_samples=1, then iwae==elbo\n parallel_iterations=30,\n swap_memory=True):\n batch_size = tf.shape(seq_lengths)[0]\n max_seq_len = tf.reduce_max(seq_lengths)\n seq_mask = tf.transpose(\n tf.sequence_mask(seq_lengths, maxlen=max_seq_len, dtype=float_type),\n perm=[1, 0]) # compute mask for inputs. 
[max_seq_len, batch_size]\n if num_samples > 1:\n inputs, seq_mask = nested.tile_tensors([inputs, seq_mask], [1, num_samples])\n inputs_ta, mask_ta = nested.tas_for_tensors([inputs, seq_mask], max_seq_len)\n\n t0 = tf.constant(0, tf.int32)\n init_states = cell.zero_state(batch_size * num_samples, float_type)\n ta_names = ['log_weights', 'log_ess']\n tas = [tf.TensorArray(float_type, max_seq_len, name='%s_ta' % n)\n for n in ta_names] # define tas for while\n log_weights_acc = tf.zeros([num_samples, batch_size], dtype=float_type)\n kl_acc = tf.zeros([num_samples * batch_size], dtype=float_type)\n accs = (log_weights_acc, kl_acc)\n\n trajectories = {\n \"mask\": tf.TensorArray(tf.float32, max_seq_len, name=\"mask\"),\n \"ob\": tf.TensorArray(tf.float32, max_seq_len, name=\"ob\"),\n \"ac\": tf.TensorArray(tf.float32, max_seq_len, name=\"ac\"),\n \"fef_vpred\": tf.TensorArray(tf.float32, max_seq_len, name=\"fef_vpred\"),\n \"log_p_z\": tf.TensorArray(tf.float32, max_seq_len, name=\"log_p_z\"),\n \"log_p_x_given_z\": tf.TensorArray(tf.float32, max_seq_len, name=\"log_p_x_given_z\"),\n \"log_q_z\": tf.TensorArray(tf.float32, max_seq_len, name=\"log_q_z\")\n }\n\n def while_predicate(t, *unused_args):\n return t < max_seq_len\n\n def while_step(t, rnn_state, tas, accs, trajectories):\n \"\"\"Implements one timestep of IWAE computation.\"\"\"\n log_weights_acc, kl_acc = accs\n cur_inputs, cur_mask = nested.read_tas([inputs_ta, mask_ta], t)\n # Run the cell for one step.\n log_q_z, log_p_z, log_p_x_given_z, kl, new_state, rl_term = cell(\n cur_inputs,\n rnn_state,\n cur_mask,\n )\n # Compute the incremental weight and use it to update the current\n # accumulated weight.\n kl_acc += kl * cur_mask\n log_alpha = (log_p_x_given_z + log_p_z - log_q_z) * cur_mask\n log_alpha = tf.reshape(log_alpha, [num_samples, batch_size])\n log_weights_acc += log_alpha\n # Calculate the effective sample size.\n ess_num = 2 * tf.reduce_logsumexp(log_weights_acc, axis=0)\n ess_denom = tf.reduce_logsumexp(2 * log_weights_acc, axis=0)\n log_ess = ess_num - ess_denom\n # Update the Tensorarrays and accumulators.\n ta_updates = [log_weights_acc, log_ess]\n new_tas = [ta.write(t, x) for ta, x in zip(tas, ta_updates)]\n new_accs = (log_weights_acc, kl_acc)\n\n new_trajectories = {\n \"mask\": trajectories[\"mask\"].write(t, cur_mask),\n \"ob\": trajectories[\"ob\"].write(t, rl_term[0]),\n \"ac\": trajectories[\"ac\"].write(t, rl_term[1]),\n \"fef_vpred\": trajectories[\"fef_vpred\"].write(t, rl_term[2]),\n \"log_p_z\": trajectories[\"log_p_z\"].write(t, log_p_z),\n \"log_p_x_given_z\": trajectories[\"log_p_x_given_z\"].write(t, log_p_x_given_z),\n \"log_q_z\": trajectories[\"log_q_z\"].write(t, log_q_z)\n }\n\n return t + 1, new_state, new_tas, new_accs, new_trajectories\n\n _, _, tas, accs, trajectories = tf.while_loop(\n while_predicate,\n while_step,\n loop_vars=(t0, init_states, tas, accs, trajectories),\n parallel_iterations=parallel_iterations,\n swap_memory=swap_memory)\n\n log_weights, log_ess = [x.stack() for x in tas]\n final_log_weights, kl = accs\n log_p_hat = (tf.reduce_logsumexp(final_log_weights, axis=0) -\n tf.log(tf.to_float(num_samples)))\n kl = tf.reduce_mean(tf.reshape(kl, [num_samples, batch_size]), axis=0)\n log_weights = tf.transpose(log_weights, perm=[0, 2, 1])\n\n trajectories = {\n \"mask\": trajectories[\"mask\"].stack(),\n \"ob\": trajectories[\"ob\"].stack(),\n \"ac\": trajectories[\"ac\"].stack(),\n \"fef_vpred\": trajectories[\"fef_vpred\"].stack(),\n \"log_p_z\": 
trajectories[\"log_p_z\"].stack(),\n \"log_p_x_given_z\": trajectories[\"log_p_x_given_z\"].stack(),\n \"log_q_z\": trajectories[\"log_q_z\"].stack(),\n \"log_weights\": log_weights,\n }\n\n return log_p_hat, kl, log_weights, log_ess, trajectories", "def __init__(self, time_grid=None, space_grid=None,\n sensors=None,\n loc_onramp=None, loc_offramp=None,\n vm_cells=None, beta_cells=None, rhoc_cells=None, wc_cells=None,\n num_ensembles=0,\n std_model_noise=None, queue_threshold=17.88,\n init_rho=0, init_qin=0.5, init_qout=0.0):\n\n self.__debug = False\n self.__debug_entrance_sensor = 'IDEALLoc100m'\n self.__debug_exit_sensor = 'IDEALLoc8300m'\n\n # initialize the superclass Estimator\n Estimator.__init__(self, time_grid, space_grid,\n loc_onramp, loc_offramp,\n sensors,\n queue_threshold)\n\n # build the index for the system state\n self.x_index, dim_state = self.__build_state_index()\n\n # initialize the super class\n EnKF.__init__(self, dim_state, num_ensembles)\n\n # y_index, and dim_obs, which will be dynamically updated upon arrival of each new data\n self.y_index = None\n self.dim_obs = None\n\n # keep track of the flow between cells for each ensemble which will be used to construct the observation\n self.__f_flow = {}\n self.__f_flow['time'] = np.array(self.time_grid[1:])\n self.__f_flow['data'] = OrderedDict()\n for i in range(0, self.num_ensembles):\n self.__f_flow['data'][i] = []\n\n # keep track of the speed between cells for each ensemble which will be used to construct the observation\n self.__f_speed = {}\n self.__f_speed['time'] = np.array(self.time_grid[1:])\n self.__f_speed['data'] = OrderedDict()\n for i in range(0, self.num_ensembles):\n self.__f_speed['data'][i] = []\n\n # save all the estimated states here\n self.est_state_all = np.matrix(np.zeros((self.dim_state, self.num_steps), float))\n\n # =================================================\n # Add additive noise to state.\n self.Q = OrderedDict()\n # initialize with all cell var\n self.Q = np.diag(np.ones(dim_state) * (std_model_noise['cell'] ** 2))\n\n # print('onramps:{0}; offramps:{1}'.format(self.cell_onramp, self.cell_offramp))\n # add onramp larger noise\n if self.cell_onramp is not None:\n for on_cell in self.cell_onramp:\n if 0 <= on_cell <= self.num_cells:\n idx = self.x_index['density'][on_cell]\n self.Q[idx, idx] = std_model_noise['oncell'] ** 2\n # add offramp larger noise\n if self.cell_offramp is not None:\n for off_cell in self.cell_offramp:\n if 0 <= off_cell <= self.num_cells:\n idx = self.x_index['density'][off_cell]\n self.Q[idx, idx] = std_model_noise['offcell'] ** 2\n # add qin variance\n idx = self.x_index['qin']\n self.Q[idx, idx] = std_model_noise['qin'] ** 2\n # add qout variance\n idx = self.x_index['qout']\n self.Q[idx, idx] = std_model_noise['qout'] ** 2\n\n # self.Q = std_model_noise\n # if np.size( self.Q['vm'] ) == 1:\n # # if it was a single value, then it was specified as std, not var (which = std^2)\n # self.Q['vm'] = np.diag( np.ones( self.num_cells )*(self.Q['vm']**2) )\n # if np.size( self.Q['beta'] ) == 1:\n # self.Q['beta'] = np.diag( np.ones( self.num_cells )*(self.Q['beta']**2) )\n # if np.size( self.Q['rhoc'] ) == 1:\n # self.Q['rhoc'] = np.diag( np.ones( self.num_cells )*(self.Q['rhoc']**2) )\n # if np.size( self.Q['wc'] ) == 1:\n # self.Q['wc'] = np.diag( np.ones( self.num_cells )*(self.Q['wc']**2) )\n #\n # if self.loc_onramp is not None and np.size(self.Q['onramp']) == 1:\n # self.Q['onramp'] = np.diag( np.ones(len(loc_onramp))*(self.Q['onramp']**2) )\n # if 
self.loc_offramp is not None and np.size(self.Q['offramp']) == 1:\n # self.Q['offramp'] = np.diag( np.ones(len(loc_offramp))*(self.Q['offramp']**2) )\n\n\n # =================================================\n # save the fundamental diagram for each cell\n # vm parameter\n if isinstance(vm_cells, numbers.Number):\n self.vm_cells = np.ones((self.num_cells, 1)) * float(vm_cells)\n else:\n self.vm_cells = np.array(vm_cells).astype(float)\n self.vm_cells = self.vm_cells.reshape((self.num_cells, 1))\n\n # beta parameter\n if isinstance(beta_cells, numbers.Number):\n self.beta_cells = np.ones((self.num_cells, 1)) * float(beta_cells)\n else:\n self.beta_cells = np.array(beta_cells).astype(float)\n self.beta_cells = self.beta_cells.reshape((self.num_cells, 1))\n\n # rhoc parameter\n if isinstance(rhoc_cells, numbers.Number):\n self.rhoc_cells = np.ones((self.num_cells, 1)) * float(rhoc_cells)\n else:\n self.rhoc_cells = np.array(rhoc_cells).astype(float)\n self.rhoc_cells = self.rhoc_cells.reshape((self.num_cells, 1))\n\n # wc parameter\n if isinstance(wc_cells, numbers.Number):\n self.wc_cells = np.ones((self.num_cells, 1)) * float(wc_cells)\n else:\n self.wc_cells = np.array(wc_cells).astype(float)\n self.wc_cells = self.wc_cells.reshape((self.num_cells, 1))\n\n # other use ful parameters\n self.qmax_cells = self.vm_cells * self.rhoc_cells - \\\n self.vm_cells * (self.rhoc_cells ** 2) / self.beta_cells\n\n self.rhomax_cells = - self.qmax_cells / self.wc_cells + self.rhoc_cells\n\n # =======================================================================\n self.init_rho = init_rho\n self.init_qin = init_qin\n self.init_qout = init_qout\n\n # =======================================================================\n # FOR DEBUGGING\n # recored the forecast and analysis value for qin and qout\n if self.__debug:\n self.qin_f = []\n self.qin_a = []\n self.qin_obs = []\n self.qout_f = []\n self.qout_a = []\n self.qout_obs = []", "def NM_run(seed, alpha, rho, x0=5, n0=100, iter_count=1000, mu_1=2, mu_2=5, sigma_1=1, sigma_2=1, SAA_seed=None):\n np.random.seed(seed)\n begin = datetime.datetime.now()\n res = minimize(estimate_no_grad, np.array([x0]),\n args=(n0, alpha, rho, mu_1, mu_2, sigma_1, sigma_2, SAA_seed),\n method='Nelder-Mead',\n options={'disp': True,\n 'maxiter': iter_count,\n 'maxfev': iter_count,\n 'return_all': True})\n print(res)\n x_list = np.array(res.allvecs).reshape(-1).tolist()\n now = datetime.datetime.now()\n print('done time: %s' % (now - begin))\n print('call count: %d' % call_count)\n # np.save(\"sa_out/normal/NM_\" + rho + \"_\" + str(alpha) + \"_iter_\" + str(iter_count) + \"_x.npy\", x_list)\n return x_list", "def set_cell(state: State) -> State:\n assert state.index < state.array_len\n return state._replace(\n array=state.array[: state.index] + [state.acc] + state.array[state.index + 1 :]\n )", "def _apply_sparse(self, grad, var):\n return self._apply_sparse_shared(\n grad.values,\n var,\n grad.indices,\n lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda\n x,\n i,\n v,\n use_locking=self._use_locking))", "def inner_apply(self, inputs, states, cells, mask=None):\n def slice_last(x, no):\n return x[:, no*self.dim: (no+1)*self.dim]\n\n activation = tensor.dot(states, self.W_state) + inputs\n in_gate = self.gate_activation.apply(\n slice_last(activation, 0) + cells * self.W_cell_to_in)\n forget_gate = self.gate_activation.apply(\n slice_last(activation, 1) + cells * self.W_cell_to_forget)\n next_cells = (\n forget_gate * cells +\n in_gate * 
self.activation.apply(slice_last(activation, 2)))\n out_gate = self.gate_activation.apply(\n slice_last(activation, 3) + next_cells * self.W_cell_to_out)\n next_states = out_gate * self.activation.apply(next_cells)\n\n if mask:\n next_states = (mask[:, None] * next_states +\n (1 - mask[:, None]) * states)\n next_cells = (mask[:, None] * next_cells +\n (1 - mask[:, None]) * cells)\n\n return next_states, next_cells, in_gate, forget_gate, out_gate", "def _step_EM(\n self, X, indices_ones, pi, alpha_1, alpha_2, tau_1, tau_2, n1, n2\n ):\n\n eps_1 = max(1e-4 / n1, 1e-9)\n eps_2 = max(1e-4 / n2, 1e-9)\n nq, nl = self.n_row_clusters, self.n_column_clusters\n\n ########################## E-step ##########################\n u = X.dot(tau_2) # Shape is (n1,nl)\n v = X.T.dot(tau_1) # Shape is (n2,nq)\n\n # Update of tau_1 with sparsity trick.\n l_tau_1 = (\n (\n (u.reshape(n1, 1, nl))\n * (self._np.log(pi) - self._np.log(1 - pi)).reshape(1, nq, nl)\n ).sum(2)\n + self._np.log(alpha_1.reshape(1, nq))\n + (self._np.log(1 - pi) @ tau_2.T).sum(1)\n )\n\n # For computationnal stability reasons 1.\n l_tau_1 -= l_tau_1.max(axis=1).reshape(n1, 1)\n tau_1 = self._np.exp(l_tau_1)\n tau_1 /= tau_1.sum(axis=1).reshape(n1, 1) # Normalize.\n\n # For computationnal stability reasons 2.\n tau_1[tau_1 < eps_1] = eps_1\n tau_1 /= tau_1.sum(axis=1).reshape(n1, 1) # Re-Normalize.\n\n # Update of tau_2 with sparsity trick.\n l_tau_2 = (\n (\n (v.reshape(n2, nq, 1))\n * (self._np.log(pi) - self._np.log(1 - pi)).reshape(1, nq, nl)\n ).sum(1)\n + self._np.log(alpha_2.reshape(1, nl))\n + (tau_1 @ self._np.log(1 - pi)).sum(0)\n )\n\n # For computationnal stability reasons 1.\n l_tau_2 -= l_tau_2.max(axis=1).reshape(n2, 1)\n tau_2 = self._np.exp(l_tau_2)\n tau_2 /= tau_2.sum(axis=1).reshape(n2, 1) # Normalize.\n\n # For computationnal stability reasons 2.\n tau_2[tau_2 < eps_2] = eps_2\n tau_2 /= tau_2.sum(axis=1).reshape(n2, 1) # Re-Normalize.\n ########################## M-step ##########################\n alpha_1 = tau_1.mean(0)\n alpha_2 = tau_2.mean(0)\n pi = (\n tau_1[indices_ones[0]].reshape(-1, nq, 1)\n * tau_2[indices_ones[1]].reshape(-1, 1, nl)\n ).sum(0) / (tau_1.sum(0).reshape(nq, 1) * tau_2.sum(0).reshape(1, nl))\n return pi, alpha_1, alpha_2, tau_1, tau_2", "def __call__(self, inputs, state, scope=None):\n with vs.variable_scope(scope or type(self).__name__): # \"BasicLSTMCell\" \n # Parameters of gates are concatenated into one multiply for efficiency. \n c, h = array_ops.split(1, 2, state)\n concat = rnn_cell.linear([inputs, h], 4 * self._num_units, True)\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate \n i, j, f, o = array_ops.split(1, 4, concat)\n\n new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * self._activation(j)\n new_h = self._activation(new_c) * tf.sigmoid(o)\n\n return new_h, array_ops.concat(1, [new_c, new_h])" ]
[ "0.54891646", "0.53112125", "0.5259833", "0.5257837", "0.51097727", "0.51014096", "0.5074188", "0.49335226", "0.49120232", "0.4882265", "0.48616293", "0.48532745", "0.4843997", "0.4836578", "0.48361284", "0.4808079", "0.48075026", "0.47982457", "0.47965336", "0.47853282", "0.47833", "0.47522515", "0.4727599", "0.47195938", "0.47186142", "0.47027037", "0.46990725", "0.46781853", "0.46741915", "0.46672803" ]
0.6747349
0
Given a trained model = (Wih,Whh,bh,Who), a start internal state h0, and input y0 predict in freerunning mode for Npred steps into the future, with
def predict(model, y0, h0, Npred):
    if y0.ndim == 1:
        aug_len = y0.shape[0] + 1
    elif y0.ndim == 2:
        aug_len = y0.shape[0] * y0.shape[1] + 1
    else:
        raise ValueError("'y0' must either be a vector or a matrix.")
    (map_ih, (Whh, shape), bh, Who) = model

    def _step(input, xs):
        (y, h_augmented) = input
        h = h_augmented[aug_len:]
        #h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(y) + bh)
        h = jnp.tanh(sp_dot(Whh, h, shape[0]) + map_ih(y))
        h = jnp.hstack([[1.], y.reshape(-1), h])
        y = Who.dot(h).reshape(y.shape)
        return ((y, h), (y, h))

    xs = jnp.arange(Npred)  # necessary for lax.scan
    ((y, h), (ys, hs)) = lax.scan(_step, (y0, h0), xs)
    return ((y, h), (ys, hs))
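The function above runs the network closed-loop: each step's readout y is fed back as the next input while the hidden state is carried through lax.scan. A minimal self-contained sketch of that idea follows, using dense weights named as in the query (Wih, Whh, bh, Who); it is an assumption-laden illustration only and does not reproduce the sparse Whh/sp_dot format or the map_ih callable used by the stored function, and all shapes and values are hypothetical.

# Hypothetical free-running sketch with dense weights; names and shapes are
# illustrative and do not match the sparse-matrix variant above.
import jax.numpy as jnp
from jax import lax, random

n_in, n_hidden, Npred = 4, 32, 10
k1, k2, k3 = random.split(random.PRNGKey(0), 3)
Wih = 0.1 * random.normal(k1, (n_hidden, n_in))
Whh = 0.1 * random.normal(k2, (n_hidden, n_hidden))
bh = jnp.zeros(n_hidden)
Who = 0.1 * random.normal(k3, (n_in, 1 + n_in + n_hidden))

def _step(carry, _):
    y, h = carry
    h = jnp.tanh(Whh @ h + Wih @ y + bh)      # recurrent state update
    h_aug = jnp.hstack([jnp.ones(1), y, h])   # augmented state [1, y, h]
    y = Who @ h_aug                           # readout is fed back as the next input
    return (y, h), y

y0, h0 = jnp.zeros(n_in), jnp.zeros(n_hidden)
_, ys = lax.scan(_step, (y0, h0), jnp.arange(Npred))
print(ys.shape)  # (Npred, n_in) == (10, 4)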
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self, hyps):\n\n # Print Hyperparameters To Screen\n items = list(hyps.items())\n for k, v in sorted(items):\n print(k+\":\", v)\n\n # Make Save Files\n if \"save_folder\" in hyps:\n save_folder = hyps['save_folder']\n else:\n save_folder = \"./saved_data/\"\n\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n base_name = save_folder + hyps['exp_name']\n net_save_file = base_name+\"_net.p\"\n best_net_file = base_name+\"_best.p\"\n optim_save_file = base_name+\"_optim.p\"\n log_file = base_name+\"_log.txt\"\n if hyps['resume']: log = open(log_file, 'a')\n else: log = open(log_file, 'w')\n for k, v in sorted(items):\n log.write(k+\":\"+str(v)+\"\\n\")\n\n # Miscellaneous Variable Prep\n logger = Logger()\n shared_len = hyps['n_tsteps']*hyps['n_rollouts']\n env = gym.make(hyps['env_type'])\n obs = env.reset()\n prepped = hyps['preprocess'](obs)\n hyps['state_shape'] = [hyps['n_frame_stack']] + [*prepped.shape[1:]]\n if hyps['env_type'] == \"Pong-v0\":\n action_size = 3\n else:\n action_size = env.action_space.n*(hyps['env_type']!=\"Pong-v0\")\n hyps['action_shift'] = (4-action_size)*(hyps['env_type']==\"Pong-v0\") \n print(\"Obs Shape:,\",obs.shape)\n print(\"Prep Shape:,\",prepped.shape)\n print(\"State Shape:,\",hyps['state_shape'])\n print(\"Num Samples Per Update:\", shared_len)\n print(\"Samples Wasted in Update:\", shared_len % hyps['batch_size'])\n del env\n\n # Make Network\n net = hyps['model'](hyps['state_shape'],action_size,h_size=hyps['h_size'],bnorm=hyps['use_bnorm'])\n if hyps['resume']:\n net.load_state_dict(torch.load(net_save_file))\n base_net = copy.deepcopy(net)\n net = cuda_if(net)\n net.share_memory()\n base_net = cuda_if(base_net)\n\n # Prepare Shared Variables\n shared_data = {'states': cuda_if(torch.zeros(shared_len, *hyps['state_shape']).share_memory_()),\n 'rewards': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'deltas': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'dones': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'actions': torch.zeros(shared_len).long().share_memory_()}\n if net.is_recurrent:\n shared_data['h_states'] = cuda_if(torch.zeros(shared_len, hyps['h_size']).share_memory_())\n n_rollouts = hyps['n_rollouts']\n gate_q = mp.Queue(n_rollouts)\n stop_q = mp.Queue(n_rollouts)\n reward_q = mp.Queue(1)\n reward_q.put(-1)\n\n # Make Runners\n runners = []\n for i in range(hyps['n_envs']):\n runner = Runner(shared_data, hyps, gate_q, stop_q, reward_q)\n runners.append(runner)\n\n # Start Data Collection\n print(\"Making New Processes\")\n procs = []\n for i in range(len(runners)):\n proc = mp.Process(target=runners[i].run, args=(net,))\n procs.append(proc)\n proc.start()\n print(i, \"/\", len(runners), end='\\r')\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Make Updater\n updater = Updater(base_net, hyps)\n if hyps['resume']:\n updater.optim.load_state_dict(torch.load(optim_save_file))\n updater.optim.zero_grad()\n updater.net.train(mode=True)\n updater.net.req_grads(True)\n\n # Prepare Decay Precursors\n entr_coef_diff = hyps['entr_coef'] - hyps['entr_coef_low']\n epsilon_diff = hyps['epsilon'] - hyps['epsilon_low']\n lr_diff = hyps['lr'] - hyps['lr_low']\n\n # Training Loop\n past_rews = deque([0]*hyps['n_past_rews'])\n last_avg_rew = 0\n best_rew_diff = 0\n best_avg_rew = -1000\n epoch = 0\n T = 0\n while T < hyps['max_tsteps']:\n basetime = time.time()\n epoch += 1\n\n # Collect data\n for i in range(n_rollouts):\n stop_q.get()\n collection_time = time.time() - 
col_start_time\n\n T += shared_len\n\n # Reward Stats\n avg_reward = reward_q.get()\n reward_q.put(avg_reward)\n last_avg_rew = avg_reward\n if avg_reward > best_avg_rew:\n best_avg_rew = avg_reward\n updater.save_model(best_net_file, None)\n\n # Calculate the Loss and Update nets\n start_time = time.time()\n updater.update_model(shared_data)\n update_time = time.time() - start_time\n net.load_state_dict(updater.net.state_dict()) # update all collector nets\n \n # Resume Data Collection\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Decay HyperParameters\n if hyps['decay_eps']:\n updater.epsilon = (1-T/(hyps['max_tsteps']))*epsilon_diff + hyps['epsilon_low']\n print(\"New Eps:\", updater.epsilon)\n if hyps['decay_lr']:\n new_lr = (1-T/(hyps['max_tsteps']))*lr_diff + hyps['lr_low']\n updater.new_lr(new_lr)\n print(\"New lr:\", new_lr)\n if hyps['decay_entr']:\n updater.entr_coef = entr_coef_diff*(1-T/(hyps['max_tsteps']))+hyps['entr_coef_low']\n print(\"New Entr:\", updater.entr_coef)\n\n # Periodically save model\n if epoch % 10 == 0:\n updater.save_model(net_save_file, optim_save_file)\n\n # Print Epoch Data\n past_rews.popleft()\n past_rews.append(avg_reward)\n max_rew, min_rew = deque_maxmin(past_rews)\n updater.print_statistics()\n avg_action = shared_data['actions'].float().mean().item()\n print(\"Epoch\", epoch, \"– T =\", T)\n print(\"Grad Norm:\",float(updater.norm),\"– Avg Action:\",avg_action,\"– Best AvgRew:\",best_avg_rew)\n print(\"Avg Rew:\", avg_reward, \"– High:\", max_rew, \"– Low:\", min_rew, end='\\n')\n updater.log_statistics(log, T, avg_reward, avg_action, best_avg_rew)\n updater.info['AvgRew'] = avg_reward\n logger.append(updater.info, x_val=T)\n\n # Check for memory leaks\n gc.collect()\n max_mem_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n print(\"Time:\", time.time()-basetime, \"– Collection:\", collection_time, \"– Update:\", update_time)\n if 'hyp_search_count' in hyps and hyps['hyp_search_count'] > 0 and hyps['search_id'] != None:\n print(\"Search:\", hyps['search_id'], \"/\", hyps['hyp_search_count'])\n print(\"Memory Used: {:.2f} memory\\n\".format(max_mem_used / 1024))\n\n logger.make_plots(base_name)\n log.write(\"\\nBestRew:\"+str(best_avg_rew))\n log.close()\n # Close processes\n for p in procs:\n p.terminate()\n return best_avg_rew", "def warmup_predict(model, imgs, Npred):\n H = augmented_state_matrix(model[:-1], imgs, 0)\n h0 = H[-2]\n y0 = imgs[-1]\n return predict(model, y0, h0, Npred)", "def _predict(cls, model, is_log_transformed,\n raw_actual, interpolated_actual,\n training_end=None, seasonal_feature_scoring=None, pred_date=None, order_of_diff=None,\n training_tail=None, ext_training_features=None, pred_len=None, freq=None,\n include_holidays_exog=None):\n\n import numpy as np\n import pandas as pd\n import scipy.stats as st\n from numpy.linalg import LinAlgError\n import math\n\n alpha = cls._sig_level\n alpha_extreme = cls._sig_level_extreme\n\n include_holidays_exog = include_holidays_exog if ext_training_features else 0\n\n index = pd.date_range(start=training_end, end=pred_date, freq=freq)[1:] # Holidays are always daily.\n\n de_obj = DataExploration()\n pred_exog = de_obj._get_exog_data(pred_date, pred_date, index) if include_holidays_exog else None\n\n if pred_exog is not None and set(pred_exog.columns.values) != set(ext_training_features):\n missing_col_list = list(set(ext_training_features) - set(pred_exog.columns.values))\n common_cols = 
list(set(ext_training_features).intersection(set(pred_exog.columns.values)))\n temp_df = pred_exog[common_cols]\n missing_feat_df = pd.DataFrame(np.zeros([len(pred_exog), len(missing_col_list)]),\n columns=missing_col_list, index=pred_exog.index.values)\n pred_exog = pd.concat([temp_df, missing_feat_df], axis=1)\n pred_exog = pred_exog[ext_training_features]\n\n freq = \"1\" + freq if not any(char.isdigit() for char in freq) else freq\n\n forecast_ndays = int((pred_date - pd.Timestamp(training_end)) / pd.Timedelta(freq))\n model_freshness = forecast_ndays / float(pred_len)\n\n try:\n if forecast_ndays > pred_len:\n raise ValueError('Current trained model object expired')\n\n float_min = 1e-10\n\n # set exogenous (holiday) variables for input data\n if include_holidays_exog:\n pred_exog = pred_exog.loc[pd.Timestamp(training_end) + pd.Timedelta(freq): pred_date]\n else:\n pred_exog = None\n\n if seasonal_feature_scoring:\n if not include_holidays_exog:\n pred_exog = seasonal_feature_scoring[:forecast_ndays]\n else:\n pred_exog['fourier_feature'] = seasonal_feature_scoring[:forecast_ndays]\n\n forecast = list(model.forecast(steps=forecast_ndays, alpha=alpha, exog=pred_exog))\n interpolated_training_data = list(zip(*training_tail))[1]\n\n for order in list(reversed(range(order_of_diff))):\n training_data_diff = np.diff(interpolated_training_data,\n order) if order > 0 else interpolated_training_data\n\n forecast_diff_mean = [training_data_diff[-1]]\n forecast_diff_ci = []\n\n for i in range(forecast_ndays):\n forecast_diff_mean.append(forecast_diff_mean[-1] + forecast[0][i])\n forecast_diff_ci.append([forecast_diff_mean[-1] -\n (st.norm.ppf(1 - (alpha / 2.0)) * forecast[1][i]),\n forecast_diff_mean[-1] +\n (st.norm.ppf(1 - (alpha / 2.0)) * forecast[1][i])])\n forecast[0] = forecast_diff_mean[1:]\n forecast[2] = forecast_diff_ci\n\n if is_log_transformed:\n transformed_back_forecast = np.exp(forecast[0][-1] + ((forecast[1][-1] ** 2) / 2.0)) - 1\n transformed_back_std_err = np.sqrt((np.exp(forecast[1][-1] ** 2) - 1) * (np.exp((2 * forecast[0][-1]) +\n (forecast[1][\n -1] ** 2))))\n transformed_back_CILower = transformed_back_forecast - \\\n st.norm.ppf(1 - (alpha / 2.0), 0, transformed_back_std_err) \\\n if transformed_back_std_err != 0 else transformed_back_forecast\n transformed_back_CIUpper = transformed_back_forecast + \\\n st.norm.ppf(1 - (alpha / 2.0), 0, transformed_back_std_err) \\\n if transformed_back_std_err != 0 else transformed_back_forecast\n transformed_back_interpolated_actual = float(np.exp(interpolated_actual) - 1)\n if np.sum(np.isnan(forecast[0][-1])) or np.isnan(forecast[1][-1]):\n raise ValueError('Predicted null value')\n\n if is_log_transformed:\n zscore = (transformed_back_interpolated_actual -\n transformed_back_forecast) / max(float(transformed_back_std_err), float_min)\n\n anomaly_probability = (2 * st.norm(0, 1).cdf(abs(zscore))) - 1\n if math.isnan(anomaly_probability) or math.isnan(transformed_back_CILower) \\\n or math.isnan(transformed_back_CIUpper):\n raise ValueError('Either Anomaly probability or CILower or CIUpper is NaN under log transform')\n down_anomaly_probability = 1 - st.norm(0, 1).cdf(zscore)\n up_anomaly_probability = st.norm(0, 1).cdf(zscore)\n\n result = {'Success': True,\n 'IsLogTransformed': is_log_transformed,\n 'LogTransformedAdjustedActual': interpolated_actual,\n 'LogTransformedPrediction': float(forecast[0][-1]),\n 'LogTransformedStdErr': float(forecast[1][-1]),\n 'LogTransformedCILower': float(forecast[2][-1][0]),\n 'LogTransformedCIUpper': 
float(forecast[2][-1][1]),\n 'AdjustedActual': transformed_back_interpolated_actual,\n 'Prediction': float(transformed_back_forecast) if not float(\n transformed_back_forecast) == float('inf') else 0.0,\n 'StdErr': float(transformed_back_std_err) if not float(\n transformed_back_std_err) == float('inf') else 0.0,\n 'CILower': float(transformed_back_CILower) if not float(\n transformed_back_CILower) == float('-inf') else 0.0,\n 'CIUpper': float(transformed_back_CIUpper) if not float(\n transformed_back_CIUpper) == float('inf') else 0.0,\n 'ConfLevel': float(1.0 - alpha) * 100,\n 'ExogenousHolidays': include_holidays_exog,\n 'IsAnomaly': bool(anomaly_probability > 1 - alpha),\n 'IsAnomalyExtreme': bool(anomaly_probability > 1 - alpha_extreme),\n 'AnomalyProbability': 1 if raw_actual is None else float(anomaly_probability),\n 'DownAnomalyProbability': 1 if raw_actual is None else float(down_anomaly_probability),\n 'UpAnomalyProbability': 1 if raw_actual is None else float(up_anomaly_probability),\n 'ModelFreshness': model_freshness}\n\n else:\n zscore = (interpolated_actual - forecast[0][-1]) / max(float(forecast[1][-1]), float_min)\n\n anomaly_probability = (2 * st.norm(0, 1).cdf(abs(zscore))) - 1\n if math.isnan(anomaly_probability) or math.isnan(forecast[2][-1][0]) or math.isnan(forecast[2][-1][1]):\n raise ValueError('Either Anomaly probability or CILower or CIUpper is NaN')\n\n down_anomaly_probability = 1 - st.norm(0, 1).cdf(zscore)\n up_anomaly_probability = st.norm(0, 1).cdf(zscore)\n\n result = {'Success': True,\n 'IsLogTransformed': is_log_transformed,\n 'AdjustedActual': interpolated_actual,\n 'Prediction': float(forecast[0][-1]) if not float(\n forecast[0][-1]) == float('inf') else 0.0,\n 'StdErr': float(forecast[1][-1]) if not float(\n forecast[1][-1]) == float('inf') else 0.0,\n 'CILower': float(forecast[2][-1][0]) if not float(\n forecast[2][-1][0]) == float('-inf') else 0.0,\n 'CIUpper': float(forecast[2][-1][1]) if not float(\n forecast[2][-1][1]) == float('inf') else 0.0,\n 'ConfLevel': float(1.0 - alpha) * 100,\n 'ExogenousHolidays': include_holidays_exog,\n 'IsAnomaly': bool(anomaly_probability > 1 - alpha),\n 'IsAnomalyExtreme': bool(anomaly_probability > 1 - alpha_extreme),\n 'AnomalyProbability': 1 if raw_actual is None else float(anomaly_probability),\n 'DownAnomalyProbability': 1 if raw_actual is None else float(down_anomaly_probability),\n 'UpAnomalyProbability': 1 if raw_actual is None else float(up_anomaly_probability),\n 'ModelFreshness': model_freshness}\n\n except (LinAlgError, ValueError, LADStructuralError) as e:\n result = {'Success': False,\n 'AdjustedActual': interpolated_actual,\n 'ErrorMessage': str(e)}\n\n return result", "def on_predict_begin(self, logs=None):", "def on_predict_begin(self, logs=None):", "def train_machine(data,init_model_file):\n M = MyHmmLog(init_model_file)\n M.forward_backward_multi(data)\n\n return(M)", "def entry(self):\n if not os.path.isfile('model'):\n train()\n schedule.every(0.01).seconds.do(predict, self)\n while True:\n schedule.run_pending()", "def fit_recurrent(self, x, y):\n # print('Stage 1')\n x_ = self.scaler_s1.fit_transform(x)\n\n self.basemodel.fit(x_, y)\n self.training_hit_probability = self._hitprobability(x_, y)\n\n # Learn the hit probability\n self.hitproba = HitProbability()\n self.hitproba.fit(x_, self.training_hit_probability)\n\n # Learn high confidence for all classes\n hm_y, auto_gamma = self._adjust_gamma(self.training_hit_probability)\n self.joint_class_hc = HC_LR()\n self.joint_class_hc.fit(x_, 
hm_y)\n\n # hm_subtypes = []\n # proba_subtypes = []\n\n # while np.mean(y_) > 0.01:\n # for label in np.unique(y):\n\n hm_1hot = []\n hm_1hot.append(self._one_hot(self.training_hit_probability, y)[0])\n y_ = y.copy()\n\n self.recurrent_base = []\n self.recurrent_hpc = []\n for ii in range(self.recurrent_modes):\n print('Stage 1 iter: ' + str(ii))\n #self.recurrent_base.append(BaseSvc())\n\n if np.sum(y_) > 2:\n self.basemodel = BaseSvc()\n hm_y, proba_tmp = self._fit_mode(x_, y_)\n hm_candidate = self._one_hot(proba_tmp, y_)[1]\n else:\n hm_candidate = np.zeros_like(y_)\n\n self.recurrent_base.append(self.basemodel)\n\n #if np.sum(hm_candidate) >= 2:\n hm_1hot.append(hm_candidate)\n\n # remove the selected subgroup from the target list\n y_[hm_1hot[-1] == 1] = 0\n\n # make the default base model the first\n self.basemodel = self.recurrent_base[0]\n\n print('Stage 2')\n # Stage 2\n # hm_1hot = hm_subtypes\n # train stage2\n self.confidencemodel.fit(x_, hm_1hot)", "def train_init():\n np.random.seed(seed)\n tf.random.set_random_seed(seed)\n random.seed(seed)\n\n name = str(seed)\n desc = MNMDescriptor(5, inp_dict, outp_dict, name=name)\n desc = recursive_creator(desc, 0, 0, seed)\n hypers = {}\n for hyper in hyps:\n hypers[hyper] = np.random.choice(hyps[hyper])\n\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, lr=hypers[\"lr\"], opt=hypers[\"opt\"], random_seed=seed)\n if intelligent_training == 2:\n loss_weights = model.sequential_training(hypers[\"btch_sz\"], iter_lim // 50, conv_param, proportion, iter_lim, display_step=-1)\n else:\n loss_weights = model.autoset_training(hypers[\"btch_sz\"], iter_lim//50, conv_param, proportion, iter_lim, display_step=-1, incr=incr, decr=decr, scaling=scale)\n\n\n # ####### Save model characteristics.\n\n model.descriptor.save(path=\"\")\n model.save_weights(path=\"\")\n\n results = evaluate_model(model)\n\n np.save(\"hypers\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", hypers)\n\n np.save(\"orig_results\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", results)\n\n np.save(\"loss_weights\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", loss_weights)", "def build_lm(self, x, y=None, mode=TRAINING, prev_h=None, step_num=None): \n one_step = False\n \n # Check parameter consistency\n if mode == LanguageModel.EVALUATION or mode == LanguageModel.TRAINING:\n assert y\n else:\n assert not y\n assert prev_h\n one_step = True\n\n # if x.ndim == 2 then \n # x = (n_steps, batch_size)\n if x.ndim == 2:\n batch_size = x.shape[1]\n # else x = (word_1, word_2, word_3, ...)\n # or x = (last_word_1, last_word_2, last_word_3, ..)\n # in this case batch_size is \n else:\n batch_size = 1\n \n if not prev_h:\n prev_h = T.alloc(np.float32(0.), batch_size, self.qdim)\n \n xe = self.approx_embedder(x)\n # Gated Encoder\n if self.step_type == \"gated\":\n f_enc = self.gated_step\n o_enc_info = [prev_h, None, None, None]\n else:\n f_enc = self.plain_step\n o_enc_info = [prev_h]\n \n # Run through all the sentence (encode everything)\n if not one_step: \n _res, _ = theano.scan(f_enc,\n sequences=[xe],\\\n outputs_info=o_enc_info) \n # Make just one step further\n else:\n _res = f_enc(xe, prev_h)\n\n h = _res[0]\n # Store last h for further use\n pre_activ = self.output_layer(h, xe)\n \n # 
EVALUATION : Return target_probs\n # target_probs.ndim == 3\n outputs = self.output_softmax(pre_activ)\n \n if mode == LanguageModel.EVALUATION:\n target_probs = GrabProbs(outputs, y)\n return target_probs, h, outputs\n # BEAM_SEARCH : Return output (the softmax layer) + the new hidden states\n elif mode == LanguageModel.BEAM_SEARCH:\n return outputs, h\n # SAMPLING : Return a vector of n_sample from the output layer \n # + log probabilities + the new hidden states\n elif mode == LanguageModel.SAMPLING:\n if outputs.ndim == 1:\n outputs = outputs.dimshuffle('x', 0)\n \n sample = self.trng.multinomial(pvals=outputs, dtype='int64').argmax(axis=-1)\n if outputs.ndim == 1:\n sample = sample[0]\n \n log_prob = -T.log(T.diag(outputs.T[sample]))\n return sample, log_prob, h", "def E_step_precompute(self, model_params, my_suff_stat, my_data):", "def update_rnnlm_state_batch(lm, hyps, y):\n lmout, lmstate, scores_lm = None, None, None\n if lm is not None:\n if hyps[0]['lmstate'] is not None:\n lm_hxs = torch.cat([beam['lmstate']['hxs'] for beam in hyps], dim=1)\n lm_cxs = torch.cat([beam['lmstate']['cxs'] for beam in hyps], dim=1)\n lmstate = {'hxs': lm_hxs, 'cxs': lm_cxs}\n lmout, lmstate, scores_lm = lm.predict(y, lmstate)\n return lmout, lmstate, scores_lm", "def run():\n\n df = read_input() # the parameters\n df = add_time_period(df) # a feature\n df = is_holiday(df) # a feature\n df = scale_continous(df) # continous feature transformation\n df = encode_dummy(df) # categorical feature transformation\n df = order_columns(df) # ordering model inputs\n model = load_model() # the multiple linear regression model\n prediction = int(model.predict(df)) # form a prediction\n return prediction # return the prediction", "def predict_sequence(self, h_0):\n\n # Inital values. The are required to be reshaped to rank2-tensor be concated afterwards\n init_predict_sentence = tf.zeros([10, 1], dtype=tf.float64, name='whileloop_init_sentence')\n init_prediction = tf.reshape(h_0, shape=[-1, 1], name='whileloop_init_prediction')\n\n def loop_cond(prediction, predict_sentence): # predict_sentence argument is required by tf.while_loop\n\n threshold = tf.constant(0.01, dtype=tf.float64, name='whileloop_threshold')\n boolean = tf.greater((tf.reduce_sum(tf.pow(prediction, 2)) ** 0.5), threshold, name='whileloop_boolean')\n return boolean\n\n def loop_body(prev_prediction, prev_predict_sentence):\n\n \"\"\"This function is a little bit hacky. 
Tensorflow's loops don't support neither fetching global scope variables\n that are transformed but not returned from the loop nor modify the rank of the returned tensor in every\n iteration of the loop.\n\n This seems to be overcome defining the predict_sentence in two stages, one for the previous iter state an\n another one for the next state.\n\n :param prev_prediction:\n :param prev_predict_sentence:\n :return: [next_prediction, next_predict_sentence]\n \"\"\"\n\n # In the predict_model the previous state and the input state for the forward_pass are the same\n next_prediction = self.forward_pass(prev_prediction, prev_prediction)\n next_prediction = tf.reshape(next_prediction, shape=[-1, 1], name='whileloop_next_prediction')\n\n # Concat the predicted word to the sentence (instead of list.append() cause tf.while_loop() doesn't support\n # no-tensor arguments)\n next_predict_sentence = tf.concat(axis=1, values=[prev_prediction, prev_predict_sentence],\n name='whileloop_next_prediction_sentence')\n\n return [next_prediction, next_predict_sentence]\n\n # While loop that return the predict sentence\n _, predict_sentence = tf.while_loop(cond=loop_cond,\n body=loop_body,\n loop_vars=[init_prediction, init_predict_sentence],\n shape_invariants=[tf.TensorShape([10, 1]), tf.TensorShape([10, None])],\n maximum_iterations=10,\n name='whileloop_predict_sentence')\n return predict_sentence", "def run_model(Xp_train=None,y_train=None,Xp_test=None,y_test=None,mt='bl', params={}):\n models = { \n \"bl\": lambda : None,\n \"lr\": LinearRegression,\n \"dtr\": DecisionTreeRegressor,\n \"gbr\": GradientBoostingRegressor\n }\n\n # Select Model, returns None for baseline\n model = models[mt](**params)\n\n # Fit Model and make predictions\n if mt != 'bl': # If model not baseline\n # Dummy encoding , (each category of n levels or attributes is converted into n-1 dichotomous variables)\n X_train = pd.get_dummies(Xp_train, columns=['type', 'duration','location'],drop_first=True)\n X_test = pd.get_dummies(Xp_test, columns=['type', 'duration','location'],drop_first=True)\n model.fit(X_train,y_train)\n # Make Predictions\n y_pred = model.predict(X_test)\n #print X_test.shape\n y_predtr = model.predict(X_train) \n else: # Compute baseline\n y_pred = np.median(y_train).repeat(len(y_test))\n y_predtr = np.median(y_train).repeat(len(y_train))\n \n \n\n # Report metrics\n print(\"Model name: %s\" % (model.__class__.__name__ if model else \"Price Median Baseline\"))\n if mt != 'bl':\n print(\"hyper-parameters: \" + \", \".join( \"{0}: {1}\".format(k,v) for (k,v) in params.items() ) )\n print(\"Mean absolute error training set: %.2f\" % mean_absolute_error(y_train, y_predtr)) \n print(\"Mean absolute error testing set: %.2f \\n\" % mean_absolute_error(y_test, y_pred))", "def sequence_predict(self, load_script=False, variant=\"predict\"):\n\n if variant != 'internal':\n # Open an existing model and get the input dataset. 
\n # Target for historical data are expected if using previous targets as a feature.\n request_data = self._get_model_and_data(ordered_data=True) \n if type(request_data) == list:\n X, y = request_data\n else:\n X = request_data\n else:\n X = self.X_test.copy()\n y = self.y_test.copy()\n\n # Scale the targets and increase stationarity if required\n if variant != 'internal' and self.model.lag_target and (self.model.scale_target or self.model.make_stationary):\n # If using differencing, we retain original y values for inversing the transformation later\n y_orig = y.values.ravel() if self.model.make_stationary=='difference' else None\n # Apply the transformer to the targets\n y = self.model.target_transformer.transform(y)\n # Drop samples where y cannot be transformed due to insufficient lags\n X = X.iloc[len(X)-len(y):]\n\n # Set the number of periods to be predicted\n prediction_periods = self.model.prediction_periods\n # Set the number of rows required for one prediction\n self.rows_per_pred = 1\n self.diff_lags = max(self.model.stationarity_lags) if self.model.lag_target and self.model.make_stationary=='difference' else 0\n # Set property depending on whether the current sample will be included as an input, or if we only use lag observations for predictions\n self.first_pred_modifier = 1 if self.model.current_sample_as_input else 0 \n\n # Check that the input data includes history to meet any lag calculation requirements\n if self.model.lags:\n # An additional lag observation is needed if previous targets are being added to the features\n self.rows_per_pred = self.model.lags+self.first_pred_modifier+1 if self.model.lag_target else self.model.lags+self.first_pred_modifier\n # If the target is being lagged and made stationary through differencing additional lag periods are required\n if self.model.lag_target and self.model.make_stationary=='difference':\n extra_msg = \" plus an additional {} periods for making the target stationary using differencing\".format(self.diff_lags)\n # For multi-step predictions we only expect lag values, not the current period's values\n # self.rows_per_pred = self.rows_per_pred-1 if prediction_periods > 1 else self.rows_per_pred\n assert len(X) >= self.rows_per_pred + self.diff_lags, \"Insufficient input data as the model requires {} lag periods for each prediction\".format(self.rows_per_pred) + extra_msg\n\n if variant != 'internal':\n # Prepare the response DataFrame\n # Initially set up with the 'model_name' and 'key' columns and the same index as request_df\n self.response = self.request_df.drop(columns=['n_features'])\n \n # Set up a list to contain predictions and probabilities if required\n predictions = []\n get_proba = False\n if variant == 'predict_proba':\n get_proba = True\n probabilities = [] \n\n # Refresh the keras model to avoid tensorflow errors\n if self.model.using_keras:\n self._keras_refresh()\n\n if prediction_periods > 1:\n if not self.model.lag_target:\n y = None\n\n # Check that we can generate 1 or more predictions of prediction_periods each\n n_samples = len(X)\n assert (n_samples - self.rows_per_pred) >= prediction_periods, \\\n \"Cannot generate predictions for {} periods with {} rows, with {} rows required for lag observations. You may need to provide more historical data or sufficient placeholder rows for future periods.\"\\\n .format(prediction_periods, n_samples, self.rows_per_pred)\n \n # For multi-step predictions we can add lag observations up front as we only use actual values\n # i.e. 
We don't use predicted y values for further predictions \n if self.model.lags or self.model.lag_target:\n X = self._add_lags(X, y=y, extrapolate=self.first_pred_modifier) \n\n # We start generating predictions from the first row as lags will already have been added to each sample\n start = 0\n else:\n # We start generating predictions from the point where we will have sufficient lag observations\n start = self.rows_per_pred\n \n if self.model.lag_target or prediction_periods > 1:\n # Get the predictions by walking forward over the data\n for i in range(start, len(X) + self.first_pred_modifier, prediction_periods): \n # For multi-step predictions we take in self.rows_per_pred rows of X to generate predictions for prediction_periods\n if prediction_periods > 1:\n batch_X = X.iloc[[i]]\n \n if not get_proba:\n # Get the prediction. \n pred = self.model.pipe.predict(batch_X)\n # Flatten the predictions for multi-step outputs and add to the list\n pred = pred.ravel().tolist()\n predictions += pred\n else:\n # Get the predicted probability for each sample \n proba = self.model.pipe.predict_proba(batch_X)\n proba = proba.reshape(-1, len(self.model.pipe.named_steps['estimator'].classes_))\n probabilities += proba.tolist()\n # For walk forward predictions with lag targets we use each prediction as input to the next prediction, with X values avaialble for future periods.\n else:\n batch_X = X.iloc[i-self.rows_per_pred : i] \n # Add lag observations\n batch_y = y.iloc[i-self.rows_per_pred : i]\n batch_X = self._add_lags(batch_X, y=batch_y, extrapolate=self.first_pred_modifier)\n\n # Get the prediction. We only get a prediction for the last sample in the batch, the remaining samples only being used to add lags.\n pred = self.model.pipe.predict(batch_X.iloc[[-1],:])\n\n # Add the prediction to the list. 
\n predictions.append(pred)\n \n # Add the prediction to y to be used as a lag target for the next prediction\n y.iloc[i - self.first_pred_modifier, 0] = pred\n\n # If probabilities need to be returned\n if get_proba:\n # Get the predicted probability for each sample \n probabilities.append(self.model.pipe.predict_proba(batch_X.iloc[[-1],:]))\n else:\n # Add lag observations to the samples if required\n if self.model.lags:\n X = self._add_lags(X, extrapolate=self.first_pred_modifier)\n\n # Get prediction for X\n predictions = self.model.pipe.predict(X)\n\n # If probabilities need to be returned\n if get_proba:\n # Get the predicted probability for each sample \n probabilities = self.model.pipe.predict_proba(X)\n \n # Set the number of placeholders needed in the response\n # These are samples for which predictions were not generated due to insufficient lag periods or for meeting multi-step prediction period requirements\n self.placeholders = self.rows_per_pred + self.diff_lags - self.first_pred_modifier\n\n # Transform probabilities to a readable string\n if get_proba:\n # Add the required number of placeholders at the start of the response list\n y = [\"\\x00\"] * self.placeholders\n \n # Truncate multi-step predictions if the (number of samples - self.rows_per_pred) is not a multiple of prediction_periods\n if prediction_periods > 1 and ((n_samples-self.rows_per_pred) % prediction_periods) > 0: \n probabilities = probabilities[:-len(probabilities)+(n_samples-self.rows_per_pred)]\n \n for a in probabilities:\n s = \"\"\n i = 0\n for b in a:\n s = s + \", {0}: {1:.3f}\".format(self.model.pipe.named_steps['estimator'].classes_[i], b)\n i += 1\n y.append(s[2:])\n\n # Prepare predictions\n else:\n if prediction_periods > 1:\n # Set the value to use for nulls\n null = np.NaN if is_numeric_dtype(np.array(predictions)) else \"\\x00\"\n\n # Truncate multi-step predictions if the (number of samples - self.placeholders) is not a multiple of prediction_periods\n if (n_samples-self.rows_per_pred) % prediction_periods > 0:\n predictions = predictions[:-len(predictions)+(n_samples-self.rows_per_pred)]\n\n # Add null values at the start of the response list to match the cardinality of the input from Qlik\n y = np.array(([null] * (self.rows_per_pred - self.first_pred_modifier)) + predictions)\n elif self.model.lag_target: \n # Remove actual values for which we did not generate predictions due to insufficient lags\n if is_numeric_dtype(y.iloc[:, 0].dtype):\n y.iloc[:self.placeholders - self.first_pred_modifier, 0] = np.NaN\n else:\n y.iloc[:self.placeholders - self.first_pred_modifier, 0] = \"\\x00\"\n # Flatten y to the expected 1D shape\n y = y.values.ravel()\n else:\n y = np.array(predictions)\n \n # Inverse transformations on the targets if required \n if variant != 'internal' and (self.model.scale_target or self.model.make_stationary):\n # Take out placeholder values before inverse transform of targets\n null_values = y[:self.rows_per_pred - self.first_pred_modifier] if prediction_periods > 1 or self.model.lag_target else []\n # Add placeholders for samples removed during differencing\n if self.model.make_stationary=='difference':\n null_values = np.append(null_values, np.repeat(null_values[0], self.diff_lags))\n y = y if len(null_values) == 0 else y[-len(predictions):]\n # Add untransformed lag values for differencing if required\n end = self.placeholders\n start = end - self.diff_lags\n y = y if y_orig is None else np.append(y_orig[start : end], y)\n\n # Apply the transformer to the test targets\n y 
= self.model.target_transformer.inverse_transform(y) \n\n # Remove lags used for making the series stationary in case of differencing\n if self.model.make_stationary == 'difference':\n y = y[self.diff_lags:]\n\n # Replace lags used for making the series stationary with nulls in case of differencing\n # if self.model.make_stationary == 'difference':\n #null = np.NaN if is_numeric_dtype(np.array(predictions)) else \"\\x00\"\n # y = np.append(np.array([null]*self.diff_lags), y[self.diff_lags:])\n \n # Add back the placeholders for lag values\n if len(null_values) > 0:\n y = np.append(null_values, y)\n \n if variant == 'internal':\n return y\n\n # Add predictions / probabilities to the response\n self.response['result'] = y\n\n # Reindex the response to reset to the original sort order\n self.response = self.response.reindex(self.original_index)\n \n if load_script:\n # If the function was called through the load script we return a Data Frame\n self._send_table_description(\"predict\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response\n \n # If the function was called through a chart expression we return a Series\n else:\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n return self.response.loc[:,'result']", "def fit(self, h_init):\n M = h_init.shape[0]\n\n cn_states = self.create_cn_states(M, 2, self.max_copy_number, self.max_copy_number_diff)\n cn_states = np.array([cn_states] * self.N)\n cn_states[:, :, 0, :] = self.normal_copies[:, np.newaxis, :]\n\n # Remap cn states\n cn_states = cn_states[self.seg_rev_remap, :, :, :]\n\n brk_states = self.create_brk_states(M, self.max_copy_number, self.max_copy_number_diff)\n\n self.model = remixt.bpmodel.RemixtModel(\n M,\n self.N1,\n self.num_breakpoints,\n self.normal_contamination,\n cn_states,\n brk_states,\n h_init,\n self.l1,\n self.x1[:, 2],\n self.x1[:, 0:2],\n self.is_telomere,\n self.breakpoint_idx,\n self.breakpoint_orient,\n self.transition_log_prob,\n self.divergence_weight,\n )\n\n self.model.total_likelihood_mask = self._total_likelihood_mask.astype(int)\n self.model.allele_likelihood_mask = self._allele_likelihood_mask.astype(int)\n\n if self.breakpoint_init is not None:\n p_breakpoint = np.ones((self.model.self.num_breakpoints, self.model.num_brk_states))\n brk_states = np.array(self.model.brk_states)\n\n for k, bp in enumerate(self.breakpoints):\n cn = self.breakpoint_init[bp]\n\n for s in range(self.model.num_brk_states):\n if np.all(cn == brk_states[s]):\n p_breakpoint[k, s] = 1000.\n\n p_breakpoint /= np.sum(p_breakpoint, axis=-1)[:, np.newaxis]\n\n self.model.p_breakpoint = p_breakpoint\n\n self.model.transition_model = self.transition_model\n\n if self.prev_elbo is None:\n self.prev_elbo = self.model.calculate_elbo()\n\n for i in range(self.num_em_iter):\n for j in range(self.num_update_iter):\n self.variational_update()\n\n if self.do_h_update:\n self.em_update_h()\n\n self.em_update_params()\n\n elbo = self.model.calculate_elbo()\n\n self.prev_elbo_diff = elbo - self.prev_elbo\n self.prev_elbo = elbo\n\n print ('[{}] completed iteration {}'.format(_gettime(), i))\n print ('[{}] elbo: {:.10f}'.format(_gettime(), self.prev_elbo))\n print ('[{}] elbo diff: {:.10f}'.format(_gettime(), self.prev_elbo_diff))\n print ('[{}] h = {}'.format(_gettime(), np.asarray(self.model.h)))\n for name, value in 
self.get_likelihood_param_values().items():\n print ('[{}] {} = {}'.format(_gettime(), name, value))", "def _build_fixmatch_training_step(model, optimizer, lam=0, \n tau=0.95, weight_decay=0):\n trainvars = model.trainable_variables\n \n \n def train_step(lab, unlab):\n x,y = lab\n x_unlab_wk, x_unlab_str = unlab\n \n \n with tf.GradientTape() as tape:\n # semisupervised case\n if lam > 0:\n # concatenate labeled/pseudolabeled batches\n N = x.shape[0]\n mu_N = x_unlab_str.shape[0]\n x_batch = tf.concat([x,x_unlab_wk, x_unlab_str],0)\n pred_batch = model(x_batch, training=True)\n # then split the labeled/pseudolabeled pieces\n preds = pred_batch[:N,:]\n wk_preds = pred_batch[N:N+mu_N,:]\n str_preds = pred_batch[N+mu_N:,:]\n # GENERATE FIXMATCH PSEUDOLABELS\n # round predictions to pseudolabels\n with tape.stop_recording():\n pseudolabels = tf.cast(wk_preds > 0.5, \n tf.float32)\n # also compute a mask from the predictions,\n # since we only incorporate high-confidence cases,\n # compute a mask that's 1 every place that's close\n # to 1 or 0\n mask = _build_mask(wk_preds, tau)\n \n # let's try keeping track of how accurate these\n # predictions are\n ssl_acc = tf.reduce_mean(tf.cast(\n tf.cast(str_preds > 0.5, tf.float32)==pseudolabels,\n tf.float32))\n \n crossent_tensor = K.binary_crossentropy(pseudolabels,\n str_preds)\n fixmatch_loss = tf.reduce_mean(mask*crossent_tensor)\n \n else: \n fixmatch_loss = 0\n ssl_acc = -1\n mask = -1\n preds = model(x, training=True)\n \n trainloss = tf.reduce_mean(K.binary_crossentropy(y, preds))\n \n if (weight_decay > 0)&(\"LARS\" not in optimizer._name):\n l2_loss = compute_l2_loss(model)\n else:\n l2_loss = 0\n \n total_loss = trainloss + lam*fixmatch_loss + weight_decay*l2_loss\n \n # compute and apply gradients\n gradients = tape.gradient(total_loss, trainvars)\n optimizer.apply_gradients(zip(gradients, trainvars))\n\n return {\"total_loss\":total_loss, \"supervised_loss\":trainloss,\n \"fixmatch_loss\":fixmatch_loss, \"l2_loss\":l2_loss,\n \"fixmatch_prediction_accuracy\":ssl_acc, \n \"fixmatch_mask_fraction\":tf.reduce_mean(mask)}\n return train_step", "def make_predict_step(self):\n return self.make_eval_step()", "def update_rnnlm_state_batch(self, lm, hyps, y):\n lmout, lmstate, scores_lm = None, None, None\n if lm is not None:\n if hyps[0]['lmstate'] is not None:\n lm_hxs = torch.cat([beam['lmstate']['hxs'] for beam in hyps], dim=1)\n lm_cxs = torch.cat([beam['lmstate']['cxs'] for beam in hyps], dim=1)\n lmstate = {'hxs': lm_hxs, 'cxs': lm_cxs}\n lmout, lmstate, scores_lm = lm.predict(y, lmstate)\n return lmout, lmstate, scores_lm", "def update_rnnlm_state(lm, hyp, y):\n lmout, lmstate, scores_lm = None, None, None\n if lm is not None:\n lmout, lmstate, scores_lm = lm.predict(y, hyp['lmstate'])\n return lmout, lmstate, scores_lm", "def predict(model, last, num):\n total_list = list(last)\n start_number = 0 - len(last)\n time = 0\n# print start_number\n# print total_list\n# print total_list[start_number:]\n # For num times, repeatedly try to predict next state with\n # model\n while time < num:\n time += 1\n# print 'This time, total_list is', total_list\n element_tuple = tuple(total_list[start_number:])\n# print 'This time, element_tuple is', element_tuple\n if element_tuple in model.keys():\n# print 'In dictionary'\n# print 'The number added is', compare(model[element_tuple])\n total_list.append(compare(model[element_tuple]))\n else:\n# print 'Not in dictionary, random number generated'\n total_list.append(randrange(0, 4))\n# print 'In the end, 
total_list is', total_list\n return_number = 0 - num\n return total_list[return_number:]", "def main(argv):\n # Define the model (same as when creating the model file)\n x1 = tf.placeholder(tf.float32, [None, 784])\n W = tf.Variable(tf.zeros([784, 62]))\n b = tf.Variable(tf.zeros([62]))\n \n W_conv1 = weight_variable([5, 5, 1, 32])\n b_conv1 = bias_variable([32])\n \n x_image = tf.reshape(x1, [-1,28,28,1])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n h_pool1 = max_pool_2x2(h_conv1)\n \n W_conv2 = weight_variable([5, 5, 32, 64])\n b_conv2 = bias_variable([64])\n \n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\n h_pool2 = max_pool_2x2(h_conv2)\n \n W_fc1 = weight_variable([7 * 7 * 64, 1024])\n b_fc1 = bias_variable([1024])\n \n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n \n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n \n W_fc2 = weight_variable([1024, 62])\n b_fc2 = bias_variable([62])\n \n y_conv=tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n \n init_op = tf.global_variables_initializer()\n saver = tf.train.Saver()\n \n '''\n This part will call the actual algorithm for predicting all the value present in the given image,\n Here we are resoting model saved in previous tutorial.\n '''\n with tf.Session() as sess: \n sess.run(init_op)\n saver.restore(sess, os.path.join(\"/ReadIT/tensorflow/mnist\", 'model.ckpt'))\n prediction=tf.argmax(y_conv,1)\n startBreakImage(argv,prediction,keep_prob,sess,x1)\n '''\n imvalue = imageprepare(argv)\n #print(imvalue)\n predictValue= prediction.eval(feed_dict={x: [imvalue],keep_prob: 1.0}, session=sess)\n #print(predictValue)\n #print(chr(int(ct.mapping[str(predictValue[0])])))\n '''", "def predict(self,inputs,keep_prob, _):\n #Non-Dynamic Unidirectional RNN\n hidden_size = self.config.mRNN._hidden_size\n batch_size = self.config.batch_size\n embed_size = self.config.mRNN._embed_size\n\n if keep_prob == None:\n keep_prob = 1\n\n with tf.variable_scope('InputDropout'):\n inputs = [tf.nn.dropout(x,keep_prob) for x in inputs]\n \n with tf.variable_scope('RNN') as scope:\n state = self.initial_state\n RNN_H = tf.get_variable('HMatrix',[hidden_size,hidden_size])\n RNN_I = tf.get_variable('IMatrix', [embed_size,hidden_size])\n RNN_b = tf.get_variable('B',[hidden_size])\n\n self.variable_summaries(RNN_H, 'HMatrix')\n self.variable_summaries(RNN_I, 'IMatrix')\n self.variable_summaries(RNN_b, 'Bias')\n \n with tf.variable_scope('RNN',reuse=True):\n rnn_outputs = []\n for tstep, current_input in enumerate(inputs):\n RNN_H = tf.get_variable('HMatrix',[hidden_size,hidden_size])\n RNN_I = tf.get_variable('IMatrix', [embed_size,hidden_size])\n RNN_b = tf.get_variable('B',[hidden_size])\n #state = tf.nn.tanh(tf.matmul(state,RNN_H) + tf.matmul(current_input,RNN_I) + RNN_b)\n\n state = tf.matmul(state,RNN_H) + current_input\n rnn_outputs.append(state)\n\t\t#How to pass state info for subsequent sentences\n self.final_state = rnn_outputs[-1]\n \n with tf.variable_scope('RNNDropout'):\n rnn_outputs = [tf.nn.dropout(x,keep_prob) for x in rnn_outputs]\n\n return rnn_outputs", "def update_rnnlm_state(self, lm, hyp, y):\n lmout, lmstate, scores_lm = None, None, None\n if lm is not None:\n lmout, lmstate, scores_lm = lm.predict(y, hyp['lmstate'])\n return lmout, lmstate, scores_lm", "def on_predict_end(self, logs=None):", "def on_predict_end(self, logs=None):", "def buildFirstModel():\n model = build(IMAGE_HEIGHT, IMAGE_WIDTH, 3, 
y.shape[1], finalAct=\"sigmoid\")\n opt = Adam(lr=INIT_LE, decay=INIT_LE / EPOCHS)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=opt, metrics=[\"acc\"])", "def init_hmc_model_iteration_wf(modelname, transform, precision=\"coarse\", name=\"hmc_model_iter0\"):\n\n workflow = Workflow(name=name)\n inputnode = pe.Node(\n niu.IdentityInterface(\n fields=['original_dwi_files', 'bvals', 'approx_aligned_dwi_files',\n 'approx_aligned_bvecs', 'b0_mask', 'b0_mean', 'original_bvecs']),\n name='inputnode')\n outputnode = pe.Node(\n niu.IdentityInterface(\n fields=['hmc_transforms', 'aligned_dwis', 'aligned_bvecs', 'predicted_dwis',\n 'motion_params']),\n name='outputnode')\n\n ants_settings = pkgrf(\n \"qsiprep\",\n \"data/shoreline_{precision}_{transform}.json\".format(precision=precision,\n transform=transform))\n\n predict_dwis = pe.MapNode(SignalPrediction(model=modelname),\n iterfield=['bval_to_predict', 'bvec_to_predict'],\n name=\"predict_dwis\")\n predict_dwis.synchronize = True\n\n # Register original images to the predicted images\n register_to_predicted = pe.MapNode(ants.Registration(from_file=ants_settings),\n iterfield=['fixed_image', 'moving_image'],\n name='register_to_predicted')\n register_to_predicted.synchronize = True\n\n # Apply new transforms to bvecs\n post_bvec_transforms = pe.Node(GradientRotation(), name=\"post_bvec_transforms\")\n\n # Summarize the motion\n calculate_motion = pe.Node(CombineMotions(), name=\"calculate_motion\")\n\n workflow.connect([\n # Send inputs to DWI prediction\n (inputnode, predict_dwis, [('approx_aligned_dwi_files', 'aligned_dwis'),\n ('approx_aligned_bvecs', 'aligned_bvecs'),\n ('bvals', 'bvals'),\n ('b0_mean', 'aligned_b0_mean'),\n ('b0_mask', 'aligned_mask'),\n (('approx_aligned_bvecs', _bvecs_to_list), 'bvec_to_predict'),\n (('bvals', _bvals_to_floats), 'bval_to_predict')]),\n (predict_dwis, register_to_predicted, [('predicted_image', 'fixed_image')]),\n (inputnode, register_to_predicted, [\n ('original_dwi_files', 'moving_image'),\n ('b0_mask', 'fixed_image_masks')]),\n\n (register_to_predicted, calculate_motion, [\n (('forward_transforms', _list_squeeze), 'transform_files')]),\n (inputnode, calculate_motion, [('original_dwi_files', 'source_files'),\n ('b0_mean', 'ref_file')]),\n (calculate_motion, outputnode, [('motion_file', 'motion_params')]),\n\n (register_to_predicted, post_bvec_transforms, [\n (('forward_transforms', _list_squeeze), 'affine_transforms')]),\n (inputnode, post_bvec_transforms, [('original_bvecs', 'bvec_files'),\n ('bvals', 'bval_files')]),\n\n (predict_dwis, outputnode, [('predicted_image', 'predicted_dwis')]),\n (post_bvec_transforms, outputnode, [('bvecs', 'aligned_bvecs')]),\n (register_to_predicted, outputnode, [('warped_image', 'aligned_dwis'),\n ('forward_transforms', 'hmc_transforms')])\n ])\n\n return workflow", "def reset(self, model):\n def replace_parameters(module, target_weight, target_bias=None):\n module.weight = nn.Parameter(target_weight)#torch.from_numpy(target_weight).to(self.args.device))\n if (hasattr(module, 'bias')):\n module.bias = nn.Parameter(target_bias)#torch.from_numpy(target_bias).to(self.args.device))\n def replace_recurrent(module, l, cur_idx, prev_kept):\n # Retrieve parameters\n cur_ih = getattr(module, 'weight_ih_l' + str(l)).data#.cpu().numpy()\n cur_hh = getattr(module, 'weight_hh_l' + str(l)).data#.cpu().numpy()\n cur_bih = getattr(module, 'bias_ih_l' + str(l)).data#.cpu().numpy()\n cur_bhh = getattr(module, 'bias_hh_l' + str(l)).data#.cpu().numpy()\n cur_hidden = 
cur_hh.shape[1]\n if (prev_kept is not None): \n cur_ih = cur_ih[:, prev_kept]\n if (len(cur_idx[0]) < 3):\n n_hid = cur_ih.shape[0] / {nn.LSTM:4, nn.GRU:3}[module.__class__]\n cur_idx[0] = np.linspace(0, n_hid - 1, n_hid).astype('int')\n setattr(module, 'weight_ih_l' + str(l), cur_ih)\n return\n if (len(cur_idx[1]) < 3):\n cur_idx[1] = np.linspace(0, cur_hh.shape[0] - 1, cur_hh.shape[0]).astype('int')\n setattr(module, 'weight_ih_l' + str(l), cur_ih)\n return\n cur_hh = cur_hh[:, cur_idx[0]]\n rep_id0, rep_id1 = cur_idx[0], cur_idx[1]\n # Handle repetitions for LSTM and GRU\n if (module.__class__ in [nn.LSTM, nn.GRU]):\n n_reps = {nn.LSTM:4, nn.GRU:3}[module.__class__]\n final_id0, final_id1 = [], []\n for i in range(n_reps):\n final_id0.extend(rep_id0 + (cur_hidden * i))\n final_id1.extend(rep_id1 + (cur_hidden * i))\n rep_id0, rep_id1 = final_id0, final_id1\n # Finally replace parameters\n cur_ih = nn.Parameter(cur_ih[rep_id0])#torch.from_numpy(cur_ih[rep_id0]).to(self.args.device))\n cur_hh = nn.Parameter(cur_hh[rep_id1])#torch.from_numpy(cur_hh[rep_id1]).to(self.args.device))\n cur_bih = nn.Parameter(cur_bih[rep_id0])#torch.from_numpy(cur_bih[rep_id0]).to(self.args.device))\n cur_bhh = nn.Parameter(cur_bhh[rep_id1])#torch.from_numpy(cur_bhh[rep_id1]).to(self.args.device))\n setattr(module, 'weight_ih_l' + str(l), cur_ih)\n setattr(module, 'weight_hh_l' + str(l), cur_hh)\n setattr(module, 'bias_ih_l' + str(l), cur_bih)\n setattr(module, 'bias_hh_l' + str(l), cur_bhh)\n module.hidden_size = len(cur_idx[1])\n # Possibility to reinit\n if self.prune_reset == 'reinit': \n model.apply(self.initializer)\n else:\n model.apply(self.initializer)\n # Rewind the weights to the saved state dict\n for name, param in model.named_parameters():\n param.data = self.rewind_state_dict[name]\n # Need to track previous modules\n prev_kept = None\n for (name, m) in self.leaf_modules:\n if (hasattr(m, 'unprune_idx')):\n if ('Transpose' in str(m.__class__)):\n kept_weights = m.weight.data[:, m.unprune_idx]#.cpu().numpy()[:, m.unprune_idx]\n if (prev_kept is not None and len(kept_weights.shape) > 1):\n kept_weights = kept_weights[prev_kept]\n elif (m.__class__ in [nn.LSTM, nn.GRU, nn.RNN]):\n for l in range(32):\n if (not hasattr(m, 'weight_ih_l' + str(l))):\n break\n replace_recurrent(m, l, m.unprune_idx[l], prev_kept)\n prev_kept = m.unprune_idx[l][1]\n continue\n else:\n kept_weights = m.weight.data[m.unprune_idx]#.cpu().numpy()[m.unprune_idx]\n if (prev_kept is not None and len(kept_weights.shape) > 1):\n kept_weights = kept_weights[:, prev_kept]\n if (hasattr(m, 'bias')):\n kept_biases = m.bias.data[m.unprune_idx]#.cpu().numpy()[m.unprune_idx]\n replace_parameters(m, kept_weights, kept_biases)\n prev_kept = m.unprune_idx\n if ('Norm' in str(m.__class__) and (not 'LayerNorm' in str(m.__class__))):\n running_mean = m.running_mean[m.unprune_idx]#.cpu().numpy()[m.unprune_idx]\n m.running_mean.data = running_mean#torch.from_numpy(running_mean).to(self.args.device)\n running_var = m.running_var[m.unprune_idx]#.cpu().numpy()[m.unprune_idx]\n m.running_var.data = running_var#torch.from_numpy(running_var).to(self.args.device)\n if ('LayerNorm' in str(m.__class__)):\n m.normalized_shape = (m.unprune_idx.shape[0],)\n elif (hasattr(m, 'weight') and (m.weight is not None) and prev_kept is not None):\n kept_weights = m.weight.data#.cpu().numpy()\n if (prev_kept is not None):\n if ('Transpose' in str(m.__class__) or 'Norm' in str(m.__class__)):\n kept_weights = kept_weights[prev_kept]\n else:\n kept_weights = 
kept_weights[:, prev_kept]\n kept_biases = m.bias.data#.cpu().numpy() \n if ('Norm' in str(m.__class__)):\n if (not 'LayerNorm' in str(m.__class__)):\n running_mean = m.running_mean[prev_kept]#.cpu().numpy()[prev_kept]\n m.running_mean.data = running_mean#torch.from_numpy(running_mean).to(self.args.device)\n running_var = m.running_var[prev_kept]#.cpu().numpy()[prev_kept]\n m.running_var.data = running_var#torch.from_numpy(running_var).to(self.args.device)\n kept_biases = kept_biases[prev_kept]\n if ('LayerNorm' in str(m.__class__)):\n m.normalized_shape = (prev_kept.shape[0],)\n else:\n prev_kept = None\n replace_parameters(m, kept_weights, kept_biases)\n # Skip non-prunable layers\n if (hasattr(m, 'unprunable') and m.unprunable):\n prev_kept = None\n continue\n self.rewind_state_dict = copy.deepcopy(model.state_dict())\n return model" ]
[ "0.6569588", "0.6339002", "0.6296439", "0.62247455", "0.62247455", "0.62224156", "0.617453", "0.61637294", "0.61443996", "0.6075079", "0.6071582", "0.60711384", "0.60624826", "0.6026027", "0.60046995", "0.599758", "0.5973139", "0.5960339", "0.59414625", "0.5919527", "0.59161633", "0.59155595", "0.59089595", "0.5901493", "0.5866725", "0.5863784", "0.5863784", "0.5854056", "0.5850727", "0.5839218" ]
0.66313094
0
Given a trained ESN and a number of input images 'imgs', predicts 'Npred' frames after the last frame of 'imgs'. The input images are used to create the initial state 'h0' for the prediction (warmup).
def warmup_predict(model, imgs, Npred):
    H = augmented_state_matrix(model[:-1], imgs, 0)
    h0 = H[-2]
    y0 = imgs[-1]
    return predict(model, y0, h0, Npred)
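For orientation, the warmup-then-closed-loop idea behind this row can be illustrated with a self-contained NumPy sketch. The reservoir update, weight shapes, and linear readout below are illustrative assumptions made only for this sketch; they are not the `augmented_state_matrix`/`predict` helpers the snippet above relies on.

import numpy as np

def esn_warmup_then_predict(Win, W, Wout, frames, n_pred):
    # Warmup: drive the reservoir with the known frames to build up a state.
    h = np.zeros(W.shape[0])
    for y in frames:
        h = np.tanh(Win @ y + W @ h)
    # Prediction: run closed-loop, feeding each predicted frame back in.
    preds = []
    y = frames[-1]
    for _ in range(n_pred):
        h = np.tanh(Win @ y + W @ h)
        y = Wout @ h
        preds.append(y)
    return np.stack(preds)

# Toy usage with random weights and flattened 8x8 "frames" (hypothetical sizes).
rng = np.random.default_rng(0)
d, n_hidden = 64, 200
Win = rng.normal(scale=0.1, size=(n_hidden, d))
W = rng.normal(scale=1.0 / np.sqrt(n_hidden), size=(n_hidden, n_hidden))
Wout = rng.normal(scale=0.1, size=(d, n_hidden))
frames = rng.normal(size=(10, d))
print(esn_warmup_then_predict(Win, W, Wout, frames, n_pred=5).shape)  # (5, 64)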
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def predict(self, images, batch_size):\n pass", "def predict(self, images):\n\t\t#testing_dataset = tf.data.Dataset.from_tensor_slices(images)\n\t\ttf.keras.backend.set_learning_phase(0)\n\t\ttesting_dataset = tf.data.Dataset.from_tensor_slices(np.asarray(images)).map(lambda x: tf.image.resize(x, [self.image_size, self.image_size]) / 255.0)\n\t\t#testing_dataset_shape = tf.data.Dataset.from_tensor_slices(np.full((len(images), 2), 500, dtype=np.int32))\n\t\ttesting_iterator_X = tf.data.Dataset.zip((testing_dataset, )).batch(self.batch_size).make_initializable_iterator()\n\n\t\tself.sess.run(testing_iterator_X.initializer)\n\t\ttesting_handle_X = self.sess.run(testing_iterator_X.string_handle())\n\n\t\tfinal_output = np.zeros([len(images), 500, 500, num_classes])\n\t\tj = 0\n\t\tcount = 0\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\t[test_output] = self.sess.run(\n\t\t\t\t\t[self.output],\n\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\tself.is_training: False,\n\t\t\t\t\t\t\tself.handle_X: testing_handle_X,\n\t\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t\tthis_len = len(test_output)\n\t\t\t\tfor z in range(len(test_output)):\n\t\t\t\t\tfor dim in range(num_classes):\n\t\t\t\t\t\tfinal_output[count+z:count+z+1, :, :, dim] = scipy.misc.imresize(test_output[z, :, :, dim], [500, 500])\n\n\t\t\t\t#final_output[count:count+this_len, :, :, :] = test_output\n\t\t\t\tto = final_output[count:count+this_len, :, :, :].argmax(axis=-1)\n\t\t\t\t'''\n\t\t\t\tpdb.set_trace()\n\t\t\t\tfor z in range(this_len):\n\t\t\t\t\tplt.matshow(to[z])\n\t\t\t\t\tplt.colorbar()\n\t\t\t\t\tplt.show()\n\t\t\t\t'''\n\t\t\t\tcount += this_len\n\t\t\t\tprint(f'Batch: {j}')\n\t\t\t\tj += 1\n\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\tbreak\n\t\treturn final_output", "def predict(model, img, target_size, top_n=3):\r\n print('img.size=',img.size)\r\n if img.size != target_size:\r\n img = img.resize(target_size)\r\n \r\n x = image.img_to_array(img)\r\n x = np.expand_dims(x, axis=0)\r\n x = preprocess_input(x)\r\n preds = model.predict(x)\r\n return decode_predictions(preds,top=top_n)[0]", "def batch_predict(filenames, net):\n N, C, H, W = net.blobs[net.inputs[0]].data.shape\n F = net.blobs[net.outputs[0]].data.shape[1]\n Nf = len(filenames)\n allftrs = np.zeros((Nf, F))\n #allpreds = []\n for i in range(0, Nf, N):\n tic = time.time()\n in_data = np.zeros((N, C, H, W), dtype=np.float32)\n\n batch_range = range(i, min(i+N, Nf))\n batch_filenames = [filenames[j] for j in batch_range]\n Nb = len(batch_range)\n\n batch_images = np.zeros((Nb, 3, H, W))\n for j,fname in enumerate(batch_filenames):\n im = np.array(Image.open(fname))\n \n if len(im.shape) == 2:\n im = np.tile(im[:,:,np.newaxis], (1,1,3))\n # RGB -> BGR\n im = im[:,:,(2,1,0)]\n # mean subtraction\n im = im - np.array([103.939, 116.779, 123.68])\n # resize\n im = imresize(im, (H, W))\n # get channel in correct dimension\n im = np.transpose(im, (2, 0, 1))\n batch_images[j,:,:,:] = im\n\n # insert into correct place\n in_data[0:len(batch_range), :, :, :] = batch_images\n \n # predict features\n ftrs = predict(in_data, net)\n toc = time.time()\n \n for j in range(len(batch_range)):\n allftrs[i+j,:] = ftrs[j,:]\n\n return allftrs", "def predict(model, images):\n return model.predict_classes(images)", "def predict_all(self, imgs):\n return self._predict(imgs)", "def predict_ch3(net, test_iter, n=6): #@save\n for X, y in test_iter:\n break\n trues = d2l.get_fashion_mnist_labels(y)\n preds = d2l.get_fashion_mnist_labels(d2l.argmax(net(X), axis=1))\n titles = [true +'\\n' + pred for true, pred in 
zip(trues, preds)]\n d2l.show_images(d2l.reshape(X[0:n], (n, 28, 28)), 1, n, titles=titles[0:n])", "def predict(self, img_path):\n\n img = cv2.imread(img_path)\n img0 = img.copy()\n \n #This happens inside datasets\n # Convert\n img = letterbox(img, new_shape=self.img_size)[0]\n\n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n \n #this happens on detect\n img = torch.from_numpy(img).to(self.device)\n img = img.float() # uint8 to fp16/32\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n\n # Inference\n pred = self.model(img)[0]\n\n # Apply NMS\n pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=self.agnostic_nms)\n \n # Process detections\n for i, det in enumerate(pred): # detections per image\n if det is not None and len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()\n\n pred = [d.cpu().detach().numpy() for d in pred if d is not None]\n pred = pred[0] if len(pred) else pred\n \n pred = [[[x1, y1, x2, y2],conf] for x1, y1, x2, y2, conf, clss in pred]\n\n return pred", "def batch_predict(filenames, net):\n\n N, C, H, W = net.blobs[net.inputs[0]].data.shape\n F = net.blobs[net.outputs[0]].data.shape[1]\n Nf = len(filenames)\n Hi, Wi, _ = imread(filenames[0]).shape\n allftrs = np.zeros((Nf, F))\n for i in range(0, Nf, N):\n start = time.time()\n in_data = np.zeros((N, C, H, W), dtype=np.float32)\n\n batch_range = range(i, min(i+N, Nf))\n batch_filenames = [filenames[j] for j in batch_range]\n Nb = len(batch_range)\n\n batch_images = np.zeros((Nb, 3, H, W))\n for j,fname in enumerate(batch_filenames):\n im = imread(fname)\n if len(im.shape) == 2:\n im = np.tile(im[:,:,np.newaxis], (1,1,3))\n # RGB -> BGR\n im = im[:,:,(2,1,0)]\n # mean subtraction\n im = im - np.array([103.939, 116.779, 123.68])\n # resize\n im = imresize(im, (H, W), 'bicubic')\n # get channel in correct dimension\n im = np.transpose(im, (2, 0, 1))\n batch_images[j,:,:,:] = im\n\n # inserhttp://web.engr.illinois.edu/~slazebni/spring14/lec24_cnn.pdft into correct place\n in_data[0:len(batch_range), :, :, :] = batch_images\n\n # predict features\n ftrs = predict(in_data, net)\n\n for j in range(len(batch_range)):\n allftrs[i+j,:] = ftrs[j,:]\n\n end = time.time()\n files_left = (len(filenames) - i+len(batch_range)) / 10.0\n one_batch_time = end - start\n print 'Done %d/%d files. Took %d seconds. 
%f minutes left,' % (i+len(batch_range), len(filenames), one_batch_time, (one_batch_time * files_left) / 60.0)\n\n return allftrs", "def predict(self, preprocessed_dict):\r\n preprocessed_images_fg = preprocessed_dict.get('images_fg')\r\n\r\n net_image = slim.conv2d(preprocessed_images_fg, num_outputs=5, kernel_size=3,\r\n padding='SAME', scope='psp_conv1')\r\n net_image = slim.batch_norm(net_image, is_training=self._is_training)\r\n net_image = slim.conv2d(net_image, num_outputs=4, kernel_size=3,\r\n padding='SAME', scope='psp_conv2')\r\n net_image = slim.batch_norm(net_image, is_training=self._is_training)\r\n # pyramid scence pooling\r\n pool1 = self.pyramid_pooling(net_image, (60, 60), 64, scope='pyramid_pooling1')\r\n pool2 = self.pyramid_pooling(net_image, (30, 30), 64, scope='pyramid_pooling2')\r\n pool3 = self.pyramid_pooling(net_image, (20, 20), 64, scope='pyramid_pooling3')\r\n pool4 = self.pyramid_pooling(net_image, (10, 10), 64, scope='pyramid_pooling4')\r\n\r\n net_image = tf.concat(values=[net_image, pool1, pool2, pool3, pool4], axis=3)\r\n net_image = slim.batch_norm(net_image, is_training=self._is_training)\r\n pred_trimap = slim.conv2d(net_image, num_outputs=3, kernel_size=3,\r\n padding='SAME', scope='psp_conv3')\r\n pred_trimap_soft = tf.nn.softmax(pred_trimap, axis=3)\r\n # background_trimap = tf.slice(pred_trimap, [0, 0, 0, 0], [-1, -1, -1, 1])\r\n # foreground_trimap = tf.slice(pred_trimap, [0, 0, 0, 1], [-1, -1, -1, 1])\r\n # unsure_trimap = tf.slice(pred_trimap, [0, 0, 0, 2], [-1, -1, -1, 1])\r\n background = tf.slice(pred_trimap, [0, 0, 0, 0], [-1, -1, -1, 1])\r\n background_trimap = tf.slice(pred_trimap_soft, [0, 0, 0, 0], [-1, -1, -1, 1])\r\n foreground = tf.slice(pred_trimap, [0, 0, 0, 1], [-1, -1, -1, 1])\r\n foreground_trimap = tf.slice(pred_trimap_soft, [0, 0, 0, 1], [-1, -1, -1, 1])\r\n unsure = tf.slice(pred_trimap, [0, 0, 0, 2], [-1, -1, -1, 1])\r\n unsure_trimap = tf.slice(pred_trimap_soft, [0, 0, 0, 2], [-1, -1, -1, 1])\r\n\r\n # net_image_trimap = tf.concat(values=[preprocessed_images_fg, pred_trimap], axis=3)\r\n # VGG-16\r\n _, endpoints = nets.vgg.vgg_16(preprocessed_images_fg,\r\n num_classes=1,\r\n spatial_squeeze=False,\r\n is_training=self._is_training)\r\n # Note: The `padding` method of fc6 of VGG-16 in tf.contrib.slim is\r\n # `VALID`, but the expected value is `SAME`, so we must replace it.\r\n net_image = endpoints.get('vgg_16/pool5')\r\n net_image = slim.batch_norm(net_image, is_training=self._is_training)\r\n # net_image = slim.conv2d(net_image, num_outputs=4096, kernel_size=7,\r\n # padding='SAME', scope='fc6_')\r\n\r\n # VGG-16 for alpha channel\r\n net_alpha = slim.repeat(pred_trimap, 2, slim.conv2d, 64,\r\n [3, 3], scope='conv1_alpha')\r\n net_alpha = slim.max_pool2d(net_alpha, [2, 2], scope='pool1_alpha')\r\n net_alpha = slim.batch_norm(net_alpha, is_training=self._is_training)\r\n net_alpha = slim.repeat(net_alpha, 2, slim.conv2d, 128, [3, 3],\r\n scope='conv2_alpha')\r\n net_alpha = slim.max_pool2d(net_alpha, [2, 2], scope='pool2_alpha')\r\n net_alpha = slim.batch_norm(net_alpha, is_training=self._is_training)\r\n net_alpha = slim.repeat(net_alpha, 2, slim.conv2d, 256, [3, 3],\r\n scope='conv3_alpha')\r\n net_alpha = slim.max_pool2d(net_alpha, [2, 2], scope='pool3_alpha')\r\n net_alpha = slim.batch_norm(net_alpha, is_training=self._is_training)\r\n net_alpha = slim.repeat(net_alpha, 2, slim.conv2d, 512, [3, 3],\r\n scope='conv4_alpha')\r\n net_alpha = slim.max_pool2d(net_alpha, [2, 2], scope='pool4_alpha')\r\n net_alpha = 
slim.repeat(net_alpha, 2, slim.conv2d, 512, [3, 3],\r\n scope='conv5_alpha')\r\n net_alpha = slim.batch_norm(net_alpha, is_training=self._is_training)\r\n net_alpha = slim.max_pool2d(net_alpha, [2, 2], scope='pool5_alpha')\r\n # net_alpha = slim.conv2d(net_alpha, 4096, [7, 7], padding='SAME',\r\n # scope='fc6_alpha')\r\n net_alpha = slim.batch_norm(net_alpha, is_training=self._is_training)\r\n\r\n # Concate the first stage prediction\r\n net = tf.concat(values=[net_image, net_alpha], axis=3)\r\n net.set_shape([None, self._default_image_size // 32,\r\n self._default_image_size // 32, 1024])\r\n\r\n # Deconvlution\r\n with slim.arg_scope([slim.conv2d_transpose], stride=2, kernel_size=5):\r\n # Deconv6\r\n net = slim.conv2d_transpose(net, num_outputs=512, kernel_size=1, scope='deconv6')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n # Deconv5\r\n net = slim.conv2d_transpose(net, num_outputs=512, scope='deconv5')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n # Deconv4\r\n net = slim.conv2d_transpose(net, num_outputs=256, scope='deconv4')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n # Deconv3\r\n net = slim.conv2d_transpose(net, num_outputs=128, scope='deconv3')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n # Deconv2\r\n net = slim.conv2d_transpose(net, num_outputs=64, scope='deconv2')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n # Deconv1\r\n net = slim.conv2d_transpose(net, num_outputs=64, stride=1, scope='deconv1')\r\n net = slim.batch_norm(net, is_training=self._is_training)\r\n\r\n # Predict alpha matte\r\n alpha_matte_r = slim.conv2d(net, num_outputs=1, kernel_size=[5, 5],\r\n activation_fn=tf.nn.sigmoid,\r\n scope='AlphaMatte')\r\n\r\n alpha_matte_p = foreground_trimap + tf.multiply(unsure_trimap, alpha_matte_r)\r\n prediction_dict = {'alpha_matte_r': alpha_matte_r,\r\n 'alpha_matte_p': alpha_matte_p,\r\n 'pred_trimap': pred_trimap,\r\n 'background': background,\r\n 'foreground': foreground,\r\n 'background_trimap': background_trimap,\r\n 'foreground_trimap': foreground_trimap,\r\n 'unsure_trimap': unsure_trimap,\r\n }\r\n return prediction_dict", "def inception_score(images):\n height, width = 90, 90\n\n images = np.array([np.array(Image.fromarray(x, mode=\"RGB\").resize((height, width))) for x in np.reshape(images, (-1, 28, 28, 3))]) / 255. 
# Transform images to a suitable form\n\n with loaded_model[0].as_default():\n predictions = loaded_model[1].predict(images)\n preds = np.argmax(predictions, axis=1)\n aux_preds = np.zeros(10)\n unique, counts = np.unique(preds, return_counts=True)\n for number, appearances in zip(unique, counts):\n aux_preds[number] = appearances\n aux_preds = aux_preds / predictions.shape[0]\n predictions = np.sort(predictions, axis=1)\n predictions = np.mean(predictions, axis=0)\n\n sam_error = np.sum([aux_preds[w] * np.log(aux_preds[w] / predictions[w]) if aux_preds[w] > 0 else 0 for w in range(predictions.shape[0])])\n\n return sam_error", "def classify_images():\n\n # Load the desired image\n img_path = 'dataset/colorize_images/n02085782_919.jpg'\n img = image.load_img(img_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n model = InceptionV3(weights=\"imagenet\")\n preds = model.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # (one such list for each sample in the batch)\n print('Predicted:', decode_predictions(preds, top=3)[0])", "def prediction():\r\n\r\n loaded_model = load_model('imageTrainedModel.h5')\r\n print(loaded_model.summary())\r\n\r\n # retrieve history also:\r\n f = open('history.pckl', 'rb')\r\n history = pickle.load(f)\r\n f.close()\r\n\r\n print(history.keys())\r\n print(history)\r\n\r\n epochs = len(history['loss']) # length of the list stored at 'loss'\r\n # Plot losses for train and validation\r\n plt.figure()\r\n plt.title('Loss as training progresses')\r\n plt.xlabel('Epoch')\r\n plt.ylabel('Loss')\r\n plt.plot(history['loss'], label='Train Error')\r\n plt.plot(history['val_loss'], label='Val Error')\r\n plt.legend()\r\n plt.show()\r\n\r\n # Plot metrics\r\n plt.plot(history['acc']) # use same metric that was used for training. 
'history' is a dictionary.\r\n plt.title('Accuracy as training progresses')\r\n plt.ylabel('Accuracy (%)')\r\n plt.xlabel('Epoch')\r\n ymax = max(history['acc'])\r\n xpos = history['acc'].index(ymax)\r\n xmax = xpos\r\n plt.annotate('Maximum accuracy: %s' % round(ymax, 3),\r\n xy=(xmax, ymax), xycoords='data',\r\n xytext=(0.5, 0.5), textcoords='axes fraction',\r\n fontsize=12)\r\n plt.show()\r\n\r\n # make predictions using x_test\r\n test_y_predictions = loaded_model.predict(x_test, batch_size=None, verbose=1, steps=None)\r\n test_y_predictions = np.around(test_y_predictions, decimals=0) # round to whole integers\r\n true_false_array = np.equal(y_test, test_y_predictions) # test of equality.\r\n true_count = np.sum(true_false_array) # number of correctly categorised images\r\n false_count = true_false_array.shape[0] - true_count # number of images not correctly categorised\r\n\r\n # Plot predicted and actual image categories\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111)\r\n plt.title('Classification of Image Categories')\r\n plt.ylabel('Number of Images')\r\n plt.xlabel('Image Classification')\r\n label = ['Correct', 'Incorrect']\r\n index = np.arange(len(label))\r\n plt.xticks(index, label, fontsize=10, rotation=0)\r\n ax1.bar(index, [true_count, false_count])\r\n plt.show()", "def predictint(test_images):\n # Define the model (same as when creating the model file)\n x = tf.placeholder(tf.float32, [None, image_size])\n W = tf.Variable(tf.zeros([image_size, image_labels]))\n b = tf.Variable(tf.zeros([image_labels]))\n is_test = tf.placeholder(tf.bool)\n # Model Parameters\n W_conv1 = tf.get_variable(\"W_conv1\", shape=[5, 5, 1, 32], initializer=weight_xavier_init(5 * 5 * 1, 32))\n W_conv2 = tf.get_variable(\"W_conv2\", shape=[5, 5, 32, 64], initializer=weight_xavier_init(5 * 5 * 32, 64))\n W_fc1 = tf.get_variable(\"W_fc1\", shape=[64 * 7 * 7, 1024], initializer=weight_xavier_init(64 * 7 * 7, 1024))\n W_fc2 = tf.get_variable(\"W_fc2\", shape=[1024, image_labels], initializer=weight_xavier_init(1024, image_labels))\n\n b_conv1 = bias_variable([32])\n b_conv2 = bias_variable([64])\n b_fc1 = bias_variable([1024])\n b_fc2 = bias_variable([image_labels])\n\n x_image = tf.reshape(x, [-1, image_width, image_height, 1])\n conv1 = conv2d(x_image, W_conv1) + b_conv1\n conv1_bn = batchnorm(conv1, b_conv1, is_test, True)\n h_conv1 = tf.nn.relu(conv1_bn)\n h_pool1 = max_pool_2x2(h_conv1)\n\n conv2 = conv2d(h_pool1, W_conv2) + b_conv2\n conv2_bn = batchnorm(conv2, b_conv2, is_test, True)\n h_conv2 = tf.nn.relu(conv2_bn)\n h_pool2 = max_pool_2x2(h_conv2)\n\n h_pool2_flat = tf.reshape(h_pool2, [-1, W_fc1.get_shape().as_list()[0]])\n fc1 = tf.matmul(h_pool2_flat, W_fc1) + b_fc1\n fc1_bn = batchnorm(fc1, b_fc1, is_test, False)\n h_fc1 = tf.nn.relu(fc1_bn)\n\n keep_prob = tf.placeholder(tf.float32)\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)\n\n y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)\n\n init_op = tf.initialize_all_variables()\n saver = tf.train.Saver()\n\n \"\"\"\n Load the my-model file\n file is stored in the same directory as this python script is started\n Use the model to predict the integer. 
Integer is returend as list.\n\n Based on the documentatoin at\n https://www.tensorflow.org/versions/master/how_tos/variables/index.html\n \"\"\"\n predicted_lables = np.zeros(test_images.shape[0])\n with tf.Session() as sess:\n sess.run(init_op)\n saver.restore(sess, \"F:\\PycharmProject\\CNN_mnist_base\\model\\my-model\")\n # print (\"Model restored.\")\n predict = tf.argmax(y_conv, 1)\n for i in range(0, test_images.shape[0]):\n imagein = test_images[i]\n predicted_lables[i] = predict.eval(feed_dict={x: [imagein], keep_prob: 1.0, is_test: False}, session=sess)\n sess.close()\n return predicted_lables", "def predict(image_data):\n PAYLOAD = {}\n PAYLOAD[\"timestamp\"] = str(datetime.now())\n PAYLOAD[\"inference-type\"] = \"image-classification\"\n PAYLOAD[\"inference-description\"] = \"Top {} predictions with score {} or above \".format(\n config_utils.MAX_NO_OF_RESULTS, config_utils.SCORE_THRESHOLD\n )\n PAYLOAD[\"inference-results\"] = []\n\n try:\n # Run DLR to perform inference with DLC optimized model\n model_output = dlr_model.run(image_data)\n config_utils.logger.info(\"pred shape: '{}'.\".format(model_output[0][0].shape)) \n probabilities = softmax(model_output[0][0])\n config_utils.logger.info(\"pred shape softmax: '{}'.\".format(probabilities.shape)) \n sort_classes_by_probability = argsort(probabilities)[::-1]\n\n config_utils.logger.info(\"pred classes: '{}'.\".format(sort_classes_by_probability[: config_utils.MAX_NO_OF_RESULTS])) \n\n for i in sort_classes_by_probability[: config_utils.MAX_NO_OF_RESULTS]:\n if probabilities[i] >= config_utils.SCORE_THRESHOLD:\n result = {\"Label\": str(synset[i]), \"Score\": str(probabilities[i])}\n PAYLOAD[\"inference-results\"].append(result)\n\n config_utils.logger.info(dumps(PAYLOAD))\n\n if config_utils.TOPIC.strip() != \"\":\n ipc_utils.IPCUtils().publish_results_to_cloud(PAYLOAD)\n else:\n config_utils.logger.info(\"No topic set to publish the inference results to the cloud.\")\n\n except Exception as e:\n config_utils.logger.error(\"Exception occured during prediction: {}\".format(e))", "def predict(self, inputs, oversample=True):\r\n # Scale to standardize input dimensions.\r\n input_ = np.zeros((len(inputs),\r\n self.image_dims[0],\r\n self.image_dims[1],\r\n inputs[0].shape[2]),\r\n dtype=np.float32)\r\n print inputs[0].shape\r\n print input_.shape\r\n for ix, in_ in enumerate(inputs):\r\n input_[ix] = caffe.io.resize_image(in_, self.image_dims)\r\n\r\n # if oversample:\r\n # # Generate center, corner, and mirrored crops.\r\n # input_ = caffe.io.oversample(input_, self.crop_dims)\r\n # else:\r\n # # Take center crop.\r\n # center = np.array(self.image_dims) / 2.0\r\n # crop = np.tile(center, (1, 2))[0] + np.concatenate([\r\n # -self.crop_dims / 2.0,\r\n # self.crop_dims / 2.0\r\n # ])\r\n # crop = crop.astype(int)\r\n # input_ = input_[:, crop[0]:crop[2], crop[1]:crop[3], :]\r\n\r\n # Classify\r\n caffe_in = np.zeros(np.array(input_.shape)[[0, 3, 1, 2]],\r\n dtype=np.float32)\r\n for ix, in_ in enumerate(input_):\r\n caffe_in[ix] = self.transformer.preprocess(self.inputs[0], in_)\r\n out = self.forward_all(**{self.inputs[0]: caffe_in})\r\n predictions = out[self.outputs[0]]\r\n\r\n # # For oversampling, average predictions across crops.\r\n # if oversample:\r\n # predictions = predictions.reshape((len(predictions) / 10, 10, -1))\r\n # predictions = predictions.mean(1)\r\n\r\n return predictions", "def predict_data(img): \n return gennet.predict_data(img, 'Resnet50')", "def predict_images(self, img_paths):\n img_gen = 
ImageGenerator(img_paths, batch_size=1, shuffle=False, normalize='std_norm', augmentation=False)\n\n return self.model.predict_generator(img_gen, verbose=1)", "def _build_prediction(self, examples, post_process=True):\n options = self._model_proto\n is_training = self._is_training\n\n (inputs, num_proposals,\n proposals) = (examples[InputDataFields.image],\n examples[InputDataFields.num_proposals],\n examples[InputDataFields.proposals])\n\n tf.summary.image('inputs', inputs, max_outputs=10)\n model_utils.visl_proposals(\n inputs, num_proposals, proposals, name='proposals', top_k=100)\n\n # FRCNN.\n\n proposal_features = self._extract_frcnn_feature(inputs, num_proposals,\n proposals)\n\n # Build the OICR network.\n # proposal_scores shape = [batch, max_num_proposals, 1 + num_classes].\n # See `Multiple Instance Detection Network with OICR`.\n\n predictions = {}\n with slim.arg_scope(build_hyperparams(options.fc_hyperparams, is_training)):\n for i in range(options.oicr_iterations):\n predictions[NOD2Predictions.oicr_proposal_scores + '_at_{}'.format(\n i + 1)] = proposal_scores = slim.fully_connected(\n proposal_features,\n num_outputs=1 + self._num_classes,\n activation_fn=None,\n scope='oicr/iter{}'.format(i + 1))\n\n if post_process and options.HasField('pcl_preprocess'):\n proposal_scores = tf.nn.softmax(\n tf.stop_gradient(proposal_scores), axis=-1)[:, :, 1:]\n (num_proposals, proposals,\n _, _, additional_fields) = self._pcl_preprocess_fn(\n proposals, proposal_scores, {'proposal_features': proposal_features})\n proposal_features = additional_fields['proposal_features']\n\n # Build MIDN network.\n # proba_r_given_c shape = [batch, max_num_proposals, num_classes].\n\n with slim.arg_scope(build_hyperparams(options.fc_hyperparams, is_training)):\n if options.attention_type == nod2_model_pb2.NOD2Model.PER_CLASS:\n (midn_class_logits, midn_proposal_scores,\n midn_proba_r_given_c) = self._build_midn_network(\n num_proposals, proposal_features, num_classes=self._num_classes)\n elif options.attention_type == nod2_model_pb2.NOD2Model.PER_CLASS_TANH:\n (midn_class_logits, midn_proposal_scores,\n midn_proba_r_given_c) = self._build_midn_network_tanh(\n num_proposals, proposal_features, num_classes=self._num_classes)\n else:\n raise ValueError('Invalid attention type.')\n\n predictions.update({\n DetectionResultFields.class_labels:\n tf.constant(self._vocabulary_list),\n DetectionResultFields.num_proposals:\n num_proposals,\n DetectionResultFields.proposal_boxes:\n proposals,\n NOD2Predictions.midn_class_logits:\n midn_class_logits,\n NOD2Predictions.midn_proba_r_given_c:\n midn_proba_r_given_c,\n NOD2Predictions.oicr_proposal_scores + '_at_0':\n midn_proposal_scores\n })\n\n # Post process to get final predictions.\n\n if post_process:\n predictions.update(self._post_process(inputs, predictions))\n\n return predictions", "def classify_images(net, images):\n prediction = net.predict(images) # predict takes any number of images, and formats them for the Caffe net automatically\n return prediction", "def predict(self, images, batch_size=1):\n predictions = []\n \n for image in images.astype(\"float\"):\n filtered_image = self.apply_filter(image)\n _, pred = cv2.threshold(filtered_image.astype('uint8'), 0, 1, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n predictions.append(pred)\n \n return np.reshape(predictions, images.shape)", "def score(self, imgs, batch_size=32, splits=10):\n \n # preprocess images\n if imgs.shape[0] != 299 or imgs.shape[1] != 299:\n imgs = np.array([scipy.misc.imresize(img, (299, 299)) 
for img in imgs])\n n_batches = 1 + (len(imgs) / batch_size)\n batches = np.array_split(imgs, n_batches)\n \n # get prediction vectors of inception net for images\n preds = []\n for batch in tqdm(batches):\n imgs = [Image.fromarray(img) for img in batch]\n imgs = torch.stack([self.preprocess(img) for img in imgs])\n if self.gpu:\n imgs = imgs.cuda()\n imgs = Variable(imgs)\n pred = self.incept(imgs)\n pred = F.softmax(pred)\n preds.append(pred.data.cpu().numpy()) \n preds = np.concatenate(preds)\n \n # compute inception score\n scores = []\n for i in range(splits):\n part = preds[(i * preds.shape[0] // splits): \\\n ((i + 1) * preds.shape[0] // splits), :]\n kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, 0), 0)))\n kl = np.mean(np.sum(kl, 1))\n scores.append(np.exp(kl))\n \n return np.mean(scores), np.std(scores)", "def prediction_on_a_image(self, input, output,model_saved_path):\n\n # load the saved model\n if os.path.isfile(model_saved_path) is False:\n raise IOError('trained model: %s not exist' % model_saved_path)\n\n clf = joblib.load(model_saved_path)\n\n # split a large image to many small ones\n patch_w = 500 # parameters.get_digit_parameters(\"\", \"train_patch_width\", None, 'int')\n patch_h = 500 # parameters.get_digit_parameters(\"\", \"train_patch_height\", None, 'int')\n overlay_x = 0 # parameters.get_digit_parameters(\"\", \"train_pixel_overlay_x\", None, 'int')\n overlay_y = 0 # parameters.get_digit_parameters(\"\", \"train_pixel_overlay_y\", None, 'int')\n\n img_folder = os.path.dirname(input)\n img_name = os.path.basename(input)\n inf_list_txt = 'inf_image_list.txt'\n with open(inf_list_txt, 'w') as txt_obj:\n txt_obj.writelines(img_name + '\\n')\n\n img_patches = build_RS_data.make_dataset(img_folder, inf_list_txt, patch_w, patch_h, overlay_x, overlay_y,\n train=False)\n\n for img_idx, aImg_patches in enumerate(img_patches):\n inf_output_dir = 'inf_results' #os.path.splitext(img_name)[0]\n os.system('mkdir -p '+inf_output_dir)\n os.system('rm '+inf_output_dir+'/*')\n\n ## parallel inference patches\n # but it turns out not work due to the Pickle.PicklingError\n # not working due to mulitple parameters. Jan 9, 2019, hlc\n # use multiple thread\n num_cores = multiprocessing.cpu_count()\n print('number of thread %d' % num_cores)\n # theadPool = mp.Pool(num_cores) # multi threads, can not utilize all the CPUs? 
not sure hlc 2018-4-19\n theadPool = Pool(num_cores) # multi processes\n\n # inference_one_patch_svm(img_idx, image_count, p_idx, patch_count, inf_output_dir, img_patch, scaler,clf)\n\n parameters_list = [\n (img_idx, len(img_patches), idx, len(aImg_patches), inf_output_dir, img_patch, self._scaler, clf)\n for (idx, img_patch) in enumerate(aImg_patches)]\n # results = theadPool.map(inference_one_patch_svm, parameters_list) # not working\n results = theadPool.starmap(inference_one_patch_svm, parameters_list) # need python3\n print('result_list', results)\n\n # for p_idx, img_patch in enumerate(aImg_patches):\n # # read images\n # patch_data = build_RS_data.read_patch(img_patch) # read_whole_x_pixels(input)\n #\n # nbands, height, width = patch_data.shape\n #\n # X_predit = patch_data.reshape(nbands, -1)\n # X_predit = np.transpose(X_predit, (1, 0))\n #\n # if os.path.isfile(scaler_saved_path) and self._scaler is None:\n # self._scaler = joblib.load(scaler_saved_path)\n # result = self._scaler.transform(X_predit)\n # X = result.tolist()\n # elif self._scaler is not None:\n # result = self._scaler.transform(X_predit)\n # X = result.tolist()\n # else:\n # X = X_predit\n # basic.outputlogMessage('warning, no pre-processing of data before prediction')\n #\n # # more method on prediction can be foudn in :\n # # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html\n # pre_result = clf.predict(X)\n # result_img = pre_result.reshape((height, width))\n #\n # # save results\n # print('Save patch:%d/%d on Image:%d/%d , shape:(%d,%d)' %\n # (p_idx,len(aImg_patches), img_idx,len(img_patches), result_img.shape[0], result_img.shape[1]))\n #\n # # short the file name to avoid error of \" Argument list too long\", hlc 2018-Oct-29\n # file_name = \"I%d_%d\" % (img_idx, p_idx)\n #\n # save_path = os.path.join(inf_output_dir, file_name + '.tif')\n # build_RS_data.save_patch_oneband_8bit(img_patch,result_img.astype(np.uint8),save_path)\n #\n # with rasterio.open(input) as src_obj:\n # # Set spatial characteristics of the output object to mirror the input\n # kwargs = src_obj.meta\n # kwargs.update(\n # dtype=rasterio.uint8,\n # count=1)\n # # Create the file\n # with rasterio.open(output, 'w', **kwargs) as dst:\n # dst.write_band(1, result_img.astype(rasterio.uint8))\n # basic.outputlogMessage(\"save to %s\" % output)\n\n return True", "def predict_on_frames(frames):\n frame_predictions = []\n print(\"Total Number of Frames \",len(frames))\n count = 0\n #for i, frame in tqdm(enumerate(frames)):\n for frame in tqdm(frames):\n filename = frame[0]\n label = frame[1]\n frameCount = frame[2]\n\n if(count%200 == 0):\n print(count)\n \n prediction = label_image.get_prediction(filename)\n \n frame_predictions.append([prediction, label, frameCount])\n count = count + 1\n\n return frame_predictions", "def prediction(input_path=INPUT_DIR,\n output_path=OUTPUT_DIR,\n model_path=MODEL_PATH,\n test=False):\n\n X = tf.placeholder(shape=[None, chunk_size, chunk_size], dtype=tf.float32, name='input_area')\n y_inter = deepcn.deepcn(X, chunk_size, False)\n y_pred = tf.cast(tf.argmax(tf.squeeze(y_inter), -1), tf.uint8)\n\n img_ids = []\n for name in os.listdir(input_path):\n if os.path.isdir(os.path.join(input_path, name)):\n img_ids.append(name)\n all_preds = np.zeros((len(img_ids), 256, 256))\n print('num of images: ', len(img_ids))\n\n loader = tf.train.Saver()\n\n with tf.Session() as sess:\n print(\"Import model from: %s\" %model_path)\n loader.restore(sess, model_path)\n # 
sess.run(tf.global_variables_initializer())\n\n batch_start_pos = 0\n while batch_start_pos < len(img_ids):\n batch_size = 100\n batch_end_pos = min(batch_start_pos + batch_size, len(img_ids))\n print('predict from %s, to %s' % (batch_start_pos, batch_end_pos))\n batch = img_ids[batch_start_pos:batch_end_pos]\n pw = predict_data_wrapper.PredictWrapper(path=input_path,\n resize_size=chunk_size,\n img_ids=batch)\n input_arr = pw.ResizedTestData()\n print(\"input_arr.shape: \", input_arr.shape)\n # input test_data_batch, output prediction of shape batch_size * 256 * 256\n pred_arr = sess.run(y_pred, feed_dict={X: input_arr})\n print(\"pred_arr.shape: \", pred_arr.shape)\n all_preds[batch_start_pos:batch_end_pos] = pred_arr\n pw.OutputPrediction(pred_arr*100, path=output_path)\n batch_start_pos = batch_end_pos\n\n # Use all img_ids and all_preds to generate single cell split csv file\n pw = predict_data_wrapper.PredictWrapper(path=input_path,\n resize_size=chunk_size,\n img_ids=img_ids)\n pw.GenerateSubmit(all_preds, output_path, cutoff=0.5)", "def translate_images(estimator, test_images_list, label, checkpoint_path, num_domains):\n img_rows = []\n\n def test_input_fn():\n dataset_lbls = [tf.one_hot([label], num_domains)] * len(test_images_list)\n\n # Make into a dataset.\n dataset_imgs = np.stack(test_images_list)\n dataset_imgs = np.expand_dims(dataset_imgs, 1)\n dataset_lbls = tf.stack(dataset_lbls)\n unused_tensor = tf.zeros(len(test_images_list))\n return tf.data.Dataset.from_tensor_slices(((dataset_imgs, dataset_lbls),\n unused_tensor))\n\n prediction_iterable = estimator.predict(test_input_fn, checkpoint_path=checkpoint_path)\n predictions = [next(prediction_iterable) for _ in range(len(test_images_list))] # range(len(test_images_list))]\n normalized_summary = [(result + 1.0) / 2.0 for result in predictions]\n return normalized_summary", "def predict_image(self, image_paths):\n predictions = list()\n for image_path in image_paths:\n img = ImageHelper.get_image_by_path(image_path, self.target_size)\n\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = self.preprocess_input(x)\n\n with self.graph.as_default():\n features = self.model_base.predict(x)\n preds = self.model_top.predict(features)\n label, probability = self.decode_prediction(preds)\n\n predictions.append({\"image_path\": image_path,\n \"label\": label,\n \"probability\": probability})\n return predictions", "def build_prediction(self, examples, **kwargs):\n options = self._model_proto\n is_training = self._is_training\n\n if is_training or len(options.eval_min_dimension) == 0:\n return self._build_prediction(examples)\n\n inputs = examples[InputDataFields.image]\n assert inputs.get_shape()[0].value == 1\n\n proposal_scores_list = [[] for _ in range(1 + options.oicr_iterations)]\n\n # Get predictions from different resolutions.\n\n reuse = False\n for min_dimension in options.eval_min_dimension:\n inputs_resized = tf.expand_dims(\n imgproc.resize_image_to_min_dimension(inputs[0], min_dimension)[0],\n axis=0)\n examples[InputDataFields.image] = inputs_resized\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):\n predictions = self._build_prediction(examples, post_process=False)\n\n for i in range(1 + options.oicr_iterations):\n proposals_scores = predictions[NOD2Predictions.oicr_proposal_scores +\n '_at_{}'.format(i)]\n proposal_scores_list[i].append(proposals_scores)\n\n reuse = True\n\n # Aggregate (averaging) predictions from different resolutions.\n\n predictions_aggregated = predictions\n for i 
in range(1 + options.oicr_iterations):\n proposal_scores = tf.stack(proposal_scores_list[i], axis=-1)\n proposal_scores = tf.reduce_mean(proposal_scores, axis=-1)\n predictions_aggregated[NOD2Predictions.oicr_proposal_scores +\n '_at_{}'.format(i)] = proposal_scores\n\n predictions_aggregated.update(\n self._post_process(inputs, predictions_aggregated))\n\n return predictions_aggregated", "def _process_batch(sess, original_images, semantic_predictions, image_names,\n image_heights, image_widths, image_id_offset, save_dir,\n raw_save_dir, train_id_to_eval_id=None):\n (original_images,\n semantic_predictions,\n image_names,\n image_heights,\n image_widths) = sess.run([original_images, semantic_predictions,\n image_names, image_heights, image_widths])\n\n num_image = semantic_predictions.shape[0]\n for i in range(num_image):\n image_height = np.squeeze(image_heights[i])\n image_width = np.squeeze(image_widths[i])\n original_image = np.squeeze(original_images[i])\n semantic_prediction = np.squeeze(semantic_predictions[i])\n crop_semantic_prediction = semantic_prediction[:image_height, :image_width]\n\n # Save image.\n save_annotation.save_annotation(\n original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),\n add_colormap=False)\n\n # Save prediction.\n save_annotation.save_annotation(\n crop_semantic_prediction, save_dir,\n _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,\n colormap_type=FLAGS.colormap_type)\n\n if FLAGS.also_save_raw_predictions:\n image_filename = os.path.basename(image_names[i])\n\n if train_id_to_eval_id is not None:\n crop_semantic_prediction = _convert_train_id_to_eval_id(\n crop_semantic_prediction,\n train_id_to_eval_id)\n save_annotation.save_annotation(\n crop_semantic_prediction, raw_save_dir, image_filename,\n add_colormap=False)", "def predict(model, img, imgSize):\n \n #Reajusta o tamanho da imagem para o tamanho esperado caso necessario.\n if img.size != imgSize :\n img = img.resize(imgSize)\n\n #Converte a imagem num array tridimensional.\n x = image.img_to_array(img)\n x = numpy.expand_dims(x, axis=0)\n #Normaliza a imagem.\n x = preprocess_input(x)\n \n #Faz a previsao atraves da rede.\n pred = model.predict(x)\n return imagenet_utils.decode_predictions(pred, top=5)[0]" ]
[ "0.641343", "0.63212353", "0.6258062", "0.61219174", "0.60748583", "0.6041475", "0.60248953", "0.59354454", "0.5916165", "0.58947873", "0.58889335", "0.5845712", "0.583477", "0.58249575", "0.58211285", "0.5807858", "0.57785034", "0.5753667", "0.5744161", "0.5721407", "0.57166237", "0.56900495", "0.5672552", "0.5649591", "0.56369424", "0.5635983", "0.561325", "0.5555674", "0.55466676", "0.5544764" ]
0.74643695
0
Clusters the data of X into k clusters using T iterations of Lloyd's algorithm.
def lloyds_algorithm(X, k, T):
    n, d = X.shape
    # Initialize clusters randomly.
    clustering = np.random.randint(0, k, (n,))
    centroids = np.zeros((k, d))
    # Used to stop if cost isn't improving (decreasing)
    cost = 0
    oldcost = 0
    # Column names
    # print("Iterations\tCost")
    for i in range(T):
        # Update centroid
        centroids = np.zeros((k, d))
        # YOUR CODE HERE
        numberOfPointsInClusters = np.zeros((k,))
        for idx, point in enumerate(clustering):
            numberOfPointsInClusters[point] += 1
            centroids[point] += X[idx]
        for n in range(k):
            if numberOfPointsInClusters[n] == 0:
                numberOfPointsInClusters[n] = float('-inf')
        centroids = [centroid / numberOfPointsInClusters[idx] for idx, centroid in enumerate(centroids)]
        # END CODE
        # Update clustering
        # YOUR CODE HERE
        for idx, point in enumerate(X):
            clustering[idx] = np.argmin([np.linalg.norm(point - cluster) for cluster in centroids])
        # END CODE
        # Compute and print cost
        cost = 0
        for j in range(n):
            cost += np.linalg.norm(X[j] - centroids[clustering[j]]) ** 2
        # print(i + 1, "\t\t", cost)
        # Stop if cost didn't improve more than epsilon (decrease)
        if np.isclose(cost, oldcost):
            break  # TODO
        oldcost = cost
    return clustering, centroids, cost
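For comparison, a compact, self-contained sketch of one Lloyd iteration written with vectorized NumPy; the toy data, the choice of k, and the keep-previous-centroid handling of empty clusters below are illustrative choices for this sketch, not part of the row above.

import numpy as np

def lloyd_step(X, centroids):
    # Assignment step: label each point with its nearest centroid.
    dists = np.linalg.norm(X[:, None, :] - centroids[None, :, :], axis=2)
    labels = dists.argmin(axis=1)
    # Update step: move each centroid to the mean of its assigned points
    # (empty clusters keep their previous position in this sketch).
    new_centroids = np.array([
        X[labels == j].mean(axis=0) if np.any(labels == j) else centroids[j]
        for j in range(len(centroids))
    ])
    return labels, new_centroids

# Toy usage: 200 two-dimensional points, k = 3, T = 50 iterations.
rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))
centroids = X[rng.choice(len(X), size=3, replace=False)]
for _ in range(50):
    labels, centroids = lloyd_step(X, centroids)
print(labels.shape, centroids.shape)  # (200,) (3, 2)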
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kmeans(X, k, iterations=1000):\n\n # Initialize the cluster centroids (C <- centroid \"means\")\n C = initialize(X, k)\n\n if C is None:\n return None, None\n if not isinstance(iterations, int) or iterations <= 0:\n return None, None\n\n # n: number of dada points\n # d: dimension of each data point\n n, d = X.shape\n\n # # Initialize the cost/distortion function;\n # # defined as J = sum/n(sum/k(r(ij)*||x(i) - c(j)||**2))\n # J = np.inf\n\n # Iterate over iterations\n for iteration in range(iterations):\n # print(\"iteration:\", iteration)\n\n # Maintain a deep copy of C\n # C_prev = np.array([x for x in C])\n # Another alternative (removes for loop):\n C_prev = np.copy(C)\n\n # OPTION 1: FOR LOOPS\n\n # Initialize the array of pairwise data point-centroid\n # distances with zeros\n # dist = np.zeros((n, k))\n\n # for i in range(n):\n # for j in range(k):\n # dist[i, j] = np.linalg.norm(X[i, ...] - C[j, ...])\n # Note: squared distances can alternatively be inferred\n # directtly from the inner product of (X - C) with itself\n # dist[i, j] = np.inner(X[i,:]-C[j,:], X[i,:]-C[j,:])\n # print(\"dist:\", dist)\n # Squared distances from \"dist\":\n # print(\"dist ** 2:\", dist ** 2)\n\n # OPTION 2: VECTORIZATION\n\n # Convert X into an array suitable for vectorization\n Xv = np.repeat(X, k, axis=0)\n # print(\"Xv:\", Xv)\n # print(\"Xv.shape:\", Xv.shape)\n Xv = Xv.reshape(n, k, d)\n # print(\"Xv:\", Xv)\n # print(\"Xv.shape:\", Xv.shape)\n\n # Convert C into an array suitable for vectorization\n Cv = np.tile(C, (n, 1))\n # print(\"Cv:\", Cv)\n # print(\"Cv.shape:\", Cv.shape)\n Cv = Cv.reshape(n, k, d)\n # print(\"Cv:\", Cv)\n # print(\"Cv.shape:\", Cv.shape)\n\n # Compute the \"dist\" matrix of euclidean distances between\n # data points and centroids; shape (n, k)\n dist = np.linalg.norm(Xv - Cv, axis=2)\n\n # Assign each point of the dataset to a centroid:\n # Evaluate argmin(dist**2) for comparison with k\n # r(ij) = 1 if argmin(dist**2) == j\n # -> point i assigned to centroid k\n # otherwise r(ij) = 0 -> ignore point i wrt centroid k\n clss = np.argmin(dist ** 2, axis=1)\n # print(\"centroid indices:\", clss)\n # print(\"clss.shape:\", clss.shape)\n # Note: here, clss is a 1D array of the unique centroid index\n # to which each point in the dataset as been assigned (closest to);\n # the indices array is used in place of r(ij) in J evaluations\n\n # OPTION 1: EXIT CONDITION BASED ON J_prev == J\n\n # # Make a copy of the previous J value & reinitialize J\n # J_prev = J\n # # J = 0\n\n # # Update J (summing over the n data points),\n # # based on the (shortest) distances inferred from \"indices\"\n # # From \"for\" loop:\n # # for i in range(n):\n # # J += (dist[i, clss[i]] ** 2)\n # # From vectorization:\n # J = np.sum(dist[..., clss] ** 2)\n # # Normalize J to the number of data points to\n # # reduce the computational cost (optional)\n # J /= n\n # # print(\"J:\", J)\n\n # if J == J_prev:\n # # print(\"last iteration:\", iteration)\n # return C, clss\n\n # Move the cluster centroids to the center (mean) of\n # the refined cluster by updating C (centroid coordinates)\n for j in range(k):\n # Infer the array of data point indices that correspond\n # to each assigned cluster centroid\n indices = np.where(clss == j)[0]\n # print(\"indices:\", indices)\n if len(indices) == 0:\n C[j] = initialize(X, 1)\n else:\n C[j] = np.mean(X[indices], axis=0)\n\n # OPTION 2: EXIT CONDITION BASED ON C == C_prev\n\n if (C == C_prev).all():\n # print(\"last iteration:\", iteration)\n return C, 
clss\n\n # Update clss before returning C, clss\n Cv = np.tile(C, (n, 1))\n Cv = Cv.reshape(n, k, d)\n dist = np.linalg.norm(Xv - Cv, axis=2)\n clss = np.argmin(dist ** 2, axis=1)\n\n return C, clss", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def run_k_means(self):\r\n centroids = self.centroids\r\n\r\n for i in range(self.max_iters):\r\n self.closestcentroids()\r\n self.newcentroids()\r\n\r\n J = 0\r\n X = self.x\r\n m = len(X)\r\n idx = self.index\r\n K = self.K\r\n dim = X.shape[1]\r\n\r\n for num in range(K):\r\n # find the index of all entries where idx==n\r\n indexentries = np.nonzero(idx == num)[0]\r\n # the values in X that have the index in indesxentries\r\n values = X[indexentries]\r\n # using one of the K centroids to do the calculation. 
K<=2 doesn't\r\n # work here for some reason.\r\n centroid = centroids[num, 0]\r\n J += np.sum((values - centroid) ** 2)\r\n\r\n return [centroids.reshape((1, K, dim)), [X[idx == k].size for k in range(K)], J / m]", "def clusters(l, K): # noqa\n if l:\n prev = None\n for t in clusters(l[1:], K):\n tup = sorted(t)\n if tup != prev:\n prev = tup\n for i in range(K):\n yield tup[:i] + [\n [l[0]] + tup[i],\n ] + tup[i + 1 :]\n else:\n yield [[] for _ in range(K)]", "def fit(self, X, epochs=50):\n self.clusters = [[] for _ in range(self.k)]\n for i in range(X.shape[0]):\n index = random.randint(0, self.k - 1)\n self.clusters[index].append(X[i])\n self.sample_in_cluster.append(index)\n for e in range(epochs):\n #beregn nye centers\n self.estimate_centers()\n #nullstill clusters\n self.reset_clusters()\n #legg til alle punkter på nytt i clusters\n self.make_clusters(X)\n if self.changed == False:\n break", "def recalculate_centers(data, k, clusters):\n centers = []\n for k_i in range(k):\n inds = [i for i, j in enumerate(clusters) if j == k_i]\n n = np.take(data, inds, axis=0)\n if len(inds) == 0:\n i = np.random.randint(len(data))\n centers.append((data[i,0], data[i,1]))\n\n elif len(inds) < 2: \n centers.append((n[0][0], n[0][1]))\n else:\n result = np.sum(n, axis=1)/len(inds)\n centers.append((result[0], result[0]))\n return centers", "def kmeans_clustering(self,k):\r\n \r\n print(colored(\"Performing K-means clustering with %d clusters\\n\"%k,color = 'yellow', attrs=['bold']))\r\n kmeans = KMeans(n_clusters=k, random_state=0, n_init=10, max_iter=100, n_jobs=-1, ).fit(self.X)\r\n self.labels = kmeans.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The k-means inertia is %0.002f\\n\" %(kmeans.inertia_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels , kmeans.cluster_centers_,kmeans", "def cluster(self, k=3, max_iter=10):\n\n # create a set of k random clusters as seeds\n old_clusters = [None] * k # just a placeholder\n clusters = self.random_clusters(k)\n\n iter = 0\n while (iter < max_iter) and not (old_clusters == clusters):\n print \"iteration %d...\" % iter\n # assign new clusters to old clusters\n for i in xrange(0, k):\n old_clusters[i] = copy(clusters[i])\n clusters[i].documents = []\n\n # for each document\n for document in self.documents:\n\n # determine the cluster with the highest similarity\n similarities = [cosine_similarity(document, cluster) for cluster in old_clusters]\n max_index = array(similarities).argmax()\n\n # assign document to that cluster\n clusters[max_index].add(document)\n\n # update cluster means\n for cluster in clusters:\n cluster.update_centroid()\n \n iter += 1\n \n return clusters", "def kmeans(X, k, iterations=1000):\n if not isinstance(X, np.ndarray) or len(X.shape) != 2:\n return None, None\n if not isinstance(k, int) or k <= 0:\n return None, None\n if not isinstance(iterations, int) or iterations <= 0:\n return None, None\n n, d = X.shape\n X_min = X.min(axis=0)\n X_max = X.max(axis=0)\n C = np.random.uniform(X_min, X_max, size=(k, d))\n\n for i in range(iterations):\n centroids = np.copy(C)\n centroids_extended = C[:, np.newaxis]\n distances = np.sqrt(((X - centroids_extended) ** 2).sum(axis=2))\n clss = np.argmin(distances, axis=0)\n for c in range(k):\n if X[clss == c].size == 0:\n C[c] = np.random.uniform(X_min, X_max, size=(1, d))\n else:\n C[c] = X[clss == c].mean(axis=0)\n\n centroids_extended = C[:, np.newaxis]\n distances = np.sqrt(((X - centroids_extended) ** 2).sum(axis=2))\n clss = 
np.argmin(distances, axis=0)\n\n if (centroids == C).all():\n break\n\n return C, clss", "def initialize_clusters(points, k):\r\n return points[np.random.randint(points.shape[0], size=k)]", "def k_means(x_input, n_cluster=3, n_iter=100, n_tries=10):\n results = []\n for _ in range(n_tries):\n error_value = 0\n rand.seed(None)\n centers = sorted([rand.uniform(0.0, 100.0) for i in range(n_cluster)])\n min_dist_idx = [0] * len(x_input)\n i = 0\n while i < n_iter:\n failed = False\n dist_mat = l2_pairwise_distance(x_input, centers)\n error_value = calculate_error(dist_mat)\n min_dist_idx = [dist.index(min(dist)) for dist in dist_mat]\n centers = [0] * n_cluster\n count = [0] * n_cluster\n for j in range(len(x_input)):\n centers[min_dist_idx[j]] += x_input[j]\n count[min_dist_idx[j]] += 1\n\n for j in range(n_cluster):\n if count[j] == 0:\n centers = sorted(\n [rand.uniform(0.0, 100.0) for i in range(n_cluster)])\n failed = True\n break\n\n if failed:\n i = 0\n continue\n\n for j in range(n_cluster):\n centers[j] = centers[j] / count[j]\n i += 1\n\n results.append((centers, min_dist_idx, error_value))\n\n return min(results, key=lambda x: x[2])", "def k_clusters(old_ops, max_outputs, mut):\n \n # DM construction\n matrix = starting_centroids(old_ops, max_outputs, mut)\n\n\n # Clustering\n seed = []\n for i in matrix.OPs:\n seed.append(i)\n centroids = cluster(old_ops, seed, mut)\n disto = distortion(centroids, old_ops, mut)\n\n return centroids, disto", "def generateClustersRandomly(k=2, scale=1, num_clusters=1, points_per_cluster=20):\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)] for i in range(num_clusters)]\n point_list = []\n for rand in rands:\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n counter = 0\n while counter < points_per_cluster:\n nearCluster = np.array([np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)])\n nearClusterLastItem = math.sqrt(1 + np.dot(nearCluster, nearCluster))\n new_point = np.append(nearCluster, nearClusterLastItem)\n # radius of hyperbolic ball is 0.2\n if hyperboloidDist(new_point, rand) < .2:\n point_list.append(new_point)\n counter += 1\n\n return np.array(point_list)", "def kbcht(data, k=10, shrinking_threshold=2):\n if k <= 0:\n raise ValueError('k={} must be > 0'.format(k))\n \n if shrinking_threshold <= 0:\n raise ValueError('shrinking_threshold={} must be > 0'\n .format(shrinking_threshold))\n \n if data.shape[1] != 2:\n raise ValueError('KBCHT only works on two-dimensional data')\n\n # perform k-Means for finding inital clusters\n km = kmeans(data, k)\n km_clusters = km.predict(data)\n initial_clusters = create_clusters(data, km_clusters)\n\n visualizations = []\n\n visualizations.append(visualize(initial_clusters, 'K-Means Clustering'))\n\n # get subclusters from convex hulls and shrinking\n sub_clusters, sc_average_distances, released, shrinked_vertices = \\\n get_all_subclusters(initial_clusters, shrinking_threshold)\n\n visualizations.append(visualize_vertices(\n shrinked_vertices, initial_clusters, released, 'Shrinked Vertices'))\n visualizations.append(visualize(\n sub_clusters + [released], 'Subclusters', contains_noise=True))\n\n # merge subclusters\n clusters, average_distances = \\\n merge_clusters(sub_clusters, sc_average_distances)\n\n clusters, contains_noise = \\\n add_released(clusters, average_distances, released)\n\n visualizations.append(\n visualize(clusters, 'KBCHT Clustering', contains_noise=contains_noise))\n\n # recreate 
cluster assignments for points in original data set\n assignments = create_assignments(data, clusters)\n\n return assignments, visualizations", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n # position initial clusters at the location of clusters with largest populations\n cluster_list_copy = sorted(cluster_list,\n reverse = True,\n key=lambda cluster: cluster.total_population())\n cluster_list_copy = cluster_list_copy[: num_clusters]\n cluster_cent = [(cluster.horiz_center(), cluster.vert_center()) for cluster in cluster_list_copy]\n result = []\n #clustering to k initial centers adjusting the centers after each iteration\n for dummy_q in range(num_iterations):\n #Initialize k empty sets C1,...,Ck\n k_clusters = []\n for dummy_k in range(num_clusters):\n k_clusters.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n for idx_j in range(len(cluster_list)):\n # defining the closest k center and add the cluster to it\n dist_list = []\n for idx_k in range(num_clusters):\n center_x, center_y = cluster_cent[idx_k]\n dist = cluster_list[idx_j].distance(\n alg_cluster.Cluster(set(), center_x, center_y, 0, 0))\n dist_list.append((dist, idx_k))\n dummy_k, idx = min(dist_list)\n k_clusters[idx].merge_clusters(cluster_list[idx_j])\n result = k_clusters\n #update the new center of k clusters\n cluster_cent = [(k_clusters[idx_f].horiz_center(), k_clusters[idx_f].vert_center()) for idx_f in range(num_clusters)]\n return result", "def kmeans(X, n_clust):\n\n X = scale(X)\n estimator = KMeans(init = 'k-means++', n_clusters = n_clust, n_init = 10, verbose = 2)\n \n estimator.fit(X)\n labels = estimator.predict(X)\n return labels", "def run_various_Ks(x, K):\n m = len(x) # length of data points\n min_list = [] # list that will contain minimum costs\n Ks = [i for i in range(1,K+1)] # values of K's\n\n for i in range(1, K+1):\n # runs algorithm with different values of K\n kmeans = KMeans(n_clusters=i, random_state=0).fit(x)\n minval = kmeans.inertia_\n print(minval)\n min_list.append(minval) # appends minimum cost \n\n # Plotting J vs. K to choose best value of K\n plt.plot(Ks, min_list)\n plt.plot(Ks, min_list, '-o')\n plt.xlabel('K (# of clusters)')\n plt.ylabel('Cost function J')\n plt.title('J vs. 
K plot')\n plt.show()", "def kMeans(d, k):\n #First get the random centroids from the data\n newCentroids = getRandomCentroids(d, k)\n #newCentroids = [[-2.0, 1.0], [-2.0, -2.0], [2.0, 2.0], [0.0, 0.0]]\n\n #Get the clusters from these random centroids\n clusters = initiateCentroid(d, newCentroids, k)\n oldCentroids = []\n\n counter = 0\n #While the old centroids are not equal to the new ones\n while oldCentroids != newCentroids:\n #old is equal to new\n oldCentroids = newCentroids\n #Calculate the new centroids\n k, newCentroids = calcCentroids(d, clusters)\n #Calculate the new clusters\n clusters = initiateCentroid(d, newCentroids, k)\n #Count how many iterations\n counter += 1\n\n return counter, clusters", "def k_means (X, K):\n K_clusters = initialize_centroids(X, K)\n m = X.shape[0]\n dif = 1\n while (dif > 10**(-7)): # we stop when the centroids almost don't move\n groups = np.empty(m)\n K_clusters_old = K_clusters\n #cluster assignment step\n for i in range(m):\n groups[i] = np.argmin(compute_distance(X[i,:],K_clusters))\n #centroids update step\n for k in range(K):\n K_clusters[k,:] = np.mean(X[groups==k,:],axis=0)\n dif = np.linalg.norm(K_clusters-K_clusters_old, 2) / (np.linalg.norm(K_clusters, 2) + np.linalg.norm(K_clusters_old, 2))\n return groups.astype(int), K_clusters", "def k_means_multiple(self, K):\r\n self.K = K\r\n table = []\r\n\r\n for numberoftimes in range(self.tries):\r\n self.randomcentroids()\r\n try:\r\n atry = self.run_k_means()\r\n table.append(atry)\r\n except ValueError:\r\n pass\r\n\r\n c = ['centroid position', 'how many for each', 'J']\r\n\r\n self.table = pd.DataFrame(table, columns=c).sort_index(by=['J']).head()", "def cluster(self,method=\"kmeans\",properties=None,k=3):\n try :\n from sklearn.cluster import KMeans, Ward\n from sklearn import __version__\n except :\n logger.warning(\"install scikits-learning package\")\n return\n X = [] #List of feature vector of each blob\n if not properties:\n properties = ['color','shape','position']\n if k > len(self):\n logger.warning(\"Number of clusters cannot be greater then the number of blobs in the featureset\")\n return\n for i in self:\n featureVector = []\n if 'color' in properties:\n featureVector.extend(i.mAvgColor)\n if 'shape' in properties:\n featureVector.extend(i.mHu)\n if 'position' in properties:\n featureVector.extend(i.extents())\n if not featureVector :\n logger.warning(\"properties parameter is not specified properly\")\n return\n X.append(featureVector)\n\n if method == \"kmeans\":\n \n # Ignore minor version numbers.\n sklearn_version = re.search(r'\\d+\\.\\d+', __version__).group()\n \n if (float(sklearn_version) > 0.11):\n k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)\n else:\n k_means = KMeans(init='random', k=k, n_init=10).fit(X)\n KClusters = [ FeatureSet([]) for i in range(k)]\n for i in range(len(self)):\n KClusters[k_means.labels_[i]].append(self[i])\n return KClusters\n\n if method == \"hierarchical\":\n ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)\n WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]\n for i in range(len(self)):\n WClusters[ward.labels_[i]].append(self[i])\n return WClusters", "def integrated_clustering(t_all,y_all,num_of_days=500,period = 1440,trim=10,min_n_clusters = 4, max_n_clusters=10,hierarchical=0):\n\n\n\n all_seg_april = initial_disaggregate(t_all,y_all,num_of_days,period = period)\n \n ''' '''\n all_seg_april_normalized = [np.array(x[0])-np.mean(x[1]) for x in all_seg_april if len(x[1])==3]\n 
\n ''' filter the empty segments'''\n all_seg_april_normalized = [x for x in all_seg_april_normalized if len(x)>0]\n \n ''' clustering in different ranges will probably have a better result'''\n if hierarchical == 0:\n pass\n elif hierarchical ==1:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()>1000]\n else:\n all_seg_april_normalized = [x for x in all_seg_april_normalized if x.mean()<1000]\n \n ''' filter out the positive segments'''\n all_positive_seg_april_normalized = [x for x in all_seg_april_normalized if x.min()>0]\n \n \n all_seg_april_normalized_trim50 = extract_first_n(all_positive_seg_april_normalized, trim)\n cluster_average = []\n \n # find optimal clustering number using silhouette score\n \n optimal_dict = {}\n \n for n_clusters in range(min_n_clusters,max_n_clusters):\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n\n # sihouette score\n cluster_labels = y_pred\n sample_silhouette_values = silhouette_samples(all_seg_april_normalized_trim50, cluster_labels)\n \n silhouette_avg = silhouette_score(pd.DataFrame(all_seg_april_normalized_trim50), cluster_labels)\n\n optimal_dict[n_clusters] = silhouette_avg +(sample_silhouette_values.min()+sample_silhouette_values.max())/2\n \n # n_clusters will give us the optimal number of clusters\n n_clusters = max(optimal_dict.iteritems(), key=operator.itemgetter(1))[0]\n\n #print n_clusters\n \n y_pred = KMeans(n_clusters=n_clusters).fit_predict(all_seg_april_normalized_trim50)\n\n cluster_average = []\n \n for i_cluster in range(n_clusters):\n cluster_average.append(\n np.mean([np.mean(x) for i, x in enumerate(all_seg_april_normalized_trim50) if y_pred[i]==i_cluster])\n ) \n cluster_average_rank = np.argsort(cluster_average)[::-1]\n rank_map = {cluster_average_rank[i_cluster]:i_cluster for i_cluster in range(n_clusters)} # old index:new index\n\n y_pred_old = y_pred\n y_pred = [rank_map[x] for x in y_pred]\n all_seg_per_cluster = [[] for i in range(n_clusters) ]\n for i_seg in range(len(all_seg_april_normalized_trim50)):\n all_seg_per_cluster[y_pred[i_seg]].append(all_seg_april_normalized_trim50[i_seg])\n \n cluster_mean = [[] for i in range(n_clusters) ]\n cluster_std = [[] for i in range(n_clusters) ]\n for i_cluster in range(n_clusters):\n cluster_mean[ i_cluster ] = np.mean(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n cluster_std[ i_cluster ] = np.std(np.array(all_seg_per_cluster[i_cluster]), axis=0)\n \n \n \n \n #cluster_mean_2 = cluster_mean[5:6]\n \n return cluster_mean,cluster_std,n_clusters,all_seg_per_cluster", "def kmeans(self,mydata, k=None, centroids=None, steps=200):\n\t\tif centroids is not None and k is not None:\n\t\t\tassert(k == len(centroids))\n\t\telif centroids is not None:\n\t\t\tk = len(centroids)\n\t\telif k is not None:\n\t\t\t# Forgy initialization method: choose k data points randomly.\n\t\t\tcentroids = mydata[np.random.choice(np.arange(len(mydata)), k, False)]\n\t\telse:\n\t\t\traise RuntimeError(\"Need a value for k or centroids.\")\n\n\t\tfor _ in range(max(steps, 1)):\n\t\t\t# Squared distances between each point and each centroid.\n\t\t\tsqdists = scipy.spatial.distance.cdist(centroids, mydata, 'sqeuclidean')\n\n\t\t\t# Index of the closest centroid to each data point.\n\t\t\tclusters = np.argmin(sqdists, 
axis=0)\n\n\t\t\tnew_centroids = self.cluster_centroids(mydata, clusters, k)\n\t\t\tif np.array_equal(new_centroids, centroids):\n\t\t\t\tbreak\n\n\t\t\tcentroids = new_centroids\n\n\t\treturn clusters", "def KMeansCluster(matrix):\n\n # Possibly need to scale the data first\n data = scale(matrix)\n\n # Approximate the number of clusters using c = root(n/2)\n # num_clusters = int(sqrt(len(matrix) / 2))\n num_clusters = 5\n number_init = 10 # Default\n number_iter = 300\n num_cpus = 2\n\n print \"===================\"\n print \"Training KMeans with (num_clusters, num_init, num_iters, num_cpus)\"\n print num_clusters, number_init, number_iter, num_cpus\n\n # estimator = KMeans(init='k-means++', n_clusters = num_clusters, n_init = number_init)\n # estimator.fit(data)\n # clusters = k_means(data, n_clusters = num_clusters, max_iter=number_iter, n_init = number_iter, \n # init='k-means++', n_jobs = num_cpus)\n clusters = k_means(data, n_clusters = num_clusters, max_iter=number_iter, n_init = number_iter, n_jobs = num_cpus)\n\n\n return clusters", "def train(self, data):\n\t\tepsilon = self.epsilon\n\t\ttempDist = 1.0\n\t\tk = self.k\n\t\tcenters = data.rdd.takeSample(False, k, 1)\n\t\ti = 0 \n\t\twhile tempDist > epsilon or self.maxNoOfIteration > i:\n\t\t\ti+=1\t\t\t\n\t\t\tclosest = data.map(lambda p: (closestCluster(p, centers), (np.array(p), 1)))\n \t\t\tpointStats = closest.reduceByKey(lambda x, y: (x[0] + y[0], x[1] + y[1]))\n \t\tnewPoints = pointStats.map(lambda x: (x[0], x[1][0] / float(x[1][1]))).collect()\n \t\ttempDist = sum(np.sum((centers[index] - p) ** 2) for (index, p) in newPoints)\n \t\tfor (ind, p) in newPoints:\n\t\t\t\tcenters[ind] = p\n\t\tself.centers = centers\n\t\treturn self.centers", "def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of 
each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)", "def knn(p, k, x, t):\r\n\r\n # Number of instances in data set\r\n N = x.shape[0]\r\n\r\n Euclidean_Distance = numpy.square(x - p) #Euclidean distance\r\n dis = numpy.sum(Euclidean_Distance, axis=1) #sum of the euclidean distance\r\n inds = numpy.argsort(dis)[:k] #sort the indices of the distance array\r\n tgt_cat = Counter([t[i] for i in inds]) #count the times of equivalent target labels\r\n top_class = max(tgt_cat, key= tgt_cat.get) #top class among the k nearest points\r\n\r\n\r\n #top_class = 0\r\n\r\n return top_class", "def create_clusters(N, K):\n clusters = []\n centroids = create_points(N, K)\n for idx, centroid in enumerate(centroids):\n cluster = Cluster(centroid)\n cluster.label = _cluster_name(idx)\n clusters.append(cluster)\n return clusters", "def k_means_clustering(rows, distance=pearson_distance, k=4):\n # Determine the min and max values for each point\n ranges = [(min(row[i] for row in rows), max([row[i] for row in rows])) for i in range(len(rows[0]))]\n\n # Create k RANDOMLY placed centroids\n clusters = [[random() * (ranges[i][1] - ranges[i][0]) + ranges[i][0] for i in range(len(rows[0]))] for j in\n range(k)]\n distances_from_centroids = {}\n last_matches = None\n best_matches = None\n for t in range(100):\n print ('Iteration {}'.format(t))\n best_matches = [[] for i in range(k)]\n\n # Find the centroid that is the closest for each row\n for j in range(len(rows)):\n row = rows[j]\n best_match = 0\n for i in range(k):\n d = distance(clusters[i], row)\n if d < distance(clusters[best_match], row):\n best_match = i\n best_matches[best_match].append(j)\n\n # if the results are the same as last time, then this is complete\n if best_matches == last_matches:\n break\n last_matches = best_matches\n\n # Move the centroids to the average of their members\n for i in range(k):\n avgs = [0.0] * len(rows[0])\n if len(best_matches[i]) > 0:\n for row_id in best_matches[i]:\n for m in range(len(rows[row_id])):\n avgs[m] += rows[row_id][m]\n for j in range(len(avgs)):\n avgs[j] /= len(best_matches[i])\n clusters[i] = avgs\n\n # Chapter 3 Exercise 5: Return along with the cluster results the total distance between all items\n # and their respective centroids\n for i in range(k):\n for j in range(len(best_matches[i])):\n distances_from_centroids[best_matches[i][j]] = distance(clusters[i],rows[best_matches[i][j]])\n return best_matches, distances_from_centroids" ]
[ "0.67442775", "0.6576771", "0.6541134", "0.64928246", "0.64676577", "0.6435194", "0.6364645", "0.6314384", "0.6309969", "0.625905", "0.6255361", "0.6239175", "0.62359464", "0.6196312", "0.6167873", "0.61381966", "0.6135124", "0.6130115", "0.61040205", "0.6082534", "0.607699", "0.60637265", "0.60609823", "0.6048113", "0.6018045", "0.60020053", "0.5997541", "0.59591454", "0.5957646", "0.59539837" ]
0.77015615
0
Dequeue at most ``max_length`` bytes. If ``max_length`` is not specified, dequeue the maximum possible contiguous amount of bytes (at least one). Regardless of what was written into the FIFO, ``read`` always returns a ``memoryview`` object.
def read(self, max_length=None): if max_length is None and self._chunk is None: # Fast path. return self._queue.popleft() if max_length == 0: return memoryview(b"") if self._chunk is None: self._chunk = self._queue.popleft() self._offset = 0 if max_length is None: result = self._chunk[self._offset:] else: result = self._chunk[self._offset:self._offset + max_length] if self._offset + len(result) == len(self._chunk): self._chunk = None else: self._offset += len(result) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def peek(self, size, timeout=_UNSET):\n with self._recv_lock:\n if len(self.rbuf) >= size:\n return self.rbuf[:size]\n data = self.recv_size(size, timeout=timeout)\n self.rbuf = data + self.rbuf\n return data", "def dequeue(self, size=None, returns=None):\n size = size or len(self)\n if not self.chunks:\n return b''\n\n data = []\n data_size = 0\n\n # dequeue chunks\n size = min(size + self.offset, self.chunks_size)\n while self.chunks:\n if data_size >= size:\n break\n chunk = self.chunks.popleft()\n data.append(chunk)\n data_size += len(chunk)\n\n if data_size == size:\n # no chunk re-queue\n self.chunks_size -= data_size\n offset, self.offset = self.offset, 0\n else:\n # If offset is beyond the middle of the chunk it will be split\n offset = len(chunk) - (data_size - size)\n if offset << 1 > len(chunk):\n chunk = chunk[offset:]\n offset, self.offset = self.offset, 0\n else:\n offset, self.offset = self.offset, offset\n\n # re-queue chunk\n self.chunks.appendleft(chunk)\n self.chunks_size += len(chunk) - data_size\n\n if returns is None or returns:\n return b''.join(data)[offset:size]", "def recv (self, max_size=None):\n if max_size and max_size < len(self.buffer):\n msg = self.buffer[0:max_size]\n self.buffer = self.buffer[max_size:]\n else:\n msg = self.buffer\n self.buffer = \"\"\n return msg", "async def _read_content(\n self, length: int, max_parts=1000, max_empties=200\n ) -> Optional[bytes]:\n raw = None\n raw_parts: List[bytes] = []\n received_size = 0\n while received_size < length and len(raw_parts) < max_parts and max_empties > 0:\n part = None\n try:\n part = self.stream.read(length - received_size)\n except OSError: # pragma: no cover\n pass\n if part is None:\n max_empties -= 1\n await self.sleep()\n continue\n received_size += len(part)\n raw_parts.append(part)\n\n if raw_parts:\n raw = b\"\".join(raw_parts)\n if len(raw) != length: # pragma: no cover\n self.log.warning(\n f\"Readout and content-length mismatch: {len(raw)} vs {length};\"\n f\"remaining empties: {max_empties}; remaining parts: {max_parts}\"\n )\n\n return raw", "def read_until_size(self, size):\n if not size:\n do_return(b'')\n with self.reading:\n while len(self.read_buffer) < size:\n self.read_buffer.enqueue((yield self.base.read(self.bufsize)))\n do_return(self.read_buffer.dequeue(size))", "def recv_size(self, size, timeout=_UNSET):\n with self._recv_lock:\n if timeout is _UNSET:\n timeout = self.timeout\n chunks = []\n total_bytes = 0\n try:\n start = time.time()\n self.sock.settimeout(timeout)\n nxt = self.rbuf or self.sock.recv(self._recvsize)\n while nxt:\n total_bytes += len(nxt)\n if total_bytes >= size:\n break\n chunks.append(nxt)\n if timeout:\n cur_timeout = timeout - (time.time() - start)\n if cur_timeout <= 0.0:\n raise socket.timeout()\n self.sock.settimeout(cur_timeout)\n nxt = self.sock.recv(self._recvsize)\n else:\n msg = ('connection closed after reading %s of %s requested'\n ' bytes' % (total_bytes, size))\n raise ConnectionClosed(msg) # check recv buffer\n except socket.timeout:\n self.rbuf = b''.join(chunks)\n msg = 'read %s of %s bytes' % (total_bytes, size)\n raise Timeout(timeout, msg) # check recv buffer\n except Exception:\n # received data is still buffered in the case of errors\n self.rbuf = b''.join(chunks)\n raise\n extra_bytes = total_bytes - size\n if extra_bytes:\n last, self.rbuf = nxt[:-extra_bytes], nxt[-extra_bytes:]\n else:\n last, self.rbuf = nxt, b''\n chunks.append(last)\n return b''.join(chunks)", "def peek(self, size=1):\n if size < -1:\n raise 
ValueError(\"Cannot peek backwards\")\n\n if size == 0:\n return b''\n\n if size == -1:\n size = self.remaining()\n\n peeked = b''\n with Excursion(self._buf):\n while len(peeked) < size:\n c = self._buf.read(1)\n if not c:\n break\n peeked += c\n\n return peeked", "def recv (self, max_size=None):\n return self.receiving.recv(max_size)", "def limit(self, max_size):\n return self.__class__(itertools.islice(self, max_size))", "def __init__(self, max_size):\n self.buffer = cns.deque(maxlen=max_size)", "def read(self, timeout=None):\n if self._in_queue:\n return self._in_queue.pop(0)\n else:\n return self._filter_read(timeout)", "def recv_close(self, timeout=_UNSET, maxsize=_UNSET):\n # recv_close works by using recv_size to request maxsize data,\n # and ignoring ConnectionClose, returning and clearing the\n # internal buffer instead. It raises an exception if\n # ConnectionClosed isn't raised.\n with self._recv_lock:\n if maxsize is _UNSET:\n maxsize = self.maxsize\n if maxsize is None:\n maxsize = _RECV_LARGE_MAXSIZE\n try:\n recvd = self.recv_size(maxsize + 1, timeout)\n except ConnectionClosed:\n ret, self.rbuf = self.rbuf, b''\n else:\n # put extra received bytes (now in rbuf) after recvd\n self.rbuf = recvd + self.rbuf\n size_read = min(maxsize, len(self.rbuf))\n raise MessageTooLong(size_read) # check receive buffer\n return ret", "def test_pop_to_full_deque_size_decrease(full_deque):\n full_deque.pop()\n assert full_deque._deque._length == 2", "def dequeue(self):\n if self.isEmpty():\n raise Exception(\"Queue underflow\")\n item = self._q[self._first]\n self._q[self._first] = None # to avoid loitering\n self._N -= 1\n self._first += 1\n if self._first == len(self._q):\n self._first = 0 # wrap-around\n # shrink size of array if necessary\n if self._N > 0 and self._N == len(self._q)/4:\n self._resize(len(self._q)/2)\n return item", "def consume(iterable, keep_last=0):\n return _coconut.collections.deque(iterable, maxlen=keep_last)", "def _receive(self, length, timeout=None, skip_len_check=False):\n data = self._receive_internal(length, timeout)\n if not skip_len_check and len(data) < length:\n raise ButtshockIOError(\"Received unexpected length {}, expected {}!\".format(len(data), length))\n if len(data) > 0:\n self._debug_print('received data: %s' % data)\n return data", "def recv(self, maxlength=None):\n raise NotImplementedError", "def dequeue(self):\n if not self.is_empty():\n answer = self.data[self.head]\n self.data[self.head] = None # help garbage collection\n self.head = (self.head + 1) % self.capacity\n self.size -= 1\n if self.size <= self.capacity//4 and self.capacity > 4:\n self.shrink()\n return answer\n return None", "def consume(iterator):\n deque(iterator, maxlen=0)", "def read(self, size=-1):\n _complain_ifclosed(self._closed)\n buf = self._buf\n while size < 0 or len(buf) < size:\n try:\n buf = buf + next(self._generator)\n except StopIteration:\n break\n\n returned = b\"\"\n if size >= 1:\n self._buf = buf[size:]\n returned = buf[:size]\n else:\n self._buf = b\"\"\n returned = buf\n\n self._position = self._position + len(returned)\n return returned", "def pop(self, blocking = False, timeout = TIMEOUT_CURRENT):\n if len(self) == 0 and blocking:\n self.channel.receive(timeout)\n return collections.deque.pop(self)", "def read(self, size=-1):\n if not self._buf:\n self._buf.append(next(self._iter, b''))\n if len(self._buf[0]) < size or size < 0:\n return self._buf.pop(0)\n block = self._buf.pop(0)\n self._buf.insert(0, block[size:])\n return block[:size]", "def 
pop(self):\n while self.number > self.maxlength:\n self.buffer.popleft()\n self.number -= 1", "def read(self, size=None):\n if size is None or size < 0:\n return \"\".join(list(self))\n else:\n data_chunks = []\n data_readed = 0\n try:\n while data_readed < size:\n chunk = self.next_chunk()\n data_chunks.append(chunk)\n data_readed += len(chunk)\n except StopIteration:\n pass\n\n if data_readed > size:\n last_chunk = data_chunks.pop()\n extra_length = data_readed - size\n last_chunk, extra_data = last_chunk[:-extra_length], last_chunk[-extra_length:]\n self.unshift(extra_data)\n data_chunks.append(last_chunk)\n return \"\".join(data_chunks)", "def recv(self, size, flags=0, timeout=_UNSET):\n with self._recv_lock:\n if timeout is _UNSET:\n timeout = self.timeout\n if flags:\n raise ValueError(\"non-zero flags not supported: %r\" % flags)\n if len(self.rbuf) >= size:\n data, self.rbuf = self.rbuf[:size], self.rbuf[size:]\n return data\n if self.rbuf:\n ret, self.rbuf = self.rbuf, b''\n return ret\n self.sock.settimeout(timeout)\n try:\n data = self.sock.recv(self._recvsize)\n except socket.timeout:\n raise Timeout(timeout) # check the rbuf attr for more\n if len(data) > size:\n data, self.rbuf = data[:size], data[size:]\n return data", "def get(self, length):\n if length > self.remaining():\n raise BufferError(\"Not enough bytes [remaining=%d,requested=%d]\" %\n (self.remaining(), length))\n start = self.offset\n end = self.offset + length\n self.offset += length\n return self.data[start:end]", "def dequeue(self):\r\n if self.size():\r\n self.queue.pop(0)\r\n else:\r\n raise IndexError(\"Queue is empty.\")", "def SafeReadBytes(self, length):\n data = self.ReadBytes(length)\n if len(data) < length:\n raise ValueError(\"Not enough data available\")\n else:\n return data", "def dequeue_rear(self):\n try:\n return self._items.pop()\n except:\n raise IndexError('The deque is empty')", "def read(self, length=-1):\n if length == 0:\n return b''\n if length < -1:\n raise IOError(\"invalid read length: %r\" % length)\n if self.at_eof():\n raise IOError(\n \"Attempted to read from the buffer but already at the end\")\n\n if length == -1:\n return self._buf.read()\n else:\n if self._buf.tell() + length > len(self):\n raise IOError(\n \"Attempted to read %d bytes from the buffer but only %d \"\n \"remain\" % (length, len(self) - self.tell())\n )\n return self._buf.read(length)" ]
[ "0.61215013", "0.59867436", "0.5926825", "0.56842846", "0.56382906", "0.56014174", "0.55567545", "0.55366075", "0.55232185", "0.5293869", "0.5284016", "0.5228891", "0.5205536", "0.5144939", "0.5099088", "0.50602716", "0.504818", "0.5042906", "0.50306386", "0.5030135", "0.49921483", "0.49865115", "0.49839482", "0.4965913", "0.49454424", "0.4941595", "0.49277627", "0.4925435", "0.49253458", "0.49249935" ]
0.72821385
0
Identifies all squares of a disk region from a given start coordinate and sets them to zeroes
def removeRegion(disk, startCoord): coordinates = [startCoord] while coordinates: coord = coordinates.pop() coordinates.extend(getNeighbors(disk, coord)) disk[coord[0], coord[1]] = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setZeroes(self, matrix: List[List[int]]) -> None:\n colsToZero = set()\n rowsToZero = set()\n for rowIdx, row in enumerate(matrix):\n for colIdx, num in enumerate(row): \n if num == 0: \n colsToZero.add(colIdx)\n rowsToZero.add(rowIdx)\n \n for col in colsToZero:\n self.writeZeroCol(col, matrix)\n for row in rowsToZero:\n self.writeZeroRow(row, matrix)", "def mask_region(self, ypos, xpos, r):\r\n for j, i in product(np.arange(ypos - r, ypos + r + 1), np.arange(xpos - r, xpos + 1 + r)): # Create square\r\n if (j - ypos) ** 2 + (i - xpos) ** 2 <= r ** 2 and 0 <= j<= self.shapes[0] - 1 and 0<= i <=self.shapes[1] - 1:\r\n j = int(j)\r\n i = int(i)\r\n self.masked[j, i] = 0", "def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()", "def setZeroes(self, matrix: List[List[int]]) -> None:\n m = len(matrix)\n n = len(matrix[0])\n \n i = 0\n j = 0\n row_zero = set()\n col_zero = set()\n \n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n row_zero.update({i})\n col_zero.update({j})\n \n ## update rows with zero\n for t in row_zero:\n matrix[t][0:n] = [0 for i in range(n)]\n \n ## update cols with zero\n for i in range(m):\n for c in col_zero:\n #print(i, c)\n matrix[i][c] = 0", "def setZeroes(self, matrix):\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n if matrix[i][j] == 0 and (i, j) not in self.visited:\n for neighbor in self.setter(matrix, i, j):\n matrix[neighbor[0]][neighbor[1]] = 0\n self.visited.add((neighbor[0], neighbor[1]))\n print(matrix)", "def occupied_cells(self):\n\n for lm in self.landmarks:\n if self.cell_size < 1:\n # expand the range the landmark exists\n lm_x_range = np.arange(lm[0]-self.R, lm[0]+self.R, self.cell_size)\n lm_y_range = np.arange(lm[1]-self.R, lm[1]+self.R, self.cell_size)\n\n # loop through expanded ranges and compute grid positions\n for lm_x in lm_x_range:\n for lm_y in lm_y_range:\n\n row, col = self.cell_index([lm_x, lm_y])\n\n # apply cost of occupied cell\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass\n\n else:\n # apply cost of occupied cell\n row, col = self.cell_index(lm)\n try:\n self.world[row][col] = 1000\n except IndexError:\n pass", "def reset(self):\n\t\tself.offsets = self.start_off.copy()", "def make_all_zero(curr_clusters, k, num_of_cords):\r\n for i in range(k):\r\n for j in range(num_of_cords):\r\n curr_clusters[i][j] = 0", "def setZeroes(self, matrix: List[List[int]]) -> None:\r\n \r\n rows, cols = len(matrix), len(matrix[0])\r\n \r\n #This solution has been done in constant space\r\n #using first row and first column for deciding which rows and columns\r\n #should be zero\r\n \r\n #using one extra variable for first row\r\n #because first cell is common in first row and column\r\n first_row = 1\r\n \r\n for i in range(rows):\r\n for j in range(cols):\r\n if matrix[i][j]==0:\r\n #for column\r\n matrix[0][j] = 0\r\n \r\n #for row\r\n if i==0:\r\n first_row = 0\r\n else:\r\n matrix[i][0] = 0\r\n \r\n #checking for rows except first row\r\n for i in range(1,rows):\r\n if matrix[i][0] == 0:\r\n for j in range(cols):\r\n matrix[i][j] = 0\r\n \r\n #checking for columns except first column\r\n for j in range(1, cols):\r\n if matrix[0][j] == 0:\r\n for i in range(rows):\r\n matrix[i][j] = 0\r\n \r\n #for first column\r\n if matrix[0][0] == 0:\r\n for i in range(rows):\r\n matrix[i][0] = 0\r\n 
\r\n #for first row\r\n if first_row == 0:\r\n for j in range(cols):\r\n matrix[0][j] = 0\r\n \r\n #print(matrix)", "def reset(self):\n super(PolygonTool, self).reset()\n # self.__nsides = None\n # self.__increment = None\n # self.__external = False # make this adjustable?\n self.__center = None\n for _i in range(self.__nsides):\n self.__xpts[_i] = 0.0\n self.__ypts[_i] = 0.0", "def setZeroes(self, matrix: List[List[int]]) -> None:\n if not matrix:\n return\n row = set()\n col = set()\n rowl = len(matrix[0])\n for i, r in enumerate(matrix):\n for j, c in enumerate(r):\n if c == 0:\n row.add(i)\n col.add(j)\n \n for r in row:\n matrix[r] = [0]*rowl\n for j in col:\n for i in range(len(matrix)):\n matrix[i][j] = 0", "def setZeroes(self, matrix: List[List[int]]) -> None:\n m = len(matrix)\n n = len(matrix[0])\n set_rows = set()\n set_cols = set()\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n set_rows.add(i)\n set_cols.add(j)\n \n while set_rows:\n temp = set_rows.pop()\n for j in range(n):\n matrix[temp][j] = 0\n \n while set_cols:\n temp = set_cols.pop()\n for i in range(m):\n matrix[i][temp] = 0", "def setZeroes(self, matrix) -> None:\n if not matrix: return\n h, w = len(matrix), len(matrix[0])\n col0 = False\n for i in range(h):\n if matrix[i][0] == 0:\n col0 = True # 第一列是否要全置为0\n for j in range(1, w): # 从第二列开始\n if matrix[i][j] == 0: # (i,j)为0\n matrix[0][j] = 0 # 第i行为0\n matrix[i][0] = 0 # 第j列为0\n for i in range(1, h):\n for j in range(1, w):\n if matrix[i][j] != 0 and (matrix[i][0] == 0 or matrix[0][j] == 0):\n matrix[i][j] = 0 # 需要置为0的位置\n if matrix[0][0] == 0: # 第一行是否要全置为0\n for j in range(w):\n matrix[0][j] = 0\n if col0: # 第一列是否要全置为0\n for i in range(h):\n matrix[i][0] = 0", "def setZeroes(self, matrix: List[List[int]]) -> None:\n m, n = len(matrix), len(matrix[0])\n zero_row, zero_col = set(), set()\n for row in range(m):\n for col in range(n):\n if matrix[row][col] == 0:\n if row not in zero_row:\n zero_row.add(row)\n if col not in zero_col:\n zero_col.add(col)\n for row in zero_row:\n for col in range(n):\n matrix[row][col] = 0\n for col in zero_col:\n for row in range(m):\n matrix[row][col] = 0", "def setZeroes(self, matrix: List[List[int]]) -> None:\n row = set()\n column = set()\n \n m = len(matrix)\n n = len(matrix[0])\n \n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n row.add(i)\n column.add(j)\n \n for i in range(m):\n for j in range(n):\n if i in row or j in column:\n matrix[i][j] = 0", "def reset(self):\n self.nodes = []\n self.start = self.start\n self.end = self.end\n\n for row in self.charMap:\n for c in row:\n if c == \"2\":\n c.c = \"0\"\n self.n_checked = 0", "def setZeroes(self, matrix: List[List[int]]) -> None:\n m = len(matrix)\n n = len(matrix[0])\n rows = set()\n columns = set()\n for i in range(m):\n for j in range(n):\n if not matrix[i][j]:\n rows.add(i)\n columns.add(j)\n\n for i in rows:\n for j in range(n):\n matrix[i][j] = 0\n for j in columns:\n for i in range(m):\n matrix[i][j] = 0", "def setZeroes(self, matrix) -> None:\n m, n = len(matrix), len(matrix[0])\n rows, cols = set(), set()\n # 获取0所在的位置对应的行和列\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n rows.add(i)\n cols.add(j)\n # 将该行和列的所有元素置为0\n for row in rows:\n for j in range(n):\n matrix[row][j] = 0\n for col in cols:\n for i in range(m):\n matrix[i][col] = 0", "def _set_rows(self, start, end):\n if start <= end <= self.height:\n self._write(ST7789_RASET, _encode_pos(\n start+self.ystart, end+self.ystart))", "def fillIn(self):\n\n # Grabs 
first point (which is a shore) and prefills in hashes\n toBeAnalyzed = [self.points[0]]\n islandHash = defaultdict(list)\n islandHash[toBeAnalyzed[0].x].append(toBeAnalyzed[0].x)\n islandGridPoints = toBeAnalyzed[:]\n\n # Find all points not at pond-level.\n while toBeAnalyzed:\n gridPoint = toBeAnalyzed.pop()\n neighbors = self.analyzeData.iterateDiagonal(gridPoint.x,\n gridPoint.y)\n for _x, _y, elevation in neighbors:\n\n if elevation != self.pondElevation and _y not in\\\n islandHash[_x]:\n branch = GridPoint(_x, _y, elevation)\n islandHash[_x].append(_y)\n toBeAnalyzed.append(branch)\n islandGridPoints.append(branch)\n self.points = islandGridPoints", "def zero_blind_range(data):\n try:\n start_i = data['first_data_bin']\n except:\n start_i = 0\n data['data'][...,:start_i] = 0.0", "def setZeroes(self, matrix: List[List[int]]) -> None:\n row = []\n col = []\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n if matrix[i][j] == 0:\n col.append(j)\n row.append(i)\n\n for i in range(len(row)):\n self.col(matrix, col[i])\n for i in range(len(col)):\n self.row(matrix, row[i])", "def unoccupied(self):\n self.is_occupied = 0\n for hex in self.fon:\n hex.remove_neighbor()\n hex.set_quality()", "def setZeroes(self, matrix: List[List[int]]) -> None:\n m, n = len(matrix), len(matrix[0])\n firstRow, firstCol = False, False\n for i in range(m):\n for j in range(n):\n if i == 0 and matrix[i][j] == 0:\n firstRow = True\n if j == 0 and matrix[i][j] == 0:\n firstCol = True\n if i != 0 and j != 0 and matrix[i][j] == 0:\n matrix[i][0] = 0\n matrix[0][j] = 0\n for i in range(1, m):\n for j in range(1, n):\n if matrix[i][0] == 0 or matrix[0][j] == 0:\n matrix[i][j] = 0\n if firstRow:\n for i in range(n):\n matrix[0][i] = 0\n if firstCol:\n for i in range(m):\n matrix[i][0] = 0", "def setZeroes(self, matrix: List[List[int]]) -> None:\n \n m = len(matrix)\n n = len(matrix[0])\n \n i_zeros = []\n j_zeros = []\n \n for i in range(m): ## find the zeros\n for j in range(n):\n \n if matrix[i][j] == 0:\n i_zeros.append(i)\n j_zeros.append(j)\n \n for i in i_zeros:\n matrix[i] = [0]*n\n \n for j in j_zeros:\n for i in range(m):\n matrix[i][j] = 0", "def setZeroes(self, matrix: List[List[int]]) -> None:\n rows, cols = [], []\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n if matrix[i][j] == 0:\n rows.append(i)\n cols.append(j)\n for i in rows:\n for j in range(len(matrix[i])):\n matrix[i][j] = 0\n for j in cols:\n for i in range(len(matrix)):\n matrix[i][j] = 0\n\n return", "def nine_regions(self):\n\n coordinateList = []\n\n # Top left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Top center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] ) \n coordinateList.append( [x, y] )\n\n # Top right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * ( 1.0 - self.ratioTopLeft[IDX_X] ) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * self.ratioTopLeft[IDX_Y] )\n coordinateList.append( [x, y] )\n\n # Center left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - 
self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Center right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * 0.5 - self.regionSize[IDX_HEIGHT] / 2 )\n coordinateList.append( [x, y] )\n\n # Bottom left.\n x = (int)( self.oriImgSize[IDX_WIDTH] * self.ratioTopLeft[IDX_X] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom center.\n x = (int)( self.oriImgSize[IDX_WIDTH] * 0.5 - self.regionSize[IDX_WIDTH] / 2 )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n # Bottom right.\n x = (int)( self.oriImgSize[IDX_WIDTH] * (1.0 - self.ratioTopLeft[IDX_X]) - self.regionSize[IDX_WIDTH] )\n y = (int)( self.oriImgSize[IDX_HEIGHT] * (1.0 - self.ratioTopLeft[IDX_Y]) - self.regionSize[IDX_HEIGHT] )\n coordinateList.append( [x, y] )\n\n return coordinateList", "def setZeroes(self, matrix: List[List[int]]) -> None:\n row_num, col_num = len(matrix), len(matrix[0])\n # 创建集合set()用于存放需要置零的行和列\n row_set, col_set = set(), set()\n for row in range(row_num):\n for col in range(col_num):\n if matrix[row][col]==0:\n row_set.add(row)\n col_set.add(col)\n # 将记录的行、列中的元素赋值为0\n # 再次遍历赋值\n for row in range(row_num):\n for col in range(col_num):\n if row in row_set or col in col_set:\n matrix[row][col] = 0\n # # 或者行列单独赋值均可\n # for row in row_set:\n # for col in range(col_num):\n # matrix[row][col] = 0\n # for col in col_set:\n # for row in range(row_num):\n # matrix[row][col] = 0", "def start_region(self, x, y):\n if x>0 and x<100 and y>0 and y<100:\n return 1\n elif x>700 and x<800 and y>0 and y<100:\n return 2\n elif x>0 and x<100 and y>400 and y<500:\n return 3\n elif x>700 and x<800 and y>400 and y<500:\n return 4\n return 0", "def remove_region(data, i):\n x = data.shape[0]\n y = data.shape[1]\n\n data = data.reshape(x * y)\n idx = np.where(data == i)[0]\n data[idx] = 0\n data = data.reshape(x, y)\n\n return data" ]
[ "0.54954433", "0.5491927", "0.5477426", "0.5468548", "0.54557747", "0.5449332", "0.5430204", "0.53731966", "0.53578746", "0.5356577", "0.53494614", "0.53400874", "0.5335713", "0.5330552", "0.5324981", "0.53230536", "0.5312219", "0.5310138", "0.5302936", "0.52781385", "0.5273465", "0.5264816", "0.52602285", "0.52572465", "0.52445155", "0.5237707", "0.52358216", "0.5228697", "0.52282834", "0.5215331" ]
0.7064379
0
Parse a block of code into a parse tree. Then assert the equality of that parse tree to a list of expected tokens.
def assert_parse_tree (code, expected): tranql = TranQL () tranql.resolve_names = False actual = tranql.parser.parse (code).parse_tree #print (f"{actual}") assert_lists_equal ( actual, expected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ParseBlock(self, ast):\n for node in ast:\n node_name = node[0]\n node_value = node[1]\n if node_name == 'statement':\n self.__ParseStatement(node_value)\n else:\n logging.info('Unknown AST node in message block: %s' % (node_name))", "def test_fenced_code_blocks_extra_09a():\n\n # Arrange\n source_markdown = \"\"\"- abc\n ```yaml\n def:\n - ghi\n ```\"\"\"\n expected_tokens = [\n \"[ulist(1,1):-::2:: \\n \\n \\n ]\",\n \"[para(1,3):]\",\n \"[text(1,3):abc:]\",\n \"[end-para:::False]\",\n \"[fcode-block(2,3):`:3:yaml:::::]\",\n \"[text(3,3):def:\\n - ghi:]\",\n \"[end-fcode-block:::3:False]\",\n \"[end-ulist:::True]\",\n ]\n expected_gfm = \"\"\"<ul>\n<li>abc\n<pre><code class=\"language-yaml\">def:\n - ghi\n</code></pre>\n</li>\n</ul>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens, show_debug=False)", "def test_parser(test_input, expected):\n tokens = list(sp.tokenize(test_input))\n assert tokens == expected", "def test_fenced_code_blocks_extra_09b():\n\n # Arrange\n source_markdown = \"\"\"> abc\n> ```yaml\n> def:\n> - ghi\n> ```\"\"\"\n expected_tokens = [\n \"[block-quote(1,1)::> \\n> \\n> \\n> \\n> ]\",\n \"[para(1,3):]\",\n \"[text(1,3):abc:]\",\n \"[end-para:::False]\",\n \"[fcode-block(2,3):`:3:yaml:::::]\",\n \"[text(3,3):def:\\n - ghi:]\",\n \"[end-fcode-block:::3:False]\",\n \"[end-block-quote:::True]\",\n ]\n expected_gfm = \"\"\"<blockquote>\n<p>abc</p>\n<pre><code class=\"language-yaml\">def:\n - ghi\n</code></pre>\n</blockquote>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)", "def test_fenced_code_blocks_extra_10a():\n\n # Arrange\n source_markdown = \"\"\"- abc\n ```yaml\n def:\n > ghi\n ```\"\"\"\n expected_tokens = [\n \"[ulist(1,1):-::2:: \\n \\n \\n ]\",\n \"[para(1,3):]\",\n \"[text(1,3):abc:]\",\n \"[end-para:::False]\",\n \"[fcode-block(2,3):`:3:yaml:::::]\",\n \"[text(3,3):def:\\n\\a>\\a&gt;\\a ghi:]\",\n \"[end-fcode-block:::3:False]\",\n \"[end-ulist:::True]\",\n ]\n expected_gfm = \"\"\"<ul>\n<li>abc\n<pre><code class=\"language-yaml\">def:\n&gt; ghi\n</code></pre>\n</li>\n</ul>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens, show_debug=False)", "def test_fenced_code_blocks_extra_09c():\n\n # Arrange\n source_markdown = \"\"\"1. abc\n ```yaml\n def:\n1. ghi\n ```\"\"\"\n expected_tokens = [\n \"[olist(1,1):.:1:3:: \\n \\n ]\",\n \"[para(1,4):]\",\n \"[text(1,4):abc:]\",\n \"[end-para:::False]\",\n \"[fcode-block(2,4):`:3:yaml:::::]\",\n \"[text(3,4):def::]\",\n \"[end-fcode-block::::True]\",\n \"[li(4,1):3::1]\",\n \"[para(4,4):]\",\n \"[text(4,4):ghi:]\",\n \"[end-para:::False]\",\n \"[fcode-block(5,4):`:3::::::]\",\n \"[end-fcode-block::::True]\",\n \"[end-olist:::True]\",\n ]\n expected_gfm = \"\"\"<ol>\n<li>abc\n<pre><code class=\"language-yaml\">def:\n</code></pre>\n</li>\n<li>ghi\n<pre><code></code></pre>\n</li>\n</ol>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)", "def test_fenced_code_blocks_extra_09e():\n\n # Arrange\n source_markdown = \"\"\"1. abc\n ```yaml\n def:\n 1. ghi\n ```\"\"\"\n expected_tokens = [\n \"[olist(1,1):.:1:3:: \\n \\n \\n ]\",\n \"[para(1,4):]\",\n \"[text(1,4):abc:]\",\n \"[end-para:::False]\",\n \"[fcode-block(2,4):`:3:yaml:::::]\",\n \"[text(3,4):def:\\n1. ghi:]\",\n \"[end-fcode-block:::3:False]\",\n \"[end-olist:::True]\",\n ]\n expected_gfm = \"\"\"<ol>\n<li>abc\n<pre><code class=\"language-yaml\">def:\n1. 
ghi\n</code></pre>\n</li>\n</ol>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)", "def test_fenced_code_blocks_extra_09d():\n\n # Arrange\n source_markdown = \"\"\"- abc\n ```yaml\n def:\n- ghi\n ```\"\"\"\n expected_tokens = [\n \"[ulist(1,1):-::2:: \\n \\n ]\",\n \"[para(1,3):]\",\n \"[text(1,3):abc:]\",\n \"[end-para:::False]\",\n \"[fcode-block(2,3):`:3:yaml:::::]\",\n \"[text(3,3):def::]\",\n \"[end-fcode-block::::True]\",\n \"[li(4,1):2::]\",\n \"[para(4,3):]\",\n \"[text(4,3):ghi:]\",\n \"[end-para:::False]\",\n \"[fcode-block(5,3):`:3::::::]\",\n \"[end-fcode-block::::True]\",\n \"[end-ulist:::True]\",\n ]\n expected_gfm = \"\"\"<ul>\n<li>abc\n<pre><code class=\"language-yaml\">def:\n</code></pre>\n</li>\n<li>ghi\n<pre><code></code></pre>\n</li>\n</ul>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens, show_debug=False)", "def test_fenced_code_blocks_extra_09x():\n\n # Arrange\n source_markdown = \"\"\"1. abc\n ```yaml\n def:\n - ghi\n ```\"\"\"\n expected_tokens = [\n \"[olist(1,1):.:1:3:: \\n \\n \\n ]\",\n \"[para(1,4):]\",\n \"[text(1,4):abc:]\",\n \"[end-para:::False]\",\n \"[fcode-block(2,4):`:3:yaml:::::]\",\n \"[text(3,4):def:\\n - ghi:]\",\n \"[end-fcode-block:::3:False]\",\n \"[end-olist:::True]\",\n ]\n expected_gfm = \"\"\"<ol>\n<li>abc\n<pre><code class=\"language-yaml\">def:\n - ghi\n</code></pre>\n</li>\n</ol>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)", "def test_fenced_code_blocks_extra_10x():\n\n # Arrange\n source_markdown = \"\"\"1. abc\n ```yaml\n def:\n > ghi\n ```\"\"\"\n expected_tokens = [\n \"[olist(1,1):.:1:3:: \\n \\n \\n ]\",\n \"[para(1,4):]\",\n \"[text(1,4):abc:]\",\n \"[end-para:::False]\",\n \"[fcode-block(2,4):`:3:yaml:::::]\",\n \"[text(3,4):def:\\n\\a>\\a&gt;\\a ghi:]\",\n \"[end-fcode-block:::3:False]\",\n \"[end-olist:::True]\",\n ]\n expected_gfm = \"\"\"<ol>\n<li>abc\n<pre><code class=\"language-yaml\">def:\n&gt; ghi\n</code></pre>\n</li>\n</ol>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)", "def memoized_parse_block(code):\n internal_assert(lambda: code not in parse_block_memo.values(), \"attempted recompilation of\", code)\n success, result = parse_block_memo.get(code, (None, None))\n if success is None:\n try:\n parsed = COMPILER.parse_block(code)\n except Exception as err:\n success, result = False, err\n else:\n success, result = True, parsed\n parse_block_memo[code] = (success, result)\n if success:\n return result\n else:\n raise result", "def test_nested_three_unordered_ordered_block():\n\n # Arrange\n source_markdown = \"\"\"+ 1. 
> list\n > item\"\"\"\n expected_tokens = [\n \"[ulist(1,1):+::2:]\",\n \"[olist(1,3):.:1:5: :]\",\n \"[block-quote(1,6): : > \\n > ]\",\n \"[para(1,8):\\n]\",\n \"[text(1,8):list\\nitem::\\n]\",\n \"[end-para:::True]\",\n \"[end-block-quote:::True]\",\n \"[end-olist:::True]\",\n \"[end-ulist:::True]\",\n ]\n expected_gfm = \"\"\"<ul>\n<li>\n<ol>\n<li>\n<blockquote>\n<p>list\nitem</p>\n</blockquote>\n</li>\n</ol>\n</li>\n</ul>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens)", "def CheckToken(self, token, state):\n # Store some convenience variables\n first_in_line = token.IsFirstInLine()\n last_in_line = token.IsLastInLine()\n last_non_space_token = state.GetLastNonSpaceToken()\n\n type = token.type\n\n # Process the line change.\n if not self._is_html and FLAGS.strict:\n # TODO(robbyw): Support checking indentation in HTML files.\n indentation_errors = self._indentation.CheckToken(token, state)\n for indentation_error in indentation_errors:\n self._HandleError(*indentation_error)\n\n if last_in_line:\n self._CheckLineLength(token, state)\n\n if type == Type.PARAMETERS:\n # Find missing spaces in parameter lists.\n if self.MISSING_PARAMETER_SPACE.search(token.string):\n self._HandleError(errors.MISSING_SPACE, 'Missing space after \",\"',\n token)\n\n # Find extra spaces at the beginning of parameter lists. Make sure\n # we aren't at the beginning of a continuing multi-line list.\n if not first_in_line:\n space_count = len(token.string) - len(token.string.lstrip())\n if space_count:\n self._HandleError(errors.EXTRA_SPACE, 'Extra space after \"(\"',\n token, Position(0, space_count))\n\n elif (type == Type.START_BLOCK and\n token.metadata.context.type == Context.BLOCK):\n self._CheckForMissingSpaceBeforeToken(token)\n\n elif type == Type.END_BLOCK:\n # This check is for object literal end block tokens, but there is no need\n # to test that condition since a comma at the end of any other kind of\n # block is undoubtedly a parse error.\n last_code = token.metadata.last_code\n if last_code.IsOperator(','):\n self._HandleError(errors.COMMA_AT_END_OF_LITERAL,\n 'Illegal comma at end of object literal', last_code,\n Position.All(last_code.string))\n\n if state.InFunction() and state.IsFunctionClose():\n is_immediately_called = (token.next and\n token.next.type == Type.START_PAREN)\n if state.InTopLevelFunction():\n # When the function was top-level and not immediately called, check\n # that it's terminated by a semi-colon.\n if state.InAssignedFunction():\n if not is_immediately_called and (last_in_line or\n not token.next.type == Type.SEMICOLON):\n self._HandleError(errors.MISSING_SEMICOLON_AFTER_FUNCTION,\n 'Missing semicolon after function assigned to a variable',\n token, Position.AtEnd(token.string))\n else:\n if not last_in_line and token.next.type == Type.SEMICOLON:\n self._HandleError(errors.ILLEGAL_SEMICOLON_AFTER_FUNCTION,\n 'Illegal semicolon after function declaration',\n token.next, Position.All(token.next.string))\n\n if (state.InInterfaceMethod() and last_code.type != Type.START_BLOCK):\n self._HandleError(errors.INTERFACE_METHOD_CANNOT_HAVE_CODE,\n 'Interface methods cannot contain code', last_code)\n\n elif (state.IsBlockClose() and\n token.next and token.next.type == Type.SEMICOLON):\n self._HandleError(errors.REDUNDANT_SEMICOLON,\n 'No semicolon is required to end a code block',\n token.next, Position.All(token.next.string))\n\n elif type == Type.SEMICOLON:\n if token.previous and token.previous.type == Type.WHITESPACE:\n 
self._HandleError(errors.EXTRA_SPACE, 'Extra space before \";\"',\n token.previous, Position.All(token.previous.string))\n\n if token.next and token.next.line_number == token.line_number:\n if token.metadata.context.type != Context.FOR_GROUP_BLOCK:\n # TODO(robbyw): Error about no multi-statement lines.\n pass\n\n elif token.next.type not in (\n Type.WHITESPACE, Type.SEMICOLON, Type.END_PAREN):\n self._HandleError(errors.MISSING_SPACE,\n 'Missing space after \";\" in for statement',\n token.next,\n Position.AtBeginning())\n\n last_code = token.metadata.last_code\n if last_code and last_code.type == Type.SEMICOLON:\n # Allow a single double semi colon in for loops for cases like:\n # for (;;) { }.\n # NOTE(user): This is not a perfect check, and will not throw an error\n # for cases like: for (var i = 0;; i < n; i++) {}, but then your code\n # probably won't work either.\n for_token = tokenutil.CustomSearch(last_code,\n lambda token: token.type == Type.KEYWORD and token.string == 'for',\n end_func=lambda token: token.type == Type.SEMICOLON,\n distance=None,\n reverse=True)\n\n if not for_token:\n self._HandleError(errors.REDUNDANT_SEMICOLON, 'Redundant semicolon',\n token, Position.All(token.string))\n\n elif type == Type.START_PAREN:\n if token.previous and token.previous.type == Type.KEYWORD:\n self._HandleError(errors.MISSING_SPACE, 'Missing space before \"(\"',\n token, Position.AtBeginning())\n elif token.previous and token.previous.type == Type.WHITESPACE:\n before_space = token.previous.previous\n if (before_space and before_space.line_number == token.line_number and\n before_space.type == Type.IDENTIFIER):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \"(\"',\n token.previous, Position.All(token.previous.string))\n\n elif type == Type.START_BRACKET:\n if (not first_in_line and token.previous.type == Type.WHITESPACE and\n last_non_space_token and\n last_non_space_token.type in Type.EXPRESSION_ENDER_TYPES):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \"[\"',\n token.previous, Position.All(token.previous.string))\n # If the [ token is the first token in a line we shouldn't complain\n # about a missing space before [. 
This is because some Ecma script\n # languages allow syntax like:\n # [Annotation]\n # class MyClass {...}\n # So we don't want to blindly warn about missing spaces before [.\n # In the the future, when rules for computing exactly how many spaces\n # lines should be indented are added, then we can return errors for\n # [ tokens that are improperly indented.\n # For example:\n # var someVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryVeryLongVariableName =\n # [a,b,c];\n # should trigger a proper indentation warning message as [ is not indented\n # by four spaces.\n elif (not first_in_line and token.previous and\n not token.previous.type in (\n [Type.WHITESPACE, Type.START_PAREN, Type.START_BRACKET] +\n Type.EXPRESSION_ENDER_TYPES)):\n self._HandleError(errors.MISSING_SPACE, 'Missing space before \"[\"',\n token, Position.AtBeginning())\n\n elif type in (Type.END_PAREN, Type.END_BRACKET):\n # Ensure there is no space before closing parentheses, except when\n # it's in a for statement with an omitted section, or when it's at the\n # beginning of a line.\n if (token.previous and token.previous.type == Type.WHITESPACE and\n not token.previous.IsFirstInLine() and\n not (last_non_space_token and last_non_space_token.line_number ==\n token.line_number and\n last_non_space_token.type == Type.SEMICOLON)):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \"%s\"' %\n token.string, token.previous, Position.All(token.previous.string))\n\n if token.type == Type.END_BRACKET:\n last_code = token.metadata.last_code\n if last_code.IsOperator(','):\n self._HandleError(errors.COMMA_AT_END_OF_LITERAL,\n 'Illegal comma at end of array literal', last_code,\n Position.All(last_code.string))\n\n elif type == Type.WHITESPACE:\n if self.ILLEGAL_TAB.search(token.string):\n if token.IsFirstInLine():\n self._HandleError(errors.ILLEGAL_TAB,\n 'Illegal tab in whitespace before \"%s\"' % token.next.string,\n token, Position.All(token.string))\n else:\n self._HandleError(errors.ILLEGAL_TAB,\n 'Illegal tab in whitespace after \"%s\"' % token.previous.string,\n token, Position.All(token.string))\n\n # Check whitespace length if it's not the first token of the line and\n # if it's not immediately before a comment.\n if last_in_line:\n # Check for extra whitespace at the end of a line.\n self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',\n token, Position.All(token.string))\n elif not first_in_line and not token.next.IsComment():\n if token.length > 1:\n self._HandleError(errors.EXTRA_SPACE, 'Extra space after \"%s\"' %\n token.previous.string, token,\n Position(1, len(token.string) - 1))\n\n elif type == Type.OPERATOR:\n last_code = token.metadata.last_code\n\n if not self._ExpectSpaceBeforeOperator(token):\n if (token.previous and token.previous.type == Type.WHITESPACE and\n last_code and last_code.type in (Type.NORMAL, Type.IDENTIFIER)):\n self._HandleError(errors.EXTRA_SPACE,\n 'Extra space before \"%s\"' % token.string, token.previous,\n Position.All(token.previous.string))\n\n elif (token.previous and\n not token.previous.IsComment() and\n token.previous.type in Type.EXPRESSION_ENDER_TYPES):\n self._HandleError(errors.MISSING_SPACE,\n 'Missing space before \"%s\"' % token.string, token,\n Position.AtBeginning())\n\n # Check that binary operators are not used to start lines.\n if ((not last_code or last_code.line_number != token.line_number) and\n not token.metadata.IsUnaryOperator()):\n self._HandleError(errors.LINE_STARTS_WITH_OPERATOR,\n 'Binary operator should go on previous line \"%s\"' % 
token.string,\n token)\n\n elif type == Type.DOC_FLAG:\n flag = token.attached_object\n\n if flag.flag_type == 'bug':\n # TODO(robbyw): Check for exactly 1 space on the left.\n string = token.next.string.lstrip()\n string = string.split(' ', 1)[0]\n\n if not string.isdigit():\n self._HandleError(errors.NO_BUG_NUMBER_AFTER_BUG_TAG,\n '@bug should be followed by a bug number', token)\n\n elif flag.flag_type == 'suppress':\n if flag.type is None:\n # A syntactically invalid suppress tag will get tokenized as a normal\n # flag, indicating an error.\n self._HandleError(errors.INCORRECT_SUPPRESS_SYNTAX,\n 'Invalid suppress syntax: should be @suppress {errortype}. '\n 'Spaces matter.', token)\n elif flag.type not in state.GetDocFlag().SUPPRESS_TYPES:\n self._HandleError(errors.INVALID_SUPPRESS_TYPE,\n 'Invalid suppression type: %s' % flag.type,\n token)\n\n elif FLAGS.strict and flag.flag_type == 'author':\n # TODO(user): In non strict mode check the author tag for as much as\n # it exists, though the full form checked below isn't required.\n string = token.next.string\n result = self.AUTHOR_SPEC.match(string)\n if not result:\n self._HandleError(errors.INVALID_AUTHOR_TAG_DESCRIPTION,\n 'Author tag line should be of the form: '\n '@author [email protected] (Your Name)',\n token.next)\n else:\n # Check spacing between email address and name. Do this before\n # checking earlier spacing so positions are easier to calculate for\n # autofixing.\n num_spaces = len(result.group(2))\n if num_spaces < 1:\n self._HandleError(errors.MISSING_SPACE,\n 'Missing space after email address',\n token.next, Position(result.start(2), 0))\n elif num_spaces > 1:\n self._HandleError(errors.EXTRA_SPACE,\n 'Extra space after email address',\n token.next,\n Position(result.start(2) + 1, num_spaces - 1))\n\n # Check for extra spaces before email address. Can't be too few, if\n # not at least one we wouldn't match @author tag.\n num_spaces = len(result.group(1))\n if num_spaces > 1:\n self._HandleError(errors.EXTRA_SPACE,\n 'Extra space before email address',\n token.next, Position(1, num_spaces - 1))\n\n elif (flag.flag_type in state.GetDocFlag().HAS_DESCRIPTION and\n not self._limited_doc_checks):\n if flag.flag_type == 'param':\n if flag.name is None:\n self._HandleError(errors.MISSING_JSDOC_PARAM_NAME,\n 'Missing name in @param tag', token)\n\n if not flag.description or flag.description is None:\n flag_name = token.type\n if 'name' in token.values:\n flag_name = '@' + token.values['name']\n self._HandleError(errors.MISSING_JSDOC_TAG_DESCRIPTION,\n 'Missing description in %s tag' % flag_name, token)\n else:\n self._CheckForMissingSpaceBeforeToken(flag.description_start_token)\n\n # We want punctuation to be inside of any tags ending a description,\n # so strip tags before checking description. See bug 1127192. 
Note\n # that depending on how lines break, the real description end token\n # may consist only of stripped html and the effective end token can\n # be different.\n end_token = flag.description_end_token\n end_string = htmlutil.StripTags(end_token.string).strip()\n while (end_string == '' and not\n end_token.type in Type.FLAG_ENDING_TYPES):\n end_token = end_token.previous\n if end_token.type in Type.FLAG_DESCRIPTION_TYPES:\n end_string = htmlutil.StripTags(end_token.string).rstrip()\n\n if not (end_string.endswith('.') or end_string.endswith('?') or\n end_string.endswith('!')):\n # Find the position for the missing punctuation, inside of any html\n # tags.\n desc_str = end_token.string.rstrip()\n while desc_str.endswith('>'):\n start_tag_index = desc_str.rfind('<')\n if start_tag_index < 0:\n break\n desc_str = desc_str[:start_tag_index].rstrip()\n end_position = Position(len(desc_str), 0)\n\n self._HandleError(\n errors.JSDOC_TAG_DESCRIPTION_ENDS_WITH_INVALID_CHARACTER,\n ('%s descriptions must end with valid punctuation such as a '\n 'period.' % token.string),\n end_token, end_position)\n\n if flag.flag_type in state.GetDocFlag().HAS_TYPE:\n if flag.type_start_token is not None:\n self._CheckForMissingSpaceBeforeToken(\n token.attached_object.type_start_token)\n\n if flag.type and flag.type != '' and not flag.type.isspace():\n self._CheckJsDocType(token)\n\n if type in (Type.DOC_FLAG, Type.DOC_INLINE_FLAG):\n if (token.values['name'] not in state.GetDocFlag().LEGAL_DOC and\n token.values['name'] not in FLAGS.custom_jsdoc_tags):\n self._HandleError(errors.INVALID_JSDOC_TAG,\n 'Invalid JsDoc tag: %s' % token.values['name'], token)\n\n if (FLAGS.strict and token.values['name'] == 'inheritDoc' and\n type == Type.DOC_INLINE_FLAG):\n self._HandleError(errors.UNNECESSARY_BRACES_AROUND_INHERIT_DOC,\n 'Unnecessary braces around @inheritDoc',\n token)\n\n elif type == Type.SIMPLE_LVALUE:\n identifier = token.values['identifier']\n\n if ((not state.InFunction() or state.InConstructor()) and\n not state.InParentheses() and not state.InObjectLiteralDescendant()):\n jsdoc = state.GetDocComment()\n if not state.HasDocComment(identifier):\n # Only test for documentation on identifiers with .s in them to\n # avoid checking things like simple variables. We don't require\n # documenting assignments to .prototype itself (bug 1880803).\n if (not state.InConstructor() and\n identifier.find('.') != -1 and not\n identifier.endswith('.prototype') and not\n self._limited_doc_checks):\n comment = state.GetLastComment()\n if not (comment and comment.lower().count('jsdoc inherited')):\n self._HandleError(errors.MISSING_MEMBER_DOCUMENTATION,\n \"No docs found for member '%s'\" % identifier,\n token);\n elif jsdoc and (not state.InConstructor() or\n identifier.startswith('this.')):\n # We are at the top level and the function/member is documented.\n if identifier.endswith('_') and not identifier.endswith('__'):\n if jsdoc.HasFlag('override'):\n self._HandleError(errors.INVALID_OVERRIDE_PRIVATE,\n '%s should not override a private member.' % identifier,\n jsdoc.GetFlag('override').flag_token)\n # Can have a private class which inherits documentation from a\n # public superclass.\n if jsdoc.HasFlag('inheritDoc') and not jsdoc.HasFlag('constructor'):\n self._HandleError(errors.INVALID_INHERIT_DOC_PRIVATE,\n '%s should not inherit from a private member.' 
% identifier,\n jsdoc.GetFlag('inheritDoc').flag_token)\n if (not jsdoc.HasFlag('private') and\n not ('underscore' in jsdoc.suppressions)):\n self._HandleError(errors.MISSING_PRIVATE,\n 'Member \"%s\" must have @private JsDoc.' %\n identifier, token)\n if jsdoc.HasFlag('private') and 'underscore' in jsdoc.suppressions:\n self._HandleError(errors.UNNECESSARY_SUPPRESS,\n '@suppress {underscore} is not necessary with @private',\n jsdoc.suppressions['underscore'])\n elif jsdoc.HasFlag('private'):\n self._HandleError(errors.EXTRA_PRIVATE,\n 'Member \"%s\" must not have @private JsDoc' %\n identifier, token)\n\n if ((jsdoc.HasFlag('desc') or jsdoc.HasFlag('hidden'))\n and not identifier.startswith('MSG_')\n and identifier.find('.MSG_') == -1):\n # TODO(user): Update error message to show the actual invalid\n # tag, either @desc or @hidden.\n self._HandleError(errors.INVALID_USE_OF_DESC_TAG,\n 'Member \"%s\" should not have @desc JsDoc' % identifier,\n token)\n\n # Check for illegaly assigning live objects as prototype property values.\n index = identifier.find('.prototype.')\n # Ignore anything with additional .s after the prototype.\n if index != -1 and identifier.find('.', index + 11) == -1:\n equal_operator = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)\n next_code = tokenutil.SearchExcept(equal_operator, Type.NON_CODE_TYPES)\n if next_code and (\n next_code.type in (Type.START_BRACKET, Type.START_BLOCK) or\n next_code.IsOperator('new')):\n self._HandleError(errors.ILLEGAL_PROTOTYPE_MEMBER_VALUE,\n 'Member %s cannot have a non-primitive value' % identifier,\n token)\n\n elif type == Type.END_PARAMETERS:\n # Find extra space at the end of parameter lists. We check the token\n # prior to the current one when it is a closing paren.\n if (token.previous and token.previous.type == Type.PARAMETERS\n and self.ENDS_WITH_SPACE.search(token.previous.string)):\n self._HandleError(errors.EXTRA_SPACE, 'Extra space before \")\"',\n token.previous)\n\n jsdoc = state.GetDocComment()\n if state.GetFunction().is_interface:\n if token.previous and token.previous.type == Type.PARAMETERS:\n self._HandleError(errors.INTERFACE_CONSTRUCTOR_CANNOT_HAVE_PARAMS,\n 'Interface constructor cannot have parameters',\n token.previous)\n elif (state.InTopLevel() and jsdoc and not jsdoc.HasFlag('see')\n and not jsdoc.InheritsDocumentation()\n and not state.InObjectLiteralDescendant() and not\n jsdoc.IsInvalidated()):\n distance, edit = jsdoc.CompareParameters(state.GetParams())\n if distance:\n params_iter = iter(state.GetParams())\n docs_iter = iter(jsdoc.ordered_params)\n\n for op in edit:\n if op == 'I':\n # Insertion.\n # Parsing doc comments is the same for all languages\n # but some languages care about parameters that don't have\n # doc comments and some languages don't care.\n # Languages that don't allow variables to by typed such as\n # JavaScript care but languages such as ActionScript or Java\n # that allow variables to be typed don't care.\n if not self._limited_doc_checks:\n self.HandleMissingParameterDoc(token, params_iter.next())\n\n elif op == 'D':\n # Deletion\n self._HandleError(errors.EXTRA_PARAMETER_DOCUMENTATION,\n 'Found docs for non-existing parameter: \"%s\"' %\n docs_iter.next(), token)\n elif op == 'S':\n # Substitution\n if not self._limited_doc_checks:\n self._HandleError(errors.WRONG_PARAMETER_DOCUMENTATION,\n 'Parameter mismatch: got \"%s\", expected \"%s\"' %\n (params_iter.next(), docs_iter.next()), token)\n\n else:\n # Equality - just advance the iterators\n params_iter.next()\n 
docs_iter.next()\n\n elif type == Type.STRING_TEXT:\n # If this is the first token after the start of the string, but it's at\n # the end of a line, we know we have a multi-line string.\n if token.previous.type in (Type.SINGLE_QUOTE_STRING_START,\n Type.DOUBLE_QUOTE_STRING_START) and last_in_line:\n self._HandleError(errors.MULTI_LINE_STRING,\n 'Multi-line strings are not allowed', token)\n\n\n # This check is orthogonal to the ones above, and repeats some types, so\n # it is a plain if and not an elif.\n if token.type in Type.COMMENT_TYPES:\n if self.ILLEGAL_TAB.search(token.string):\n self._HandleError(errors.ILLEGAL_TAB,\n 'Illegal tab in comment \"%s\"' % token.string, token)\n\n trimmed = token.string.rstrip()\n if last_in_line and token.string != trimmed:\n # Check for extra whitespace at the end of a line.\n self._HandleError(errors.EXTRA_SPACE, 'Extra space at end of line',\n token, Position(len(trimmed), len(token.string) - len(trimmed)))\n\n # This check is also orthogonal since it is based on metadata.\n if token.metadata.is_implied_semicolon:\n self._HandleError(errors.MISSING_SEMICOLON,\n 'Missing semicolon at end of line', token)", "def test_assessment_ast():\n # pylint: disable-msg=anomalous-backslash-in-string\n assessment_text = (\n \"\"\"assessment = {\n preamble: '<p>This is text.</p>',\n questionsList: [\n {'questionHTML': '<p>This is text.</p>',\n choices:\n [\"A and B\", \"D and B\", correct(\"A and C\"), \"C and D\", \"I don't know\"]\n },\n {\"questionHTML\": '<p>This is text.</p>',\n choices: [correct(\"True\"), \"False\", \"I don't know\"],\n choiceScores: [0, 0.5, 1.0],\n weight: 3\n },\n {questionHTML: '<p>This is text.</p>',\n correctAnswerString: 'sunrise'\n },\n {questionHTML: '<p>This is text.</p>',\n correctAnswerRegex: regex(\"/354\\s*[+]\\s*651/\")\n }\n ],\n assessmentName: 'Pre',\n checkAnswers: false\n}\n\"\"\")\n # pylint: enable-msg=anomalous-backslash-in-string\n\n verify_assessment(assessment_text)\n\n scope = verify.Assessment().scope\n current_ast = AssessmentParser13.parse_string_in_scope(\n assessment_text, scope, 'assessment')\n expected_ast = verify.legacy_eval_python_expression_for_test(\n assessment_text, scope, 'assessment')\n same = (\n len(current_ast.get('assessment')) == 4 and\n len(current_ast.get('assessment').get('questionsList')) == 4 and\n current_ast.get('assessment') == expected_ast.get('assessment') and\n current_ast == expected_ast)\n if not same:\n import pprint # # pylint: disable-msg=g-import-not-at-top\n pprint.pprint(current_ast.get('assessment'))\n pprint.pprint(expected_ast.get('assessment'))\n assert same", "def _split_code_lines(ast_nodes, text):\n if not ast_nodes:\n yield ([], text)\n return\n assert text.startpos <= ast_nodes[0].startpos\n assert ast_nodes[-1].startpos < text.endpos\n if text.startpos != ast_nodes[0].startpos:\n # Starting noncode lines.\n yield ([], text[text.startpos:ast_nodes[0].startpos])\n end_sentinel = _DummyAst_Node()\n end_sentinel.startpos = text.endpos\n for node, next_node in zip(ast_nodes, ast_nodes[1:] + [end_sentinel]):\n startpos = node.startpos\n next_startpos = next_node.startpos\n assert startpos < next_startpos\n # We have the start position of this node. Figure out the end\n # position, excluding noncode lines (standalone comments and blank\n # lines).\n if hasattr(node, 'endpos'):\n # We have an endpos for the node because this was a multi-line\n # string. 
Start with the node endpos.\n endpos = node.endpos\n assert startpos < endpos <= next_startpos\n # enpos points to the character *after* the ending quote, so we\n # know that this is never at the beginning of the line.\n assert endpos.colno != 1\n # Advance past whitespace an inline comment, if any. Do NOT\n # advance past other code that could be on the same line, nor past\n # blank lines and comments on subsequent lines.\n line = text[endpos : min(text.endpos, FilePos(endpos.lineno+1,1))]\n if _is_comment_or_blank(line):\n endpos = FilePos(endpos.lineno+1, 1)\n else:\n endpos = next_startpos\n assert endpos <= text.endpos\n # We don't have an endpos yet; what we do have is the next node's\n # startpos (or the position at the end of the text). Start there\n # and work backward.\n if endpos.colno != 1:\n if endpos == text.endpos:\n # There could be a comment on the last line and no\n # trailing newline.\n # TODO: do this in a more principled way.\n if _is_comment_or_blank(text[endpos.lineno]):\n assert startpos.lineno < endpos.lineno\n if not text[endpos.lineno-1].endswith(\"\\\\\"):\n endpos = FilePos(endpos.lineno,1)\n else:\n # We're not at end of file, yet the next node starts in\n # the middle of the line. This should only happen with if\n # we're not looking at a comment. [The first character in\n # the line could still be \"#\" if we're inside a multiline\n # string that's the last child of the parent node.\n # Therefore we don't assert 'not\n # _is_comment_or_blank(...)'.]\n pass\n if endpos.colno == 1:\n while (endpos.lineno-1 > startpos.lineno and\n _is_comment_or_blank(text[endpos.lineno-1]) and\n (not text[endpos.lineno-2].endswith(\"\\\\\") or\n _is_comment_or_blank(text[endpos.lineno-2]))):\n endpos = FilePos(endpos.lineno-1, 1)\n assert startpos < endpos <= next_startpos\n yield ([node], text[startpos:endpos])\n if endpos != next_startpos:\n yield ([], text[endpos:next_startpos])", "def test_fenced_code_blocks_extra_09f():\n\n # Arrange\n source_markdown = \"\"\"- abc\n ```yaml\n def:\n - ghi\n ```\"\"\"\n expected_tokens = [\n \"[ulist(1,1):-::2:: \\n \\n \\n ]\",\n \"[para(1,3):]\",\n \"[text(1,3):abc:]\",\n \"[end-para:::False]\",\n \"[fcode-block(2,3):`:3:yaml:::::]\",\n \"[text(3,3):def:\\n- ghi:]\",\n \"[end-fcode-block:::3:False]\",\n \"[end-ulist:::True]\",\n ]\n expected_gfm = \"\"\"<ul>\n<li>abc\n<pre><code class=\"language-yaml\">def:\n- ghi\n</code></pre>\n</li>\n</ul>\"\"\"\n\n # Act & Assert\n act_and_assert(source_markdown, expected_gfm, expected_tokens, show_debug=False)", "def test_execute_code_function():\n code = dedent('''\n print \"foo\"\n print \"bar\"\n ''')\n\n expected_output = dedent('''\\\n foo\n bar\n ''')\n results = ExecuteCode.execute_code(code)\n\n assert expected_output == results", "def blocks(self):\n if self._ast:\n for block in self._ast[2]:\n yield block", "def check(text):\n text = text.copy()\n if not isinstance(text,list): # TEST\n raise TypeError(\"text must be a listlike :\\n{}\".format(text))\n \n # managing latex genuine tag\n for i, line in enumerate(text):\n if '\\\\' in line:\n utils.underlineall(line,'\\\\')\n logger.warning(\"Genuine latex tags were found, but won't be evaluated on line {}\".format(i))\n \n # check placeholders # TEST\n parsers['v'].check_syntax(text)\n \n for i,line in enumerate(text):\n # checking ends of lines TEST\n space_before_match = re.search(\"[^ ],,\",line)\n if space_before_match:\n utils.underlineall(line,space_before_match.group())\n raise SyntaxError(\"Please put a space before EOL tag in line 
{}\".format(i))\n space_after_match = re.search(\",,[^ ]\",line)\n if space_after_match:\n utils.underlineall(line,space_after_match.group())\n raise SyntaxError(\"Please put a space or a carriage return after EOL tag in line {}\".format(i))\n \n # checking illegal closing tags TEST\n for parser, module in parsers.items():\n if not module.has_closing_tag:\n if closing_mark + parser in line:\n utils.underlineall(line,closing_mark+parser)\n raise SyntaxError(\"{} parser has no closing tag: check line {}\".format(parser,i))\n \n # checking other tags\n if opening_mark in line:\n fline,nothing, sline = line.partition(opening_mark)\n while True:\n # checking each sub parser\n mark_to_test = sline.split()[0]\n parser = parsers[mark_to_test[0]]\n checker.checkmark(mark_to_test,parser,line,i)\n checker.checkargs(parser,mark_to_test,sline,line,i)\n \n # checking closing tag TEST BUG\n if parser.has_closing_tag:\n closing_tag = closing_mark + mark_to_test\n opening_tag = opening_mark + mark_to_test\n if opening_tag in sline:\n utils.underlineall(sline,opening_tag)\n raise SyntaxError(\"{} opening tag has been found before closing tag expected on line {}\".format(opening_tag,i))\n if closing_tag in sline:\n part1,tag,part2 = sline.partition(closing_tag)\n sline = part1 + part2\n else: # looking for closing tag in the rest of the text\n for j,line2 in enumerate(text[i+1:]):\n j+=i+1\n fline2, mark_expected, sline2 = line2.partition(closing_tag)\n if opening_tag in fline2:\n print(\"Opening tag not closed, line {}\".format(i))\n print(fline,nothing,utils.underlineall(sline,opening_tag,False))\n print(\"Opening tag found too soon, line {}\".format(j))\n utils.underlineall(line2,opening_tag)\n raise SyntaxError(\"{} opening tag has been found before closing tag expected\".format(opening_tag))\n if mark_expected:\n text[j] = fline2 + sline2\n break\n else:\n print(fline,nothing,utils.underlineall(sline,opening_tag,False))\n raise SyntaxError(\"No closing tag found for {} in line {}\".format(opening_tag,i))\n new_partition = sline.partition(opening_mark)\n fline = fline + nothing + new_partition[0]\n nothing, sline = new_partition[1:]\n \n if opening_mark not in sline: # condition to break loop\n line = fline + nothing + sline\n break\n \n # checking alone closing tags -> closing tags are supposed to be deleted TEST\n if closing_mark in line: \n alone_closing_tag = utils.wrappedchars(line,closing_mark)\n utils.underlineall(line,alone_closing_tag)\n raise SyntaxError(\"An only closing tag has been found in line {}\".format(i))\n \n return True", "def __parse_blocks_pass(self):\n\n self.stack = [DocumentStackToken()]\n\n self.tokenized_document = []\n token_to_use = self.source_provider.get_next_line()\n did_start_close = False\n did_started_close = False\n requeue = []\n ignore_link_definition_start = False\n POGGER.debug(\"---$---\", token_to_use)\n POGGER.debug(\"---\")\n self.__parse_properties.pragma_lines = {}\n line_number = 1\n try:\n (\n token_to_use,\n line_number,\n requeue,\n ) = self.__process_front_matter_header_if_present(\n token_to_use, line_number, requeue\n )\n did_start_close = token_to_use is None\n keep_on_going = True\n while keep_on_going:\n POGGER.debug(\"next-line>>$\", token_to_use)\n POGGER.debug(\"stack>>$\", self.stack)\n POGGER.debug(\"current_block>>$\", self.stack[-1])\n POGGER.debug(\"line_number>>$\", line_number)\n POGGER.debug(\"---\")\n\n position_marker = PositionMarker(line_number, 0, token_to_use)\n parser_state = ParserState(\n self.stack,\n 
self.tokenized_document,\n TokenizedMarkdown.__close_open_blocks,\n self.__handle_blank_line,\n )\n if did_start_close:\n POGGER.debug(\"\\n\\ncleanup\")\n\n was_link_definition_started_before_close = self.stack[\n -1\n ].was_link_definition_started\n\n did_started_close = True\n (\n tokens_from_line,\n requeue_line_info,\n ) = TokenizedMarkdown.__close_open_blocks(\n parser_state,\n self.tokenized_document,\n include_block_quotes=True,\n include_lists=True,\n caller_can_handle_requeue=True,\n was_forced=True,\n )\n if tokens_from_line and not self.tokenized_document:\n self.tokenized_document.extend(tokens_from_line)\n\n if not (requeue_line_info and requeue_line_info.lines_to_requeue):\n keep_on_going = False\n else:\n assert was_link_definition_started_before_close\n assert not requeue_line_info.lines_to_requeue[0]\n\n del requeue_line_info.lines_to_requeue[0]\n line_number -= 1\n\n did_start_close = False\n tokens_from_line = None\n else:\n POGGER.debug(\">>>>$\", self.tokenized_document)\n\n if not token_to_use or not token_to_use.strip():\n POGGER.debug(\"call __parse_blocks_pass>>handle_blank_line\")\n (\n tokens_from_line,\n requeue_line_info,\n ) = self.__handle_blank_line(\n parser_state,\n token_to_use,\n from_main_transform=True,\n position_marker=position_marker,\n )\n else:\n POGGER.debug(\"\\n\\nnormal lines\")\n (\n tokens_from_line,\n _,\n _,\n requeue_line_info,\n _,\n ) = ContainerBlockProcessor.parse_line_for_container_blocks(\n parser_state,\n position_marker,\n ignore_link_definition_start,\n self.__parse_properties,\n None,\n )\n\n POGGER.debug(\"<<<<$\", self.tokenized_document)\n\n if keep_on_going:\n line_number, ignore_link_definition_start = TokenizedMarkdown.__xx(\n line_number, requeue_line_info, requeue\n )\n\n POGGER.debug(\n \"---\\nbefore>>$\",\n self.tokenized_document,\n )\n POGGER.debug(\"before>>$\", tokens_from_line)\n if tokens_from_line:\n self.tokenized_document.extend(tokens_from_line)\n POGGER.debug(\n \"after>>$\",\n self.tokenized_document,\n )\n if requeue:\n POGGER.debug(\"requeue>>$\", requeue)\n POGGER.debug(\"---\")\n\n (\n token_to_use,\n did_start_close,\n did_started_close,\n ) = self.__determine_next_token_process(\n requeue, did_start_close, did_started_close\n )\n except AssertionError as this_exception:\n error_message = f\"A project assertion failed on line {line_number} of the current document.\"\n raise BadTokenizationError(error_message) from this_exception\n\n if self.__parse_properties.pragma_lines:\n self.tokenized_document.append(\n PragmaToken(self.__parse_properties.pragma_lines)\n )\n return self.tokenized_document", "def check_block(self, block):\n pass", "def visit_Block(self, node: Block) -> None:\n\n for declaration in node.declarations:\n self.visit(declaration)\n self.visit(node.compound_statement)", "def test__parse_next(value, position, expected_output, expected_position):\n state = ParserState(value)\n state.position = position\n \n output = parse_next(state)\n vampytest.assert_instance(output, tuple)\n vampytest.assert_eq(output, expected_output)\n vampytest.assert_eq(state.position, expected_position)", "def test_md027_good_block_quote_indented():\n\n # Arrange\n scanner = MarkdownScanner()\n source_path = os.path.join(\n \"test\", \"resources\", \"rules\", \"md027\", \"good_block_quote_indented.md\"\n )\n supplied_arguments = [\n \"scan\",\n source_path,\n ]\n\n expected_return_code = 0\n expected_output = \"\"\n expected_error = \"\"\n\n # Act\n execute_results = 
scanner.invoke_main(arguments=supplied_arguments)\n\n # Assert\n execute_results.assert_results(\n expected_output, expected_error, expected_return_code\n )", "def locate_code_blocks(lines):\n stripped_lines = [l.strip() for l in lines]\n code_block_indexes = [i for i, l in enumerate(stripped_lines,1) if l.startswith('```')]\n nblock_indexes = len(code_block_indexes)\n assert nblock_indexes % 2 == 0\n return [b for b in zip(code_block_indexes[::2], code_block_indexes[1::2])]", "def parse(self, items):\n tree = ast.parse(\"\")\n for item in items:\n if isinstance(item, str):\n item = self.globals[item]\n\n item_lines, item_first_lineno = inspect.getsourcelines(item)\n\n try:\n item_tree = ast.parse(\"\".join(item_lines))\n except IndentationError:\n # inner function or likewise\n warnings.warn(f\"Can't parse {item.__name__}\")\n continue\n\n ast.increment_lineno(item_tree, item_first_lineno - 1)\n tree.body += item_tree.body\n\n return tree", "def check_block(block):\n asides = block.runtime.get_asides(block)\n assert len(asides) == 1, f'Found {asides} asides but expected only test_aside'\n assert isinstance(asides[0], AsideTestType)\n category = block.scope_ids.block_type\n assert asides[0].data_field == f'{category} aside data'\n assert asides[0].content == f'{category.capitalize()} Aside'\n\n for child in block.get_children():\n check_block(child)", "def parse_code(code):\n result = []\n lines = code.splitlines()\n end = len(lines)\n for index, line in enumerate(lines):\n next_index = index + 1\n if line.startswith('>>>') and next_index != end:\n block = [line[4:]]\n while next_index != end and lines[next_index].startswith('...'):\n block.append(lines[next_index][4:])\n next_index += 1\n while (next_index != end and not\n any(lines[next_index].startswith(s)\n for s in ('>>>', 'Trace', ' File'))):\n next_index += 1\n\n if next_index != end and lines[next_index].startswith('>>>'):\n result.append('\\n'.join(block))\n\n if lines[-1].startswith('>>>'):\n result.append(lines[-1][4:])\n return '\\n'.join(result)", "def _do_code_blocks(self, text):\r\n code_block_re = re.compile(r'''\r\n (?:\\n\\n|\\A\\n?)\r\n ( # $1 = the code block -- one or more lines, starting with a space/tab\r\n (?:\r\n (?:[ ]{%d} | \\t) # Lines must start with a tab or a tab-width of spaces\r\n .*\\n+\r\n )+\r\n )\r\n ((?=^[ ]{0,%d}\\S)|\\Z) # Lookahead for non-space at line-start, or end of doc\r\n ''' % (self.tab_width, self.tab_width),\r\n re.M | re.X)\r\n return code_block_re.sub(self._code_block_sub, text)", "def blockParser(block):\n struct = []\n first = True\n record = False\n for line in block:\n if line.startswith('Structure #'):\n record = True\n if not first:\n yield struct\n struct = []\n first = False\n if record:\n struct.append(line)\n yield struct" ]
[ "0.6165394", "0.6077054", "0.6012596", "0.5991369", "0.59820217", "0.59499276", "0.5923587", "0.5916354", "0.5867398", "0.58330196", "0.5742467", "0.5714201", "0.5713135", "0.5707342", "0.5696634", "0.56416905", "0.56378657", "0.55859804", "0.5572285", "0.5545311", "0.5509158", "0.54861975", "0.54538983", "0.5452973", "0.5436138", "0.5432732", "0.54010516", "0.5383749", "0.5369927", "0.5361964" ]
0.6880997
0
Declare attribute for bank account number.
def bank_account_number(cls): # pylint:disable=no-self-argument, # noqa: N805 return db.Column('bank_account_number', StringEncryptedType(String, cls._get_enc_secret, AesEngine, 'pkcs5'), nullable=True, index=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, owner, initial_balance=0.0):\n Account.count += 1\n self.owner = owner\n self.account_number = '%sXY-%s-%08d' % (Account.division,\n Account.branch, Account.count)\n self.balance = initial_balance", "def create_account_number(self) -> int:\n\t\tif self.name == Account.BANK:\n\t\t\treturn 0\n\t\treturn 10**10 + random.randint(0, 10**9-1)", "def __init__(self, bank_name, account_num, balance):\n self._bank_name = bank_name\n self._account_num = account_num\n self._balance = balance", "def __init__(self, account_number: str, bank_name: str,\n starting_balance: float):\n super().__init__(account_number)\n self.bank_name = bank_name\n self.starting_balance = starting_balance\n self.current_balance = starting_balance", "def __init__(self, account_id, balance):\n self.account_id = account_id\n self.balance = balance", "def __init__(self, first_name, second_name, gender, account_type):\n self.first_name = first_name\n self.second_name = second_name\n self.gender = gender\n self.account_type = account_type\n self.account_number = '531'+ ''.join(random.choices(string.digits, k=6)) #todo: Generate new number if it exissts in database\n self.account_password = ''.join(secrets.choice(string.ascii_letters + string.digits) for i in range(10))\n self.account_balance = 0.0", "def _set_contract_number_partyA_21N(self, val):\n self.swift_obj.SequenceA_GeneralInformation.ContractNumberPartyA = val\n self.swift_obj.SequenceA_GeneralInformation.ContractNumberPartyA.swiftTag = '21N'", "def __init__(self):\n self.account_balance = 0\n self.amount = 0", "def account_number(self, account_number):\n\n self._account_number = account_number", "def __init__(self,customer, bank, acnt,limit):\n self._customer=customer\n self._bank=bank\n self._account=acnt\n self._limit=limit\n self._balance=0", "def bank_account():\n return BankAccount()", "def set_AccountNumber(self, value):\n super(AddressValidationInputSet, self)._set_input('AccountNumber', value)", "def __init__(self, customer, bank, acnt, limit):\n self._customer = customer\n self._bank = bank\n self._account = acnt\n self._limit = limit\n self._balance = 0", "def __init__(self, cardname, amount):\n self.cardname = str(cardname)\n self.amount = int(amount)", "def create_account(self):\n account_identifier = \"\".join([str(num) for num in random.sample(range(10), 9)])\n first_fifteen_digit = self.BIN + account_identifier\n checksum = self.create_checksum(first_fifteen_digit)\n card_number = first_fifteen_digit + str(checksum)\n pin = \"\".join([str(num) for num in random.sample(range(10), 4)])\n balance = 0\n print(\"\\nYour card has been created\")\n print(f\"Your card number:\\n{card_number}\\nYour card PIN:\\n{pin}\")\n # fetching max id from database\n database_cursor.execute(\"SELECT id FROM card;\")\n ids = [x[0] for x in database_cursor.fetchall()]\n if ids:\n max_id = max(ids) + 1\n else:\n max_id = 1\n # insert new account into database\n database_cursor.execute(f\"INSERT INTO card VALUES ({max_id}, {card_number}, {pin}, {balance});\")\n database_connection.commit()", "def _setaccount_with_institution_57A(self, val):\n self.swift_obj.AccountWithInstitution_A = val\n self.swift_obj.AccountWithInstitution_A.swiftTag = '57A'", "def getAcNum(self):\n\n # stores the integer account number as a formatted 3-digit string (in which 0's occupy unused digits)\n strAcNum = str(\"{self.acNum:03d}\".format(self=self))\n return strAcNum", "def __repr__(self):\n return 'ATMBankAccount({}, {})'.format(\n self.account_id,\n self.balance\n )", "def 
example_bank_account():\n \n return BankAccount(\"Test User\", 1000.0)", "def account_number(self):\n return self.__account_number", "def account(self, account: str):\n self._account = account", "def __init__(self):\n segment_number = 2\n list_digits = 4\n super().__init__(4, segment_number, list_digits, default_val=\"0 \")\n self.set_credit(self.get_credit())", "def _setbeneficiary_customer_59A(self, val):\n self.swift_obj.BeneficiaryCustomer_A = val\n self.swift_obj.BeneficiaryCustomer_A.swiftTag = '59A'", "def test_set_account_number02(self, client):\n a = AccountFactory.get_account()\n a.account_number = 'AAA000'\n a.save()\n client = self.get_auth_client(a.item.user)\n\n url = '/v1/accounts/set_account_number/{}'.format(a.id)\n dic = {'account_number': 'AAA111'}\n data = json.dumps(dic)\n response = client.post(url, data, content_type='application/json')\n assert response.status_code == 400", "def _setaccount_with_institution_57C(self, val):\n self.swift_obj.AccountWithInstitution_C = val\n self.swift_obj.AccountWithInstitution_C.swiftTag = '57C'", "def create_attribute(owner_name, att_name, context=ast.Load(), line=0, column=0):\n attribute = ast.Attribute()\n attribute.attr = att_name\n attribute.ctx = context\n attribute.lineno = line\n attribute.col_offset = column\n\n if isinstance(owner_name, str):\n attribute_name = ast.Name()\n attribute_name.ctx = ast.Load()\n attribute_name.id = owner_name\n attribute_name.lineno = line\n attribute_name.col_offset = column\n\n attribute.value = attribute_name\n else:\n attribute.value = owner_name\n\n return attribute", "def __init__(self, customer, bank, account, limit, bank_bal = 0):\n\n self._customer = customer\n self._bank = bank\n self._account = account\n self._limit = limit\n self._balance = bank_bal # To store customer spendings.", "def __init__(self, customer, bank, account, limit):\n self._customer = customer\n self._bank = bank\n self._account = account\n self._limit = limit\n self._balance = 0", "def __init__(self, initial_amount = 0):\n self.balance = initial_amount", "def __init__(self):\n self.__account_number = self.__generate_acct_number()\n self.__account_pin = self.__generate_pin()\n self.card_number = BankAccount.BIN + self.__account_number \\\n + self.__calculate_checksum(BankAccount.BIN + self.__account_number)\n self.__balance = 0\n self.__display_login_info()" ]
[ "0.6284065", "0.6187421", "0.6022238", "0.5976614", "0.59271795", "0.59124166", "0.58611524", "0.57728064", "0.56861794", "0.56256825", "0.55930215", "0.5584585", "0.5569789", "0.55608404", "0.55424184", "0.55222857", "0.55130976", "0.5481672", "0.5476077", "0.5448975", "0.5439845", "0.5436275", "0.5434763", "0.53947705", "0.5381233", "0.53720474", "0.5368647", "0.53640866", "0.53124386", "0.5309626" ]
0.6315163
0
Return account secret key for encryption.
def _get_enc_secret(): return current_app.config.get('ACCOUNT_SECRET_KEY')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def secret_key(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"secret_key\")", "def get_key():\n try:\n return settings.get('backend')['secret_key']\n except AttributeError:\n raise AuthTokenGenerationException()", "def secret_key(self):\n return self._secret_key", "def get_secret_key():\n return get_config_handler().get_secret_key()", "def secret_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"secret_key\")", "def secret_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"secret_key\")", "def secret_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"secret_key\")", "def encryption_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encryption_key\")", "def generate_secret_key():\n return b64encode(Fernet.generate_key()).decode('utf-8')", "def secret_key(self):\n return None", "def GetSecretKey():\n _LOG.info('Getting webapp2_secret_key.')\n return (Webapp2SecretKey.get_by_id('current_secret_key')\n .secret_key.encode('ascii', 'ignore'))", "def get_key(self):\r\n return self.__encryption_key", "def secret_access_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"secret_access_key\")", "def secret(self) -> str:\n return pulumi.get(self, \"secret\")", "def secret(self) -> str:\n return pulumi.get(self, \"secret\")", "def get_secret(self):\n return Buffer.from_mpz(self._secret_key)", "def encryption_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"encryption_key\")", "def encryption_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"encryption_key\")", "def generate_sharedsecret(self):\n return self._get_shared_secret(self.public_key)", "def encryption_key(self) -> bytearray:\n # Handle if encryption is disabled.\n if self.aes_on == 0:\n return None\n # Encryption is enabled so read the key and return it.\n key = bytearray(16)\n self._read_into(_REG_AES_KEY1, key)\n return key", "def secret_key(self):\n if self.__secret_key is not None:\n return '*' * len(self.__secret_key)\n\n return None", "def _get_encryption_key(self, **options):\n\n return self._public_key", "def get_secret():\n\n secret_name = \"Jido-Active-Directory-Service-Account\"\n\n # Create a Secrets Manager client\n session = boto3.session.Session()\n client = session.client(\n service_name='secretsmanager',\n region_name= os.environ.get(\"AWS_DEFAULT_REGION\")\n )\n try:\n get_secret_value_response = client.get_secret_value(\n SecretId= secret_name\n )\n except ClientError as e:\n print(\"Error getting secret key!: \" + str(e))\n return None\n else:\n # Decrypts secret using the associated KMS CMK.\n if 'SecretString' in get_secret_value_response:\n return get_secret_value_response['SecretString']\n\n return None", "def _get_pwd_key_from_config():\n return b64decode(config['app']['auth']['pwd_key_secret'].encode())", "def access_key(self) -> Optional['outputs.AsymmetricEncryptedSecretResponse']:\n return pulumi.get(self, \"access_key\")", "def _compute_key(secret_key=None):\n if secret_key is None:\n secret_key = settings.SECRET_KEY\n if isinstance(secret_key, six.string_types):\n secret_key = secret_key.encode()\n return SHA256.new(bytearray(secret_key)).digest()", "def secret(self):\n return self._secret", "def api_secret_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_secret_key\")", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n return self._values.get('encryption_key')", "def get_key_secret():\n \n config = configparser.ConfigParser()\n config.read('dl.cfg')\n KEY = 
config['AWS']['AWS_ACCESS_KEY_ID']\n SECRET = config['AWS']['AWS_SECRET_ACCESS_KEY']\n return KEY, SECRET" ]
[ "0.7814786", "0.76241505", "0.75565207", "0.7548872", "0.7499285", "0.7499285", "0.7285755", "0.7270444", "0.7231887", "0.7223162", "0.7222836", "0.72057945", "0.71798694", "0.71600825", "0.71600825", "0.7023351", "0.6951352", "0.6951352", "0.69146353", "0.69061774", "0.6847062", "0.68206316", "0.681047", "0.6797327", "0.6771153", "0.67650485", "0.6752126", "0.67402893", "0.67394674", "0.67220235" ]
0.7893033
0
Find all pending accounts to be created in CFS.
def find_all_pending_accounts(cls): return cls.query.filter_by(status=CfsAccountStatus.PENDING.value).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pending_transactions(self):\n return self._call_account_method(\n 'pendingTransactions'\n )", "def get_pending_friendships(self):\n url = 'friendships/pending/'\n return self.send_request(url)", "def get_pending_registration_requests(self,user,site):\n\n return self.filter(project=site,\n user=user,\n status=RegistrationRequest.PENDING)", "def test_get_pending_users(self):\n pass", "def _generate_accounts(self):\n accounts = []\n auth_url = 'http://{}:5000/v3/'.format(self.host)\n\n for tenant, network in self.tenants:\n account = RwcalYang.CloudAccount.from_dict({\n 'name': 'rift.auto.openstack',\n 'account_type': 'openstack',\n 'openstack': {\n 'key': self.user or self._DEFAULT_USERNAME,\n 'secret': self._DEFAULT_PASSWORD,\n 'auth_url': auth_url,\n 'tenant': tenant,\n 'mgmt_network': network}})\n\n accounts.append(account)\n\n return accounts", "def list_accounts(self):\n pass", "def list_pending_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.reserved()", "def accounts(self):\n if self._accounts is None:\n url = f'{self._ynab.api_url}/budgets/{self.id}/accounts'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving accounts, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n self._accounts = [Account(self, account)\n for account in response.json().get('data', {}).get('accounts', [])]\n return self._accounts", "def fetchAllAccounts(config):\n allAccounts = []\n currentStart = 1\n currentLimit = 99\n while currentLimit > 98 :\n currentPull = fetchBatchAccounts(accountsConfig, currentStart, currentLimit)['data']\n allAccounts = allAccounts + currentPull\n currentLimit = int(len(currentPull))\n currentStart = int(currentStart) + int(currentLimit)\n return allAccounts", "def get_pending_transactions():\n\n return History.get_pending().get()", "def get_accounts(self):\n uri = '/credentials'\n response = gate_request(uri=uri)\n assert response.ok, 'Failed to get accounts: {0}'.format(response.text)\n\n all_accounts = response.json()\n self.log.debug('Accounts in Spinnaker:\\n%s', all_accounts)\n\n filtered_accounts = []\n for account in all_accounts:\n if account['type'] == self.provider:\n filtered_accounts.append(account)\n\n if not filtered_accounts:\n raise ForemastError('No Accounts matching {0}.'.format(self.provider))\n\n return filtered_accounts", "def accounts():", "def list_accounts(self, **kwargs):\r\n if 'mask' not in kwargs:\r\n items = [\r\n 'id',\r\n 'name',\r\n 'status',\r\n 'nodes',\r\n ]\r\n kwargs['mask'] = \"mask[%s]\" % ','.join(items)\r\n\r\n return self.client['Account'].getMessageQueueAccounts(**kwargs)", "def accounts(self):\n # get the summary data\n options = { 'PayLoadText' : self.request_xml() }\n\n print(self.url)\n print(options)\n\n response = requests.get(self.url, params=options) \\\n .content\n print(response)\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n self.security_token = xml_tree.find('ClientSecurityToken').text\n\n accounts = [ \n self.create_account(account)\n for account in xml_tree.iter('CardAccounts')\n ]\n\n return accounts", "def get_fedcm_account_list(self):\n pass", "def get_all_accounts():\n accounts = Account.query.all()\n print(accounts)\n return \"\"", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def get_accounts(self):\n\n\t\treturn self.__accounts", 
"def get_accounts(self):\n return self.accounts.all()", "def get_accounts(self):\r\n return self._accounts", "def accounts(self):\r\n return acc.Accounts(self)", "def get_accounts(self):\n return self.accounts", "def get_all_accounts_information(self):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_ENTITY_OWNER_SUDO_OPERATION, us.SERVER_COMMAND_GET_ALL_ACCOUNTS_INFORMATION)", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "def complete_charges():\n for charge in Charge.objects.filter(state=Charge.CREATED):\n charge.retrieve()", "def list_accounts():\n\n try:\n accounts = Account.query.all()\n except NoResultFound:\n print(f\"No account configured yet.\")\n return\n n_len = max([len(a.nickname) for a in accounts if a.nickname != 'no.name'])\n fmt = \"{nickname:\" + str(n_len) + \"s}: {email:s}\"\n #import pdb; pdb.set_trace()\n for acct in [acct for acct in accounts if acct.nickname != 'no.name']:\n print(fmt.format(nickname=acct.nickname, email=acct.email))\n return", "def users_groups_pending():\n if request.method == \"GET\":\n query = {\"token\": ciconnect_api_token, \"globus_id\": session[\"primary_identity\"]}\n # Get user info\n user = get_user_info(session)\n unix_name = user[\"metadata\"][\"unix_name\"]\n\n # Query user's pending project requests\n project_requests = get_user_pending_project_requests(unix_name)\n project_requests = [\n project_request\n for project_request in project_requests\n if session[\"url_host\"][\"unix_name\"] in project_request[\"name\"]\n ]\n # Check user status of root connect group\n connect_group = session[\"url_host\"][\"unix_name\"]\n user_status = get_user_connect_status(unix_name, connect_group)\n return render_template(\n \"users_groups_pending.html\",\n project_requests=project_requests,\n user_status=user_status,\n )", "def get_pending_orders(self):\n\n r = requests.get(build_api_call(self.base_url, ACCOUNTID, 'pending', ''), auth=HTTPBasicAuth(KEY, SECRET))\n\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'", "def GetAccountList(self):\n\t\treturn self.accounts.keys()", "def get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions" ]
[ "0.60277927", "0.59768397", "0.59724915", "0.59519696", "0.59498996", "0.59382385", "0.59188753", "0.58744895", "0.5854713", "0.58441377", "0.5843147", "0.5704515", "0.5589288", "0.5575046", "0.5569818", "0.55688405", "0.55530936", "0.55303377", "0.5524717", "0.55170417", "0.5515112", "0.54996985", "0.54768586", "0.54730326", "0.5466439", "0.54641896", "0.5444031", "0.5442261", "0.54291016", "0.54171574" ]
0.79221874
0
Create a MayaScene object.
def __init__(self, *args, **kwargs): super(MayaScene, self).__init__(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_scene(self):\n \n self.scene=soya.World()", "def create_scene(self, ):\n self.scene = create_scene(\n self.opt.splats_img_size, self.opt.splats_img_size, self.opt.fovy,\n self.opt.focal_length, self.opt.n_splats)", "def new_scene(force=True, **kwargs):\n\n pass", "def create_scene():\n create_floor()\n if config.M != \"\":\n if config.LEVEL == 1:\n create_wall()\n create_enemy()\n create_gap()\n create_platform()\n create_marijuana()\n create_star()\n create_fish()\n elif config.LEVEL == 2:\n create_boss()\n create_platform()\n create_star()", "def openMayaScene(self, *arg, **keys):\n mode = Mode(keys.get('show', None), keys.get('sequence', None))\n mayaSceneFile = keys.get(\"mayaSceneFile\")\n if not mayaSceneFile:\n recipePath = mode.get(Recipe.XML_FILE, keys)\n recipe = Recipe.recipeFromFile(recipePath)\n mayaSceneFile = recipe.getMayaFile()\n\n if not mayaSceneFile:\n return\n\n mayaCommand = mode.get(\"[mayaCommand]\", keys)\n mayaCommand += \" \" + mayaSceneFile + \"&\"\n OSUtils.run(mayaCommand)\n return", "def _create_scene(knx_module: XKNX, config: ConfigType) -> XknxScene:\n return XknxScene(\n knx_module,\n name=config[CONF_NAME],\n group_address=config[CONF_ADDRESS],\n scene_number=config[SceneSchema.CONF_SCENE_NUMBER],\n )", "def create_scene(self, name, nodes=None):\n new_scene = self._build_scene(nodes=nodes)\n self.scenes.append(new_scene)\n\n # Update scenes map\n self.scenes_map[name] = self._last_index(self.scenes)\n\n return self._last_index(self.scenes)", "def create_on_scene(self, name, **kwargs):\n monster = self.create(name, **kwargs)\n yield monster, self._level\n self._level.add(monster)", "async def create_scene(self, group_id: str, name: str) -> dict[str, Any]:\n return await self.gateway.request(\n \"post\",\n path=f\"/groups/{group_id}/scenes\",\n json={\"name\": name},\n )", "def make_multi_object_scene(self):\n multi1 = Scene3D()\n box = self.objects[0]\n box.set_location(1, 0, 0)\n box.set_size(0.4, 0.4, 0.1)\n multi1.add_object(box)\n\n box = self.objects[1]\n box.set_location(-1, 0, 0)\n multi1.add_object(box)\n\n self.scenes.append(multi1)", "def make_multi_object_scene(self):\n multi1 = Scene3D()\n box = self.objects[0]\n box.set_location(1, 0, 0)\n box.set_size(0.4, 0.4, 0.1)\n multi1.add_object(box)\n\n box = self.objects[1]\n box.set_location(-1, 0, 0)\n multi1.add_object(box)\n\n self.scenes.append(multi1)", "def _setup_scene(self):\n\n scene = bpy.context.scene\n\n bpy.ops.object.select_all(action=\"DESELECT\")\n\n # remove non mesh objects\n for obj in scene.objects:\n obj.select = (obj.type != \"MESH\")\n bpy.ops.object.delete()\n\n # empty sequences are false by default\n if scene.objects:\n\n # unlink objects (all meshes) from parents\n bpy.ops.object.select_all()\n bpy.ops.object.parent_clear(type=\"CLEAR_KEEP_TRANSFORM\")\n\n # join all meshes in one single object\n scene.objects.active = bpy.data.objects[0]\n bpy.ops.object.join()\n bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)\n bpy.context.object.name = \"Object\"\n bpy.context.object.dimensions = bpy.context.object.dimensions / max(bpy.context.object.dimensions)\n\n # set the origin of the object to the cursor location\n scene.cursor_location = [0, 0, 0]\n bpy.ops.object.origin_set(type=\"ORIGIN_CURSOR\")\n # bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\", center=\"BOUNDS\")\n bpy.ops.object.origin_set(type=\"ORIGIN_CENTER_OF_MASS\", center=\"BOUNDS\")\n\n if self.add_ground_plane:\n bpy.ops.mesh.primitive_plane_add(radius=10.)\n\n 
bpy.ops.object.select_all(action=\"DESELECT\")", "def new_scene(command):\n namespace = app.main(command)\n assert namespace.command == 'ns' or namespace.command == \"newscene\"\n assert namespace.name == \"test\"", "def __init__(self, scene): # type: (Scene) -> None\n self.scene = scene", "def create_scene(self, s):\n s = json.dumps(s.output())\n\n # URL-encoding. Vera not happy with Python's standard\n # URL-encoding.\n s = Vera.urlencode(s)\n \n payload = self.get('data_request?id=scene&action=create&json=%s' % s)\n return payload", "def __create_scene(self):\n\n print 'creating a scene'\n # create scenegraph by the ifgi scene parser\n _infilepath = '../../sampledata/cornel_box.ifgi'\n # _infilepath = '../../sampledata/one_tri_full.ifgi'\n ifgireader = IfgiSceneReader.IfgiSceneReader()\n if(not ifgireader.read(_infilepath)):\n raise StandardError, ('load file [' + _infilepath + '] failed.')\n\n # add a new scene\n # A ifgi file may have many cameras, but only default camera\n # is handled.\n cam_dict = ifgireader.camera_dict_dict['default']\n\n assert(self.__ifgi_cpp_render_core != None)\n self.__ifgi_cpp_render_core.create_scene(ifgireader.material_dict_list,\\\n ifgireader.geometry_dict_list,\\\n cam_dict)\n # check the camera correctly pushed\n # print cam_dict\n # dir(ifgi_cpp_render_core)\n # ret_cam_dict = ifgi_cpp_render_core.get_camera_pydict()\n # print ret_cam_dict\n\n # self.__scenegraph.update_all_bbox()\n # -- now all primitive (TriMesh) can look up the material\n\n # # added RGBA buffer and Hit buffer to the current camera.\n # imgsz = (self.__image_xsize, self.__image_ysize, 4)\n # cur_cam.set_film('RGBA', Film.ImageFilm(imgsz, 'RGBA'))\n # # cur_cam.print_obj()", "def make_simple_scenes(self):\n box1 = self.make_box(1, Color(1, 0, 1))\n self.objects.append(box1)\n\n box1Scene = Scene3D()\n box1Scene.add_object(box1)\n self.scenes.append(box1Scene)\n\n box2 = self.make_box(0.25, Color(0, 1, 1))\n box2.set_rotate(45, 0, 0, 1)\n self.objects.append(box2)\n\n box2Scene = Scene3D()\n box2Scene.add_object(box2)\n self.scenes.append(box2Scene)\n\n sp = self.make_ball(0.5, Color(0.8, 0.8, 0))\n self.objects.append(sp)\n\n ballScene = Scene3D()\n ballScene.add_object(sp)\n self.scenes.append(ballScene)", "def __init__(self, scene: Scene):\n self.scene = scene", "def set_up_scenes():\n cmd.zoom('Cathepsin', 10) # Zoom out to get a view on the whole complex\n cmd.scene('001', 'store', message='This is the first scene with a view on the complex!')\n cmd.set_view(closeup) # Get a close-up of the ligand by using the manually chosen viewpoint\n cmd.scene('002', 'store', message='This is the second scene with a close-up on the ligand!')", "def createScene(self):\n # create the graphics scene\n self.scene = QGraphicsScene()\n\n # set the background color\n self.setBackgroundBrush(QBrush(self.backgroundColor))\n\n # create the image pixmap\n self.imgPixmap = QPixmap()\n\n # add the pixmap to the scene and get the reference to the QGraphicsPixmapItem\n self.imgPixmapItem = self.scene.addPixmap(self.imgPixmap)\n\n # set our scene...\n self.setScene(self.scene)\n self.sceneItems = []", "def make_simple_scenes(self):\n clown = Clown()\n clown.set_location( 0, 0, 0 )\n clown.set_size( 1, 1, 1 )\n self.objects.append( clown )\n\n clown1Scene = Scene3D()\n clown1Scene.add_object( clown )\n self.scenes.append( clown1Scene )\n\n head = Head()\n head.set_location( 0, 0, 0 )\n head.set_size( 1.5, 1.5, 1.5 )\n self.objects.append( head )\n\n headScene = Scene3D()\n headScene.add_object( head )\n 
self.scenes.append( headScene )\n\n hat = Hat()\n hat.set_location( 0, 0, 0 )\n hat.set_size( 1.5, 1.5, 1.5 )\n self.objects.append( hat )\n\n hat1Scene = Scene3D()\n hat1Scene.add_object( hat )\n self.scenes.append( hat1Scene )\n\n eye = Eye()\n eye.set_color(1, 0, 0)\n eye.set_location(0, 1, 1)\n eye.set_size(1.3, 1.3, 1.3)\n eye.set_rotate( 45, 1, 0, 0 )\n self.objects.append( eye )\n\n eye1Scene = Scene3D()\n eye1Scene.add_object( eye )\n self.scenes.append( eye1Scene )\n\n donut = Donut()\n donut.set_color(1, 0, 1 )\n donut.set_location( 0, 0, 0 )\n donut.set_size( 2.0, 2.0, 2.0 )\n donut.set_rotate( 45, 0, 1, 0)\n self.objects.append( donut )\n\n donut1Scene = Scene3D()\n donut1Scene.add_object( donut )\n self.scenes.append( donut1Scene )\n\n cone = Cone()\n cone.set_color( 1, 0, 1 )\n cone.set_location( 0, 0, 0 )\n cone.set_size( 2.0, 2.0, 2.0 )\n self.objects.append( cone )\n\n cone1Scene = Scene3D()\n cone1Scene.add_object( cone )\n self.scenes.append( cone1Scene )\n\n box1 = self.make_box(1, Color(1, 0, 1))\n self.objects.append( box1 )\n\n box1Scene = Scene3D()\n box1Scene.add_object( box1 )\n self.scenes.append( box1Scene )\n\n box2 = self.make_box( 1, Color(0, 1, 1 ))\n box2.set_rotate( 45, 0, 0, 1 )\n box2.set_size(2.0, 2.0, 2.0)\n self.objects.append( box2 )\n\n box2Scene = Scene3D()\n box2Scene.add_object( box2 )\n self.scenes.append( box2Scene )\n\n sp = self.make_ball(1, Color(0.8, 0.8, 0))\n sp.set_size(2.0, 2.0, 2.0)\n self.objects.append( sp )\n\n ballScene = Scene3D()\n ballScene.add_object( sp )\n self.scenes.append( ballScene )", "def scene(self):\n geometry = {}\n for name, link in self.links.items():\n geometry.update(link.geometry)\n\n base_frame = self.base_frame\n graph = trimesh.scene.transforms.SceneGraph()\n graph.from_edgelist([(base_frame, geom_name,\n {'geometry': geom_name})\n for geom_name in geometry.keys()])\n graph.update(frame_from=graph.base_frame, frame_to=base_frame)\n\n scene = trimesh.Scene(geometry, graph=graph)\n return scene", "def scene_fixture(scene_factory):\n return scene_factory('Test Scene')", "def __init__(self, *args, **kwargs):\n super(Scene, self).__init__(*args, **kwargs)\n\n self.setVar('category', 'scene')", "def from_scene(cls, key: str, class_id: int = 0) -> 'Mesh':\n\t\tobj = bpy.data.objects[key]\n\t\treturn cls(obj, class_id=class_id)", "def create_scene(self):\n\n c = config.Colors.background\n gr3.setbackgroundcolor(c[0], c[1], c[2], 1.0)\n gr3.clear()\n\n if self.results is None:\n return\n\n show_domains = self.settings.show_domains\n show_surface_cavities = self.settings.show_surface_cavities\n show_center_cavities = self.settings.show_center_cavities\n if show_center_cavities and self.results.center_cavities is not None:\n show_surface_cavities = False\n elif show_surface_cavities and self.results.surface_cavities is not None:\n show_domains = False\n\n self.objectids = [None]\n edges = self.results.atoms.volume.edges\n num_edges = len(edges)\n edge_positions = [edge[0] for edge in edges]\n edge_directions = [[edge[1][i]-edge[0][i] for i in range(3)] for edge in edges]\n edge_lengths = [sum([c*c for c in edge])**0.5 for edge in edge_directions]\n edge_radius = min(edge_lengths)/200\n if self.settings.show_bounding_box:\n gr3.drawcylindermesh(num_edges, edge_positions, edge_directions,\n [config.Colors.bounding_box]*num_edges,\n [edge_radius]*num_edges, edge_lengths)\n corners = list(set([tuple(edge[0]) for edge in edges] + [tuple(edge[1]) for edge in edges]))\n num_corners = len(corners)\n 
gr3.drawspheremesh(num_corners, corners,\n [config.Colors.bounding_box]*num_corners,\n [edge_radius]*num_corners)\n\n if self.settings.show_atoms and self.results.atoms is not None:\n visible_atom_indices = self.settings.visible_atom_indices\n if visible_atom_indices is not None:\n visible_atom_indices = [comp for comp in visible_atom_indices if 0 <= comp < self.results.atoms.number]\n else:\n visible_atom_indices = range(self.results.atoms.number)\n if len(visible_atom_indices) == 0:\n visible_atom_indices = None\n if visible_atom_indices is not None:\n visible_atom_indices = np.array(visible_atom_indices)\n gr3.drawspheremesh(len(visible_atom_indices),\n self.results.atoms.positions[visible_atom_indices],\n self.results.atoms.colors[visible_atom_indices],\n np.ones(len(visible_atom_indices))*config.OpenGL.atom_radius)\n if self.settings.show_bonds:\n bonds = self.results.atoms.bonds\n for start_index, target_indices in enumerate(bonds):\n if start_index not in visible_atom_indices:\n continue\n target_indices = np.array([i for i in target_indices if i in visible_atom_indices])\n if len(target_indices) == 0:\n continue\n start_position = self.results.atoms.positions[start_index]\n target_positions = self.results.atoms.positions[target_indices]\n directions = target_positions - start_position\n bond_lengths = la.norm(directions, axis=1)\n directions /= bond_lengths.reshape(len(directions), 1)\n gr3.drawcylindermesh(len(target_indices),\n target_positions,\n -directions,\n [config.Colors.bonds] * self.results.atoms.number,\n np.ones(bond_lengths.shape)*config.OpenGL.bond_radius,\n bond_lengths)\n\n if self.results is None:\n return\n if show_domains and self.results.domains is not None:\n self.draw_cavities(self.results.domains,\n config.Colors.domain, 'domain',\n self.settings.visible_domain_indices)\n if show_surface_cavities and self.results.surface_cavities is not None:\n self.draw_cavities(self.results.surface_cavities,\n config.Colors.surface_cavity, 'surface cavity',\n self.settings.visible_surface_cavity_indices)\n if show_center_cavities and self.results.center_cavities is not None:\n self.draw_cavities(self.results.center_cavities,\n config.Colors.center_cavity, 'center cavity',\n self.settings.visible_center_cavity_indices)", "def parse(vera, s):\n\n sd = SceneDefinition()\n \n sd.name = s[\"name\"]\n\n for i in s[\"triggers\"]:\n sd.triggers.append(Trigger.parse(vera, i))\n\n if s.has_key(\"timers\"):\n for i in s[\"timers\"]:\n sd.timers.append(Timer.parse(i))\n\n if s.has_key(\"groups\"):\n for i in s[\"groups\"]:\n sd.actions.append(Group.parse(vera, i))\n\n if s.has_key(\"room\"):\n if s[\"room\"] == 0:\n sd.room = None\n else:\n sd.room = vera.get_room_by_id(s[\"room\"])\n\n if s.has_key(\"modeStatus\"):\n sd.modes = Modes.parse(vera, s[\"modeStatus\"])\n\n return sd", "def __init__(self, scene, new_scene, new_name=None, renderer=None):\n self.original_scene = scene\n\n # saves current states to restore in the end\n self.original_use_simplify = scene.render.use_simplify\n self.original_simplify_subdivision = scene.render.simplify_subdivision\n self.original_layers = list(scene.layers)\n\n if new_scene:\n self.name = self.copy_scene(scene, new_name, renderer)\n\n else:\n if new_name is not None:\n scene.name = new_name\n self.name = scene.name\n\n if renderer is not None:\n scene.render.engine = renderer\n \n self.renderengine = self.get_scene().render.engine", "def load(self) -> Scene:\n self.path = self.find_scene(self.meta.path)\n if not self.path:\n raise 
ImproperlyConfigured(\"Scene '{}' not found\".format(self.meta.path))\n\n self.scene = Scene(self.path)\n\n # Load gltf json file\n if self.path.suffix == \".gltf\":\n self.load_gltf()\n\n # Load binary gltf file\n if self.path.suffix == \".glb\":\n self.load_glb()\n\n self.gltf.check_version()\n self.gltf.check_extensions(self.supported_extensions)\n self.load_images()\n self.load_samplers()\n self.load_textures()\n self.load_materials()\n self.load_meshes()\n self.load_nodes()\n\n self.scene.calc_scene_bbox()\n self.scene.prepare()\n\n return self.scene", "def createObject(self, *args):\n return _libsbml.MultiASTPlugin_createObject(self, *args)" ]
[ "0.73323196", "0.67833483", "0.6756612", "0.672581", "0.6613683", "0.6521628", "0.6344319", "0.63363475", "0.6316412", "0.63114274", "0.63114274", "0.62943345", "0.6286045", "0.62190175", "0.62152356", "0.61051285", "0.61049014", "0.61027217", "0.6061932", "0.60470086", "0.5941756", "0.592929", "0.5898748", "0.5738173", "0.5704207", "0.56910354", "0.5631625", "0.56154555", "0.5540374", "0.5538979" ]
0.68939376
1
Test if the path holder contains a Maya scene.
def test(cls, pathHolder, parentCrawler):
    if not super(Scene, cls).test(pathHolder, parentCrawler):
        return False

    return pathHolder.ext() in cls.extensions()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_scene_open(self):\n return self._engine.current_file_path() is not None", "def checkScene ( doc_id ):\n if cmds.objExists ( \"root\" ) :\n \n self.labelStatus.setText ( \"You shouldn't have any named 'root' node in your scene\" )\n return False \n \n return True", "def is_maya():\n\n return 'cmds' in main.__dict__", "def scene_is_modified():\n\n pass", "def isMayaFile(potentialMayaFile):\n\n pass", "def in_maya():\n return \"maya.bin\" in sys.argv[0]", "def is_valid_animation(path, verbose=True):\n try:\n if \"idle\" in os.listdir(path) or \"transition\" in os.listdir(path):\n return True\n else:\n if verbose:\n print(path, \"is not a valid animation folder! It needs an /idle or /transition folder!\")\n return False\n except:\n return False", "def is_study(self) -> bool:\n return self._path_exist()", "def is_rig(rig_node):\n\n if not cmds.objExists(rig_node) or (\n cmds.objExists(\"{}_cache\".format(rig_node))):\n return False\n\n return True", "def exists(self):\n try:\n self.world.find(self.ehandle)\n except KeyError:\n return False\n else:\n return True", "def has_scn_quicklook(self, unq_id):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scene.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id).one()\n scn_json = query_result.ExtendedInfo\n ses.close()\n logger.debug(\"Closed the database session.\")\n\n quicklook_calcd = False\n if scn_json is not None:\n json_parse_helper = eodatadown.eodatadownutils.EDDJSONParseHelper()\n quicklook_calcd = json_parse_helper.doesPathExist(scn_json, [\"quicklook\"])\n return quicklook_calcd", "def explore_maya_project():\n proj_dir = cmds.workspace(rd=True, q=True)\n subprocess.Popen(r'explorer /select,\"{}scenes\"'.format(proj_dir.replace(\"/\", \"\\\\\")))\n LOG.info('Exploring to %s'.format(proj_dir))", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def _check_integrity(self):\n root = self.root\n for scene_name in self.scene_list:\n if not(os.path.isdir(os.path.join(root,scene_name)) and \n os.path.isdir(os.path.join(root,scene_name, images_dir)) and\n os.path.isfile(os.path.join(root,scene_name,annotation_filename))):\n return False\n return True", "def has_scn_tilecache(self, unq_id):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scene.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id).one()\n scn_json = query_result.ExtendedInfo\n ses.close()\n logger.debug(\"Closed the database session.\")\n\n tile_cache_calcd = False\n if scn_json is not None:\n json_parse_helper = eodatadown.eodatadownutils.EDDJSONParseHelper()\n tile_cache_calcd = json_parse_helper.doesPathExist(scn_json, [\"tilecache\"])\n return tile_cache_calcd", "def check_if_anim_exist(name, ext=vext, figpath=figpath):\n return not(os.path.isfile(format_filename(name, ext, figpath)))", "def is_saved(self):\n return self.get_scene_path() != '' and not 
self._document.GetChanged()", "def isSameKindAs(self, *args):\n return _osgAnimation.BasicAnimationManager_isSameKindAs(self, *args)", "def isSceneModified(self):\n logger.debug(\"Func: isSceneModified\")\n return nuke.modified()", "def is_root(self):\n return self.parent == None", "def _select_scene_objects(self):\n\n # Skip if the selection signals have been blocked\n if self.selectSignalBlocked:\n return\n\n # Get the tree selected items which exist inside the maya scene\n selected_items = [x.data(1, QtCore.Qt.UserRole)\n for x in self.selectedItems()\n if utils.object_exists(str(x.text(0)))] or None\n\n # If not valid items selected we pass\n if selected_items is None:\n return\n\n # Select the maya scene objects\n utils.select_objects(selected_items)\n\n return None", "def _resolve_scene(self, name):\n all_scenes = self.__try_to_get(self.bridge.scenes)\n if not all_scenes:\n return None\n for scene in self.bridge.scenes:\n if scene.name == name:\n return scene\n return None", "def isSameKindAs(self, *args):\n return _osgAnimation.Skeleton_isSameKindAs(self, *args)", "def testMazeExists(self):\n\n pass", "def isSameKindAs(self, *args):\n return _osgAnimation.Animation_isSameKindAs(self, *args)", "def openMayaScene(self, *arg, **keys):\n mode = Mode(keys.get('show', None), keys.get('sequence', None))\n mayaSceneFile = keys.get(\"mayaSceneFile\")\n if not mayaSceneFile:\n recipePath = mode.get(Recipe.XML_FILE, keys)\n recipe = Recipe.recipeFromFile(recipePath)\n mayaSceneFile = recipe.getMayaFile()\n\n if not mayaSceneFile:\n return\n\n mayaCommand = mode.get(\"[mayaCommand]\", keys)\n mayaCommand += \" \" + mayaSceneFile + \"&\"\n OSUtils.run(mayaCommand)\n return", "def testMazeExists(self):\n pass", "def is_mesh(node):\n try:\n mesh = attach_mesh(node)\n except TypeError:\n return False\n else:\n return True", "def is_resource_node(self):\n return self.camera is not None or self.mesh is not None", "def is_root(self):\n return self._parent == None" ]
[ "0.6938421", "0.66223943", "0.605499", "0.5837007", "0.5764086", "0.5731431", "0.5718635", "0.57006675", "0.5642447", "0.5609292", "0.55125946", "0.55025", "0.54665756", "0.54665756", "0.5446389", "0.53687525", "0.53429943", "0.5323305", "0.53167343", "0.52955806", "0.5281899", "0.52554226", "0.5254518", "0.52436686", "0.52399176", "0.5220473", "0.52119035", "0.5206814", "0.5200592", "0.51961917" ]
0.6686738
1
Initializing a ball object
def __init__(self, x = 140, y = 140):
    super(Ball, self).__init__(image = Ball.image, x = 600, y = 240, dx = -3, dy = 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\n super(Ball, self).__init__(*args, **kwargs)\n self.speed = kwargs.get('speed', 5)\n self.ball_image = pyglet.image.load(os.path.join(config.ASSETS_DIR, 'ball.png'))\n self.width = self.ball_image.width\n self.height = self.ball_image.height\n self.ball_sprite = pyglet.sprite.Sprite(self.ball_image, self.x, self.y)\n self.x_direction = 1\n self.y_direction = 1\n\n print('Ball Created')", "def __init__(self, pos=(0, 0)):\n super().__init__() # Call 'turtle' initiation\n self.penup() # Stop displaying trail\n self.shapesize(stretch_wid=1, stretch_len=1) # Set dimensions of ball object to same height and width\n self.color(\"white\") # Set colour to white\n self.shape(\"circle\") # Set ball shape to round\n self.setpos(pos) # Move ball to desired position on screen\n self.x_dir = 1 # Set ball horizontal movement to right\n self.y_dir = 1 # Set ball vertical movement to up", "def __init__(self, posn_x, posn_y, velocity_x, velocity_y, kula): \n self.posn_x = posn_x # x position of box containing the ball (bottom). \n self.posn_y = posn_y # x position of box containing the ball (left edge). \n self.velocity_x = velocity_x # amount of x-movement each cycle of the 'for' loop. \n self.velocity_y = 100.0 # amount of y-movement each cycle of the 'for' loop. \n self.color = kula # color of the ball \n\n self.ball_width = 20.0 # size of ball - width (x-dimension). \n self.ball_height = 20.0 # size of ball - height (y-dimension). \n self.coef_restitution = 0.90", "def make_ball():\n ball = Ball()\n # Starting position of the ball.\n # Take into account the ball size so we don't spawn on the edge.\n ball.x = random.randrange(BALL_SIZE, SCREEN_WIDTH - BALL_SIZE)\n ball.y = random.randrange(BALL_SIZE, SCREEN_HEIGHT - BALL_SIZE)\n\n # Speed and direction of rectangle\n ball.change_x = random.randrange(-2, 2)\n ball.change_y = random.randrange(-2, 2)\n\n return ball", "def __init__(self):\r\n self.radius = BALL_RADIUS\r\n self.center_x = BALL_START_X\r\n self.center_y = BALL_START_Y\r\n self.velocity = BALL_SPEED\r\n self.angle = - math.pi / 2\r\n self.rectangle = pygame.Rect(self.center_x - self.radius, self.center_y - self.radius, 2 * self.radius, 2 * self.radius)\r\n self.color = \"white\"\r\n self.save_pos = (self.center_x, self.center_y)", "def __init__(self, screen_Size, paddle_Width):\r\n self.screen_Width, self.screen_Height = screen_Size\r\n\r\n # Setup x,y limits for ball position\r\n self.left_x = paddle_Width\r\n self.right_x = self.screen_Width - paddle_Width\r\n self.top_y = self.Radius\r\n self.bot_y = self.screen_Height - self.Radius\r\n\r\n self.x = self.screen_Width//2\r\n self.y = np.random.randint(self.Radius, self.screen_Height-self.Radius)\r\n\r\n self.vx = np.random.choice([-1, 1]) * np.random.randint(25, 30)\r\n self.vy = np.random.choice([-1, 1]) * np.random.randint(25, 30)\r\n\r\n # Ralley counter to see game progress\r\n self.rallies = 0", "def create_ball():\n balls.append(gen_ball())\n generate_velocity(balls)", "def __init__(self):\n #random.uniform(1, 10) = random float values for x coordinate to make sure ball spawns on left edge of screen with random values\n #random.unform(1, 330) = 1-330 was chosen to make sure the ball can spawn randomly either below or on top of left edge of the screen\n self.x = random.uniform(1, 10)\n self.y = random.uniform(1, 330)", "def __init__(self, width, height):\n super().__init__(width, height)\n\n self.ball = Ball()\n self.paddle = Paddle()\n self.score = 0\n\n # These are used to see if the user is\n # 
holding down the arrow keys\n self.holding_left = False\n self.holding_right = False\n\n arcade.set_background_color(arcade.color.WHITE)", "def make_ball(id):\n ball = Ball()\n\n ball.id = id\n\n # Size of the ball\n # ball.size = random.randrange(10, 30)\n ball.size = 10\n\n # Starting position of the ball.\n # Take into account the ball size so we don't spawn on the edge.\n ball.x = random.randrange(ball.size, WINDOW_WIDTH - ball.size)\n ball.y = random.randrange(ball.size, WINDOW_HEIGHT - ball.size)\n\n # Speed and direction\n ball.speed(DEFAULT_SPEED)\n\n # Color\n ball.color = (0, 0, random.randrange(128,256))\n\n return ball", "def __init__(self,x_pos, y_pos, velocity, kind, fillcolor = 'red'):\n self._velocity = velocity\n self._kind = kind\n super().__init__(x = x_pos, y=y_pos, width = BOLT_WIDTH, \\\n height = BOLT_HEIGHT, fillcolor=fillcolor)", "def __init__(self):\n \n self._wall = BrickWall() \n self._paddle = GRectangle(\n x=GAME_WIDTH/2 - PADDLE_WIDTH/2,\n y=PADDLE_OFFSET,\n width=PADDLE_WIDTH,\n height=PADDLE_HEIGHT,\n fillcolor = PADDLE_COLOR)\n self._clickdist = 0\n self._ball = Ball() \n self._last = None\n self._tries = 2\n self._lostlife = False", "def __init__(self, screen):\n self.screen = screen\n\n #Load the ball image\n self.image = pygame.image.load('Images/ball.bmp')\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n\n #Start ball in center\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.centerx", "def __init__(self, *args, **kwargs):\n super().__init__(**kwargs)\n self.speed = kwargs.get('speed', 5)\n\n # Loading image file\n self.ball_image = image.load(config.resources_path + 'ball.png')\n self.width = self.ball_image.width\n self.height = self.ball_image.height\n self.ball_sprite = sprite.Sprite(self.ball_image, self.x, self.y)\n\n self.ball_image.rotation = randint(0, 360) # Rotates the sprite\n self.ball_image.scale = uniform(0.5, 2)\n\n self.x_direction = 1 # 1 for + axis direction\n self.y_direction = 1", "def __init__(self, screen_Size, p1_Type, p2_Type, action_Space):\r\n self.screen_Width, self.screen_Height = screen_Size\r\n\r\n # Initialize game objects\r\n self.paddle_1 = Paddle(screen_Size, p1_Type, 1, action_Space)\r\n self.paddle_2 = Paddle(screen_Size, p2_Type, 2, action_Space)\r\n self.ball = Ball(screen_Size, self.paddle_1.Width)", "def __init__(self):\r\n #use ball image, place in center of screen, give x and y velocity\r\n #and add a score.\r\n super(Ball, self).__init__(image = Ball.ball,\r\n x = games.screen.width/2,\r\n y = games.screen.height/2,\r\n dy = 2,\r\n dx = 2)\r\n self.score = games.Text(0,\r\n size = 30, color = color.red,\r\n x = games.screen.width-10,\r\n y = 15,\r\n is_collideable = False)\r\n games.screen.add(self.score)", "def __init__(self, home_robots=[], away_robots=[], ball=Ball(75, 65)):\n \n self.home_robots = home_robots\n self.away_robots = away_robots\n self.ball = ball\n self.field = Field(150, 130)\n\n self.status = 0\n self.home_goals = 0\n self.away_goals = 0", "def init():\n global balls, super_balls\n\n balls = [gen_ball() for _ in range(number_of_balls)]\n super_balls = []\n generate_velocity_all_balls()", "def make_ball(canvas):\n random_x = random.randint(0, canvas.get_canvas_width() - 2 * BALL_RADIUS)\n random_y = random.randint(0, canvas.get_canvas_height() - 2 * BALL_RADIUS)\n ball = canvas.create_oval(random_x, random_y, random_x + 2 * BALL_RADIUS, random_y + 2 * BALL_RADIUS)\n canvas.set_color(ball, 'blue')\n return ball", "def 
__init__(self,size):\n self.bricks = []\n self.size = size\n for x in range(self.BRICK_GAPS[0],self.size[0]-(self.BRICK_SIZE[0]+self.BRICK_GAPS[0]),self.BRICK_SIZE[0]+self.BRICK_GAPS[0]):\n for y in range(self.BRICK_GAPS[1],int(self.size[1]/2.0),self.BRICK_SIZE[1]+self.BRICK_GAPS[1]):\n brick_color = pygame.Color(random.randrange(0,256),random.randrange(0,256),random.randrange(0,256))\n new_brick = Brick(x,y,self.BRICK_SIZE[0],self.BRICK_SIZE[1],brick_color)\n self.bricks.append(new_brick)\n self.paddle = Paddle(self.size[0]/2.0,self.size[1]-40.0,100,20)\n self.ball = Ball(self.paddle.x+self.paddle.width/2.0,self.paddle.y - (self.BALL_RADIUS+10),self.BALL_RADIUS,0.0,-1.0)", "def spawn_ball(direction):\n global ball_pos, ball_vel \n ball_pos = [WIDTH / 2, HEIGHT / 2]\n ball_vel = ball_generate_velocity(direction) # Ball velocity randomization ", "def __init__(self,circlePos,circleRad,circleVel):\n self.circlePos=circlePos\n self.circleRad=circleRad\n self.circleVel=circleVel", "def __init__(self, x_coor, x_speed, y_coor, y_speed, direction):\n self.__x_coor = x_coor\n self.__x_speed = x_speed\n self.__y_coor = y_coor\n self.__y_speed = y_speed\n self.__direction = direction\n self.__radius = self.TORPEDO_RADIUS", "def __init__(self,Number=100,ContainerRadius=60e-10,BallRadius=53e-12,temperature=2980,ballmass=1.67e-27,speed=1012):\r\n self.__cont=Container(ContainerRadius)\r\n self.__ContainerRad=ContainerRadius\r\n self.__ballList=[]\r\n self.__relativetemp=(temperature/298)\r\n self._bmass=ballmass\r\n randx=[np.sqrt(self.__relativetemp)*k*np.sqrt(speed) for k in np.random.normal(0,50,Number)]\r\n randy=[np.sqrt(self.__relativetemp)*k*np.sqrt(speed) for k in np.random.normal(0,50,Number)]\r\n \r\n r=BallRadius\r\n n=1\r\n Angle=0\r\n for i in range(Number):\r\n self.__ballList.append(Ball(ballmass,BallRadius,[r*np.cos(Angle),r*np.sin(Angle)],[randx[i],randy[i]]))\r\n circumference=np.pi*2*r\r\n distAngle=(np.pi*2)/(circumference/(3*BallRadius))\r\n Angle+=distAngle\r\n if (Angle+distAngle)>2*np.pi:\r\n r+=3*BallRadius\r\n n+=1\r\n Angle=0\r\n scale=self.__ContainerRad/(n*3*BallRadius)\r\n for ball in self.__ballList:\r\n ball.scalepos(scale)\r\n \r\n self.__text0 = None", "def __init__(self):\n self.x_coord = default_init\n self.y_coord = default_init\n self._init_random_coord() # generating random coordinates\n self.x_speed = default_init\n self.y_speed = default_init\n self.degrees = default_init\n self.radius = ship_def_radius", "def create_ball_sprite(x,y):\n ball_img = pyglet.resource.image('football100.png')\n ball_img.anchor_x = ball_img.width/2\n ball_img.anchor_y = ball_img.height/2\n\n mass = 10\n radius = ball_img.width/2\n\n ball_sprite = pyglet.sprite.Sprite(ball_img)\n ball_sprite.body = pymunk.Body(mass, pymunk.moment_for_circle(mass, 0, radius))\n ball_sprite.body.position = x, y\n ball_sprite.body.angle = random.random() * math.pi\n ball_sprite.shape = pymunk.Circle(ball_sprite.body, radius)\n ball_sprite.shape.friction = 0.5\n ball_sprite.shape.elasticity = 0.9\n\n return ball_sprite", "def __init__(self):\n self.screen_width = 1200\n self.screen_height = 800\n self.bg_color = (0, 230, 0)\n\n # Glove Settings\n self.glove_move_speed = 0.25\n self.glove_size = 100\n\n # Ball Settings\n self.ball_move_speed = 0.25\n self.ball_size = 40", "def __init__(self, BridgeObj, speed=0.005):\n self.speed = speed\n self.BridgeObj = BridgeObj\n print(\"[RainbowAll] Mode Initialized. 
Speed : \" + str(speed))", "def serveBall(self):\n self._ball = Ball(GAME_WIDTH/2, BALL_HEIGHT, BALL_VY, BALL_DIAMETER, colormodel.BLUE)\n self.draw()", "def __init__(self, x, y, dx, dy):\r\n super(pong2, self).__init__(image=pong2.bball, x=x, y=y, dx=dx, dy=dy)\r\n self.dx1=self.dx\r\n self.dy1=self.dy\r\n self.dx, self.dy=0, 0\r\n self.count=1\r\n self.picture=0" ]
[ "0.7679933", "0.7565953", "0.747588", "0.72836137", "0.7182046", "0.705327", "0.70508504", "0.69490194", "0.69460386", "0.69447124", "0.68870175", "0.683677", "0.6823838", "0.6812778", "0.6801902", "0.67790073", "0.6765068", "0.6759668", "0.674916", "0.6742566", "0.67322433", "0.6664632", "0.6616625", "0.6536399", "0.6503375", "0.6497571", "0.64923936", "0.64886904", "0.6485081", "0.6471847" ]
0.77236575
0
Creates a frame worker with the given arguments and then runs it. The queues are assumed to be ZeroMQ queues which are serialized. The error file is not passed to the FrameWorker, but is instead where errors are stored if one occurs.
def frame_worker_target(img_queue, rec_queue, send_queue, frame_gen, ms_per_frame, error_file):
    img_queue = ZeroMQQueue.deser(img_queue)
    rec_queue = ZeroMQQueue.deser(rec_queue)
    send_queue = ZeroMQQueue.deser(send_queue)
    try:
        FrameWorker(img_queue, rec_queue, send_queue, frame_gen, ms_per_frame).do_all()
    except:
        traceback.print_exc()
        with open(error_file, 'w') as outfile:
            traceback.print_exc(file=outfile)
        raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _worker(self, args):\n pass", "def main_loop():\n\n editor = 'FrameEditorEmpty'\n merge = 'FrameMergerFirst'\n editorparams = ''\n mergerparams = ''\n framesrcparams = 'localhost:5005'\n framedstparams = 'localhost:5005'\n framesource = 'CameraFrameGenerator'\n framesdestination = 'FrameSinkShower'\n\n if '-framesource' in sys.argv:\n framesource = sys.argv[sys.argv.index('-framesource') + 1]\n if len(sys.argv) > sys.argv.index('-framesource') + 2 \\\n and sys.argv[sys.argv.index('-framesource') + 2][0] != '-':\n framesrcparams = sys.argv[sys.argv.index('-framesource') + 2]\n\n if '-framedestination' in sys.argv:\n framesdestination = sys.argv[sys.argv.index('-framedestination') + 1]\n if len(sys.argv) > sys.argv.index('-framedestination') + 2 \\\n and sys.argv[sys.argv.index('-framedestination') + 2][0] != '-':\n framedstparams = sys.argv[sys.argv.index('-framedestination') + 2]\n\n if '-editor' in sys.argv:\n editor = sys.argv[sys.argv.index('-editor') + 1]\n\n if '-merge' in sys.argv:\n merge = sys.argv[sys.argv.index('-merge') + 1]\n\n if '-editorparams' in sys.argv:\n editorparams = sys.argv[sys.argv.index('-editorparams') + 1]\n\n if '-mergerparams' in sys.argv:\n mergerparams = sys.argv[sys.argv.index('-mergerparams') + 1]\n\n # print \"From %s:%s to %s:%s, edit by %s\" % (fromhost, _from, tohost, _to, editor)\n\n frameEditor = eval(editor)(editorparams)\n frameMerger = eval(merge)(mergerparams)\n framesSrc = eval(framesource)(framesrcparams)\n framesDst = eval(framesdestination)(framedstparams)\n\n receive_and_sink_video(framesSrc=framesSrc, framesDst=framesDst, frameEditor=frameEditor, frameMerger=frameMerger)", "def run_job(in_args=sys.argv[1:]):\n print '>>>> condor_worker.py logging:'\n proc = Popen(['hostname', '-f'], stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n if err == '':\n print 'Running on', out\n else:\n raise RuntimeError(err)\n\n parser = WorkerArgParser(description=__doc__)\n args = parser.parse_args(in_args)\n print 'Args:'\n print args\n\n # Make sandbox area to avoid names clashing, and stop auto transfer\n # back to submission node\n # -------------------------------------------------------------------------\n tmp_dir = 'scratch'\n os.mkdir(tmp_dir)\n os.chdir(tmp_dir)\n try:\n # Copy files to worker node area from /users, /hdfs, /storage, etc.\n # ---------------------------------------------------------------------\n if args.copyToLocal:\n print 'PRE EXECUTION: Copy to local:'\n for (source, dest) in args.copyToLocal:\n print source, dest\n if source.startswith('/hdfs'):\n source = source.replace('/hdfs', '')\n check_call(['hadoop', 'fs', '-copyToLocal', source, dest])\n else:\n if os.path.isfile(source):\n shutil.copy2(source, dest)\n elif os.path.isdir(source):\n shutil.copytree(source, dest)\n\n print 'In current dir:'\n print os.listdir(os.getcwd())\n\n # Do setup of programs & libs, and run the program\n # We have to do this in one step to avoid different-shell-weirdness,\n # since env vars don't necessarily get carried over.\n # ---------------------------------------------------------------------\n print 'SETUP AND EXECUTION'\n setup_cmd = ''\n if args.setup:\n os.chmod(args.setup, 0555)\n setup_cmd = 'source ./' + args.setup + ' && '\n\n if os.path.isfile(os.path.basename(args.exe)):\n os.chmod(os.path.basename(args.exe), 0555)\n\n # run_cmd = args.exe\n\n # If it's a local file, we need to do ./ for some reason...\n # But we must determine this AFTER running setup script,\n # can't do it beforehand\n run_cmd = \"if [[ -e {exe} 
]];then ./{exe} {args};else {exe} {args};fi\"\n run_args = ' '.join(args.args) if args.args else ''\n run_cmd = run_cmd.format(exe=args.exe, args=run_args)\n print 'Contents of dir before running:'\n print os.listdir(os.getcwd())\n print \"Running:\", setup_cmd + run_cmd\n check_call(setup_cmd + run_cmd, shell=True)\n\n print 'In current dir:'\n print os.listdir(os.getcwd())\n\n # Copy files from worker node area to /hdfs or /storage\n # ---------------------------------------------------------------------\n if args.copyFromLocal:\n print 'POST EXECUTION: Copy to HDFS:'\n for (source, dest) in args.copyFromLocal:\n print source, dest\n if dest.startswith('/hdfs'):\n source = os.path.realpath(source)\n dest = dest.replace('/hdfs', '')\n check_call(['hadoop', 'fs', '-copyFromLocal', '-f', source, dest])\n else:\n if os.path.isfile(source):\n shutil.copy2(source, dest)\n elif os.path.isdir(source):\n shutil.copytree(source, dest)\n finally:\n # Cleanup\n # ---------------------------------------------------------------------\n print 'CLEANUP'\n os.chdir('..')\n shutil.rmtree(tmp_dir)", "def worker(queue, run):\n for args in iter(queue.get, None):\n try:\n run(*args)\n except Exception as e: # catch exceptions to avoid exiting the thread prematurely\n print('{} failed: {}'.format(args, e), file=sys.stderr)", "def main():\n flags = parser_create()\n config_data = config_loader_yaml(flags.config_file)\n loggers_config = get_loggers_config(config_data)\n logging_queue = multiprocessing.Queue()\n logging_worker = LoggingWorker(loggers_config, logging_queue)\n logging_worker.start()\n\n class_name = \"\"\n function_name = inspect.stack()[0][3]\n\n for i in range(5):\n log_message(logging_queue, 'DEBUG', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'INFO', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'WARNING', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'ERROR', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'CRITICAL', __name__, class_name, function_name, 'Message ' + str(i))\n log_message(logging_queue, 'Unknown', __name__, class_name, function_name, 'Message ' + str(i))\n\n logging_queue.put(None)\n logging_worker.join()", "def run_workflow(args, run=True):\n\n import os\n import os.path as op\n\n import nipype.interfaces.io as nio\n import nipype.pipeline.engine as pe\n import nipype.interfaces.utility as niu\n\n import qap\n from qap_utils import read_json\n\n import glob\n\n import time\n from time import strftime\n from nipype import config as nyconfig\n\n # unpack args\n resource_pool_dict, sub_info_list, config, run_name, runargs, \\\n bundle_idx, num_bundles = args\n\n # Read and apply general settings in config\n keep_outputs = config.get('write_all_outputs', False)\n\n # take date+time stamp for run identification purposes\n pipeline_start_stamp = strftime(\"%Y-%m-%d_%H:%M:%S\")\n pipeline_start_time = time.time()\n\n if \"workflow_log_dir\" not in config.keys():\n config[\"workflow_log_dir\"] = config[\"output_directory\"]\n\n bundle_log_dir = op.join(config[\"workflow_log_dir\"],\n '_'.join([\"bundle\", str(bundle_idx)]))\n\n try:\n os.makedirs(bundle_log_dir)\n except:\n if not op.isdir(bundle_log_dir):\n err = \"[!] 
Bundle log directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % bundle_log_dir\n raise Exception(err)\n else:\n pass\n\n # set up logging\n nyconfig.update_config(\n {'logging': {'log_directory': bundle_log_dir, 'log_to_file': True}})\n logging.update_logging(nyconfig)\n\n logger.info(\"QAP version %s\" % qap.__version__)\n logger.info(\"Pipeline start time: %s\" % pipeline_start_stamp)\n\n workflow = pe.Workflow(name=run_name)\n workflow.base_dir = op.join(config[\"working_directory\"])\n\n # set up crash directory\n workflow.config['execution'] = \\\n {'crashdump_dir': config[\"output_directory\"]}\n\n # create the one node all participants will start from\n starter_node = pe.Node(niu.Function(input_names=['starter'], \n output_names=['starter'], \n function=starter_node_func),\n name='starter_node')\n\n # set a dummy variable\n starter_node.inputs.starter = \"\"\n\n new_outputs = 0\n\n # iterate over each subject in the bundle\n logger.info(\"Starting bundle %s out of %s..\" % (str(bundle_idx),\n str(num_bundles)))\n # results dict\n rt = {'status': 'Started', 'bundle_log_dir': bundle_log_dir}\n\n for sub_info in sub_info_list:\n\n resource_pool = resource_pool_dict[sub_info]\n\n # in case we're dealing with string entries in the data dict\n try:\n resource_pool.keys()\n except AttributeError:\n continue\n\n # resource pool check\n invalid_paths = []\n\n for resource in resource_pool.keys():\n try:\n if not op.isfile(resource_pool[resource]) and resource != \"site_name\":\n invalid_paths.append((resource, resource_pool[resource]))\n except:\n err = \"\\n\\n[!]\"\n raise Exception(err)\n\n if len(invalid_paths) > 0:\n err = \"\\n\\n[!] The paths provided in the subject list to the \" \\\n \"following resources are not valid:\\n\"\n\n for path_tuple in invalid_paths:\n err = \"%s%s: %s\\n\" % (err, path_tuple[0], path_tuple[1])\n\n err = \"%s\\n\\n\" % err\n raise Exception(err)\n\n # process subject info\n sub_id = str(sub_info[0])\n # for nipype\n if \"-\" in sub_id:\n sub_id = sub_id.replace(\"-\",\"_\")\n if \".\" in sub_id:\n sub_id = sub_id.replace(\".\",\"_\")\n\n if sub_info[1]:\n session_id = str(sub_info[1])\n # for nipype\n if \"-\" in session_id:\n session_id = session_id.replace(\"-\",\"_\")\n if \".\" in session_id:\n session_id = session_id.replace(\".\",\"_\")\n else:\n session_id = \"session_0\"\n\n if sub_info[2]:\n scan_id = str(sub_info[2])\n # for nipype\n if \"-\" in scan_id:\n scan_id = scan_id.replace(\"-\",\"_\")\n if \".\" in scan_id:\n scan_id = scan_id.replace(\".\",\"_\")\n else:\n scan_id = \"scan_0\"\n\n name = \"_\".join([\"\", sub_id, session_id, scan_id])\n\n rt[name] = {'id': sub_id, 'session': session_id, 'scan': scan_id,\n 'resource_pool': str(resource_pool)}\n\n logger.info(\"Participant info: %s\" % name)\n\n # set output directory\n output_dir = op.join(config[\"output_directory\"], run_name,\n sub_id, session_id, scan_id)\n\n try:\n os.makedirs(output_dir)\n except:\n if not op.isdir(output_dir):\n err = \"[!] 
Output directory unable to be created.\\n\" \\\n \"Path: %s\\n\\n\" % output_dir\n raise Exception(err)\n else:\n pass\n\n # for QAP spreadsheet generation only\n config.update({\"subject_id\": sub_id, \"session_id\": session_id,\n \"scan_id\": scan_id, \"run_name\": run_name})\n\n if \"site_name\" in resource_pool:\n config.update({\"site_name\": resource_pool[\"site_name\"]})\n\n logger.info(\"Configuration settings:\\n%s\" % str(config))\n\n qap_types = [\"anatomical_spatial\", \n \"functional_spatial\", \n \"functional_temporal\"]\n\n # update that resource pool with what's already in the output\n # directory\n for resource in os.listdir(output_dir):\n if (op.exists(op.join(output_dir, resource)) and\n resource not in resource_pool.keys()):\n try:\n resource_pool[resource] = \\\n glob.glob(op.join(output_dir, resource, \"*\"))[0]\n except IndexError:\n if \".json\" in resource:\n # load relevant json info into resource pool\n json_file = op.join(output_dir, resource)\n json_dict = read_json(json_file)\n sub_json_dict = json_dict[\"%s %s %s\" % (sub_id,\n session_id,\n scan_id)]\n\n if \"anatomical_header_info\" in sub_json_dict.keys():\n resource_pool[\"anatomical_header_info\"] = \\\n sub_json_dict[\"anatomical_header_info\"]\n\n if \"functional_header_info\" in sub_json_dict.keys():\n resource_pool[\"functional_header_info\"] = \\\n sub_json_dict[\"functional_header_info\"]\n\n for qap_type in qap_types:\n if qap_type in sub_json_dict.keys():\n resource_pool[\"_\".join([\"qap\",qap_type])] = \\\n sub_json_dict[qap_type]\n except:\n # a stray file in the sub-sess-scan output directory\n pass\n\n # create starter node which links all of the parallel workflows within\n # the bundle together as a Nipype pipeline\n resource_pool[\"starter\"] = (starter_node, 'starter')\n\n # individual workflow and logger setup\n logger.info(\"Contents of resource pool for this participant:\\n%s\"\n % str(resource_pool))\n\n # start connecting the pipeline\n qw = None\n for qap_type in qap_types:\n if \"_\".join([\"qap\", qap_type]) not in resource_pool.keys():\n if qw is None:\n from qap import qap_workflows as qw\n wf_builder = \\\n getattr(qw, \"_\".join([\"qap\", qap_type, \"workflow\"]))\n workflow, resource_pool = wf_builder(workflow, resource_pool,\n config, name)\n\n if (\"anatomical_scan\" in resource_pool.keys()) and \\\n (\"anatomical_header_info\" not in resource_pool.keys()):\n if qw is None:\n from qap import qap_workflows as qw\n workflow, resource_pool = \\\n qw.qap_gather_header_info(workflow, resource_pool, config,\n name, \"anatomical\")\n\n if (\"functional_scan\" in resource_pool.keys()) and \\\n (\"functional_header_info\" not in resource_pool.keys()):\n if qw is None:\n from qap import qap_workflows as qw\n workflow, resource_pool = \\\n qw.qap_gather_header_info(workflow, resource_pool, config,\n name, \"functional\")\n\n # set up the datasinks\n out_list = []\n for output in resource_pool.keys():\n for qap_type in qap_types:\n if qap_type in output:\n out_list.append(\"_\".join([\"qap\", qap_type]))\n\n # write_all_outputs (writes everything to the output directory, not\n # just the final JSON files)\n if keep_outputs:\n out_list = resource_pool.keys()\n logger.info(\"Outputs we're keeping: %s\" % str(out_list))\n logger.info('Resource pool keys after workflow connection: '\n '{}'.format(str(resource_pool.keys())))\n\n # Save reports to out_dir if necessary\n if config.get('write_report', False):\n\n if (\"qap_mosaic\" in resource_pool.keys()) and \\\n (\"qap_mosaic\" not in 
out_list):\n out_list += ['qap_mosaic']\n\n # The functional temporal also has an FD plot\n if 'qap_functional_temporal' in resource_pool.keys():\n if (\"qap_fd\" in resource_pool.keys()) and \\\n (\"qap_fd\" not in out_list):\n out_list += ['qap_fd']\n\n for output in out_list:\n # we use a check for len()==2 here to select those items in the\n # resource pool which are tuples of (node, node_output), instead\n # of the items which are straight paths to files\n\n # resource pool items which are in the tuple format are the\n # outputs that have been created in this workflow because they\n # were not present in the subject list YML (the starting resource\n # pool) and had to be generated\n if (len(resource_pool[output]) == 2) and (output != \"starter\"):\n ds = pe.Node(nio.DataSink(), name='datasink_%s%s'\n % (output,name))\n ds.inputs.base_directory = output_dir\n node, out_file = resource_pool[output]\n workflow.connect(node, out_file, ds, output)\n new_outputs += 1\n elif \".json\" in resource_pool[output]:\n new_outputs += 1\n\n logger.info(\"New outputs: %s\" % str(new_outputs))\n\n # run the pipeline (if there is anything to do)\n if new_outputs > 0:\n if config.get('write_graph', False):\n workflow.write_graph(\n dotfilename=op.join(config[\"output_directory\"],\n \"\".join([run_name, \".dot\"])),\n simple_form=False)\n workflow.write_graph(\n graph2use=\"orig\",\n dotfilename=op.join(config[\"output_directory\"],\n \"\".join([run_name, \".dot\"])),\n simple_form=False)\n workflow.write_graph(\n graph2use=\"hierarchical\",\n dotfilename=op.join(config[\"output_directory\"],\n \"\".join([run_name, \".dot\"])),\n simple_form=False)\n if run:\n try:\n logger.info(\"Running with plugin %s\" % runargs[\"plugin\"])\n logger.info(\"Using plugin args %s\" % runargs[\"plugin_args\"])\n workflow.run(plugin=runargs[\"plugin\"],\n plugin_args=runargs[\"plugin_args\"])\n rt['status'] = 'finished'\n logger.info(\"Workflow run finished for bundle %s.\"\n % str(bundle_idx))\n except Exception as e: # TODO We should be more specific here ...\n errmsg = e\n rt.update({'status': 'failed'})\n logger.info(\"Workflow run failed for bundle %s.\"\n % str(bundle_idx))\n # ... 
however this is run inside a pool.map: do not raise\n # Exception\n else:\n return workflow\n\n else:\n rt['status'] = 'cached'\n logger.info(\"\\nEverything is already done for bundle %s.\"\n % str(bundle_idx))\n\n # Remove working directory when done\n if not keep_outputs:\n try:\n work_dir = op.join(workflow.base_dir, scan_id)\n\n if op.exists(work_dir):\n import shutil\n shutil.rmtree(work_dir)\n except:\n logger.warn(\"Couldn\\'t remove the working directory!\")\n pass\n\n if rt[\"status\"] == \"failed\":\n logger.error(errmsg)\n else:\n pipeline_end_stamp = strftime(\"%Y-%m-%d_%H:%M:%S\")\n pipeline_end_time = time.time()\n logger.info(\"Elapsed time (minutes) since last start: %s\"\n % ((pipeline_end_time - pipeline_start_time) / 60))\n logger.info(\"Pipeline end time: %s\" % pipeline_end_stamp)\n\n return rt", "def runFragment(options, cmdargs, taskNum=None):\n t0=time.time()\n exitcode=-3\n\n # create local tmp dir\n if options.queue == LOCAL:\n localDir = tempfile.mkdtemp(suffix='batchRunner', dir=options.tmpDir)\n else:\n localDir = tempfile.mkdtemp(suffix='batchRunner')\n logging.debug(\"Local dir (%s): %s\" % (os.path.exists(localDir), localDir))\n\n #if options.queue != LOCAL:\n if taskNum is None:\n # get the task number from env\n taskNum=getTaskId(options)\n\n hostname=getHostName()\n logFile=\"%s%stask%05d.%s.log\" % (localDir,os.sep,taskNum,hostname)\n errStream=open(logFile,'w')\n # if called from SGE, we will have to:\n if options.queue != LOCAL:\n # set up logging\n if options.verbose==0:\n loglevel=logging.ERROR\n elif options.verbose==1:\n loglevel=logging.WARN\n elif options.verbose==2:\n loglevel=logging.INFO\n elif options.verbose>=3:\n loglevel=logging.DEBUG\n logging.basicConfig(stream=errStream, level=loglevel)\n\n # set up file names\n infragmentName = getFragmentName(options.fragBase, taskNum,\n options.fragSuff)\n fragment_dir = options.frag_dir\n prefix = getFragmentPrefix(options.fragBase, taskNum)\n infragment = \"%s%s%s\" % (fragment_dir, os.sep, infragmentName)\n stdoutFragment=os.path.join(options.tmpDir, infragmentName) + \".stdout\"\n stderrFragment=os.path.join(options.tmpDir, infragmentName) + \".stderr\"\n if options.queue == LOCAL:\n stdoutLocal=stdoutFragment\n stderrLocal=stderrFragment\n else:\n stdoutLocal = \"%s%stask%s.%s.stdout\" % ( localDir, os.sep, taskNum, hostname )\n stderrLocal = \"%s%stask%s.%s.stderr\" % ( localDir, os.sep, taskNum, hostname )\n\n try:\n logging.debug(\"Begin runFragment: type: %s, host: %s\" % (options.taskType, hostname))\n # check arguments\n if options.tmpDir is None:\n logging.error(\"batch_runner needs --tmp_dir parameter!\")\n parser.error(\"MUST supply --temp_dir\")\n\n # if cwd is False, use local tmp directory, otherwise use current directory\n if options.cwd:\n subprocwd=None\n else:\n localDir = os.path.abspath(localDir)\n subprocwd=localDir\n infragment=os.path.abspath(infragment)\n # translate any relative paths in command to absolute\n makeRelativePathsAbsolute(cmdargs)\n\n # any command specific changes\n if options.taskType in finalCommandProcessingForTask:\n finalCommandProcessingForTask[options.taskType](cmdargs,localDir)\n\n # modify command\n outLocal = \"%s%s%s.output\" % (localDir, os.sep, infragmentName)\n (foundI, outputFlags) = prepareCommandForFragment(options, infragment, prefix, outLocal, cmdargs, hostname, errStream)\n logging.debug(\"ready ro RUN!\")\n\n # fragmented HMMdbs need to be compiled\n prep_input_fragment(infragment,\n fragment_prep_for_task.get(options.taskType,\n 
options.frag_prep))\n\n # setup to run command\n # I/O\n if foundI:\n spin=None\n else:\n spin=open(infragment,'rt')\n spout=open(stdoutLocal,'w')\n sperr=open(stderrLocal,'w')\n # environment\n path=os.environ['PATH']\n #path=os.pathsep.join([BINDIR,os.environ['PATH']])\n\n # run it\n try:\n logging.debug(\"Command:\\n%s\\nwd=%s\\npath=%s\" % (formatCommand(cmdargs),subprocwd,path))\n t1=time.time()\n # Run the command\n exitcode=subprocess.call(cmdargs, stdin=spin, stdout=spout,\n stderr=sperr, cwd=subprocwd)\n #exitcode=subprocess.call(cmdargs, stdin=spin, stdout=spout, stderr=sperr, cwd=subprocwd, env={\"PATH\":path})\n t2=time.time()\n logging.info(\"Command took %.2f seconds\" % (t2-t1))\n except:\n errStream.write(\"Exception executing command: %s\\n\" % (cmdargs))\n errStream.write('-'*60+'\\n')\n traceback.print_exc(file=errStream)\n errStream.write('-'*60+'\\n')\n exitcode=-1\n\n if not foundI:\n spin.close()\n spout.close()\n sperr.close()\n\n # Report any error code\n if exitcode != 0:\n logging.error(\"Error code %d from command:\\n%s\" % (exitcode,cmdargs))\n\n if options.queue != LOCAL:\n # copy stderr and stdout\n logging.info(\"Copying %s to %s\" % (stdoutLocal,stdoutFragment))\n shutil.copy(stdoutLocal,stdoutFragment)\n logging.info(\"Copying %s to %s\" % (stderrLocal,stderrFragment))\n shutil.copy(stderrLocal,stderrFragment)\n\n # move output files\n for flag in outputFlags:\n # check for other files\n logging.debug(\"Looking for file from output flag: %s\" % (flag))\n # skip stdout\n if flag=='%stdout':\n # we've already grabbed it\n continue\n\n (outputdir,output) = getOutputFromFlag(flag,infragmentName,prefix,localDir,options.tmpDir)\n logging.debug(\"file should be: %s in %s\" % (output,outputdir))\n if outputdir is localDir:\n localOutput = \"%s%s%s\" % (outputdir,os.sep,output)\n tmpOutput = \"%s%s%s\" % (options.tmpDir, os.sep, output)\n if os.path.exists(localOutput):\n logging.info(\"Copying %s to %s\" % (localOutput, tmpOutput))\n create_parent_dir(tmpOutput)\n shutil.move(localOutput,tmpOutput)\n else:\n logging.warning(\"Could not find output: %s\" % output)\n logging.debug(\"Flag: %s\" % (flag))\n\n except:\n exitcode=2\n errStream.write(\"Exception running fragment %d:\\n\" % (taskNum))\n errStream.write('-'*60+'\\n')\n traceback.print_exc(file=errStream)\n errStream.write('-'*60+'\\n')\n\n # Do some final cleanup:\n if exitcode!=0:\n errCodeFile=os.path.join(options.tmpDir, \n \"%s.exitcode\" % (infragmentName))\n ecStream=open(errCodeFile,'w')\n ecStream.write(str(exitcode))\n ecStream.close()\n\n t2=time.time()\n logging.info(\"Fragment took %.2f seconds\" % (t2-t0))\n\n # copy err to shared dir\n if options.queue != LOCAL:\n logging.shutdown()\n errStream.close()\n if options.queue == LOCAL:\n shutil.move(logFile, os.path.join(options.tmpDir,\n \"%s.log\" % (infragmentName)))\n else:\n shutil.copy(logFile, os.path.join(options.tmpDir,\n \"%s.log\" % (infragmentName)))\n\n # remove local temorary dir (if not debugging)\n if logging.getLogger().level > logging.DEBUG:\n logging.debug(\"I AM removing %s\", localDir)\n shutil.rmtree(localDir)\n else:\n logging.debug(\"NOT removing %s\", localDir)\n\n return exitcode", "def worker(self, **options):\n pass", "def produce(frame_gen: fg.FrameGenerator, fps: float,\r\n dpi: typing.Union[int, float], bitrate: typing.Union[int, float],\r\n outfile: str,\r\n settings: PerformanceSettings = None, time_per_print: float = 15.0,\r\n logger: logging.Logger = None) -> PerformanceSettings:\r\n\r\n try:\r\n 
mp.set_start_method('spawn')\r\n except RuntimeError:\r\n pass\r\n\r\n if settings is None:\r\n settings = PerformanceSettings()\r\n if logger is None:\r\n logger = logging.getLogger('pympanim.worker')\r\n logger.setLevel(logging.DEBUG)\r\n logging.basicConfig(\r\n format='%(asctime)s [%(filename)s:%(lineno)d] %(message)s',\r\n datefmt='%m/%d/%Y %I:%M:%S %p')\r\n\r\n ms_per_frame = 1000 / fps\r\n num_frames = int(frame_gen.duration / ms_per_frame)\r\n logger.info('Settings: %0.1f seconds; %d frames at %d fps with %d workers...',\r\n frame_gen.duration / 1000, num_frames, fps, settings.num_workers)\r\n\r\n workers = []\r\n paused_workers = []\r\n stopping_workers = [] # closed when we process their last frame\r\n\r\n perf = imgst.ISRunningAveragePerfHandler(settings.window_size)\r\n isticher = imgst.ImageSticher(frame_gen.frame_size, dpi, bitrate, fps,\r\n outfile, settings.ooo_error)\r\n isticher.perfs.append(perf)\r\n\r\n for i in range(settings.num_workers):\r\n worker = _spawn_worker(frame_gen, ms_per_frame, i)\r\n isticher.register_queue(worker.img_queue)\r\n workers.append(worker)\r\n\r\n worker_counter = settings.num_workers\r\n\r\n for worker in workers:\r\n worker.start_sync()\r\n isticher.start()\r\n\r\n all_synced = False\r\n while not all_synced:\r\n all_synced = True\r\n for worker in workers:\r\n if not worker.check_sync():\r\n all_synced = False\r\n time.sleep(0.001)\r\n\r\n old_perf = None\r\n cur_optim = None # magical string values\r\n frame_batch_dyn_min = settings.frame_batch_min\r\n frame_batch_dyn_max = settings.frame_batch_max\r\n frame_batch_min_next_decay = float('inf')\r\n frame_batch_max_next_decay = float('inf')\r\n next_optim = time.time() + settings.perf_delay + settings.window_size\r\n next_progress = time.time() + max(settings.perf_delay + settings.window_size, time_per_print)\r\n\r\n\r\n cur_frame = 0\r\n syncing = False\r\n\r\n while cur_frame < num_frames:\r\n if not syncing:\r\n frames_per_worker_since_sync = 0\r\n for worker in workers:\r\n worker.check_ack_queue()\r\n while worker.offer(cur_frame, settings.worker_queue_size):\r\n cur_frame += 1\r\n frames_per_worker_since_sync = max(\r\n frames_per_worker_since_sync, worker.num_since_sync)\r\n if cur_frame >= num_frames:\r\n break\r\n for i in range(settings.frame_batch_amount - 1):\r\n worker.send(cur_frame)\r\n cur_frame += 1\r\n frames_per_worker_since_sync = max(\r\n frames_per_worker_since_sync, worker.num_since_sync)\r\n if cur_frame >= num_frames:\r\n break\r\n if cur_frame >= num_frames:\r\n break\r\n if cur_frame >= num_frames:\r\n break\r\n if cur_frame >= num_frames:\r\n break\r\n\r\n if frames_per_worker_since_sync > settings.frames_per_sync:\r\n for worker in workers:\r\n worker.start_sync()\r\n syncing = True\r\n else:\r\n syncing = False\r\n for worker in workers:\r\n if not worker.check_sync():\r\n syncing = True\r\n break\r\n\r\n for i in range(settings.work_per_dispatch):\r\n isticher.do_work()\r\n\r\n while len(isticher.ooo_frames) > settings.ooo_cap:\r\n isticher.do_work()\r\n\r\n for i in range(len(stopping_workers) - 1, 0, -1):\r\n worker = stopping_workers[i]\r\n if worker.check_finish() and isticher.next_frame > worker.last_frame:\r\n worker.check_sync() # cleanup just in case\r\n isticher.remove_queue(worker.img_queue)\r\n worker.close()\r\n stopping_workers.pop(i)\r\n\r\n thetime = time.time()\r\n if thetime >= next_progress:\r\n next_progress = thetime + time_per_print\r\n recpsec, procpsec = perf.mean()\r\n frames_to_proc = num_frames - isticher.next_frame\r\n time_left_sec = 
frames_to_proc / procpsec if procpsec > 0 else float('inf')\r\n logger.info('[%0.1f secs remaining] Generating %0.2f images/sec and ' # pylint: disable=logging-not-lazy\r\n + 'processing %0.2f images/sec', time_left_sec,\r\n recpsec, procpsec)\r\n\r\n if thetime >= next_optim:\r\n next_optim = thetime + settings.perf_delay + settings.window_size\r\n if frame_batch_min_next_decay < thetime:\r\n frame_batch_dyn_min -= 1\r\n frame_batch_min_next_decay = (\r\n float('inf') if frame_batch_dyn_min <= settings.frame_batch_min\r\n else thetime + settings.frame_batch_dyn_min_decay_time\r\n )\r\n if frame_batch_max_next_decay < thetime:\r\n frame_batch_dyn_max += 1\r\n frame_batch_max_next_decay = (\r\n float('inf') if frame_batch_dyn_max >= settings.frame_batch_max\r\n else thetime + settings.frame_batch_dyn_max_decay_time\r\n )\r\n\r\n recpsec, procpsec = perf.mean()\r\n if old_perf is not None and cur_optim is not None:\r\n oldrecpsec, oldprocpsec = old_perf # pylint: disable=unpacking-non-sequence, unused-variable\r\n\r\n if cur_optim == 'reduce_frame_batch_amount':\r\n relative_performance = 0 if procpsec == 0 else oldprocpsec / procpsec\r\n if relative_performance > settings.frame_batch_max_badness:\r\n # keep the change\r\n logger.debug(\r\n 'found better setting: frame_batch_amount=%d (rel performance: %0.3f)',\r\n settings.frame_batch_amount, relative_performance)\r\n frame_batch_dyn_max = settings.frame_batch_amount\r\n frame_batch_max_next_decay = (\r\n thetime + settings.frame_batch_dyn_max_decay_time\r\n )\r\n else:\r\n # revert the change\r\n # we're evil scientists so we dont report null results\r\n settings.frame_batch_amount += 1\r\n frame_batch_dyn_min = settings.frame_batch_amount\r\n frame_batch_min_next_decay = (\r\n thetime + settings.frame_batch_dyn_min_decay_time\r\n )\r\n elif cur_optim == 'increase_frame_batch_amount':\r\n relative_performance = 0 if procpsec == 0 else oldprocpsec / procpsec\r\n if relative_performance > settings.frame_batch_min_improvement:\r\n # keep the change\r\n logger.debug(\r\n 'found better setting: frame_batch_amount=%d (rel performance: %0.3f)',\r\n settings.frame_batch_amount, relative_performance)\r\n frame_batch_dyn_min = settings.frame_batch_amount\r\n frame_batch_min_next_decay = (\r\n thetime + settings.frame_batch_dyn_min_decay_time\r\n )\r\n else:\r\n # revert the change\r\n # we're evil scientists so we dont report null results\r\n settings.frame_batch_amount -= 1\r\n frame_batch_dyn_max = settings.frame_batch_amount\r\n frame_batch_max_next_decay = (\r\n thetime + settings.frame_batch_dyn_max_decay_time\r\n )\r\n else:\r\n raise RuntimeError(f'unknown cur_optim = {cur_optim}')\r\n\r\n cur_optim = None\r\n\r\n perc_rec_proc = procpsec / recpsec\r\n reason_str = (f'(processing {perc_rec_proc:.3f} images for every '\r\n + f'image generated, have {len(isticher.ooo_frames)} '\r\n + 'frames awaiting processing)')\r\n\r\n threshold_spawn, threshold_kill = (\r\n (settings.spawn_worker_threshold_low,\r\n settings.kill_worker_threshold_low)\r\n if len(isticher.ooo_frames) < settings.ooo_balance\r\n else (settings.spawn_worker_threshold_high,\r\n settings.kill_worker_threshold_high)\r\n )\r\n\r\n if (perc_rec_proc > threshold_spawn\r\n and settings.num_workers < settings.max_workers):\r\n settings.num_workers += 1\r\n if settings.frames_per_sync > settings.min_frames_per_sync:\r\n settings.frames_per_sync -= 1\r\n if paused_workers:\r\n unpaused = paused_workers.pop()\r\n workers.append(unpaused)\r\n logger.debug('Unpaused a worker %s', 
reason_str)\r\n else:\r\n worker = _spawn_worker(frame_gen, ms_per_frame, worker_counter)\r\n isticher.register_queue(worker.img_queue)\r\n workers.append(worker)\r\n worker_counter += 1\r\n logger.debug('Spawned a worker %s', reason_str)\r\n elif (perc_rec_proc < threshold_kill\r\n and settings.num_workers > 1):\r\n settings.num_workers -= 1\r\n if settings.frames_per_sync > settings.min_frames_per_sync:\r\n settings.frames_per_sync -= 1\r\n settings.frames_per_sync -= 1\r\n if not paused_workers:\r\n paused = workers.pop()\r\n paused_workers.append(paused)\r\n logger.debug('Paused a worker %s', reason_str)\r\n else:\r\n paused = workers.pop()\r\n killed = paused_workers.pop()\r\n paused_workers.append(paused)\r\n stopping_workers.append(killed)\r\n killed.start_finish()\r\n logger.debug('Killed a worker %s', reason_str)\r\n elif settings.frames_per_sync < settings.max_frames_per_sync:\r\n settings.frames_per_sync += 1\r\n\r\n want_reduce_frame_batch = perc_rec_proc < 1\r\n # if we have processed fewer than we have received it's not as\r\n # important that we optimize image generation\r\n can_reduce_frame_batch = (\r\n settings.frame_batch_amount > frame_batch_dyn_min\r\n )\r\n can_increase_frame_batch = (\r\n settings.frame_batch_amount < frame_batch_dyn_max\r\n )\r\n\r\n if ((want_reduce_frame_batch or not can_increase_frame_batch)\r\n and can_reduce_frame_batch):\r\n cur_optim = 'reduce_frame_batch_amount'\r\n settings.frame_batch_amount -= 1\r\n elif can_increase_frame_batch:\r\n cur_optim = 'increase_frame_batch_amount'\r\n settings.frame_batch_amount += 1\r\n\r\n\r\n old_perf = (recpsec, procpsec)\r\n\r\n\r\n logger.debug('Shutting down workers...')\r\n workers.extend(paused_workers)\r\n paused_workers = []\r\n for worker in workers:\r\n worker.start_finish()\r\n workers.extend(stopping_workers)\r\n stopping_workers = []\r\n\r\n all_finished = False\r\n while not all_finished:\r\n all_finished = not isticher.do_work()\r\n if not all_finished:\r\n for worker in workers:\r\n if not worker.check_finish():\r\n all_finished = False\r\n break\r\n if not all_finished:\r\n for worker in stopping_workers:\r\n if not worker.check_finish():\r\n all_finished = False\r\n break\r\n\r\n logger.debug('All workers shut down, processing remaining frames...')\r\n while isticher.next_frame < num_frames:\r\n if not isticher.do_work():\r\n time.sleep(0.001)\r\n\r\n isticher.finish()\r\n for worker in workers:\r\n worker.check_sync() # just in case we leaked one\r\n worker.close()\r\n logger.info('Finished')\r\n return settings", "def main():\r\n\r\n # parsing command line argumants\r\n parser = optparse.OptionParser()\r\n parser.add_option('-c', '--config',\r\n dest='config_file', default='logging.json',\r\n help='set the logging configuration file.')\r\n parser.add_option('-d', '--datapath',\r\n dest='data_path', default='data',\r\n help='set the path of the data directory. ' +\r\n 'default is \\'data\\'.')\r\n parser.add_option('-s', '--server',\r\n dest='server_ip', default='127.0.0.1',\r\n help='set the server IP address. 
default is localhost.')\r\n parser.add_option('-p', '--port',\r\n dest='port', default='2048', type='int',\r\n help='set custom connection port.')\r\n\r\n options, args = parser.parse_args()\r\n\r\n # configurate the loggers of the threads\r\n with open(options.config_file, 'rb') as f:\r\n config = json.loads(f.read())\r\n logging.config.dictConfig(config)\r\n\r\n if not os.path.exists(options.data_path):\r\n os.mkdir(options.data_path)\r\n\r\n logic_queue = Queue.Queue()\r\n network_queue = Queue.Queue()\r\n\r\n network_receiver_thread = network.NetworkReceiverThread(\r\n server_ip=options.server_ip,\r\n port=options.port,\r\n network_queue=network_queue,\r\n logic_queue=logic_queue)\r\n\r\n network_sender_thread = network.NetworkSenderThread(\r\n network_queue=network_queue)\r\n\r\n logic_thread = logic.LogicThread(\r\n data_path=options.data_path,\r\n logic_queue=logic_queue,\r\n network_queue=network_queue)\r\n\r\n network_receiver_thread.start()\r\n network_sender_thread.start()\r\n logic_thread.start()", "def main(argc, argv):\n global NUM_PROCESSES, enableProcLogs, enableBqLogs, HELP, useWeibull\n\n print('Process checkpoint-restart simulator')\n random.seed(RANDOM_SEED) # constant seed for reproducibility\n\n # Create an environment and start the setup process\n env = simpy.Environment()\n parser = ap.ArgumentParser(description=HELP, formatter_class=ap.RawTextHelpFormatter)\n parser.add_argument(\"-p\", \"--proc_logs\", action=\"store_true\", help=\"Show run time logs from processes\")\n parser.add_argument(\"-b\", \"--batchqueue_logs\", action=\"store_true\", help=\"Show run time logs from the batch-queue manager\")\n parser.add_argument(\"-n\", \"--procs\", type=int, default=NUM_PROCESSES, help=\"Max. number of processes to simulate (default: 7)\")\n parser.add_argument(\"-x\", \"--no_preempt\", action=\"store_true\", help=\"Disables preemption of currently executing \"\\\n \"job on failure. This simulates the behavior \"\\\n \"of a simple FIFO queue.\")\n parser.add_argument(\"-w\", \"--use-weibull\", action=\"store_true\", help=\"Use Weibull distribution for failure injection. 
Default is to use exponential distribution\")\n parser.add_argument(\"-f\", \"--file-name\", type=str, help=\"Store lost work/throughput results in the given file.\")\n parser.add_argument(\"-s\", \"--show-throughput-results\", action=\"store_true\", help=\"Show throughput results using matplotlib.\")\n parser.add_argument(\"-l\", \"--show-lostwork-results\", action=\"store_true\", help=\"Show lost work results using matplotlib.\")\n args = parser.parse_args()\n NUM_PROCESSES = args.procs\n MAX_CIRC_Q_LEN = NUM_PROCESSES + 1\n enableProcLogs = args.proc_logs\n enableBqLogs = args.batchqueue_logs\n useWeibull = args.use_weibull\n\n # Create a batch queue\n mymachine = simpy.Resource(env, MAX_PARALLEL_PROCESSES)\n batchQ = BatchQueue(env, MAX_CIRC_Q_LEN, mymachine, args.no_preempt)\n showPlot = args.show_throughput_results | args.show_lostwork_results\n\n testProcesses = [Process(env, 'Process %d' % i, time_to_checkpoint() + random.randint(0, 5) * 10, mymachine)\n for i in range(NUM_PROCESSES)]\n\n simulateArrivalOfJobs(env, testProcesses, batchQ)\n env.process(batchQ.runBq(False))\n # Execute\n env.run()\n\n # Analyis/results\n print(\"******************************************************\")\n print(\"******************FINAL DATA**************************\")\n print(\"******************************************************\")\n\n res = computeResults(args, batchQ)\n saveResults(args, res)\n showResults(args, res)\n\n print(\"Process #, # Ckpts, # Total Failures, # Restarts, # Failed Restarts, # Failed Ckpts, # Preempts,\"\\\n \" Compute Time, Ckpt Time, Lost Work, Lost Restart Time, Lost Ckpt Time, Submission Time, Start Time,\"\\\n \" End Time, Actual Run Time\")\n for p in testProcesses:\n t1 = int(p.numCkpts * p.ckptTime + p.numRestarts * int(p.ckptTime/2.0) + p.lostWork + p.totalComputeTime + p.lostRestartTime)\n t2 = int(p.actualRunTime)\n if not p.restartFailures * p.ckptTime >= p.lostRestartTime:\n print \"Warning\"\n if t1 != t2:\n print(\"Warning: %d != %d\" % (t1, t2))\n print(p)\n if showPlot:\n plt.show()", "def main():\n\n try:\n worker_id = get_arg(\"--worker-id\", assert_nonnegative_int)\n master_host = get_arg(\"--master-host\", assert_host)\n master_port = get_arg(\"--master-port\", assert_positive_int)\n scale = get_arg(\"--scale\", assert_pos_float)\n method = get_arg(\"--method\", assert_downscaling_method)\n load_backup = get_arg(\"--load-backup\", assert_bool, default=0)\n number_of_random_walkers = get_arg(\n \"--n-random-walkers\", assert_nonnegative_int, default=1)\n backup_size = get_arg(\n \"--backup-size\", assert_nonnegative_int, default=100)\n walking_iterations = get_arg(\n \"--walking-iterations\", assert_positive_int, default=1)\n\n except AssertionError as e:\n print_error(e)\n print_error(\n \"The downscaling worker expects the following arguments:\\n\"\n \"\\t--worker-id: The id of the worker\\n\"\n \"\\t--master-host: The host of the master\\n\"\n \"\\t--master-port: The port of the master\\n\"\n \"\\t--scale: The scale of the downscaled graph w.r.t. 
the input graph\\n\"\n \"\\t--method: The method to use for downscaling, `random_walk` or `random_edge`\\n\"\n \"\\t--load-backup: Should the worker load from a backup send by the master\\n\"\n \"\\t--n-random-walkers: Number of random walkers to start with\\n\"\n \"\\t--backup-size: Minimum size of the backup before it will be send to the master during a run, 0 if you want no backups\\n\"\n \"\\t--walking-iterations: The number of steps a random walker sets before the queue will be handled\\n\"\n )\n return\n\n Worker(worker_id, master_host, master_port, scale, method, load_backup,\n number_of_random_walkers, backup_size, walking_iterations)", "def main(self, filenames):\n # Initialize ZMQ.\n self._zmq_init()\n\n # Send XML files to the controller.\n status = 0\n for filename in filenames:\n try:\n err = self._load_schedule(filename)\n if err:\n status = -1\n except Exception as exception:\n print(exception, file=sys.stderr)\n traceback.print_exc(file=sys.stderr)\n status = -1\n\n return status", "def main():\n\n # Configure logging.\n\n logging.basicConfig(level=logging.DEBUG, format=\"%(asctime)-23s | %(threadName)-10s | %(levelname)-5s | %(message)s\")\n logging.Formatter.default_msec_format = '%s.%03d'\n\n # Parse command line arguments.\n\n parser = argparse.ArgumentParser(description=\"Record F1 2019 telemetry data to SQLite3 files.\")\n\n parser.add_argument(\"-p\", \"--port\", default=20777, type=int, help=\"UDP port to listen to (default: 20777)\", dest='port')\n parser.add_argument(\"-i\", \"--interval\", default=1.0, type=float, help=\"interval for writing incoming data to SQLite3 file, in seconds (default: 1.0)\", dest='interval')\n\n args = parser.parse_args()\n\n # Start recorder thread first, then receiver thread.\n\n quit_barrier = Barrier()\n\n recorder_thread = PacketRecorderThread(args.interval)\n recorder_thread.start()\n\n receiver_thread = PacketReceiverThread(args.port, recorder_thread)\n receiver_thread.start()\n\n wait_console_thread = WaitConsoleThread(quit_barrier)\n wait_console_thread.start()\n\n # Recorder, receiver, and wait_console threads are now active. Run until we're asked to quit.\n\n quit_barrier.wait()\n\n # Stop threads.\n\n wait_console_thread.request_quit()\n wait_console_thread.join()\n wait_console_thread.close()\n\n receiver_thread.request_quit()\n receiver_thread.join()\n receiver_thread.close()\n\n recorder_thread.request_quit()\n recorder_thread.join()\n recorder_thread.close()\n\n # All done.\n\n logging.info(\"All done.\")", "def main():\n # get command line args\n args = create_parser()\n\n # report args\n report_args(args)\n\n # check and create instance of process, if possible\n eM = create_process(args)\n\n # write data\n write_data(args.directory, args.file, args.process, args.length, eM)\n\n # write machine to pickle\n write_em_pickle(args.file, eM)", "def dispatcher( port, cmd, files, allworkers, start ):\n # Only the host running as dispatcher should be calling this.\n\n host = ipaddrs( socket.gethostname() )\n\n # Initialize a 0mq context\n\n context = zmq.Context()\n\n # Set up a socket to receive task requests and send replies over.\n # The linger option is set to help make sure all comunication is\n # delivered when the thread ends. The time unit is milliseconds. A\n # rigorous receive request - send reply pattern must be followed as\n # the zmq.REP socket keeps track of who sent the request and thus\n # were the reply should go. Trying to do two receives or two sends\n # in a row will cause a fatal error or hang the program. 
Here we\n # set up the REP side of the socket pattern.\n\n dispatcher_socket = context.socket( zmq.REP )\n dispatcher_socket.setsockopt( zmq.LINGER, 5000 )\n dispatcher_socket.bind( \"tcp://%s:%s\" % ( host, port ) )\n\n maxtime = 0\n tasknum = 0\n workers = {}\n already_notified = 0\n\n sys.stderr.write ( \"Dispatcher:Start:%d\\n\" % ( start ) )\n sys.stderr.flush()\n\n # Adjust starting task for 0 offset:\n\n start = start - 1\n tasknum = start\n lasttask = 0\n\n for f in files[start:]:\n\n request = dispatcher_socket.recv_json()\n worker = request['worker']\n workers[worker] = 1\n\n # Interpret a negative maxtime value as the time up signal.\n\n if request['maxtime'] >= 0 :\n\n if request['maxtime'] > maxtime :\n\n maxtime = request['maxtime']\n sys.stderr.write( \"Dispatcher:Maxtime:%s:%.2f:%.2f\\n\"\n % ( worker, maxtime, time.time() ) )\n sys.stderr.flush()\n\n tasknum = tasknum + 1\n task_message = { 'cmd' : cmd, 'file' : f.strip(),\n 'maxtime' : maxtime, 'tasknum' : tasknum }\n\n else:\n\n maxtime = -1\n sys.stderr.write( \"Dispatcher:Timeup:%s:%.2f\\n\"\n % ( worker, time.time() ) )\n sys.stderr.flush()\n task_message = { 'cmd' : \"FINI\", 'file' : \"None\",\n 'maxtime' : -1, 'tasknum' : tasknum }\n already_notified += 1\n lasttask = request['lasttask']\n\n dispatcher_socket.send_json( task_message )\n if maxtime < 0 :\n break\n\n # Now make sure all workers have received the shutdown message.\n\n shutdown = allworkers - already_notified\n\n if lasttask == 0 :\n # All tasks handed out before any completions received.\n # Have to assume all will complete.\n lasttask = tasknum\n\n if shutdown > 0 :\n task_message = { 'cmd' : \"FINI\", 'file' : \"None\",\n 'maxtime' : -1, 'tasknum' : tasknum }\n sys.stderr.write( \"Dispatcher:Shutdown:%d\\n\" % ( shutdown ) )\n sys.stderr.flush()\n\n # There is always a chance multiple assignments went out before\n # a timeout was received. 
All should sense time out as well,\n # so check for that when handling their final requests.\n\n for w in range( shutdown ):\n\n request = dispatcher_socket.recv_json()\n\n if request['maxtime'] < 0 :\n if request['lasttask'] < lasttask :\n lasttask = request['lasttask']\n\n dispatcher_socket.send_json( task_message )\n\n sys.stderr.write( \"Dispatcher:Last:%d\\n\" % ( lasttask ) )\n sys.stderr.flush()", "def test_queue_worker_needs_a_queue(self):\n with pytest.raises(ValueError):\n MinimalQueueWorker(None)", "def job_thread(argv):\n #pylint: disable=lost-exception\n\n try:\n exitcode = pfwdefs.PF_EXIT_FAILURE\n pid = os.getpid()\n stdp = None\n stde = None\n stdporig = None\n stdeorig = None\n wcl = WCL()\n wcl['wrap_usage'] = 0.0\n jobfiles = {}\n task = {'wrapnum':'-1'}\n try:\n # break up the input data\n (task, jobfiles, jwcl, ins, outq, errq, multi) = argv\n stdp = WrapOutput(task['wrapnum'], outq)\n stdporig = sys.stdout\n sys.stdout = stdp\n stde = WrapOutput(task['wrapnum'], errq)\n stdeorig = sys.stderr\n sys.stderr = stde\n\n # print machine status information\n exechost_status()\n\n wrappercmd = \"%s %s\" % (task['wrapname'], task['wclfile'])\n\n if not os.path.exists(task['wclfile']):\n print \"Error: input wcl file does not exist (%s)\" % task['wclfile']\n return (1, jobfiles, jwcl, 0, task['wrapnum'], pid)\n\n with open(task['wclfile'], 'r') as wclfh:\n wcl.read(wclfh, filename=task['wclfile'])\n wcl.update(jwcl)\n\n sys.stdout.flush()\n\n # set up the working directory if needed\n if multi:\n workdir = \"fwtemp%04i\" % (int(task['wrapnum']))\n else:\n workdir = None\n setup_wrapper(wcl, task['logfile'], workdir, ins)\n\n print \"Running wrapper: %s\" % (wrappercmd)\n sys.stdout.flush()\n starttime = time.time()\n try:\n exitcode = pfwutils.run_cmd_qcf(wrappercmd, task['logfile'],\n wcl['execnames'])\n except:\n (extype, exvalue, trback) = sys.exc_info()\n print '!' * 60\n print \"%s: %s\" % (extype, str(exvalue))\n\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n exitcode = pfwdefs.PF_EXIT_FAILURE\n sys.stdout.flush()\n if exitcode != pfwdefs.PF_EXIT_SUCCESS:\n print \"Error: wrapper %s exited with non-zero exit code %s. 
Check log:\" % \\\n (wcl[pfwdefs.PF_WRAPNUM], exitcode),\n logfilename = miscutils.parse_fullname(wcl['log'], miscutils.CU_PARSE_FILENAME)\n print \" %s/%s\" % (wcl['log_archive_path'], logfilename)\n print \"DESDMTIME: run_wrapper %0.3f\" % (time.time()-starttime)\n\n print \"Post-steps (exit: %s)\" % (exitcode)\n post_wrapper(wcl, ins, jobfiles, task['logfile'], exitcode, workdir)\n\n if exitcode:\n miscutils.fwdebug_print(\"Aborting due to non-zero exit code\")\n except:\n print traceback.format_exc()\n exitcode = pfwdefs.PF_EXIT_FAILURE\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n\n finally:\n if stdp is not None:\n sys.stdout = stdporig\n if stde is not None:\n sys.stderr = stdeorig\n sys.stdout.flush()\n sys.stderr.flush()\n\n return (exitcode, jobfiles, wcl, wcl['wrap_usage'], task['wrapnum'], pid)\n except:\n print \"Error: Unhandled exception in job_thread.\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n return (1, None, None, 0.0, '-1', pid)", "def launchJobs(options, cmdargs, errStream=sys.stdin):\n\n if options.queue == LOCAL:\n launchLocalJobs(options,cmdargs,errStream)\n return\n\n logging.debug(\"Launching task array: %r\" % ({'tmpDir':options.tmpDir,'splits':options.splits,'fragName':options.fragBase,'cmd':cmdargs,'sgeOpts':options.sgeOptions,'job':options.jobName,'priority':options.priority,'loglevel':options.verbose,'wait':options.wait, 'type':options.taskType}))\n \n # SGE or SLURM submission prefix\n command = getSubmissionCommandPrefix(options)\n\n # batch_runner command\n command.append(BATCHLAUNCHER)\n command+=[\"--mode\",\"run\",\"--tmp_dir\",options.tmpDir,\"--frag_base\",\n options.fragBase, \"--frag_dir\", options.frag_dir, \"--frag_suffix\", options.fragSuff, \"--loglevel\", str(options.verbose), \"--queue\", options.queue]\n if options.inputFlag is not None:\n command.append('-i=%s' % (options.inputFlag))\n if options.prefixFlag is not None:\n command.append('-p=%s' % (options.prefixFlag))\n if options.threadsFlag is not None:\n command+=['-t',str(options.threadsFlag)]\n if options.outputFlags is not None:\n for flag in options.outputFlags:\n command.append('-o=%s' % (flag))\n if options.taskType is not None:\n command+=['--taskType',options.taskType]\n if options.cwd:\n command.append('--cwd')\n command.append('--')\n command+=cmdargs\n\n # redirect qsub output to std, silence if vebose is 0\n #if options.verbose==0:\n # qsubOuts=open(os.devnull,'w')\n #else:\n # qsubOuts=errStream\n \n # run command\n logging.debug('Launching task array: %s' % (formatCommand(command)))\n try:\n submissionOutput = subprocess.check_output(command)\n try:\n submissionOutput = submissionOutput.decode()\n except:\n pass\n if options.verbose>0:\n errStream.write(\"Submission Output: \" + submissionOutput)\n except subprocess.CalledProcessError as error:\n if options.wait and options.queue != SLURM:\n # when using -sync y, the exit code may come from a task\n # (which cleanup will handle)\n logging.warning(\"qsub returned an error code of: %d\" \n % error.returncode)\n else:\n raise error\n\n # get job id\n try:\n jobid = re.search(r'(\\d+)\\s*$',submissionOutput).group(1)\n options.jobid = jobid\n except:\n if options.queue==SLURM:\n logging.error(\"Cannot parse SLURM job id from '%s'\" % (submissionOutput))\n raise\n\n # SLURM doesn't allow waiting for completion on array jobs, 
so we hack:\n # use srun to start a dummy job that will wait for our job array\n if options.wait and options.queue==SLURM:\n waitForSlurmArray(options, errStream)", "def test_enqueue(self):\n dest = '/queue/foo'\n frame = Frame('MESSAGE', headers={'message-id': str(uuid.uuid4())}, body='some data')\n self.store.enqueue(dest, frame)\n \n assert self.store.has_frames(dest) == True\n assert self.store.size(dest) == 1", "def run(parser, args):\n if not args.basename:\n if '_' in os.path.basename(args.left):\n args.basename = os.path.basename(args.left).split('_')[0]\n elif '.' in os.path.basename(args.left):\n args.basename = os.path.basename(args.left).split('.')[0]\n else:\n args.basename = os.path.basename(args.left)\n\n total = countfastq(args.left)\n if args.right:\n total = total*2\n status(f'Loading {total:,} total reads')\n\n DEVNULL = open(os.devnull, 'w')\n if args.method == 'bbduk':\n if args.memory:\n MEM = f'-Xmx{args.memory}g'\n else:\n MEM = f'-Xmx{round(0.6*getRAM())}g'\n\n status('Adapter trimming using BBDuk')\n cmd = ['bbduk.sh', MEM,\n 'ref=adapters',\n f't={args.cpus}',\n 'ktrim=r',\n 'k=23',\n 'mink=11',\n f'minlen={args.minlen}',\n 'hdist=1',\n f'maq={args.avgqual}',\n 'ftm=5',\n 'tpe',\n 'tbo',\n 'overwrite=true']\n if args.left and args.right:\n cmd += [f'in1={args.left}',\n f'in2={args.right}',\n f'out1={args.basename}_1P.fastq.gz',\n f'out2={args.basename}_2P.fastq.gz']\n elif args.left:\n cmd += [f'in={args.left}',\n f'out={args.basename}_1U.fastq.gz']\n\n printCMD(cmd)\n if args.debug:\n subprocess.run(cmd)\n else:\n subprocess.run(cmd, stderr=DEVNULL)\n\n if args.right:\n clean = countfastq(f'{args.basename}_1P.fastq.gz')\n clean = clean*2\n status(f'{clean:,} reads remaining and writing to file')\n status('Trimming finished:\\n\\tFor: {:}\\n\\tRev {:}'.format(\n args.basename + '_1P.fastq.gz',\n args.basename + '_2P.fastq.gz'))\n if not args.pipe:\n status('Your next command might be:\\n\\t' +\n 'AAFTF filter -l {:} -r {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1P.fastq.gz',\n args.basename+'_2P.fastq.gz',\n args.basename,\n args.cpus))\n else:\n clean = countfastq(f'{args.basename}_1U.fastq.gz')\n status(f'{clean:,} reads remaining and writing to file')\n status('Trimming finished:\\n\\tSingle: {:}'.format(\n args.basename+'_1U.fastq.gz'))\n if not args.pipe:\n status('Your next command might be:\\n\\t' +\n 'AAFTF filter -l {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1U.fastq.gz',\n args.basename,\n args.cpus))\n\n elif args.method == 'trimmomatic':\n # find path\n trimmomatic_path = find_trimmomatic()\n if trimmomatic_path:\n jarfile = trimmomatic_path\n elif args.trimmomatic:\n jarfile = args.trimmomatic\n else:\n status('Trimmomatic cannot be found - ' +\n 'please provide location of trimmomatic.jar file.')\n sys.exit(1)\n\n if jarfile:\n path_to_adaptors = args.trimmomatic_adaptors\n leadingwindow = \"LEADING:%d\" % (args.trimmomatic_leadingwindow)\n trailingwindow = \"TRAILING:%d\" % (args.trimmomatic_trailingwindow)\n slidingwindow = \"SLIDINGWINDOW:%s\" % (\n args.trimmomatic_slidingwindow)\n\n quality = args.trimmomatic_quality\n quality = \"-%s\" % (quality) # add leading dash\n\n if not os.path.exists(path_to_adaptors):\n if args.right:\n path_to_adaptors = os.path.join(dirname(jarfile),\n TRIMMOMATIC_TRUSEQPE)\n else:\n path_to_adaptors = os.path.join(dirname(jarfile),\n TRIMMOMATIC_TRUSEQSE)\n\n if not os.path.exists(path_to_adaptors):\n findpath = dirname(jarfile)\n path_to_adaptors = \"\"\n while findpath:\n if os.path.exists(findpath + 
\"/share\"):\n if args.right:\n path_to_adaptors = os.path.join(\n findpath,\n \"/share/trimmomatic\",\n TRIMMOMATIC_TRUSEQPE)\n else:\n path_to_adaptors = os.path.join(\n findpath,\n \"/share/trimmomatic\",\n TRIMMOMATIC_TRUSEQSE)\n break\n findpath = dirname(findpath)\n\n if not os.path.exists(path_to_adaptors):\n status(\"Cannot find adaptors file please specify manually\")\n return\n\n clipstr = args.trimmomatic_clip % (path_to_adaptors)\n\n cmd = []\n\n if args.left and args.right:\n cmd = ['java', '-jar', jarfile, 'PE',\n '-threads', str(args.cpus), quality,\n args.left, args.right,\n args.basename+'_1P.fastq',\n args.basename+'_1U.fastq',\n args.basename+'_2P.fastq',\n args.basename+'_2U.fastq',\n clipstr, leadingwindow, trailingwindow, slidingwindow,\n \"MINLEN:%d\" % (args.minlen)]\n elif args.left and not args.right:\n cmd = ['java', '-jar', jarfile, 'SE',\n '-threads', str(args.cpus),\n quality, args.left,\n args.basename+'_1U.fastq',\n clipstr, leadingwindow, trailingwindow, slidingwindow,\n \"MINLEN:%d\" % (args.minlen)]\n else:\n status(\"Must provide left and right pairs or single read set\")\n return\n\n status('Running trimmomatic adapter and quality trimming')\n printCMD(cmd)\n if args.debug:\n subprocess.run(cmd)\n else:\n subprocess.run(cmd, stderr=DEVNULL)\n if args.right:\n status('Compressing trimmed PE FASTQ files')\n Fzip_inplace(args.basename+'_1P.fastq', args.cpus)\n Fzip_inplace(args.basename+'_2P.fastq', args.cpus)\n SafeRemove(args.basename+'_1U.fastq')\n SafeRemove(args.basename+'_2U.fastq')\n status('Trimming finished:\\n\\tFor: {:}\\n\\tRev {:}'.format(\n args.basename+'_1P.fastq.gz',\n args.basename+'_2P.fastq.gz'))\n if not args.pipe:\n status('Your next command might be:\\n\\t' +\n 'AAFTF filter -l {:} -r {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1P.fastq.gz',\n args.basename+'_2P.fastq.gz',\n args.basename,\n args.cpus))\n else:\n status('Compressing trimmed SE FASTQ file')\n Fzip_inplace(args.basename + '_1U.fastq', args.cpus)\n status('Trimming finished:\\n\\tSingle: {:}'.format(\n args.basename + '_1U.fastq.gz'))\n if not args.pipe:\n status('Your next command might be:\\n\\t' +\n 'AAFTF filter -l {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1U.fastq.gz',\n args.basename,\n args.cpus))\n\n elif args.method == 'fastp':\n status('Adapter trimming using fastp')\n cmd = ['fastp', '--low_complexity_filter',\n '-l', f'{args.minlen}',\n '--average_qual', f'{args.avgqual}',\n '-w', f'{args.cpus}']\n\n# '-wref=adapters', 't={:}'.format(args.cpus), 'ktrim=r',\n# 'k=23', 'mink=11', 'minlen={:}'.format(args.minlen), 'hdist=1',\n# 'ftm=5', 'tpe', 'tbo', 'overwrite=true']\n if args.left and args.right:\n # could add merging ...\n cmd += [f'--in1={args.left}',\n f'--in2={args.right}',\n f'--out1={args.basename}_1P.fastq.gz',\n f'--out2={args.basename}_2P.fastq.gz'\n ]\n if args.merge:\n cmd += ['--merge',\n f'--merged_out={args.basename}_MG.fastq.gz']\n\n elif args.left:\n cmd += [f'--in={args.left}',\n f'--out={args.basename}_1U.fastq.gz']\n if args.dedup:\n cmd += ['--dedup']\n if args.cutfront:\n cmd += ['--cut_front']\n if args.cuttail:\n cmd += ['--cut_tail']\n if args.cutright:\n cmd += ['--cut_right']\n\n cmd += [f'--html={args.basename}.fastp.html',\n f'--json={args.basename}.fastp.json']\n printCMD(cmd)\n if args.debug:\n subprocess.run(cmd)\n else:\n subprocess.run(cmd, stderr=DEVNULL)\n\n if args.right:\n clean = countfastq(f'{args.basename}_1P.fastq.gz')\n clean = clean*2\n status(f'{clean:,} reads remaining and writing to file')\n 
status('Trimming finished:\\n\\tFor: {:}\\n\\tRev {:}'.format(\n args.basename+'_1P.fastq.gz',\n args.basename+'_2P.fastq.gz'))\n if not args.pipe:\n status('Your next command might be:\\n\\t' +\n 'AAFTF filter -l {:} -r {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1P.fastq.gz',\n args.basename+'_2P.fastq.gz',\n args.basename,\n args.cpus))\n else:\n clean = countfastq(f'{args.basename}_1U.fastq.gz')\n status(f'{clean:,} reads remaining and writing to file')\n status('Trimming finished:\\n\\tSingle: {:}'.format(\n args.basename + '_1U.fastq.gz'))\n if not args.pipe:\n status('Your next command might be:\\n\\t' +\n 'AAFTF filter --left {:} -o {:} -c {:}\\n'.format(\n args.basename+'_1U.fastq.gz',\n args.basename, args.cpus))\n\n else:\n status(f'Uknown trimming method: {args.method}')", "def main() -> None:\n worker = Worker()\n worker.do_work()", "def createBlockNumprovFiles(input_queue, output_queue, message_queue, config, \n blockm_df, start_time):\n try: \n temp_time = time.localtime()\n\n for _ in range(config['number_servers']):\n message_queue.put('create_block_numprov')\n\n # load the input_queue\n continue_run, append_list = queueLoader(input_queue, blockm_df, config, \n start_time)\n\n # process the outputs from the workers\n if continue_run:\n continue_run = processWork(config, input_queue, output_queue, \n start_time)\n\n if continue_run:\n my_message = \"\"\"\n INFO - STEP 2 (MASTER): COMPLETED CREATING NUMPROV FILES FOR\n ALL SPEEDS\n \"\"\"\n my_message = ' '.join(my_message.split())\n print(nbmf.logMessage(my_message.strip(), temp_time, time.localtime(), \n time.mktime(time.localtime()) - time.mktime(start_time)))\n return True, append_list\n else:\n my_message = \"\"\"\n ERROR - STEP 2 (MASTER): FAILED TO CREATE NUMPROV FILES FOR\n ALL SPEEDS\n \"\"\"\n my_message = ' '.join(my_message.split())\n print(nbmf.logMessage(my_message.strip(), temp_time, time.localtime(), \n time.mktime(time.localtime()) - time.mktime(start_time)))\n return False, None\n\n except:\n my_message = \"\"\"\n ERROR - STEP 2 (MASTER): UNHANDLED FAILURE IN EXECUTING DISTRIBUTED \n BUILD OF NUMPROV FILES\n \"\"\"\n my_message = ' '.join(my_message.split())\n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message.strip(), temp_time, time.localtime(), \n time.mktime(time.localtime()) - time.mktime(start_time)))\n return False, None", "def run(self, args):\n pass", "def run(self):\n\n (robotProc, iRMsg, robotStat, robotInfo, robotCmd, bcMsg, cbpaeRun, wsInfo) = self.prepVars()\n\n broadcasterProc = self.startBroadcaster(cbpaeRun, bcMsg, iRMsg)\n\n# =============================================================================\n# # pass additional queues to the robot processes by overloading this method\n# =============================================================================\n robotProc = self.startRobots(robotProc, iRMsg, bcMsg, robotInfo, robotCmd, robotStat)\n\n guiProc = self.startGui(wsInfo, robotInfo, robotCmd)\n\n# =============================================================================\n# # This is the main loop checking robotProcs\n# =============================================================================\n rJoinable = self.checkRJoinable(robotProc, robotStat)\n\n self.stopBroadcaster(cbpaeRun)\n\n self.clearQueues(iRMsg, robotCmd, robotInfo)\n\n self.joinRobotProc(robotProc)\n\n self.logBasicInfo()\n\n print (\"CBPAE Trial Finished!!!\")", "def execute_from_cli(argv=None):\n\n settings.configure()\n\n configure_logging(settings.LOGGING_CONFIG, 
settings.LOGGING)\n\n # Set up Connection\n connection = kombu.Connection(settings.AMQP_CONNECTION_STRING)\n\n # Create Exchanges\n exchanges = dict()\n for exchange_name, exchange_settings in settings.EXCHANGES.items():\n exchange = kombu.Exchange(\n name=exchange_name,\n type=exchange_settings.get('type', 'topic'),\n durable=exchange_settings.get('durable', False),\n channel=connection\n\n )\n exchanges[exchange_name] = exchange\n exchange.declare()\n\n # Create Queues\n queues = dict()\n for queue_name, queue_settings in settings.QUEUES.items():\n queue = kombu.Queue(name=queue_name,\n exchange=exchanges.get(\n queue_settings.get('exchange')\n ),\n routing_key=queue_settings.get('routing_key'),\n channel=connection)\n\n queues[queue_name] = queue\n queue.declare()\n\n worker_class = import_string(settings.WORKER_CLASS)\n worker = worker_class(connection, exchanges, queues)\n\n try:\n worker.run()\n except KeyboardInterrupt:\n sys.exit()", "def startWorker(self):\n worker = Worker(self)\n \n # configure the QgsMessageBar\n messageBar = self.iface.messageBar().createMessage('Started to create file', )\n progressBar = self.dlg.progressBar\n cancelButton = self.dlg.pbCancel\n cancelButton.clicked.connect(worker.kill)\n self.iface.messageBar().pushWidget(messageBar, self.iface.messageBar().INFO)\n self.messageBar = messageBar\n \n # start the worker in a new thread\n thread = QtCore.QThread()\n worker.moveToThread(thread)\n worker.finished.connect(self.workerFinished)\n worker.error.connect(self.workerExceptionThrown)\n worker.progress.connect(self.dlg.progressBar.setValue)\n worker.progresstext.connect(self.dlg.lbProgress.setText)\n worker.infotext.connect(self.dlg.txInfo.setText)\n thread.started.connect(worker.run)\n thread.start()\n self.thread = thread\n self.worker = worker", "def main(args):\n\n data = {\n 'id': '00353',\n 'expanded_folder': '00353.1/9a0f0b0d-1f0b-47c8-88ef-050bd9cdff92',\n 'version': '1',\n 'status': 'VOR',\n 'updated_date': datetime.strftime(datetime.utcnow(), \"%Y-%m-%dT%H:%M:%S\")\n }\n\n settings = settings_lib.get_settings('exp')\n identity = \"resize_%s\" % int(random.random() * 1000)\n log_file = \"worker.log\"\n logger = log.logger(log_file, settings.setLevel, identity)\n conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)\n act = activity_ArchiveArticle(settings, logger, conn=conn)\n act.do_activity(data)", "def worker(scenes, cap_templates, ques_templates, worker_id, out_q):\n\n dialogs = []\n for index, scene in enumerate(scenes):\n cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())\n print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' % \\\n (cur_time, worker_id, index, len(scenes), scene['image_index']))\n try:\n gen_dialog = generate_dialog_bfs(scene, cap_templates, ques_templates)\n dialogs.append(json.loads(json.dumps(gen_dialog)))\n except:\n print('NOTE: Missing data for %d' % scene['image_index'])\n out_q.put({worker_id: dialogs})", "def main(args=sys.argv):\n try:\n # Set up logging.\n logging.basicConfig(level=logging.WARN)\n work_dir = args[1]\n assert os.path.exists(work_dir), \"First argument to lsf_runner.py must be a directory that exists\"\n do_work_on_compute_node(work_dir)\n except Exception as exc:\n # Dump encoded data that we will try to fetch using mechanize\n print(exc)\n raise" ]
[ "0.53672075", "0.52763987", "0.525807", "0.52311337", "0.5166696", "0.5102056", "0.5101873", "0.5017907", "0.49991044", "0.49972054", "0.4982134", "0.49532136", "0.49340105", "0.49315396", "0.4894124", "0.48782435", "0.4870958", "0.4849824", "0.4847422", "0.4843554", "0.4837474", "0.48128763", "0.4774373", "0.4767776", "0.47441375", "0.4736143", "0.47261313", "0.47228122", "0.47224858", "0.47189862" ]
0.70788133
0
Checks the queue that the worker uses to talk to us
def check_ack_queue(self):
        try:
            while True:
                ack = self.ack_queue.get_nowait()
                self.handle_ack(ack)
        except queue.Empty:
            pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_queue(self):\n self._process_incoming_queue_messages()\n self._root.after(200, self._check_queue)", "def testQueueMsg(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(4)\n self.assertFalse( self.mgr.msgQueue.empty() )\n self.assertTrue(self.mgr.isGoproBusy)", "async def _check_queue(self, ctx: commands.Context) -> None:\n\n try:\n if not ctx.voice_client or not ctx.voice_client.is_connected():\n return\n\n queue = self.queue[ctx.guild.id]\n player = await queue.get_next_player(self.youtube)\n\n if player is None:\n await self.cleanup(None, ctx.guild)\n await self.call_event(\"on_queue_end\", ctx)\n\n player.source = (\n discord.PCMVolumeTransformer(\n discord.FFmpegPCMAudio(\n player.stream_url, **FFMPEG_OPTIONS, executable=self.executable\n ),\n queue.volume,\n )\n if not self.opus_players\n else discord.FFmpegOpusAudio(\n player.stream_url, **FFMPEG_OPTIONS, executable=self.executable\n )\n )\n\n ctx.voice_client.play(\n player.source,\n after=lambda x: create_task(self.bot.loop, self._check_queue(ctx)),\n )\n\n player.start_timestamp = time.time()\n\n queue.played_history.append(player)\n queue.vote_skips = []\n await self.call_event(\"on_play\", ctx, player)\n\n except (IndexError, KeyError):\n await self.cleanup(None, ctx.guild)\n await self.call_event(\"on_queue_end\", ctx)", "def check_queue(st):\n\n logging.info(\"Checking queue...\")\n check_time = time.time()\n n_waiting_jobs = BatchPlugin.poll_queue()\n\n if n_waiting_jobs is not None:\n\n # Correction factor\n corr = st['vms_allegedly_running'] * cf['elastiq']['n_jobs_per_vm']\n logging.info(\"Jobs: waiting=%d | allegedly running=%d | considering=%d\" % \\\n (n_waiting_jobs, corr, n_waiting_jobs-corr))\n n_waiting_jobs -= corr\n\n if n_waiting_jobs > cf['elastiq']['waiting_jobs_threshold']:\n if st['first_seen_above_threshold'] != -1:\n if (check_time-st['first_seen_above_threshold']) > cf['elastiq']['waiting_jobs_time_s']:\n # Above threshold time-wise and jobs-wise: do something\n logging.info(\"Waiting jobs: %d (above threshold of %d for more than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n list_ok = scale_up( math.ceil(n_waiting_jobs / float(cf['elastiq']['n_jobs_per_vm'])), valid_hostnames=st['workers_status'].keys(), vms_allegedly_running=st['vms_allegedly_running'] )\n for inst in list_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n st['first_seen_above_threshold'] = -1\n else:\n # Above threshold but not for enough time\n logging.info(\"Waiting jobs: %d (still above threshold of %d for less than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n else:\n # First time seen above threshold\n logging.info(\"Waiting jobs: %d (first time above threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = check_time\n else:\n # Not above threshold: reset\n logging.info(\"Waiting jobs: %d (below threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = -1\n else:\n logging.error(\"Cannot get the number of waiting jobs this time, sorry\")\n\n return {\n 'action': 'check_queue',\n 'when': time.time() + cf['elastiq']['check_queue_every_s']\n }", "def 
testQueueisEmpty(self):\n self.mgr.isGoproBusy = True\n self.mgr.processMsgQueue()\n self.assertFalse( self.mgr.isGoproBusy )", "def _chk_empty(self, queue, receiver):\n try:\n msg = receiver.fetch(timeout=0)\n self.assert_(False, \"Queue \\\"%s\\\" not empty: found message: %s\" % (queue, msg))\n except Empty:\n pass", "def is_alive(self):\n try:\n stdout, stderr = self.run(0, \"rabbitmqctl\", \"list_queues\")\n for lines in stdout, stderr:\n for line in lines:\n if \"no_exists\" in line:\n return False\n return True\n except Exception:\n return False", "def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False", "def monitor_queue(self):\n\n while True:\n job = self.queue.next()\n if job:\n # print(\"found %s\" % (job.job_id))\n\n job_name = job.payload[\"job_name\"]\n\n if job_name in self.mul_func_map:\n\n t = self.mul_func_map[job_name]\n p = multiprocessing.Process(target=t, args=(job,))\n p.daemon = True\n p.start()\n\n elif job_name in self.th_func_map:\n\n t = self.th_func_map[job_name]\n # create a thread to process the job\n p = threading.Thread(target=t, args=(job,))\n p.daemon = True\n # start the thread, going into the worker function\n p.start()\n\n elif job_name in self.fk_func_map:\n t = self.fk_func_map[job_name]\n if not os.fork():\n os.setsid()\n t(job)\n exit()\n else:\n # jobs in this queue that are unknown are presently being skipped\n # however they could probably get moved to a 'dead letter' queue\n # for closer examination\n print(\"unknown job name %s, skipping\" % (job_name))\n\n # throttle so that other worker subscribers get a chance\n time.sleep(self.queue_delay)\n else:\n time.sleep(self.poll_delay)\n\n # prints the number of threads\n # print len(threading.enumerate())", "def test_queue_worker_needs_a_queue(self):\n with pytest.raises(ValueError):\n MinimalQueueWorker(None)", "def check_queue_exists(self, queue_name):\n try:\n yield from self.queue_declare(queue_name, passive=True)\n except asyncio.exceptions.ChannelClosed:\n return False\n return True", "def queueOn() -> None:\n\t\tLogging.enableQueue = Logging.queueSize > 0", "def checkQueue( self ):\n if self.queue:\n yield self.writeToSerial( self.queue.pop( 0 ) )\n else:\n self.free = True", "def in_queue(self):\n if self.get_db('jobid') is None:\n log.debug('jobid not found for calculation.')\n return False\n else:\n # get the jobid\n jobid = self.get_db('jobid')\n # see if jobid is in queue\n _, jobids_in_queue, _ = getstatusoutput('qselect',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n if str(jobid) in jobids_in_queue.split('\\n'):\n # get details on specific jobid in case it is complete\n status, output, err = getstatusoutput(['qstat', jobid],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n lines = output.split('\\n')\n fields = lines[2].split()\n job_status = fields[4]\n if job_status == 'C':\n return False\n else:\n return True\n else:\n return False", "def is_queued(self):\n qstat = self._grep_qstat('queued')\n if qstat:\n return True\n return False", "def check_sync(self):\r\n if not self.awaiting_sync:\r\n return True\r\n self.check_ack_queue()\r\n return not self.awaiting_sync", "def send_and_check(self, pkt, rss_queue):\n self.tester.scapy_append('sendp(%s, iface=\"%s\")' % (pkt, self.tester_itf))\n self.tester.scapy_execute()\n time.sleep(2)\n queue = self.get_queue_number()\n self.verify(queue in rss_queue, \"the packet doesn't enter the expected RSS queue.\")\n return queue", "def HasPendingCommands(self):\n\t\n return 
self.queue.qsize() > 0", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def report_queue_status(self):\n raise NotImplementedError", "def any(self) -> bool:\n return len(self.queue) > 0", "def check_in(self):\n etree = self._encapsulate_request(self._generate_ping())\n self.zmq_scheduler_request_queue.put_nowait(etree)", "def test_ipcrm_queues_not_isntalled(): # pragma: windows\n IPCComm.ipcrm_queues()", "def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')", "def check_plugin(work_queue, result_queue):\n while work_queue.qsize():\n host = work_queue.get()\n result = commands.getoutput(plugin_cmd + \" -H \" + host)\n result_queue.put([host, result])", "def checkQueue( self ):\n if self.queue:\n print 'clearing queue...(%d items)' % len( self.queue )\n yield self.writeToSerial( *self.queue.pop( 0 ) )\n else:\n print 'queue free for writing'\n self.free = True", "def check_packet_queue(self, queue, out):\n time.sleep(2)\n if queue == \"all\":\n self.verify(\"Queue= 0\" in out and \"Queue= 1\" in out and \"Queue= 2\" in out and \"Queue= 3\" in out,\n \"There is some queues doesn't work.\")\n elif queue == \"0\":\n self.verify(\"Queue= 0\" in out and \"Queue= 1\" not in out and \"Queue= 2\" not in out and \"Queue= 3\" not in out,\n \"RSS is enabled.\")\n lines = out.split(\"\\r\\n\")\n reta_line = {}\n queue_flag = 0\n packet_sumnum = 0\n # collect the hash result and the queue id\n for line in lines:\n line = line.strip()\n if queue_flag == 1:\n result_scanner = r\"RX-packets:\\s?([0-9]+)\"\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(line)\n packet_num = m.group(1)\n packet_sumnum = packet_sumnum + int(packet_num)\n queue_flag = 0\n elif line.strip().startswith(\"------- Forward\"):\n queue_flag = 1\n elif line.strip().startswith(\"RX-packets\"):\n result_scanner = r\"RX-packets:\\s?([0-9]+)\"\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(line)\n packet_rec = m.group(1)\n\n self.verify(packet_sumnum == int(packet_rec) == 128, \"There are some packets lost.\")", "def has_queue(self):\n return (os.path.exists(self._queue_path) and\n os.path.getsize(self._queue_path) > 0)", "def _check_comm_reply(self):\n if len(self._pending_comms) == 0:\n return\n for comm in self._pending_comms.values():\n self._notify_comm_ready(comm)\n self.kernel.io_loop.call_later(1, self._check_comm_reply)" ]
[ "0.7696136", "0.7004301", "0.6914097", "0.68644655", "0.682932", "0.6723008", "0.66928446", "0.6659868", "0.6654834", "0.65975434", "0.65645915", "0.6548608", "0.64988154", "0.649025", "0.6477076", "0.6465446", "0.64649564", "0.63961077", "0.6396043", "0.6396043", "0.6395173", "0.63763446", "0.6368652", "0.63552207", "0.6324094", "0.6319767", "0.6303934", "0.6279563", "0.62774336", "0.62731385" ]
0.7153081
1
Checks if the syncing process is complete
def check_sync(self):
        if not self.awaiting_sync:
            return True
        self.check_ack_queue()
        return not self.awaiting_sync
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_done(self):\n pass", "def is_done():\n return False", "def has_finished():", "def is_finished(self):\n self.refresh()\n return self.progress.remaining_budget is not None and self.progress.remaining_budget <= 0", "def done(self):\n return False", "def check_finish(self):\r\n return not self.proc.is_alive()", "def in_progress(self):\n return False", "def IsCompleted(self) -> bool:", "def IsCompleted(self) -> bool:", "def IsCompleted(self) -> bool:", "def complete(self):\r\n if self.scheduler_launch_time == INVALID_TIME:\r\n print \"Missing task scheduler launch time\"\r\n return False\r\n if self.node_monitor_launch_time == INVALID_TIME:\r\n\t print \"Missing task node monitor launch time\"\r\n\t return False\r\n\tif self.completion_time == INVALID_TIME:\r\n\t print \"Missing task completion time\"\r\n\t return False\r\n\tif self.clock_skew == INVALID_TIME_DELTA:\r\n print \"Missing task clock skew\"\r\n\t return False\r\n\treturn True", "def done(self) -> bool:", "def is_ready(self):\n return self.prep_job.is_done()", "def is_complete(self) -> bool:\r\n return path.exists(self._has_completed_path)", "def _isDone(self) -> bool:\n pass", "def is_complete(self):\n status = self.get_status()\n return status[\"status\"] == 4", "def is_done(self):\n\n return not self.thread.is_alive()", "def is_complete(self):\n pass", "def syncDone (self) :\r\n self.ongoing_sync_count -= 1", "def is_done(self):\n return time.time() - self._start > self._time", "def isFinished():", "def isFinished():", "def isFinished():", "def isFinished(self):\n return False", "def is_complete(self):\n with self.__lock:\n return self.__complete", "async def wait_until_done(self) -> None:\n ...", "def isComplete(self):\n return self.bytesToRead == 0", "def finished(self):\n return False", "def _check_if_completed(self):\n if self.completed:\n self._result = self._complete()\n elif self.timed_out:\n logger.debug(f\"Use case {type(self.use_case).__name__} \"\n f\"timed out after taking more than \"\n f\"{self.use_case.timeout} seconds.\")\n self._result = self._complete(timed_out=True)\n self._execution_counter += 1", "def waitUntilFinished():" ]
[ "0.7570702", "0.7508425", "0.745692", "0.7256559", "0.72279084", "0.7211759", "0.71528757", "0.7147975", "0.7147975", "0.7147975", "0.7139429", "0.71376795", "0.71324706", "0.7129093", "0.71033275", "0.7101565", "0.71015084", "0.7029679", "0.702156", "0.7021484", "0.7021319", "0.7021319", "0.7021319", "0.6973113", "0.6972429", "0.6942076", "0.6937561", "0.69328487", "0.69303805", "0.6918741" ]
0.75308084
1
Waits for the finish process to complete
def wait_finish(self):
        self.proc.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def wait_until_done(self) -> None:\n ...", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def wait_complete(self):\n self.join()", "def finish(self):\r\n self.start_finish()\r\n self.wait_finish()", "def wait_until_finished(self):\n for processor in self._processors.values():\n while not processor.done:\n time.sleep(0.1)", "def wait_to_complete(self, timeout: float = 5) -> None:\n if self.proc.poll() is not None: # type: ignore\n return\n\n start_time = time.time()\n\n while start_time + timeout > time.time() and self.proc.poll() is None: # type: ignore\n time.sleep(0.001)\n\n if self.proc.poll() is None: # type: ignore\n self.terminate(force=True)\n self.wait()\n self.exitstatus = \"Terminated!\" # type: ignore", "def wait_completion(self):\r\n self.tasks.join()", "def Finish(self):\n\t\n self.queue.join()", "def wait(self):\r\n self.jobs.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_completion(self):\n self.tasks.join()", "def wait_all_process_done(self) -> None:\n while len(self.process_queue) > 0:\n self.check_process_done()", "def wait(self):\n self.queue.join()", "def wait_for_completion(self):\n self.logger.debug(\"Waiting for completion\")\n finished = False\n while not finished:\n if self._all_workers_are_idle():\n self.logger.info(\"Finished\")\n finished = True", "def waitFinish(self):\n while self.job_queue_count > 0:\n sleep(0.5)\n\n # If there was a failure, we don't want to wait for possibly halted threads\n # while performing a 'join'. So just exit now with a failure.\n if self.failure:\n sys.exit(1)\n\n self.worker_pool.close()\n self.worker_pool.join()\n self.status_pool.close()\n self.status_pool.join()", "def wait(self):\n self.Popen.wait()", "def wait(self, timeout=0):\n if timeout:\n self._finished.wait(timeout=timeout)\n else:\n self._finished.wait()", "def wait_done(self, timeout=None):\n self._cv_done.acquire()\n if not self._done:\n self._cv_done.wait(timeout)\n self._cv_done.release()", "def wait(self):\n with self.__lock:\n while not self.__complete:\n self.__lock.wait()", "def wait_until_done(self, timeout=10.0):\r\n cfunc = lib_importer.windll.DAQmxWaitUntilTaskDone\r\n if cfunc.argtypes is None:\r\n with cfunc.arglock:\r\n if cfunc.argtypes is None:\r\n cfunc.argtypes = [lib_importer.task_handle, ctypes.c_double]\r\n\r\n error_code = cfunc(self._handle, timeout)\r\n check_for_error(error_code)" ]
[ "0.7736579", "0.76388836", "0.76388836", "0.76388836", "0.76388836", "0.7635715", "0.73941064", "0.7175193", "0.7058205", "0.6982526", "0.69299793", "0.6926044", "0.69219345", "0.69219345", "0.69219345", "0.69219345", "0.69219345", "0.69219345", "0.69219345", "0.69219345", "0.69219345", "0.68573534", "0.68500286", "0.68455917", "0.6824471", "0.67012197", "0.6691105", "0.66500866", "0.66371316", "0.65845317" ]
0.82706493
0
Notifies this worker that it should render the specified frame number
def send(self, frame_num):
        self.send_queue.put(('img', frame_num))
        self.in_queue += 1
        self.num_since_sync += 1
        self.last_frame = frame_num
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_pre_render(self, event, signal):\n t = ppb.get_time() - self.start_time\n self.frames += 1\n print(f\"Frame {self.frames} rendered at {t}\")", "def frame_number(self, frame_number):\n\n self._frame_number = frame_number", "def render_frame(self, scene_state, scene_view, frame):\n self._scene.update_state(scene_state)\n self._scene.update_view(scene_view)\n\n color, depth, mask = self._renderer.render_frame(\n self._scene, *scene_view.viewport)\n\n if self._callback_fn is not None:\n # pass result to a callback function\n self._callback_fn(color, depth, mask)\n return False\n\n # pass result to bullet\n if color is not None:\n frame.color_img[:] = color\n if depth is not None:\n frame.depth_img[:] = depth\n if mask is not None:\n frame.mask_img[:] = mask\n return True", "def _refresh_render(self):\n current_frame = self.frame\n self.frame = int(1E6)\n self.frame = current_frame", "def render(self, time: float, frame_time: float):\n raise NotImplementedError(\"Example:render not implemented\")", "def onMessageFrameBegin(self, length):", "def _append_frame(self, observation):\n if self._counter % self._record_every == 0:\n self._frames.append(self._render_frame(observation[self.visual_key]))", "def renderFrame(self, scenename, frame, imageType):\n\n\t\timport bpy\n\t\tlib = bpy.libraries.load(self.name)\n\t\tprint self.name,' loaded'\n\t\tscn = lib.scenes.link(scenename)\n\t\tcontext = scn.getRenderingContext()\n\t\tprint 'remote render start', frame\n\t\tcontext.displayMode = 0 # to prevent an additional render window popping up\n\t\tcontext.currentFrame(frame)\n\n\t\t# remember to restore later!\n\t\ts,context.sFrame = context.sFrame,frame\n\t\te,context.eFrame = context.eFrame,frame\n\t\toldImagetype = context.imageType\n\t\toldRenderPath = context.getRenderPath()\n\n\t\tcontext.imageType = imageType\n\t\tcontext.setRenderPath(configurer.get('ServerRenderPath'))\n\t\tcontext.renderAnim()\n\t\tself.result = context.getFrameFilename()\n\n\t\tprint 'remote render end frame', frame\n\n\t\t# Restore changed settings\n\t\tcontext.sFrame,context.eFrame = s,e\n\t\tcontext.imageType = oldImagetype\n\t\tcontext.setRenderPath(oldRenderPath)\n\n\t\treturn 'render finished'", "def __send_frame(self, frame_nb, msg):\n if frame_nb in self.frames:\n retry = self.frames[frame_nb]['retry'] + 1\n else:\n retry = 0\n if retry > 5:\n self.log.error('SEND:too many retry (%d) for frame \"%s\"' % (retry, msg))\n self.frames.pop(frame_nb)\n return \n self.frames[frame_nb] = {'msg': msg, 'time': time.time(), 'retry': retry}\n s = \"!%d:%s\\n\" % (self.frame_nb, msg)\n self.log.info('SEND:%s' % s)\n self.send(s.encode())", "def render(self, frame: Frame):\n\n cv2.imshow(winname=self.title, mat=frame)\n cv2.waitKey(delay=self.delay)\n\n if self.step:\n while cv2.waitKey(delay=0) != self.step_key:\n continue", "def frame_messenger(subscriber_obj):\n channel = get_channel(subscriber_obj)\n\n frame = get_frame(subscriber_obj)\n scenes = SimulationScene.objects.filter(simulation_id=get_sim_id(subscriber_obj))\n\n if frame < len(scenes):\n send_save_to_channel(channel, simulation_id_to_json(scenes[frame].id))\n set_frame(subscriber_obj, frame + 1)\n\n else:\n disconnect_subscriber(channel)", "def _render_callback(self, _sim, _viewer):\n pass", "def render(self):\n fmt = 'B' + 'B' * len(self.frame)\n self.sendPacket(6, struct.pack(fmt, self.start_code, *self.frame))", "def dispatch_frame(self, frame):", "def next_frame(self, framenumber):\r\n \r\n if Animate==True:\r\n 
self.__text0.set_text(\"f={:4d}\".format(framenumber))\r\n patches = [self.__text0]\r\n timeallball=[]\r\n for ball in self.__ballList:\r\n time1ball=[]\r\n balls=self.__ballList.copy()\r\n balls.append(self.__cont)\r\n balls.remove(ball)\r\n for otherball in balls:\r\n othertime=ball.time_to_collision(otherball)\r\n if isinstance(othertime,int)==True or isinstance(othertime,float)==True:\r\n time1ball.append(othertime)\r\n if othertime==0:\r\n ball.collide(otherball)\r\n break\r\n timeallball.append(min(time1ball))\r\n if min(time1ball)==0:\r\n break\r\n for b in self.__ballList:\r\n b.move(min(timeallball))\r\n patches.append(b.get_patch())\r\n Gas.timepassed+=min(timeallball)\r\n \r\n #check to see if KE/momentum conserved, and see pressure change\r\n \"\"\"\r\n if Gas.timepassed>5 and Gas.timepassed<10 :\r\n print('Kinetic energy is ', self.kinetic_en())\r\n print('Total momentum is ', self.momentum())\r\n print(self.pressure())\r\n \"\"\" \r\n return patches", "def setFrame(self, frameNum, callback=None): \n if(self.isLoaded() == False):\n return\n self.video.set(1, frameNum)\n self.currentFrameNumber = int(self.video.get(cv2.CAP_PROP_POS_FRAMES))\n self.read()\n if callback != None:\n callback()", "def __send_msg(self, msg):\n self.frame_nb += 1\n self.__send_frame(self.frame_nb, msg)", "def change_frame(self, frame):\r\n pass", "def onFrameUpdated(self):\n pass", "def dummy_videoframe_handler(frame, userdata=None):\n sys.stdout.write('Got frame %d\\r' % userdata.count())\n sys.stdout.flush()\n userdata.increment()", "def render(self, time: float, frame_time: float):\n self.example.render(time, frame_time)", "def runFrame(self):\n self._drawFrame(self._advanceTime())", "def frame_idx(self) -> int:\n pass", "def status_update(frame_number, tot_frames):\r\n if frame_number == 1:\r\n sys.stdout.write(\"Starting analysis of %d frames...\\n\" %tot_frames)\r\n sys.stdout.flush()\r\n\r\n if frame_number % 100 == 0:\r\n sys.stdout.write(\"%d\" %frame_number)\r\n sys.stdout.flush()\r\n elif frame_number % 10 == 0:\r\n sys.stdout.write(\".\")\r\n sys.stdout.flush()\r\n\r\n if frame_number == tot_frames:\r\n print (\"End of video reached successfully.\")", "def _update_anim(self):\n if self._skip_frames > 1:\n # Do not render while _skip_frames is > 1\n self._skip_frames -= 1\n else:\n # Render frame\n self._visualization.taskMgr.step()\n # Calculate number of frames that need to be skipped\n self._skip_frames = int(1 / self._fps / self._dt)", "def show_frame(self, frame_number=None):\n try:\n import cv2\n except (ImportError, ModuleNotFoundError):\n logger.error(\n 'Import Error! Cant import cv2. Annotations operations will be limited. import manually and fix errors')\n raise\n\n try:\n import tkinter\n except ImportError:\n logger.error(\n 'Import Error! Cant import tkinter. Annotations operations will be limited. import manually and fix errors')\n raise\n\n try:\n import PIL.ImageTk\n import PIL.Image\n except ImportError:\n logger.error(\n 'Import Error! Cant import PIL.ImageTk/PIL.ImageTk. Annotations operations will be limited. 
'\n 'import manually and fix errors')\n raise\n ret, frame = self.vid.get_frame(frame_number)\n if ret:\n if self.show_annotations:\n frame = self.get_annotations(frame)\n if self.show_frame_num:\n text = '%d - %d' % (self.vid.frame_number, np.round(self.annotations_timestamp * self.vid.fps))\n frame = cv2.putText(frame,\n text=text,\n org=(100, 100),\n color=(0, 0, 255),\n fontFace=cv2.FONT_HERSHEY_DUPLEX,\n fontScale=2,\n thickness=3)\n\n self.photo = PIL.ImageTk.PhotoImage(\n image=PIL.Image.fromarray(frame).resize((self.window_width, self.window_height)),\n master=self.canvas)\n self.canvas.create_image(0, 0, image=self.photo, anchor=tkinter.NW)\n # set timestamp\n self.current_frame_text.configure(text='Frame number:\\n%d' % self.vid.frame_number)\n self.current_frame_text.grid(sticky=\"W\", row=12, column=0, columnspan=10)\n millis = int(1000 * self.vid.frame_number / self.vid.fps)\n seconds = (millis / 1000) % 60\n minutes = int((millis / (1000 * 60)) % 60)\n hours = int((millis / (1000 * 60 * 60)) % 24)\n self.frame_timestamp_text.configure(\n text='Frame timestamp:\\n{:02d}:{:02d}:{:.3f}'.format(hours, minutes, seconds))\n self.frame_timestamp_text.grid(sticky=\"W\", row=8, column=0, columnspan=10)", "def frame(request, frame_number):\n rider = models.Rider.objects.get(frame_number=frame_number)\n waypoints = rider.waypoints\n elapsed_times = [models.RiderTimeDelta(waypoints[0].timestamp, w.timestamp) for w in waypoints if w.transition==models.Waypoint.TRANSITION_ARRIVAL]\n\n for w in waypoints:\n if w.transition == models.Waypoint.TRANSITION_ARRIVAL:\n w.elapsed = models.RiderTimeDelta(waypoints[0].timestamp, w.timestamp)\n w.name = models.Control.control_at(w.kilometers).name\n\n template = env.get_template(\"frame.html\")\n rendered = template.render(dict(rider=rider,\n waypoints=[w for w in waypoints if w.transition == models.Waypoint.TRANSITION_ARRIVAL]\n ))\n\n return HttpResponse(rendered)", "def beginMessageFrame(self, length):", "def update(self):\n self._num_frames += 1", "def _process(self, frame, **kwargs):\n raise NotImplementedError()" ]
[ "0.6239475", "0.6121523", "0.57494473", "0.5613775", "0.55964965", "0.5589799", "0.5520976", "0.54862803", "0.54144496", "0.5320689", "0.53091556", "0.5305608", "0.52953815", "0.5295191", "0.5221626", "0.52124155", "0.5184965", "0.5183667", "0.51628983", "0.5145614", "0.51185447", "0.5062927", "0.50568527", "0.50395155", "0.50314695", "0.5024037", "0.50132", "0.4997588", "0.49758568", "0.49572176" ]
0.6431125
0
If this worker has fewer than target_in_queue items in its queue, then we send the specified frame number to the worker and return true. Otherwise, we return false.
def offer(self, frame_num, target_in_queue) -> bool:
        if self.in_queue < target_in_queue:
            self.send(frame_num)
            return True
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def any(self) -> bool:\n return len(self.queue) > 0", "def isFull(self):\r\n if (len(self.queue) == self.maxlen):\r\n return True\r\n else:\r\n return False", "def isFull(self):\n return len(self.queue) == self.size", "def queue_progress(self):\r\n ret = True\r\n if set(self.previous_queue_state) == set(self.cells_to_process) and \\\r\n self.previous_mines_left == self.mines_left:\r\n self.repetitions += 1\r\n else:\r\n self.previous_queue_state = self.cells_to_process.copy()\r\n self.previous_mines_left = self.mines_left\r\n self.repetitions = 0\r\n if self.repetitions > 2 * len(self.cells_to_process) or \\\r\n not self.cells_to_process:\r\n ret = False\r\n return ret", "def more(self):\n # return True if there are still frames in the queue. If stream is not stopped, try to wait a moment\n tries = 0\n while self.Q.qsize() == 0 and not self.stopped and tries < 5:\n time.sleep(0.1)\n tries += 1\n\n return self.Q.qsize() > 0", "def has_queued_passes(self, classification):\n return len(self.pass_queues[classification]) > 0", "def can_fit_more(self):\n\n return len(self._requeue_jobs) < MAX_NUM", "def is_queued(self):\n qstat = self._grep_qstat('queued')\n if qstat:\n return True\n return False", "def active(self):\n return len(self.queue) > 0", "def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False", "def has_queue(self):\n return (os.path.exists(self._queue_path) and\n os.path.getsize(self._queue_path) > 0)", "def check_queues(self) -> int:\r\n\r\n nframes = 0\r\n\r\n for queue in self.receive_queues:\r\n if not queue.empty():\r\n nframes += 1\r\n frame, img_bytes = queue.get_nowait()\r\n\r\n if frame < self.next_frame:\r\n raise ValueError('received frame we already processed! '\r\n + f'got {frame}, at {self.next_frame}')\r\n if frame in self.ooo_frames:\r\n raise ValueError(f'received duplicate frame: {frame}')\r\n\r\n self.ooo_frames[frame] = img_bytes\r\n if len(self.ooo_frames) > self.max_ooo_frames:\r\n raise ValueError('exceeded maximum frame cache (now have '\r\n + f'{len(self.ooo_frames)} frames waiting)')\r\n\r\n return nframes", "def is_queued(self):\r\n return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map)", "def inDownloadQueue(self, _src):\n for dl in self.downloadQueue:\n if _src in dl['src']:\n return True\n return False", "def has_pending_packets_to_be_sent(self):\n return self.num_packets != 0", "def can_go_forward(self):\n return self._pointer + 1 < len(self._items)", "def has_next_batch(self):\n return self.current_index + self.batch_size <= self.count", "def __contains__(self, item):\n for _, _, _, cur_item in self.queue:\n if cur_item == item:\n return True\n return False", "def __verify_queue_item(self, queue_item):\n\n browser = BrowserHelper.request(queue_item)\n return browser and len(browser.window_handles) >= 2", "async def queueloop(self, ctx: commands.Context) -> Optional[bool]:\n\n queue = self.queue[ctx.guild.id]\n\n queue.loop = (\n Loops.QUEUE_LOOP\n if self.queue[ctx.guild.id].loop != Loops.QUEUE_LOOP\n else Loops.NO_LOOP\n )\n\n if queue.loop == Loops.QUEUE_LOOP:\n queue.queue_loop_start = queue.pos\n\n return queue.loop == Loops.QUEUE_LOOP", "def isFinished(self, uniqueHandler=None):\n\n # FIXME: The following two lines of codes have been a temporary fix for timing issues\n # on the collections of jobs in the jobHandler. This issue has emerged when\n # performing batching. 
It is needed to review the relations between jobHandler\n # and the Step when retrieving multiple jobs.\n # An issue has been opened: 'JobHandler and Batching #1402'\n\n with self.__queueLock:\n # If there is still something left in the queue, we are not done yet.\n if len(self.__queue)>0 or len(self.__clientQueue)>0:\n return False\n\n # Otherwise, let's look at our running lists and see if there is a job\n # that is not done.\n for run in self.__running+self.__clientRunning:\n if run:\n if uniqueHandler is None or uniqueHandler == run.uniqueHandler:\n return False\n # Are there runs that need to be claimed? If so, then I cannot say I am done.\n numFinished = len(self.getFinishedNoPop())\n if numFinished != 0:\n return False\n\n return True", "def isEmpty(self):\r\n if (len(self.queue) >= 1):\r\n return False\r\n else:\r\n return True", "def enQueue(self, value):\r\n if (len(self.queue) >= self.maxlen):\r\n return False\r\n else:\r\n self.queue.append(value)\r\n return True", "def deQueue(self) -> bool:\n if self.isEmpty():\n return False\n else:\n self.front = (self.front + 1) % self.maxlen\n self.size -= 1\n return True", "def do_work(self):\r\n recv = self.check_queues()\r\n proc = 1 if self.process_frame() else 0\r\n for perf in self.perfs:\r\n perf.post_work(recv, proc, len(self.ooo_frames))\r\n return recv > 0 or proc > 0", "def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0", "def __remove_request_from_queue(self, sender):\n with self.__queue.mutex:\n for x in self.__queue.queue:\n if x[1] == sender:\n self.__queue.queue.remove(x)\n return True\n return False", "def in_queue(self):\n if self.get_db('jobid') is None:\n log.debug('jobid not found for calculation.')\n return False\n else:\n # get the jobid\n jobid = self.get_db('jobid')\n # see if jobid is in queue\n _, jobids_in_queue, _ = getstatusoutput('qselect',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n if str(jobid) in jobids_in_queue.split('\\n'):\n # get details on specific jobid in case it is complete\n status, output, err = getstatusoutput(['qstat', jobid],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if status == 0:\n lines = output.split('\\n')\n fields = lines[2].split()\n job_status = fields[4]\n if job_status == 'C':\n return False\n else:\n return True\n else:\n return False", "def noqueue(self) -> bool:\n return not self.orders", "def batched(self) -> bool:\n return False" ]
[ "0.6630116", "0.6628836", "0.65659404", "0.654566", "0.62802523", "0.62745416", "0.6247163", "0.6159139", "0.6119754", "0.6000629", "0.59391946", "0.59151196", "0.5913453", "0.5898343", "0.5897065", "0.5881539", "0.58422446", "0.5802772", "0.5802048", "0.5798299", "0.5796814", "0.5794155", "0.5792394", "0.5790987", "0.57706004", "0.5763532", "0.5757129", "0.573595", "0.5701107", "0.5695659" ]
0.8310253
0
Produces a video with the given frame rate (specified as milliseconds per frame), using the given performance settings. If the performance settings are not provided, reasonable defaults are used. Returns the final performance settings, which may have changed over the course of video production. It may improve performance to reseed with the settings that this ended with.
def produce(frame_gen: fg.FrameGenerator, fps: float, dpi: typing.Union[int, float], bitrate: typing.Union[int, float], outfile: str, settings: PerformanceSettings = None, time_per_print: float = 15.0, logger: logging.Logger = None) -> PerformanceSettings: try: mp.set_start_method('spawn') except RuntimeError: pass if settings is None: settings = PerformanceSettings() if logger is None: logger = logging.getLogger('pympanim.worker') logger.setLevel(logging.DEBUG) logging.basicConfig( format='%(asctime)s [%(filename)s:%(lineno)d] %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p') ms_per_frame = 1000 / fps num_frames = int(frame_gen.duration / ms_per_frame) logger.info('Settings: %0.1f seconds; %d frames at %d fps with %d workers...', frame_gen.duration / 1000, num_frames, fps, settings.num_workers) workers = [] paused_workers = [] stopping_workers = [] # closed when we process their last frame perf = imgst.ISRunningAveragePerfHandler(settings.window_size) isticher = imgst.ImageSticher(frame_gen.frame_size, dpi, bitrate, fps, outfile, settings.ooo_error) isticher.perfs.append(perf) for i in range(settings.num_workers): worker = _spawn_worker(frame_gen, ms_per_frame, i) isticher.register_queue(worker.img_queue) workers.append(worker) worker_counter = settings.num_workers for worker in workers: worker.start_sync() isticher.start() all_synced = False while not all_synced: all_synced = True for worker in workers: if not worker.check_sync(): all_synced = False time.sleep(0.001) old_perf = None cur_optim = None # magical string values frame_batch_dyn_min = settings.frame_batch_min frame_batch_dyn_max = settings.frame_batch_max frame_batch_min_next_decay = float('inf') frame_batch_max_next_decay = float('inf') next_optim = time.time() + settings.perf_delay + settings.window_size next_progress = time.time() + max(settings.perf_delay + settings.window_size, time_per_print) cur_frame = 0 syncing = False while cur_frame < num_frames: if not syncing: frames_per_worker_since_sync = 0 for worker in workers: worker.check_ack_queue() while worker.offer(cur_frame, settings.worker_queue_size): cur_frame += 1 frames_per_worker_since_sync = max( frames_per_worker_since_sync, worker.num_since_sync) if cur_frame >= num_frames: break for i in range(settings.frame_batch_amount - 1): worker.send(cur_frame) cur_frame += 1 frames_per_worker_since_sync = max( frames_per_worker_since_sync, worker.num_since_sync) if cur_frame >= num_frames: break if cur_frame >= num_frames: break if cur_frame >= num_frames: break if cur_frame >= num_frames: break if frames_per_worker_since_sync > settings.frames_per_sync: for worker in workers: worker.start_sync() syncing = True else: syncing = False for worker in workers: if not worker.check_sync(): syncing = True break for i in range(settings.work_per_dispatch): isticher.do_work() while len(isticher.ooo_frames) > settings.ooo_cap: isticher.do_work() for i in range(len(stopping_workers) - 1, 0, -1): worker = stopping_workers[i] if worker.check_finish() and isticher.next_frame > worker.last_frame: worker.check_sync() # cleanup just in case isticher.remove_queue(worker.img_queue) worker.close() stopping_workers.pop(i) thetime = time.time() if thetime >= next_progress: next_progress = thetime + time_per_print recpsec, procpsec = perf.mean() frames_to_proc = num_frames - isticher.next_frame time_left_sec = frames_to_proc / procpsec if procpsec > 0 else float('inf') logger.info('[%0.1f secs remaining] Generating %0.2f images/sec and ' # pylint: disable=logging-not-lazy + 'processing %0.2f 
images/sec', time_left_sec, recpsec, procpsec) if thetime >= next_optim: next_optim = thetime + settings.perf_delay + settings.window_size if frame_batch_min_next_decay < thetime: frame_batch_dyn_min -= 1 frame_batch_min_next_decay = ( float('inf') if frame_batch_dyn_min <= settings.frame_batch_min else thetime + settings.frame_batch_dyn_min_decay_time ) if frame_batch_max_next_decay < thetime: frame_batch_dyn_max += 1 frame_batch_max_next_decay = ( float('inf') if frame_batch_dyn_max >= settings.frame_batch_max else thetime + settings.frame_batch_dyn_max_decay_time ) recpsec, procpsec = perf.mean() if old_perf is not None and cur_optim is not None: oldrecpsec, oldprocpsec = old_perf # pylint: disable=unpacking-non-sequence, unused-variable if cur_optim == 'reduce_frame_batch_amount': relative_performance = 0 if procpsec == 0 else oldprocpsec / procpsec if relative_performance > settings.frame_batch_max_badness: # keep the change logger.debug( 'found better setting: frame_batch_amount=%d (rel performance: %0.3f)', settings.frame_batch_amount, relative_performance) frame_batch_dyn_max = settings.frame_batch_amount frame_batch_max_next_decay = ( thetime + settings.frame_batch_dyn_max_decay_time ) else: # revert the change # we're evil scientists so we dont report null results settings.frame_batch_amount += 1 frame_batch_dyn_min = settings.frame_batch_amount frame_batch_min_next_decay = ( thetime + settings.frame_batch_dyn_min_decay_time ) elif cur_optim == 'increase_frame_batch_amount': relative_performance = 0 if procpsec == 0 else oldprocpsec / procpsec if relative_performance > settings.frame_batch_min_improvement: # keep the change logger.debug( 'found better setting: frame_batch_amount=%d (rel performance: %0.3f)', settings.frame_batch_amount, relative_performance) frame_batch_dyn_min = settings.frame_batch_amount frame_batch_min_next_decay = ( thetime + settings.frame_batch_dyn_min_decay_time ) else: # revert the change # we're evil scientists so we dont report null results settings.frame_batch_amount -= 1 frame_batch_dyn_max = settings.frame_batch_amount frame_batch_max_next_decay = ( thetime + settings.frame_batch_dyn_max_decay_time ) else: raise RuntimeError(f'unknown cur_optim = {cur_optim}') cur_optim = None perc_rec_proc = procpsec / recpsec reason_str = (f'(processing {perc_rec_proc:.3f} images for every ' + f'image generated, have {len(isticher.ooo_frames)} ' + 'frames awaiting processing)') threshold_spawn, threshold_kill = ( (settings.spawn_worker_threshold_low, settings.kill_worker_threshold_low) if len(isticher.ooo_frames) < settings.ooo_balance else (settings.spawn_worker_threshold_high, settings.kill_worker_threshold_high) ) if (perc_rec_proc > threshold_spawn and settings.num_workers < settings.max_workers): settings.num_workers += 1 if settings.frames_per_sync > settings.min_frames_per_sync: settings.frames_per_sync -= 1 if paused_workers: unpaused = paused_workers.pop() workers.append(unpaused) logger.debug('Unpaused a worker %s', reason_str) else: worker = _spawn_worker(frame_gen, ms_per_frame, worker_counter) isticher.register_queue(worker.img_queue) workers.append(worker) worker_counter += 1 logger.debug('Spawned a worker %s', reason_str) elif (perc_rec_proc < threshold_kill and settings.num_workers > 1): settings.num_workers -= 1 if settings.frames_per_sync > settings.min_frames_per_sync: settings.frames_per_sync -= 1 settings.frames_per_sync -= 1 if not paused_workers: paused = workers.pop() paused_workers.append(paused) logger.debug('Paused a worker %s', 
reason_str) else: paused = workers.pop() killed = paused_workers.pop() paused_workers.append(paused) stopping_workers.append(killed) killed.start_finish() logger.debug('Killed a worker %s', reason_str) elif settings.frames_per_sync < settings.max_frames_per_sync: settings.frames_per_sync += 1 want_reduce_frame_batch = perc_rec_proc < 1 # if we have processed fewer than we have received it's not as # important that we optimize image generation can_reduce_frame_batch = ( settings.frame_batch_amount > frame_batch_dyn_min ) can_increase_frame_batch = ( settings.frame_batch_amount < frame_batch_dyn_max ) if ((want_reduce_frame_batch or not can_increase_frame_batch) and can_reduce_frame_batch): cur_optim = 'reduce_frame_batch_amount' settings.frame_batch_amount -= 1 elif can_increase_frame_batch: cur_optim = 'increase_frame_batch_amount' settings.frame_batch_amount += 1 old_perf = (recpsec, procpsec) logger.debug('Shutting down workers...') workers.extend(paused_workers) paused_workers = [] for worker in workers: worker.start_finish() workers.extend(stopping_workers) stopping_workers = [] all_finished = False while not all_finished: all_finished = not isticher.do_work() if not all_finished: for worker in workers: if not worker.check_finish(): all_finished = False break if not all_finished: for worker in stopping_workers: if not worker.check_finish(): all_finished = False break logger.debug('All workers shut down, processing remaining frames...') while isticher.next_frame < num_frames: if not isticher.do_work(): time.sleep(0.001) isticher.finish() for worker in workers: worker.check_sync() # just in case we leaked one worker.close() logger.info('Finished') return settings
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_video_params(video_filename):\n \n width, height = get_video_aspect(video_filename)\n frame_rate = get_video_frame_rate(video_filename)\n return width, height, frame_rate", "def get_output_file(self, path, fps=30):\n return cv2.VideoWriter(\n filename=path,\n fourcc=cv2.VideoWriter_fourcc(*\"mp4v\"),\n fps=float(fps),\n frameSize=(self.display_width, self.display_height),\n isColor=True,\n )", "def set_fps(self, fps=25):\n raise NotImplementedError(\"set_fps is not implemented\")", "def get_fps(clock):\n if video_mode:\n return \"30\" # Video FPS will be 30\n else:\n return str(int(round(clock.get_fps(), 0)))", "def get_video_frame_rate(filename):\n clip = VideoFileClip(filename)\n frame_rate = clip.fps\n clip.close()\n return frame_rate", "def generate_video_from_frames(path_to_frames, title):\r\n mean_height = 0\r\n mean_width = 0\r\n num_of_images = load_one_setting(settings_filename, 'MAX_CYCLES')\r\n os.chdir(path_to_frames)\r\n '''Loading all frames'''\r\n for file in os.listdir('.'):\r\n if file.endswith(\".jpg\") or file.endswith(\".jpeg\") or file.endswith(\"png\") or file.endswith(\"JPEG\"):\r\n im = Image.open(file)\r\n width, height = im.size\r\n mean_width += width\r\n mean_height += height\r\n\r\n mean_width = int(mean_width / num_of_images)\r\n mean_height = int(mean_height / num_of_images)\r\n\r\n for file in os.listdir('.'):\r\n if file.endswith(\".jpg\") or file.endswith(\".jpeg\") or file.endswith(\"png\") or file.endswith(\"JPEG\"):\r\n im = Image.open(file)\r\n imResize = im.resize((mean_width, mean_height), Image.ANTIALIAS)\r\n imResize.save(file, 'JPEG', quality=95)\r\n release_video(title)\r\n os.chdir(r'../..')", "def get_video_fps(self):\n fps = self.video.get(cv2.CAP_PROP_FPS)\n logging.info('Video FPS: {}'.format(fps))\n return fps", "def video_faster_activated(self):\n\n if self.playerType == VLC and self.playMode == VLC:\n\n if self.play_rate + self.play_rate_step <= 8:\n self.play_rate += self.play_rate_step\n self.mediaplayer.set_rate(self.play_rate)\n\n # second video together\n if self.simultaneousMedia:\n self.mediaplayer2.set_rate(self.play_rate)\n self.lbSpeed.setText('x{:.3f}'.format(self.play_rate))\n\n logging.info('play rate: {:.3f}'.format(self.play_rate))", "def get_video_frame_rate(video_filename):\n if not os.path.exists(video_filename):\n raise ValueError(\"%s does not exist\" % video_filename)\n \n probe = ffmpeg.probe(video_filename)\n assert len(probe['streams']) == 1\n \n # Seems to be two ways of coding, not sure which is better\n avg_frame_rate = probe['streams'][0]['avg_frame_rate']\n r_frame_rate = probe['streams'][0]['r_frame_rate']\n assert avg_frame_rate == r_frame_rate\n \n # Convert fraction to number\n num, den = avg_frame_rate.split('/')\n frame_rate = float(num) / float(den)\n \n return frame_rate", "def get_frame_rate(video):\n\n video = cv2.VideoCapture(video)\n\n # Find OpenCV version\n (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')\n\n # With webcam get(CV_CAP_PROP_FPS) does not work.\n # Let's see for ourselves.\n\n if int(major_ver) < 3:\n fps = video.get(cv2.cv.CV_CAP_PROP_FPS)\n logger.info(\"Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}\".format(fps))\n else:\n fps = video.get(cv2.CAP_PROP_FPS)\n logger.info(\"Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}\".format(fps))\n\n cv2.destroyAllWindows()\n video.release()\n return fps", "def getFPS(self):\n # load it each time, since this setting is not limited to a single user\n projectSettingsDB = 
self.loadProjectSettings()\n try:\n fpsValue = projectSettingsDB[\"FPS\"]\n return fpsValue\n except KeyError:\n msg = \"Database Error while reading projectSettings.json\"\n logger.error(msg)\n return None", "def get_video(self, fps, directory=None, name=\"estmd_output.avi\", run_id_prefix=True, cod=\"MJPG\"):\n path = self.get_full_output_name(name, directory, run_id_prefix)\n\n codec = cv2.cv.CV_FOURCC(cod[0], cod[1], cod[2], cod[3])\n video = cv2.VideoWriter(path, codec, fps, self.output_dimensions, isColor=0)\n\n print \"ESTMD outputting at: \", self.output_dimensions\n\n for values in self.result_values:\n frame = np.zeros(self.output_dimensions[::-1])\n for v in values:\n ycord, xcord, pixel = v\n frame[ycord, xcord] = pixel\n frame = (frame * 255.0).astype('u1')\n video.write(frame)\n\n video.release()\n cv2.destroyAllWindows()\n print \"Saved ESTMD output video to \" + path\n\n return", "def testX264Speed(self):\n if self.x264Speed in tools.X264_SPEEDS:\n self.assertEqual(\n self.x264Speed,\n self.config.x264Speed\n )\n else:\n self.assertNotEqual(\n self.x264Speed,\n self.config.x264Speed\n )\n self.assertEqual(\n tools.X264_SPEED_DEFAULT,\n self.config.x264Speed\n )", "def framerate(self):\n return self.config.get('framerate', 15)", "def get_fps(video, use_opencv=False):\n\n if use_opencv:\n video_cap = cv2.VideoCapture(video)\n fps = video_cap.get(cv2.CAP_PROP_FPS)\n video_cap.release()\n return fps\n else:\n return convert_to_float(\n utils.exec_shell_command(f\"{SHELL_CMD_GET_FPS} '{video}'\", silent=True)[0]\n )", "def get_fps(self):\n raise NotImplementedError(\"get_fps is not implemented\")", "def generate_video_with_target_frames(output_file, frame_targets, duration_sec, output_fps=24, blend_frames=1):\n frames = numbered_frames(\n frame_targets, duration_sec, output_fps, blend_frames)\n\n save_video(frames, output_file, output_fps)", "def get_config_sample_speed():\n # try changing learning rate\n config = get_default_config()\n\n config['train_batch_size'] = 16384\n config['_policies'] = [None, \"from_scratch_sb\", \"pretrained\"]\n config['lr'] = 3e-4\n config['sgd_minibatch_size'] = 4096\n config['num_sgd_iter'] = 4\n config['rollout_fragment_length'] = 100\n config['num_workers'] = tune.grid_search([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n\n config['num_envs_per_worker'] = tune.grid_search([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n\n # ['humanoid_blocker', 'humanoid'],\n config['_train_policies'] = ['player_1']\n config['num_gpus'] = 0\n config['_train_steps'] = 20\n config[\"batch_mode\"] = \"complete_episodes\"\n\n config['_trainer'] = \"PPO\"\n config['_policy'] = \"PPO\"\n config['_call']['num_samples'] = 1\n config['_call']['resources_per_trial'] = {\n \"custom_resources\": {\"tune_cpu\": tune.sample_from(lambda spec: spec.config.num_workers + 10)}} # upper bound\n\n # config['_run_inline'] = True\n\n return config", "def video_quality_key(option):\n return (\n VIDEO_RATING_DICT[option.media_type.file_type],\n product(option.media_type.resolution),\n option.media_type.video_bitrate,\n )", "def write_video(project_video_output, output_folder, fps=20):\n print(\"Creating video {}, FPS={}\".format(project_video_output, fps))\n clip = ImageSequenceClip(output_folder, fps)\n clip.write_videofile(project_video_output)", "def plot_video(video, fps=1):\n global vis\n opts = dict(fps=int(fps))\n win = vis.video(video, opts=opts)\n return win", "def setValues(\n self,\n frameRate: int = None,\n timeScale: int = None,\n vpDecorations: Boolean = 
ON,\n vpBackground: Boolean = OFF,\n compass: Boolean = OFF,\n ):\n pass", "def generate_video_with_target_times_seconds(output_file, time_targets_sec, duration_sec=5, output_fps=24, blend_frames=1):\n generate_video_with_target_times_ms(\n output_file, [t*1000 for t in time_targets_sec], duration_sec, output_fps, blend_frames)", "def screenshot_settings(form_values):\r\n\tdefaults = json.loads(os.getenv(\"WWW2PNG_SCREENSHOT_SETTINGS_DEFAULT\"))\r\n\r\n\tsettings = {}\r\n\tsettings.update(defaults)\r\n\tsettings.update(form_values)\r\n\r\n\tsettings[\"delay\"] = min(int(settings[\"delay\"]), settings[\"maxDelay\"])\r\n\tsettings[\"height\"] = min(list(map(int, settings[\"resolution\"].split(\"x\")))[1], list(map(int, settings[\"maxResolution\"].split(\"x\")))[1])\r\n\tsettings[\"height\"] = max(settings[\"height\"], list(map(int, settings[\"minResolution\"].split(\"x\")))[1])\r\n\tsettings[\"width\"] = min(list(map(int, settings[\"resolution\"].split(\"x\")))[0], list(map(int, settings[\"maxResolution\"].split(\"x\")))[0])\r\n\tsettings[\"width\"] = max(settings[\"width\"], list(map(int, settings[\"minResolution\"].split(\"x\")))[0])\r\n\tsettings[\"maxHeight\"] = list(map(int, settings[\"maxResolution\"].split(\"x\")))[1]\r\n\tsettings[\"maxWidth\"] = list(map(int, settings[\"maxResolution\"].split(\"x\")))[0]\r\n\tsettings[\"full_page\"] = settings[\"full_page\"] == \"true\" or settings[\"full_page\"] == \"True\" or settings[\"full_page\"] == \"1\" or settings[\"full_page\"] == True\r\n\r\n\treturn settings", "def get(self, configuration_id, **kwargs):\n # type: (string_types, dict) -> Vp9VideoConfiguration\n\n return self.api_client.get(\n '/encoding/configurations/video/vp9/{configuration_id}',\n path_params={'configuration_id': configuration_id},\n type=Vp9VideoConfiguration,\n **kwargs\n )", "def setRenderSettings(filePath):\n cache.values[\"engine\"] = bpy.context.scene.render.engine\n cache.values[\"transparent\"] = bpy.context.scene.render.film_transparent\n\n cache.values[\"filepath\"] = bpy.context.scene.render.filepath\n cache.values[\"format\"] = bpy.context.scene.render.image_settings.file_format\n cache.values[\"mode\"] = bpy.context.scene.render.image_settings.color_mode\n cache.values[\"depth\"] = bpy.context.scene.render.image_settings.color_depth\n\n cache.values[\"resolutionX\"] = bpy.context.scene.render.resolution_x\n cache.values[\"resolutionY\"] = bpy.context.scene.render.resolution_y\n cache.values[\"percentage\"] = bpy.context.scene.render.resolution_percentage\n cache.values[\"aspectX\"] = bpy.context.scene.render.pixel_aspect_x\n cache.values[\"aspectY\"] = bpy.context.scene.render.pixel_aspect_y\n\n # Define the necessary render settings.\n bpy.context.scene.render.engine = 'BLENDER_EEVEE'\n bpy.context.scene.render.film_transparent = True\n\n bpy.context.scene.render.filepath = filePath\n bpy.context.scene.render.image_settings.file_format = 'PNG'\n bpy.context.scene.render.image_settings.color_mode = 'RGBA'\n bpy.context.scene.render.image_settings.color_depth = '8'\n\n bpy.context.scene.render.resolution_x = IMAGE_SIZE\n bpy.context.scene.render.resolution_y = IMAGE_SIZE\n bpy.context.scene.render.resolution_percentage = 100\n bpy.context.scene.render.pixel_aspect_x = 1.0\n bpy.context.scene.render.pixel_aspect_y = 1.0\n\n # Store the current world.\n cache.values[\"world\"] = bpy.context.scene.world", "def get_video_stream(yt, resolution):\n global adaptive\n\n resolution_itag = {'360p':134, '480p':135, '720p':136}\n progressive_streams = 
yt.streams.filter(progressive=True)\n video_stream = progressive_streams.get_by_resolution(resolution)\n\n if video_stream is not None:\n return video_stream\n else:\n adaptive_streams = yt.streams.filter(adaptive=True, type='video')\n video_itag = resolution_itag[resolution]\n video_stream = adaptive_streams.get_by_itag(video_itag)\n adaptive = True\n return video_stream", "def animate_configuration(self, fps=30, **kwargs):\n\n if self.config_plot_update_func is None:\n msg = ('No ploting update function has been assigned to '\n 'config_plot_update_func.')\n raise ValueError(msg)\n\n kwargs.pop('interval', None) # ignore the user's supplied interval\n try:\n sample_rate = int(1.0 / np.diff(self.result.index).mean())\n except AttributeError:\n msg = (\"No trajectory has been computed yet, so the animation \"\n \"can't run. Run one of the response functions.\")\n raise AttributeError(msg)\n\n fps = int(fps)\n if sample_rate != fps:\n trajectories, interval = self._resample_trajectories(fps)\n else:\n trajectories, interval = self.result, 1000 / sample_rate\n\n # TODO : Could be:\n # axes, *objs_to_modify = ..\n # try:\n # fig = axes.figure\n # except AttributeError:\n # fig = axes[0].figure\n try:\n fig, *objs_to_modify = self.plot_configuration()\n except TypeError:\n msg = ('The configuration plot function does not return any objects'\n ' to modify in the update function.')\n raise ValueError(msg)\n\n def gen_frame(row_tuple, pop_list):\n time = row_tuple[0]\n row = row_tuple[1]\n # Don't mutate the orginal list.\n pop_list = pop_list.copy()\n args = []\n for k in getfullargspec(self.config_plot_update_func).args:\n if k == 'time':\n args.append(time)\n elif k == 'time__hist':\n args.append(trajectories[:time].index)\n elif k == 'time__futr':\n args.append(trajectories[time:].index)\n elif k.endswith('__hist'):\n args.append(trajectories[k[:-6]][:time])\n elif k.endswith('__futr'):\n args.append(trajectories[k[:-6]][time:])\n elif k in trajectories: # constants, coordinates, measurements\n args.append(row[k])\n elif k in self.constants:\n args.append(self.constants[k])\n else: # must be matplotlib object\n # TODO : This last part is quite fragile. It requires these\n # remaining args to be in the same order as the returned\n # tuple from the plot function and there is no way to know\n # if these are the correct objects to append if the order\n # isn't correct.\n args.append(pop_list.pop(0))\n self.config_plot_update_func(*args)\n\n # TODO : Run this with the initial conditions so that errors will\n # propogate before the animation is run.\n # NOTE : This is useful to uncomment in debugging because the errors\n # push to the top if in the FuncAnimation.\n #gen_frame((1.0, self.result.iloc[0]), list(objs_to_modify))\n\n # NOTE : If the iterrows() generator is not converted to a list then\n # matplotlib will throw a StopIteration error when the animation\n # reaches the last frame instead of repeating. This causes headaches in\n # the notebook and elsewhere. 
See issue #39 in the resonance repo.\n return animation.FuncAnimation(fig, gen_frame,\n fargs=(objs_to_modify, ),\n frames=list(trajectories.iterrows()),\n interval=interval,\n **kwargs)", "def generate_video_with_target_times_ms(output_file, time_targets_ms, duration_sec=5, output_fps=24, blend_frames=1):\n frame_targets = get_frame_targets_from_time_targets(\n time_targets_ms, output_fps)\n generate_video_with_target_frames(\n output_file, frame_targets, duration_sec, output_fps, blend_frames)", "def rate_video(self, params):\n video_id = params.get('video_id', [''])[0]\n rating = params.get('rating', [''])[0]\n rate = self.netflix_session.rate_video(\n video_id=video_id,\n rating=rating)\n return rate" ]
[ "0.55007595", "0.5436354", "0.5310099", "0.5290828", "0.52758837", "0.5230646", "0.5117052", "0.50882167", "0.5083284", "0.5064976", "0.5064026", "0.5059811", "0.5054783", "0.5002133", "0.5001402", "0.49894008", "0.49701047", "0.49539283", "0.49424905", "0.49278048", "0.4926101", "0.4902348", "0.48906165", "0.48887566", "0.4881708", "0.48664683", "0.48602214", "0.48471716", "0.48403963", "0.4829589" ]
0.55143076
0
Plan to a desired endeffector offset with movehandstraight constraint. movement less than distance will return failure. The motion will not move further than max_distance. robot direction unit vector in the direction of motion distance minimum distance in meters max_distance maximum distance in meters timelimit timeout in seconds stepsize step size in meters for the Jacobian pseudoinverse controller position_tolerance constraint tolerance in meters angular_tolerance constraint tolerance in radians traj
def PlanToEndEffectorOffset(self, robot, direction, distance, max_distance=None, nullspace=JointLimitAvoidance, timelimit=5.0, step_size=0.001, position_tolerance=0.01, angular_tolerance=0.15, **kw_args): if distance < 0: raise ValueError('Distance must be non-negative.') elif numpy.linalg.norm(direction) == 0: raise ValueError('Direction must be non-zero') elif max_distance is not None and max_distance < distance: raise ValueError('Max distance is less than minimum distance.') elif step_size <= 0: raise ValueError('Step size must be positive.') elif position_tolerance < 0: raise ValueError('Position tolerance must be non-negative.') elif angular_tolerance < 0: raise ValueError('Angular tolerance must be non-negative.') # save all active bodies so we only check collision with those active_bodies = [] for body in self.env.GetBodies(): if body.IsEnabled(): active_bodies.append(body) # Normalize the direction vector. direction = numpy.array(direction, dtype='float') direction /= numpy.linalg.norm(direction) # Default to moving an exact distance. if max_distance is None: max_distance = distance with robot: manip = robot.GetActiveManipulator() traj = openravepy.RaveCreateTrajectory(self.env, '') traj.Init(manip.GetArmConfigurationSpecification()) active_dof_indices = manip.GetArmIndices() limits_lower, limits_upper = robot.GetDOFLimits(active_dof_indices) initial_pose = manip.GetEndEffectorTransform() q = robot.GetDOFValues(active_dof_indices) traj.Insert(0, q) start_time = time.time() current_distance = 0.0 sign_flipper = 1 last_rot_error = 9999999999.0 try: while current_distance < max_distance: # Check for a timeout. current_time = time.time() if timelimit is not None and current_time - start_time > timelimit: raise PlanningError('Reached time limit.') # Compute joint velocities using the Jacobian pseudoinverse. q_dot = self.GetStraightVelocity(manip, direction, initial_pose, nullspace, step_size, sign_flipper=sign_flipper) q += q_dot robot.SetDOFValues(q, active_dof_indices) # Check for collisions. #if self.env.CheckCollision(robot): for body in active_bodies: if self.env.CheckCollision(robot, body): raise PlanningError('Encountered collision.') if robot.CheckSelfCollision(): raise PlanningError('Encountered self-collision.') # Check for joint limits. elif not (limits_lower < q).all() or not (q < limits_upper).all(): raise PlanningError('Encountered joint limit during Jacobian move.') # Check our distance from the constraint. current_pose = manip.GetEndEffectorTransform() a = initial_pose[0:3, 3] p = current_pose[0:3, 3] orthogonal_proj = (a - p) - numpy.dot(a - p, direction) * direction if numpy.linalg.norm(orthogonal_proj) > position_tolerance: raise PlanningError('Deviated from a straight line constraint.') # Check our orientation against the constraint. offset_pose = numpy.dot(numpy.linalg.inv(current_pose), initial_pose) offset_angle = openravepy.axisAngleFromRotationMatrix(offset_pose) offset_angle_norm = numpy.linalg.norm(offset_angle) if offset_angle_norm > last_rot_error + 0.0005: sign_flipper *= -1 last_rot_error = offset_angle_norm if offset_angle_norm > angular_tolerance: raise PlanningError('Deviated from orientation constraint.') traj.Insert(traj.GetNumWaypoints(), q) # Check if we've exceeded the maximum distance by projecting our # displacement along the direction. 
hand_pose = manip.GetEndEffectorTransform() displacement = hand_pose[0:3, 3] - initial_pose[0:3, 3] current_distance = numpy.dot(displacement, direction) except PlanningError as e: # Throw an error if we haven't reached the minimum distance. if current_distance < distance: raise # Otherwise we'll gracefully terminate. else: logger.warning('Terminated early at distance %f < %f: %s', current_distance, max_distance, e.message) SetTrajectoryTags(output_traj, {Tags.CONSTRAINED: True}, append=True) return traj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drive(self, distance, tolerance=0.0, tolerance_step=0.5,\n max_attempts=10, avoid_targets=True, avoid_home=False,\n use_waypoints=True):\n self.cur_loc = self.swarmie.get_odom_location()\n start = self.cur_loc.get_pose()\n\n goal = Point()\n goal.x = start.x + distance * math.cos(start.theta)\n goal.y = start.y + distance * math.sin(start.theta)\n\n return self.drive_to(\n goal,\n tolerance=tolerance,\n tolerance_step=tolerance_step,\n max_attempts=max_attempts,\n avoid_targets=avoid_targets,\n avoid_home=avoid_home,\n use_waypoints=use_waypoints\n )", "def drive_to(self, goal, tolerance=0.0, tolerance_step=0.5,\n max_attempts=10, avoid_targets=True, avoid_home=False,\n use_waypoints=True, start_location=None,\n distance_threshold=None):\n print('\\nRequest received')\n self.fail_count = 0\n self.tolerance = tolerance\n\n self.avoid_targets = avoid_targets\n if avoid_targets is True and avoid_home is True:\n avoid_home = False\n self.avoid_home = avoid_home\n\n current_ignore = Obstacle.IS_SONAR\n if self.avoid_targets is True:\n current_ignore |= Obstacle.TAG_TARGET\n elif self.avoid_home is True:\n current_ignore |= Obstacle.TAG_HOME\n\n self.goal.x = goal.x\n self.goal.y = goal.y\n\n self.cur_loc = self.swarmie.get_odom_location()\n self.current_state = Planner.STATE_IDLE\n self.prev_state = Planner.STATE_IDLE\n\n while (not self.cur_loc.at_goal(self.goal,\n Planner.DISTANCE_OK + self.tolerance)\n and self.fail_count < max_attempts):\n\n\n if use_waypoints is True:\n # get new plan and try to drive to first point in it\n point = self._get_next_waypoint(tolerance_step)\n else:\n point = goal\n\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_DRIVE\n # Turn to approximate goal heading while ignoring sonar and tags\n # helps to prevent rover from trying to jump around obstacles\n # before it even starts along its new path\n self.result = self._face_point(\n point,\n ignore=current_ignore ^ Obstacle.IS_SONAR\n )\n\n if self.result == MoveResult.SUCCESS:\n self.result = self.swarmie.drive_to(\n point,\n ignore=Obstacle.SONAR_BLOCK,\n throw=False\n )\n\n if self.result == MoveResult.SUCCESS:\n # Success, we got to our waypoint, or got ourselves out of\n # whatever pickle we were just in.\n # Just get a new plan and drive to next point\n self.fail_count = 0\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_IDLE\n print('Successfully drove to first point in nav plan.')\n\n # otherwise, something went wrong or we found home\n elif self.result == MoveResult.OBSTACLE_HOME:\n self.set_home_locations()\n\n # get around the home tag obstacle\n count = 0\n\n # Give the rover 3 tries to avoid any tags nearby before\n # getting a new nav plan. 
MoveResult.OBSTACLE_SONAR takes\n # priority in the driver code, so it should be safe to continue\n # this loop if the MoveResult is just an OBSTACLE_HOME\n # self.fail_count may exceed limit here, but I'll let it go\n while count < 3 and self.result == MoveResult.OBSTACLE_HOME:\n print('\\nObstacle: Found Home.')\n count += 1\n self.fail_count += 1\n\n detections = self.swarmie.get_latest_targets().detections\n inside_home = self.is_inside_home_ring(detections)\n if inside_home:\n print('\\nGetting out of the home ring!!')\n angle, dist = self.get_angle_and_dist_to_escape_home(\n detections\n )\n self.swarmie.turn(\n angle,\n ignore=Obstacle.IS_SONAR | Obstacle.IS_VISION\n )\n self.result = self.swarmie.drive(\n dist,\n ignore=Obstacle.IS_SONAR | Obstacle.IS_VISION\n )\n\n if self.avoid_home is False:\n # turn back around\n self.swarmie.turn(\n math.pi,\n ignore=Obstacle.IS_SONAR | Obstacle.IS_VISION\n )\n print('Obstacle: Found Home.')\n return MoveResult.OBSTACLE_HOME\n else:\n if self.avoid_home is False:\n print('Obstacle: Found Home.')\n return MoveResult.OBSTACLE_HOME\n\n self.result = self._avoid_tag(id=256,\n ignore=current_ignore)\n\n elif self.result == MoveResult.OBSTACLE_TAG:\n # get around the tag obstacle\n count = 0\n\n # Give the rover 3 tries to avoid any tags nearby before\n # getting a new nav plan. MoveResult.OBSTACLE_SONAR takes\n # priority in the driver code, so it should be safe to continue\n # this loop if the MoveResult is just an OBSTACLE_TAG\n # self.fail_count may exceed limit here, but I'll let it go\n while count < 3 and self.result == MoveResult.OBSTACLE_TAG:\n print('\\nObstacle: Found a Tag.')\n\n if self.avoid_targets is False:\n if not self.sees_home_tag():\n return self.result\n\n count += 1\n self.fail_count += 1\n\n self.result = self._avoid_tag(id=0,\n ignore=current_ignore)\n\n elif self.result == MoveResult.OBSTACLE_SONAR:\n # Check for home and tag obstacles just to be safe, because\n # sonar MoveResults take priority, and would mask a home or\n # target tag in view.\n obstacle = self.swarmie.get_obstacle_condition()\n\n if (obstacle & Obstacle.TAG_HOME == Obstacle.TAG_HOME and\n self.avoid_home is False):\n self.set_home_locations()\n return MoveResult.OBSTACLE_HOME\n\n if (obstacle & Obstacle.TAG_TARGET == Obstacle.TAG_TARGET and\n self.avoid_targets is False):\n return MoveResult.OBSTACLE_TAG\n\n # get around the sonar obstacle\n self.fail_count += 1\n\n print('\\nObstacle: Sonar.')\n left_blocked, center_blocked, right_blocked = \\\n self._check_sonar_obstacles()\n\n if (not left_blocked and\n not center_blocked and not right_blocked):\n print('\\nFake sonar obstacle??')\n pass # 'fake' obstacle?\n\n elif not left_blocked and center_blocked and right_blocked:\n print('Left looks clear, turning left.')\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_LEFT\n self._go_around(math.pi / 4, 0.7)\n # self.swarmie.drive_to(point, throw=False)\n\n elif left_blocked and center_blocked and not right_blocked:\n print('Right looks clear, turning right.')\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_RIGHT\n self._go_around(-math.pi / 4, 0.7)\n # self.swarmie.drive_to(point, throw=False)\n\n elif left_blocked and not center_blocked and not right_blocked:\n print('Only left blocked, turning a little right.')\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_RIGHT\n self._go_around(-math.pi / 6, 0.6)\n # self.swarmie.drive_to(point, throw=False)\n\n elif not 
left_blocked and not center_blocked and right_blocked:\n print('Only right blocked, turning a little left.')\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_LEFT\n self._go_around(math.pi / 6, 0.6)\n # self.swarmie.drive_to(point, throw=False)\n\n else:\n print('Neither left or right look clear.')\n\n # Only back up if we're far enough away from home for it\n # to be safe. Don't want to back up into the nest!\n if self._is_safe_to_back_up():\n print('Backing up.')\n self.swarmie.drive(\n -0.3,\n ignore=Obstacle.IS_SONAR,\n throw=False\n )\n\n if (self.current_state == Planner.STATE_AVOID_RIGHT or\n self.prev_state == Planner.STATE_AVOID_RIGHT):\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_RIGHT\n self.clear(-math.pi / 4, ignore=current_ignore,\n reset_heading=False)\n self._go_around(-math.pi / 4, 0.75)\n\n else:\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_LEFT\n self.clear(math.pi / 4, ignore=current_ignore,\n reset_heading=False)\n self._go_around(math.pi / 4, 0.75)\n\n elif self.result == MoveResult.PATH_FAIL:\n # shit, hope we can back up if this ever happens\n self.fail_count += 1\n\n print('\\nPath Failure. Backing up.')\n self.prev_state = self.current_state\n self.current_state = Planner.STATE_AVOID_REVERSE\n self.swarmie.drive(\n -0.5,\n ignore=Obstacle.IS_SONAR | Obstacle.IS_VISION,\n throw=False\n )\n\n self.cur_loc = self.swarmie.get_odom_location()\n\n if self.fail_count >= max_attempts:\n print('Failed to drive to goal {} times.'.format(\n max_attempts)\n )\n raise PathException(MoveResult.PATH_FAIL)\n\n if start_location is not None:\n current_loc = self.cur_loc.get_pose()\n dist = math.sqrt((start_location.x - current_loc.x) ** 2\n + (start_location.y - current_loc.y) ** 2)\n if dist > distance_threshold:\n raise PathException(MoveResult.PATH_FAIL)\n\n print('Successfully executed nav plan.')\n return MoveResult.SUCCESS", "def drive(self, distance, linear_speed):\n current_pose = [self.px, self.py, self.pth]\n \tinitial_pose = current_pose\n # final pose is distance to be moved by the robot in the x direction\n \tdistance_traveled = 0\n \ttolerance = 0.01\n\n self.send_speed(linear_speed, 0.0)\n \twhile abs(distance-distance_traveled) > tolerance:\n current_pose = [self.px, self.py, self.pth]\n distance_traveled = math.sqrt((current_pose[0]-initial_pose[0])*(current_pose[0]-initial_pose[0])+(current_pose[1]-initial_pose[1])*(current_pose[1]-initial_pose[1]))\n #print(final_pose[0]-current_pose[0])\n \tself.send_speed(0.0,0.0)", "def move(self, distance: int, direction: float, max_steering=np.pi / 2):\n if direction > max_steering:\n direction = max_steering\n if direction < -max_steering:\n direction = -max_steering\n\n if distance < 0.0:\n distance = 0.0\n\n self.total_distance_covered += distance\n\n self.theta = (self.theta + direction) % (2.0 * np.pi)\n self.x = self.x + (np.cos(self.theta) * distance)\n self.y = self.y + (np.sin(self.theta) * distance)", "def judge_goal(self):\n err_pos = math.sqrt((self.y_des - self.y)**2 +(self.x_des - self.x)**2)\n print(\"t= %s\" % rospy.get_time()+\"-----------\")\n print('destination position=['+str(self.x_des)+','+str(self.y_des)+\"]\")\n print('the current position=['+str(self.x)+','+str(self.y)+\"]\")\n print('the current yaw angle=['+str(self.yaw))\n print('distance to destination='+str(err_pos))\n\n if(err_pos < 0.8):\n print('reach goal!!!!!')\n self.goal_flag=1", "def Optimizer(r_grasp,PAM_r, PAM_s, object_s, 
object_f, object_params, phi, r_max, walls, obstacles, obstacles_PAM, current_leg, n, n_p, v_max, force_max, legs, dt):\n global action_push_pull, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned\n # assigning cost of changing from one leg to another based on the distance to the desired pose\n cost_ChangeLeg = 1\n dz_final = np.sqrt((object_s.x - object_f.x) ** 2 + (object_s.y - object_f.y) ** 2)\n if dz_final < 1:\n cost_ChangeLeg = 10\n elif dz_final < 2:\n cost_ChangeLeg = 20\n else:\n cost_ChangeLeg = 10\n\n # assigning weight for cost of predicted repositioning and cost of robot motion\n w_cost_reposition = 40\n w_cost_motion = 10\n\n # finding object's leg cordinates\n object_leg = find_corners(object_s.x, object_s.y, object_s.phi, object_params[7], object_params[8])\n\n # initialization (initializeing cost to infinity)\n cost = [float('inf'), float('inf'), float('inf'), float('inf')]\n cost_legchange = [0, 0, 0, 0]\n cost_PAM = [[0, 0],[0, 0],[0, 0],[0, 0]]\n cost_manipulation = [0, 0, 0, 0]\n cost_motion = [0, 0, 0, 0]\n force = [0, 0, 0, 0]\n path = [[[], []], [[], []], [[], []], [[], []]]\n planned_path_w = [[],[],[],[]]\n PAM_g = [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]]\n command = [[], [], [], []]\n des = [[], [], [], [], []]\n PAM_goal = state()\n\n # find the nominal trajectory for manipulation\n theta = nominal_traj([object_s.x,object_s.y,object_s.phi], [object_f.x,object_f.y,object_f.phi], v_max, walls, obstacles, n, dt)\n\n # itterate through each leg to find the leg with minimum cost\n for leg in range(4):\n phi_linear = theta\n psi_linear = [theta[k] + phi[leg] for k in range(len(theta))]\n \t# find the cost and required force for manipulation for the leg\n force[leg], cost_manipulation[leg], planned_path_w[leg], command[leg], des= OptTraj([object_s.x, object_s.y, object_s.phi, object_s.xdot, object_s.ydot, object_s.phidot], [object_f.x, object_f.y, object_f.phi, object_f.xdot, object_f.ydot, object_f.phidot], v_max, walls, obstacles, object_params[0:4], object_params[4:7], phi_linear, psi_linear, force_max, r_max[leg], n, dt, object_leg[leg])\n \t# adding cost of changing leg\n if leg != current_leg:\n cost_legchange[leg] = cost_ChangeLeg\n # adding cost of PAM motion to PAM goal pose\n phi0 = np.arctan2(object_leg[leg][1]-object_s.y,object_leg[leg][0]-object_s.x)\n # finding the better option between pulling and pushing for each leg, with the same manipulation plan\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost_PAM[leg][push_pull], path[leg][push_pull], command_pam, goal_orientation = OptPath([PAM_s.x, PAM_s.y, PAM_s.phi], PAM_g[leg][push_pull], walls, obstacles_PAM, n_p, dt)\n if cost_PAM[leg][push_pull]!= float(\"inf\"):\n PAM_s_sim = copy.deepcopy(PAM_s)\n PAM_s_sim.x, PAM_s_sim.y, PAM_s_sim.phi = [PAM_r * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], PAM_r * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n # adding cost of predicted re-positionings\n n_transition = traj_simulation(copy.deepcopy(PAM_s_sim), copy.deepcopy(object_s), force[leg], legs, leg, command[leg])\n # print(n_transition)\n cost_PAM[leg][push_pull] += w_cost_reposition*n_transition\n cost_motion[leg] += min(cost_PAM[leg])*w_cost_motion\n action_push_pull[leg] = 
np.argmin(cost_PAM[leg])\n else:\n phi0 = np.arctan2(force[leg][0][1], force[leg][0][0])\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost = [cost_legchange[leg] + cost_motion[leg] + cost_manipulation[leg] for leg in range(4)]\n\n if min(cost) < float(\"inf\"):\n \t[min_index, min_value] = [np.argmin(cost), min(cost)]\n \t# Finding the grasping goal pose based on the selected plan\n \tphi0 = np.arctan2(object_leg[min_index][1]-object_s.y,object_leg[min_index][0]-object_s.x)\n \tgrasping_goal = [PAM_r * np.cos(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][0], PAM_r * np.sin(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][1], np.pi * action_push_pull[min_index] + phi0]\n \tPAM_goal = state()\n \tPAM_goal.x, PAM_goal.y, PAM_goal.phi = PAM_g[min_index][action_push_pull[min_index]]\n \tobject_path_planned = Path()\n \tobject_path_planned.header.frame_id = 'frame_0'\n \tfor i in range(len(planned_path_w[min_index])):\n \t\tpose = PoseStamped()\n \t\tpose.pose.position.x = planned_path_w[min_index][i][0]\n \t\tpose.pose.position.y = planned_path_w[min_index][i][1]\n \t\tpose.pose.position.z = 0\n \t\tobject_path_planned.poses.append(pose)\n\n \tPAM_path_planned = Path()\n \tPAM_path_planned.header.frame_id = 'frame_0'\n \tif min_index != current_leg:\n \t\tfor i in range(len(path[min_index][action_push_pull[min_index]])):\n \t\t\tpose = PoseStamped()\n \t\t\tpose.pose.position.x, pose.pose.position.y, pose.pose.orientation.z =path[min_index][action_push_pull[min_index]][i]\n \t\t\tPAM_path_planned.poses.append(pose)\n else:\n \tmin_index = 5\n \tmin_value = float(\"inf\")\n if 0 < min_index and min_index <= 4:\n force_d = force[min_index][0]\n else:\n force_d = [0,0,0]\n\n return cost ,min_index, force_d, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned", "def move_straight(robot, dist):\n journey = Journey(robot, distance=dist)\n journey.start()\n robot.position.move(dist)\n sleep(0.5)", "def set_hybrid_control_trajectory(self, trajectory, model, max_force_torque, timeout=5.0,\n stop_on_target_force=False, termination_criteria=None,\n displacement_epsilon=0.002, check_displacement_time=2.0,\n verbose=True, debug=False, time_compensation=True):\n\n # For debug\n # data_target = []\n # data_actual = []\n # data_target2 = []\n # data_dxf = []\n reduced_speed = np.deg2rad([100, 100, 100, 250, 250, 250])\n\n xb = self.end_effector()\n failure_counter = 0\n\n ptp_index = 0\n q_last = self.joint_angles()\n\n trajectory_time_compensation = model.dt * 10. 
if time_compensation else 0.0 # Hyperparameter\n\n if trajectory.ndim == 1: # just one point\n ptp_timeout = timeout\n model.set_goals(position=trajectory)\n else: # trajectory\n ptp_timeout = timeout / float(len(trajectory)) - trajectory_time_compensation\n model.set_goals(position=trajectory[ptp_index])\n\n log = {SPEED_LIMIT_EXCEEDED: 0, IK_NOT_FOUND: 0}\n\n result = DONE\n\n standby_timer = rospy.get_time()\n standby_last_pose = self.end_effector()\n standby = False\n\n if debug:\n avg_step_time = 0.0\n step_num = 0\n\n # Timeout for motion\n initime = rospy.get_time()\n sub_inittime = rospy.get_time()\n while not rospy.is_shutdown() \\\n and (rospy.get_time() - initime) < timeout:\n if debug:\n start_time = rospy.get_time()\n\n # Transform wrench to the base_link frame\n Wb = self.get_ee_wrench()\n # Current position in task-space\n xb = self.end_effector()\n\n if termination_criteria is not None:\n assert isinstance(termination_criteria, types.LambdaType), \"Invalid termination criteria, expecting lambda/function with one argument[current pose array[7]]\"\n if termination_criteria(xb, standby):\n rospy.loginfo(\"Termination criteria returned True, stopping force control\")\n result = TERMINATION_CRITERIA\n break\n\n if (rospy.get_time() - sub_inittime) > ptp_timeout:\n sub_inittime = rospy.get_time()\n ptp_index += 1\n if ptp_index >= len(trajectory):\n model.set_goals(position=trajectory[-1])\n elif not trajectory.ndim == 1: # For some reason the timeout validation is not robust enough\n model.set_goals(position=trajectory[ptp_index])\n\n Fb = -1 * Wb # Move in the opposite direction of the force\n if stop_on_target_force and np.all(np.abs(Fb)[model.target_force != 0] > np.abs(model.target_force)[model.target_force != 0]):\n rospy.loginfo('Target F/T reached {}'.format(np.round(Wb, 3)) + ' Stopping!')\n self.set_target_pose_flex(pose=xb, t=model.dt)\n result = STOP_ON_TARGET_FORCE\n break\n\n # Safety limits: max force\n if np.any(np.abs(Wb) > max_force_torque):\n rospy.logerr('Maximum force/torque exceeded {}'.format(np.round(Wb, 3)))\n self.set_target_pose_flex(pose=xb, t=model.dt)\n result = FORCE_TORQUE_EXCEEDED\n break\n\n # Current Force in task-space\n dxf, dxf_pos, dxf_force = model.control_position_orientation(Fb, xb) # angular velocity\n\n xc = transformations.pose_from_angular_velocity(xb, dxf, dt=model.dt)\n\n # Avoid extra acceleration when a point failed due to IK or other violation\n # So, this corrects the allowed time for the next point\n dt = model.dt * (failure_counter+1)\n\n result = self._actuate(xc, dt, q_last, reduced_speed)\n\n # For debug\n # data_actual.append(self.end_effector())\n # data_target.append(xc)\n # data_target2.append(model.target_position)\n # data_dxf.append(dxf_force)\n\n if result != DONE:\n failure_counter += 1\n if result == IK_NOT_FOUND:\n log[IK_NOT_FOUND] += 1\n if result == SPEED_LIMIT_EXCEEDED:\n log[SPEED_LIMIT_EXCEEDED] += 1\n continue # Don't wait since there is not motion\n else:\n failure_counter = 0\n q_last = self.joint_angles()\n\n # Compensate the time allocated to the next command when there are failures\n # Especially important for following a motion trajectory\n for _ in range(failure_counter+1):\n self.rate.sleep()\n\n standby_time = (rospy.get_time() - standby_timer)\n if standby_time > check_displacement_time:\n displacement_dt = np.linalg.norm(standby_last_pose[:3] - self.end_effector()[:3])\n standby = displacement_dt < displacement_epsilon\n if standby:\n rospy.logwarn(\"No more than %s displacement in the last %s 
seconds\" % (round(displacement_dt, 6), check_displacement_time))\n last_pose = self.end_effector()\n standby_timer = rospy.get_time()\n standby_last_pose = self.end_effector()\n\n if debug:\n step_time = rospy.get_time() - start_time\n avg_step_time = step_time if avg_step_time == 0 else getAvg(avg_step_time, step_time, step_num)\n step_num += 1\n\n # For debug\n # np.save(\"/root/o2ac-ur/underlay_ws/src/ur_python_utilities/ur_control/config/actual\", data_actual)\n # np.save(\"/root/o2ac-ur/underlay_ws/src/ur_python_utilities/ur_control/config/target\", data_target)\n # np.save(\"/root/o2ac-ur/underlay_ws/src/ur_python_utilities/ur_control/config/target2\", data_target2)\n # np.save(\"/root/o2ac-ur/underlay_ws/src/ur_python_utilities/ur_control/config/trajectory\", trajectory)\n # np.save(\"/root/o2ac-ur/underlay_ws/src/ur_python_utilities/ur_control/config/data_dxf\", data_dxf)\n if debug:\n rospy.loginfo(\">>> Force Control Aprox. time per step: %s <<<\" % str(avg_step_time))\n hz = 1./avg_step_time if avg_step_time > 0 else 0.0\n rospy.loginfo(\">>> Force Control Aprox. Frequency: %s <<<\" % str(hz))\n if verbose:\n rospy.logwarn(\"Total # of commands ignored: %s\" % log)\n return result", "def left(self, param):\n\t\tglobal estop_flag, move_state\n\t\t#If input angle is zero, set angle to default\n\t\tif param:\n\t\t\tangle = param\n\t\telse:\n\t\t\tangle = riu.default_angle\n\n\t\tsignal.alarm(0) #Disable timer interrupt for the duration of the movement\n\t\t#safely grab current yaw\n\t\twith self.move_state_lock:\n\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t#Set goal to yaw+angle. Add 360 then mod to account for negative angles but avoid going over 360\n\t\tgoal = (current_yaw + angle) % 360\n\t\thalf_goal = (current_yaw + angle/2) % 360\n\t\tif self.angle_lock:\n\t\t\tif goal >= 315 and goal < 45:\n\t\t\t\tgoal = self.zeroed_angle\n\t\t\telif goal >= 45 and goal < 135:\n\t\t\t\tgoal = self.zeroed_angle + 90\n\t\t\telif goal >= 135 and goal < 225:\n\t\t\t\tgoal = self.zeroed_angle + 180\n\t\t\telif goal >= 225 and goal < 315:\n\t\t\t\tgoal = self.zeroed_angle + 270\n\t\tgoal = goal % 360\n\t\thalf_goal = (current_yaw + angle/2) % 360\n\t\thalfway_flag = False #used to flag if we've already sent out a halfway message\n\t\t#Anonymous function that calculates the current counterclockwise distance to the goal\n\t\tchkdist = lambda pos, goal: round(goal - pos + 360 * (goal < pos), 1)\n\t\t#Gets current distance and initially sets previous distance = distance\n\t\tdistance = chkdist(current_yaw, goal)\n\t\tprev_dist = distance\n\t\t\"\"\"Continues to move while absolute distance is not within angular_error and counterclockwise\n\t\tdistance is not increasing. NOTE: absolute distance is the shortest distance in either direction,\n\t\twhile counterclockwise distance is the distance using only counterclockwise movement.\n\t\tThe angular_error condition was added because the movements tended to end within the first few \n\t\tcycles due to some float error. 
With the error condition, the movement can only end when inside\n\t\tat least the general area of the goal.\"\"\"\n\t\twhile distance <= prev_dist or self.get_abs_dist(current_yaw, goal) > riu.angular_error:\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\t#Construct and publish left turn message\n\t\t\t\ttwist_msg = Twist()\n\t\t\t\ttwist_msg.angular.z = riu.turn_rate\n\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#If distance to goal is less than half the initial distance, publish the half done message\n\t\t\t\tif distance <= half_goal and not halfway_flag:\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\t\t\t\t#Update current position\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t\t\t#Set previous distance, then update distance based on new position\n\t\t\t\tprev_dist = distance\n\t\t\t\tdistance = chkdist(current_yaw, goal)\n\t\t\trospy.sleep(.2)\n\t\t#After loop exit, publish stop message and send done message to cmd_queue\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval) #Restart timer", "def advance(distance, angle, da):\n cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=5)\n # How fast will we update the robot's movement?\n rate = 10\n # Set the equivalent ROS rate variable\n r = rospy.Rate(rate)\n # Set the forward linear speed to 0.2 meters per second\n if distance >= 0.0:\n linear_speed = 0.5\n else:\n linear_speed = -0.5\n # Set the travel distance in meters\n goal_distance = abs(distance)\n # Set the rotation speed in radians per second\n if angle < 0.0:\n angular_speed = -0.5\n else:\n angular_speed = 0.5\n # Set the angular tolerance in degrees converted to radians\n angular_tolerance = radians(0.5)\n # Set the rotation angle to angle in radians \n goal_angle = angle\n # Initialize the tf listener\n tf_listener = tf.TransformListener()\n # Give tf some time to fill its buffer\n rospy.sleep(2)\n # Set the map frame\n map_frame = '/map'\n # Set the odom frame\n odom_frame = '/odom'\n \"\"\" Find out if the robot uses /map->/odom transform \"\"\"\n try:\n tf_listener.waitForTransform(map_frame, odom_frame, rospy.Time(), rospy.Duration(1.0))\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"Cannot find transform between /map and /odom\")\n rospy.signal_shutdown(\"tf Exception\") \n # Find out if the robot uses /base_link or /base_footprint\n try:\n tf_listener.waitForTransform(odom_frame, '/base_footprint', rospy.Time(), rospy.Duration(1.0))\n base_frame = '/base_footprint'\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n try:\n tf_listener.waitForTransform(odom_frame, '/base_link', rospy.Time(), rospy.Duration(1.0))\n base_frame = '/base_link'\n except (tf.Exception, tf.ConnectivityException, tf.LookupException):\n rospy.loginfo(\"Cannot find transform between /odom and /base_link or /base_footprint\")\n rospy.signal_shutdown(\"tf Exception\") \n # Initialize the position variable as a Point type\n position = Point() \n # Initialize the movement command\n move_cmd = Twist()\n \n\n # Get the starting position values \n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n x_start = position.x\n y_start = position.y\n \n # Keep track of the distance traveled\n dist = 0.0\n #pdb.set_trace()\n if da:\n print bcolors.OKGREEN + \"da True\" + 
bcolors.ENDC\n print bcolors.OKGREEN + \"Empieza distancia\" + bcolors.ENDC\n # Set the movement command to forward motion\n move_cmd.linear.x = linear_speed\n bump_count = 0\n # Enter the loop to move along\n while dist < goal_distance and not rospy.is_shutdown():\n #pdb.set_trace()\n last_dist = dist\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current position\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the Euclidean distance from the start\n dist = sqrt(pow((position.x - x_start), 2) + pow((position.y - y_start), 2))\n \n if dist == last_dist and dist != 0.0:\n bump_count += 1\n print \"dist, goal_distance\", dist, goal_distance\n print \"BUMP\"+str(bump_count)\n if bump_count > 10:\n # Move forward for a time to go the desired distance\n linear_duration = 1.5/abs(linear_speed) \n ticks = int(linear_duration * rate)\n move_cmd.linear.x *= -1\n for t in range(ticks):\n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n continue\n # Stop the robot before the rotation\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n \n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n # Track the last angle measured\n last_angle = quat_to_angle(rotation)\n print bcolors.OKGREEN + \"Empieza angle\" + bcolors.ENDC\n # Track how far we have turned\n turn_angle = 0\n done = False\n while abs(turn_angle + angular_tolerance) < abs(goal_angle) and not rospy.is_shutdown():\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the amount of rotation since the last loop\n delta_angle = normalize_angle(quat_to_angle(rotation) - last_angle)\n # Add to the running total\n turn_angle += delta_angle\n last_angle = quat_to_angle(rotation)\n\n if (abs(turn_angle + angular_tolerance) > abs(goal_angle*4/5) or abs(goal_angle) < radians(2)) and not done:\n #pdb.set_trace()\n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n if angle < 0.0:\n angular_speed = -0.05\n else:\n angular_speed = 0.05\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n done = True\n \n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n else:\n print bcolors.OKGREEN + \"da False\" + bcolors.ENDC\n #pdb.set_trace()\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n # Track the last angle measured\n last_angle = quat_to_angle(rotation)\n print bcolors.OKGREEN + \"Empieza angle\" + bcolors.ENDC\n # Track how far we have turned\n turn_angle = 0\n done = False\n while abs(turn_angle + angular_tolerance) < abs(goal_angle) and not rospy.is_shutdown():\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the amount of rotation since the last loop\n delta_angle = normalize_angle(quat_to_angle(rotation) - last_angle)\n # Add to the running total\n turn_angle += delta_angle\n last_angle = quat_to_angle(rotation)\n# print \"x\", position.x\n# print \"y\", position.y\n# print \"la\", last_angle\n# print \"ta\", degrees(turn_angle)\n# print \"\\n\"\n #raw_input(\"Press ENTER to continue ...\")\n if (abs(turn_angle + angular_tolerance) > 
abs(goal_angle*4/5) or abs(goal_angle) < radians(2)) and not done:\n #pdb.set_trace()\n # Stop the robot before the next leg\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n if angle < 0.0:\n angular_speed = -0.05\n else:\n angular_speed = 0.05\n # Set the movement command to a rotation\n move_cmd.angular.z = angular_speed\n done = True\n \n # Stop the robot before the next movement\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n print bcolors.OKGREEN + \"Empieza distancia\" + bcolors.ENDC \n #pdb.set_trace()\n # Get the starting position values \n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n x_start = position.x\n y_start = position.y\n \n move_cmd.linear.x = linear_speed\n # Keep track of the distance traveled\n dist = 0.0\n bump_count = 0\n # Enter the loop to move along\n while dist < goal_distance and not rospy.is_shutdown():\n last_dist = dist\n # Publish the Twist message and sleep 1 cycle \n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n # Get the current position\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n # Compute the Euclidean distance from the start\n dist = sqrt(pow((position.x - x_start), 2) + pow((position.y - y_start), 2))\n \n if dist == last_dist and dist != 0.0:\n bump_count += 1\n print \"dist, goal_distance\", dist, goal_distance\n print \"BUMP\"+str(bump_count)\n if bump_count > 10:\n # Move forward for a time to go the desired distance\n linear_duration = 1.5/abs(linear_speed) \n ticks = int(linear_duration * rate)\n move_cmd.linear.x *= -1\n for t in range(ticks):\n cmd_vel_pub.publish(move_cmd)\n r.sleep()\n continue\n # Stop the robot before the rotation\n move_cmd = Twist()\n cmd_vel_pub.publish(move_cmd)\n rospy.sleep(1)\n\n # Stop the robot for good\n cmd_vel_pub.publish(Twist())\n rospy.sleep(1)\n\n # Get the current rotation\n (position, rotation) = get_odom(tf_listener, odom_frame, base_frame)\n \n return (position, rotation)", "def test_planning():\n\n joints1 = [0.0, 2.9, 1.3, 4.2, 1.4, 0.0]\n joints2 = [4.80, 2.92, 1.00, 4.20, 1.45, 1.32]\n\n\n path_planner = PathPlanner(\"manipulator\")\n\n print path_planner.group.get_end_effector_link()\n\n while True:\n raw_input(\"Press Enter to move to position 1\")\n plan = path_planner.plan_to_config(joints1)\n path_planner.execute_path(plan)\n rospy.sleep(0.5)\n\n raw_input(\"Press Enter to move to position 2\")\n plan = path_planner.plan_to_config(joints2)\n path_planner.execute_path(plan)\n rospy.sleep(0.5)", "def cozmo_drive_straight(robot, dist, speed):\n robot.drive_straight(distance_mm(dist), speed_mmps(speed)).wait_for_completed()", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.alphabeta(self, sample_space, affinity, depth_limit, -10000, 10001, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def cozmo_drive_straight(robot, dist, speed):\n\trobot.drive_straight(distance_mm(dist), speed_mmps(speed)).wait_for_completed()", "def moveShip(self,screen,maze):\n self.checkCheckpoint()\n angle = 0\n accel = 0 \n controlInputs = self.getDecision()\n angle -= logis(controlInputs[0]) * self.maxAngle\n angle += logis(controlInputs[1]) * 
self.maxAngle\n accel += logis(controlInputs[2]) * self.maxAccel\n brake = logis(controlInputs[3])\n \n self.updateSpeed(accel,angle,brake) \n self.updatePos()\n self.getInputs(maze)", "def moveToNeighbor(self, dir, alliedUnitsAllowed=False, enemyUnitsAllowed=True):\n \n #TODO: Fix this so you don't depend on Falses for empty spaces\n \n unit = self.selectedUnit\n if dir == -1:\n self._moveHelper(unit.neighbors[0].pos.pos)\n if dir == MOVE_UP:\n print \"got up move\"\n for neighbor in unit.neighbors:\n if neighbor.isNorthOf(unit):\n print \"got north neighbor\"\n self._moveHelper(neighbor.pos.pos)\n break\n elif dir == MOVE_DOWN:\n for neighbor in unit.neighbors:\n if neighbor.isSouthOf(unit):\n self._moveHelper(neighbor.pos.pos)\n break\n elif dir == MOVE_LEFT:\n for neighbor in unit.neighbors:\n if neighbor.isWestOf(unit):\n self._moveHelper(neighbor.pos.pos)\n break\n elif dir == MOVE_RIGHT:\n for neighbor in unit.neighbors:\n if neighbor.isEastOf(unit):\n self._moveHelper(neighbor.pos.pos)\n break", "def move2goal(self):\n vel_msg = Twist()\n\n # Linear velocity in the x-axis.\n vel_msg.linear.x = 0.4 # m/s\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = 1.5 # rad/s\n\n # Starting point reference\n goal_x = 1.0 \n goal_y = 1.0\n x_ref = 1.0\n y_ref = 1.0\n\n # Previous Reference\n x_prev_ref = 0.0\n y_prev_ref = 0.0\n theta_prev_ref = self.theta\n vrefA = 0.5\n wrefA = 0.0\n \n i = 0\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n\n x_prev_ref = tPx[0]\n y_prev_ref = tPy[0]\n theta_prev_ref = tPTheta[0]\n\n print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n print(f'ACTUAL THETA: {self.theta}')\n\n while not rospy.is_shutdown():\n \n if i >= 8:\n i = 0\n\n x_ref = goal_x\n y_ref = goal_y\n\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n # inputRef = ControllerInput(\n # xref=x_ref,\n # yref=y_ref,\n # RstateX=self.x_position,\n # RstateY=self.y_position,\n # RstateTheta=self.theta,\n # RstateVelocity=vel_msg.linear.x,\n # RstateW=vel_msg.angular.z,\n # xrefA=x_prev_ref,\n # yrefA=y_prev_ref,\n # thetarefA=theta_prev_ref,\n # vrefA=vrefA,\n # wrefA=wrefA\n # )\n\n # rospy.loginfo(f'X: {self.x_position} \\tY: {self.y_position}\\t Theta: {self.theta} ')\n # nmpc = NMPC_Controller(inputRef)\n # tPx, tPy, tPTheta = nmpc.test_create_mini_path()\n\n # print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n # print(f'ACTUAL THETA: {self.theta}')\n \n # new_v, new_w = nmpc.start_optmizer()\n # new_v = round(new_v, 4)\n # new_w = round(new_w, 4)\n\n # print(new_v, new_w)\n # rospy.loginfo(\n # f'X: {self.x_position}, Y: {self.y_position}, THETA: {self.theta}')\n \n # self.velocity_publisher.publish(vel_msg)\n # x_prev_ref = self.x_position\n # y_prev_ref = self.y_position\n # theta_prev_ref = self.theta\n # vrefA = vel_msg.linear.x\n # wrefA = vel_msg.angular.z\n \n\n # theta_prev_ref = self.theta\n # vel_msg.angular.z = 0.0\n\n\n '''Update the linear & angular velocity'''\n # vel_msg.linear.x = new_v\n # vel_msg.angular.z = new_w\n\n if i < 8:\n inputRef = ControllerInput(\n xref = tPx[i],\n yref = tPy[i],\n RstateX = self.x_position,\n RstateY = self.y_position,\n RstateTheta = self.theta,\n RstateVelocity = vel_msg.linear.x,\n RstateW = vel_msg.angular.z,\n xrefA = 
x_prev_ref,\n yrefA = y_prev_ref,\n thetarefA = theta_prev_ref,\n vrefA = vrefA,\n wrefA = wrefA\n )\n\n nmpc = NMPC_Controller(inputRef)\n new_v, new_w = nmpc.start_optmizer()\n new_v = round(new_v, 4)\n new_w = round(new_w, 4)\n\n print(f'(actual) X: {self.x_position}, Y: {self.x_position}, THETA: {self.theta}')\n print(f'(desired) X: {tPx[i]}, Y: {tPy[i]}')\n print(f'V: {vel_msg.linear.x}\\tW: {vel_msg.angular.z}')\n\n x_prev_ref = tPx[i-1]\n y_prev_ref = tPy[i-1]\n theta_prev_ref = tPTheta[i-1]\n vrefA = vel_msg.linear.x\n wrefA = vel_msg.angular.z\n\n vel_msg.linear.x = new_v\n vel_msg.angular.z = new_w\n # vel_msg.angular.z = 0.0\n\n print(f'index: {i}')\n\n distance = math.sqrt((self.x_position - tPx[i])**2 + (self.y_position - tPy[i])**2)\n if distance < 0.3:\n print(f'Distance: {distance}')\n i+=1\n\n\n self.velocity_publisher.publish(vel_msg)\n self.rate.sleep()\n\n rospy.spin()", "def linear_move(self, dist):\n\t\tglobal estop_flag, move_state\n\t\tsignal.alarm(0) #Disable timer interrupt for the duration of the movement\n\t\thalfway_flag = False\n\t\t\n\t\twith self.move_state_lock:\n\t\t\tstart_x, start_y, start_z = move_state['x'], move_state['y'], move_state['z']\n\t\tcurrent_x = start_x\n\t\tcurrent_y = start_y\n\t\tcurrent_z = start_z\n\t\t#While the distance travelled is less than target distance\n\t\twhile math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) < abs(dist):\n\t\t\t#Check if the emergency stop flag is set, if so, break the current loop and reset velocity\t\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\t#If the distance goal is negative, move backward\n\t\t\t\tif dist < 0:\n\t\t\t\t\t#Send negative velocity\n\t\t\t\t\ttwist_msg = Twist()\n\t\t\t\t\ttwist_msg.linear.x = -1 * riu.move_rate\n\t\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#If distance goal is positive, move forward\n\t\t\t\telif dist > 0:\n\t\t\t\t\t#Send positive velocity\n\t\t\t\t\ttwist_msg = Twist()\n\t\t\t\t\ttwist_msg.linear.x = riu.move_rate\n\t\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#Check if the current movement is half completed, if so, send a Half message and set flag to avoid message duplication\n\t\t\t\tif (math.sqrt((current_x - start_x)**2 + (current_y - start_y)**2 + (current_z - start_z)**2) >= abs(dist)/2\n\t\t\t\t\tand not halfway_flag):\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\t\t\t\t#update current_x, current_y, and current_z (using local variables to be thread safe)\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_x = move_state['x']\n\t\t\t\t\tcurrent_y = move_state['y']\n\t\t\t\t\tcurrent_z = move_state['z']\n\t\t\trospy.sleep(.2)\n\t\t\t\t\n\t\t#previously had while, finally block -> illegal syntax in python. 
Just moved to outside loop.\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval)", "def set_hybrid_control(self, model, max_force_torque, timeout=5.0, stop_on_target_force=False):\n\n reduced_speed = np.deg2rad([100, 100, 100, 150, 150, 150])\n q_last = self.joint_angles()\n\n # Timeout for motion\n initime = rospy.get_time()\n xb = self.end_effector()\n failure_counter = 0\n\n while not rospy.is_shutdown() \\\n and (rospy.get_time() - initime) < timeout:\n\n # Transform wrench to the base_link frame\n Wb = self.get_ee_wrench()\n\n # Current Force in task-space\n Fb = -1 * Wb\n # Safety limits: max force\n if np.any(np.abs(Fb) > max_force_torque):\n rospy.logerr('Maximum force/torque exceeded {}'.format(np.round(Wb, 3)))\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return FORCE_TORQUE_EXCEEDED\n\n if stop_on_target_force and np.any(np.abs(Fb)[model.target_force != 0] > model.target_force[model.target_force != 0]):\n rospy.loginfo('Target F/T reached {}'.format(np.round(Wb, 3)) + ' Stopping!')\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return STOP_ON_TARGET_FORCE\n\n # Current position in task-space\n xb = self.end_effector()\n\n dxf = model.control_position_orientation(Fb, xb) # angular velocity\n\n # Limit linear/angular velocity\n dxf[:3] = np.clip(dxf[:3], -0.5, 0.5)\n dxf[3:] = np.clip(dxf[3:], -5., 5.)\n\n xc = transformations.pose_from_angular_velocity(xb, dxf, dt=model.dt)\n\n # Avoid extra acceleration when a point failed due to IK or other violation\n # So, this corrects the allowed time for the next point\n dt = model.dt * (failure_counter+1)\n\n q = self._solve_ik(xc)\n if q is None:\n rospy.logwarn(\"IK not found\")\n result = IK_NOT_FOUND\n else:\n q_speed = (q_last - q)/dt\n if np.any(np.abs(q_speed) > reduced_speed):\n rospy.logwarn(\"Exceeded reduced max speed %s deg/s, Ignoring command\" % np.round(np.rad2deg(q_speed), 0))\n result = SPEED_LIMIT_EXCEEDED\n else:\n result = self.set_joint_positions_flex(position=q, t=dt)\n\n if result != DONE:\n failure_counter += 1\n continue # Don't wait since there is not motion\n else:\n failure_counter = 0\n\n # Compensate the time allocated to the next command when there are failures\n for _ in range(failure_counter+1):\n self.rate.sleep()\n\n q_last = self.joint_angles()\n return DONE", "def right(self, param):\n\t\tglobal estop_flag, move_state\n\t\t#If input angle is zero, set angle to default\n\t\tif param:\n\t\t\tangle = param\n\t\telse:\n\t\t\tangle = riu.default_angle\n\n\t\tsignal.alarm(0) #Disable timer interrupt for the duration of the movement\n\t\t#safely grab current yaw\n\t\twith self.move_state_lock:\n\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t#Set goal to yaw+angle. 
Add 360 then mod to account for negative angles but avoid going over 360\n\t\tgoal = (current_yaw - angle + 360) % 360\n\t\tif self.angle_lock:\n\t\t\tif goal >= 315 and goal < 45:\n\t\t\t\tgoal = self.zeroed_angle\n\t\t\telif goal >= 45 and goal < 135:\n\t\t\t\tgoal = self.zeroed_angle + 90\n\t\t\telif goal >= 135 and goal < 225:\n\t\t\t\tgoal = self.zeroed_angle + 180\n\t\t\telif goal >= 225 and goal < 315:\n\t\t\t\tgoal = self.zeroed_angle + 270\n\t\tgoal = goal % 360\n\t\thalf_goal = (current_yaw - angle/2 + 360) % 360\n\t\thalfway_flag = False #used to flag if we've already sent out a halfway message\n\t\t#Anonymous function that calculates the current clockwise distance to the goal\n\t\tchkdist = lambda pos, goal: round(pos - goal + 360 * (goal > pos), 1)\n\t\t#Gets current distance and initially sets previous distance = distance\n\t\tdistance = chkdist(current_yaw, goal)\n\t\tprev_dist = distance\n\t\t\"\"\"Continues to move while absolute distance is not within angular_error and clockwise\n\t\tdistance is not increasing. NOTE: absolute distance is the shortest distance in either direction,\n\t\twhile clockwise distance is the distance using only clockwise movement.\n\t\tThe angular_error condition was added because the movements tended to end within the first few \n\t\tcycles due to some float error. With the error condition, the movement can only end when inside\n\t\tat least the general area of the goal.\"\"\"\n\t\twhile distance <= prev_dist or self.get_abs_dist(current_yaw, goal) > riu.angular_error:\n\t\t\tif estop_flag:\n\t\t\t\tself.publisher.publish(Mover.stop_msg)\n\t\t\telse:\n\t\t\t\t#Build and publish right turn message\n\t\t\t\ttwist_msg = Twist()\n\t\t\t\ttwist_msg.angular.z = -1 * riu.turn_rate\n\t\t\t\tself.publisher.publish(twist_msg)\n\t\t\t\t#If distance to goal is less than half the initial distance, publish the half done message\n\t\t\t\tif distance <= half_goal and not halfway_flag:\n\t\t\t\t\thalfway_flag = True\n\t\t\t\t\tself.status_pub.publish(String(\"half\"))\n\t\t\t\t#Update current position\n\t\t\t\twith self.move_state_lock:\n\t\t\t\t\tcurrent_yaw = (math.degrees(move_state['yaw']) + 360) % 360\n\t\t\t\t#Update previous distance, then update distance based on current position\n\t\t\t\tprev_dist = distance\n\t\t\t\tdistance = chkdist(current_yaw, goal)\n\t\t\trospy.sleep(.2)\n\t\t#After loop end, send stop message and send done message to cmd_queue\t\n\t\tself.publisher.publish(Mover.stop_msg)\n\t\tself.status_pub.publish(String(\"done\"))\n\t\tsignal.alarm(Mover.ready_message_interval) #Restart timer", "def search_parking_lot(self):\n\n self.start_driving()\n self.velocity = 8\n self.distance = 250 # maximum searching distance\n self.angle = 1.5 # TODO\n self.drive_thread.reset()\n\n vacant_distance = 0\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n if self.sensor_manager.right > 25:\n vacant_distance += 1\n else:\n vacant_distance = 0\n\n if vacant_distance >= 35:\n while self.sensor_manager.right > 25:\n time.sleep(0.1)\n\n distance_right = self.sensor_manager.right\n\n if 14 <= distance_right <= 18:\n self.angle = 0\n self.distance = 35\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n elif distance_right > 18:\n self.adjust_starting_position(\"left\")\n elif distance_right < 14:\n self.adjust_starting_position(\"right\")\n \n break\n\n self.stop_driving()", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = 
self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def shiftAside(state, distDemar, angleInter):\n opp = state.opponent_nearest_ball\n while True:\n dest = Vector2D.create_random(low=-1, high=1)\n dest.norm = distDemar\n dest += state.ball_pos\n if state.is_valid_position(dest) and state.free_trajectory(dest, angleInter) and \\\n distance_horizontale(dest, state.my_goal) > distance_horizontale(opp.position, state.my_goal)-5.:\n break\n return goTo(state, dest)", "def adjust_starting_position(self, direction):\n\n direction = 1 if direction in [\"left\", 1] else -1\n\n self.angle = direction * 25\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n self.angle = 0\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)\n\n self.angle = direction * -25\n self.distance = 12\n self.drive_thread.reset()\n\n while self.drive_thread.driven_distance < self.distance:\n time.sleep(0.1)", "def goTo(self, arena, origin, target):\n\n\t\t# check if target is a valid destination (i.e. not 0)\n\t\t# Find route from current coordinates to point (x,y).\n\t\t# Go in a straight line unless obstacle on the way. Plan in advance for known obstacles.\n\t\t# Plan dynamically for unforseen obstacles.\n\t\t# Djakista search around obstacles\n\n\n\t\tx = origin[0]\n\t\ty = origin[1]\n\n\t\tprint(\"Going from {0} to {1}\".format(origin, target))\n\n\t\tif arena[y][x][0] == 0:\n\t\t\tprint(\"ERROR: Sorry, pick another destination. 
I can't go inside walls.\")\n\t\t\treturn False\n\n\t\taround = [(-1,-1),\n\t\t\t\t(-1,0),\n\t\t\t\t(-1,1),\n\t\t\t\t(1,-1),\n\t\t\t\t(1,0),\n\t\t\t\t(1,1),\n\t\t\t\t(0,1),\n\t\t\t\t(0,-1)]\n\n\t\tgraph = {}\n\n\t\t# Create a set of nodes and edges\n\t\t# For every node in the ARENA matrix\n\t\tfor x in range(arena.shape[1]):\t# 0 to 29\n\t\t\tfor y in range(arena.shape[0]): # 0 to 9\n\t\t\t\tif arena[y][x][0] == 1: # this constrains node in the graph to represent free space in the real world\n\t\t\t\t\t# Find legal surrounding nodes\n\t\t\t\t\tneighbors = {}\n\t\t\t\t\tfor pos in around:\n\t\t\t\t\t\ta = x + pos[0]\n\t\t\t\t\t\tb = y + pos[1]\n\t\t\t\t\t\tif b >= arena.shape[0] or b < 0:\t\t# goes outside bounds of arena\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif a >= arena.shape[1] or a < 0: \t# goes outside bounds of arena\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif arena[b][a][0] != 1: \t\t\t# neighbor node isn't free space\n\t\t\t\t\t\t\tcontinue\n\n\t\t\t\t\t\tneighbors[(a,b)] = 1\n\n\t\t\t\t\tgraph[(x,y)] = neighbors\n\n\t\tdist, pred = dijkstra(graph, start=origin)\n\n\t\tprint(\"Going from {0} to {1}, the shortest path is:\".format(origin, target))\n\t\tprint(shortest_path(graph, origin, target))", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.minimax(self, sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def __init__(self, init_pose=None, init_velocities=None, \n init_angle_velocities=None, runtime=5., target_pos=None):\n # Simulation\n self.success=0\n self.success_rate=deque(maxlen=10) # to record agent's success/failure for 10 consecutive episodes \n self.sim = PhysicsSim(init_pose, init_velocities, init_angle_velocities, runtime) \n self.action_repeat = 3\n\n self.state_size = self.action_repeat * 6\n self.action_low = 0\n self.action_high = 900\n self.action_size = 4\n self.runtime=runtime\n self.x_lower_bound=self.sim.lower_bounds[0]\n self.y_lower_bound=self.sim.lower_bounds[1]\n self.z_lower_bound=self.sim.lower_bounds[2]\n self.x_upper_bound=self.sim.upper_bounds[0]\n self.y_upper_bound=self.sim.upper_bounds[1]\n self.z_upper_bound=self.sim.upper_bounds[2]\n \n #Initial pos\n self.start_pos=init_pose[:3]\n \n # Goal\n self.target_pos = target_pos if target_pos is not None else np.array([0., 0., 10.]) \n \n # distance between initial position and target position \n self.total_distance= (np.dot(self.target_pos-self.start_pos, self.target_pos-self.start_pos))**(0.5)\n # target_margin : if the quad is within a distance of target_margin from the target, then it is declared successful \n self.target_margin=self.total_distance/50", "def ik_point(self, start_joints, target_position, n_steps=40, link_name=None):\n link_name = link_name if link_name is not None else self.tool_frame\n \n assert len(start_joints) == len(self.joint_indices)\n self.sim.update()\n \n # set active manipulator and start joint positions\n self.robot.SetDOFValues(start_joints, self.joint_indices)\n \n request = {\n \"basic_info\" : {\n \"n_steps\" : n_steps,\n \"manip\" : str(self.manip.GetName()), \n \"start_fixed\" : True \n },\n \"costs\" : [\n {\n \"type\" : \"joint_vel\",\n \"params\": 
{\"coeffs\" : [1]} \n },\n ],\n \"constraints\" : [\n {\n \"type\" : \"pose\",\n \"name\" : \"target_pose\",\n \"params\" : {\"xyz\" : list(target_position), \n \"wxyz\" : [0,0,0,1],\n \"link\": link_name,\n \"rot_coeffs\" : [0,0,0],\n \"pos_coeffs\" : [1,1,1]\n }\n \n },\n ],\n \"init_info\" : {\n \"type\" : \"stationary\",\n },\n }\n \n # convert dictionary into json-formatted string\n s = json.dumps(request) \n # create object that stores optimization problem\n prob = trajoptpy.ConstructProblem(s, self.sim.env)\n \n tool_link = self.robot.GetLink(self.tool_frame)\n def penalize_low_height(x):\n self.robot.SetDOFValues(x, self.joint_indices, False)\n z = tool_link.GetTransform()[2,3]\n return max(0, 10.0 - z)\n\n for t in xrange(n_steps-2):\n prob.AddErrorCost(penalize_low_height, [(t,j) for j in xrange(len(self.joint_indices))], \"ABS\", \"PENALIZE_LOW_HEIGHT_%i\"%t)\n \n # do optimization\n result = trajoptpy.OptimizeProblem(prob)\n \n return result.GetTraj()", "def limit_accel_in_turns(v_ego, angle_steers, a_target, CP, angle_later):\n\n a_total_max = interp(v_ego, _A_TOTAL_MAX_BP, _A_TOTAL_MAX_V)\n a_y = v_ego**2 * abs(angle_steers) * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)\n a_y2 = v_ego**2 * abs(angle_later) * CV.DEG_TO_RAD / (CP.steerRatio * CP.wheelbase)\n a_x_allowed = a_total_max - a_y\n a_x_allowed2 = a_total_max - a_y2\n\n a_target[1] = min(a_target[1], a_x_allowed, a_x_allowed2)\n a_target[0] = min(a_target[0], a_target[1])\n #print a_target[1]\n return a_target", "def _get_next_waypoint(self, tolerance_step):\n print('\\nGetting new nav plan.')\n\n for i in range(4):\n try:\n self.plan = self.swarmie.get_plan(\n self.goal,\n tolerance=self.tolerance,\n use_home_layer=self.avoid_home\n )\n break # plan received\n except rospy.ServiceException:\n print('ServiceException.')\n if i < 3:\n print('Expanding tolerance.')\n self.tolerance += tolerance_step\n else:\n raise # tried 3 times, we give up\n\n print('Received nav plan.')\n pose = self.plan.plan.poses[0]\n\n return Point(x=pose.pose.position.x, y=pose.pose.position.y)" ]
[ "0.6225101", "0.62008816", "0.5934905", "0.58985746", "0.58381546", "0.5752612", "0.5699719", "0.5670949", "0.5637514", "0.563369", "0.56063104", "0.5580816", "0.5555075", "0.55549693", "0.5549148", "0.55440676", "0.55309373", "0.552184", "0.54901534", "0.54544854", "0.54470474", "0.5444239", "0.54354703", "0.54281807", "0.5422626", "0.54119223", "0.5399823", "0.5395324", "0.53740185", "0.53055286" ]
0.7091726
0
Get path to previous nightly release results
def getPreviousNightlyPath( numDaysInPast=1 ): myPath= os.environ.get("NICOS_PROJECT_RELNAME_COPY","") #replace rel_x with rel_(x-1) for i in range(0,7): if ("rel_%d" % i) in myPath: myPath = myPath.replace( ("rel_%d" % i), ("rel_%d" % ( (i-numDaysInPast)%7 )) ) break refFile = os.environ.get("NICOS_COPY_HOME","") + "/" + myPath + "/NICOS_area/NICOS_atntest" + os.environ.get("NICOS_SUFFIX","") + "/" + os.path.basename(os.environ.get("ATN_WORK_AREA","")) return refFile
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPreviousBuild():", "def get_previous_release_info(\n previous_release_version: str | None, past_releases: list[ReleaseInfo], current_release_version: str\n) -> str | None:\n previous_release = None\n if previous_release_version == current_release_version:\n # Re-running for current release - use previous release as base for git log\n if len(past_releases) > 1:\n previous_release = past_releases[1].last_commit_hash\n else:\n previous_release = past_releases[0].last_commit_hash if past_releases else None\n return previous_release", "def getLastFinishedBuild():", "def previous():\n releases_list = releases()\n try:\n return releases_list[-2]\n except IndexError:\n return None", "def returnLatest(self) -> None:\n return os.path.join(self.optidir, sorted(os.listdir(self.optidir))[-1])", "def latest_experiment() -> Path:\r\n experiment_dirs = []\r\n for algorithm in OUT_PATH.iterdir():\r\n if not algorithm.is_dir():\r\n continue\r\n for experiment in algorithm.iterdir():\r\n if not experiment.is_dir():\r\n continue\r\n experiment_dirs.append((experiment.stat().st_mtime, experiment))\r\n return max(experiment_dirs)[1]", "def get_last_run(self):\n\n outdir = self.testTopDir + self.suiteName + \"-tests/\"\n\n # this will work through 2099\n if os.path.isdir(outdir):\n dirs = [d for d in os.listdir(outdir) if (os.path.isdir(outdir + d) and\n d.startswith(\"20\"))]\n dirs.sort()\n\n return dirs[-1]\n else:\n return None", "def get_last_path(self):\n folders = os.listdir(self.data_root_path)\n folders.sort(reverse=True)\n spec_path = self.data_root_path / folders[0]\n logging.info('Last download folder was %s', spec_path)\n return spec_path", "def train_history_path(spec_name):\n return root_location + \"specialists/\" + model_name(spec_name) + \"/train_history\"", "def latestidd():\n pth, _ = run_functions.install_paths(version='8.8.0') # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith('EnergyPlus')]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def latestidd():\n pth, _ = run_functions.install_paths(\n version=\"8.8.0\"\n ) # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith(\"EnergyPlus\")]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def get_next_change_file():\n path = '/tmp/perf/'\n changes_to_test = _sorted_ls(path)\n if changes_to_test:\n return os.path.join(path, changes_to_test[0])", "def svn_fs_history_prev(*args):\r\n return _fs.svn_fs_history_prev(*args)", "def report(base_url):\n info = get(base_url)\n last_url = \"{0}/{1}\".format(base_url, info['lastCompletedBuild']['number'])\n last = get(last_url)\n return last", "def output_yesterday(max_past_days=10):\n path = None\n\n for yesterday_path in yesterdays(max_past_days=max_past_days):\n if yesterday_path.is_file():\n path = yesterday_path\n LOG.debug('Found last known journal.')\n break\n else:\n raise ValueError('Could not find previous journal in past {0:d} days.'.format(max_past_days))\n\n return path", "def local_results(self):\n\n return self._local_results_path", "def rtd_build_path(self, version=\"latest\"):\n return os.path.join(self.doc_path, 'rtd-builds', version)", "def _last_roll_revision(self):\n if not self._cached_last_roll_revision:\n revinfo = subprocess2.check_output(['gclient', 'revinfo'],\n 
cwd=self._path_to_chrome)\n project_path = 'src/' + self._path_to_project\n for line in revinfo.splitlines():\n dep_path, source = line.split(': ', 1)\n if dep_path == project_path:\n self._cached_last_roll_revision = source.split('@')[-1]\n break\n assert len(self._cached_last_roll_revision) == 40\n return self._cached_last_roll_revision", "def _get_new_measurement_path() -> pathlib.Path:\n today = strftime(\"%Y%m%d\")\n today_path = DATA_DIR / today\n new_path = get_unique_path(today_path, 'measurement_{:03d}')\n return new_path", "def get_previous_path(cls,tag) :\n if re.search('./',tag) :\n a,tag = os.path.split(tag)\n l = cls.Variants[tag]\n if len(l) == 2 :\n return os.path.join(l[0],l[1])\n else :\n return l[0]", "def currentPreviewPath(self):\n logger.debug(\"Func: currentPreviewPath/getter\")\n if self._currentSceneInfo[\"SubProject\"] is not \"None\":\n path = os.path.join(self._pathsDict[\"previewsDir\"], self._currentSceneInfo[\"Category\"],\n self._currentSceneInfo[\"Name\"])\n else:\n path = os.path.join(self._pathsDict[\"previewsDir\"], self._currentSceneInfo[\"Category\"],\n self._currentSceneInfo[\"SubProject\"], self._currentSceneInfo[\"Name\"])\n return path\n # if os.path.isdir(path):\n # return path\n # else:\n # return \"\"", "def remote_results(self):\n\n return self._remote_results_path", "def previous_step_result(self):\n return self._previous_step_result", "def reportinfo(self):\n return super().reportinfo()[:2] + (self.fspath.relto(os.getcwd()),)", "def staged_ruling_path(self):\r\n return os.path.join(\r\n post.paths[\"RULINGS_CACHE_PATH\"],\r\n \"{}_rulings.json\".format(self.name)\r\n )", "def get_current_prediction_output_path(prediction_output_base_path: str, image_name: str) -> str:\n dirs = [(prediction_output_base_path + d) for d in os.listdir(prediction_output_base_path)]\n newest_dir = max(dirs, key=os.path.getmtime)\n return newest_dir + '/' + image_name.replace('/', '')", "def get_current_release():\n return _CURRENT_RELEASE", "def get_current_previous_version(\n current_run_directory: Path,\n previous_run_directory: Path = None,\n resolved_name: bool = True,\n) -> Tuple[str, str]:\n run_matcher = r\"^\\d{4}\"\n\n parent_dir = current_run_directory.parent\n current_version = current_run_directory.name\n current_version_resolved = current_run_directory.resolve().name\n current_version_date = current_version_resolved.split(\".\")[0]\n previous_version = (\n previous_run_directory.name\n if previous_run_directory\n else sorted(\n [\n p.name\n for p in parent_dir.iterdir()\n if re.search(run_matcher, p.name) and current_version_date > p.name\n ]\n )[-1]\n )\n return (\n current_version_resolved if resolved_name else current_version,\n previous_version,\n )", "def get_local_repository_path():\n result = subprocess.run(\"cmd /c mvn help:evaluate -Dexpression=settings.localRepository\",\n stdout=subprocess.PIPE)\n\n regex = re.compile('.*[INFO].*')\n path = regex.sub(\"\", result.stdout.decode(\"utf-8\")).rstrip().lstrip()\n return path", "def currentScenePath(self):\n logger.debug(\"Func: currentBaseScenePath/getter\")\n\n return os.path.join(self.projectDir, self._currentSceneInfo[\"Versions\"][self.currentVersionIndex-1][\"RelativePath\"])" ]
[ "0.71381366", "0.6181723", "0.58809304", "0.5795301", "0.57847506", "0.56769204", "0.56496173", "0.5642278", "0.56289923", "0.5611356", "0.56105006", "0.5594992", "0.55496407", "0.5513853", "0.54595", "0.54346895", "0.54233766", "0.53764766", "0.5362507", "0.53522503", "0.53162885", "0.5299646", "0.5279605", "0.5279572", "0.5273576", "0.5271983", "0.5247556", "0.5234897", "0.51737666", "0.5167719" ]
0.68767756
1
When the set is used as a map this returns the value for a certain key. The method call is passed down to the tree object.
def __getitem__(self, key): result = self.tree[key] if result is not None: """This needs to be deep-copied in order not to change the elements in the map via the reference, but return the value as in SetlX. The index 2 from key implies stands for the value as key-value-pairs are represented as lists of length 2""" return copy.deepcopy(result.key[2])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __getitem__(self, key):\n return self.keyvaluepair_set.get(key=key).value", "def __getitem__(self, key: ir.Value) -> ir.Value:\n return ops.MapGet(self, key).to_expr()", "def __getitem__(self, key):\n return self._root.__getitem__(key)", "def lookup(self, key):\n return self.root.lookup(key)", "def get(self, key):\n if key is None:\n return None # None is not a valid key\n return get_from_subtree(self.root, key)", "def get(self, key):", "def get(self, key):", "def get(key):\n return current().values[key]", "def lookup(self, key):", "def _map___getitem__(self, key):\n if not isinstance(key, self.keytype):\n raise KeyError('type of key should be ' + repr(self.keytype) + ' but got ' + repr(type(key)))\n if key not in self:\n raise KeyError('key not found')\n return self.second(self.find(key))", "def _get(self, key, current_node):\n pass", "def __getitem__(self, key):\n if self._root:\n node = self._getItemHelper(key, self._root)\n if node:\n return node.value\n else:\n return None\n else:\n return None", "def __getitem__(self, key):\n ndx = self._findPosition(key)\n assert ndx is not None, 'Invalid map key'\n return self._entryList[ndx].value", "def lookup(self, key):\n n = self.find(key)\n if n:\n return n.value\n else:\n return False", "def _get(self, k, currNode):\n if not currNode:\n return\n if k < currNode.key:\n return self._get(k, currNode.leftChild)\n elif k > currNode.key:\n return self._get(k, currNode.rightChild)\n elif k == currNode.key:\n return currNode", "def lookup(self, key):\n k = self.get_position(key)\n\n if self.keys[k] == key:\n return node.values[k]\n\n # Lookup in the child node.\n if self.refs[k+1] == None:\n return None\n return self.refs[k+1].lookup(key)", "def get_value(self, key):\n pass", "def get(self, key):\n\n if key in self.sections:\n return self.sections[key]\n\n return self['root'].get(key)", "def __getitem__(self, key):\n\n # Checks if key is in map\n if self.__contains__(key):\n\n # Get hashed key\n i = self.hash(key)\n\n # Get chain index for key\n chain_idx = self.keys_ref[i].index(key)\n\n # Return value\n return self.table[i][chain_idx]\n\n # If key not in hash map, raise error\n raise KeyError(key)", "def get(self, key):\n\t\treturn self.__get(key, key[1:])", "def get_value(self, key):\n try:\n return self.map[key]\n except KeyError:\n raise KeyError('key is not in map')", "def visit(self, func: Callable[[str], Optional[Any]]) -> Optional[Any]:\n for key, _ in self._recurse():\n result = func(key)\n if result is not None:\n return result", "def find(self, key):\n if self.key == key:\n return self.item\n elif key > self.key:\n if self.right:\n return self.right.find(key)\n else:\n if self.left:\n return self.left.find(key)\n # Replace by correct code", "def __getitem__(self, key):\n return self()[key]", "def __getitem__(self, key):\n return self.get_function()[key]", "def __getitem__(self, key):\n return self.get(key)", "def __getitem__(self, key):\n return self.get(key)", "def __getitem__(self, key):\n return self.get(key)", "def get(self, key):\n h = key%self.m\n a = self.a\n if a[h]:\n return a[h].val\n else:\n return -1", "def get(self, key: t.Hashable) -> t.Any:" ]
[ "0.67747283", "0.66698855", "0.66177166", "0.65392965", "0.6538317", "0.64918303", "0.64918303", "0.6488301", "0.6405794", "0.639881", "0.6393981", "0.6365527", "0.6331228", "0.6322852", "0.6315035", "0.63031965", "0.6259266", "0.6252264", "0.62365365", "0.6217465", "0.62037814", "0.6171286", "0.61538804", "0.610767", "0.60941243", "0.60852927", "0.60852927", "0.60852927", "0.606831", "0.6064105" ]
0.7318522
0
Computes the cartesian product of the set with itself if other is equal to 2.
def __pow__(self, other): if other == 2: # cartesian product new_set = Set() for s1 in self: for s2 in self: new_set += Set(List([[s1, s2]])) return new_set raise TypeError( f"{other} must be 2 to compute cartesian product of a set with itself")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def same_side_product(p, q, a, b):\n return line_ccw(a, b, p) * line_ccw(a, b, q)", "def cartesian_product(self, other, only_accessible_components=True):\n def function(*transitions):\n if equal(t.word_in for t in transitions):\n return (transitions[0].word_in,\n list(zip_longest(\n *(t.word_out for t in transitions)\n )))\n else:\n raise LookupError\n\n def final_function(*states):\n return list(zip_longest(*(s.final_word_out\n for s in states)))\n\n return self.product_FiniteStateMachine(\n other,\n function,\n final_function=final_function,\n only_accessible_components=only_accessible_components)", "def pair_product(x1, x2):\n return np.multiply(x1, x2)", "def __mul__(self, other):\n if isinstance(other, Permutation):\n return Coset(other, self, dir='+')\n gens1 = [perm._array_form for perm in self.generators]\n gens2 = [perm._array_form for perm in other.generators]\n n1 = self._degree\n n2 = other._degree\n start = list(range(n1))\n end = list(range(n1, n1 + n2))\n for i in range(len(gens2)):\n gens2[i] = [x + n1 for x in gens2[i]]\n gens2 = [start + gen for gen in gens2]\n gens1 = [gen + end for gen in gens1]\n together = gens1 + gens2\n gens = [_af_new(x) for x in together]\n return PermutationGroup(gens)", "def product(self, x, y):\n return self( x.lift() * y.lift() )", "def cubical(self, other):\n answer = other.zero()\n pre_join = other.iterated_diagonal(self.arity + self.degree - 1)\n for (k1, v1), (k2, v2) in product(self.items(), pre_join.items()):\n to_dist = []\n zero_summand = False\n for i in range(1, max(k1) + 1):\n key_to_join = tuple(cube for idx, cube in enumerate(k2)\n if k1[idx] == i)\n joined = other.create({key_to_join: 1}).join()\n if not joined:\n zero_summand = True\n break\n to_dist.append(joined)\n if not zero_summand:\n if self.torsion == 2:\n sign = 1\n else:\n sign = compute_sign(k1, k2)\n items_to_dist = [summand.items() for summand in to_dist]\n for pairs in product(*items_to_dist):\n new_k = reduce(lambda x, y: x + y, (pair[0] for pair in pairs))\n new_v = reduce(lambda x, y: x * y, (pair[1] for pair in pairs))\n to_add = answer.create({tuple(new_k): sign * new_v * v1 * v2})\n answer += to_add\n return answer", "def multiplicand_2(p):\n m2 = cddr(p) # (m2 m3 ...)\n rests = cdr(m2) # (m3...)\n if isNull(rests):\n return car(m2)\n else:\n restp = convertToPythonList(cdr(rests))\n return make_product_2(car(m2), car(rests), *restp)", "def inner_product(state_1, state_2):\n return numpy.dot(state_1.conjugate(), state_2)", "def __mul__ (self, other):\n return perm(*(self._getcycles() + other._getcycles()))", "def __mul__(self,other):\n return compositeORGenerator(left = self, right = other)", "def product(self, x, y):\n return self._cached_product(x.value, y.value)", "def cross_product(p0,p1,p2):\n\treturn (((p1[0]-p0[0])*(p2[1]-p0[1]))-((p2[0]-p0[0])*(p1[1]-p0[1])))", "def cartesian_product(G, H):\n GH = _init_product_graph(G, H)\n GH.add_nodes_from(_node_product(G, H))\n GH.add_edges_from(_edges_cross_nodes(G, H))\n GH.add_edges_from(_nodes_cross_edges(G, H))\n return GH", "def cross_product(a,b):\n return [a[1]*b[2]-a[2]*b[1], a[2]*b[0]-a[0]*b[2], a[0]*b[1]-a[1]*b[0]]", "def cross(self, other):\n if self.x == other.x:\n if self.x == 0:\n return other\n else:\n cross = getcopy(self)\n for row in other.a:\n cross.newrow(row)\n cross.newrow([self.prepare(1.0)]*cross.x)\n out = cross.new(1)\n for x in xrange(0, out.x):\n out.store(0,x, cross.minor(cross.y-1, x).det())\n return out\n else:\n raise IndexError(\"Matrix cross product invalid for dimensions 
\"+str(self.y)+\"x\"+str(self.x)+\" and \"+str(other.y)+\"x\"+str(other.x))", "def cross(self, other):\n ox, oy = other\n return self[0] * oy - self[1] * ox", "def commutator(self, other) -> 'MultiVector':\n\n return ((self * other) - (other * self)) / 2", "def prod(self, x, y):\n return (self.basic_operation.reduce(x.original+y.original),\n self.operation1.prod(x.left, y.left),\n self.operation2.prod(x.right, y.right))", "def cross_product(a, b):\n return (a[1]*b[2] - a[2]*b[0],\n a[2]*b[0] - a[0]*b[2],\n a[0]*b[1] - a[1]*b[0])", "def xCrossProd(self, other):\n return other.y * self.z - other.z * self.y", "def __mul__(self, other):\r\n return self.prod(other)", "def product_on_basis(self, g1, g2):\n return self.monomial(g1 * g2)", "def cross(self, other):\n return self.x * other.y - self.y * other.x", "def to_cartesian(self): # TODO\n pass", "def product_2(m1, m2):\r\n return make_mono_admissible_2(list(m1) + list(m2))", "def __mul__(self, other):\n\n newlist = [v for v in self.args]\n for i, v in enumerate(newlist):\n newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1],\n newlist[i][2])\n return Dyadic(newlist)", "def cross(self, other):\n\n return self.x * other.y - self.y * other.x", "def cross(self, other):\n \n return self.x * other[1] - self.y * other[0]", "def cross(self, other):\n\n Vector = sympy.vector.Vector\n if other == Vector.zero:\n return Dyadic.zero\n elif isinstance(other, Vector):\n outdyad = Dyadic.zero\n for k, v in self.components.items():\n cross_product = k.args[1].cross(other)\n outer = k.args[0].outer(cross_product)\n outdyad += v * outer\n return outdyad\n else:\n raise TypeError(str(type(other)) + \" not supported for \" +\n \"cross with dyadics\")", "def product(self):\n return self.right[self.i:] + self.left[:self.i], self.left[self.i:] + self.right[:self.i]" ]
[ "0.67812854", "0.65287524", "0.65172505", "0.64342856", "0.63827103", "0.62974447", "0.6271194", "0.6246401", "0.6210645", "0.6202078", "0.6144252", "0.6134404", "0.6120954", "0.6094628", "0.60883814", "0.6088177", "0.6081006", "0.60778904", "0.60576415", "0.60426986", "0.6040992", "0.60322756", "0.6028471", "0.60209006", "0.6016175", "0.6011088", "0.5984548", "0.59466153", "0.5944474", "0.5943213" ]
0.7357516
0
query artifacts with a list of specific artifactstatus
def query_artifact(artifactstatus_list): # create empty artifact queryset artifacts_merged = Artifact.objects.none() # iterate over artifactstatus objects for artifactstatus in artifactstatus_list: # get artifacts with specific artifactstatus artifacts = Artifact.objects.filter(artifactstatus=artifactstatus) # add artifacts from above query to merge queryset artifacts_merged = artifacts | artifacts_merged # sort artifacts by id artifacts_sorted = artifacts_merged.order_by('artifact_id') # return sorted artifacts with specific artifactstatus return artifacts_sorted
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_steps_with_status(status, steps):\n return [ step for step in steps if step.status == status ]", "def artifact_types_get_req():\n return {'status': 'success',\n 'message': '',\n 'types': Artifact.types()}", "def test_list_artifacts_for_job(fake_client):\n artifacts = Artifacts(fake_client, \"base\")\n artifacts.list_artifacts_for_job(\"org_slug\", \"pipe_slug\", \"build_no\", 123)\n url = \"base/organizations/org_slug/pipelines/pipe_slug/builds/build_no/jobs/123/artifacts/\"\n fake_client.get.assert_called_with(\n url, query_params={\"page\": 0}, with_pagination=False\n )", "def list_artifacts(arn=None, type=None, nextToken=None):\n pass", "def filter(self, artifacts: ArtifactsList) -> ArtifactsList:\n print(self.my_param)\n return artifacts", "def select_by_status(status):\n sql = 'checkStatus'\n val = [status]\n rows = DBconnector.call_procedure(sql, val)\n for r in rows:\n return _wrap_in_parcel_list(r.fetchall())", "def query():\n args_dict = flask.request.args.to_dict()\n if \"versioned\" in args_dict.keys():\n args_dict[\"versioned\"] = args_dict[\"versioned\"].lower() in [\n \"true\",\n \"t\",\n \"yes\",\n \"y\",\n ]\n if \"exclude_deleted\" in args_dict.keys():\n args_dict[\"exclude_deleted\"] = args_dict[\"exclude_deleted\"].lower() in [\n \"true\",\n \"t\",\n \"yes\",\n \"y\",\n ]\n\n record_list = blueprint.driver.query_urls(**args_dict)\n return flask.Response(\n json.dumps(record_list, indent=2, separators=(\", \", \": \")),\n 200,\n mimetype=\"application/json\",\n )", "def get_by_status(status):\n return list(tasks.find({'status': status}))", "def test_iter_statuses(self):\n cassette_name = self.cassette_name('statuses')\n with self.recorder.use_cassette(cassette_name):\n repository = self.gh.repository('sigmavirus24', 'github3.py')\n assert repository is not None\n deployment = find(lambda d: d.id == 801,\n repository.iter_deployments())\n assert deployment is not None\n statuses = list(deployment.iter_statuses(5))\n\n for status in statuses:\n assert isinstance(status,\n github3.repos.deployment.DeploymentStatus)", "def test_list_artifacts_for_build(fake_client):\n artifacts = Artifacts(fake_client, \"base\")\n artifacts.list_artifacts_for_build(\"org_slug\", \"pipe_slug\", \"build_no\")\n url = \"base/organizations/org_slug/pipelines/pipe_slug/builds/build_no/artifacts/\"\n fake_client.get.assert_called_with(\n url, query_params={\"page\": 0}, with_pagination=False\n )", "def _filter_resources_by_status(self, resources: [], statuses: []):\n all_resources = []\n for resource in resources:\n if statuses:\n status = ResourceModel.Status.from_string(resource.status)\n if status in statuses:\n all_resources.append(resource)\n else:\n all_resources.append(resource)\n return all_resources", "def needs_by_status(cls):\n\n db = current.db\n\n # Extract the data\n table = current.s3db.req_need_line\n status = table.status\n number = table.id.count()\n query = (table.deleted == False)\n rows = db(query).select(status, number, groupby = status)\n\n # Build data structure for chart renderer\n rows = dict((row[status], row[number]) for row in rows)\n data = []\n for code, label, color in cls.REQ_STATUS:\n value = rows.get(code)\n data.append({\"label\": s3_str(label),\n \"value\": value if value else 0,\n \"color\": color,\n \"filterKey\": code,\n })\n\n return data", "def get_status_skus(sku_list, status):\n values = []\n if not (sku_list, status):\n return values\n\n for sku_id in sku_list:\n status_query = list(sku_database.find({\"SKU_unit\": int(sku_id), \"Status\": 
status}, {'_id': 0, 'Status': 1}))\n if status_query:\n values.append(sku_id)\n return values", "def find_by_status(self, host, state):", "def test_artifact_exporter_spreadsheet_xls_config_artifactlist_xls_choice_artifactstatus_form_filled(self):\n\n # get object\n artifactstatus_id = Artifactstatus.objects.get(artifactstatus_name = 'artifactstatus_1')\n # get object\n form = ArtifactExporterSpreadsheetXlsConfigForm(data = {\n 'artifactlist_xls_choice_artifactstatus': [artifactstatus_id, ],\n })\n # compare\n self.assertTrue(form.is_valid())", "def test_returns_projects_filter_by_statuses(self):\n # Arrange\n self.test_project_1.status = ProjectStatus.DRAFT.value\n self.test_project_1.save()\n # Set project_2 to be allowed for all users removing as private.\n self.test_project_2.private = False\n self.test_project_2.save()\n # Set status of test_project_3 to archived.\n self.test_project_3.status = ProjectStatus.ARCHIVED.value\n self.test_project_3.save()\n\n # Act\n response_pub = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\"projectStatuses\": [ProjectStatus.PUBLISHED.name]},\n )\n # Assert\n self.assertEqual(response_pub.status_code, 200)\n self.assertEqual(len(response_pub.json[\"results\"]), 1)\n self.assertEqual(\n response_pub.json[\"results\"][0][\"projectId\"], self.test_project_2.id\n )\n\n # Act\n response_draft = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\"projectStatuses\": [ProjectStatus.DRAFT.name]},\n )\n # Assert\n self.assertEqual(response_draft.status_code, 200)\n self.assertEqual(len(response_draft.json[\"results\"]), 1)\n self.assertEqual(\n response_draft.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )\n\n # Act\n response_archived = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\"projectStatuses\": [ProjectStatus.ARCHIVED.name]},\n )\n # Assert\n self.assertEqual(response_archived.status_code, 200)\n self.assertEqual(len(response_archived.json[\"results\"]), 1)\n self.assertEqual(\n response_archived.json[\"results\"][0][\"projectId\"], self.test_project_3.id\n )\n\n # Test multiple statuses returns all projects with those statuses.\n # Act\n response_all = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\n \"projectStatuses\": \"PUBLISHED,DRAFT,ARCHIVED\",\n },\n )\n # Assert\n self.assertEqual(response_all.status_code, 200)\n self.assertEqual(len(response_all.json[\"results\"]), 3)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_all.json[\"results\"]],\n [self.test_project_1.id, self.test_project_2.id, self.test_project_3.id],\n )", "def get_artifacts(token, artifact_names, start, end):\n\n artifacts = []\n page = 1\n retry_limit = 3\n while True:\n req = Request(URL + f\"&page={page}\")\n req.add_header(\"Accept\", \"application/vnd.github.v3+json\")\n req.add_header(\"Authorization\", f\"token {token}\")\n with urlopen(req) as r:\n # Handle hitting the GitHub rate limit\n # If the reset time is < 90s in the future, wait for it (trying 3 times)\n # Otherwise raise an error\n if r.status == 403:\n try:\n reset = int(r.headers.get(\"X-RateLimit-Reset\"))\n except:\n raise RuntimeError(\"Hit GitHub rate limit. Reset header missing.\")\n if retry_limit == 0 or time.time() > reset or reset - time.time() > 90:\n raise RuntimeError(\"Hit GitHub rate limit. 
Reset is at %s\" % time.ctime(reset))\n\n # Try waiting until after the reset time\n time.sleep(10 + (reset - time.time()))\n retry_limit = retry_limit - 1\n continue\n\n if r.status != 200:\n raise RuntimeError(\"Error (%d) with API request: %s\" % (r.status, str(r)))\n\n data = json.load(r)\n\n # Only include the artifacts within the date range and names\n for a in data[\"artifacts\"]:\n if a[\"name\"] not in artifact_names:\n continue\n updated_at = datetime.fromisoformat(a[\"updated_at\"][:-1])\n if start <= updated_at <= end:\n artifacts.append(a)\n\n if len(data[\"artifacts\"]) < 100:\n break\n\n # There are more results, get the next page\n page = page + 1\n\n # Avoid hitting per-second rate limits\n time.sleep(2)\n\n return sorted(artifacts, key=lambda x: x[\"updated_at\"])", "def select_task_by_status(conn, status):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM nodes WHERE status=?\", (status,))\n \n rows = cur.fetchall()\n return rows", "def query_jobs(repo_name, revision):\n return buildapi.query_jobs_schedule(repo_name, revision)", "def add_artifacts_from_result(args, result):\n for art in result.get_artifacts():\n add_artifact(args, art)", "def build_show_artifacts(ctx, build_id, data_type, artifact_relative_name):\n data = ctx.obj.get_build_artifacts_by_build_id(\n build_id,\n data_type=data_type,\n artifact_relative_name=artifact_relative_name)\n if hasattr(data, 'startswith'):\n click.echo(data)\n else:\n output_json_data(data)", "def index_load(self, groupId, artifactId, s):\n res = s.query(MavenArtifactIndex)\\\n .filter(MavenArtifactIndex.group_id == groupId)\\\n .filter(MavenArtifactIndex.artifact_id == artifactId)\\\n .one_or_none()\n return res", "def get_applications(status):\n return status['applications']", "def query_repositories():\n return buildapi.query_repositories()", "def test_get_experiment_artifact(self):\n query_string = [('id', 'id_example'),\n ('path', 'path_example')]\n headers = { \n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n }\n response = self.client.open(\n '/api/v1/experiment-artifacts/download',\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def ListArtifacts(context=None):\n args = artifact_pb2.ApiListArtifactsArgs()\n\n items = context.SendIteratorRequest(\"ListArtifacts\", args)\n return utils.MapItemsIterator(\n lambda data: Artifact(data=data, context=context), items)", "def find_studies_by_status(self, statuses: List[str], exclude: bool = False) -> List[dict]:\n if not statuses:\n return []\n\n logic_str = \"NOT\" if exclude else \"\"\n statuses_str = \",\".join([f'\"{s}\"' for s in statuses])\n\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n c.execute(\n f\"\"\"\n SELECT * from studies\n WHERE status {logic_str} IN ({statuses_str});\n \"\"\"\n )\n results = c.fetchall()\n return results", "async def list(app: AppIdentity, repo: str, ref: str):\n repo = RepoName.parse(repo)\n\n async with aiohttp.ClientSession(\n headers=await app.installation_headers(repo.owner)) as sesh:\n fetch = checks.GetRuns(owner=repo.owner, repo=repo.repo, ref=ref)\n print(await fetch.execute(sesh))", "def download_artifacts(token, artifacts):\n zipfiles = []\n for a in artifacts:\n updated_at = datetime.fromisoformat(a[\"updated_at\"][:-1])\n datename = a[\"name\"]+updated_at.strftime(\"-%Y-%m-%d\")\n filename = datename + \".zip\"\n if os.path.exists(filename):\n 
zipfiles.append((a[\"name\"], datename, filename))\n print(f\"{filename} skipped, already downloaded\")\n continue\n\n print(f\"Fetching {filename}\")\n ok = run_curl(token, a[\"archive_download_url\"], filename)\n if not ok:\n continue\n\n zipfiles.append((a[\"name\"], datename, filename))\n\n return zipfiles", "def _get_artifact_urls_from_label(self, label):\n return run_query('label_artifacts', Namespace(rev=self.rev, label=label))['data']" ]
[ "0.56145376", "0.5502256", "0.5482654", "0.54615754", "0.5460564", "0.53760827", "0.53652465", "0.5332104", "0.53205776", "0.52803314", "0.52712375", "0.52260685", "0.5214534", "0.52103305", "0.51927596", "0.517957", "0.51641685", "0.51429254", "0.51265275", "0.50801486", "0.5062598", "0.5059534", "0.50272745", "0.5021009", "0.5017385", "0.49937823", "0.49659172", "0.4920005", "0.49132872", "0.49082166" ]
0.76355225
0
set artifact times according to config
def set_artifact_times(artifact): # get config main_config_model = MainConfigModel.objects.get(main_config_name = 'MainConfig') # get relevant artifactstatus out of config artifactstatus_requested = main_config_model.artifactstatus_requested.all() artifactstatus_acquisition = main_config_model.artifactstatus_acquisition.all() # set requested time if new artifactstatus of system is in artifactstatus_requested of main config (and has not been set before) if artifact.artifactstatus in artifactstatus_requested and artifact.artifact_requested_time == None: artifact.artifact_requested_time = timezone.now() # set acquisition time if new artifactstatus of system is in artifactstatus_acquisition of main config (and has not been set before) if artifact.artifactstatus in artifactstatus_acquisition and artifact.artifact_acquisition_time == None: artifact.artifact_acquisition_time = timezone.now() # also set request time if it has not already been done if artifact.artifact_requested_time == None: artifact.artifact_requested_time = timezone.now() return artifact
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_analysis_time(self, t):\n for z in self.zones:\n z.set_demand_rate_per_t(t)", "def svn_info_t_prop_time_set(svn_info_t_self, apr_time_t_prop_time): # real signature unknown; restored from __doc__\n pass", "def dt(self, _):\n raise NotImplementedError(\n \"We do not support setting dt/ time step except during setup\")", "def post_generation(self, task):\n task['executiontime'] = self.etrvs.sample(task['pkg'])\n task['criticaltime'] = 4 * task['executiontime']\n if self.last_release:\n task['offset'] = task['release'] - self.last_release\n else:\n task['offset'] = task['release']\n self.last_release = task['release']", "def __init__(self, t):\n\t\tself.delay = math.ceil(t / config.time_resolution)", "def setSubmitTime(t):", "def testEditConfigCreateAveragingTimeSelect(self):\n self.ports.editconfig_create_averaging_time_select(file_name = 'editconfig_create_port_label.xml', port_ids = portsDict['port_ids'], averaging_time_selects = portsDict['averaging_time_select'])", "def set_times(self):\n if self.anchor == \"P\":\n # specified pickup time, 5 minutes early.\n self.earliestPickup = tools.time_to_seconds(str(self.times)) - 300\n # given pickup time, we are 15 minutes late.\n self.latestPickup = tools.time_to_seconds(str(self.times)) + 900\n # We are given pickup time, caluclate pickup time, and are 5 min early\n self.earliestDropoff = tools.time_to_seconds(self.times) - 300 + self.time_for_travel()\n # we are given pickup time, add travel time, and are 20 minutes\n self.latestDropoff = tools.time_to_seconds(self.times) + self.time_for_travel() + 900\n else:\n # this means the dropoff time is given. calculate the time it takes to drive, and then 5 minutes early\n self.earliestPickup = tools.time_to_seconds(str(self.times)) - self.time_for_travel() - 1200\n # given dropoff time, we calucate when to arrive, and then are 15 minutes late.\n self.latestPickup = tools.time_to_seconds(str(self.times)) - self.time_for_travel()\n # we are given dropoff time. 
It's earliest pickup time + travel time\n self.earliestDropoff = tools.time_to_seconds(self.times) - 1200\n self.latestDropoff = tools.time_to_seconds(self.times)", "def runtime_update(conf):\n conf['time'] = time.strftime(\"%m-%d-%H-%M-%S\", time.localtime())\n conf['hash'] = hash(str(conf))\n if conf.has_key('filesystem') and conf['filesystem'] != None:\n fs = str(conf['filesystem'])\n else:\n fs = 'fsnotset'\n conf['result_dir'] = \"{targetdir}/{expname}/{subexpname}-{unique}\".format(\n targetdir = conf['targetdir'], expname = conf['expname'],\n subexpname = conf['subexpname'],\n unique = '-'.join((fs, conf['time'], str(conf['hash']))))", "def __set_time_elements(*args):\n args[0].TimeState.delay_elements = args[1]\n args[0].TimeState.set_delay_elements()", "def testGetConfigAveragingTimeSelect(self):\n self.ports.getconfig_averaging_time_select(file_name = 'get_averaging_time_select.xml', port_ids = portsDict['port_ids'], averaging_time_selects = portsDict['averaging_time_select'])", "def test_update_configuration(self):\n\n ts_name = 'test-update-1'\n configuration = timeserie_configuration.get_timeserie_configure(\n self.get_local_dynamo_cli(), ts_name)\n self.assertTrue(configuration.default)\n self.assertEquals(configuration.retentions, granularities.RETENTIONS_GRANULARITY)\n self.assertEquals(configuration.timezone, granularities.DEFAULT_TIMEZONE)\n self.assertEquals(configuration.aggregation_method,\n aggregations.DEFAULT_AGGREGATION)\n\n custom_tz = 'America/New_York'\n custom_agg = aggregations.AGGREGATION_LAST\n custom_ret = granularities.RETENTIONS_GRANULARITY\n custom_ret[granularities.SECOND] = 3 * 365 * 12 * 30 * 24 * 60 * 60\n timeserie_configuration.update_timeserie_configuration(\n self.get_local_dynamo_cli(), ts_name, custom_tz, custom_agg, custom_ret)\n\n configuration = timeserie_configuration.get_timeserie_configure(\n self.get_local_dynamo_cli(), ts_name)\n self.assertFalse(configuration.default)\n self.assertEquals(configuration.retentions, custom_ret)\n self.assertEquals(configuration.timezone, custom_tz)\n self.assertEquals(configuration.aggregation_method, custom_agg)", "def setup_schedule():\n for project in Project.select():\n if (project.schedule_interval is not None) and (project.schedule_interval > 0):\n schedule.add_job(pull_build_project, \"interval\", id=\"building_\" + str(project.id),\n hours=project.schedule_interval,\n args=[project, \"master\"])", "def set_exec_time(self, time):\n for task in self.tasks:\n task.exec_time = time", "def set_case_times(case):\n\n # get config\n main_config_model = MainConfigModel.objects.get(main_config_name='MainConfig')\n\n # get relevant casestatus out of config\n casestatus_start = main_config_model.casestatus_start.all()\n casestatus_end = main_config_model.casestatus_end.all()\n\n # set start time if new casestatus of system is in casestatus_start of main config (and has not been set before)\n if case.casestatus in casestatus_start and case.case_start_time == None:\n case.case_start_time = timezone.now()\n\n # set end time if new casestatus of system is in casestatus_end of main config (and has not been set before)\n if case.casestatus in casestatus_end and case.case_end_time == None:\n case.case_end_time = timezone.now()\n # also set start time if it has not already been done\n if case.case_start_time == None:\n case.case_start_time = timezone.now()\n\n return case", "def alarm_in_setup_change():\n setup_write(\"!M1 meas interval\", \"00:01:00\")\n setup_write(\"!M2 meas interval\", \"00:01:00\")\n 
setup_write(\"!TX3 scheduled interval\", \"00:05:00\")", "def set_time_override(override_time=datetime.datetime.utcnow()):\r\n utcnow.override_time = override_time", "def setCurTime(self):\n\t\tself.config.SET_CUT_TIME = True", "def set_creation_time(self, t: int) -> None:\n self.metadata.data[\"creation_time\"] = t", "def set_command_time(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_command_time(self, *args, **kwargs)", "def __init__(self, producer: str, mtime_ns: int = 0) -> None:\n #: The step (and parameters) that updated the target.\n self.producer = producer\n\n #: The modified time of the target (in nanoseconds).\n #:\n #: This is negative until we know the correct time.\n self.mtime_ns = mtime_ns", "def update_time(self):\n pass # Do nothing", "def _update_metadata_date(self, properties):\n if \"core\" not in properties:\n properties.core = Struct()\n properties.core.metadata_date = self._backend.server_time_utc()", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def __init__(self, args, config_file):\n super(Timesheet, self).__init__()\n self.configure_attr(args, config_file)", "def desired_ntp(task):\n\n config = replace_ntp(task)\n task.run(task=napalm_configure, configuration=config, replace=True)", "def setLastModified(when):", "async def push_config(_):\n await oppio.update_opp_timezone(str(opp.config.time_zone))", "def set_command_time(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_set_command_time(self, *args, **kwargs)", "def set_command_time(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_set_command_time(self, *args, **kwargs)" ]
[ "0.567912", "0.56485784", "0.56278956", "0.5598909", "0.5496296", "0.54906976", "0.5475135", "0.5384341", "0.5380727", "0.5367412", "0.5335657", "0.5324095", "0.5298101", "0.5286373", "0.5284378", "0.5256725", "0.52557284", "0.52555525", "0.5248737", "0.52438194", "0.52354807", "0.5222902", "0.5208569", "0.51931375", "0.51541704", "0.51252365", "0.51167876", "0.5103752", "0.5092979", "0.508203" ]
0.77747446
0
finds nearest task that is unoccupied. This means if another agent is working at that location, that task will not be returned
def find_nearest_unoccupied_task(cur_agent, tasks, agents): current_location = cur_agent.getz() closest_task_distance = np.inf allowable_distance_to_task = .1 closest_task = None for task in tasks: location_occupied = False if not task.isTaskScheduled: task_loc = task.getloc() # check if task is occupied for agent in agents: # check if any agent is at the task if cur_agent == agent: # don't check yourself, cuz that's fine continue if compute_dist(agent.getz(), task_loc) < allowable_distance_to_task: location_occupied = True if location_occupied: # Now you know there is an agent too near to that task, thus, look at next task continue dist = euclid_dist(task_loc, current_location) if dist < closest_task_distance: closest_task_distance = dist closest_task = task else: continue return closest_task
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_attack_task_for_unit(self, world, unit):\r\n target_clusters = world.get_enemy_nest_clusters()\r\n max_cluster = max(target_clusters, key=len)\r\n\r\n target = min(max_cluster, key=lambda x: world.get_shortest_path_distance(unit.position, x))\r\n path = world.get_shortest_path(unit.position, target, self.planned_nest_set)\r\n if (path):\r\n return Task(target, path[::-1])", "def localSearch(self, best, A, task, **kwargs):\n\t\treturn task.repair(best + self.epsilon * A * self.normal(0, 1, task.D), rnd=self.Rand)", "def search_best_goal_node(self):\n\n dist_to_goal_list = [self.calc_dist_to_goal(n.x, n.y) for n in self.node_list]\n goal_indexes = [\n dist_to_goal_list.index(i)\n for i in dist_to_goal_list\n if i <= self.expand_dis\n ]\n\n safe_goal_indexes = []\n for goal_index in goal_indexes:\n t_node = self.steer(self.node_list[goal_index], self.goal_node)\n if self.check_collision(t_node, self.obstacle_list):\n safe_goal_indexes.append(goal_index)\n\n if not safe_goal_indexes:\n return None\n\n min_cost = min([self.node_list[i].cost for i in safe_goal_indexes])\n for i in safe_goal_indexes:\n if self.node_list[i].cost == min_cost:\n return i\n\n return None", "def find_unsettled_spot(self):\n\t\tfor i in range(9):\n\t\t\tfor j in range(9):\n\t\t\t\tif self.grid[i][j] == 0:\n\t\t\t\t\treturn i, j\n\t\treturn", "def get_expand_task_for_unit(self, world, unit):\r\n\r\n target = world.get_closest_capturable_tile_from(unit.position, self.planned_nest_set).position \r\n\r\n if (target):\r\n path = world.get_shortest_path(unit.position, target, self.planned_nest_set)\r\n if (path):\r\n return Task(target, path[::-1])\r\n else:\r\n return self.get_attack_task_for_unit(world, unit)\r\n else:\r\n return None", "def nearest(self, query):\n nearest_trees = list(map(lambda t: t.get_nearest_neighbor(query), self.trees))\n distances_pool = list(zip(map(lambda x: self.dist_fn(x, query), self.pool), self.pool))\n best = None\n best_cost = np.inf\n for cost, near in nearest_trees + distances_pool:\n if cost <= best_cost:\n best = near\n best_cost = cost\n return best", "def getNearestTarget(self):\n if self.myShipHull.abr in globals.targetPreference.keys():\n closestShip = self.getNearestPreference(self.myShipHull.abr)\n if closestShip != None:\n return closestShip\n closestRange = 99999\n closestShip = None\n for shipID in self.targets:\n enemyShip = self.myGalaxy.ships[shipID]\n if enemyShip.alive == 1:\n range = funcs.getTargetRange(self.posX, self.posY, enemyShip.posX, enemyShip.posY)\n if range < closestRange:\n closestRange = range\n closestShip = enemyShip\n if closestShip == None and self.myGalaxy.shipsUnderAssault() == 0:\n try:\n self.myGalaxy.endSimulation(self.empireID)\n except:\n pass\n return closestShip", "def nearest_neigbor(self, pc):\n coord = get_coordinates(pc)\n # deliveries\n pdist_deliv = {haversine(coord[0], coord[1], pcoord[1][0], pcoord[1][1]):pc for pc, pcoord in self.state.D_k.items()}\n pdist_list_deliv = list(pdist_deliv.keys())\n if len(pdist_list_deliv) > 0:\n val_deliv_min = min(pdist_list_deliv)\n else:\n val_deliv_min = 1e6 # great value to be discarded when comparing with val_pickup_min\n # pickups\n pdist_pickup = {haversine(coord[0], coord[1], pcoord[-1][0], pcoord[-1][1]):pc for pc, pcoord in self.state.P_k.items()}\n pdist_list_pickup = list(pdist_pickup.keys())\n\n if len(pdist_list_pickup) > 0:\n val_pickup_min = min(pdist_list_pickup)\n else:\n val_pickup_min = 1e6 # great value to be discarded when comparing with val_pickup_min\n\n if val_deliv_min 
== val_pickup_min and val_deliv_min == 1e6:\n print(\"All jobs completed: go to wait or stop if it's 12pm\")\n return 0\n\n if val_deliv_min < val_pickup_min:\n return pdist_deliv[val_deliv_min]\n\n elif val_deliv_min >= val_pickup_min:\n return pdist_pickup[val_pickup_min]\n else:\n raise valueError('Impossible comparison between val_deliv_min and val_pickup_min ')", "def test_find_closest_waypoints_no_position(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n self.assertIsNone(planner.find_closest_waypoints(1))", "def get_task_for_unit(self, world, unit, pos_density, neg_density):\r\n if (unit.health > 3*neg_density and unit.health > 5):\r\n return self.get_attack_task_for_unit(world, unit)\r\n else:\r\n return self.get_expand_task_for_unit(world, unit)", "def tryout_new_location(self):\n try_location = [0, 0]\n \n # try locations until a not-occupied location is found and not all folds are checked\n while try_location in self.occupied:\n\n # folds north everytime\n current_type = 2\n \n # check if location is possible \n try_location = self.assign_location(current_type)\n\n # if location is not possible, try next fold\n if try_location in self.occupied:\n continue\n # if location is possible, use location\n else:\n self.next_location = try_location\n return", "def nearest_neigh(self, atom):\n atoms = self.hutch.get_atoms_in_same_hutch(atom)[:]\n if atom in atoms: atoms.remove(atom)\n\n # This generation of nearby hutches isn't perfect but it will work\n rots = [(1,0,0),(0,1,0),(0,0,1)]\n i = 0\n while len(atoms) == 0:\n hutch = ((hutch[0]+rots[i][0])%self.hutch.nhutchs,(hutch[1]+rots[i][1])%self.hutch.nhutchs,(hutch[2]+rots[i][2])%self.hutch.nhutchs)\n i = (i+1) % 3\n atoms = self.hutch.hutchs[hutch]\n if atom in atoms: atoms.remove(atom)\n start = atoms[0]\n\n atoms = self.get_atoms_in_cutoff(atom,self.dist(atom,start))\n #if atom in atoms: atoms.remove(atom)\n d = float(\"inf\")\n for atomi in atoms:\n dt = self.dist(atom,atomi)\n if dt < d:\n d = dt\n a = atomi\n return a", "def get_nearest_offgrid_pin(self, pin, insufficient_list):\n # Find the coordinate with the most overlap\n best_coord = None\n best_dist = math.inf\n for coord in insufficient_list:\n track_pin = self.convert_track_to_pin(coord)\n min_dist = pin.distance(track_pin)\n if min_dist<best_dist:\n best_dist=min_dist\n best_coord=coord\n \n return set([best_coord])", "def determine_closest(self, targets):\n min_distance = None\n closest = None\n targets = filter(lambda x: not x.owner or x.owner is self, targets)\n for target in targets:\n # If target currently in use, skip it\n if target.occupied_by:\n print(f\"{target.name}: {target.x},{target.y} occupied by {target.occupied_by.name}\")\n continue\n\n # If target is known to be broken, skip it\n if target in self.memories.broken_items:\n continue\n\n dx = target.x - self.x\n dy = target.y - self.y\n distance = math.sqrt(dx**2 + dy**2)\n if min_distance is None or distance < min_distance:\n min_distance = distance\n closest = target\n\n return closest", "def find_closest_addr(self, *args):\n return _ida_hexrays.citem_t_find_closest_addr(self, *args)", "def get_nearest_tickable_pending_tasks(self):\n nearest_tickable_pending_tasks = []\n cursors = [self.tasks[self.ROOT_TASK_KEY]]\n while len(cursors) > 0:\n next_cursors = []\n for cursor in cursors:\n if cursor['status'] == 'PENDING':\n nearest_tickable_pending_tasks.append(cursor)\n elif cursor['status'] == 'COMPLETED':\n successors = self.get_successors(task=cursor)\n next_cursors.extend(successors)\n 
cursors = next_cursors\n return nearest_tickable_pending_tasks", "def getNearestPreference(self, myABR):\n closestRange = 99999\n closestShip = None\n for shipID in self.targets:\n enemyShip = self.myGalaxy.ships[shipID]\n if enemyShip.alive == 1 and (enemyShip.myShipHull.abr in globals.targetPreference[myABR]):\n range = funcs.getTargetRange(self.posX, self.posY, enemyShip.posX, enemyShip.posY)\n if range < closestRange:\n closestRange = range\n closestShip = enemyShip\n return closestShip", "def search_moves(self, env) -> (float, float):\n futures = []\n with ThreadPoolExecutor(max_workers=self.play_conf.search_threads) as executor:\n for _ in range(self.play_conf.simulation_num_per_move):\n futures.append(executor.submit(self.search_my_move,env=env.copy(),is_root_node=True))\n\n vals = [f.result() for f in futures]\n\n return np.max(vals), vals[0] # vals[0] is kind of racy", "def find_best_move(state: GameState) -> None:", "def get_furthest_offgrid_pin(self, pin, insufficient_list):\n \n # Find the coordinate with the most overlap\n best_coord = None\n best_dist = math.inf\n for coord in insufficient_list:\n min_dist = grid_utils.distance_set(coord, self.blocked_grids)\n if min_dist<best_dist:\n best_dist=min_dist\n best_coord=coord\n \n return set([best_coord])", "def heuristic(self, heuristic_num, task_num, agent_num):\n if heuristic_num == 1: # earliest deadline first\n deadline = self.task_deadlines[0][task_num]\n if self.DEBUG:\n print('Deadline for task ', task_num, ' is ', deadline)\n return -deadline\n\n if heuristic_num == 2: # rule to mitigate resource contention\n # check task_num location\n task_loc = self.tasks[task_num].getloc()\n if task_loc[0] == 0:\n vectorized_task_loc = task_loc[1]\n elif task_loc[0] == 1:\n vectorized_task_loc = 4 + task_loc[1]\n elif task_loc[0] == 2:\n vectorized_task_loc = 8 + task_loc[1]\n else: # location[0] == 3\n vectorized_task_loc = 12 + task_loc[1]\n return self.how_many_tasks_in_each_square[0][vectorized_task_loc]\n\n if heuristic_num == 3:\n combo = self.agent_distances[agent_num][task_num] + self.alpha * np.abs(self.orientation[agent_num][task_num]) + \\\n self.alpha2 * self.agent_distances[agent_num][task_num] * np.abs(self.orientation[agent_num][task_num])\n if self.DEBUG:\n print('Combo score is ', combo)\n # return a negative because you want to travel the least distance\n return -combo", "def defenderGoal(self, point, myPos):\n (x, y) = point\n temp = self.scanmap.adjacentValidPoints(x, y)\n targets = []\n for i in temp:\n (x, y) = i\n targets += self.scanmap.adjacentValidPoints(x, y)\n targets = list(dict.fromkeys(targets))\n targets.remove(point)\n\n minDis = self.getMazeDistance(myPos, targets[0])\n nearestDefender = targets[0]\n for j in targets:\n dis = self.getMazeDistance(myPos, j)\n if dis < minDis:\n minDis = dis\n nearestDefender = j\n return nearestDefender", "def get_depth_first_task(app_id, user_id=None, user_ip=None, n_answers=30, offset=0):\r\n # Uncomment the next three lines to profile the sched function\r\n #import timeit\r\n #T = timeit.Timer(lambda: get_candidate_tasks(app_id, user_id,\r\n # user_ip, n_answers))\r\n #print \"First algorithm: %s\" % T.timeit(number=1)\r\n candidate_tasks = get_candidate_tasks(app_id, user_id, user_ip, n_answers, offset=offset)\r\n total_remaining = len(candidate_tasks)\r\n #print \"Available tasks %s \" % total_remaining\r\n if total_remaining == 0:\r\n return None\r\n if (offset == 0):\r\n return candidate_tasks[0]\r\n else:\r\n if (offset < len(candidate_tasks)):\r\n return 
candidate_tasks[offset]\r\n else:\r\n return None", "def best_cell(self, coord):\n if coord[0] == self.pos[0] and coord[1] == self.pos[1]:\n return self.pos\n\n # Get all available cells\n free_cells = self.get_moves()\n smal_dist = float(\"Inf\")\n\n for cell in free_cells:\n d_x = abs(coord[0] - cell[0])\n d_y = abs(coord[1] - cell[1])\n dist = (d_x**2 + d_y**2)**0.5\n if dist < smal_dist:\n smal_dist = dist\n new_cell = cell\n\n return new_cell", "def find(self, task_id):\n for task_obj in self._blocked_items:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in limbo: '{}'\".format(task_id))", "def calculate_task_potential(self) -> float:\n cur_xy = self.agent.get_position()[:2]\n goal_xy = np.array([1e3, 0])\n return -np.linalg.norm(cur_xy - goal_xy) * 60", "def nearest_neighbor(self):\n steps = [{'Tour': [], 'Tourlength': 0}]\n tour = []\n original_nodes = self._datacontroller.get_data('nodes')\n nodes = copy.deepcopy(original_nodes)\n scale = self._datacontroller.get_data('scale')\n\n # Step 1: Get a tour start\n starts = [node for node in nodes if node.start]\n _start = 'Random from marked nodes'\n if not len(starts):\n starts = nodes\n _start = 'Random from all nodes'\n\n current = starts[randint(0, (len(starts) - 1))]\n while True:\n tour.append(current.nid)\n nodes.remove(current)\n steps.append(construct_step(tour, str(_start), 'random', original_nodes, scale))\n if not len(nodes):\n break\n current = nodes[tsputil.nearest_neighbor(nodes, current)[0]]\n tour.append(tour[0])\n steps.append(construct_step(tour, str(_start), 'random', original_nodes, scale))\n self._datacontroller.commit_change('pathsteps', steps)\n self._datacontroller.commit_change('path', steps[-1])", "def __get_closest_waypoint_index(self, x, y):\n return self.__waypoint_tree.query([x, y], 1)[1]", "def find_closest_pt(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find j index of closest grid point\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n jj = N.argsort(work)[0]\n\n # find i index of closest grid point\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n ii = N.argsort(work)[0]\n\n return ii, jj", "def closest_other_location(state):\n locations = others_locations(state)\n target = closest_other(state)\n return locations[target]" ]
[ "0.6560564", "0.6153575", "0.61524194", "0.5950683", "0.5921171", "0.58735836", "0.58607155", "0.5794781", "0.57347924", "0.5699027", "0.5618542", "0.5607949", "0.55940974", "0.55857736", "0.5556033", "0.5541281", "0.5505964", "0.54815084", "0.5447876", "0.5444861", "0.5441935", "0.5433266", "0.5430184", "0.54286265", "0.5412851", "0.54024535", "0.5392539", "0.5391711", "0.5366933", "0.53617924" ]
0.82706976
0
computes start and finish times of a task given the agent's speed, current location, and task_location
def compute_start_and_finish_times(a, n_t, current_time): duration = n_t.getc() speed = a.getv() current_location = a.getz() task_loc = n_t.getloc() dist = np.sqrt((task_loc[0] - current_location[0]) ** 2 + (task_loc[1] - current_location[1]) ** 2) travel_time = dist / speed start_time = current_time + travel_time finish_time = start_time + duration return start_time, finish_time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _schedule(self,task_dict):\n times = [time(), None, None, None] # (schedule timestamp, execution timestamp, stop timestamp, get timestamp)\n result_id = self._extract_features.remote(self, times) # calculation is started in new remote task \n task_dict[result_id] = self._idx # add sample index ", "def update_based_on_time(self):\n for counter, agent in enumerate(self.agents):\n if self.t >= agent.getFinishTime() and self.agent_current_task[counter] != -1: # task is finished\n task_num = self.agent_current_task[counter]\n self.finish_time_per_task_dict[task_num] = self.t\n self.is_task_finished[0][task_num] = 1\n agent.changebusy(False)\n self.update_agent_is_idle_based_on_class()", "def schedule_task(self, counter):\n task_to_schedule = []\n each_agent = self.agents[counter]\n task_found = False\n H1_score_list = []\n H2_score_list = []\n H3_score_list = []\n H1_dict = {}\n H2_dict = {}\n H3_dict = {}\n\n # Agent not idle case, exit immediately\n if self.is_agent_idle[counter][0] == 0:\n print(each_agent.getName(), 'is not Idle')\n print(each_agent.getName(), 'is scheduled for null task')\n task_to_schedule.append(-1)\n self.task_to_schedule = task_to_schedule\n return task_to_schedule\n # if agent is busy, output null task\n\n for task_num, each_task in enumerate(self.tasks):\n if self.is_task_finished[0][task_num] == 1: # Can't schedule a task that has already been completed\n continue\n if self.is_task_alive[0][task_num] == 0:\n continue\n # if self.is_task_enabled[0][task_num] == 0:\n # continue\n # if self.travel_time_constraint_satisfied[0][task_num] == 0:\n # continue\n if self.is_task_in_progress[0][task_num] == 1: # can't schedule the same task twice\n continue\n\n # All constraints satisfied\n # normalize each score separately\n deadline_score = (self.heuristic(heuristic_num=1, task_num=task_num, agent_num=counter))\n occupacity_score = (self.heuristic(heuristic_num=2, task_num=task_num, agent_num=counter))\n distance_score = (self.heuristic(heuristic_num=3, task_num=task_num, agent_num=counter)) * 150 / np.sqrt(\n 32) # The 150/np.sqrt(32) puts it in the same range as deadline score\n\n H1_dict[task_num] = deadline_score\n H1_score_list.append(deadline_score)\n\n H2_dict[task_num] = occupacity_score\n H2_score_list.append(occupacity_score)\n\n H3_dict[task_num] = distance_score\n H3_score_list.append(distance_score)\n\n task_found = True\n\n if not task_found:\n task_to_schedule.append(-1)\n self.task_to_schedule = task_to_schedule\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[3])\n return task_to_schedule\n\n new_dict = {}\n for key in H1_dict:\n new_dict[key] = H1_dict[key] * self.w_EDR + H2_dict[key] * self.w_RESOURCE + H3_dict[key] * self.w_DISTANCE\n\n highest = max(new_dict.values()) # figure out the highest score\n tasks_with_best_scores = [k for k, v in new_dict.items() if v == highest] # find all keys associated with the highest value\n if len(tasks_with_best_scores) > 1:\n print(tasks_with_best_scores)\n\n if self.do_you_like_big_tasks:\n task_chosen = max(tasks_with_best_scores)\n else:\n task_chosen = min(tasks_with_best_scores)\n\n print('Expert: Task chosen for', each_agent.getName(), ' is ', task_chosen, ' enabled: ', self.is_task_enabled[0][task_chosen])\n self.teacher_actions[self.global_schedule_num].append(task_chosen)\n # neural net task\n self.converge_embedding_based_on_history(task_chosen, counter)\n neural_net_task = self.predict_task(task_chosen, counter)\n 
self.learner_actions[self.global_schedule_num].append(neural_net_task)\n\n print('Neural Net: Task chosen for', each_agent.getName(), ' is ', neural_net_task, ' enabled: ',\n self.is_task_enabled[0][neural_net_task])\n\n # all of this changed to represent neural net task\n\n # if self.is_task_enabled[0][neural_net_task] == 0:\n # print('Task was not enabled, but is alive')\n\n if neural_net_task == task_chosen:\n self.num_correct_predictions_total[self.global_schedule_num] += 1\n\n self.num_predictions_total[self.global_schedule_num] += 1\n\n # Only do it if all of the pre-conditions are met\n\n if self.global_schedule_num != 0:\n location_of_task = self.tasks[neural_net_task].getloc()\n vectorized_task_num = self.get_vectorized_location(location_of_task) # checks if current task is in a location that is occupied\n if self.is_task_alive[0][neural_net_task] == 0 or \\\n self.is_task_enabled[0][neural_net_task] == 0 or \\\n self.travel_time_constraint_satisfied[counter][neural_net_task] == 0 or \\\n self.agent_locations[0][vectorized_task_num] >= 1 or \\\n self.is_task_in_progress[0][neural_net_task]:\n task_to_schedule.append(-1)\n self.task_to_schedule = task_to_schedule\n print('Task ', neural_net_task, ' did not meet criteria of being enabled, alive, travel satisfied, or not occupied')\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[1])\n return task_to_schedule\n\n else: # global task schedule num is zero\n if self.is_task_enabled[0][task_chosen] == 0:\n print('Task was not enabled, but is alive')\n # Only do it if all of the pre-conditions are met\n location_of_task = self.tasks[task_chosen].getloc()\n vectorized_task_num = self.get_vectorized_location(location_of_task) # checks if current task is in a location that is occupied\n if self.is_task_alive[0][task_chosen] == 0 or \\\n self.is_task_enabled[0][task_chosen] == 0 or \\\n self.travel_time_constraint_satisfied[counter][task_chosen] == 0 or \\\n self.agent_locations[0][vectorized_task_num] >= 1:\n task_to_schedule.append(-1)\n self.task_to_schedule = task_to_schedule\n print('task ', task_chosen, ' did not meet criteria of being enabled, alive, travel satisfied, or not occupied')\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[1])\n return task_to_schedule\n\n if self.global_schedule_num != 0:\n # if self.t > self.task_deadlines[0][neural_net_task]:\n if self.has_any_deadlines_passed(neural_net_task):\n task_to_schedule.append(-1)\n print('Deadline is passed')\n # updated where this is changed\n self.did_schedule_fail = True\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[2])\n return task_to_schedule\n else:\n # if self.t > self.task_deadlines[0][neural_net_task]:\n if self.has_any_deadlines_passed(task_chosen):\n task_to_schedule.append(-1)\n print('Deadline is passed')\n # updated where this is changed\n self.did_schedule_fail = True\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[2])\n return task_to_schedule\n\n if self.global_schedule_num != 0:\n task_to_schedule.append(neural_net_task)\n self.agent_current_task[counter] = task_to_schedule[0] # changes agent current task\n self.task_to_schedule = task_to_schedule\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[4])\n self.number_of_decisions_before_terminal_state[self.global_schedule_num] += 1\n # maybe remove the return\n print('Task scheduled for', 
each_agent.getName(), 'at time ', self.t, 'is ', self.task_to_schedule)\n return task_to_schedule # ALL zero indexed\n else:\n task_to_schedule.append(task_chosen)\n self.agent_current_task[counter] = task_to_schedule[0] # changes agent current task\n self.task_to_schedule = task_to_schedule\n self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[4])\n self.number_of_decisions_before_terminal_state[self.global_schedule_num] += 1\n # maybe remove the return\n print('Task scheduled for', each_agent.getName(), 'at time ', self.t, 'is ', self.task_to_schedule)\n return task_to_schedule # ALL zero indexed", "def get_task(self, locations):\n if self.current_location == self.desk_location:\n self.task_location = locations[random.randint(0, len(locations)-1)]\n self.task_duration = random.randint(1, 10)\n else:\n self.task_location = self.desk_location\n self.task_duration = random.randint(50, 100)", "def time_to_perform_task(total_mins, total_hours_for_task):\n\n # Get the number of mins for the task\n num_mins_for_task = total_hours_for_task * 60 \n\n # Find the percentage of the task that is completed\n percent_done = total_mins / num_mins_for_task\n percent_done *= 100\n\n is_percent = False # Holds if the number is a percentage\n if percent_done < 100:\n is_percent = True # The second number will be a percentage\n return [is_percent, floor(percent_done)]\n \n # Change the second nubmer to be the number of times the task can be \n # completed\n times_done = percent_done // 100\n return [is_percent, times_done]", "def start_and_end_times(self):\r\n return [(x.node_monitor_launch_time, x.completion_time) for x in self.__tasks.values()\r\n if x.complete()]", "def solve(task: str) -> int:\n points = Point.parse_task(task)\n sky = Sky(points)\n seconds = sky.move_till_min_area()\n return seconds", "def find_tim(self):\n start_max = 0\n finish_max = 0\n op_mode = self.op_number + ',' + self.mode_number\n for resource in self.resources:\n end_time = resource.usage[op_mode][\"start_time\"] + resource.usage[op_mode][\"duration\"]\n if end_time > finish_max:\n finish_max = end_time\n start_max = resource.usage[op_mode][\"start_time\"]\n self.tim = finish_max\n self.sim = start_max", "def update_agent_pose_and_finish_time_and_log_event(self, agent_num):\n agent = self.agents[agent_num]\n if self.task_to_schedule[0] == -1:\n pass\n else:\n # this happens as soon as it is scheduled, i think\n scheduled_task = self.task_to_schedule[0]\n agent.curr_task = scheduled_task # assigns inside agent class (REDUNDANCY)\n agent.set_orientation(self.orientation[agent_num][scheduled_task])\n agent.task_list.append(scheduled_task)\n agent.updateAgentLocation(self.tasks[scheduled_task].getloc())\n\n # Record it\n agent.task_event_dict[scheduled_task] = [self.t, self.t + self.tasks[scheduled_task].getc()]\n agent.setFinishTime(self.t + self.tasks[scheduled_task].getc())\n self.is_task_in_progress[0][self.task_to_schedule[0]] = 1", "def zip_task_analysis(self, task_data, unit_time=300):\n start_unix_time = task_data[0][0]\n end_unix_time = task_data[-1][0]\n assert(start_unix_time < end_unix_time)\n start_t = 0\n end_t = -1\n for index in range(len(task_data)):\n task_t = task_data[index][0]\n if start_t <= task_t and task_t < end_t:\n self.task_distribution[start_t] += 1.0\n task_id = \"%s_%05d\" % (task_t, index)\n task = [task_id] + task_data[index]\n self.task_candidates[start_t].append(task)\n self.total_task_list.append(task)\n self.total_task_num += 1\n elif task_t >= end_t:\n 
end_t = task_t if end_t <= start_t else end_t\n start_t = end_t\n end_t = start_t + unit_time\n self.time_list.append(start_t)\n self.task_candidates[start_t] = []\n self.task_distribution[start_t] = 0.0\n for key_t in self.task_distribution:\n self.task_distribution[key_t] = self.task_distribution[key_t]/self.total_task_num #归一化", "def compute_task_to_schedule(self, agent_num):\n task = self.schedule_task(agent_num) # get chosen task\n\n agent = self.agents[agent_num] # get current agent\n # self.write_csv_pairwise(agent_num)\n # self.write_csv(agent_num)\n\n if task[0] == -1: # if null task chosen\n task_currently_working_on = self.agent_current_task[agent_num]\n if task_currently_working_on != -1 and self.is_task_finished[0][task_currently_working_on] == 0: # task is currently in progress\n pass\n else: # task is finished, but there is no task to schedule\n self.agent_current_task[agent_num] = -1\n else: # tasks returned contain actual tasks\n\n self.agent_current_task[agent_num] = task[0] # update current agent task\n agent.changebusy(True) # mark agent as busy\n self.update_agent_is_idle_based_on_class()\n\n self.update_agent_pose_and_finish_time_and_log_event(agent_num)", "def method_compute_timestep(self):\n\n myg = self.cc_data.grid\n\n cfl = self.rp.get_param(\"driver.cfl\")\n\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n # the timestep is min(dx/|u|, dy|v|)\n xtmp = ytmp = 1.e33\n if not abs(u).max() == 0:\n xtmp = myg.dx/abs(u.v()).max()\n if not abs(v).max() == 0:\n ytmp = myg.dy/abs(v.v()).max()\n\n dt = cfl*min(xtmp, ytmp)\n\n # We need an alternate timestep that accounts for buoyancy, to\n # handle the case where the velocity is initially zero.\n rho = self.cc_data.get_var(\"density\")\n rho0 = self.base[\"rho0\"]\n rhoprime = self.make_prime(rho, rho0)\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n\n F_buoy = (abs(rhoprime*g).v()/rho.v()).max()\n\n dt_buoy = np.sqrt(2.0*myg.dx/F_buoy)\n\n self.dt = min(dt, dt_buoy)\n if self.verbose > 0:\n print(f\"timestep is {dt}\")", "def calculate_vars(data, lat, lon):\n # Keep track of running distance and time calculations\n distance_to_dest = 0.0\n time_estimate = 0.0\n\n # Calculate from starting dest to first point in data\n user_coords = (lat, lon)\n first_path_coords = (data[0][\"lat\"], data[0][\"lon\"])\n first_distance = geopy.distance.distance(user_coords, first_path_coords).miles\n distance_to_dest += first_distance\n time_estimate += first_distance * 20 # 3mph walking speed\n\n # Calculate for all other points\n for i in range(1, len(data) - 1):\n this_coords = (data[i][\"lat\"], data[i][\"lon\"])\n next_coords = (data[i + 1][\"lat\"], data[i + 1][\"lon\"])\n\n distance = geopy.distance.distance(this_coords, next_coords).miles\n distance_to_dest += distance\n time_estimate += distance * 20 # 3mph walking speed\n\n # Round distance and time estimates\n distance_to_dest = round(distance_to_dest, 1)\n time_estimate = round(time_estimate)\n\n return distance_to_dest, time_estimate", "def calculate_task_potential(self) -> float:\n cur_xy = self.agent.get_position()[:2]\n goal_xy = np.array([1e3, 0])\n return -np.linalg.norm(cur_xy - goal_xy) * 60", "def __get_elapsed__(self):\n elapsed = (self.__end_time - self.__start_time)\n unit = \"seconds\"\n if elapsed >= 3600:\n unit = \"minutes\"\n hours = elapsed / 3600\n minutes = hours % 60\n hours = floor(hours)\n print(self.name, \"took\", str(hours), \"hours and\", \"{0:.2f}\".format(minutes), unit, \"to complete\")\n elif elapsed >= 
60:\n minutes = floor(elapsed / 60)\n seconds = elapsed % 60\n print(self.name, \"took\", str(minutes), \"minutes and\", \"{0:.2f}\".format(seconds), unit, \"to complete\")\n else:\n print(self.name, \"took\", \"{0:.2f}\".format(elapsed), unit, \"to complete\")", "def __get_elapsed__(self):\n elapsed = (self.__end_time - self.__start_time)\n unit = \"seconds\"\n if elapsed >= 3600:\n unit = \"minutes\"\n hours = elapsed / 3600\n minutes = hours % 60\n hours = floor(hours)\n print(\"{} {} took {} hours and {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, hours, minutes, unit))\n elif elapsed >= 60:\n minutes = floor(elapsed / 60)\n seconds = elapsed % 60\n print(\"{} {} took {} minutes and {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, minutes, seconds, unit))\n else:\n print(\"{} {} took {:.2f} {} to complete\".format(self.__get_timestamp__(), self.name, elapsed, unit))", "def get_task_time(self, task):\n task.task_time = TASK_TYPES[task.task_type]\n print(\"Fetched task time\")", "def runtime_cal(start,end) :\n run_time = end - start\n mm = int(run_time/60)\n ss = round(run_time%60)\n return mm, ss", "def update_task_location_vector(self):\n for counter, task in enumerate(self.tasks):\n location = task.getloc()\n if location[0] == 0:\n vectorized_task_loc = location[1]\n elif location[0] == 1:\n vectorized_task_loc = 4 + location[1]\n elif location[0] == 2:\n vectorized_task_loc = 8 + location[1]\n else: # location[0] == 3\n vectorized_task_loc = 12 + location[1]\n self.how_many_tasks_in_each_square[0][vectorized_task_loc] += 1\n self.task_locations[0][counter] = vectorized_task_loc\n # print(location)\n # print(self.how_many_tasks_in_each_square)", "def count_time(start, end, folder, model_type, task):\n print(\"It has been \", str(datetime.timedelta(seconds=(end - start))))\n timee = (end - start)/3600\n #if the folder doesn't exist, create it\n if not os.path.exists(''.join(string for string in [absPath, 'data/results/', folder, task, model_type, '/'])):\n os.makedirs(''.join(string for string in [absPath, 'data/results/', folder, task, model_type, '/']))\n file_time = ''.join(string for string in [absPath, 'data/results/', folder, task, model_type, '/time.pickle'])\n\n with open(file_time, \"wb\") as output_file:\n pickle.dump(timee, output_file)", "def start_game(self):\n GameManager.time = 0\n print(\"Game started\")\n time_spent = 0\n start_time = tm.process_time()\n winning_path = self.__agent.apply_strategy(self.__graph, self.__board)\n execution_time = tm.process_time() - start_time\n for location in winning_path:\n x = location[0]\n y = location[1]\n time_to_wait = self.__board[x][y]\n while time_spent != time_to_wait:\n GameManager.time += 1\n time_spent += 1\n self.__agent.current_location = location\n GameManager.time += 1\n time_spent = 0\n self.__print_grid(winning_path)\n print(f\"Total time required: {GameManager.time}\")\n return GameManager.time, execution_time*1000", "def inter_arrival_times(self):\n # this function returns arrival times between two subsequent tuples in ms\n # task mean_inter_arrival_time std_inter_arrival_time\n if self.inter_arrival_time is None:\n if self.tuple_arrival is None:\n self.tuple_arrivals()\n self.inter_arrival_time = convert_throughput_to_inter_arr_times(self.tuple_arrival)\n\n return self.inter_arrival_time", "def isTimeForTask(self, task_times):\n if self.run_type.startswith('timed'):\n time_since_start = (time.time() - self.start_times['run'])\n remaining_time = self.max_time * 60 - 
time_since_start\n mean_task_time = np.mean(task_times)\n self.tee(\" projected task time: %s, remaining time: %s\"%(\\\n HMStime(mean_task_time), HMStime(remaining_time)), process=process)\n if mean_task_time > remaining_time:\n return False\n else:\n return True", "def set_times(self):\n if self.anchor == \"P\":\n # specified pickup time, 5 minutes early.\n self.earliestPickup = tools.time_to_seconds(str(self.times)) - 300\n # given pickup time, we are 15 minutes late.\n self.latestPickup = tools.time_to_seconds(str(self.times)) + 900\n # We are given pickup time, caluclate pickup time, and are 5 min early\n self.earliestDropoff = tools.time_to_seconds(self.times) - 300 + self.time_for_travel()\n # we are given pickup time, add travel time, and are 20 minutes\n self.latestDropoff = tools.time_to_seconds(self.times) + self.time_for_travel() + 900\n else:\n # this means the dropoff time is given. calculate the time it takes to drive, and then 5 minutes early\n self.earliestPickup = tools.time_to_seconds(str(self.times)) - self.time_for_travel() - 1200\n # given dropoff time, we calucate when to arrive, and then are 15 minutes late.\n self.latestPickup = tools.time_to_seconds(str(self.times)) - self.time_for_travel()\n # we are given dropoff time. It's earliest pickup time + travel time\n self.earliestDropoff = tools.time_to_seconds(self.times) - 1200\n self.latestDropoff = tools.time_to_seconds(self.times)", "def start_and_end_time():\n (shour, smin) = START_TIME.split(\":\")\n stime = arrow.now()\n start_time = arrow.Arrow(stime.year, stime.month, stime.day, int(shour), int(smin), tzinfo=stime.tzinfo)\n if stime > start_time:\n # If start time is in the past, start now\n start_time = stime\n if __debug__:\n end_time = start_time.shift(minutes=+2)\n else:\n end_time = start_time.shift(hours=+END_AFTER_HOURS)\n return start_time, end_time", "def start_time(self) -> float:\r\n ...", "def assign_tasks_per_task(self, current_time, job_id):\r\n job = self.jobs[job_id]\r\n random.shuffle(self.worker_indices)\r\n task_arrival_events = []\r\n for i, task_duration in enumerate(job.unscheduled_tasks):\r\n loads = [(i, self.workers[i].queue_length())\r\n for i in self.worker_indices[PROBE_RATIO*i:PROBE_RATIO*(i+1)]]\r\n #loads_str = \", \".join([\"%s:%s\" % (l[0], l[1]) for l in loads])\r\n #print \"Loads: %s\" % loads_str\r\n loads.sort(key = lambda x: x[1])\r\n #print (\"Assigning task of duration %s for job %s to worker %s\" %\r\n # (task_duration, job_id, loads[0][0]))\r\n task_arrival_events.append(\r\n (current_time + 2*NETWORK_DELAY,\r\n TaskArrival(self.workers[loads[0][0]], task_duration, job_id)))\r\n return task_arrival_events", "def start_and_service_times(self):\r\n return [(x.scheduler_launch_time, x.service_time()) for x in self.__tasks.values()\r\n if x.complete()]", "def compute_trajectory():\n pass", "def calculate_eft_and_cost(self, task, resource_id, arrival_time=0):\r\n start_time, eft, runtime_on_resource, place_id = self.calculate_eft(task, resource_id, arrival_time=arrival_time)\r\n if task.dummy_task:\r\n return start_time, eft, runtime_on_resource, place_id, 0\r\n else:\r\n cost = self.calculate_share_cost_change(resource_id, start_time, eft, task.graph.name, True)\r\n return start_time, eft, runtime_on_resource, place_id, cost" ]
[ "0.6099162", "0.60414046", "0.59381944", "0.58190423", "0.5716353", "0.570295", "0.5654997", "0.56370676", "0.56161964", "0.55021346", "0.5477614", "0.5460579", "0.5449442", "0.54475635", "0.54284877", "0.54032576", "0.53916776", "0.5379778", "0.5372666", "0.53617954", "0.53434277", "0.5298559", "0.5294487", "0.52863604", "0.5254633", "0.5211908", "0.521036", "0.5197172", "0.51917094", "0.5181834" ]
0.71098846
0
Are there tasks that can be scheduled?
def tasks_are_available(tasks): task_not_finished_not_scheduled_count = len(tasks) for task in tasks: if task.getisTaskFinished(): continue if task.getisTaskScheduled(): continue else: task_not_finished_not_scheduled_count -= 1 if task_not_finished_not_scheduled_count < len(tasks): return True else: return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_task_stagnant(task):", "def has_open_tasks(self):\n return self.get_started_tasks()", "def checkUpstreamScheduler():", "def is_task_in_schedule(self, tid: str) -> bool:\n return tid in self.__tasks", "def found_schedules(self) -> bool:\n return self._schedule_list != []", "def is_runnable(self):\n \n if len(target_tasks) < 1: \n return False\n # check task names?\n \n if self.run_folder is None or \\\n not os.path.exists(self.run_folder) or \\\n not os.path.exists(os.path.join(self.run_folder, self.run_id, 'SampleSheet.csv')):\n return False\n \n return True", "def test_contain_tasks(self):\n dag = self.dagbag.get_dag(self.dag_id)\n tasks = dag.tasks\n task_ids = list(map(lambda task: task.task_id, tasks))\n assert sorted(task_ids) == sorted([\n 'start', 'gcs_to_bq', 'stop'\n ])", "def _schedTest(self):\n if not self._hasSlices(): # There are no migratory tasks, so let's check utilization\n return self.util() <= 1.0\n else:\n return self._qpa()", "def __contains__(self, name):\n return name in self._tasks", "def is_scheduled(self) -> bool:\n return not self.terminated and self.__state != Process.IDLE", "def is_registered(task_name):\n if tasks.find({'name': task_name}).count() > 0:\n return True\n else:\n return False", "def must_run(self):\r\n self.current_time = datetime.now()\r\n return all([self._minute(), self._hour(), self._day_of_month(), self._month(), self._day_of_week()])", "def _task_is_running(course_id, task_type, task_key):\r\n running_tasks = InstructorTask.objects.filter(\r\n course_id=course_id, task_type=task_type, task_key=task_key\r\n )\r\n # exclude states that are \"ready\" (i.e. not \"running\", e.g. failure, success, revoked):\r\n for state in READY_STATES:\r\n running_tasks = running_tasks.exclude(task_state=state)\r\n return len(running_tasks) > 0", "def can_run(self):\n\t\treturn self._start is None", "def __contains__(self, task):\n return task in self._tasks", "def is_scheduled(self, handle):\n return not handle.cancelled()", "def try_execute(self):\n executed_tasks_count = 0\n for k, v in iter(self.tasks.items()):\n if v.running and not v.finished:\n executed_tasks_count += 1\n\n if executed_tasks_count == self.task_count:\n self.start_time = time.time()\n self.migration_count += 1\n self.started = True\n self.running = True\n return True, executed_tasks_count\n return False, executed_tasks_count", "def isScheduleRunning(self):\n if DPxIsDinSchedRunning() == 0:\n schedule_running = False\n else:\n schedule_running = True\n return schedule_running", "def schedule_required(self) -> bool:\n return self._local.idle", "def dumb_task():\n return True", "def is_task(self, task_id, tasks):\r\n for t in tasks:\r\n if t.id == task_id:\r\n return True\r\n return False", "def initTasks(self):\n if self.tkey is None:\n num = TaskHistory(date=self.ddate).search(\n count=True, task_day=self.ddate)\n hour = int(time.strftime(\"%H\"))\n # a hack, many times run init module\n if num > 20 and hour >= 1:\n self.log.info(\"Initialization has been completed\")\n return True\n tlist = TaskLibrary().allTask()\n else:\n tlist = TaskLibrary().getByKey(self.tkey)\n if not tlist:\n self.log.debug(\"no tasks\")\n return False\n\n ts = TaskHistory()\n for task in tlist:\n # status not 1, not init it.\n if int(task.get(\"status\", 0)) != 1:\n continue\n task = self.__parseTask(task)\n if self.__checkInited(task.get(\"task_day\"), task.get(\"task_key\"), task.get(\"task_type\")):\n continue\n ts.insert(task)\n\n self.log.info(\"init task finished\")\n return True", "async def 
test_get_tasks(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # declare _scheduler task\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'get_tasks'\n interval_schedule.process_name = \"sleep5\"\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n interval_schedule.exclusive = False\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(15)\n\n # Assert running tasks\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.INTERRUPTED)])\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"end_time\", \"=\", 'NULL'])\n assert tasks\n\n tasks = await scheduler.get_tasks(limit=50)\n states = [int(task.state) for task in tasks]\n\n assert len(tasks) > 1\n assert int(Task.State.RUNNING) in states\n assert int(Task.State.COMPLETE) in states\n\n tasks = await scheduler.get_tasks(1)\n assert len(tasks) == 1\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"]], offset=50)\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"], [\"start_time\", \"asc\"]])\n assert tasks\n\n tasks = await scheduler.get_tasks(or_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n tasks = await scheduler.get_tasks(and_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n await self.stop_scheduler(scheduler)", "def list_pending_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.reserved()", "def is_runnable(self):\n return self.state == self.STATE_INIT and not self.require", "def DueToRun(self):\n if self.Get(self.Schema.DISABLED):\n return False\n\n cron_args = self.Get(self.Schema.CRON_ARGS)\n last_run_time = self.Get(self.Schema.LAST_RUN_TIME)\n now = rdfvalue.RDFDatetime().Now()\n\n # Its time to run.\n if (last_run_time is None or\n now > cron_args.periodicity.Expiry(last_run_time)):\n\n # Do we allow overruns?\n if cron_args.allow_overruns:\n return True\n\n # No currently executing job - lets go.\n if self.Get(self.Schema.CURRENT_FLOW_URN) is None:\n return True\n\n return False", "def check_tasks(self):\n if self.able:\n notifications = []\n for task in self.tasks.all():\n if task.deadline is not None:\n if self._get_delta(task.deadline) < timezone.localtime():\n self.tasks.remove(task)\n notifications.append(Notification(\n title=Notifications.REMIND,\n info=f'{task.info} {self.__str__().replace(\"before\", \"after\")}'\n ))\n return notifications", "def isTimeForTask(self, task_times):\n if self.run_type.startswith('timed'):\n time_since_start = (time.time() - self.start_times['run'])\n remaining_time = self.max_time * 60 - time_since_start\n mean_task_time = np.mean(task_times)\n self.tee(\" projected task time: %s, remaining time: %s\"%(\\\n HMStime(mean_task_time), HMStime(remaining_time)), process=process)\n if mean_task_time > remaining_time:\n return False\n else:\n return True", "def tasks():", "async def _check_schedule(self, now, last):\n\n if self._schedule is None:\n return\n\n for event in self._schedule.events:\n if event.begin <= now:\n if event.begin > last:\n await self._announce_event(event)" ]
[ "0.71745074", "0.7151431", "0.7072318", "0.70158434", "0.67972994", "0.6678688", "0.66564685", "0.66272324", "0.66050434", "0.6574776", "0.6508395", "0.6493566", "0.6471567", "0.6463538", "0.6339398", "0.63177663", "0.62976694", "0.6261362", "0.623227", "0.6224766", "0.6224698", "0.62160045", "0.6212179", "0.62105197", "0.6207181", "0.6207075", "0.6193738", "0.61894596", "0.61856914", "0.6179774" ]
0.7639722
0
counts how many tasks are at each of the 16 locations; also stores which location each task is in, in another array
def update_task_location_vector(self): for counter, task in enumerate(self.tasks): location = task.getloc() if location[0] == 0: vectorized_task_loc = location[1] elif location[0] == 1: vectorized_task_loc = 4 + location[1] elif location[0] == 2: vectorized_task_loc = 8 + location[1] else: # location[0] == 3 vectorized_task_loc = 12 + location[1] self.how_many_tasks_in_each_square[0][vectorized_task_loc] += 1 self.task_locations[0][counter] = vectorized_task_loc # print(location) # print(self.how_many_tasks_in_each_square)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret", "def num_locations(self):\n return len(self.locations)", "def locations_n(self):\n return self.locations[1]", "def run_numbers():\n if run_nos:\n # Get task names\n tasks = []\n for rn in dcm_dict.keys():\n tasks.append(dcm_dict[rn]['task_name'])\n # Assign run numbers\n for tsk in set(tasks):\n n_runs = sum(i == tsk for i in tasks)\n if n_runs == 1:\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n # Add in the 'task' prefix required by BIDS format if missing from name\n if not tsk[0:4] == 'task':\n dcm_dict[rn]['out_name'] = 'task-'+tsk+'_run-01'\n else:\n dcm_dict[rn]['out_name'] = tsk+'_run-01'\n elif n_runs > 1:\n task_runs = []\n run_times = []\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n task_runs.append(rn)\n run_times.append(dcm_dict[rn]['start_time'].timestamp())\n idx_order = sorted(range(len(run_times)), key=lambda k: run_times[k])\n for i in idx_order:\n if not tsk[0:4] == 'task':\n dcm_dict[task_runs[i]]['out_name'] = 'task-'+tsk+'_run-0'+str(i+1)\n else:\n dcm_dict[task_runs[i]]['out_name'] = tsk+'_run-0'+str(i+1)\n else:\n for rn in dcm_dict.keys():\n dcm_dict[rn]['out_name'] = dcm_dict[rn]['task_name']", "def partitioner(mappings):\n\t\n\ttoken_counts = defaultdict(list)\n\t\n\tfor sublist in mappings:\n\t\tfor t, c in sublist:\n\t\t\ttoken_counts[t].append(c)\n\t\t\t\n\treturn token_counts", "def get_num_locations(self, project):\n locations = Location.objects.filter(\n Q(private=False) |\n Q(private_for_project=project)).count()\n return locations", "def __used(self):\n tot=0\n assign={}\n for c in self.assigned :\n if not assign.has_key(c.start) :\n assign[c.start]=c.end\n tot+=c.end-c.start+1\n return tot", "def count(self):\n\n paths = 0\n for task in self.tasks:\n if not task or type(task) != dict:\n continue\n\n for k, v in task.items():\n\n if not k or not v:\n continue\n\n if k in ANSIBLE_MODULES_LIST and type(v) == dict:\n paths += sum(map(lambda x: x == 'path' or x == 'src' or x == 'dest', v))\n\n return paths", "def iterate_paths_map(riv_dirs,paths_map,nlat=360,nlong=720):\n\n if np.count_nonzero(paths_map) == paths_map.size:\n return False\n for i in range(nlat+2):\n for j in range(nlong):\n if i == 0 or i == nlat+1:\n paths_map[i,j] = 1\n elif j == 0:\n paths_map[i,j] = count_accumulated_inflow(np.append(riv_dirs[i-1:i+2,nlong-1:nlong],riv_dirs[i-1:i+2,j:j+2],axis=1),\n np.append(paths_map[i-1:i+2,nlong-1:nlong],paths_map[i-1:i+2,j:j+2],axis=1))\n elif j == nlong-1:\n paths_map[i,j] = count_accumulated_inflow(np.append(riv_dirs[i-1:i+2,j-1:j+1],riv_dirs[i-1:i+2,0:1],axis=1),\n np.append(paths_map[i-1:i+2,j-1:j+1],paths_map[i-1:i+2,0:1],axis=1))\n else:\n paths_map[i,j] = count_accumulated_inflow(riv_dirs[i-1:i+2,j-1:j+2],\n paths_map[i-1:i+2,j-1:j+2])\n return True", "def count_target(self):\n tally = {}\n for obj in self.target:\n tally[obj] = 0\n\n ind = 0\n for label in self.labelList:\n filename = self.pathLabel + label\n f = open(filename, 'r')\n content = f.read().split('\\n')\n for line in content:\n items = line.split(' ')\n if items[0] in self.target:\n tally[items[0]] += 1\n f.close()\n if ind % 100 == 0:\n print(f'[COUNT] {ind} of {len(self.labelList)} processed')\n ind += 1\n \n print('[COUNT] done counting targets in dataset')\n print(tally)", "def compute_map(self):\n number_of_orders = 0\n orders = []\n for i, line in enumerate(self.__grid):\n for j, column in 
enumerate(line):\n if self.__grid[i][j][\"humans\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(self.__grid[i][j][\"humans\"])\n orders.append(0)\n orders.append(0)\n if self.__grid[i][j][\"vampires\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(self.__grid[i][j][\"vampires\"])\n orders.append(0)\n if self.__grid[i][j][\"werewolves\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(0)\n orders.append(self.__grid[i][j][\"werewolves\"])\n return number_of_orders, orders", "def agent_locs_idx(self):\n return tuple(self.agent_locs.T)", "def update_cnt_map(self,s):\r\n cnts = []\r\n num_grid = self.cnt_map.shape[0]*self.cnt_map.shape[1]\r\n old_coverage =num_grid- self.cnt_map.flatten().tolist().count(0)\r\n for sj in s:\r\n grid_s = self.get_gridState(sj)\r\n self.cnt_map[grid_s[0], grid_s[1]] += 1\r\n cnts.append(self.cnt_map[grid_s[0], grid_s[1]])\r\n\r\n self.map_coverage = num_grid - self.cnt_map.flatten().tolist().count(0)\r\n print(\"Coverage:\",self.map_coverage)\r\n print(\"Change of coverage:\",self.map_coverage-old_coverage)\r\n\r\n return cnts", "def array_occurrences(cmd_out: list) -> defaultdict:\n array_frequency = defaultdict(int) # type: defaultdict\n array_name = 0\n for entry in cmd_out:\n array_frequency[entry[array_name]] += 1\n return array_frequency", "def add_building_output_locations2(self,areasList,start,end,step): \n print \"Getting buildings locations...\"\n \n dictionaries = []\n dictionary = {}\n \n for a in areasList:\n \n dictionaries.append(self.grid.get_building_output_locations(a[0],a[1]))\n \n for dict in dictionaries:\n for row in dict.iteritems(): \n dictionary[row[0]] = row[1] \n\n print \"Number of buildings = %s\" % (len(dictionary))\n\n if (dictionary != {}):\n self.run_nc.add_building_output_locations(dictionary, start, end,step)", "def total_data(map_index, next_sse_index, ss_def, contacts_def):\n no_of_contacts = 0\n contacts_true = contacts_def.keys()\n start, end = ss_def[next_sse_index][3], ss_def[next_sse_index][4]\n for i in range(start, end + 1):\n if i in contacts_true:\n contacts = contacts_def[i]\n for contact in contacts:\n for index in map_index:\n tstart, tend = ss_def[index][3], ss_def[index][4]\n if contact in range(tstart, tend + 1):\n no_of_contacts += 1\n return no_of_contacts", "def compute_map(current_agent_id,agent_order,number_of_timestep,state_schedules, conv :StateConverter):\r\n #Find the agent has the highest number of time steps\r\n highest_timestep = 0\r\n # Find the highest time step\r\n if len(number_of_timestep) >0:\r\n highest_timestep = np.max(number_of_timestep)\r\n occupancy_map = []\r\n # Since we don't know yet how many time step of the current id so\r\n # the number of time steps of the occupancy map == highest number of time step\r\n # of the current schedule\r\n for time_step in range(int(highest_timestep)):\r\n # Initialize the occupancy for current time step\r\n current_occupancy_map = np.zeros(conv.num_tiles)\r\n # We loop through schedule of each agent at current time step\r\n for i in range(len(state_schedules)):\r\n # Get the agent id of current schedule\r\n agent_of_schedule = agent_order[i]\r\n if time_step < len(state_schedules[i]):\r\n # The first case when the agent of current schedule is executed after the current agent\r\n if agent_of_schedule > current_agent_id:\r\n # Get the current state\r\n current_state = state_schedules[i][time_step]\r\n # 
Convert the current state to tile index\r\n current_tile = conv.state_to_tile(current_state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[current_tile] = 1\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n # The second case when the agent of current schedule is executed before the current agent\r\n else:\r\n if time_step + 1 < len(state_schedules[i]):\r\n # Get the next state\r\n next_state = state_schedules[i][time_step + 1]\r\n # Convert next state to next tile will be occupied\r\n next_tile_index = conv.state_to_tile(next_state)\r\n # Occupied the next tile in the occupancy map\r\n current_occupancy_map[next_tile_index] = 1\r\n if time_step + 2 < len(state_schedules[i]):\r\n # Get the next 2 state\r\n next_2state = state_schedules[i][time_step+2]\r\n # Convert the current state to tile index\r\n next_2tile = conv.state_to_tile(next_2state)\r\n # Occupied the current tile in the occupancy map\r\n current_occupancy_map[next_2tile] = 1\r\n occupancy_map.append(current_occupancy_map)\r\n return occupancy_map", "def num_tasks(self):\n return self.num_labels", "def _extract_geographical_patterns(self):\n # take onehot encoding of zipcodes\n onehot = pd.get_dummies(self.df_transaction['zipcode'], prefix='zipcode')\n rider_id = pd.DataFrame(data={'riderID': self.df_transaction['riderID']})\n frames = [rider_id, onehot]\n df_onehot = pd.concat(frames, axis=1)\n\n # count zipcodes\n df_rider_geo_count = df_onehot.groupby(['riderID'])[list(onehot.columns.values)].sum().reset_index()\n df_rider_geo_count['geo_row_sum'] = df_rider_geo_count.iloc[:, 1:].sum(axis=1)\n\n return df_rider_geo_count", "def count_accumulated_inflow(riv_dirs_section,paths_map_section):\n\n flow_to_cell = 0\n #Exact opposite across the keypad of the direction values\n inflow_values = np.array([[3, 2, 1],\n [6, 5, 4],\n [9, 8, 7]])\n for i in range(3):\n for j in range(3):\n if i == 1 and j == 1:\n flow_to_cell += 1\n #skip this iteration as flow to self is already counted\n continue\n if inflow_values[i,j] == riv_dirs_section[i,j]:\n if paths_map_section[i,j] != 0:\n flow_to_cell += paths_map_section[i,j]\n else:\n return 0\n if flow_to_cell < 1:\n raise RuntimeError('In flow less than 1')\n return flow_to_cell", "def count_task2_group(answers):\n return len(set.intersection(*answers))", "def _locations_to_senzory_map(self,locations):\n if not np.any([np.array_equal(row,[0.,0.]) for row in locations]):\n#? Shallow copy?\n#A No. 
np.vstack returns new array\n locations = np.vstack((locations,[0.,0.]))\n return self._sort_locations(locations)", "def get_location_count(self):\n return len(self.matrix)", "def update_current_state(self, agent_num):\n for task_num, i in enumerate(self.tasks):\n current_task_data = []\n task_loc = i.getloc()\n vectorized_task_loc = self.get_vectorized_location(task_loc)\n current_task_data.append(self.t)\n current_task_data.append(self.w_EDR)\n current_task_data.append(self.w_RESOURCE)\n current_task_data.append(self.w_DISTANCE)\n current_task_data.append(agent_num)\n\n current_task_data.append(task_num)\n current_task_data.extend(self.is_agent_idle[agent_num]) # Feature 6\n current_task_data.append((self.is_task_finished[0][task_num])) # Feature 2\n current_task_data.append((self.is_task_enabled[0][task_num])) # Feature 3\n current_task_data.append((self.is_task_alive[0][task_num])) # Feature 4\n current_task_data.append((self.travel_time_constraint_satisfied[agent_num][task_num])) # Feature 5\n is_occupied = self.agent_locations[0][vectorized_task_loc] # if 1 agent is there, 0 is unoccupied\n current_task_data.append((is_occupied)) # Feature 1\n current_task_data.append((self.agent_distances[agent_num][task_num])) # Feature 7\n current_task_data.append((self.orientation[agent_num][task_num])) # Feature 9\n current_task_data.append((self.task_deadlines[0][task_num])) # Feature 10\n current_task_data.append((self.is_task_in_progress[0][task_num])) # Feature 11\n current_task_data.append((\n self.orientation[agent_num][task_num] * self.agent_distances[agent_num][task_num])) # Feature 12\n current_task_data.append((self.how_many_tasks_in_each_square[0][vectorized_task_loc])) # Feature 8\n if self.task_to_schedule == -1:\n null_task = 1\n else:\n null_task = 0\n current_task_data.append(null_task)\n current_task_data.append(-7) # This is not really needed, but I'll add in -7 as a bs val\n self.network_state.append(current_task_data)", "def update_agent_distances_vector(self):\n count = 0\n for agent in self.agents:\n agent_loc = agent.getz()\n\n for i, each_task in enumerate(self.tasks):\n dist = euclid_dist(agent_loc, each_task.getloc())\n self.agent_distances[count][i] = dist\n count += 1\n if self.DEBUG:\n print(self.agent_distances)", "def tracking(self):\n cnt = 0\n self.untrackedufos = deepcopy(self.ufoLocations)\n\n for ship in self.ships:\n if ship in self.untrackedufos:\n cnt += 1\n self.trackingship[ship] = self.untrackedufos[ship].pop()\n if not self.untrackedufos[ship]:\n del self.untrackedufos[ship]\n\n return cnt", "def map_task(items):\n dic = {}\n for item in items:\n dic[item] = dic.get(item, 0) + 1\n\n return dic", "def count_segments(markers) -> int:\n cnt = Counter()\n for row in markers:\n cnt.update(row)\n n_cnt = dict(takewhile(lambda x: x[1] >= 10, cnt.most_common()))\n del n_cnt[1]\n del n_cnt[-1]\n return len(n_cnt.keys())", "def setupDistribution(tournamentsWon1):\n timesWon = np.sort(np.unique(tournamentsWon1))\n numberTimesWon = np.zeros_like(timesWon)\n for i in range (len(timesWon)):\n numberTimesWon[i] = count(tournamentsWon1, timesWon[i])\n return timesWon, numberTimesWon", "def test_task_count(self):\n\n dag = self.dagbag.get_dag(self.dag_id)\n self.assertEqual(len(dag.tasks), 65)" ]
[ "0.5624857", "0.5569608", "0.55368817", "0.5525221", "0.5519773", "0.5492445", "0.5469183", "0.5433112", "0.54321223", "0.5419543", "0.5411569", "0.5369202", "0.5368313", "0.5351563", "0.53196675", "0.52892", "0.52587813", "0.524506", "0.52282435", "0.5215702", "0.52141505", "0.5203234", "0.5192333", "0.51896363", "0.5182558", "0.51821905", "0.5168177", "0.51577425", "0.5152522", "0.51422113" ]
0.63371027
0
This adds the agent location into vectorized format of the grid. Only updates if the agent is busy.
def update_agent_location_vector(self): for agent in self.agents: location = agent.getz() # print(location) if location[0] == 0: vectorized_agent_loc = location[1] elif location[0] == 1: vectorized_agent_loc = 4 + location[1] elif location[0] == 2: vectorized_agent_loc = 8 + location[1] else: # location[0] == 3 vectorized_agent_loc = 12 + location[1] if agent.isBusy == False: # remove any location if it shows it as well self.agent_locations[0][vectorized_agent_loc] = 0 continue else: self.agent_locations[0][vectorized_agent_loc] = 1 if self.DEBUG: print('agent location vector is ', self.agent_locations)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_loc(self) -> None:\n self.state[:, :, Boids.Attr.LOC] += self.state[:, :, Boids.Attr.VEL]\n # wrap-around the simulated environment\n self.state[:, :, Boids.Attr.LOC] %= np.expand_dims(self.env_bounds, axis=1)", "def update_agent_distances_vector(self):\n count = 0\n for agent in self.agents:\n agent_loc = agent.getz()\n\n for i, each_task in enumerate(self.tasks):\n dist = euclid_dist(agent_loc, each_task.getloc())\n self.agent_distances[count][i] = dist\n count += 1\n if self.DEBUG:\n print(self.agent_distances)", "def add_to_simulation(self,agent):\n self.agents[agent.name] = agent\n self.network.add_node(agent)\n \n #agent given a grid queue at initialization\n grid_queue = [gq for gq in self.grid_queues.values() if gq.accepts(agent)][agent.sex]\n agent.grid_queue = grid_queue.index\n self.add_to_grid_queue(agent)", "def update_task_location_vector(self):\n for counter, task in enumerate(self.tasks):\n location = task.getloc()\n if location[0] == 0:\n vectorized_task_loc = location[1]\n elif location[0] == 1:\n vectorized_task_loc = 4 + location[1]\n elif location[0] == 2:\n vectorized_task_loc = 8 + location[1]\n else: # location[0] == 3\n vectorized_task_loc = 12 + location[1]\n self.how_many_tasks_in_each_square[0][vectorized_task_loc] += 1\n self.task_locations[0][counter] = vectorized_task_loc\n # print(location)\n # print(self.how_many_tasks_in_each_square)", "def updated_occupied_locations(self):\n if len(self.occupiedLocations) > self.currentTurn:\n self.occupiedLocations[self.currentTurn] += [self.character.path[-1]]\n else:\n self.occupiedLocations += [[self.character.path[-1]]]", "def add_to_grid_queue(self, agent):\n self.pipes[agent.grid_queue].send(\"add\")\n self.pipes[agent.grid_queue].send(agent)", "def update_loc(self, loc):\n\n self._total_loc += loc\n for region in self._regions:\n region.update_loc(loc)", "def create_block(self, location_list, POI_locations):\n\n \n for i in range(len(location_list)):\n this_cell = self.grid.get_cell_list_contents(location_list[i])\n\n for agent in this_cell:\n if type(agent) is nodeAgent:\n agent.block = True\n\n for i in POI_locations:\n agent.locations[i] = 10000", "def update_available_cells(self, agent):\n try:\n self.available_road_cells.remove(agent.pos)\n except:\n pass\n try:\n self.available_building_cells.remove(agent.pos)\n except:\n pass\n\n adj_cells = self.environment.grid.get_neighborhood(agent.pos, moore=False)\n surrounding_cells = self.environment.grid.get_neighborhood(agent.pos, moore=True)\n\n # Update available cells if agent is a road\n if type(agent) == Road:\n for cell in surrounding_cells:\n # Roads\n if self.creates_valid_road(cell) and cell not in self.available_road_cells:\n self.available_road_cells.append(cell)\n\n # Buildings\n if self.creates_valid_building(cell) and cell not in self.available_building_cells:\n self.available_building_cells.append(cell)\n\n if type(agent) == Building:\n for cell in surrounding_cells:\n # Roads\n if self.creates_valid_road(cell) and cell not in self.available_road_cells:\n self.available_road_cells.append(cell)\n\n # Buildings\n if self.creates_valid_building(cell) and cell not in self.available_building_cells:\n self.available_building_cells(cell)", "def update(self, metric, loc):\n\n self._total_loc += loc\n for region in self._regions:\n region.update(metric, loc)", "def update_current_state(self, agent_num):\n for task_num, i in enumerate(self.tasks):\n current_task_data = []\n task_loc = i.getloc()\n vectorized_task_loc = 
self.get_vectorized_location(task_loc)\n current_task_data.append(self.t)\n current_task_data.append(self.w_EDR)\n current_task_data.append(self.w_RESOURCE)\n current_task_data.append(self.w_DISTANCE)\n current_task_data.append(agent_num)\n\n current_task_data.append(task_num)\n current_task_data.extend(self.is_agent_idle[agent_num]) # Feature 6\n current_task_data.append((self.is_task_finished[0][task_num])) # Feature 2\n current_task_data.append((self.is_task_enabled[0][task_num])) # Feature 3\n current_task_data.append((self.is_task_alive[0][task_num])) # Feature 4\n current_task_data.append((self.travel_time_constraint_satisfied[agent_num][task_num])) # Feature 5\n is_occupied = self.agent_locations[0][vectorized_task_loc] # if 1 agent is there, 0 is unoccupied\n current_task_data.append((is_occupied)) # Feature 1\n current_task_data.append((self.agent_distances[agent_num][task_num])) # Feature 7\n current_task_data.append((self.orientation[agent_num][task_num])) # Feature 9\n current_task_data.append((self.task_deadlines[0][task_num])) # Feature 10\n current_task_data.append((self.is_task_in_progress[0][task_num])) # Feature 11\n current_task_data.append((\n self.orientation[agent_num][task_num] * self.agent_distances[agent_num][task_num])) # Feature 12\n current_task_data.append((self.how_many_tasks_in_each_square[0][vectorized_task_loc])) # Feature 8\n if self.task_to_schedule == -1:\n null_task = 1\n else:\n null_task = 0\n current_task_data.append(null_task)\n current_task_data.append(-7) # This is not really needed, but I'll add in -7 as a bs val\n self.network_state.append(current_task_data)", "def swarm_next_location(node_locations, agents_locations, agents_velocities, agents_angles):\n\n # Potential Field Gradient Calculation\n\n # Gradient of potential field\n dv = numpy.zeros((AGENT_COUNT, DIMENSION_COUNT)) # create an array of values\n\n for agent_it_1 in range(0, AGENT_COUNT):\n # Inter-Agent Forces\n for agent_it_2 in range(0, AGENT_COUNT):\n n_x = int(numpy.linalg.norm(numpy.subtract(agents_locations[agent_it_1], agents_locations[agent_it_2])))\n\n for dimension_it in range(0, DIMENSION_COUNT):\n delta_x = agents_locations[agent_it_1][dimension_it] - agents_locations[agent_it_2][dimension_it]\n dv[agent_it_1][dimension_it] = dv[agent_it_1][dimension_it] - long_range_repulsive * (\n delta_x / numpy.sqrt((SMOOTHNESS_COEFFICIENT ^ 2) + n_x ^ 2)) - 2 * (\n repulsive_gain / repulsive_aoe) * delta_x * numpy.exp((-n_x ^ 2) / repulsive_aoe)\n # Formation Attraction Forces\n if NODE_COUNT > 0:\n for node_it in range(0, NODE_COUNT):\n n_x = int(\n numpy.linalg.norm(\n numpy.subtract(agents_locations[agent_it_1],\n node_locations[node_it]))) # norm of the vector between two bots\n\n for dimension_it in range(0, DIMENSION_COUNT):\n delta_x = agents_locations[agent_it_1][dimension_it] - node_locations[node_it][dimension_it]\n dv[agent_it_1][dimension_it] = dv[agent_it_1][dimension_it] + ATTRACTIVE_GAIN * (\n delta_x / numpy.sqrt((SMOOTHNESS_COEFFICIENT ^ 2) + n_x ^ 2)) + (\n short_range_attractive / attractive_aoe) * delta_x * numpy.exp((-n_x ^ 2) /\n attractive_aoe)\n sliding_surface = numpy.add(agents_velocities, dv)\n # Saturation Block [sat(s)]\n\n sx = numpy.zeros(numpy.size(sliding_surface[0]))\n for agent_it_1 in range(0, AGENT_COUNT):\n for dimension_it in range(0, DIMENSION_COUNT):\n if abs(sliding_surface[agent_it_1][dimension_it]) > SATURATION_LEVEL:\n # FIXME: not sure if this fix was correct but I changed Sx(ip, di) -> sx[ip+di] based on values found\n # in MATLAB code 
sample\n sx[agent_it_1 + dimension_it] = numpy.sign(sliding_surface[agent_it_1][dimension_it]) * SATURATION_LEVEL\n else:\n sx[agent_it_1 + dimension_it] = sliding_surface[agent_it_1][dimension_it]\n # Gains\n\n c = numpy.zeros((AGENT_COUNT, DIMENSION_COUNT))\n k = numpy.zeros((AGENT_COUNT, DIMENSION_COUNT))\n\n # TODO: should be able to make the loop faster somehow\n # row by row multiplication\n for agent_it_1 in range(0, AGENT_COUNT):\n c[agent_it_1] = numpy.multiply(agents_velocities[agent_it_1], REACHING_GAINS)\n k[agent_it_1] = numpy.multiply(sx[agent_it_1], SLIDING_GAINS)\n\n u0 = k + c\n\n print(u0)\n\n return u0", "def _add_agent_to_graph(self, agent: mantrap.agents.base.DTAgent):\n from data import Node\n is_robot = agent.is_robot\n\n # In Trajectron each node has a certain type, which is either robot or pedestrian, an id and\n # state data. Enforce the Trajectron id to the internal ids format, to be able to query the\n # results later on.\n agent_history = agent.history\n acc_history = agent.compute_acceleration(agent_history, dt=self.dt)\n\n node_data = self._create_node_data(state_history=agent_history, accelerations=acc_history)\n node_tye = self._gt_env.NodeType.PEDESTRIAN if not is_robot else self._gt_env.NodeType.ROBOT\n node = Node(node_type=node_tye, node_id=agent.id, data=node_data, is_robot=is_robot)\n if is_robot:\n self._gt_scene.robot = node\n self._gt_scene.nodes.append(node)\n\n # Re-Create online environment with recently appended node.\n self._online_env = self.create_online_env(env=self._gt_env, scene=self._gt_scene)", "def append(self, agent):\n self.agents.append(agent)", "def update_positions(self, grid):\r\n self.grid = grid", "def add_locations(self):\n for _ in range(0, self.num_locations):\n detector_id = self.generate_id()\n detector_direction = self.generate_direction()\n detector_point = self.generate_point()\n self.dataset[detector_id] = (detector_direction, detector_point)\n assert len(self.dataset) == self.num_locations", "def append_locations(self, newlocs: List):\n self.locations.extend(newlocs)", "def add_gauges(self,gauge_coords):\n from numpy import floor\n \n for gauge in gauge_coords: \n # Check if gauge belongs to this grid:\n if all(self.lower[n]<=gauge[n]<self.upper[n] for n in range(self.num_dim)):\n # Set indices relative to this grid\n gauge_index = [int(round((gauge[n]-self.lower[n])/self.delta[n])) \n for n in xrange(self.num_dim)]\n gauge_file_name = 'gauge'+'_'.join(str(coord) for coord in gauge)+'.txt'\n self.gauge_file_names.append(gauge_file_name)\n self.gauges.append(gauge_index)", "def __init__(self, name, agent, all_locations):\n super().__init__(name)\n self.agent = agent\n self.world = agent.world\n self.all_locations = all_locations\n self.location_feat = get_location_key(agent)", "def update_agent_orientation_vector(self, DEBUG=False):\n count = 0\n for agent in self.agents:\n agent_dir = agent.getOrientation()\n agent_loc = agent.getz()\n for i, each_task in enumerate(self.tasks):\n angle_to_move_in = compute_angle_in_rad(agent_loc, each_task.getloc())\n angle_you_must_turn = angle_to_move_in - agent_dir\n angle_you_must_turn_bounded = np.arctan2(np.sin(angle_you_must_turn), np.cos(angle_you_must_turn))\n self.orientation[count][i] = angle_you_must_turn_bounded\n count += 1\n if DEBUG:\n print('orientation to all tasks is ', self.orientation)", "def update_grid_pos(self):\n self.grid_pos = self.get_tile_of_position(self.tank.body.position)", "def move_agent(self, agent):\n id_ = agent.id_\n p = agent.mobility.current\n 
x, y = to_geometry(p[0]), to_geometry(p[1])\n print('move agent{} {} {}'.format(id_, x, y))\n print('move agentr{} {} {}'.format(id_, x, y))", "def reset_agent_locations(self):\n self.transitions_left = self.T-1\n self.x_agent = np.repeat(self.xT.reshape(1, self.dimensions), self.n_agents, axis=0)", "def __init__(self, name, agent, color, all_locations):\n super().__init__(name)\n self.agent = agent\n self.world = agent.world\n self.all_locations = all_locations\n self.color = color\n self.location_feat = get_location_key(agent)", "def spill(self, agent):\n self.spill_list.append(agent)", "def add_transport(self, agent):\n with self.simulation_mutex:\n self.get(\"transport_agents\")[agent.name] = agent", "def move_agent(self, state):\n m = self.m\n n = self.n\n\n cur_env = deepcopy(state.grid)\n cur_env[m, n] = 0\n action = self.choose_action(state)\n\n if action == 'Right':\n if n + 1 >= grid_size or cur_env[m][n+1] != 0:\n Rew = -2 # Reward -5 if we move into wall or another agent\n self.collisions += 1\n else:\n n += 1\n Rew = -0.1 # Reward -1 otherwise\n a = 0 # Action number\n elif action == 'Left':\n if n - 1 < 0 or cur_env[m][n-1] != 0:\n Rew = -2\n self.collisions += 1\n else:\n n -= 1\n Rew = -0.1\n a = 1\n elif action == 'Up':\n if m - 1 < 0 or cur_env[m-1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m -= 1\n Rew = -0.1\n a = 2\n elif action == 'Down':\n if m + 1 >= grid_size or cur_env[m+1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m += 1\n Rew = -0.1\n a = 3\n\n m = m % grid_size\n n = n % grid_size\n self.m = m # Update position of agent\n self.n = n # Update position of agent\n cur_env[m][n] = 1 # Update grid\n new_state = State(cur_env, [m, n]) # Set new state\n terminal = False\n\n if [m, n] == self.end:\n Rew = 10\n terminal = True\n self.carry = True\n\n return new_state, a, Rew, terminal", "def make_land_agents_2016(self):\r\n # add non-gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n\r\n landposlist = self.determine_landpos(hh_row, 'non_gtgp_latitude', 'non_gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.land_time = return_values(hh_row, 'non_gtgp_travel_time')[landposlist.index(landpos)]\r\n try:\r\n self.plant_type = return_values(hh_row, 'non_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'non_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 0\r\n lp = LandParcelAgent(hh_row, self, hh_id, 
hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp, landpos)\r\n self.schedule.add(lp)\r\n if self.gtgp_enrolled == 0 and landpos not in nongtgplist and landpos not in gtgplist:\r\n nongtgplist.append(landpos)\r\n # except:\r\n # pass\r\n\r\n # add gtgp\r\n for hh_row in agents: # from excel_import\r\n hh_id = return_values(hh_row, 'hh_id')\r\n self.total_rice = return_values(hh_row, 'non_gtgp_rice_mu')\r\n if self.total_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.total_dry = return_values(hh_row, 'non_gtgp_dry_mu')\r\n if self.total_dry in ['-3', '-4', -3, None]:\r\n self.total_dry = 0\r\n self.gtgp_rice = return_values(hh_row, 'gtgp_rice_mu')\r\n if self.gtgp_rice in ['-3', '-4', -3, None]:\r\n self.total_rice = 0\r\n self.gtgp_dry = return_values(hh_row, 'gtgp_dry_mu')\r\n if self.gtgp_dry in ['-3', '-4', -3, None]:\r\n self.gtgp_dry = 0\r\n landposlist = self.determine_landpos(hh_row, 'gtgp_latitude', 'gtgp_longitude')\r\n self.age_1 = return_values(hh_row, 'age')[0]\r\n self.gender_1 = return_values(hh_row, 'gender')[0]\r\n self.education_1 = return_values(hh_row, 'education')[0]\r\n for landpos in landposlist:\r\n try:\r\n self.pre_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.non_gtgp_output = return_values(hh_row, 'pre_gtgp_output')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_time = return_values(hh_row, 'gtgp_travel_time')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.plant_type = return_values(hh_row, 'pre_gtgp_plant_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n try:\r\n self.land_type = return_values(hh_row, 'pre_gtgp_land_type')[landposlist.index(landpos)]\r\n except:\r\n pass\r\n self.hh_size = len(return_values(hh_row, 'age'))\r\n self.gtgp_enrolled = 1\r\n\r\n lp_gtgp = LandParcelAgent(hh_id, self, hh_id, hh_row, landpos, self.gtgp_enrolled,\r\n self.age_1, self.gender_1, self.education_1,\r\n self.gtgp_dry, self.gtgp_rice, self.total_dry, self.total_rice,\r\n self.land_type, self.land_time, self.plant_type, self.non_gtgp_output,\r\n self.pre_gtgp_output)\r\n self.space.place_agent(lp_gtgp, landpos)\r\n self.schedule.add(lp_gtgp)\r\n if self.gtgp_enrolled == 1 and landpos not in gtgplist and landpos in nongtgplist:\r\n gtgplist.append(landpos)", "def run_agent(self):\n do_plot = False\n\n # -- Load and init the Helper mission --#\n print('Generate and load the ' + self.mission_type + ' mission with seed ' + str(\n self.mission_seed) + ' allowing ' + self.AGENT_MOVEMENT_TYPE + ' movements')\n mission_xml, reward_goal, reward_intermediate, n_intermediate_rewards, reward_timeout, reward_sendcommand, timeout = init_mission(\n self.agent_host, self.agent_port, self.AGENT_NAME, self.mission_type, self.mission_seed,\n self.AGENT_MOVEMENT_TYPE)\n self.solution_report.setMissionXML(mission_xml)\n\n # -- Define local capabilities of the agent (sensors)--#\n self.agent_host.setObservationsPolicy(MalmoPython.ObservationsPolicy.LATEST_OBSERVATION_ONLY)\n self.agent_host.setVideoPolicy(MalmoPython.VideoPolicy.LATEST_FRAME_ONLY)\n self.agent_host.setRewardsPolicy(MalmoPython.RewardsPolicy.KEEP_ALL_REWARDS)\n\n time.sleep(1)\n\n # -- Get the state of the world along with internal agent state...--#\n 
state_t = self.agent_host.getWorldState()\n\n # -- Get a state-space model by observing the Orcale/GridObserver--#\n if state_t.is_mission_running:\n # -- Make sure we look in the right direction when observing the surrounding (otherwise the coordinate system will rotated by the Yaw !) --#\n # Look East (towards +x (east) and +z (south) on the right, i.e. a std x,y coordinate system) yaw=-90\n self.agent_host.sendCommand(\"setPitch 20\")\n time.sleep(1)\n self.agent_host.sendCommand(\"setYaw -90\")\n time.sleep(1)\n\n # -- Basic map --#\n state_t = self.agent_host.getWorldState()\n\n if state_t.number_of_observations_since_last_state > 0:\n msg = state_t.observations[-1].text # Get the details for the last observed state\n oracle_and_internal = json.loads(msg) # Parse the Oracle JSON\n grid = oracle_and_internal.get(u'grid', 0)\n xpos = oracle_and_internal.get(u'XPos', 0)\n zpos = oracle_and_internal.get(u'ZPos', 0)\n ypos = oracle_and_internal.get(u'YPos', 0)\n yaw = oracle_and_internal.get(u'Yaw', 0)\n pitch = oracle_and_internal.get(u'Pitch', 0)\n\n # -- Parste the JOSN string, Note there are better ways of doing this! --#\n full_state_map_raw = str(grid)\n full_state_map_raw = full_state_map_raw.replace(\"[\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\"]\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\"u'\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\"'\", \"\")\n full_state_map_raw = full_state_map_raw.replace(\" \", \"\")\n aa = full_state_map_raw.split(\",\")\n vocs = list(set(aa))\n for word in vocs:\n for i in range(0, len(aa)):\n if aa[i] == word:\n aa[i] = vocs.index(word)\n\n X = np.asarray(aa);\n nn = int(math.sqrt(X.size))\n X = np.reshape(X, [nn, nn]) # Note: this matrix/table is index as z,x\n\n # -- Visualize the discrete state-space --#\n if do_plot:\n print yaw\n plt.figure(1)\n imgplot = plt.imshow(X.astype('float'), interpolation='none')\n plt.pause(4)\n # plt.show()\n\n # -- Define the unique states available --#\n state_wall = vocs.index(\"stained_hardened_clay\")\n state_impossible = vocs.index(\"stone\")\n state_initial = vocs.index(\"emerald_block\")\n state_goal = vocs.index(\"redstone_block\")\n\n # -- Extract state-space --#\n offset_x = 100 - math.floor(xpos);\n offset_z = 100 - math.floor(zpos);\n\n state_space_locations = {}; # create a dict\n\n for i_z in range(0, len(X)):\n for j_x in range(0, len(X)):\n if X[i_z, j_x] != state_impossible and X[i_z, j_x] != state_wall:\n state_id = \"S_\" + str(int(j_x - offset_x)) + \"_\" + str(int(i_z - offset_z))\n state_space_locations[state_id] = (int(j_x - offset_x), int(i_z - offset_z))\n if X[i_z, j_x] == state_initial:\n state_initial_id = state_id\n loc_start = state_space_locations[state_id]\n elif X[i_z, j_x] == state_goal:\n state_goal_id = state_id\n loc_goal = state_space_locations[state_id]\n\n # -- Generate state / action list --#\n # First define the set of actions in the defined coordinate system \n actions = {\"west\": [-1, 0], \"east\": [+1, 0], \"north\": [0, -1], \"south\": [0, +1]}\n state_space_actions = {}\n for state_id in state_space_locations:\n possible_states = {}\n for action in actions:\n # -- Check if a specific action is possible --#\n delta = actions.get(action)\n state_loc = state_space_locations.get(state_id)\n state_loc_post_action = [state_loc[0] + delta[0], state_loc[1] + delta[1]]\n\n # -- Check if the new possible state is in the state_space, i.e., is accessible --#\n state_id_post_action = \"S_\" + str(state_loc_post_action[0]) + \"_\" + 
str(\n state_loc_post_action[1])\n if state_space_locations.get(state_id_post_action) != None:\n possible_states[state_id_post_action] = 1\n\n # -- Add the possible actions for this state to the global dict --#\n state_space_actions[state_id] = possible_states\n\n # -- Kill the agent/mission --#\n agent_host.sendCommand(\"tp \" + str(0) + \" \" + str(0) + \" \" + str(0))\n time.sleep(2)\n\n # -- Save the info an instance of the StateSpace class --\n self.state_space.state_actions = state_space_actions\n self.state_space.state_locations = state_space_locations\n self.state_space.start_id = state_initial_id\n self.state_space.start_loc = loc_start\n self.state_space.goal_id = state_goal_id\n self.state_space.goal_loc = loc_goal\n\n # -- Reward location and values --#\n # OPTIONAL: If you want to account for the intermediate rewards \n # in the Random/Simple agent (or in your analysis) you can \n # obtain ground-truth by teleporting with the tp command \n # to all states and detect whether you recieve recieve a \n # diamond or not using the inventory field in the oracle variable \n #\n # As default the state_space_rewards is just set to contain \n # the goal state which is found above.\n # \n state_space_rewards = {}\n state_space_rewards[state_goal_id] = reward_goal\n\n # HINT: You can insert your own code for getting \n # the location of the intermediate rewards\n # and populate the state_space_rewards dict \n # with more information (optional). \n # WARNING: This is a bit tricky, please consult tutors before starting\n\n # -- Set the values in the state_space container --#\n self.state_space.reward_states = state_space_rewards\n self.state_space.reward_states_n = n_intermediate_rewards + 1\n self.state_space.reward_timeout = reward_timeout\n self.state_space.timeout = timeout\n self.state_space.reward_sendcommand = reward_sendcommand\n else:\n self.state_space = None\n # -- End if observations --#\n\n return", "def update_world(self):\n pass" ]
[ "0.6316581", "0.6102098", "0.6024968", "0.59487784", "0.59320974", "0.58403987", "0.57298577", "0.5622365", "0.5591658", "0.55257314", "0.5507829", "0.5465471", "0.54418015", "0.5402544", "0.5368544", "0.53452367", "0.5343779", "0.5331292", "0.527093", "0.52531976", "0.5251558", "0.5199508", "0.516083", "0.51268977", "0.51100516", "0.509844", "0.5088033", "0.50717723", "0.50583756", "0.50558007" ]
0.78973
0
updates a vector of euclidean distances to each task. If location of agent moves, this should change.
def update_agent_distances_vector(self): count = 0 for agent in self.agents: agent_loc = agent.getz() for i, each_task in enumerate(self.tasks): dist = euclid_dist(agent_loc, each_task.getloc()) self.agent_distances[count][i] = dist count += 1 if self.DEBUG: print(self.agent_distances)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_task_location_vector(self):\n for counter, task in enumerate(self.tasks):\n location = task.getloc()\n if location[0] == 0:\n vectorized_task_loc = location[1]\n elif location[0] == 1:\n vectorized_task_loc = 4 + location[1]\n elif location[0] == 2:\n vectorized_task_loc = 8 + location[1]\n else: # location[0] == 3\n vectorized_task_loc = 12 + location[1]\n self.how_many_tasks_in_each_square[0][vectorized_task_loc] += 1\n self.task_locations[0][counter] = vectorized_task_loc\n # print(location)\n # print(self.how_many_tasks_in_each_square)", "def calculate_euclidean_dist(self):\n x_dist = self._current_loc.get_column() - self._goal_loc.get_column()\n y_dist = self._current_loc.get_row() - self._goal_loc.get_row()\n # Note ** is power operator in Python\n return self._current_cost + sqrt(x_dist**2 + y_dist**2)", "def _update_distance_(self):\n pass", "def calculate_distances(drives):\n for d in drives:\n d.set_distance()", "def _computeDistances(self) -> None:\n length = len(self.data)\n for i, sequenceOne in enumerate(self.data):\n print(f\"[SeqCluBaselineOffline] Computing distances is at iteration {i} of {length}.\")\n for j, sequenceTwo in enumerate(self.data):\n if i == j:\n self.distances[i][j] = 0\n continue\n distance = self.distanceMeasure.calculateDistance(sequenceOne, sequenceTwo)\n self.distances[i][j] = distance\n self.distances[j][i] = distance", "def distances(self):", "def update_current_state(self, agent_num):\n for task_num, i in enumerate(self.tasks):\n current_task_data = []\n task_loc = i.getloc()\n vectorized_task_loc = self.get_vectorized_location(task_loc)\n current_task_data.append(self.t)\n current_task_data.append(self.w_EDR)\n current_task_data.append(self.w_RESOURCE)\n current_task_data.append(self.w_DISTANCE)\n current_task_data.append(agent_num)\n\n current_task_data.append(task_num)\n current_task_data.extend(self.is_agent_idle[agent_num]) # Feature 6\n current_task_data.append((self.is_task_finished[0][task_num])) # Feature 2\n current_task_data.append((self.is_task_enabled[0][task_num])) # Feature 3\n current_task_data.append((self.is_task_alive[0][task_num])) # Feature 4\n current_task_data.append((self.travel_time_constraint_satisfied[agent_num][task_num])) # Feature 5\n is_occupied = self.agent_locations[0][vectorized_task_loc] # if 1 agent is there, 0 is unoccupied\n current_task_data.append((is_occupied)) # Feature 1\n current_task_data.append((self.agent_distances[agent_num][task_num])) # Feature 7\n current_task_data.append((self.orientation[agent_num][task_num])) # Feature 9\n current_task_data.append((self.task_deadlines[0][task_num])) # Feature 10\n current_task_data.append((self.is_task_in_progress[0][task_num])) # Feature 11\n current_task_data.append((\n self.orientation[agent_num][task_num] * self.agent_distances[agent_num][task_num])) # Feature 12\n current_task_data.append((self.how_many_tasks_in_each_square[0][vectorized_task_loc])) # Feature 8\n if self.task_to_schedule == -1:\n null_task = 1\n else:\n null_task = 0\n current_task_data.append(null_task)\n current_task_data.append(-7) # This is not really needed, but I'll add in -7 as a bs val\n self.network_state.append(current_task_data)", "def compute_distance(self):\n loc = np.extend_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n pos_diff = m-m.transpose(0, 2, 1)\n self.distance = np.linalg.norm(pos_diff, axis=0)", "def update_agent_location_vector(self):\n\n for agent in self.agents:\n location = agent.getz()\n # 
print(location)\n if location[0] == 0:\n vectorized_agent_loc = location[1]\n elif location[0] == 1:\n vectorized_agent_loc = 4 + location[1]\n elif location[0] == 2:\n vectorized_agent_loc = 8 + location[1]\n else: # location[0] == 3\n vectorized_agent_loc = 12 + location[1]\n\n if agent.isBusy == False:\n # remove any location if it shows it as well\n self.agent_locations[0][vectorized_agent_loc] = 0\n continue\n else:\n self.agent_locations[0][vectorized_agent_loc] = 1\n if self.DEBUG:\n print('agent location vector is ', self.agent_locations)", "def run(self):\n for direction in self.directions:\n rotation = direction[0]\n steps = direction[1]\n\n self.make_rotation(rotation)\n hq_found = self.travel(steps)\n\n if hq_found:\n return (abs(self.new_loc[0] + self.new_loc[1]))", "def Cal_Dist(self):\n sum_euclidean_dist = 0\n last_point = None\n for index, this_point in enumerate(self.__traectory_list):\n if last_point is not None:\n sum_euclidean_dist = ((last_point[0]-this_point[0])**2+(last_point[0]-this_point[1])**2)**0.5\n # Debug: Show cumulative geodetic distance\n # Checked with the beginning and the last one\n #print sum_geodetic_dist\n last_point = this_point\n return sum_euclidean_dist", "def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)", "def update_dv(self):\n is_changed = False\n for name in self.distance_vector:\n smallest = float('Inf')\n smallest_neighbor = None\n for neighbor_name in self.neighbors:\n if self.neighbors[neighbor_name].is_killed:\n weight = float('Inf')\n else:\n weight = self.neighbors[neighbor_name].weight\n if name in self.neighbors[neighbor_name].distance_vector:\n candidate = self.neighbors[neighbor_name].distance_vector[name]\n candidate += weight\n if smallest > candidate:\n smallest = candidate\n smallest_neighbor = neighbor_name\n if self.distance_vector[name].cost != smallest and name != self.name_str:\n self.distance_vector[name].cost = smallest\n self.distance_vector[name].link = smallest_neighbor\n is_changed = True\n return is_changed", "def dv_update(self, dv_list):\n for line in dv_list:\n line_sep = line.split(',')\n other_name = line_sep[0] + ':' + line_sep[1]\n other_cost = float(line_sep[2])\n self.distance_vector[other_name] = other_cost", "def UpdateCostMatrix( self, extraXs ):\n for x in extraXs:\n newRow = [ self.EuclideanDistanceSq(x,y) for y in self.Y ]\n self.C.append(newRow)", "def cdistance(self, distances):\n self.distanceMatrix = distances\n self.dataChange()", "def update_agent_orientation_vector(self, DEBUG=False):\n count = 0\n for agent in self.agents:\n agent_dir = agent.getOrientation()\n agent_loc = agent.getz()\n for i, each_task in enumerate(self.tasks):\n angle_to_move_in = compute_angle_in_rad(agent_loc, each_task.getloc())\n angle_you_must_turn = angle_to_move_in - agent_dir\n angle_you_must_turn_bounded = np.arctan2(np.sin(angle_you_must_turn), np.cos(angle_you_must_turn))\n self.orientation[count][i] = angle_you_must_turn_bounded\n count += 1\n if DEBUG:\n print('orientation to all tasks is ', self.orientation)", "def compute_upd(self, move):", "def _update_coords(self, change=None):\n if self.node_id:\n x, y = self.layout[self.node_id]\n self.coords = (x - self.dist, x + self.dist, y - self.dist, y + self.dist)", "def _update_all_tasks(self) -> None:\n for task in self.tasks:\n task.update()", "def compute_nn(self):\n tasks = product(self.data.keys(), repeat=2)\n\n with 
concurrent.futures.ThreadPoolExecutor(max_workers=15) as executor:\n futures = [\n executor.submit(self.nearest_neighbors, t, s)\n for (t, s) in tasks\n ]\n # wait for each job to finish\n for future in tqdm(\n concurrent.futures.as_completed(futures),\n total=len(futures)):\n t, s, d = future.result()\n self.dists[(t, s)] = d", "def distance_train(self):\n\n for self.epoch in range(self.args.epochs):\n # switch to train mode\n self.set_train()\n data_loading_time = 0\n gpu_time = 0\n before_op_time = time.time()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n data_loading_time += (time.time() - before_op_time)\n before_op_time = time.time()\n # -- PUSH INPUTS DICT TO DEVICE --\n self.inputs_to_device(inputs)\n\n # -- DISTANCE ESTIMATION --\n outputs, features = self.predict_distances(inputs)\n\n # -- POSE ESTIMATION --\n outputs.update(self.predict_poses(inputs, features))\n\n # -- PHOTOMETRIC LOSSES --\n losses, outputs = self.photometric_losses(inputs, outputs)\n\n # -- COMPUTE GRADIENT AND DO OPTIMIZER STEP --\n self.optimizer.zero_grad()\n losses[\"distance_loss\"].mean().backward()\n self.optimizer.step()\n\n duration = time.time() - before_op_time\n gpu_time += duration\n\n if batch_idx % self.args.log_frequency == 0:\n self.log_time(batch_idx, duration, losses[\"distance_loss\"].mean().cpu().data,\n data_loading_time, gpu_time)\n self.distance_statistics(\"train\", inputs, outputs, losses)\n data_loading_time = 0\n gpu_time = 0\n\n self.step += 1\n before_op_time = time.time()\n\n self.lr_scheduler.step()\n\n if (self.epoch + 1) % self.args.save_frequency == 0:\n self.save_model()\n\n print(\"Training complete!\")", "def calculate_task_potential(self) -> float:\n cur_xy = self.agent.get_position()[:2]\n goal_xy = np.array([1e3, 0])\n return -np.linalg.norm(cur_xy - goal_xy) * 60", "def updateGraphByEuclideanDistance(self, graph, neighborDistance):\r\n graph.adjacencyMatrix = np.matrix(np.zeros(graph.adjacencyMatrix.shape))\r\n for a1 in range(self.agentNum):\r\n for a2 in range(a1+1, self.agentNum):\r\n if np.linalg.norm(self.agentPos[:,a1] - self.agentPos[:,a2]) <= neighborDistance:\r\n graph.adjacencyMatrix[a1,a2] = 1\r\n graph.adjacencyMatrix[a2,a1] = 1\r\n else:\r\n graph.adjacencyMatrix[a1,a2] = 0\r\n graph.adjacencyMatrix[a2,a1] = 0\r\n \r\n assert (graph.adjacencyMatrix == graph.adjacencyMatrix.T).all()", "def update_floyd_warshall_and_all_vectors(self):\n self.graph.compute_floyd_warshal()\n # Update where agents are\n self.update_agent_location_vector()\n # update deadlines\n self.populate_deadline_vector()\n # update distances to each task and orientation to each task\n self.update_agent_distances_vector()\n self.update_agent_orientation_vector()\n\n self.update_alive_enabled_travel()\n\n return self.graph.is_feasible()", "def _propagateDistance(self, parent_dist):\n travelled = self.dist # absolute distance to this node\n self.dist = parent_dist - self.dist # relative distance to this node\n for i in range(self.nChildren()):\n self.children[i]._propagateDistance(travelled) # pass absolute distance to this node", "def _calc_distance_features(self):\n d = ()\n for dx, dy in DIRECTIONS:\n if dx and dy:\n d += (list(self.__calc_distance(direction_x=dx, direction_y=dy)), )\n elif dx:\n tmp, _, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n elif dy:\n _, tmp, _ = self.__calc_distance(direction_x=dx, direction_y=dy)\n d += (tmp, )\n\n self.dist_features = d\n\n self.direc_dist = self.__calc_direc_distance()", "def 
drive_distance_all(distances, motors):\n return null", "def compute_distances(self):\n if self.df is None:\n return\n\n self.origdist = []\n self.transdist = []\n for i in range(len(self.df)):\n for j in range(i+1, len(self.df)):\n self.origdist.append(distance(self.df['LPsol'].iloc[i], self.df['LPsol'].iloc[j]))\n self.transdist.append(distance(self.df[['x', 'y']].iloc[i], self.df[['x', 'y']].iloc[j]))", "def update_alive_enabled_travel(self):\n self.is_task_alive = np.ones((1, self.num_tasks)) # 1 if alive\n self.is_task_enabled = np.ones((1, self.num_tasks)) # 1 if enabled\n self.travel_time_constraint_satisfied = np.ones((2, self.num_tasks)) # 1 if satisfied\n\n # ALIVE\n for each_task, i in enumerate(self.task_vertex_numbers_for_start):\n # make sure element of first column is less than zero\n if self.graph.M[i][0] <= 0:\n name_of_task_being_checked = self.graph.names_of_vertex[i]\n # find all tasks associated with each node\n # for every element task points to\n for element in self.graph.vertices[i].points_to:\n num = self.graph.gamma[element]\n name_of_ele = self.graph.names_of_vertex[num]\n\n if name_of_ele == 'start':\n continue\n elif name_of_ele == 'end':\n continue\n elif num == i + 1: # is it the end of the task\n continue\n else:\n # task has been found\n c = int((re.findall('\\d+', name_of_ele))[0])\n if self.is_task_finished[0][c - 1] == 0:\n self.is_task_alive[0][each_task] = 0\n if self.DEBUG:\n print('tasks that are alive', self.is_task_alive)\n\n # ENABLED\n for each_task, i in enumerate(self.task_vertex_numbers_for_start):\n # make sure element of first column is less than zero, # TODO: figure out why\n if self.graph.M[i][0] <= 0:\n name_of_task_being_checked = self.graph.names_of_vertex[i]\n # find all tasks associated with each node\n # for every element task points to\n for element in self.graph.vertices[i].points_to:\n num = self.graph.gamma[element] # num of vertex as in M matrix\n name_of_ele = self.graph.names_of_vertex[num]\n weight = self.graph.vertices[i].points_to[element]\n if name_of_ele == 'start':\n continue\n elif name_of_ele == 'end':\n continue\n elif num == i + 1: # is it the end of the task\n continue\n elif self.is_task_alive[0][each_task] == 0:\n # if task is not alive, it cannot be enabled\n self.is_task_enabled[0][each_task] = 0\n else:\n # task that is alive has been been found\n if self.t < self.finish_time_per_task_dict[((num - 1) / 2) - 1] + np.abs(weight):\n self.is_task_enabled[0][each_task] = 0\n if self.DEBUG:\n print('tasks that are enabled', self.is_task_enabled)\n\n # Travel Time Enabled\n for agent_num, agent in enumerate(self.agents):\n for each_task, i in enumerate(self.task_vertex_numbers_for_start):\n # make sure element of first column is less than zero\n if self.graph.M[i][0] <= 0:\n name_of_task_being_checked = self.graph.names_of_vertex[i]\n # find all tasks associated with each node\n # for every element task points to\n for element in self.graph.vertices[i].points_to:\n num = self.graph.gamma[element] # num of vertex as in M matrix\n name_of_ele = self.graph.names_of_vertex[num]\n weight = self.graph.vertices[i].points_to[element]\n task_number = int((num - 1) / 2)\n if len(self.graph.vertices[i].points_to) == 2:\n if self.t < self.agents[agent_num].curr_finish_time + self.agent_distances[agent_num][each_task] / self.agents[\n agent_num].getv():\n self.travel_time_constraint_satisfied[agent_num][each_task] = 0\n continue\n if name_of_ele == 'start':\n continue\n elif name_of_ele == 'end':\n continue\n elif num == i + 1: # 
is it the end of the task\n continue\n else: # more than 2 constraints\n\n if self.t < self.finish_time_per_task_dict[task_number - 1] + \\\n self.agent_distances[agent_num][each_task] / self.agents[agent_num].getv():\n self.travel_time_constraint_satisfied[agent_num][each_task] = 0\n if self.t < self.agents[agent_num].curr_finish_time + self.agent_distances[agent_num][each_task] / self.agents[\n agent_num].getv():\n self.travel_time_constraint_satisfied[agent_num][each_task] = 0\n\n if self.DEBUG:\n print('tasks that are travel_constraint satisfied', self.travel_time_constraint_satisfied)" ]
[ "0.6916695", "0.65369725", "0.6202673", "0.5849838", "0.58113796", "0.57360786", "0.57287556", "0.5716288", "0.57080925", "0.56720674", "0.5670715", "0.56695974", "0.56677645", "0.56104976", "0.5585176", "0.5577297", "0.55730516", "0.55699134", "0.5539065", "0.54842377", "0.54739666", "0.544809", "0.54394364", "0.5390808", "0.5372535", "0.5370854", "0.53542066", "0.5352668", "0.53444034", "0.5310421" ]
0.8082939
0
Updates tasks that are alive, enabled, and travel_time_enabled Again, has some redundancies
def update_alive_enabled_travel(self): self.is_task_alive = np.ones((1, self.num_tasks)) # 1 if alive self.is_task_enabled = np.ones((1, self.num_tasks)) # 1 if enabled self.travel_time_constraint_satisfied = np.ones((2, self.num_tasks)) # 1 if satisfied # ALIVE for each_task, i in enumerate(self.task_vertex_numbers_for_start): # make sure element of first column is less than zero if self.graph.M[i][0] <= 0: name_of_task_being_checked = self.graph.names_of_vertex[i] # find all tasks associated with each node # for every element task points to for element in self.graph.vertices[i].points_to: num = self.graph.gamma[element] name_of_ele = self.graph.names_of_vertex[num] if name_of_ele == 'start': continue elif name_of_ele == 'end': continue elif num == i + 1: # is it the end of the task continue else: # task has been found c = int((re.findall('\d+', name_of_ele))[0]) if self.is_task_finished[0][c - 1] == 0: self.is_task_alive[0][each_task] = 0 if self.DEBUG: print('tasks that are alive', self.is_task_alive) # ENABLED for each_task, i in enumerate(self.task_vertex_numbers_for_start): # make sure element of first column is less than zero, # TODO: figure out why if self.graph.M[i][0] <= 0: name_of_task_being_checked = self.graph.names_of_vertex[i] # find all tasks associated with each node # for every element task points to for element in self.graph.vertices[i].points_to: num = self.graph.gamma[element] # num of vertex as in M matrix name_of_ele = self.graph.names_of_vertex[num] weight = self.graph.vertices[i].points_to[element] if name_of_ele == 'start': continue elif name_of_ele == 'end': continue elif num == i + 1: # is it the end of the task continue elif self.is_task_alive[0][each_task] == 0: # if task is not alive, it cannot be enabled self.is_task_enabled[0][each_task] = 0 else: # task that is alive has been been found if self.t < self.finish_time_per_task_dict[((num - 1) / 2) - 1] + np.abs(weight): self.is_task_enabled[0][each_task] = 0 if self.DEBUG: print('tasks that are enabled', self.is_task_enabled) # Travel Time Enabled for agent_num, agent in enumerate(self.agents): for each_task, i in enumerate(self.task_vertex_numbers_for_start): # make sure element of first column is less than zero if self.graph.M[i][0] <= 0: name_of_task_being_checked = self.graph.names_of_vertex[i] # find all tasks associated with each node # for every element task points to for element in self.graph.vertices[i].points_to: num = self.graph.gamma[element] # num of vertex as in M matrix name_of_ele = self.graph.names_of_vertex[num] weight = self.graph.vertices[i].points_to[element] task_number = int((num - 1) / 2) if len(self.graph.vertices[i].points_to) == 2: if self.t < self.agents[agent_num].curr_finish_time + self.agent_distances[agent_num][each_task] / self.agents[ agent_num].getv(): self.travel_time_constraint_satisfied[agent_num][each_task] = 0 continue if name_of_ele == 'start': continue elif name_of_ele == 'end': continue elif num == i + 1: # is it the end of the task continue else: # more than 2 constraints if self.t < self.finish_time_per_task_dict[task_number - 1] + \ self.agent_distances[agent_num][each_task] / self.agents[agent_num].getv(): self.travel_time_constraint_satisfied[agent_num][each_task] = 0 if self.t < self.agents[agent_num].curr_finish_time + self.agent_distances[agent_num][each_task] / self.agents[ agent_num].getv(): self.travel_time_constraint_satisfied[agent_num][each_task] = 0 if self.DEBUG: print('tasks that are travel_constraint satisfied', self.travel_time_constraint_satisfied)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_tasks(self, tasks):\n\n self._print('Updating tasks {} with {} ...'.format(self._tasks, tasks))\n\n self._tasks.update(tasks)", "def __update_task(self, tasks, **extra_args):\n for task in tasks:\n assert isinstance(\n task, Task), \"Core.update_job_state: passed an argument\" \\\n \" which is not a `Task` instance.\"\n task.update_state()", "def _update_all_tasks(self) -> None:\n for task in self.tasks:\n task.update()", "def update_task(self, name, fields):\n pass", "def update_based_on_time(self):\n for counter, agent in enumerate(self.agents):\n if self.t >= agent.getFinishTime() and self.agent_current_task[counter] != -1: # task is finished\n task_num = self.agent_current_task[counter]\n self.finish_time_per_task_dict[task_num] = self.t\n self.is_task_finished[0][task_num] = 1\n agent.changebusy(False)\n self.update_agent_is_idle_based_on_class()", "def task_update(self):\n try:\n self.task_stop()\n except:\n pass\n self.update()\n self.task_start()", "def update_job_state(self, *tasks, **extra_args):\n pass", "def _update(self, task):\n raise NotImplementedError(\"Subclasses should implement this!\")", "def update_resources(self):\n\n self.update(True)\n resp = self._connection._patch(\n get_url('task update', uuid=self._uuid))\n\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n raise_on_error(resp)\n\n self.update(True)", "def _update():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\n\tIDStr = myOpt.id\n\tIDs = re.split('\\s*,\\s*', IDStr)\n\n\tif len(IDs) == 0:\n\t\tprint('ERR: no add task input')\n\t\treturn 1\n\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\tfor ID in IDs:\n\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.finish_status: myOpt.f})\n\n\t\tif myOpt.vt:\n\t\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.version_time: myOpt.vt})\n\n\t#commit\n\tmyTaskSession.commit()\n\n\t\"\"\"\n\t#ERR: not given itsm id for update \n\tif not myOpt.id:\n\t\tprint('Error: no itsm id given for update finish_status to 1')\n\t\treturn 1\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\t\n\tquery.filter(WorkToolkitDB.db.Task.id == myOpt.id).update({'finish_status': myOpt.f})\n\tmyTaskSession.commit()\n\n\t\n\tdata = query.filter(WorkToolkitDB.db.Task.id == myOpt.id).all()\n\tfor record in data:\n\t\t\t#record_arr = record.to_array()\n\t\t\tpt.add_row(record.to_array())\n\n\tprint(pt)\n\t\"\"\"\n\n\treturn 0", "def task_changed(self, fields):\n update = {}\n for field in fields:\n update[field] = self.__data[field]\n\n self.connection.update_task(self.name, update)", "def task_changed(old_task, diff, now_task):", "def update(self):\n for uid, server in self.servers_online.items():\n if len(server.jobs):\n self.populate_server(server)\n for uid, server in self.servers_online.items():\n if server.jobs:\n server.jobs[0].task_time -= time_interval\n server.waiting_time -= time_interval\n if server.jobs[0].task_time <= 0:\n completed_task = server.jobs.pop(0)\n print(f\"Task '{completed_task.description}' completed\")\n self.all_tasks.remove(completed_task)\n self.servers_jobs_list[uid].pop(0)\n for uid, server in self.all_servers.items():\n if server.status:\n print(f\"{server.server_name} has {len(set(server.jobs))} job(s)\")\n else:\n print(f\"{server.server_name} is offline\")", "def run(self):\n modify_tasks = filter(self._task_filter, acm.FAelTask.Select(''))\n print([task.Name() for task in 
modify_tasks])\n for task in modify_tasks:\n #new_task = task.Clone()\n self._update(task)\n try:\n task.Commit()\n except:\n print('Skipping: Task already exists')", "def update(self, *args, **kwargs):\n\n print(\"\\nIn MOCK ALGO OBSERVER....\")\n\n if 'remaining_tasks' in kwargs:\n\n remaining_tasks = len(kwargs['remaining_tasks'])\n\n print(\"\\tThere are {} remaining tasks\".format(remaining_tasks))\n print(\"\\tIs {} less than {}? {}\".format(remaining_tasks, min_tasks, (remaining_tasks < min_tasks)))\n\n # If we don't have the minimum number of hits out...\n if remaining_tasks < min_tasks:\n print(\"\\tRefilling queue with {} new task(s)\".format(min_tasks - remaining_tasks))\n # Fill up the tasks again\n for t in range(min_tasks - remaining_tasks):\n new_task = make_rand_task()\n tasks.append(new_task)\n\n actAMT.init_tasks(tasks, hit_type_init_file)\n del tasks[:]\n\n if 'completed_task' in kwargs:\n add_to_db(kwargs['completed_task'])", "def update(self, task_model):\n raise NotImplementedError()", "def update_all_tasks():\n # TODO: Schedule this function after starting a task (replace if with while loop with sleep inside)\n active_dict = dict()\n\n # Use list to avoid \"RuntimeError: dictionary changed size during iteration\"\n for pid in list(app.config['OPS_PIPE_PARENT'].keys()):\n if update_task(pid):\n task = Task.query.filter_by(id=pid).first()\n active_dict[pid] = task.status.name\n\n return jsonify(active_dict)", "async def have_tasks_passed(self):\n # This is where the main logic to update platform data goes.\n \n try:\n if self.default_list is None:\n passed_list = await self.hass.async_add_executor_job(\n self.gtasks.get_tasks,\n True,\n None,\n date.today(),\n '@default',\n float('inf'),\n None,\n None,\n None,\n False,\n False)\n else:\n passed_list = await self.hass.async_add_executor_job(\n self.gtasks.get_tasks,\n True,\n None,\n date.today(),\n self.default_list,\n float('inf'),\n None,\n None,\n None,\n False,\n False)\n self.hass.data[DOMAIN_DATA][\"passed_list\"] = passed_list\n except Exception as e:\n _LOGGER.exception(e)", "def update_task(self, name, fields):\n task = self.task(name)\n if not task:\n return False\n\n try:\n data = json.loads(task)\n data.update(fields)\n task = json.dumps(data)\n except ValueError:\n return False\n\n return self.create_task(name, task)", "def test_update_task_states(self):\r\n changed = self.combinedoe.update_task_states()\r\n self.assertFalse(changed)\r\n\r\n current_task = self.combinedoe.current_task\r\n current_task.change_state(CombinedOpenEndedV1Module.DONE)\r\n changed = self.combinedoe.update_task_states()\r\n\r\n self.assertTrue(changed)", "def tasks_update(cls, app):\r\n\r\n try:\r\n tasks_info = {}\r\n tasks = app.db.session.query(SpiderTask).all()\r\n except Exception as err:\r\n print(err)\r\n else:\r\n for task in tasks:\r\n tasks_info[task.id] = task.to_json()\r\n return tasks_info\r\n finally:\r\n app.db.session.close()", "def task_status():\n pass", "def updateReadyTasks(g, readyTasks, nodes, scheduledNode, deletion=True, verbose=False):\r\n for n in g.successors(scheduledNode):\r\n ready = True\r\n for p in g.predecessors(n):\r\n if p in readyTasks or p in nodes:\r\n ready = False\r\n if ready:\r\n if n not in readyTasks:\r\n readyTasks.append(n)\r\n if deletion and n in nodes:\r\n nodes.remove(n)\r\n if verbose:\r\n print(\"Ready task list after update :\", readyTasks)", "def _update_ax_tasks(self, timestring, job_s, task_s):\n\n self._ax_tasks.clear()\n\n # initialize empty lists\n data = []\n texts = 
[]\n\n for label in self._task_status_labels:\n data.append(task_s[label])\n texts.append(\"{}: {}\".format(label, task_s[label]))\n\n # the text at the center of the donut\n if job_s == \"N/A\":\n center_text = \"Not available\"\n else:\n center_text = \"Total: {}\".format(sum(data))\n\n # title\n title = \"Task status at {}\".format(timestring)\n\n # call the underlying donut drawer\n self._donut_drawer(\n self._ax_tasks, data, self._task_status_labels,\n texts, title, center_text)", "def set_task_not_started(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 2)\n\n # Refresh the table\n self.write_tasks_table()", "def _update_activities(self) -> None:\n self.hass.async_create_task(\n async_update_programs_and_zones(self.hass, self._entry)\n )", "def _clear_tasks(self):\n listOfTasks = self.model.find(xmlns + 'ListOfTasks') \n assert listOfTasks != None\n \n for task in listOfTasks:\n task.attrib['scheduled'] = 'false'", "def transition(self, key, start, finish, *args, **kwargs):\n # Only update Tethys on tasks (keys) it cares about\n tracked_key = self.scheduler.get_metadata(keys=[key], default=False)\n\n if tracked_key:\n # Build update dask job status request against bound Tethys host\n combined_status = '{}-{}'.format(start, finish)\n url = self.tethys_endpoint + '/update-dask-job-status/' + key + '/?status=' + combined_status\n\n # Prevent deadlock\n if start != 'released':\n # Submit update request to Tethys Asynchronously\n http_client = AsyncHTTPClient()\n http_client.fetch(url, method='GET')", "def _task_update(context, task_ref, values, session=None):\n if 'deleted' not in values:\n values[\"deleted\"] = False\n task_ref.update(values)\n task_ref.save(session=session)\n return task_ref", "def test_update_instances_schedule_state(self):\n pass" ]
[ "0.7101653", "0.7070222", "0.696539", "0.68238944", "0.662848", "0.6610936", "0.654214", "0.6339835", "0.6317559", "0.6245997", "0.6237602", "0.62275285", "0.62232226", "0.61214197", "0.61144865", "0.6111591", "0.6089573", "0.6084587", "0.60815936", "0.6032838", "0.60157925", "0.5984816", "0.59728706", "0.59415644", "0.59252185", "0.5904104", "0.58650035", "0.58131206", "0.58067715", "0.5803149" ]
0.77476394
0
Schedules a task based on aggregate score updates agent current task based on this
def schedule_task(self, counter): task_to_schedule = [] each_agent = self.agents[counter] task_found = False H1_score_list = [] H2_score_list = [] H3_score_list = [] H1_dict = {} H2_dict = {} H3_dict = {} # Agent not idle case, exit immediately if self.is_agent_idle[counter][0] == 0: print(each_agent.getName(), 'is not Idle') print(each_agent.getName(), 'is scheduled for null task') task_to_schedule.append(-1) self.task_to_schedule = task_to_schedule return task_to_schedule # if agent is busy, output null task for task_num, each_task in enumerate(self.tasks): if self.is_task_finished[0][task_num] == 1: # Can't schedule a task that has already been completed continue if self.is_task_alive[0][task_num] == 0: continue # if self.is_task_enabled[0][task_num] == 0: # continue # if self.travel_time_constraint_satisfied[0][task_num] == 0: # continue if self.is_task_in_progress[0][task_num] == 1: # can't schedule the same task twice continue # All constraints satisfied # normalize each score separately deadline_score = (self.heuristic(heuristic_num=1, task_num=task_num, agent_num=counter)) occupacity_score = (self.heuristic(heuristic_num=2, task_num=task_num, agent_num=counter)) distance_score = (self.heuristic(heuristic_num=3, task_num=task_num, agent_num=counter)) * 150 / np.sqrt( 32) # The 150/np.sqrt(32) puts it in the same range as deadline score H1_dict[task_num] = deadline_score H1_score_list.append(deadline_score) H2_dict[task_num] = occupacity_score H2_score_list.append(occupacity_score) H3_dict[task_num] = distance_score H3_score_list.append(distance_score) task_found = True if not task_found: task_to_schedule.append(-1) self.task_to_schedule = task_to_schedule self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[3]) return task_to_schedule new_dict = {} for key in H1_dict: new_dict[key] = H1_dict[key] * self.w_EDR + H2_dict[key] * self.w_RESOURCE + H3_dict[key] * self.w_DISTANCE highest = max(new_dict.values()) # figure out the highest score tasks_with_best_scores = [k for k, v in new_dict.items() if v == highest] # find all keys associated with the highest value if len(tasks_with_best_scores) > 1: print(tasks_with_best_scores) if self.do_you_like_big_tasks: task_chosen = max(tasks_with_best_scores) else: task_chosen = min(tasks_with_best_scores) print('Expert: Task chosen for', each_agent.getName(), ' is ', task_chosen, ' enabled: ', self.is_task_enabled[0][task_chosen]) self.teacher_actions[self.global_schedule_num].append(task_chosen) # neural net task self.converge_embedding_based_on_history(task_chosen, counter) neural_net_task = self.predict_task(task_chosen, counter) self.learner_actions[self.global_schedule_num].append(neural_net_task) print('Neural Net: Task chosen for', each_agent.getName(), ' is ', neural_net_task, ' enabled: ', self.is_task_enabled[0][neural_net_task]) # all of this changed to represent neural net task # if self.is_task_enabled[0][neural_net_task] == 0: # print('Task was not enabled, but is alive') if neural_net_task == task_chosen: self.num_correct_predictions_total[self.global_schedule_num] += 1 self.num_predictions_total[self.global_schedule_num] += 1 # Only do it if all of the pre-conditions are met if self.global_schedule_num != 0: location_of_task = self.tasks[neural_net_task].getloc() vectorized_task_num = self.get_vectorized_location(location_of_task) # checks if current task is in a location that is occupied if self.is_task_alive[0][neural_net_task] == 0 or \ self.is_task_enabled[0][neural_net_task] == 0 or \ 
self.travel_time_constraint_satisfied[counter][neural_net_task] == 0 or \ self.agent_locations[0][vectorized_task_num] >= 1 or \ self.is_task_in_progress[0][neural_net_task]: task_to_schedule.append(-1) self.task_to_schedule = task_to_schedule print('Task ', neural_net_task, ' did not meet criteria of being enabled, alive, travel satisfied, or not occupied') self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[1]) return task_to_schedule else: # global task schedule num is zero if self.is_task_enabled[0][task_chosen] == 0: print('Task was not enabled, but is alive') # Only do it if all of the pre-conditions are met location_of_task = self.tasks[task_chosen].getloc() vectorized_task_num = self.get_vectorized_location(location_of_task) # checks if current task is in a location that is occupied if self.is_task_alive[0][task_chosen] == 0 or \ self.is_task_enabled[0][task_chosen] == 0 or \ self.travel_time_constraint_satisfied[counter][task_chosen] == 0 or \ self.agent_locations[0][vectorized_task_num] >= 1: task_to_schedule.append(-1) self.task_to_schedule = task_to_schedule print('task ', task_chosen, ' did not meet criteria of being enabled, alive, travel satisfied, or not occupied') self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[1]) return task_to_schedule if self.global_schedule_num != 0: # if self.t > self.task_deadlines[0][neural_net_task]: if self.has_any_deadlines_passed(neural_net_task): task_to_schedule.append(-1) print('Deadline is passed') # updated where this is changed self.did_schedule_fail = True self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[2]) return task_to_schedule else: # if self.t > self.task_deadlines[0][neural_net_task]: if self.has_any_deadlines_passed(task_chosen): task_to_schedule.append(-1) print('Deadline is passed') # updated where this is changed self.did_schedule_fail = True self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[2]) return task_to_schedule if self.global_schedule_num != 0: task_to_schedule.append(neural_net_task) self.agent_current_task[counter] = task_to_schedule[0] # changes agent current task self.task_to_schedule = task_to_schedule self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[4]) self.number_of_decisions_before_terminal_state[self.global_schedule_num] += 1 # maybe remove the return print('Task scheduled for', each_agent.getName(), 'at time ', self.t, 'is ', self.task_to_schedule) return task_to_schedule # ALL zero indexed else: task_to_schedule.append(task_chosen) self.agent_current_task[counter] = task_to_schedule[0] # changes agent current task self.task_to_schedule = task_to_schedule self.what_happend_at_every_timestep[self.global_schedule_num].append(self.condition_dict[4]) self.number_of_decisions_before_terminal_state[self.global_schedule_num] += 1 # maybe remove the return print('Task scheduled for', each_agent.getName(), 'at time ', self.t, 'is ', self.task_to_schedule) return task_to_schedule # ALL zero indexed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_task_to_schedule(self, agent_num):\n task = self.schedule_task(agent_num) # get chosen task\n\n agent = self.agents[agent_num] # get current agent\n # self.write_csv_pairwise(agent_num)\n # self.write_csv(agent_num)\n\n if task[0] == -1: # if null task chosen\n task_currently_working_on = self.agent_current_task[agent_num]\n if task_currently_working_on != -1 and self.is_task_finished[0][task_currently_working_on] == 0: # task is currently in progress\n pass\n else: # task is finished, but there is no task to schedule\n self.agent_current_task[agent_num] = -1\n else: # tasks returned contain actual tasks\n\n self.agent_current_task[agent_num] = task[0] # update current agent task\n agent.changebusy(True) # mark agent as busy\n self.update_agent_is_idle_based_on_class()\n\n self.update_agent_pose_and_finish_time_and_log_event(agent_num)", "def _schedule(self,task_dict):\n times = [time(), None, None, None] # (schedule timestamp, execution timestamp, stop timestamp, get timestamp)\n result_id = self._extract_features.remote(self, times) # calculation is started in new remote task \n task_dict[result_id] = self._idx # add sample index ", "def score(self, task):\n raise NotImplementedError(\"must subclass and implement score(task)\")", "def schedule_task(self, task):\n if self.time_based:\n minimum_wait_server = float('inf')\n for uid, server in self.all_servers.items():\n if server.status:\n if minimum_wait_server > server.waiting_time:\n target_server = server\n minimum_wait_server = server.waiting_time\n try:\n target_server.jobs.append(task)\n target_server.waiting_time += task.task_time\n self.servers_jobs_list[target_server.server_id].append(task)\n except Exception:\n print(\"There are no servers left to reassign\")\n raise Exception(\"################# CHAOS MONKEY WON ####################\")\n else:\n minimum_jobs = float('inf')\n for uid, server in self.all_servers.items():\n if server.status:\n if minimum_jobs > len(server.jobs):\n minimum_jobs = len(server.jobs)\n target_server = server\n try:\n target_server.jobs.append(task)\n target_server.waiting_time += task.task_time\n self.servers_jobs_list[target_server.server_id].append(task)\n except Exception:\n print(\"There are no servers left to reassign\")\n raise Exception(\"################# CHAOS MONKEY WON ####################\")", "def actor(self):\n\n xi = self.scheduler_period # Number of steps between task re-scheduling\n T = self.steps_per_episode # Total number of steps in an episode\n B = self.replay_buffer # Replay buffer to store trajectories\n\n pi_theta = self.amodel\n\n # Collect new trajectory from the environment\n s = self.env.reset() # Obtain the initial environment state\n\n for task in self.tasks:\n print(task, self.amodel.distribution(s, task), self.qmodel.Qs(s, task))\n\n tau = [] # Store trajectory as list of (state, action)-tuples\n Tau = [] # Store tasks that have been scheduled\n h = 0 # Keep track of how many tasks have been scheduled so far\n score = 0\n for t in range(T): # Repeat for T time steps\n if t % xi == 0: # Check if a new task should be scheduled\n task = self.schedule_task(Tau) # If so, sample a new task from the scheduler\n Tau.append(task)\n print(\"Switching to \", task)\n h += 1 # Update number of tasks scheduled\n a, dist = pi_theta.\\\n sample_distribution(s, Tau[-1]) # Sample action according to latest task\n s_p, rs = self.env.step(a) # Execute action, obtain observation and rewards\n tau.append((s, a, rs, dist)) # Add to trajectory\n s = s_p\n score += np.array([rs[t] 
for t in self.tasks])\n if s_p.is_terminal():\n break\n self._update_listeners(tau, Tau)\n print(\"Score: \", score)\n B.append(tau) # Add trajectory and scheduling choices to replay buffer\n\n self.train_scheduler(tau, Tau)", "def process(self, task):\n # Predict timestamp for the first run\n _, date = task.trigger(wait=False, **task.trigger_args)\n\n # Adding the task in schedule queue\n self.task_manager.schedule_task(task, date)", "def schedule_task(self, Tau):\n return random.choice(self.tasks)", "def schedule_task(self, name, date):\n pass", "def update_agent_pose_and_finish_time_and_log_event(self, agent_num):\n agent = self.agents[agent_num]\n if self.task_to_schedule[0] == -1:\n pass\n else:\n # this happens as soon as it is scheduled, i think\n scheduled_task = self.task_to_schedule[0]\n agent.curr_task = scheduled_task # assigns inside agent class (REDUNDANCY)\n agent.set_orientation(self.orientation[agent_num][scheduled_task])\n agent.task_list.append(scheduled_task)\n agent.updateAgentLocation(self.tasks[scheduled_task].getloc())\n\n # Record it\n agent.task_event_dict[scheduled_task] = [self.t, self.t + self.tasks[scheduled_task].getc()]\n agent.setFinishTime(self.t + self.tasks[scheduled_task].getc())\n self.is_task_in_progress[0][self.task_to_schedule[0]] = 1", "def schedule(self):\n\n self.task_cls.original_apply_async(*self.task_args,\n **self.task_kwargs)", "def generic_task(self, x_in, y, task_name):\n self.fit(x_in, y, task_name=task_name)\n print 'The score for task ', task_name, ' is ', self.score(x_in, y)", "def schedule_task(self, name, date):\n with self.db_lock:\n return self.rcon.zadd(self.job_key, name, date)", "def assign_tasks_per_task(self, current_time, job_id):\r\n job = self.jobs[job_id]\r\n random.shuffle(self.worker_indices)\r\n task_arrival_events = []\r\n for i, task_duration in enumerate(job.unscheduled_tasks):\r\n loads = [(i, self.workers[i].queue_length())\r\n for i in self.worker_indices[PROBE_RATIO*i:PROBE_RATIO*(i+1)]]\r\n #loads_str = \", \".join([\"%s:%s\" % (l[0], l[1]) for l in loads])\r\n #print \"Loads: %s\" % loads_str\r\n loads.sort(key = lambda x: x[1])\r\n #print (\"Assigning task of duration %s for job %s to worker %s\" %\r\n # (task_duration, job_id, loads[0][0]))\r\n task_arrival_events.append(\r\n (current_time + 2*NETWORK_DELAY,\r\n TaskArrival(self.workers[loads[0][0]], task_duration, job_id)))\r\n return task_arrival_events", "def __launch_task(self, current_time):\r\n assert self.queued_tasks > 0\r\n assert self.running_tasks < self.num_cores\r\n\r\n self.queued_tasks -= 1\r\n if not len(get_param(\"relative_weights\")) > self.current_user:\r\n print get_param(\"relative_weights\"), self.current_user\r\n assert False\r\n tasks_per_round = get_param(\"relative_weights\")[self.current_user]\r\n if self.task_count >= tasks_per_round:\r\n # Move on to the next user.\r\n self.task_count = 0\r\n self.current_user = (self.current_user + 1) % self.num_users\r\n\r\n while len(self.queues[self.current_user]) == 0:\r\n self.current_user = (self.current_user + 1) % self.num_users\r\n self.task_count = 0\r\n # Get the first task from the queue\r\n job, task_id = self.queues[self.current_user][0]\r\n # Remove the task from the user's queue.\r\n self.queues[self.current_user] = self.queues[self.current_user][1:]\r\n self.task_count += 1\r\n assert job.user_id == self.current_user\r\n task_length = job.get_task_length(task_id)\r\n event = (current_time + task_length, TaskCompletion(job, self))\r\n 
self.stats_manager.task_started(self.current_user, current_time)\r\n self.time_started = current_time\r\n if get_param(\"record_task_info\"):\r\n job.record_wait_time(task_id, current_time)\r\n self.running_tasks += 1\r\n return event", "def task_refresh_all_stats_score(request):\n start = time.time()\n cls_name = request.POST.get('cls') or 'Day'\n destroy = int(request.POST.get('destroy', '0'))\n cursor = datastore_query.Cursor(urlsafe=request.POST.get('cursor'))\n task_count = int(request.POST.get('task_count', '0'))\n assert cls_name in ('Day', 'Multi'), cls_name\n cls = (\n models.AccountStatsDay\n if cls_name == 'Day' else models.AccountStatsMulti)\n\n # Task queues are given 10 minutes. Do it in 9 minutes chunks to protect\n # against most timeout conditions.\n timeout = 540\n updated = 0\n skipped = 0\n try:\n futures = []\n chunk_size = 10\n items = []\n more = True\n if destroy:\n options = ndb.QueryOptions(keys_only=True)\n else:\n options = ndb.QueryOptions()\n while more:\n batch, cursor, more = cls.query(default_options=options).fetch_page(\n 20, start_cursor=cursor)\n if destroy:\n futures.extend(ndb.delete_multi_async(batch))\n updated += len(batch)\n else:\n for i in batch:\n score = models.compute_score(i)\n if i.score != score:\n items.append(i)\n if len(items) == chunk_size:\n futures.extend(ndb.put_multi_async(items))\n updated += chunk_size\n items = []\n futures = [f for f in futures if not f.done()]\n else:\n skipped += 1\n if time.time() - start >= timeout:\n break\n if items:\n futures.extend(ndb.put_multi_async(items))\n updated += chunk_size\n ndb.Future.wait_all(futures)\n if not more and cls_name == 'Day':\n # Move to the Multi instances.\n more = True\n cls_name = 'Multi'\n cursor = datastore_query.Cursor()\n if more:\n taskqueue.add(\n url=reverse(task_refresh_all_stats_score),\n params={\n 'cls': cls_name,\n 'cursor': cursor.urlsafe() if cursor else '',\n 'destroy': str(destroy),\n 'task_count': str(task_count+1),\n },\n queue_name='refresh-all-stats-score')\n result = 200\n except (db.Timeout, DeadlineExceededError):\n result = 500\n out = 'Index: %d\\nType = %s\\nStored %d items\\nSkipped %d\\nIn %.1fs\\n' % (\n task_count, cls.__name__, updated, skipped, time.time() - start)\n if result == 200:\n logging.info(out)\n else:\n logging.error(out)\n return HttpTextResponse(out, status=result)", "def task(self,target,name = \"\", prio = 10, period = 0, time2run = 0):\n newtask = Task(target,name,prio,period, time2run)\n self.taskmap[newtask.tid] = newtask\n self.schedule(newtask)\n\n\n return newtask.tid", "def algo_schedule():\n\talgo(node.id, node)\n\treactor.callLater(STEP_TIME, algo_schedule)", "def _schedule_task(self, task: _Task) -> None:\n if isinstance(task.interval, (int, float)):\n task.next = self.sys_loop.call_later(task.interval, self._run_task, task)\n elif isinstance(task.interval, time):\n today = datetime.combine(date.today(), task.interval)\n tomorrow = datetime.combine(date.today() + timedelta(days=1), task.interval)\n\n # Check if we run it today or next day\n if today > datetime.today():\n calc = today\n else:\n calc = tomorrow\n\n task.next = self.sys_loop.call_at(calc.timestamp(), self._run_task, task)\n else:\n _LOGGER.critical(\n \"Unknown interval %s (type: %s) for scheduler %s\",\n task.interval,\n type(task.interval),\n task.id,\n )", "def task_stagnant(task):", "def update(self, *args, **kwargs):\n\n print(\"\\nIn MOCK ALGO OBSERVER....\")\n\n if 'remaining_tasks' in kwargs:\n\n remaining_tasks = len(kwargs['remaining_tasks'])\n\n 
print(\"\\tThere are {} remaining tasks\".format(remaining_tasks))\n print(\"\\tIs {} less than {}? {}\".format(remaining_tasks, min_tasks, (remaining_tasks < min_tasks)))\n\n # If we don't have the minimum number of hits out...\n if remaining_tasks < min_tasks:\n print(\"\\tRefilling queue with {} new task(s)\".format(min_tasks - remaining_tasks))\n # Fill up the tasks again\n for t in range(min_tasks - remaining_tasks):\n new_task = make_rand_task()\n tasks.append(new_task)\n\n actAMT.init_tasks(tasks, hit_type_init_file)\n del tasks[:]\n\n if 'completed_task' in kwargs:\n add_to_db(kwargs['completed_task'])", "def schedule(self, newTask, time):\r\n sys.exit(\"You should implement function schedule in subclass\")\r\n pass", "def update_based_on_time(self):\n for counter, agent in enumerate(self.agents):\n if self.t >= agent.getFinishTime() and self.agent_current_task[counter] != -1: # task is finished\n task_num = self.agent_current_task[counter]\n self.finish_time_per_task_dict[task_num] = self.t\n self.is_task_finished[0][task_num] = 1\n agent.changebusy(False)\n self.update_agent_is_idle_based_on_class()", "def reschedule(self, task, recur=False):\n raise NotImplementedError()", "def schedule_task(self, task, date):\n return self.connection.schedule_task(task, date)", "def apply_task(self, task):\n self.tasks.add(task)", "def task_update_stats(request):\n tasks = json.loads(request.POST.get('tasks'))\n date_str = request.POST.get('date')\n cursor = ndb.Cursor(urlsafe=request.POST.get('cursor'))\n countdown = 15\n if not tasks:\n msg = 'Nothing to execute!?'\n logging.warning(msg)\n out = HttpTextResponse(msg)\n else:\n # Dispatch the task to execute.\n task = tasks.pop(0)\n logging.info('Running %s.', task)\n if task.count('-') == 2:\n out, cursor = update_daily_stats(\n cursor, datetime.datetime.strptime(task, DATE_FORMAT))\n elif task == 'monthly':\n # The only reason day is used is in case a task queue spills over the next\n # day.\n day = datetime.datetime.strptime(date_str, DATE_FORMAT)\n out, cursor = update_monthly_stats(cursor, day)\n elif task == '30':\n yesterday = (\n datetime.datetime.strptime(date_str, DATE_FORMAT)\n - datetime.timedelta(days=1)).date()\n out, cursor = update_rolling_stats(cursor, yesterday)\n else:\n msg = 'Unknown task %s, ignoring.' % task\n cursor = ''\n logging.error(msg)\n out = HttpTextResponse(msg)\n\n if cursor:\n # Not done yet!\n tasks.insert(0, task)\n countdown = 0\n\n if out.status_code == 200 and tasks:\n logging.info('%d tasks to go!\\n%s', len(tasks), ', '.join(tasks))\n # Space out the task queue execution by 15s to reduce the risk of\n # datastore inconsistency to get in the way, since no transaction is used.\n # This means to process a full month, it'll include 31*15s = 7:45 minutes\n # delay. 
15s is not a lot but we are in an hurry!\n taskqueue.add(\n url=reverse(task_update_stats),\n params={\n 'tasks': json.dumps(tasks),\n 'date': date_str,\n 'cursor': cursor.urlsafe() if cursor else ''},\n queue_name='update-stats',\n countdown=countdown)\n return out", "def schedule(self, compute_env, scheduler_node, build_id=\"\", **kwargs):\n\n if not isinstance(self.data, Transform):\n raise NotImplementedError(\"cannot schedule non-Transform objects\")\n\n # this id is globally unique\n job_id = self.job_id\n\n if not build_id:\n build_id = str(uuid.uuid4())\n\n max_attempt = kwargs.pop(\"max_attempt\", 1)\n min_attempt = kwargs.pop(\"min_attempt\", 1)\n\n log.debug(\"build %s scheduling job %s...\", build_id, job_id)\n assert self._attempt is None\n\n def _submit_new_job(ctx, attempt_no=1):\n # fixme calculate attempts:\n if attempt_no < min_attempt:\n log.debug(\" jump starting job %s at attempt %d\",\n job_id, min_attempt)\n attempt_no = min_attempt\n\n if attempt_no > max_attempt:\n # allow the next try to start from attempt_no==1\n log.debug(\" maximum number of attempts (%d) exceeded. cancelling job.\", max_attempt)\n ctx.jobdata = \"\"\n ctx.jobattempt = 0\n ctx.save()\n scheduler_node.cancel()\n return\n\n # let the user's object calculate its resource requirements\n resources = self.data.task_resources(attempt=attempt_no) or {}\n\n remote_script_url = \"s3://%(bucket)s/jobs/%(envname)s/%(jobid)s/jobscript\" % {\n \"bucket\": config['storage']['tmp_bucket'],\n \"envname\": compute_env.name,\n \"jobid\": job_id\n }\n\n user_deps_prefix = \"s3://%(bucket)s/user_context/%(envname)s/\" % {\n \"bucket\": config['storage']['tmp_bucket'],\n \"envname\": compute_env.name,\n \"jobid\": job_id\n }\n\n user_deps_url = runtime.upload_user_context(user_deps_prefix)\n\n with io.BytesIO() as exec_fp:\n script_len = exec_fp.write(self.execution_transfer_script(resources).encode('utf-8'))\n exec_fp.seek(0)\n log.debug(\" uploading job script for job_id %s at %s ...\", job_id, remote_script_url)\n s3_streaming_put(exec_fp, remote_script_url, content_type=\"text/x-python\", content_length=script_len,\n logprefix=job_id + \" jobscript \")\n\n settings = {\n 'vcpus': resources.get('vcpus', None),\n 'memory': resources.get('memory', None),\n 'timeout': resources.get('timeout', -1),\n 'environment': {\n \"BUNNIES_VERSION\": __version__,\n \"BUNNIES_SUBMIT_TIME\": str(int(datetime.utcnow().timestamp()*1000)),\n \"BUNNIES_TRANSFER_SCRIPT\": remote_script_url,\n \"BUNNIES_USER_DEPS\": user_deps_url,\n \"BUNNIES_JOBID\": job_id,\n \"BUNNIES_ATTEMPT\": \"%d %d\" % (attempt_no, max_attempt),\n \"BUNNIES_RESULT\": os.path.join(self.data.output_prefix(), constants.TRANSFORM_RESULT_FILE),\n \"BUNNIES_BUILDID\": build_id\n }\n }\n\n if settings.get('timeout') <= 0:\n settings['timeout'] = 24*3600*7 # 7 days\n\n self._attempt = compute_env.submit_simple_batch_job(job_id, self._jobdef, **settings)\n self._attempt.meta['attempt_no'] = attempt_no\n self._attempt_ids.append({'attempt_no': attempt_no, 'job_id': self._attempt.job_id})\n # commit the new batch job id to the global kv store\n ctx.jobtype = \"batch\"\n ctx.jobdata = self._attempt.job_id\n ctx.jobattempt = attempt_no\n ctx.submitter = build_id\n ctx.save()\n scheduler_node.submit() # tell the bunnies scheduler that the job has been submitted\n return\n\n def _reuse_existing(ctx, job_obj, attempt_no):\n job_obj.meta['attempt_no'] = attempt_no\n self._attempt = compute_env.track_existing_job(job_obj)\n self._attempt_ids.append({'attempt_no': attempt_no, 
'job_id': self._attempt.job_id})\n scheduler_node.submit() # tell the bunnies scheduler that the job has been submitted\n return\n\n with kvstore.submit_lock_context(build_id, job_id) as ctx:\n ctx.load()\n if ctx.jobtype != \"batch\":\n raise ValueError(\"unhandled job type\")\n\n if not ctx.jobdata:\n # has never been submitted\n log.debug(\" job %s has not yet been submitted\", job_id)\n return _submit_new_job(ctx, attempt_no=1)\n else:\n log.debug(\" job %s has an existing submission: %s\", job_id, ctx.jobdata)\n\n last_attempt_id = ctx.jobdata\n last_attempt_no = int(ctx.jobattempt)\n\n # see if it's still tracked by AWS Batch\n job_obj = AWSBatchSimpleJob.from_job_id(last_attempt_id)\n if not job_obj:\n # no longer tracked\n log.debug(\" job information no longer available for %s. submitting new.\", last_attempt_id)\n return _submit_new_job(ctx, attempt_no=1)\n\n job_desc = job_obj.get_desc()\n job_status = job_desc['status']\n if job_status == \"FAILED\":\n log.debug(\" %s state=%s attempt=%d. submitting new attempt=%d\",\n last_attempt_id, job_status, last_attempt_no, last_attempt_no + 1)\n return _submit_new_job(ctx, attempt_no=last_attempt_no + 1)\n else:\n log.debug(\" %s state=%s attempt=%d. can be reused\",\n last_attempt_id, job_status, last_attempt_no)\n return _reuse_existing(ctx, job_obj, last_attempt_no)", "def _schedule_run(cls, workbook, task, outbound_context):\n\n def run_delayed_task():\n \"\"\"Runs the delayed task. Performs all the steps required to setup\n a task to run which are not already done. This is mostly code\n copied over from convey_task_result.\n \"\"\"\n db_api.start_tx()\n try:\n execution_id = task['execution_id']\n execution = db_api.execution_get(execution_id)\n\n # Change state from DELAYED to IDLE to unblock processing.\n\n WORKFLOW_TRACE.info(\"Task '%s' [%s -> %s]\"\n % (task['name'],\n task['state'], states.IDLE))\n\n db_task = db_api.task_update(task['id'],\n {\"state\": states.IDLE})\n task_to_start = [db_task]\n data_flow.prepare_tasks(task_to_start, outbound_context)\n db_api.commit_tx()\n finally:\n db_api.end_tx()\n\n if not states.is_stopped_or_finished(execution[\"state\"]):\n cls._run_tasks(task_to_start)\n\n task_spec = workbook.tasks.get(task['name'])\n retries, break_on, delay_sec = task_spec.get_retry_parameters()\n if delay_sec > 0:\n # Run the task after the specified delay.\n eventlet.spawn_after(delay_sec, run_delayed_task,\n context=auth_context.ctx())\n else:\n LOG.warn(\"No delay specified for task(id=%s) name=%s. Not \"\n \"scheduling for execution.\" % (task['id'], task['name']))", "def _tally(self, score):\n self._score[self._turn] += score", "def schedule(self, task_schedule, place_id=-1):\r\n resource = task_schedule.resource\r\n if place_id == -1:\r\n self.tasksOfResource[resource].append(task_schedule)\r\n else:\r\n self.tasksOfResource[resource].insert(place_id, task_schedule)\r\n if task_schedule.task.graph.name in self.job_task_schedule:\r\n pass\r\n else:\r\n self.job_task_schedule[task_schedule.task.graph.name] = {}\r\n self.job_task_schedule[task_schedule.task.graph.name][task_schedule.task.id] = task_schedule" ]
[ "0.6529968", "0.6498752", "0.63196087", "0.6048288", "0.60182256", "0.597258", "0.5806954", "0.57858235", "0.5773089", "0.5643624", "0.56128", "0.56077075", "0.5525462", "0.5467468", "0.54315627", "0.5422822", "0.5416013", "0.53985727", "0.53969085", "0.53964466", "0.5394362", "0.5364748", "0.53547543", "0.5341795", "0.5311866", "0.5307353", "0.5304281", "0.52876127", "0.5222419", "0.5218825" ]
0.68702924
0
sets finish times to inf. This basically means they are not completed
def initialize_finish_times_to_inf(self): for i in range(0, self.num_tasks): self.finish_time_per_task_dict[i] = np.inf
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_out_of_time(self):\n self.out_of_time = True", "def set_finish(self, t: float = 0.0) -> None:\n if not t:\n t = time()\n self.tfinish = t\n self.log.debug(\"%s %s\", self.prefix, {\"end-time\": self.tfinish})", "def finish(self, finish_time=None):\n pass", "def zero_timings(self):\r\n self.step = 0\r\n self.current_T = 0.0", "def finish_time(self, finish_time):\n\n self._finish_time = finish_time", "def finish_time(self, finish_time):\n\n self._finish_time = finish_time", "def finish_time(self, finish_time):\n\n self._finish_time = finish_time", "def end_time(self) -> float:\r\n ...", "def finish(self):\r\n if self._elapsed is None:\r\n self._elapsed = self._now() - self._start", "def end_time(self):\n pass", "def reset(self):\n self.cumtime = 0\n self.start_time = self.time()", "def burn_in_finished():\n global trials\n if trials <= 0:\n return True\n trials -= 1\n return False", "def _dummy_schedule(progress_remaining: float) -> float:\n del progress_remaining\n return 0.0", "def test_is_finished(self):\n experiment = Experiment(TasksMock())\n self.assertEquals(False, experiment.is_finished())\n for _ in range(0, 17):\n experiment.press_b_down(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_up(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_down(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_up(time.time())\n self.assertEquals(True, experiment.is_finished())", "def stop_timer(self):\n self.end_time = datetime.now()", "def _stop_clock(self):\n self._elapsed_time = time.time() - self._start", "def finish_simulation(self, current_time):\r\n for time_freed in self.free_slots:\r\n self.idle_ms += time_freed - current_time", "def stopTest(self, test):\n self.test_time = str(time.time() - self.start_time)", "def stop_timing_no_callback(self) -> None:\n self._is_timing = False", "def update_based_on_time(self):\n for counter, agent in enumerate(self.agents):\n if self.t >= agent.getFinishTime() and self.agent_current_task[counter] != -1: # task is finished\n task_num = self.agent_current_task[counter]\n self.finish_time_per_task_dict[task_num] = self.t\n self.is_task_finished[0][task_num] = 1\n agent.changebusy(False)\n self.update_agent_is_idle_based_on_class()", "def exhaust (self):\n self.counter = self.times", "def stop(self):\n self.times.append(time.time() - self.tik)\n return self.times[-1]", "def stop(self):\n self.times.append(time.time() - self.tik)\n return self.times[-1]", "def stop(self):\n self.times.append(time.time() - self.tik)\n return self.times[-1]", "def set_infinite(self):\n self.FHIT_C = 0", "def finished_tests(self):\n self.testing = 0", "def stop(self):\n# if self._start_time is None:\n elapsed_time = time.perf_counter() - self._start_time\n self._start_time = None", "def setToFinish(self):\n self.finish = True", "def _reset(self) -> ts.TimeStep:", "def stop(self) -> None:\n self.start_time = None\n self.job.elapsed += self.time_elapsed\n self.time_elapsed = None" ]
[ "0.6520086", "0.6353846", "0.6314697", "0.6176547", "0.60193926", "0.60193926", "0.60193926", "0.5974248", "0.57966334", "0.57080436", "0.5697412", "0.5680997", "0.5624751", "0.5587151", "0.5573249", "0.55686235", "0.55574065", "0.5505439", "0.5498414", "0.54834497", "0.54701835", "0.5466976", "0.5466976", "0.5466976", "0.54662913", "0.5458598", "0.5455016", "0.5423612", "0.54122055", "0.5403416" ]
0.816265
0
Computes Floyd Warshalls Updates agent locations (if they have reached a task move there) Updates implicit deadlines Updates agent distances based on updated agent locations Updates which tasks are alive, enabled and travel_constraint enabled
def update_floyd_warshall_and_all_vectors(self): self.graph.compute_floyd_warshal() # Update where agents are self.update_agent_location_vector() # update deadlines self.populate_deadline_vector() # update distances to each task and orientation to each task self.update_agent_distances_vector() self.update_agent_orientation_vector() self.update_alive_enabled_travel() return self.graph.is_feasible()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def computeForces(self, neighbors=[]): #computing forces to drive the agents and avoid collisions \n if not self.atGoal:\n if self.entry_state % 2 == 0 and len(self.entrancex) > 0 and self.id != 4 : #checks if assigned curve is entry and switches to state 1 to follow entry bezier curve\n time2=0.5 # time used to calculate driving force \n self.local_goal = [self.entrancex[0], self.entrancey[0]] #assigning waypoint as goal\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2))) #calculating direction vector\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez) #driving force\n self.entrancex = np.delete(self.entrancex,0) #eliminating the used waypoints from the list \n self.entrancey = np.delete(self.entrancey,0) #eliminating the used waypoints from the list \n \n elif self.force_state == 1 and (abs(self.pos[0] - self.goal[0]) >400 or abs(self.pos[1] - self.goal[1]) >400): #checks if force-based navigation is assigned, switches to state 2\n self.F = (self.gvel-self.vel)/self.ksi #driving force\n for neighbor in neighbors:\n if neighbor.id != self.id: #and not neighbor.atGoal: \n distSq = (neighbor.pos-self.pos).dot(neighbor.pos-self.pos)\n #print(distSq, self.dhorSq)\n if distSq < self.dhorSq: # neighbor is inside the sensing radius\n tau = self.ttc(neighbor)\n #print(tau, self.timehor)\n if tau < self.timehor: # will the two agents collide in less than timehor?\n dir = self.pos + self.vel*tau - neighbor.pos - neighbor.vel*tau \n length = sqrt(dir.dot(dir))\n if length > 0:\n dir = dir/length # the direction of the force\n mag = (self.timehor - tau)/(tau + 1e-6) # the magnitude of the force\n self.F += mag*dir # add the force\n \n else: #state 3 - following the exit bezier curve\n time2=0.5 # time used to calculate driving force\n self.local_goal = [self.exitx[0], self.exity[0]]\n if abs(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos)))) >10: #to reach first point of exit curve from agents previous state position\n self.F = ((self.local_goal - self.pos)/(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos) )))*self.prefspeed)/self.ksi\n else:\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2)))\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez)\n #print(self.pos, self.local_goal)\n if len(self.exitx) > 1 :\n self.exitx = np.delete(self.exitx,0)\n self.exity = np.delete(self.exity,0)", "def swarm_next_location(node_locations, agents_locations, agents_velocities, agents_angles):\n\n # Potential Field Gradient Calculation\n\n # Gradient of potential field\n dv = numpy.zeros((AGENT_COUNT, DIMENSION_COUNT)) # create an array of values\n\n for agent_it_1 in range(0, AGENT_COUNT):\n # Inter-Agent Forces\n for agent_it_2 in range(0, AGENT_COUNT):\n n_x = int(numpy.linalg.norm(numpy.subtract(agents_locations[agent_it_1], agents_locations[agent_it_2])))\n\n for dimension_it in range(0, DIMENSION_COUNT):\n delta_x = agents_locations[agent_it_1][dimension_it] - agents_locations[agent_it_2][dimension_it]\n dv[agent_it_1][dimension_it] = dv[agent_it_1][dimension_it] - long_range_repulsive * (\n delta_x / numpy.sqrt((SMOOTHNESS_COEFFICIENT ^ 2) + n_x ^ 2)) - 2 * (\n repulsive_gain / repulsive_aoe) * delta_x * numpy.exp((-n_x ^ 2) / repulsive_aoe)\n # Formation Attraction 
Forces\n if NODE_COUNT > 0:\n for node_it in range(0, NODE_COUNT):\n n_x = int(\n numpy.linalg.norm(\n numpy.subtract(agents_locations[agent_it_1],\n node_locations[node_it]))) # norm of the vector between two bots\n\n for dimension_it in range(0, DIMENSION_COUNT):\n delta_x = agents_locations[agent_it_1][dimension_it] - node_locations[node_it][dimension_it]\n dv[agent_it_1][dimension_it] = dv[agent_it_1][dimension_it] + ATTRACTIVE_GAIN * (\n delta_x / numpy.sqrt((SMOOTHNESS_COEFFICIENT ^ 2) + n_x ^ 2)) + (\n short_range_attractive / attractive_aoe) * delta_x * numpy.exp((-n_x ^ 2) /\n attractive_aoe)\n sliding_surface = numpy.add(agents_velocities, dv)\n # Saturation Block [sat(s)]\n\n sx = numpy.zeros(numpy.size(sliding_surface[0]))\n for agent_it_1 in range(0, AGENT_COUNT):\n for dimension_it in range(0, DIMENSION_COUNT):\n if abs(sliding_surface[agent_it_1][dimension_it]) > SATURATION_LEVEL:\n # FIXME: not sure if this fix was correct but I changed Sx(ip, di) -> sx[ip+di] based on values found\n # in MATLAB code sample\n sx[agent_it_1 + dimension_it] = numpy.sign(sliding_surface[agent_it_1][dimension_it]) * SATURATION_LEVEL\n else:\n sx[agent_it_1 + dimension_it] = sliding_surface[agent_it_1][dimension_it]\n # Gains\n\n c = numpy.zeros((AGENT_COUNT, DIMENSION_COUNT))\n k = numpy.zeros((AGENT_COUNT, DIMENSION_COUNT))\n\n # TODO: should be able to make the loop faster somehow\n # row by row multiplication\n for agent_it_1 in range(0, AGENT_COUNT):\n c[agent_it_1] = numpy.multiply(agents_velocities[agent_it_1], REACHING_GAINS)\n k[agent_it_1] = numpy.multiply(sx[agent_it_1], SLIDING_GAINS)\n\n u0 = k + c\n\n print(u0)\n\n return u0", "def update_alive_enabled_travel(self):\n self.is_task_alive = np.ones((1, self.num_tasks)) # 1 if alive\n self.is_task_enabled = np.ones((1, self.num_tasks)) # 1 if enabled\n self.travel_time_constraint_satisfied = np.ones((2, self.num_tasks)) # 1 if satisfied\n\n # ALIVE\n for each_task, i in enumerate(self.task_vertex_numbers_for_start):\n # make sure element of first column is less than zero\n if self.graph.M[i][0] <= 0:\n name_of_task_being_checked = self.graph.names_of_vertex[i]\n # find all tasks associated with each node\n # for every element task points to\n for element in self.graph.vertices[i].points_to:\n num = self.graph.gamma[element]\n name_of_ele = self.graph.names_of_vertex[num]\n\n if name_of_ele == 'start':\n continue\n elif name_of_ele == 'end':\n continue\n elif num == i + 1: # is it the end of the task\n continue\n else:\n # task has been found\n c = int((re.findall('\\d+', name_of_ele))[0])\n if self.is_task_finished[0][c - 1] == 0:\n self.is_task_alive[0][each_task] = 0\n if self.DEBUG:\n print('tasks that are alive', self.is_task_alive)\n\n # ENABLED\n for each_task, i in enumerate(self.task_vertex_numbers_for_start):\n # make sure element of first column is less than zero, # TODO: figure out why\n if self.graph.M[i][0] <= 0:\n name_of_task_being_checked = self.graph.names_of_vertex[i]\n # find all tasks associated with each node\n # for every element task points to\n for element in self.graph.vertices[i].points_to:\n num = self.graph.gamma[element] # num of vertex as in M matrix\n name_of_ele = self.graph.names_of_vertex[num]\n weight = self.graph.vertices[i].points_to[element]\n if name_of_ele == 'start':\n continue\n elif name_of_ele == 'end':\n continue\n elif num == i + 1: # is it the end of the task\n continue\n elif self.is_task_alive[0][each_task] == 0:\n # if task is not alive, it cannot be enabled\n 
self.is_task_enabled[0][each_task] = 0\n else:\n # task that is alive has been been found\n if self.t < self.finish_time_per_task_dict[((num - 1) / 2) - 1] + np.abs(weight):\n self.is_task_enabled[0][each_task] = 0\n if self.DEBUG:\n print('tasks that are enabled', self.is_task_enabled)\n\n # Travel Time Enabled\n for agent_num, agent in enumerate(self.agents):\n for each_task, i in enumerate(self.task_vertex_numbers_for_start):\n # make sure element of first column is less than zero\n if self.graph.M[i][0] <= 0:\n name_of_task_being_checked = self.graph.names_of_vertex[i]\n # find all tasks associated with each node\n # for every element task points to\n for element in self.graph.vertices[i].points_to:\n num = self.graph.gamma[element] # num of vertex as in M matrix\n name_of_ele = self.graph.names_of_vertex[num]\n weight = self.graph.vertices[i].points_to[element]\n task_number = int((num - 1) / 2)\n if len(self.graph.vertices[i].points_to) == 2:\n if self.t < self.agents[agent_num].curr_finish_time + self.agent_distances[agent_num][each_task] / self.agents[\n agent_num].getv():\n self.travel_time_constraint_satisfied[agent_num][each_task] = 0\n continue\n if name_of_ele == 'start':\n continue\n elif name_of_ele == 'end':\n continue\n elif num == i + 1: # is it the end of the task\n continue\n else: # more than 2 constraints\n\n if self.t < self.finish_time_per_task_dict[task_number - 1] + \\\n self.agent_distances[agent_num][each_task] / self.agents[agent_num].getv():\n self.travel_time_constraint_satisfied[agent_num][each_task] = 0\n if self.t < self.agents[agent_num].curr_finish_time + self.agent_distances[agent_num][each_task] / self.agents[\n agent_num].getv():\n self.travel_time_constraint_satisfied[agent_num][each_task] = 0\n\n if self.DEBUG:\n print('tasks that are travel_constraint satisfied', self.travel_time_constraint_satisfied)", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n\n\n\n path = [starting_car_location]\n dict = {}\n index = 0\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == starting_car_location:\n index = i\n\n path = [index]\n\n G, m = adjacency_matrix_to_graph(adjacency_matrix)\n\n home_indexes = []\n\n for home in list_of_homes:\n for i in range(len(list_of_locations)):\n if list_of_locations[i] == home:\n home_indexes.append(i)\n break\n\n new_adjacency = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n # for sake of figuring out where to walk\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, index, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2:\n di_path = nx.dijkstra_path(G, home1, home2)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n\n all_driving_path = list(nx.dfs_edges(G2))\n\n\n\n\n walking_to = []\n walking_from = {}\n\n for i in range(len(new_adjacency)):\n if i in home_indexes:\n count = 0\n edge_to = 0\n for j in range(len(new_adjacency)):\n if new_adjacency[i][j] != \"x\":\n count += 1\n edge_to = j\n\n #must ensure that this is not a 
home that we are already dropping someone off at, otherwise it will cut off a line of two homes\n if count == 1 and i != index and i not in walking_from.keys():\n new_adjacency[i][edge_to] = \"x\"\n new_adjacency[edge_to][i] = \"x\"\n walking_to.append(i)\n if edge_to in walking_from:\n walking_from[edge_to] = walking_from[edge_to] + [i]\n else:\n walking_from[edge_to] = [i]\n\n #\n # for i in range(len(all_driving_path) - 1):\n # #if first vertex in edge is the same, we should walk\n # if all_driving_path[i][0] == all_driving_path[i + 1][0]:\n # print(all_driving_path[i][0])\n # print(all_driving_path[i][1])\n # #get rid of only edge connected to this home\n # new_adjacency[all_driving_path[i][0]][all_driving_path[i][1]] = \"x\"\n # new_adjacency[all_driving_path[i][1]][all_driving_path[i][0]] = \"x\"\n # walking_to.append(all_driving_path[i][1])\n # if all_driving_path[i][0] in walking_from:\n # walking_from[all_driving_path[i][0]] = walking_from[all_driving_path[i][0]] + [all_driving_path[i][1]]\n # else:\n # walking_from[all_driving_path[i][0]] = [all_driving_path[i][1]]\n\n\n\n dropoff_locations = list(walking_from.keys())\n for loc in dropoff_locations:\n if loc in home_indexes:\n dropoff_locations.remove(loc)\n\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G, loc, home)\n for i in range(len(di_path) - 1):\n new_adjacency[di_path[i]][di_path[i + 1]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n new_adjacency[di_path[i + 1]][di_path[i]] = adjacency_matrix[di_path[i]][di_path[i + 1]]\n\n\n G2, m = adjacency_matrix_to_graph(new_adjacency)\n # G = G2\n # pos=nx.spring_layout(G2)\n # nx.draw_networkx_nodes(G2,pos)\n # nx.draw_networkx_labels(G2, pos)\n # nx.draw_networkx_edges(G2,pos,width=1.0,alpha=0.5)\n #\n # plt.draw()\n # plt.show()\n\n # condensed shortest paths to edges - use G3 for real\n\n new_adjacency2 = [[\"x\" for i in range(len(list_of_locations))] for j in range(len(list_of_locations))]\n\n for home in home_indexes:\n if home not in walking_to:\n di_path = nx.dijkstra_path(G2, index, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n for home1 in home_indexes:\n for home2 in home_indexes:\n if not home1 == home2 and home1 not in walking_to and home2 not in walking_to:\n di_path = nx.dijkstra_path(G2, home1, home2)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n di_path = nx.dijkstra_path(G2, index, loc)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += 
new_adjacency[di_path[i]][di_path[i + 1]]\n\n for loc in dropoff_locations:\n for home in home_indexes:\n di_path = nx.dijkstra_path(G2, loc, home)\n start = di_path[0]\n end = di_path[len(di_path) - 1]\n new_adjacency2[start][end] = 0\n new_adjacency2[end][start] = 0\n for i in range(len(di_path) - 1):\n new_adjacency2[start][end] += new_adjacency[di_path[i]][di_path[i + 1]]\n new_adjacency2[end][start] += new_adjacency[di_path[i]][di_path[i + 1]]\n\n\n\n\n final_G, m = adjacency_matrix_to_graph(new_adjacency2)\n drive_path = list(nx.dfs_edges(final_G, source=index))\n drive_path.append(index)\n\n mst = nx.minimum_spanning_tree(final_G)\n\n\n\n new_mst = nx.MultiGraph(mst)\n for edge in mst.edges():\n new_mst.add_edge(edge[0], edge[1])\n\n\n if new_mst.degree[index] != 0:\n to_remove = []\n for node in new_mst:\n if (new_mst.degree[node] == 0):\n to_remove.append(node)\n new_mst.remove_nodes_from(to_remove)\n\n eulerian = list(nx.eulerian_circuit(new_mst, index))\n\n path = []\n for edge in eulerian:\n path.append(edge[0])\n\n path.append(eulerian[len(eulerian) - 1][1])\n\n already_seen = []\n to_remove = []\n for i in range(len(path) - 1):\n if path[i] in already_seen:\n to_remove.append(i)\n else:\n already_seen.append(path[i])\n\n new_path = []\n for i in range(len(path) - 1):\n if i not in to_remove:\n new_path.append(path[i])\n path = new_path\n print(eulerian)\n else:\n path = [index]\n print(path)\n\n\n\n\n\n\n\n # print(path)\n final_path = []\n for node in path:\n if node == index:\n final_path.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path.append(node)\n # print(\"Dropoff loc: \", node)\n final_path.append(index)\n #print(walking_from)\n # print(final_path)\n # nx.draw(mst)\n # plt.draw()\n # plt.show()\n for node in final_path:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path = []\n for i in range(len(final_path) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path[i], final_path[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n very_final_path.append(condensed_path[j])\n\n if len(very_final_path) >= 1 and [len(very_final_path) - 1] != index:\n very_final_path.append(index)\n\n if len(very_final_path) == 0:\n very_final_path = [index]\n\n print(very_final_path)\n print(dict)\n\n\n path2 = list(nx.dfs_preorder_nodes(mst, index))\n\n final_path2 = []\n for node in path2:\n if node == index:\n final_path2.append(node)\n # print(\"Index: \", node)\n elif node in home_indexes and node not in walking_to:\n final_path2.append(node)\n # print(\"Home but not walking: \", node)\n elif node in dropoff_locations:\n final_path2.append(node)\n # print(\"Dropoff loc: \", node)\n final_path2.append(index)\n\n\n for node in final_path2:\n if node in walking_from and node in home_indexes:\n dict[node] = [node] + walking_from[node]\n elif node in home_indexes:\n dict[node] = [node]\n elif node in walking_from:\n dict[node] = walking_from[node]\n\n very_final_path2 = []\n for i in range(len(final_path2) - 1):\n condensed_path = nx.dijkstra_path(G2, final_path2[i], final_path2[i+1])\n for j in range(len(condensed_path) - 1):\n if condensed_path[j] != condensed_path[j + 1]:\n 
very_final_path2.append(condensed_path[j])\n\n if len(very_final_path2) >= 1 and [len(very_final_path2) - 1] != index:\n very_final_path2.append(index)\n\n if len(very_final_path2) == 0:\n very_final_path2 = [index]\n\n opt1 = cost_of_solution(G, very_final_path, dict)\n opt2 = cost_of_solution(G, very_final_path2, dict)\n\n ultra_final_path = []\n if (opt1 <= opt2):\n ultra_final_path = very_final_path\n else:\n ultra_final_path = very_final_path2\n\n return ultra_final_path, dict\n\n pass", "def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum", "def run_floyd_warshall_path_reconstruction(vertices, distances, edges, next_vertex):\n\n \"\"\"\n Notes:\n - no negative cycles, smallest trip time is 60s\n - theta(n^3) runtime\n \"\"\"\n\n # initialize inner dictionaries for each vertex and handle self-loops\n for vertex in vertices:\n distances[vertex] = {}\n distances[vertex][vertex] = 0\n next_vertex[vertex] = {}\n next_vertex[vertex][vertex] = None\n\n # for each vertex-vertex pairing, initialize distances and next_vertex\n for vertex_1 in vertices:\n for vertex_2 in vertices:\n if vertex_1 == vertex_2:\n continue\n if vertex_1 in edges and vertex_2 in edges[vertex_1]:\n distances[vertex_1][vertex_2] = edges[vertex_1][vertex_2]\n next_vertex[vertex_1][vertex_2] = vertex_2\n else:\n distances[vertex_1][vertex_2] = float('inf')\n next_vertex[vertex_1][vertex_2] = None\n\n for k in sorted(vertices):\n for i in sorted(vertices):\n for j in sorted(vertices):\n if distances[i][j] > distances[i][k] + distances[k][j]:\n distances[i][j] = distances[i][k] + 
distances[k][j]\n next_vertex[i][j] = next_vertex[i][k]", "def optimize(self):\n # Loop through every WD and WS individually\n wd_array = self.fi_subset.floris.flow_field.wind_directions\n ws_array = self.fi_subset.floris.flow_field.wind_speeds\n for nwsi, ws in enumerate(ws_array):\n\n self.fi_subset.reinitialize(wind_speeds=[ws])\n\n for nwdi, wd in enumerate(wd_array):\n # Find turbines to optimize\n turbs_to_opt = self._turbs_to_opt_subset[nwdi, nwsi, :]\n if not any(turbs_to_opt):\n continue # Nothing to do here: no turbines to optimize\n\n # Extract current optimization problem variables (normalized)\n yaw_lb = self._minimum_yaw_angle_subset_norm[nwdi, nwsi, turbs_to_opt]\n yaw_ub = self._maximum_yaw_angle_subset_norm[nwdi, nwsi, turbs_to_opt]\n bnds = [(a, b) for a, b in zip(yaw_lb, yaw_ub)]\n x0 = self._x0_subset_norm[nwdi, nwsi, turbs_to_opt]\n\n J0 = self._farm_power_baseline_subset[nwdi, nwsi]\n yaw_template = self._yaw_angles_template_subset[nwdi, nwsi, :]\n turbine_weights = self._turbine_weights_subset[nwdi, nwsi, :]\n yaw_template = np.tile(yaw_template, (1, 1, 1))\n turbine_weights = np.tile(turbine_weights, (1, 1, 1))\n\n # Define cost function\n def cost(x):\n x_full = np.array(yaw_template, copy=True)\n x_full[0, 0, turbs_to_opt] = x * self._normalization_length\n return (\n - 1.0 * self._calculate_farm_power(\n yaw_angles=x_full,\n wd_array=[wd],\n turbine_weights=turbine_weights\n )[0, 0] / J0\n )\n\n # Perform optimization\n residual_plant = minimize(\n fun=cost,\n x0=x0,\n bounds=bnds,\n method=self.opt_method,\n options=self.opt_options,\n )\n\n # Undo normalization/masks and save results to self\n self._farm_power_opt_subset[nwdi, nwsi] = -residual_plant.fun * J0\n self._yaw_angles_opt_subset[nwdi, nwsi, turbs_to_opt] = (\n residual_plant.x * self._normalization_length\n )\n\n # Finalize optimization, i.e., retrieve full solutions\n df_opt = self._finalize()\n return df_opt", "def floyd_warshall(self):\n distance = {}\n path_dict = {}\n for from_node in self.nodes():\n distance[from_node] = {}\n path_dict[from_node] = {}\n for node in self.nodes():\n distance[from_node][node] = sys.maxsize\n path_dict[from_node][node] = None\n distance[from_node][from_node] = 0\n neighbors = self.neighbors(from_node)\n for neighbor in neighbors:\n distance[from_node][neighbor] = neighbors[neighbor]\n path_dict[from_node][neighbor] = neighbor\n for k in self.nodes():\n for i in self.nodes():\n for j in self.nodes():\n if distance[i][k] + distance[k][j] < distance[i][j]:\n distance[i][j] = distance[i][k] + distance[k][j]\n path_dict[i][j] = path_dict[i][k]\n return path_dict, distance", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n loc_map = {}\n drop_off_dict = {}\n num_home_visited = 0\n\n \"\"\"\n for i in range(len(list_of_locations)):\n loc_map[i] = list_of_locations[0]\n \"\"\"\n\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n num_homes = len(list_of_homes)\n\n car_path = []\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n visited = set()\n\n #print(start)\n car_path.append(start)\n current_node = start\n\n if start in home_indexes:\n visited.add(start)\n drop_off_dict[start] = [start]\n num_home_visited += 1\n\n while num_home_visited < num_homes:\n dist_dict = all_paths.get(current_node)[0]\n paths_dict = all_paths.get(current_node)[1]\n\n dist_dict = {k:v for (k,v) in 
dist_dict.items() if k not in visited and k in home_indexes}\n min_dist = min(dist_dict.values())\n min_list = [k for k in dist_dict.keys() if dist_dict[k] <= min_dist]\n #print(dist_dict.values())\n target = min_list[0]\n drop_off_dict[target] = [target]\n #print(target+1)\n #print(target)\n car_path.pop()\n car_path.extend(paths_dict[target])\n\n visited.add(target)\n current_node = target\n num_home_visited += 1\n\n paths_dict = all_paths.get(current_node)[1]\n car_path.pop()\n car_path.extend(paths_dict[start])\n #print((drop_off_dict.keys()))\n #car_path = [start, ...., start]\n #drop_off_dict = {drop_off_loc: [home1, home2, ...] }\n\n return car_path, drop_off_dict", "def Optimizer(r_grasp,PAM_r, PAM_s, object_s, object_f, object_params, phi, r_max, walls, obstacles, obstacles_PAM, current_leg, n, n_p, v_max, force_max, legs, dt):\n global action_push_pull, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned\n # assigning cost of changing from one leg to another based on the distance to the desired pose\n cost_ChangeLeg = 1\n dz_final = np.sqrt((object_s.x - object_f.x) ** 2 + (object_s.y - object_f.y) ** 2)\n if dz_final < 1:\n cost_ChangeLeg = 10\n elif dz_final < 2:\n cost_ChangeLeg = 20\n else:\n cost_ChangeLeg = 10\n\n # assigning weight for cost of predicted repositioning and cost of robot motion\n w_cost_reposition = 40\n w_cost_motion = 10\n\n # finding object's leg cordinates\n object_leg = find_corners(object_s.x, object_s.y, object_s.phi, object_params[7], object_params[8])\n\n # initialization (initializeing cost to infinity)\n cost = [float('inf'), float('inf'), float('inf'), float('inf')]\n cost_legchange = [0, 0, 0, 0]\n cost_PAM = [[0, 0],[0, 0],[0, 0],[0, 0]]\n cost_manipulation = [0, 0, 0, 0]\n cost_motion = [0, 0, 0, 0]\n force = [0, 0, 0, 0]\n path = [[[], []], [[], []], [[], []], [[], []]]\n planned_path_w = [[],[],[],[]]\n PAM_g = [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]]\n command = [[], [], [], []]\n des = [[], [], [], [], []]\n PAM_goal = state()\n\n # find the nominal trajectory for manipulation\n theta = nominal_traj([object_s.x,object_s.y,object_s.phi], [object_f.x,object_f.y,object_f.phi], v_max, walls, obstacles, n, dt)\n\n # itterate through each leg to find the leg with minimum cost\n for leg in range(4):\n phi_linear = theta\n psi_linear = [theta[k] + phi[leg] for k in range(len(theta))]\n \t# find the cost and required force for manipulation for the leg\n force[leg], cost_manipulation[leg], planned_path_w[leg], command[leg], des= OptTraj([object_s.x, object_s.y, object_s.phi, object_s.xdot, object_s.ydot, object_s.phidot], [object_f.x, object_f.y, object_f.phi, object_f.xdot, object_f.ydot, object_f.phidot], v_max, walls, obstacles, object_params[0:4], object_params[4:7], phi_linear, psi_linear, force_max, r_max[leg], n, dt, object_leg[leg])\n \t# adding cost of changing leg\n if leg != current_leg:\n cost_legchange[leg] = cost_ChangeLeg\n # adding cost of PAM motion to PAM goal pose\n phi0 = np.arctan2(object_leg[leg][1]-object_s.y,object_leg[leg][0]-object_s.x)\n # finding the better option between pulling and pushing for each leg, with the same manipulation plan\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost_PAM[leg][push_pull], path[leg][push_pull], command_pam, goal_orientation = OptPath([PAM_s.x, 
PAM_s.y, PAM_s.phi], PAM_g[leg][push_pull], walls, obstacles_PAM, n_p, dt)\n if cost_PAM[leg][push_pull]!= float(\"inf\"):\n PAM_s_sim = copy.deepcopy(PAM_s)\n PAM_s_sim.x, PAM_s_sim.y, PAM_s_sim.phi = [PAM_r * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], PAM_r * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n # adding cost of predicted re-positionings\n n_transition = traj_simulation(copy.deepcopy(PAM_s_sim), copy.deepcopy(object_s), force[leg], legs, leg, command[leg])\n # print(n_transition)\n cost_PAM[leg][push_pull] += w_cost_reposition*n_transition\n cost_motion[leg] += min(cost_PAM[leg])*w_cost_motion\n action_push_pull[leg] = np.argmin(cost_PAM[leg])\n else:\n phi0 = np.arctan2(force[leg][0][1], force[leg][0][0])\n for push_pull in [0,1]:\n PAM_g[leg][push_pull] = [r_grasp * np.cos(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][0], r_grasp * np.sin(phi0) * np.sign(push_pull * 2 - 1) + object_leg[leg][1], np.pi * push_pull + phi0]\n cost = [cost_legchange[leg] + cost_motion[leg] + cost_manipulation[leg] for leg in range(4)]\n\n if min(cost) < float(\"inf\"):\n \t[min_index, min_value] = [np.argmin(cost), min(cost)]\n \t# Finding the grasping goal pose based on the selected plan\n \tphi0 = np.arctan2(object_leg[min_index][1]-object_s.y,object_leg[min_index][0]-object_s.x)\n \tgrasping_goal = [PAM_r * np.cos(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][0], PAM_r * np.sin(phi0) * np.sign(action_push_pull[min_index] * 2 - 1) + object_leg[min_index][1], np.pi * action_push_pull[min_index] + phi0]\n \tPAM_goal = state()\n \tPAM_goal.x, PAM_goal.y, PAM_goal.phi = PAM_g[min_index][action_push_pull[min_index]]\n \tobject_path_planned = Path()\n \tobject_path_planned.header.frame_id = 'frame_0'\n \tfor i in range(len(planned_path_w[min_index])):\n \t\tpose = PoseStamped()\n \t\tpose.pose.position.x = planned_path_w[min_index][i][0]\n \t\tpose.pose.position.y = planned_path_w[min_index][i][1]\n \t\tpose.pose.position.z = 0\n \t\tobject_path_planned.poses.append(pose)\n\n \tPAM_path_planned = Path()\n \tPAM_path_planned.header.frame_id = 'frame_0'\n \tif min_index != current_leg:\n \t\tfor i in range(len(path[min_index][action_push_pull[min_index]])):\n \t\t\tpose = PoseStamped()\n \t\t\tpose.pose.position.x, pose.pose.position.y, pose.pose.orientation.z =path[min_index][action_push_pull[min_index]][i]\n \t\t\tPAM_path_planned.poses.append(pose)\n else:\n \tmin_index = 5\n \tmin_value = float(\"inf\")\n if 0 < min_index and min_index <= 4:\n force_d = force[min_index][0]\n else:\n force_d = [0,0,0]\n\n return cost ,min_index, force_d, PAM_goal, grasping_goal, object_path_planned, PAM_path_planned", "def solve_tsp(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n drop_off_dict = {}\n car_path = []\n home_map = {}\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n\n start_in_home = start in home_indexes\n if start in home_indexes:\n home_indexes.remove(start)\n home_indexes.insert(0, start)\n home_count = 0;\n\n for home in home_indexes:\n #print(home, end = \" \")\n home_map[home_count] = home\n home_count += 1\n # Instantiate the data problem.\n #print(len(home_map))\n data = create_data_model(home_indexes, 0)\n\n # Create the routing index manager.\n manager = 
pywrapcp.RoutingIndexManager(len(data['locations']),\n data['num_vehicles'], data['depot'])\n\n #print(manager.NodeToIndex(15))\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n #print(home_map[to_index], end = \" \")\n from_index = manager.IndexToNode(from_index)\n to_index = manager.IndexToNode(to_index)\n dist_to = all_paths.get(home_map[from_index])[0][home_map[to_index]]\n #if from_index >= 25 or to_index >= 25:\n # print(\"from\" if from_index >= 25 else \"to\", end = \" \")\n #dist_to = all_paths[from_index][0][to_index]\n return dist_to\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n \"\"\"\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n \"\"\"\n\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.local_search_metaheuristic = (\n routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.time_limit.seconds = 3\n #search_parameters.log_search = True\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # if assignment:\n # print_solution(manager, routing, assignment)\n # Print solution on console.\n\n if start in home_indexes:\n drop_off_dict[start] = [start]\n\n\n index = routing.Start(0)\n car_path.append(start)\n\n while not routing.IsEnd(index):\n previous_index = manager.IndexToNode(index)\n index = assignment.Value(routing.NextVar(index))\n\n car_path.pop();\n to_index = manager.IndexToNode(index)\n path_to = all_paths.get(home_map[previous_index])[1][home_map[to_index]]\n drop_off_dict[home_map[to_index]] = [home_map[to_index]]\n #print(to_index, end = ' ')\n car_path.extend(path_to)\n #route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n # for i in car_path:\n # print(i)\n if start in drop_off_dict.keys() and not start_in_home:\n drop_off_dict.pop(start, None)\n\n return car_path, drop_off_dict", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n locations = student_utils.convert_locations_to_indices(list_of_locations, list_of_locations)\n homes = student_utils.convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n\n start_time = time.time()\n\n if params[0] == 'naive':\n car_path, drop_off = naive_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy':\n car_path, drop_off = greedy_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'three_opt':\n car_path, drop_off = three_opt_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'ant_colony':\n car_path, drop_off = ant_colony(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 
'greedy_clustering_three_opt':\n car_path, drop_off = greedy_clustering_three_opt(locations, homes, start, adjacency_matrix, int(params[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'mst':\n car_path, drop_off = mst_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'two_opt':\n car_path, drop_off = two_opt_solver(locations, homes, start, adjacency_matrix)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n elif params[0] == 'greedy_clustering_two_opt':\n car_path, drop_off = greedy_clustering_two_opt(locations, homes, start, adjacency_matrix, int(params[1]))\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n return car_path, drop_off\n else:\n pass", "def search_paths_agent_to_goal(self, robot_x, robot_y, goal_x, goal_y, G, road_node_Nos, road_node_info,\n road_lines, road_directions, road_lines_num, node_edges):\n # add target node\n target_node_coordinate = np.zeros((1, 2))\n target_node_coordinate[0][0] = goal_x\n target_node_coordinate[0][1] = goal_y\n target_node = None\n\n for (key, value) in road_node_info.items():\n if math.sqrt((value[0]-target_node_coordinate[0][0])**2 + (value[1]-target_node_coordinate[0][1])**2) <= 0.01:\n target_node = key\n\n if target_node == 0:\n print(target_node)\n raise Exception(\"wrong target node\", target_node)\n\n # Check whether the robot is on the road node or not\n at_node = False\n for (key, value) in road_node_info.items():\n if key == 0:\n continue\n if value[0] == robot_x and value[1] == robot_y:\n at_node = True\n agent_node_No = key\n\n if at_node == False:\n # add agent node\n agent_node_No = 0\n agent_node_coordinate = np.zeros((1, 2))\n agent_node_coordinate[0][0] = robot_x\n agent_node_coordinate[0][1] = robot_y\n agent_node = dict(zip([agent_node_No], agent_node_coordinate))\n road_node_info.update(agent_node)\n\n # add node\n env_node_Nos = [agent_node_No] + road_node_Nos\n G.add_nodes_from(env_node_Nos)\n\n # add edges from agent to the nearest road line\n # calculate the distance from the agent to the lines\n agent_line_dist = []\n for i in range(road_lines_num):\n cross = (road_lines[i][2] - road_lines[i][0]) * (agent_node_coordinate[0][0] - road_lines[i][0]) \\\n + (road_lines[i][3] - road_lines[i][1]) * (agent_node_coordinate[0][1] - road_lines[i][1])\n if cross <= 0:\n agent_line_dist.append(np.sqrt((agent_node_coordinate[0][0] - road_lines[i][0]) ** 2\n + (agent_node_coordinate[0][1] - road_lines[i][1]) ** 2))\n continue\n\n d2 = (road_lines[i][2] - road_lines[i][0]) ** 2 + (road_lines[i][3] - road_lines[i][1]) ** 2\n if cross >= d2:\n agent_line_dist.append(np.sqrt((agent_node_coordinate[0][0] - road_lines[i][2]) ** 2\n + (agent_node_coordinate[0][1] - road_lines[i][3]) ** 2))\n continue\n r = cross / d2\n p0 = road_lines[i][0] + (road_lines[i][2] - road_lines[i][0]) * r\n p1 = road_lines[i][1] + (road_lines[i][3] - road_lines[i][1]) * r\n agent_line_dist.append(\n np.sqrt((agent_node_coordinate[0][0] - p0) ** 2 + (agent_node_coordinate[0][1] - p1) ** 2))\n\n # find the nearest line index\n agent_line_dist_shortest = float(\"inf\")\n agent_line_shortest_index = 0\n\n for index, item in enumerate(agent_line_dist):\n if item < agent_line_dist_shortest:\n agent_line_shortest_index = index\n agent_line_dist_shortest = item\n\n # find the shortest line's node\n agent_line_shortest_node0 = None\n 
agent_line_shortest_node1 = None\n\n for (key, value) in road_node_info.items():\n if value[0] == road_lines[agent_line_shortest_index][0] and value[1] == \\\n road_lines[agent_line_shortest_index][1]:\n agent_line_shortest_node0 = key\n if value[0] == road_lines[agent_line_shortest_index][2] and value[1] == \\\n road_lines[agent_line_shortest_index][3]:\n agent_line_shortest_node1 = key\n\n # add new edges from the agent node to road note\n if road_directions[agent_line_shortest_index] == 0:\n node_edges.append([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 1:\n node_edges.append([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 2:\n node_edges.append([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n node_edges.append([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n else:\n raise ValueError('wrong direction')\n\n G.add_edges_from(node_edges)\n simple_paths_list = list()\n if agent_node_No not in G or target_node not in G:\n has_path = False\n G.clear()\n else:\n if nx.has_path(G, source=agent_node_No, target=target_node):\n simple_paths = nx.shortest_simple_paths(G, source=agent_node_No, target=target_node, weight='len')\n\n for path in simple_paths:\n simple_paths_list.append(path)\n\n for path in simple_paths_list:\n if path[1] == agent_line_shortest_node1:\n path[0] = agent_line_shortest_node0\n elif path[1] == agent_line_shortest_node0:\n path[0] = agent_line_shortest_node1\n else:\n raise ValueError('First node Error!')\n\n remove_paths_list = list()\n for path in simple_paths_list:\n for path_rest in simple_paths_list[simple_paths_list.index(path) + 1:]:\n if path == path_rest[- len(path):]:\n remove_paths_list.append(path_rest)\n\n for remove_path in remove_paths_list:\n if remove_path in simple_paths_list:\n simple_paths_list.remove(remove_path)\n\n # Choose 1 simple paths\n if len(simple_paths_list) > 1:\n simple_paths_list = simple_paths_list[0:1]\n\n # remove edges from the agent node to road note\n if road_directions[agent_line_shortest_index] == 0:\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 1:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 2:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n 
(road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n else:\n raise ValueError('wrong direction')\n\n has_path = True\n G.clear()\n else:\n # remove edges from the agent node to road note\n if road_directions[agent_line_shortest_index] == 0:\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 1:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n elif road_directions[agent_line_shortest_index] == 2:\n node_edges.remove([agent_node_No, agent_line_shortest_node0, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node0][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node0][1] - agent_node_coordinate[0][1]) ** 2)}])\n node_edges.remove([agent_node_No, agent_line_shortest_node1, {'len': np.sqrt(\n (road_node_info[agent_line_shortest_node1][0] - agent_node_coordinate[0][0]) ** 2 + (\n road_node_info[agent_line_shortest_node1][1] - agent_node_coordinate[0][1]) ** 2)}])\n else:\n raise ValueError('wrong direction')\n\n has_path = False\n G.clear()\n else:\n G.add_edges_from(node_edges)\n simple_paths_list = list()\n # 判断站点是否在路网上\n if agent_node_No not in G or target_node not in G:\n has_path = False\n G.clear()\n else:\n # 判断站点和目标间是否存在路径\n if nx.has_path(G, source=agent_node_No, target=target_node):\n # 提取所有简单路径\n simple_paths = nx.shortest_simple_paths(G, source=agent_node_No, target=target_node, weight='len')\n\n for path in simple_paths:\n simple_paths_list.append(path)\n\n # 移除带有回环的路网\n remove_paths_list = list()\n for path in simple_paths_list:\n for path_rest in simple_paths_list[simple_paths_list.index(path) + 1:]:\n if path == path_rest[- len(path):]:\n remove_paths_list.append(path_rest)\n\n for remove_path in remove_paths_list:\n if remove_path in simple_paths_list:\n simple_paths_list.remove(remove_path)\n\n # 提取最多2条路径\n if len(simple_paths_list) > 2:\n simple_paths_list = simple_paths_list[0:2]\n\n # 确认存在路径\n has_path = True\n G.clear()\n else:\n # 不存在路径\n has_path = False\n G.clear()\n\n return simple_paths_list, has_path", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = 
self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def approach_gps(g_lat,g_lon,emily_lat_start, emily_lon_start, pose_rad, Parameters): #approach a gps position using potential fields\r\n\tx_goal,y_goal = latlongtoxy(g_lat,g_lon,g_lat)\r\n\tx_e_start,y_e_start = latlongtoxy(emily_lat_start,emily_lon_start,g_lat)\r\n\r\n\tprint (\"\\n HERE I AM\\n\\n\")\r\n\r\n\tdist = haver_distance(g_lat, g_lon, emily_lat_start, emily_lon_start)\r\n\tinitial_dist = dist\r\n\r\n\tprint ('Distance: ',dist)\r\n\theading = get_heading(emily_lat_start, emily_lon_start, g_lat, g_lon)\r\n print ('After get heading')\r\n\t# Eric: I'm not sure if turn_towards is necessary for a successful run.\r\n\t#turn_towards(heading)\r\n\tprint ('After Turn towards')\r\n\t#turn towards the goal initially\r\n\r\n\tstart_time = time.time()\r\n\tcurrent_time = 0\r\n\tdstore = []\r\n\thstore = []\r\n\twhile(dist >= goal_radius):\r\n\r\n\t\t#------------ code for reading gps location of emily and its orientation ------\r\n\t\te_lat = vehicle.location.global_frame.lat\r\n\t\te_lon = vehicle.location.global_frame.lon\r\n\t\te_heading = vehicle.heading * pi/180\t\t# convert heading to radians\r\n\t\t#------------------ get e_lat,e_lon, e_orient ---------------------\r\n\r\n\r\n\t\tx_e,y_e = latlongtoxy(e_lat,e_lon,g_lat)\t\t\t#change latitude and longitude to xy\r\n\r\n\t\t#x,y are given to approach victim function as y,x to algin the north heading and direction in x,y\r\n\r\n\t\tdx,dy = approach_victim_behaviour(y_goal,x_goal, y_e,x_e, pose_rad, Parameters)\t#get potential field vector\r\n\t\trc1, rc3 = dxdytorc(dx,dy, e_heading,g_lon)\t\t\t\t\t#get rc parameters\r\n\t\tdist = haver_distance(g_lat, g_lon, e_lat, e_lon)\t\t\t\t#haversine distance\r\n\r\n\t\tcurrent_time = time.time() - start_time\r\n\t\tprint (\"Time, Heading, Distance\")\r\n\t\tprint (current_time, e_heading*180/pi, dist)\r\n\t\tdstore.append(dist)\r\n\t\thstore.append(e_heading*180/pi)\r\n\t\t#code for sending the writing the rc commands\r\n\t\t# 3 is the thrust control\r\n\t\t#vehicle.channels.overrides = {'3':rc3}\r\n\t\tsendThrottleCommand(rc3, enableThrottle)\r\n\t\ttime.sleep(0.5)\r\n\t\tvehicle.channels.overrides = {'1':rc1}\r\n\t\tprint (\"Rudder: \",rc1)\r\n\t\tprint (\"Throttle: \",rc3)\r\n\t\tsaveToLog(e_lat, e_lon,dist,rc1,rc3)\r\n\t\ttime.sleep(0.5)\r\n\tprint(initial_dist)\r\n\tprint(\"intial \", emily_lat_start,emily_lon_start)\r\n\tprint(\"final \",e_lat,e_lon)\r\n\tplt.plot(dstore)\r\n\t#plt.title('Distance form home vs 
time')\r\n\tplt.xlabel(\"Time\")\r\n\tplt.ylabel('Distance')\r\n\tplt.show()\r\n\tplt.plot(hstore)\r\n\tplt.show()", "def computeForces(self, neighbors: List[Any]) -> np.array:\n\n # Compute the goal force\n F_goal = (self.gvel - self.vel) / self.ksi\n self.f = F_goal\n\n # Calculate the neighbors that are within the sensing distance\n near_neighbors = [\n n for n in neighbors if n is not self and self.__distance_to(n) <= self.dhor\n ]\n\n # Iterate through all the neighbors\n for neighbor in near_neighbors:\n # Calculate the time to collision\n tau = self.time_to_collision(neighbor)\n\n # If the tau is infinite then the two agents will never collide\n if tau == float(\"inf\"):\n continue\n\n # If tau is zero the agents are colliding now so go as fast as possible\n if tau == 0:\n F_a = self.max_f\n # Else the force is scaled by the maximum of the difference between the time\n # horizon and tau (> 0) over tau\n else:\n F_a = max(self.timehor - tau, 0) / tau\n\n # Calculate the vector that points exactly away from the collision\n n = (\n (self.pos + self.vel * tau) - (neighbor.pos + neighbor.vel * tau)\n ) / np.linalg.norm(self.pos + self.vel * tau)\n\n # Add the scaled vector to the force\n self.f += F_a * n\n\n return self.f", "def get_hardwired_speed_weights(self):\n \n phase_shift=self.speed_phase_shift\n \n # row 1 has the weights of speed cells to grid cell 1\n self.W_speed_east=np.zeros_like(self.W_ee) \n self.W_speed_west=np.zeros_like(self.W_ee) \n self.W_speed_north=np.zeros_like(self.W_ee) \n self.W_speed_south=np.zeros_like(self.W_ee) \n\n if self.use_eight_directions is True:\n self.W_speed_north_east=np.zeros_like(self.W_ee) \n self.W_speed_north_west=np.zeros_like(self.W_ee) \n self.W_speed_south_east=np.zeros_like(self.W_ee) \n self.W_speed_south_west=np.zeros_like(self.W_ee) \n\n\n for phase_idx,phase in enumerate(self.gp.phases):\n shifted_north_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/2.),self.gp.phases)\n shifted_south_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/2.),self.gp.phases)\n shifted_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(0),self.gp.phases)\n shifted_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi),self.gp.phases)\n\n self.W_speed_north[phase_idx,:]=self.W_ee[shifted_north_phase_idx,:]\n self.W_speed_south[phase_idx,:]=self.W_ee[shifted_south_phase_idx,:]\n self.W_speed_east[phase_idx,:]=self.W_ee[shifted_east_phase_idx,:]\n self.W_speed_west[phase_idx,:]=self.W_ee[shifted_west_phase_idx,:] \n \n if self.use_eight_directions is True:\n shifted_north_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi/4),self.gp.phases)\n shifted_north_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(np.pi*3/4),self.gp.phases)\n shifted_south_east_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi/4),self.gp.phases)\n shifted_south_west_phase_idx=gl.get_pos_idx(phase+phase_shift*dir_vect(-np.pi*3/4),self.gp.phases)\n \n self.W_speed_north_east[phase_idx,:]=self.W_ee[shifted_north_east_phase_idx,:]\n self.W_speed_north_west[phase_idx,:]=self.W_ee[shifted_north_west_phase_idx,:]\n self.W_speed_south_east[phase_idx,:]=self.W_ee[shifted_south_east_phase_idx,:]\n self.W_speed_south_west[phase_idx,:]=self.W_ee[shifted_south_west_phase_idx,:]", "def dijkstras(occupancy_map,x_spacing,y_spacing,start,goal):\n ROWS, COLS = occupancy_map.shape\n #convert physical location to index in the grid\n startNode = locToIndex(start, x_spacing, y_spacing)\n startingNodeLoc = indexToLoc(startNode, x_spacing, 
y_spacing)\n initialcost = math.sqrt((startingNodeLoc[0] - start[0])**2 + (startingNodeLoc[1] - start[1])**2)\n goalNode = locToIndex(goal, x_spacing, y_spacing)\n \n freelist = np.where(occupancy_map == 0)\n if occupancy_map[startNode[0], startNode[1]] != 0:\n #raise ValueError(\"start : ({}, {}) invalid, is an obstacle\".format(startNode[0], startNode[1]))\n startNode = findValidNode(startNode, start, occupancy_map, x_spacing, y_spacing)\n if occupancy_map[goalNode[0], goalNode[1]] != 0:\n #raise ValueError(\"goal: ({}, {}) invalid, is an obstacle\".format(goalNode[0], goalNode[1]))\n goalNode = findValidNode(goalNode, goal, occupancy_map, x_spacing, y_spacing)\n candidate = [ [sys.float_info.max, \n i, (freelist[0][i], freelist[1][i])] for i in range(len(freelist[0]))] \n visited = set([])\n queue = PriorityQueue(candidate)\n paths = {}\n found = False\n\n #update initial cost\n queue.remove(startNode)\n queue.insert(startNode, initialcost)\n paths[startNode] = None\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, 1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 0, -1, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, 1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n updateInitial(occupancy_map, ROWS, COLS, start, startNode, -1, 0, queue, paths, x_spacing, y_spacing, initialcost)\n while queue.size() > 0:\n priority, current = queue.pop()\n if current == goalNode:\n found = True\n break\n #not reaching goal node yet, for each of its neighbor, update the weight\n visited.add(current)\n update(occupancy_map, ROWS, COLS, current, 0, 1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 0, -1, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, 1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n update(occupancy_map, ROWS, COLS, current, -1, 0, priority, queue, paths, visited, x_spacing, y_spacing)\n \n if not found:\n raise ValueError(\"fail to find shortest path\")\n node = goalNode\n shortestpath = []\n while node is not None:\n shortestpath.append(node)\n node = paths[node]\n #shortestpath.append(startNode)\n #print (startNode)\n #print ('*', list(reversed(shortestpath)))\n #print (goalNode)\n p = list(reversed([ indexToLoc(n, x_spacing, y_spacing) for n in shortestpath]))\n #start and final position may not fall on center of the grid\n if abs(p[0][0] - start[0]) > 0.0005 or abs(p[0][1] - start[1]) > 0.0005:\n p.insert(0, [start[0][0], start[1][0]])\n if abs(p[-1][0] - goal[0]) > 0.0005 or abs(p[-1][1] - goal[1]) > 0.0005:\n p.append([goal[0][0], goal[1][0]])\n res = np.array(p)\n print (res)\n return res", "def solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths):\n\n car_path = [get_car_path(graph,home_clusters,source,all_pairs_distances,all_pairs_shortest_paths, \n source_in_clusters = B1, christofides = B2) for B1 in [False,True] for B2 in [False,True]]\n\n dropoffs = [cluster_solver_utils.nearest_dropoff_efficient(graph,path,homes,all_pairs_distances) for path in car_path]\n cost = [cluster_solver_utils.eval_cost_efficient(graph,car_path[i],dropoffs[i],all_pairs_distances) for i in range(len(car_path))]\n\n minimum_cost = min(cost)\n idx = cost.index(minimum_cost)\n\n return minimum_cost, dropoffs[idx], car_path[idx]", "def _estimate_velocity_by_neigh(\n x_coords_metres, y_coords_metres, 
x_velocities_m_s01,\n y_velocities_m_s01, e_folding_radius_metres):\n\n if numpy.isnan(e_folding_radius_metres):\n neigh_radius_metres = numpy.inf\n else:\n neigh_radius_metres = 3 * e_folding_radius_metres\n\n orig_x_velocities_m_s01 = x_velocities_m_s01 + 0.\n orig_y_velocities_m_s01 = y_velocities_m_s01 + 0.\n\n nan_flags = numpy.logical_or(\n numpy.isnan(orig_x_velocities_m_s01),\n numpy.isnan(orig_y_velocities_m_s01)\n )\n nan_indices = numpy.where(nan_flags)[0]\n\n for this_index in nan_indices:\n if numpy.isnan(e_folding_radius_metres):\n these_neighbour_indices = numpy.where(numpy.invert(nan_flags))[0]\n if len(these_neighbour_indices) == 0:\n continue\n\n x_velocities_m_s01[this_index] = numpy.mean(\n orig_x_velocities_m_s01[these_neighbour_indices]\n )\n\n y_velocities_m_s01[this_index] = numpy.mean(\n orig_y_velocities_m_s01[these_neighbour_indices]\n )\n\n continue\n\n these_x_diffs_metres = numpy.absolute(\n x_coords_metres[this_index] - x_coords_metres)\n these_y_diffs_metres = numpy.absolute(\n y_coords_metres[this_index] - y_coords_metres)\n\n these_neighbour_flags = numpy.logical_and(\n these_x_diffs_metres <= neigh_radius_metres,\n these_y_diffs_metres <= neigh_radius_metres)\n\n these_neighbour_flags = numpy.logical_and(\n these_neighbour_flags, numpy.invert(nan_flags)\n )\n\n these_neighbour_indices = numpy.where(these_neighbour_flags)[0]\n if len(these_neighbour_indices) == 0:\n continue\n\n these_neighbour_dist_metres = numpy.sqrt(\n these_x_diffs_metres[these_neighbour_indices] ** 2 +\n these_y_diffs_metres[these_neighbour_indices] ** 2\n )\n\n these_neighbour_subindices = numpy.where(\n these_neighbour_dist_metres <= neigh_radius_metres\n )[0]\n if len(these_neighbour_subindices) == 0:\n continue\n\n these_neighbour_indices = these_neighbour_indices[\n these_neighbour_subindices]\n these_neighbour_dist_metres = these_neighbour_dist_metres[\n these_neighbour_subindices]\n\n these_weights = numpy.exp(\n -these_neighbour_dist_metres / e_folding_radius_metres\n )\n these_weights = these_weights / numpy.sum(these_weights)\n\n x_velocities_m_s01[this_index] = numpy.sum(\n these_weights * orig_x_velocities_m_s01[these_neighbour_indices]\n )\n\n y_velocities_m_s01[this_index] = numpy.sum(\n these_weights * orig_y_velocities_m_s01[these_neighbour_indices]\n )\n\n return x_velocities_m_s01, y_velocities_m_s01", "def _update_local_solution(self, x: np.ndarray, x_neigh: dict, stepsize: float, **kwargs):\r\n for j, x_j in x_neigh.items():\r\n self.lambd[j] += stepsize * (x - x_j)\r\n \r\n self.x = x", "def run_heuristic_1(self):\n\n # store original upper bounds\n for arc in self.arc_info.keys():\n self.arc_info[arc][\"original_ub\"] =\\\n self.arc_info[arc][\"upper_bound\"]\n\n q = self.create_queue()\n updates = 0\n\n while (not q.empty()):\n # for every edge that has flow 0, set upper bound to 0\n # this ensures that we don't add an edge to remove this one\n for arc in self.arc_info.keys():\n if self.arc_info[arc][\"weight\"] == 0:\n self.arc_info[arc][\"upper_bound\"] = 0\n\n arc_id = q.get()[1]\n # print(\"Trying to adjust flow using arc {}\".format(arc_id))\n # set upper bound of this edge to 0\n self.arc_info[arc_id][\"upper_bound\"] = 0\n flow_found = self.update_flow()\n if flow_found:\n # start = self.arc_info[arc_id][\"start\"]\n # destin = self.arc_info[arc_id][\"destin\"]\n # print(\"Found flow without arc {}, ({},{}).\".format(arc_id,\n # start, destin))\n # create new queue from new flow\n q = self.create_queue()\n updates += 1\n\n # return bounds to original\n 
for arc in self.arc_info.keys():\n self.arc_info[arc][\"upper_bound\"] =\\\n self.arc_info[arc][\"original_ub\"]\n return(updates)", "def traveling_salesman(destinations_1):\n # Instantiate the data problem.\n data = create_data_model()\n\n # NEW SPOT TO MAKE distance_matrix\n distance_matrix = compute_euclidean_distance_matrix(destinations_1)\n manager = pywrapcp.RoutingIndexManager(\n len(destinations_1), data['num_vehicles'], data['depot'])\n\n# # Create the routing index manager.\n# manager = pywrapcp.RoutingIndexManager(\n# len(data['locations']), data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n# distance_matrix = compute_euclidean_distance_matrix(data['locations'])\n\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return distance_matrix[from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n # Solve the problem.\n assignment = routing.SolveWithParameters(search_parameters)\n\n # Print solution on console.\n# if assignment:\n# print_solution(manager, routing, assignment)\n if assignment:\n address1,address2,address3,address4,address5,address6,address7,address8,address9,address10=\\\n set_address_path(manager, routing, assignment,destinations_1)\n return address1,address2,address3,address4,address5,address6,address7,address8,address9,address10", "def a_star(my_map, start_locs, goal_locs, h_values, agent, constraints):\n\n ##############################\n # Task 1.1: Extend the A* search to search in the space-time domain\n # rather than space domain, only.\n # Build constraint table if there are constraints\n\n constraint_table = build_constraint_table(constraints, agent)\n\n open_list = []\n closed_list = dict()\n nodes_opened = 0\n max_opened = 500\n start_loc = start_locs[0]\n goal_loc = goal_locs[0]\n if len(start_locs) > 1: # If there is more than 1 start location then this is a multi-cell agent\n multi = True\n else:\n multi = False\n\n # determine when the last constraint is on the goal node (or any of the goal node cells in the case of multi-cell)\n earliest_goal_timestep = 0\n if len(constraint_table) != 0:\n for time in [item for item in sorted(list(constraint_table.keys()), reverse=True)]:\n flat_list = [item for sublist in constraint_table[time] for item in sublist]\n if(goal_locs[0] in flat_list):\n earliest_goal_timestep = time\n break\n elif(multi): # if multi cell check if any of the agents goal cells are constrained \n if(goal_locs[1] in flat_list): \n earliest_goal_timestep = time\n break\n\n h_value = h_values[start_loc]\n goal_orientation = orientation(goal_locs)\n\n root = {'loc': start_loc,'orientation': orientation(start_locs), 'g_val': 0, 'h_val': h_value, 'time': 0, 'parent': None}\n push_node(open_list, root)\n closed_list[(root['loc'], root['time'], root['orientation'])] = root\n\n while len(open_list ) > 0 and nodes_opened < max_opened:\n curr = pop_node(open_list)\n nodes_opened = nodes_opened + 1\n \n if curr['loc'] == goal_loc 
and curr['orientation'] == goal_orientation and curr['time'] >= earliest_goal_timestep:\n return get_path(curr)\n ############################\n child_orient = curr['orientation']\n for dir in range(7):\n if dir < 5:\n child_loc = move(curr['loc'], dir)\n elif not multi: \n continue\n\n if dir == 5:\n # clockwise rotation \n child_orient = curr['orientation'] - 1\n if child_orient < 1:\n child_orient = 4\n if dir == 6:\n # counter-clockwise rotation \n child_orient = curr['orientation'] + 1\n if child_orient > 4:\n child_orient = 1\n \n if test_map(my_map, child_loc[0], child_loc[1], child_orient, dir):\n continue\n \n # check if the head location is constrained \n if is_constrained(curr['loc'], child_loc, child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if this is a multi cell agent check if the tail is constrained \n if multi:\n # check the next tail location \n row_t, col_t, _, _ = find_tail_positions(curr['loc'][0], curr['loc'][1], curr['orientation'], dir)\n next_row_t, next_col_t, next_row_t_inter, next_col_t_inter = find_tail_positions(child_loc[0], child_loc[1], child_orient, dir)\n\n if is_constrained((row_t,col_t), (next_row_t, next_col_t), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n # if the agent is rotating check if the intermediate location is constrained\n if dir == 5 or dir == 6: \n if is_constrained((row_t,col_t), (next_row_t_inter, next_col_t_inter), child_orient, dir, curr['time'] + 1, constraint_table):\n continue\n\n child = {'loc': child_loc,\n 'orientation': child_orient,\n 'g_val': curr['g_val'] + 1,\n 'h_val': h_values[child_loc] + orient_cost(child_orient, goal_orientation),\n 'time': curr['time'] + 1,\n 'parent': curr}\n\n if (child['loc'], child['time'], child['orientation']) in closed_list:\n existing_node = closed_list[(child['loc'], child['time'], child['orientation'])]\n \n if compare_nodes(child, existing_node):\n closed_list[(child['loc'], child['time'], child['orientation'])] = child\n push_node(open_list, child)\n else:\n closed_list[(child['loc'], child['time'], child['orientation'])] = child\n push_node(open_list, child)\n \n return None # Failed to find solutions", "def solve(customerCount, vehicleCount, vehicleCapacity, depotIndex, customers):\n \n N, locations, locations_r, distances, closest = precalculate(customers)\n \n #print locations\n #print locations_r\n angle_order = range(1, N)\n angle_order.sort(key=lambda i: (locations_r[i, 1], locations_r[i, 0])) \n \n vehicleTours = best_order(customerCount, customers, vehicleCount, vehicleCapacity, angle_order)\n if not vehicleTours:\n vehicleTours = solve0(customerCount, vehicleCount, vehicleCapacity, depotIndex, customers)\n check(customerCount, customers, vehicleCapacity, vehicleTours)\n vehicleTours = get_shortest_paths('file_path XXX', customers, depotIndex, vehicleTours)\n check(customerCount, customers, vehicleCapacity, vehicleTours)\n \n vehicleTours0 = copy.deepcopy(vehicleTours)\n dist0 = total_dist(customers, depotIndex, vehicleTours)\n if False:\n for _ in range(100):\n vehicleTours = copy.deepcopy(vehicleTours0) \n adjust_tours(customers, vehicleCapacity, vehicleCount, vehicleTours)\n vehicleTours = get_shortest_paths('file_path XXX', customers, depotIndex, vehicleTours)\n #check(customerCount, customers, vehicleCapacity, vehicleTours)\n if not is_valid(customerCount, customers, vehicleCapacity, vehicleTours):\n continue\n dist = total_dist(customers, depotIndex, vehicleTours)\n if dist < dist0:\n print '%s => %s' % (dist0, dist)\n 
vehicleTours0 = vehicleTours[:]\n dist0 = dist\n \n \n vehicleTours = copy.deepcopy(vehicleTours0) \n check(customerCount, customers, vehicleCapacity, vehicleTours)\n while len(vehicleTours) < vehicleCount:\n vehicleTours.append([])\n \n print '*', vehicleTours \n \n return vehicleTours", "def _optimize(self) -> None:\n\n for i, agent in enumerate(self.agents):\n states, actions, rewards, next_states, dones = self.memory.sample()\n\n actor_next_state = self._agent_states(i, next_states)\n next_actions = torch.cat(\n [a.actor_target(actor_next_state) for a in self.agents], 1\n )\n next_q = agent.critic_target(next_states, next_actions).detach()\n target_q = rewards[:, i].view(-1, 1) + self.gamma * next_q * (\n 1 - dones[:, i].view(-1, 1)\n )\n local_q = agent.critic_local(states, actions)\n\n value_loss = agent.loss_fn(local_q, target_q)\n agent.value_optimizer.zero_grad()\n value_loss.backward()\n agent.value_optimizer.step()\n\n local_actions = []\n for i, a in enumerate(self.agents):\n local_states = self._agent_states(i, states)\n local_actions.append(\n a.actor_local(local_states)\n if a == agent\n else a.actor_local(local_states).detach()\n )\n local_actions = torch.cat(local_actions, 1)\n policy_loss = -agent.critic_local(states, local_actions).mean()\n\n agent.policy_optimizer.zero_grad()\n policy_loss.backward()\n agent.policy_optimizer.step()\n\n self._update_target_model(agent.critic_local, agent.critic_target)\n self._update_target_model(agent.actor_local, agent.actor_target)", "def floydWarshall(graph):\n \"\"\" initializing the solution matrix same as input graph matrix\n OR we can say that the initial values of shortest distances\n are based on shortest paths considerting no \n intermedidate vertices \"\"\"\n V = len(graph[0])\n dist = [[elem for elem in line] for line in graph]\n \n \"\"\" Add all vertices one by one to the set of intermediate\n vertices.\n ---> Before start of a iteration, we have shortest distances\n between all pairs of vertices such that the shortest\n distances consider only the vertices in set \n {0, 1, 2, .. k-1} as intermediate vertices.\n ----> After the end of a iteration, vertex no. k is\n added to the set of intermediate vertices and the \n set becomes {0, 1, 2, .. 
k}\n \"\"\"\n for k in range(V):\n \n # pick all vertices as source one by one\n for i in range(V):\n \n # Pick all vertices as destination for the\n # above picked source\n for j in range(V):\n \n # If vertex k is on the shortest path from \n # i to j, then update the value of dist[i][j]\n dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])\n\n for line in dist:\n print line\n\n return dist", "def safeJourney(Alist,s,d):\n #Initialize dictionaries\n dinit = 10**6\n Edict = {} #Explored nodes\n Udict = {} #Unexplored nodes\n path = [[] for l in Alist]\n\n Alen = len(Alist) #length of Alist\n dinits = [dinit]*Alen #list of airport indexes\n Udict = dict(zip(list(range(Alen)),dinits)) #zip into dictionary\n Udict[s] = 0\n path[s] = [s]\n \n #Main search\n while len(Udict)>0:\n #Find node with min d in Udict and move to Edict\n dmin = dinit\n for n,w in Udict.items():\n if w<dmin:\n dmin=w\n nmin=n\n Edict[nmin] = Udict.pop(nmin)\n print(\"moved node\", nmin)\n\n #Update provisional distances for unexplored neighbors of nmin\n \n #for n,w in G.adj[nmin].items():\n for item in Alist[nmin]: #nminth element is a list of two element tuples (node, weight)\n n = item[0] #first elt of tuple is node/neighbour\n w = item[1] #2nd elt is density/weigh\n #for n,w in etc_______________________-\n \n if n in Edict:\n pass\n elif n in Udict:\n #dcomp = dmin + w\n dcomp = max(w,dmin) #take largest value to record most dangerous segment\n if dcomp<Udict[n]:\n print(Udict)\n Udict[n]=dcomp\n path[n] = path[nmin] + [n]\n #path[n].extend(path[nmin])\n #path[n] = path[nmin]\n \n #path[n].append(n) #n not nmin\n print(path)\n # else:\n #dcomp = dmin + w\n # dcomp = max(w,dmin)\n # Udict[n] = dcomp\n #path[n].extend(path[nmin])\n #path[n].append(nmin) \n \n if nmin == d: #if current node is destination\n return path[d],Edict[d]\n return [] #no path", "def run_one_step(self, dt):\n if not self._erode_flooded_nodes:\n flood_status = self._grid.at_node[\"flood_status_code\"]\n flooded_nodes = np.nonzero(flood_status == _FLOODED)[0]\n else:\n flooded_nodes = []\n\n upstream_order_IDs = self._grid[\"node\"][\"flow__upstream_node_order\"]\n\n defined_flow_receivers = np.not_equal(\n self._grid[\"node\"][\"flow__link_to_receiver_node\"], self._grid.BAD_INDEX\n )\n\n try:\n length_of_link = self._grid.length_of_d8\n except AttributeError:\n length_of_link = self._grid.length_of_link\n\n flow_link_lengths = length_of_link[\n self._grid.at_node[\"flow__link_to_receiver_node\"][defined_flow_receivers]\n ]\n flow_receivers = self._grid[\"node\"][\"flow__receiver_node\"]\n\n # Operate the main function:\n if self._use_W:\n self._alpha[defined_flow_receivers] = (\n self._K[defined_flow_receivers]\n * dt\n * self._A[defined_flow_receivers] ** self._m\n / self._W[defined_flow_receivers]\n / (flow_link_lengths**self._n)\n )\n\n else:\n self._alpha[defined_flow_receivers] = (\n self._K[defined_flow_receivers]\n * dt\n * self._A[defined_flow_receivers] ** self._m\n / (flow_link_lengths**self._n)\n )\n\n # Handle flooded nodes, if any (no erosion there)\n if flooded_nodes is not None:\n self._alpha[flooded_nodes] = 0.0\n\n reversed_flow = self._elevs < self._elevs[flow_receivers]\n # this check necessary if flow has been routed across\n # depressions\n self._alpha[reversed_flow] = 0.0\n\n threshdt = self._sp_crit * dt\n\n # solve using Brent's Method in Cython for Speed\n if isinstance(threshdt, float):\n brent_method_erode_fixed_threshold(\n upstream_order_IDs,\n flow_receivers,\n threshdt,\n self._alpha,\n self._n,\n 
self._elevs,\n )\n else:\n brent_method_erode_variable_threshold(\n upstream_order_IDs,\n flow_receivers,\n threshdt,\n self._alpha,\n self._n,\n self._elevs,\n )", "def areasComparison(self):\n # if self.nFuselage > 0:\n # logger.debug(\"Fuselage initial area:\\n \"+str(self.fs_m_pointsInitArea[0]))\n # logger.debug(\"Fuselage final A:\\n \"+str(self.fs_m_pointsA[0]))\n # logger.debug(\"Fuselage final Iy:\\n \"+str(self.fs_m_pointsIy[0]))\n # logger.debug(\"Fuselage final Iz:\\n \"+str(self.fs_m_pointsIz[0]))\n # logger.debug(\"Fuselage final J:\\n \"+str(self.fs_m_pointsJ[0]))\n # logger.debug(\"Fuselage nodes names\"+str(self.fs_m_pointsName[0]))\n # for i in range(self.nWings):\n # logger.debug(\"Wing initial area:\\n \"+str(self.ws_me_pointsInitArea[i]))\n # logger.debug(\"Wing final A:\\n \"+str(self.ws_me_pointsA[i]))\n # logger.debug(\"Wing final Iy:\\n \"+str(self.ws_me_pointsIy[i]))\n # logger.debug(\"Wing final Iz:\\n \"+str(self.ws_me_pointsIz[i]))\n # logger.debug(\"Wing final J:\\n \"+str(self.ws_me_pointsJ[i]))\n # logger.debug(\"Wing nodes names\"+str(self.ws_me_pointsName[i]))\n N = len(self.aircraftNodesPoints)\n for i in range(N):\n logger.debug(\"Aircraft nodes:\\n\"+str(self.aircraftNodesPoints[i]))\n logger.debug(\"Aircraft nodes names:\\n\"+str(self.aircraftNodesNames[i]))\n logger.debug(\"Aircraft A:\\n\"+str(self.aircraftNodesA[i]))\n logger.debug(\"Aircraft Iy:\\n\"+str(self.aircraftNodesIy[i]))\n logger.debug(\"Aircraft Iz:\\n\"+str(self.aircraftNodesIz[i]))\n logger.debug(\"Aircraft J:\\n\"+str(self.aircraftNodesJ[i]))\n sys.exit()" ]
[ "0.7051051", "0.6387123", "0.6131864", "0.5956313", "0.5933806", "0.5870744", "0.5852478", "0.58488905", "0.5806669", "0.5763782", "0.57401586", "0.57004774", "0.567962", "0.56677824", "0.56226337", "0.55885166", "0.55575943", "0.5544302", "0.547298", "0.54628146", "0.54545224", "0.54411685", "0.5421106", "0.5418138", "0.53899235", "0.53851604", "0.5367466", "0.5365145", "0.536463", "0.53584" ]
0.7724953
0
adds data to the csv for a certain agent
def write_csv(self, agent_num): data = [] data.append(self.t) data.append(self.w_EDR) data.append(self.w_RESOURCE) data.append(self.w_DISTANCE) data.append(agent_num) for task_num, task in enumerate(self.tasks): vectorized_task_loc = self.get_vectorized_location(task.getloc()) is_occupied = self.agent_locations[0][vectorized_task_loc] # 1 if occupied data.append(is_occupied) # data.extend(np.ndarray.tolist(self.agent_locations)) # Feature 1 data.extend(np.ndarray.tolist(self.is_task_finished)) # Feature 2 data.extend(np.ndarray.tolist(self.is_task_enabled)) # Feature 3 data.extend(np.ndarray.tolist(self.is_task_alive)) # Feature 4 data.extend(np.ndarray.tolist(self.travel_time_constraint_satisfied[agent_num])) # Feature 5 data.extend(self.is_agent_idle[agent_num]) # Feature 6 data.extend(np.ndarray.tolist(self.agent_distances[agent_num])) # Feature 7 for task_num, task in enumerate(self.tasks): vectorized_task_loc = self.get_vectorized_location(task.getloc()) tasks_in_each_square = self.how_many_tasks_in_each_square[0][vectorized_task_loc] # 1 if occupied data.append(tasks_in_each_square) # data.extend(np.ndarray.tolist(self.how_many_tasks_in_each_square)) # Feature 8 data.extend(np.ndarray.tolist(self.orientation[agent_num])) # Feature 9 data.extend(np.ndarray.tolist(self.task_deadlines)) # Feature 10 data.extend(np.ndarray.tolist(self.is_task_in_progress)) # Feature 11 data.extend(np.ndarray.tolist(self.orientation[agent_num] * self.agent_distances[agent_num])) # Feature 12 data.append(self.task_to_schedule) # Output self.naive_total_data.append(data) # with open('1_schedule.csv', 'a') as outfile: # writer = csv.writer(outfile) # writer.writerow(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_add(invoice_details):\r\n with open(\"beer_data.csv\", \"a\") as data_file:\r\n writer = csv.writer(data_file)\r\n writer.writerow(invoice_details)\r\n data_file.close()", "def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()", "def write_to_csv(agents, filename):\n log.info(\"Writing CSV file '%s'...\" % filename)\n with open(filename, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=place_detail_keys)\n writer.writeheader()\n writer.writerows(agents)", "def wh_append_booking(agent_full_name, agent_directory, ca_df):\r\n # count load on agent_data_df\r\n agent_data_df = pd.read_csv(f'{agent_full_name}.csv', header=0, delimiter=\",\", engine='python')\r\n agent_data_df.loc[0, 'load'] = int(agent_data_df.loc[0, 'load']) + 1\r\n agent_data_df.to_csv(f'{agent_directory}''/'f'{agent_full_name}.csv', index=False, header=True)\r\n # create msg body to send to log with booking info\r\n data_to_save = pd.DataFrame([], columns=['id', 'agent_type', 'location_1', 'location_2', 'purpose', 'request_type', 'time', 'rack', 'coil_in', 'coil_out', 'capacity', 'load'])\r\n data_to_save.at[0, 'id'] = ca_df.loc[0, 'id']\r\n data_to_save.at[0, 'agent_type'] = ca_df.loc[0, 'agent_type']\r\n data_to_save.at[0, 'location_1'] = ca_df.loc[0, 'location_1']\r\n data_to_save.at[0, 'location_2'] = ca_df.loc[0, 'location_2']\r\n data_to_save.at[0, 'location'] = ca_df.loc[0, 'location']\r\n data_to_save.at[0, 'purpose'] = ca_df.loc[0, 'purpose']\r\n data_to_save.at[0, 'request_type'] = ca_df.loc[0, 'action'] # action=book\r\n data_to_save.at[0, 'time'] = datetime.datetime.now()\r\n data_to_save.at[0, 'rack'] = 1\r\n # print(f'{agent_book} booked rack on {agent_full_name}. 
Added +1 load to {agent_full_name} capacity')\r\n return data_to_save.to_json()", "def write_csv_pairwise(self, agent_num):\n # self.last_timestep_data = []\n for task_num, i in enumerate(self.tasks):\n current_task_data = []\n task_loc = i.getloc()\n vectorized_task_loc = self.get_vectorized_location(task_loc)\n current_task_data.append(self.t)\n current_task_data.append(self.w_EDR)\n current_task_data.append(self.w_RESOURCE)\n current_task_data.append(self.w_DISTANCE)\n current_task_data.append(agent_num)\n\n current_task_data.append(task_num)\n current_task_data.extend(self.is_agent_idle[agent_num]) # Feature 6\n current_task_data.append((self.is_task_finished[0][task_num])) # Feature 2\n current_task_data.append((self.is_task_enabled[0][task_num])) # Feature 3\n current_task_data.append((self.is_task_alive[0][task_num])) # Feature 4\n current_task_data.append((self.travel_time_constraint_satisfied[agent_num][task_num])) # Feature 5\n is_occupied = self.agent_locations[0][vectorized_task_loc] # if 1 agent is there, 0 is unoccupied\n current_task_data.append((is_occupied)) # Feature 1\n current_task_data.append((self.agent_distances[agent_num][task_num])) # Feature 7\n current_task_data.append((self.orientation[agent_num][task_num])) # Feature 9\n current_task_data.append((self.task_deadlines[0][task_num])) # Feature 10\n current_task_data.append((self.is_task_in_progress[0][task_num])) # Feature 11\n current_task_data.append((\n self.orientation[agent_num][task_num] * self.agent_distances[agent_num][task_num])) # Feature 12\n current_task_data.append((self.how_many_tasks_in_each_square[0][vectorized_task_loc])) # Feature 8\n if self.task_to_schedule == -1:\n null_task = 1\n else:\n null_task = 0\n current_task_data.append(null_task)\n current_task_data.append(self.task_to_schedule[0]) # Output\n self.pairwise_total_data.append(current_task_data)\n # self.last_timestep_data.append(current_task_data)\n # with open('11_schedule.csv', 'a') as outfile:\n # writer = csv.writer(outfile)\n # writer.writerow(current_task_data)", "def add(self, agent):\n self._agents[agent.unique_id] = agent\n self.logger.add(agent)", "def row(self, rdata):\n self = self\n file = open(\"imdb_output.csv\", \"a\")\n file.write(str(\",\".join(rdata)) + \"\\n\")", "def export_data(self):\r\n \r\n \r\n output_file = 'export.csv'\r\n data = self.get_raw_data()\r\n \r\n if data != []:\r\n print('Writing to file', output_file)\r\n with open(output_file, 'w',) as csvfile:\r\n fluorescence_levels = csv.writer(csvfile)\r\n fluorescence_levels.writerow(['sensor_1','Time'])\r\n for i in data:\r\n fluorescence_levels.writerow(i)\r\n print('done')\r\n \r\n else:\r\n print('no recorded data')", "def writerow(self, data):\n self.get_csv_writer().writerow(data)", "def write_log(self):\n with open(self.trav_stat_file, 'a') as stat_file:\n travel_writer = csv.writer(stat_file)\n # Every row starts with the start and destnation\n row = [self.start, self.dest]\n # This uses a static list so that the order is fixed\n for state in [\"waiting\", \"riding\", \"transferring\"]:\n state_total = sum(self.time_record[state])\n row.append(state_total)\n travel_writer.writerow(row)", "def append(self, agent):\n self.agents.append(agent)", "def write_into_csv(self, loc_details=[], itype='atm', mode='w'): \n \n if itype==\"brc\":\n csvfile_name = self.branch_file\n headers = self.branch_headers\n else:\n csvfile_name = self.atm_file\n headers = self.atm_headers\n\n with open(csvfile_name, mode, newline='') as csvfile:\n locwriter = 
csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_ALL)\n if mode=='w':\n locwriter.writerow(headers) \n\n for loc in loc_details:\n locwriter.writerow(loc)", "def csv_writer(data, path):\n\n with open(path, \"a\") as csv_file:\n\n writer = csv.writer(csv_file,delimiter=',')\n\n \n\n writer.writerow(data)", "def parse_to_csv(data,namee):\n pth = BASE_DIR + '/reports/' + csv_name\n if not os.path.isfile(namee):\n csv_file = open(namee, 'wb')\n csv_writer = csv.writer(csv_file)\n top_row = [\n 'IP', 'Host', 'os', 'Proto', 'Port',\n 'Service','Service_version', 'Product', 'Service FP',\n 'NSE Script ID', 'NSE Script Output', 'Notes'\n ]\n csv_writer.writerow(top_row)\n print('\\n[+] The file {} does not exist. New file created!\\n'.format(\n csv_name))\n # else:\n # # try:\n # csv_file = open(csv_name, 'w')\n\n # csv_writer = csv.writer(csv_file)\n # print('\\n[+] {} exists. Appending to file!\\n'.format(csv_name))\n\n \n for item in data:\n csv_writer.writerow(item)\n csv_file.close()", "def create_initial_csv():\n\tif os.path.exists(args.train):\n\t\tprint(\"--Training data input found: \", args.train)\n\t\t#quick and dirty create csv file\n\t\theaders = os.system(\"echo idorigh,idresph,origbytes,respbytes,origpkts,resppkts,duration > log.csv\")\n\t\tbrocut = os.system(\"cat \"+str(args.train)+\"| bro-cut id.orig_h id.resp_h orig_bytes resp_bytes orig_pkts resp_pkts duration | sed 's/\t/\\,/g' | sed '/-/d'>> log.csv\")\n\t\t\n\telse:\n\t\tprint(\"Bro training data input \"+str(args.train)+\" not found - needs to be in working directory\")\n\t\texit()", "def __append_to_csv(self, path, data):\n header = False\n mode = 'a'\n if not os.path.isfile(path):\n header = True\n mode = 'w'\n data.to_csv(path, mode=mode, index=False, header=header)", "def write(self, args):\n\t\tnewcsvfile = self.filename[:len(self.filename)-4] + \"NEW.csv\" #clever naming MIGHT NEED TO CHANGE THIS LATER/OVERWRITE OLD FILE?\n\t\twith open(newcsvfile, 'wb') as f:\n\t\t\twriter = csv.writer(f)\n\t\t\twriter.writerows(self.all_likes)", "def output_data(self):\n if not self.is_record:\n logging.error('Env: no record to output!')\n else:\n control_data = pd.DataFrame(self.control_data)\n control_data.to_csv(self.output_path + ('%s_%s_control.csv' % (self.name, self.agent)))", "def to_csv(self, csvwriter):\n csvwriter.writerow(self.to_csv_row())", "def write_to_csv(self, data):\n with open(\"out.csv\", \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow(self.column_names)\n writer.writerows(data)\n print(\" Updated succesfully \")", "def append_to_csv(self):\n appended_data = pd.concat([self.existing_data, self.new_data], axis = 1)\n appended_data.to_csv(filename_main, index = False)\n warnings.warn(\"Add new graphs to .vsz files to show the new data\")", "def write(self): \n # Open csv file\n with open(self.file_name, 'w', newline='') as file:\n self._writer = csv.writer(file)\n \n # Write header rows\n# self.write_sim_header_data(self.trace.sim.get_data())\n \n # Write trace table\n self._writer.writerow(['Record #', 'Rep', 'Time',\n 'Priority', 'Record Type', 'Name'])\n for trace_record in self.trace._record_list:\n self._writer.writerow(trace_record.get_row())\n file.close()", "def give_data_to_csv(file_of_data, data):\n with open(file_of_data, 'a') as f:\n writer = csv.writer(f)\n writer.writerow(data)", "def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = 
csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )", "def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])", "def __create_csv(self):\n with open(self.__csv_file_name, 'w', newline='', encoding='utf-8') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=self.__csv_fields, delimiter=';')\n writer.writeheader()", "def create_test_csv():\n if os.path.exists(args.test):\n print(\"--Traffic input for analysis found: \", args.test)\n #quick and dirty create csv file\n headers = os.system(\"echo idorigh,idresph,origbytes,respbytes,origpkts,resppkts,duration > test.csv\")\n brocut = os.system(\"cat \"+str(args.test)+\"| bro-cut id.orig_h id.resp_h orig_bytes resp_bytes orig_pkts resp_pkts duration | sed 's/\t/\\,/g' | sed '/-/d'>> test.csv\")\n \n else:\n print(\"Bro testing data input \"+str(args.test)+\" not found - needs to be in working directory\")\n exit()", "def _save_log(self, save_dir, data):\n date = datetime.datetime.today().strftime('%Y-%m-%d')\n file_dir = os.path.join(save_dir, date + \".csv\")\n with open(file_dir, 'a') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(data)", "def tr_append_booking(agent_directory, agent_full_name, agent_df, slot_range):\r\n tr_booking_df = pd.read_csv(f'{agent_directory}''/'f'{agent_full_name}_booking.csv', header=0, delimiter=\",\", engine='python')\r\n tr_booking_df['booking_type'] = tr_booking_df['booking_type'].fillna(\"\")\r\n for y in slot_range:\r\n tr_booking_df.loc[y - 1, 'assigned_to'] = agent_df.loc[0, 'id']\r\n tr_booking_df.loc[y - 1, 'assigned_at'] = datetime.datetime.now()\r\n if agent_df.loc[0, 'action'] == \"booked\":\r\n tr_booking_df.loc[y - 1, 'booking_type'] = \"booked\"\r\n elif agent_df.loc[0, 'action'] == \"pre-book\":\r\n tr_booking_df.loc[y - 1, 'booking_type'] = \"pre-book\"\r\n tr_booking_df.to_csv(f'{agent_directory}''/'f'{agent_full_name}_booking.csv', index=False, header=True)\r\n return tr_booking_df.to_json()", "def write_csv(reviewer_data, file_obj):\n writer = csv.writer(file_obj)\n writer.writerow(\n ('Reviewer', 'Reviews', '-2', '-1', '+1', '+2', '+A', '+/- %',\n 'Disagreements', 'Disagreement%'))\n for (name, r_data, d_data) in reviewer_data:\n row = (name,) + r_data + d_data\n writer.writerow(row)" ]
[ "0.662632", "0.6398973", "0.63391227", "0.6135978", "0.61266804", "0.5973132", "0.5939993", "0.5921802", "0.5902474", "0.5853977", "0.5835502", "0.58188224", "0.58076525", "0.575672", "0.5740445", "0.57366645", "0.5726228", "0.5687795", "0.56800157", "0.5677692", "0.56772965", "0.5676786", "0.56461847", "0.5625679", "0.5614084", "0.5593769", "0.55794024", "0.5545356", "0.55384386", "0.55231833" ]
0.6684417
0
writes a schedule in pairwise format That means n rows will be presented, where n is the number of tasks. Each row contains task specific features
def write_csv_pairwise(self, agent_num): # self.last_timestep_data = [] for task_num, i in enumerate(self.tasks): current_task_data = [] task_loc = i.getloc() vectorized_task_loc = self.get_vectorized_location(task_loc) current_task_data.append(self.t) current_task_data.append(self.w_EDR) current_task_data.append(self.w_RESOURCE) current_task_data.append(self.w_DISTANCE) current_task_data.append(agent_num) current_task_data.append(task_num) current_task_data.extend(self.is_agent_idle[agent_num]) # Feature 6 current_task_data.append((self.is_task_finished[0][task_num])) # Feature 2 current_task_data.append((self.is_task_enabled[0][task_num])) # Feature 3 current_task_data.append((self.is_task_alive[0][task_num])) # Feature 4 current_task_data.append((self.travel_time_constraint_satisfied[agent_num][task_num])) # Feature 5 is_occupied = self.agent_locations[0][vectorized_task_loc] # if 1 agent is there, 0 is unoccupied current_task_data.append((is_occupied)) # Feature 1 current_task_data.append((self.agent_distances[agent_num][task_num])) # Feature 7 current_task_data.append((self.orientation[agent_num][task_num])) # Feature 9 current_task_data.append((self.task_deadlines[0][task_num])) # Feature 10 current_task_data.append((self.is_task_in_progress[0][task_num])) # Feature 11 current_task_data.append(( self.orientation[agent_num][task_num] * self.agent_distances[agent_num][task_num])) # Feature 12 current_task_data.append((self.how_many_tasks_in_each_square[0][vectorized_task_loc])) # Feature 8 if self.task_to_schedule == -1: null_task = 1 else: null_task = 0 current_task_data.append(null_task) current_task_data.append(self.task_to_schedule[0]) # Output self.pairwise_total_data.append(current_task_data) # self.last_timestep_data.append(current_task_data) # with open('11_schedule.csv', 'a') as outfile: # writer = csv.writer(outfile) # writer.writerow(current_task_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def output_schedule(self) -> None:\n with open(\"Output.txt\", \"w\") as out_file:\n for sem in self.plan:\n out_file.write(sem.title.center(15 + 20 + 50 + 5) + \"\\n\\n\")\n for course in sem.required_courses:\n if course.special:\n out_file.write(\"*\" * 10 + \" \" * 5 + f\"{course.special_type}\\n\")\n elif course.grade != \"\":\n out_file.write(\n course.sem_taken.ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + course.grade.ljust(5)\n + \"\\n\"\n )\n else:\n out_file.write(\n \"AP/UNK\".ljust(15)\n + f\"{course.dept} {course.number}-{course.section}\".ljust(\n 20\n )\n + course.title.ljust(50)\n + \"AP/UNK\".ljust(5)\n + \"\\n\"\n )\n out_file.write(\"\\n\\n\")", "def write_running_tasks(self, file, tasks_list):\r\n file.write(\"time\\trunning_tasks\\n\")\r\n previous_time = -1\r\n # Write in reverse order so that we automatically get the last event\r\n # for each time.\r\n for time, running_tasks in reversed(tasks_list):\r\n if time != previous_time:\r\n if previous_time != -1:\r\n file.write(\"%d\\t%d\\n\" % (previous_time, running_tasks))\r\n file.write(\"%d\\t%d\\n\" % (time, running_tasks))\r\n previous_time = time", "def output_schedule_brief(cout, courses_to_schedule_d, courses_to_mt_d):\n cout.writerow([\"CourseCode\",\"DayWeek\",\"Start\",\"End\",\"Campus\"])\n\n # first write out the courses we just scheduled\n for cn in sorted(courses_to_schedule_d.keys()):\n meeting_time = courses_to_mt_d[cn]\n assert is_cross_list_canonical(cn)\n (subj, catalog) = sct.parse_canonical_course_name(cn)\n\n if print_area and subj != print_area:\n continue\n\n campus = \"Allston\" if will_be_allston_course_subj_catalog(subj, catalog) else \"Cambridge\"\n ct = ss.meeting_time_to_course_time(meeting_time)\n days = ct.days_of_week(separator='/')\n cout.writerow([cn, days, ct.time_start, ct.time_end, campus])\n\n # Now write out all the other courses\n for cn in sorted(sched_d.keys()):\n assert is_cross_list_canonical(cn)\n (subj, catalog) = sct.parse_canonical_course_name(cn)\n if print_area and subj != print_area:\n continue\n\n campus = \"Allston\" if will_be_allston_course_subj_catalog(subj, catalog) else \"Cambridge\"\n cts = sched_d[cn]\n for ct in cts:\n days = ct.days_of_week(separator='/')\n cout.writerow([cn, days, ct.time_start, ct.time_end, campus])", "def genScheduleCSV():\r\n try: \r\n printSchedule()\r\n save_class_list()\r\n print(\"\\nSchedule generated, check working directory\")\r\n except Exception as e:\r\n print(\"Exception found\" + str(e))", "def write_tp_rows():\n \n basename = sys.argv[1]\n ops = (\"two_opt\", \"twoh_opt\", \"three_opt\", \"three_opt_broad\", \"swap\", \"swap_adj\")\n lengths = (6, 7, 8, 9, 10)\n for op in ops:\n for length in lengths:\n filename = os.path.join(basename,\n \"tsp_length_%d_%s\" % (length, op),\n \"TP_row0.dat\")\n print op, length\n x = tsp.get_tm_first_row(length, move=op)\n np.savetxt(filename, x)", "def write_csv(self, agent_num):\n data = []\n data.append(self.t)\n data.append(self.w_EDR)\n data.append(self.w_RESOURCE)\n data.append(self.w_DISTANCE)\n data.append(agent_num)\n for task_num, task in enumerate(self.tasks):\n vectorized_task_loc = self.get_vectorized_location(task.getloc())\n is_occupied = self.agent_locations[0][vectorized_task_loc] # 1 if occupied\n data.append(is_occupied)\n # data.extend(np.ndarray.tolist(self.agent_locations)) # Feature 1\n data.extend(np.ndarray.tolist(self.is_task_finished)) # Feature 2\n 
data.extend(np.ndarray.tolist(self.is_task_enabled)) # Feature 3\n data.extend(np.ndarray.tolist(self.is_task_alive)) # Feature 4\n data.extend(np.ndarray.tolist(self.travel_time_constraint_satisfied[agent_num])) # Feature 5\n data.extend(self.is_agent_idle[agent_num]) # Feature 6\n data.extend(np.ndarray.tolist(self.agent_distances[agent_num])) # Feature 7\n for task_num, task in enumerate(self.tasks):\n vectorized_task_loc = self.get_vectorized_location(task.getloc())\n tasks_in_each_square = self.how_many_tasks_in_each_square[0][vectorized_task_loc] # 1 if occupied\n data.append(tasks_in_each_square)\n # data.extend(np.ndarray.tolist(self.how_many_tasks_in_each_square)) # Feature 8\n data.extend(np.ndarray.tolist(self.orientation[agent_num])) # Feature 9\n data.extend(np.ndarray.tolist(self.task_deadlines)) # Feature 10\n data.extend(np.ndarray.tolist(self.is_task_in_progress)) # Feature 11\n data.extend(np.ndarray.tolist(self.orientation[agent_num] * self.agent_distances[agent_num])) # Feature 12\n data.append(self.task_to_schedule) # Output\n self.naive_total_data.append(data)\n # with open('1_schedule.csv', 'a') as outfile:\n # writer = csv.writer(outfile)\n # writer.writerow(data)", "def write_tasks_table(self):\n tasks = self._get_all_tasks()\n\n self.tasks_view.setRowCount(len(tasks))\n\n row_counter = 0\n for task in tasks:\n\n end_time = None\n start_time = None\n\n # Convert to display data\n if task.StartDate is not None:\n start_time = Time.date_time_format(int(task.StartDate))\n\n if task.EndDate is not None:\n end_time = Time.date_time_format(int(task.EndDate))\n\n # Project name header\n self.tasks_view.setItem(row_counter, 0, QtGui.QTableWidgetItem(str(task.Name)))\n self.tasks_view.setItem(row_counter, 1, QtGui.QTableWidgetItem(str(start_time)))\n self.tasks_view.setItem(row_counter, 2, QtGui.QTableWidgetItem(str(end_time)))\n self.tasks_view.setItem(row_counter, 4, QtGui.QTableWidgetItem(str(task.Assignee)))\n self.tasks_view.setItem(row_counter, 5, QtGui.QTableWidgetItem(str(self.get_project(task.Project))))\n self.tasks_view.setItem(row_counter, 6, QtGui.QTableWidgetItem(str(task.Description)))\n self.tasks_view.setItem(row_counter, 7, QtGui.QTableWidgetItem(str(task.Id)))\n\n # Status header\n if task.Status is None:\n task.Status = int(0)\n\n if int(task.Status) is 1:\n # TODO need translation\n display_status = \"In Progress\"\n\n elif int(task.Status) is 2:\n display_status = \"Not Started\"\n\n elif int(task.Status) is 3:\n display_status = \"Forecast\"\n\n else:\n # TODO need translation\n display_status = \"Done\"\n\n self.tasks_view.setItem(row_counter, 3, QtGui.QTableWidgetItem(str(display_status)))\n\n if task.Description is not None:\n self.tasks_view.setItem(row_counter, 6, QtGui.QTableWidgetItem(task.Description))\n\n row_counter += 1", "def sed_write_prob_mat_list_to_submission_csv(na_list, prob_mat_list, lbs, \n thres_ary, step_sec, out_path):\n create_folder(os.path.dirname(out_path))\n f = open(out_path, 'w')\n cnt = 0\n for n in xrange(len(na_list)):\n na = na_list[n]\n prob_mat = prob_mat_list[n]\n flag = False\n for i2 in xrange(len(lbs)):\n event_list = vad.activity_detection(x=prob_mat[:, i2], \n thres=thres_ary[i2], \n n_smooth=10, \n n_salt=10)\n if len(event_list) != 0:\n flag = True\n for [bgn, fin] in event_list:\n bgn_sec = step_sec * bgn\n fin_sec = step_sec * fin\n f.write(na + \"\\t\" + str(bgn_sec) + \"\\t\" + \\\n str(fin_sec) + \"\\t\" + lbs[i2] + \"\\n\")\n if flag == False: \n f.write(na + \"\\n\")\n f.close()\n print \"Write\", 
out_path, \"successfully!\"", "def transform_schedule(keywords, parameters, input_file, output_file):\n\treturn", "def report_tasks(self, stylise: bool=True):\n report = pd.DataFrame.from_dict(data=self.pm.report_intent())\n intent_replace = {'transition': 'Transition', 'synthetic_builder': 'SyntheticBuilder', 'wrangle': 'Wrangle',\n 'feature_catalog': 'FeatureCatalog', 'data_tolerance': 'DataTolerance'}\n report['component'] = report.intent.replace(to_replace=intent_replace)\n report['task'] = [x[0][10:] for x in report['parameters']]\n report['parameters'] = [x[1:] for x in report['parameters']]\n report = report.loc[:, ['level', 'order', 'component', 'task', 'parameters', 'creator']]\n if stylise:\n return self._report(report, index_header='level')\n return report", "def write_stacked_response_times(self):\r\n results_dirname = get_param(\"results_dir\")\r\n filename = os.path.join(results_dirname, \"%s_%s\" % (get_param(\"file_prefix\"),\r\n \"stacked_fairness\"))\r\n file = open(filename, \"w\")\r\n file.write(\"time\\trunning_tasks\\n\")\r\n previous_time = -1\r\n # Write in reverse order so that we automatically get the last event\r\n # for each time.\r\n for time, running_tasks in reversed(self.new_running_tasks):\r\n if time != previous_time:\r\n if previous_time != -1:\r\n file.write(\"%d\\t\" % time)\r\n for user in range(get_param(\"num_users\")):\r\n file.write(\"%d\\t\" % running_tasks[user])\r\n file.write(\"\\n\")\r\n previous_time = time", "def writeRouteSequence(self):\n print \"writing route sequence\"\n f = open(PublicTransit.OUTFILE_NAME, 'wb')\n if (PublicTransit.LINE_FILE_TYPE == LineFileType.PTLINE):\n lines = [\";;<<PT>><<LINE>>;;\" + os.linesep]\n elif (PublicTransit.LINE_FILE_TYPE == LineFileType.TRNBUILD):\n lines = [\";;<<Trnbuild>>;;\" + os.linesep]\n\n for t in self.transitRoutes:\n if t in self.stopsByRoute:\n i = 0\n self.transitRoutes[t].nodeSequence = []\n prevLinkId = -1\n # Bus routes have a link sequence from BusRouteTraversalEdges. 
Others just have stops.\n if (len(self.transitRoutes[t].linkSequence) > 0):\n for link in self.transitRoutes[t].linkSequence:\n # make sure this link is within the region (i.e., it is in linksDict)\n if (link in self.linksDict):\n nodeToAppend = -1\n if (i == 0):\n nodeToAppend = self.stopsByRoute[t][0].tanaNode\n if (nodeToAppend == -1):\n if (self.linksDict[link].oneWay == \"FT\"):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n elif (self.linksDict[link].oneWay == \"TF\"):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n else: # open in both directions; determine traversal direction\n nodeToAppend = -self.linksDict[link].fromNode.nodeId \n elif (i == 1):\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n if (self.linksDict[link].oneWay == \"FT\"):\n if (self.stopsByRoute[t][0].tanaNode != self.linksDict[link].fromNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].fromNode.nodeId)\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (self.linksDict[link].oneWay == \"TF\"):\n if (self.stopsByRoute[t][0].tanaNode != self.linksDict[link].toNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].toNode.nodeId)\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n else: # open in both directions\n if (abs(self.transitRoutes[t].nodeSequence[0]) == self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (abs(self.transitRoutes[t].nodeSequence[0]) == self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n elif (self.transitRoutes[t].linkSequence[0] in self.linksDict and \n self.linksDict[self.transitRoutes[t].linkSequence[0]].toNode.nodeId == self.linksDict[link].fromNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].fromNode.nodeId)\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (self.transitRoutes[t].linkSequence[0] in self.linksDict and \n self.linksDict[self.transitRoutes[t].linkSequence[0]].fromNode.nodeId == self.linksDict[link].toNode.nodeId):\n self.transitRoutes[t].nodeSequence.append(-self.linksDict[link].toNode.nodeId)\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n\n elif (prevLinkId != link and prevLinkId != -1): # ensure there are no repeated links\n if (self.linksDict[link].oneWay == \"FT\"):\n if (len(self.transitRoutes[t].nodeSequence) > 0 and \n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (len(self.transitRoutes[t].nodeSequence) > 0):\n self.transitRoutes[t].nodeSequence.pop()\n if (len(self.transitRoutes[t].nodeSequence) > 0 and\n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n\n elif (self.linksDict[link].oneWay == \"TF\"):\n if (len(self.transitRoutes[t].nodeSequence) > 0 and\n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n elif (len(self.transitRoutes[t].nodeSequence) > 0):\n self.transitRoutes[t].nodeSequence.pop()\n if (len(self.transitRoutes[t].nodeSequence) > 0 and\n abs(self.transitRoutes[t].nodeSequence[-1]) == self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n\n else: # open in both directions\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n # determine direction based on the previous node in the sequence. 
If the previous\n # node is the same as this link's from node, append the toNode; otherwise append the fromNode.\n if (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n # previous link doesn't connect to this because the previous link was a duplicate\n else:\n self.transitRoutes[t].nodeSequence.pop()\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n # remove the last node in the sequence and check if the one before connects to this one\n if (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].fromNode.nodeId):\n nodeToAppend = -self.linksDict[link].toNode.nodeId\n elif (abs(self.transitRoutes[t].nodeSequence[-1]) == \\\n self.linksDict[link].toNode.nodeId):\n nodeToAppend = -self.linksDict[link].fromNode.nodeId\n\n # if the node is a stop on this route, set the node ID positive\n if (nodeToAppend != -1):\n if (i > 0 and abs(nodeToAppend) in [st.tanaNode for st in self.stopsByRoute[t]]):\n nodeToAppend = -1 * nodeToAppend\n self.transitRoutes[t].nodeSequence.append(nodeToAppend)\n prevLinkId = link\n \n i += 1\n # if the last node is not a stop, remove it\n if (len(self.transitRoutes[t].nodeSequence) > 0 and self.transitRoutes[t].nodeSequence[-1] < 0):\n del(self.transitRoutes[t].nodeSequence[-1])\n \n # if there are no links for the route, just record the stops as the nodes\n else:\n self.transitRoutes[t].nodeSequence = [n.tanaNode for n in self.stopsByRoute[t] if n.tanaNode != -1]\n \n # Only write routes with a node sequence.\n if (len(self.transitRoutes[t].nodeSequence) > 0):\n lines.append(self.__getPrintString(t, PublicTransit.LINE_FILE_TYPE) + os.linesep)\n else:\n print \"No node sequence for \" + str(t) + \" (\" + self.transitRoutes[t].new_name + \")\"\n f.writelines(lines)\n f.close()", "def generate_teacher_schedule_primary(N_classes):\n\n\tassert N_classes % 2 == 0, 'number of classes must be even'\n\n\tN_teachers = get_N_teachers('primary', N_classes)\n\tteacher_nodes = ['t{:04d}'.format(i) for i in range(1, N_teachers + 1)]\n\n\tN_teaching_hours = get_teaching_hours('primary')\n\tmax_hours, N_weekdays, weekend_days = get_teaching_framework()\n\n\tschedule = {t:[] for t in teacher_nodes}\n\n\t# the first N_teaching_hours / 2 hours are taught by teachers 1 to \n\t# N_classes:\n\tfor i in range(1, N_classes + 1):\n\t\tschedule['t{:04d}'.format(i)].extend([i] * int(N_teaching_hours / 2))\n\n\t# the rest of the teachers take a break in the faculty room\n\tfor i in range(N_classes + 1, N_teachers + 1):\n\t\tschedule['t{:04d}'.format(i)].extend([pd.NA] * int(N_teaching_hours/2))\n\n\t# the next two hours are shared between the teachers of the \n\t# primary subjects and additional teachers for the secondary subject, \n\t# such that every teacher sees a total of two different classes every day\n\tfor i, j in enumerate(range(N_classes + 1, N_teachers + 1)):\n\t\tschedule['t{:04d}'.format(j)].append(i + 1)\n\t\tschedule['t{:04d}'.format(j)].append(i + int(N_classes / 2) + 1)\n\tfor i,j in enumerate(range(1, int(N_classes / 2) + 1)):\n\t\tschedule['t{:04d}'.format(j)].append(i + int(N_classes / 2) + 1)\n\t\tschedule['t{:04d}'.format(j)].append(pd.NA)\n\tfor i,j in enumerate(range(int(N_classes / 2) + 1, N_classes + 1)):\n\t\tschedule['t{:04d}'.format(j)].append(pd.NA)\n\t\tschedule['t{:04d}'.format(j)].append(i + 
1)\n\n\t# students and teachers spend the rest of the day (until hour 9) at home\n\tfor i in range(0, max_hours - N_teaching_hours):\n\t\tfor t in teacher_nodes:\n\t\t\tschedule[t].append(pd.NA)\n\n\t# convert the schedule to a data frame\n\tschedule_df = pd.DataFrame(columns=['teacher'] + ['hour_{}'.format(i)\\\n\t\t\t\t for i in range(1, max_hours + 1)])\n\tschedule_df['teacher'] = teacher_nodes * N_weekdays\n\titerables = [range(1, N_weekdays + 1), teacher_nodes]\n\tindex = pd.MultiIndex.from_product(iterables, names=['weekday', 'teacher'])\n\tschedule_df.index = index\n\tschedule_df = schedule_df.drop(columns = ['teacher'])\n\n\tfor wd in range(1, N_weekdays + 1):\n\t\tfor t in teacher_nodes:\n\t\t\tfor hour, c in enumerate(schedule[t]):\n\t\t\t\tif wd not in weekend_days:\n\t\t\t\t\tschedule_df.loc[wd, t]['hour_{}'.format(hour + 1)] = c\n\n\treturn schedule_df", "def output_schedule_registrar(cout, schedule_d, courses_to_mt_d):\n\n schedule_score.output_course_schedule(cout, make_sched_d_from_solution(schedule_d, courses_to_mt_d))", "def generate_tasks(self, task):", "def export_pod_schedule(self, r_id, matches=None,\n update_flow=True, print_hanging=True):\n if matches is None:\n matches = self.get_matches(update_flow)\n\n p_matches = self.get_pod_centered_view(matches, print_hanging)\n out_strs = []\n for pod_name in sorted(self.pod_info['name']):\n p_idx = self.pod_info['name'].index(pod_name)\n if p_matches[pod_name]:\n _out_strs = []\n for s_idx, m_idx in p_matches[pod_name]:\n s_idx += 2 # convert from UTC to UTC+1\n d_idx = s_idx//SLOT_NUM\n s_idx = s_idx%SLOT_NUM\n\n _out_strs.append('{},{},{},{},{},{},\\n'.format(\n pod_name, self.pod_info['tz_group'][p_idx],\n self._get_day_str(d_idx), self._get_slot_str(s_idx),\n ' '.join([\n self.mentor_info['first_name'][m_idx],\n self.mentor_info['last_name'][m_idx]\n ]),\n self.mentor_info['email'][m_idx],\n ))\n out_strs += sorted(_out_strs)\n else:\n out_strs.append('{},{},{},{},{},{},\\n'.format(\n pod_name, self.pod_info['tz_group'][p_idx],\n '', '', '', '',\n ))\n\n with open(f'pod.schedule_{r_id}.csv', 'w') as f:\n f.write('pod,pod time zone group,day (utc+1),slot (utc+1),mentor,mentor e-mail,zoom link\\n')\n for out_str in out_strs:\n f.write(out_str)", "def save(self, fn: str) -> None:\n fout = open(fn, 'w')\n for t,x in zip(self.times,self.milestones):\n fout.write('%f\\t%d '%(t,len(x)))\n fout.write(' '.join([str(xi) for xi in x]))\n fout.write('\\n')\n fout.close()", "def export_tasks(self, samples, features, export_id):\n samples_for_sharding = samples.randomColumn('shard_split')\n for i in range(self.num_shards):\n range_min = float(i) / float(self.num_shards)\n range_max = float(i + 1) / float(self.num_shards)\n range_filter = ee.Filter.And(\n ee.Filter.gte('shard_split', range_min),\n ee.Filter.lt('shard_split', range_max))\n samples_to_export = samples_for_sharding.filter(range_filter)\n\n task = ee.batch.Export.table.toCloudStorage(\n collection=samples_to_export,\n description=export_id + \"_%i\" % i,\n bucket=self.bucket,\n fileNamePrefix=self.directory + '/' + export_id + \"_%i\" % i,\n fileFormat='TFRecord',\n selectors=features,\n maxWorkers=2000)\n\n # Can be a stopping call if TaskManager if busy.\n self.task_manager.submit(task)", "def check_if_schedule_finished(self):\n tot_num_tasks_scheduled = sum(self.is_task_finished[0])\n if tot_num_tasks_scheduled > 19 or self.t > 150:\n self.data_done_generating = True\n if self.t > 150:\n print('Schedule failed to create')\n print('Schedule will not be copied')\n 
self.did_schedule_fail = True\n else:\n print('Successful schedule created')\n # copy rows into another excel file\n # with open(self.filepath, 'r') as csvfile, open(self.writepath, 'a') as outfile:\n # data = (csv.reader(csvfile))\n # writer = csv.writer(outfile)\n # for row in data:\n # writer.writerow(row)\n #\n # with open(self.second_file_path, 'r') as csvfile, open(self.writepath2, 'a') as outfile:\n # data = (csv.reader(csvfile))\n # writer = csv.writer(outfile)\n # for row in data:\n # writer.writerow(row)\n\n print('1 schedule created.')", "def generate_teacher_schedule_secondary(N_classes):\n\tassert N_classes % 2 == 0, 'number of classes must be even'\n\n\tN_hours = get_teaching_hours('secondary')\n\tN_teachers = get_N_teachers('secondary', N_classes)\n\tteacher_nodes = ['t{:04d}'.format(i) for i in range(1, N_teachers + 1)]\n\tmax_hours, N_weekdays, weekend_days = get_teaching_framework()\n\n\t# we create the schedule for teachers by first creating a list of teacher \n\t# node IDs and then reshaping it such that it fits the N_hours X N_classes \n\t# format. The schedule is created in such a way that it ensures every class \n\t# is taught by at least one teachers during every hour in N_hours. \n\tteacher_list = list(range(1, int(N_teachers * 2/3) + 1))\n\tteacher_list.extend(list(range(1, int(N_teachers * 2/3) + 1)))\n\tteacher_list.extend(list(range(1, N_teachers + 1)))\n\tteacher_list.extend(list(range(1, int(N_teachers * 1/3) + 1)))\n\tteacher_list.extend(list(range(int(N_teachers * 2/3), N_teachers + 1)))\n\ttmp_list = list(range(1, int(N_teachers * (1/3))))\n\ttmp_list.reverse()\n\tteacher_list.extend(tmp_list)\n\tteacher_list = np.asarray(teacher_list)\n\tteacher_array = teacher_list[0: N_hours * N_classes].reshape((N_hours, N_classes))\n\n\t# distill the first teacher schedule into a DataFrame with a hierarchical\n\t# index of form [weekday, teacher]\n\tfirst_teacher_schedule = pd.DataFrame(columns=['hour'] + ['class_{}'.format(i) for i in range(1, N_classes + 1)])\n\tfirst_teacher_schedule['hour'] = [i for i in range(1, N_hours + 1)]\n\tfor i in range(0, N_classes):\n\t\tfirst_teacher_schedule['class_{}'.format(i + 1)] = teacher_array[0:,i]\n\tfirst_teacher_schedule.index = first_teacher_schedule['hour']\n\tfirst_teacher_schedule = first_teacher_schedule.drop(columns = ['hour'])\n\n\t# create the overall teacher schedule of form (N_weekdays * N_teachers) X\n\t# N_hours by drawing information from the first_teacher_schedule \n\tschedule_df = pd.DataFrame(columns=['teacher'] + ['hour_{}'.format(i) for\\\n\t\t\t i in range(1, max_hours + 1)])\n\tschedule_df['teacher'] = teacher_nodes * N_weekdays\n\titerables = [range(1, N_weekdays + 1), teacher_nodes]\n\tindex = pd.MultiIndex.from_product(iterables, names=['weekday', 'teacher'])\n\tschedule_df.index = index\n\tschedule_df = schedule_df.drop(columns = ['teacher'])\n\n\tfor c in range(1, N_classes + 1):\n\t\tfor hour in range(1, N_hours + 1):\n\t\t\tt1 = first_teacher_schedule.loc[hour, 'class_{}'.format(c)]\n\t\t\tfor wd in range(1, N_weekdays + 1):\n\t\t\t\tif wd not in weekend_days:\n\t\t\t\t\tschedule_df.loc[wd, 't{:04d}'.format(t1)]\\\n\t\t\t\t\t\t['hour_{}'.format(hour)] = c\n\n\tschedule_df = schedule_df.replace({np.nan:pd.NA})\n\t# shift afternoon teaching hours by one to make space for the lunch break\n\t# in the fifth hour:\n\tschedule_df = schedule_df.rename(columns={\n\t\t\t\t'hour_9':'hour_5', 'hour_5':'hour_6', 'hour_6':'hour_7',\n\t\t\t\t'hour_7':'hour_8', 'hour_8':'hour_9'})\n\tschedule_df = 
schedule_df[['hour_{}'.format(i) \\\n\t\t\t\tfor i in range(1, max_hours + 1)]]\n\n\treturn schedule_df", "def write_table_times(io_stream, customers):\n io_stream.write('table time\\n')\n times = construct_distance_matrix(customers, element_type=int)\n for row in times:\n row = [str(e) for e in row]\n io_stream.write(';'.join(row) + '\\n')", "def _create_schedules(self):\n\n ''''''", "def populate_a_matrix_per_schedule(self):\n self.matrixes = []\n for i in range(self.num_schedules):\n m = np.zeros((2048, 20))\n self.matrixes.append(m)\n for i, each_matrix in enumerate(self.matrixes):\n # lets look at elements of schedule 1\n for j in range(self.schedule_array_train_naive[i][0], self.schedule_array_train_naive[i][1] + 1):\n binary_embedding = self.total_binary_embeddings[j]\n index = self.pass_in_embedding_out_state_ID(binary_embedding)\n # action taken at this instance\n action = self.Y_train_naive[j]\n each_matrix[index][action] += 1\n total_sum = each_matrix.sum()\n self.matrixes[i] = np.divide(each_matrix, total_sum)\n\n print('n matrices have been generated')", "def write_traj(name,r_eq):\r\n f = open(name, 'w') #eqilibration.dump'\r\n N =len(r_eq[0,:,0])\r\n steps = len(r_eq[0,0,:])\r\n types = np.linspace(0,N-1,N)\r\n types = np.ones(N)\r\n types[1::3] = 2\r\n for kk in tqdm(range(steps)):\r\n f.write('ITEM: TIMESTEP \\n')\r\n f.write('{} \\n'.format(dt*kk))\r\n f.write('ITEM: NUMBER OF ATOMS \\n')\r\n f.write('{} \\n'.format(N))\r\n f.write('ITEM: BOX BOUNDS pp pp pp\\n')\r\n f.write('{} {} \\n'.format(-0,L))\r\n f.write('{} {} \\n'.format(-0,L))\r\n f.write('{} {} \\n'.format(-0,L))\r\n f.write('ITEM: ATOMS id type x y z Radius \\n')\r\n for ii in range(N):\r\n f.write(' {} {} {} {} {} {}\\n'.format(ii+1,types[ii],r_eq[0,ii,kk],r_eq[1,ii,kk],r_eq[2,ii,kk], .2e-10, ))\r\n f.close() \r\n return", "def combine(schedules,agent_order_list,highest_timestep):\r\n action=[]\r\n for timestep in range(highest_timestep-1):\r\n current_action = dict()\r\n index = 0\r\n for agent in agent_order_list:\r\n if timestep < len(schedules[index]):\r\n current_action[agent] = schedules[index][timestep][agent]\r\n else:\r\n current_action[agent] = 4\r\n index += 1\r\n action.append(current_action)\r\n return action", "def create_submission_file(self, perplexity_list, task = 'A'):\n \n if task in ['A', 'B', 'C']:\n fileName = 'group12.perplexity' + task\n else:\n fileName = 'group12.' 
+ task\n \n with open(fileName, 'w') as file_handler:\n for item in perplexity_list:\n if not(self.task=='1.2'):\n if np.isnan(item): continue #skip nans comming from dummy scentences with only pads\n file_handler.write(\"{}\\n\".format(item))\n print('output file created for task: ', task)", "def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"", "def output_into_file(self, path: str):\n # Creating path if not exist\n Path(path).mkdir(parents=True, exist_ok=True)\n # Writing every day as a csv file\n for day in self:\n with open(f\"{path}/{day.name}.csv\", \"w\") as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # First line / Title\n writer.writerow([\" \", day.name])\n for shift in day:\n employees = \", \".join([e.name for e in shift.employees])\n writer.writerow([f\"{shift.start}-{shift.end}\", employees])", "def _schedule(self,task_dict):\n times = [time(), None, None, None] # (schedule timestamp, execution timestamp, stop timestamp, get timestamp)\n result_id = self._extract_features.remote(self, times) # calculation is started in new remote task \n task_dict[result_id] = self._idx # add sample index ", "def prepare_rw_jobs(self, repeats):\n \n \n #The tasks we need to go through to append the report output\n taskList = [\n 'steadyState',\n 'timeCourse',\n 'scan',\n 'metabolicControlAnalysis',\n 'optimization',\n 'parameterFitting',\n 'fluxMode',\n 'lyapunovExponents',\n 'timeScaleSeparationAnalysis',\n 'sensitivities',\n 'moieties'\n ]\n \n \n task_report_targets = {} #Store the report output targets \n #Create a new COPASI file for each repeat\n #Keep a note of the output files we're creating\n model_files = []\n output_files = []\n for i in range(repeats):\n #For each task, if the report output is set, append it with '_i'\n for taskName in taskList:\n try:\n task = self._getTask(taskName)\n report = task.find(xmlns + 'Report')\n if i==0:\n task_report_targets[taskName] = report.attrib['target']\n report.attrib['target'] = str(i) + '_' + task_report_targets[taskName]\n if i==0:\n if task.attrib['scheduled'] == 'true':\n output_files.append(task_report_targets[taskName])\n \n except:\n pass #It's possible not every task has a report set. If this is the case, ignore it!\n \n filename = 'auto_copasi_1.%d.cps'%i\n target = os.path.join(self.path, filename)\n model_files.append(filename)\n \n self.write(target)\n \n return model_files, output_files" ]
[ "0.6155076", "0.58488786", "0.575756", "0.56989926", "0.5652544", "0.5581654", "0.5513856", "0.55035716", "0.5478721", "0.5475027", "0.54018384", "0.5338996", "0.5317282", "0.5311656", "0.5308736", "0.5252338", "0.5228852", "0.51849914", "0.51674294", "0.5137591", "0.510102", "0.50995463", "0.5094915", "0.5066558", "0.5047784", "0.5042383", "0.50319594", "0.5022679", "0.5017303", "0.5005864" ]
0.6364437
0
Checks finish condition for schedule
def check_if_schedule_finished(self): tot_num_tasks_scheduled = sum(self.is_task_finished[0]) if tot_num_tasks_scheduled > 19 or self.t > 150: self.data_done_generating = True if self.t > 150: print('Schedule failed to create') print('Schedule will not be copied') self.did_schedule_fail = True else: print('Successful schedule created') # copy rows into another excel file # with open(self.filepath, 'r') as csvfile, open(self.writepath, 'a') as outfile: # data = (csv.reader(csvfile)) # writer = csv.writer(outfile) # for row in data: # writer.writerow(row) # # with open(self.second_file_path, 'r') as csvfile, open(self.writepath2, 'a') as outfile: # data = (csv.reader(csvfile)) # writer = csv.writer(outfile) # for row in data: # writer.writerow(row) print('1 schedule created.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def complete(self):\r\n if self.scheduler_launch_time == INVALID_TIME:\r\n print \"Missing task scheduler launch time\"\r\n return False\r\n if self.node_monitor_launch_time == INVALID_TIME:\r\n\t print \"Missing task node monitor launch time\"\r\n\t return False\r\n\tif self.completion_time == INVALID_TIME:\r\n\t print \"Missing task completion time\"\r\n\t return False\r\n\tif self.clock_skew == INVALID_TIME_DELTA:\r\n print \"Missing task clock skew\"\r\n\t return False\r\n\treturn True", "def isFinished():", "def isFinished():", "def isFinished():", "def _is_finished(self, as_of):\n if self.is_one_off():\n last_billing_cycle = self.get_billing_cycles()[self.total_billing_cycles - 1]\n return last_billing_cycle.date_range.upper <= as_of\n else:\n return False", "def guardFinish(self):\n portal_workflow = getToolByName(self, 'portal_workflow')\n getInfoFor = portal_workflow.getInfoFor\n objects = self.objectValues(['PM_Action'])\n for obj in objects:\n state = getInfoFor(obj, 'review_state')\n if state not in ('finished', 'cancelled'):\n return False\n return True", "def has_finished():", "def test_is_finished(self):\n experiment = Experiment(TasksMock())\n self.assertEquals(False, experiment.is_finished())\n for _ in range(0, 17):\n experiment.press_b_down(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_up(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_down(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_up(time.time())\n self.assertEquals(True, experiment.is_finished())", "def _check_results(self):\n if not 'EXECUTION OF GAMESS TERMINATED NORMALLY' in self.file_dic['output']:\n print self.job_name + \" didn't finish\"\n raise TypeError('Calculation didn\\'t finish')", "def _check_for_finished_job(self):\n raise NotImplementedError", "async def _check_schedule(self, now, last):\n\n if self._schedule is None:\n return\n\n for event in self._schedule.events:\n if event.begin <= now:\n if event.begin > last:\n await self._announce_event(event)", "def finishWait(self):\r\n self.scheduler.finishWait()", "def checkMissionEnd(self) -> bool:\n if getTimestamp() - self.mission['timestamp'] < self.TAKE_OFF_DELAY:\n return False\n drone: Drone\n for drone in self.dronesSet.getDrones().values():\n if drone['state'] != 'onTheGround' and drone['state'] != 'crashed':\n return False\n\n self.endMission()\n return True", "def isFinish(self):\n return self.finish", "def isFinished(self):\r\n try:\r\n output = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n if self.jobId in output:\r\n if output.split()[4] == \"Eqw\":\r\n #If the job fails, print a warning, and wait a minute so the user can check why the job fails,\r\n #before resubmitting the job.\r\n logging.warning(\"job \" + output.split()[2] + \" failed to run, resubmitting in one minute\")\r\n time.sleep(60)\r\n output = Popen(\"qdel \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n self.submit()\r\n return False\r\n else:\r\n logging.info(\"job with ID: \" + self.jobId + \" is finished.\")\r\n return True\r\n \r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "def _is_finish(self, pos):\r\n return self.course[pos[0], pos[1]] == 2", "def final_check(self, schedule: Schedule) -> bool:\n for day in range(schedule.n_weekdays):\n for track in range(schedule.n_tracks):\n if schedule.count_courses_on_day(day, track) < 2 and 
schedule.count_courses_on_day(day, track) != 0: \n return False\n\n return True", "def check_game_end(self):\r\n\r\n if np.all(self.remaining == -1): # end of game\r\n self.show_results() # show the final results\r\n sys.exit() # exit the program\r", "def __checkTimer(self):\n if self.__endTime is None:\n raise AssertionError('The end time had not been set.')\n if time.time() > self.__endTime:\n self._logError('Maximum Run Time Reached !!')\n raise _MaxRunTimeReachedError('')", "def is_finished(self):\n self.refresh()\n return self.progress.remaining_budget is not None and self.progress.remaining_budget <= 0", "def check(self):\r\n boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))\r\n\r\n if self.hourly and not self.last_executed:\r\n return 0\r\n \r\n if self.daily and not self.last_executed:\r\n if int(self.hour) == self.now.hour:\r\n return 0\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60\r\n\r\n delta = self.now - self.last_executed\r\n if self.hourly:\r\n if delta.seconds >= 60*60:\r\n return 0\r\n else:\r\n return 60*60 - delta.seconds\r\n else:\r\n if int(self.hour) == self.now.hour:\r\n if delta.days >= 1:\r\n return 0\r\n else:\r\n return 82800 # 23 hours, just to be safe\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60", "def _check_completed(self):\n current_rung_df = self.sieve_board.loc[\n self.sieve_board['status'].isin(\n [StatusType.WAITTING, StatusType.RUNNING])\n ]\n if current_rung_df.empty:\n return True\n else:\n return False", "def complete(self):\r\n\tif self.launch_time == INVALID_TIME:\r\n\t print \"Missing probe launch time\"\r\n return False\r\n if self.received_time == INVALID_TIME:\r\n print \"Missing probe received time\"\r\n return False\r\n if self.completion_time == INVALID_TIME:\r\n print \"Missing probe completion time\"\r\n return False\r\n return True", "def schedule_monitor(schedule):\n if schedule[\"state\"] == EC2State.STOPPED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= 7 - schedule[\n \"schedule\"\n ]:\n schedule[\"state\"] = EC2State.STARTED\n elif schedule[\"state\"] == EC2State.STARTED:\n if (date.today() - schedule[\"lastStateChange\"]).days >= schedule:\n schedule[\"state\"] = EC2State.STOPPED\n else:\n return schedule, False\n\n return schedule, True", "def is_done(self):\n return time.time() - self._start > self._time", "def checkAtFinalTime():\n global final_time\n if final_time <= current_second:\n return True\n return False", "def complete(self, verbose=False):\r\n #if self.scheduler_launch_time == INVALID_TIME: print \"scheduler launch\"\r\n #if self.node_monitor_launch_time == INVALID_TIME: print \"nm launch\"\r\n #if self.completion_time == INVALID_TIME: print \"completion\"\r\n if verbose:\r\n if self.node_monitor_get_task_time == INVALID_TIME:\r\n print \"Task %s incomplete: node monitor get_task time missing\" % self.id\r\n elif self.scheduler_launch_time == INVALID_TIME:\r\n print \"Task %s incomplete: Scheduler launch time missing\" % self.id\r\n elif self.node_monitor_launch_time == INVALID_TIME:\r\n print \"Task %s incomplete: Node monitor launch time missing\" % self.id\r\n elif self.completion_time == INVALID_TIME:\r\n print \"Task %s incomplete: Completion time missing\" % self.id\r\n return (self.node_monitor_get_task_time != INVALID_TIME and\r\n self.scheduler_launch_time != INVALID_TIME and\r\n self.node_monitor_launch_time != INVALID_TIME and\r\n 
self.completion_time != INVALID_TIME)", "def check_finish(self):\r\n return not self.proc.is_alive()", "def DueToRun(self):\n if self.Get(self.Schema.DISABLED):\n return False\n\n cron_args = self.Get(self.Schema.CRON_ARGS)\n last_run_time = self.Get(self.Schema.LAST_RUN_TIME)\n now = rdfvalue.RDFDatetime().Now()\n\n # Its time to run.\n if (last_run_time is None or\n now > cron_args.periodicity.Expiry(last_run_time)):\n\n # Do we allow overruns?\n if cron_args.allow_overruns:\n return True\n\n # No currently executing job - lets go.\n if self.Get(self.Schema.CURRENT_FLOW_URN) is None:\n return True\n\n return False", "def finish(self, finish_time=None):\n pass" ]
[ "0.70576763", "0.660566", "0.660566", "0.660566", "0.6559862", "0.6503821", "0.6443644", "0.64333826", "0.6411796", "0.6394543", "0.638096", "0.636754", "0.63611645", "0.6337765", "0.63229", "0.629554", "0.6281088", "0.6274435", "0.6242915", "0.623993", "0.6238348", "0.6218028", "0.6202798", "0.6191366", "0.61872303", "0.61824715", "0.61675763", "0.6139901", "0.6058332", "0.60421634" ]
0.7545543
0
Retrieve a list of the equities and the quantity held on a particular date by a particular user, if they exist
def get_portfolio(username): user_obj = User.query.filter(User.username == username).first() date = request.args.get('date') if user_obj is None: return util.build_json_response('User does not exist') if not util.is_valid_date_string(date): return util.build_json_response("Not a valid date of the form YYYY-MM-DD") following_date = util.add_days_to_date(date, 1) equities = db.session.query(Portfolio.ticker, func.sum(Portfolio.quantity))\ .filter(Portfolio.user_id == user_obj.id) \ .filter(Portfolio.transaction_date <= following_date) \ .group_by(Portfolio.ticker).all() result = dict() for equity in equities: result[equity[0]] = equity[1] return util.build_json_response("Portfolio retrieved", equities=result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_by_date():\n\n input_date = request.args.get('date')\n \n user_id = session['user']\n user_inv = (UserInv.query.filter_by(user_id=user_id)).all()\n\n inv_by_date = []\n\n for item in user_inv: \n if str(item.inv.date_of_investment) == input_date:\n inv_by_date.append({\"company\": item.inv.company_name, \n \"quantity\": item.inv.quantity, \n \"cost\": item.inv.cost})\n print inv_by_date\n\n return jsonify(inv_by_date)", "def evaluate_portfolio(username):\n user_obj = User.query.filter(User.username == username).first()\n date = request.args.get('date')\n\n if user_obj is None:\n return util.build_json_response('User does not exist')\n\n if not util.is_valid_date_string(date):\n return util.build_json_response(\"Not a valid date of the form YYYY-MM-DD\")\n\n following_date = util.add_days_to_date(date, 1)\n equities = db.session.query(Portfolio.ticker, func.sum(Portfolio.quantity))\\\n .filter(Portfolio.user_id == user_obj.id) \\\n .filter(Portfolio.transaction_date <= following_date) \\\n .group_by(Portfolio.ticker).all()\n\n e_total = 0\n for equity in equities:\n price = equity[1] * market_data.get_stock_price(equity[0], date, 'low')\n e_total += price\n\n total = round(e_total + user_obj.balance, 2)\n cash = round(user_obj.balance, 2)\n e_total = round(e_total, 2)\n\n return util.build_json_response(\"Portfolio totals retrieved\", equity_total=e_total, cash_balance=cash, account_total=total)", "def get_equity_data(date=None):\n\n equity_info_list = []\n if date is None:\n equity_data_zip_file_url = __class__.get_href_for_latest_equity_data()\n else:\n equity_data_zip_file_url = __class__.get_zip_file_url_for_specific_date(date)\n\n url = urllib.request.urlopen(equity_data_zip_file_url)\n\n with ZipFile(BytesIO(url.read())) as my_zip_file:\n for contained_file in my_zip_file.namelist():\n with my_zip_file.open(contained_file) as csv_file:\n df = pd.read_csv(csv_file)\n for idx, row in df.iterrows():\n code = row['SC_CODE']\n name = row['SC_NAME'].strip().strip(\",.\")\n group = row['SC_GROUP'].strip().strip(\",.\")\n type_abbr = row['SC_TYPE'].strip().strip(\",.\")\n open = row['OPEN']\n high = row['HIGH']\n low = row['LOW']\n close = row['CLOSE']\n last = row['LAST']\n prev_close = row['PREVCLOSE']\n no_of_trades = row['NO_TRADES']\n no_of_shares = row['NO_OF_SHRS']\n net_turnov = row['NET_TURNOV']\n\n equity_info = EquityInfo(code, name, group, type_abbr, open, high, low, close, last,\n prev_close, no_of_trades, no_of_shares, net_turnov)\n equity_info_list.append(equity_info)\n\n return equity_info_list", "def fetchEquityDataForSingleDay(on_date, useCache=False):\n return fetchEquityData(on_date, on_date, useCache)", "def _get(self, user_id, query=None):\n if not query:\n ticket_data = DB_TICKET_TABLE.search(where('user_id') == int(user_id))\n else:\n query = (where('user_id') == int(user_id)) & query\n ticket_data = DB_TICKET_TABLE.search(query)\n\n res = {\n \"total_queried\" : len(ticket_data),\n \"_embedded\" : {\n \"tickets\" : self.embed_ticket_data_in_result(ticket_data)\n },\n \"_links\" : self.make_links({\n \"self\" : UserTicketList.get_self_url(user_id),\n \"contained_in\" : User.get_self_url(user_id)\n })\n }\n return res", "def get_all_stocks(userId):\r\n print(\"<get_all_stocks()>\")\r\n print(\"userId: \", userId)\r\n stocks = Stock.objects(user_id=userId)\r\n return jsonify(json.loads(stocks.to_json()))", "def get_queryset(self):\r\n username = self.kwargs['username']\r\n return models.Experience.objects.filter(username = username).order_by('-startdate')", 
"def portfolio_view(request):\n\n try:\n query = request.dbsession.query(Stock)\n user_entries = query.filter(Stock.account_id == request.authenticated_userid)\n except DBAPIError:\n return DBAPIError(DB_ERR_MSG, content_type='text/plain', status=500)\n\n return {'stocks': all_entries}", "def available_employees(self,work_trips_by_date):\r\n\r\n employee_list = self.get_updated_list_from_DB('employee')\r\n available_employees_list = []\r\n total_sets = set()\r\n set_list = []\r\n\r\n for i in range(len(work_trips_by_date)):\r\n set_list.append(set(work_trips_by_date[i])) \r\n \r\n total_sets = set_list[0]\r\n \r\n if len(work_trips_by_date) != 1: \r\n for i in range(1,len(set_list)):\r\n total_sets.update(set_list[i])\r\n\r\n for line in employee_list:\r\n if line[0] not in total_sets:\r\n available_employees_list.append(line)\r\n\r\n row_names = ['id', 'name' ,'role' ,'rank'] #return columns\r\n employee_index_list = self.find_index_from_header('employee', row_names)\r\n filtered_available_employees = self.filter_by_header_index(employee_index_list, available_employees_list)\r\n\r\n available_employees_list.pop(0)\r\n\r\n return filtered_available_employees", "def getInterestedUsers():", "def getCartDetailsForUser():\n try:\n result = json.loads(request.get_data(as_text=True))\n userId = request.json['userId']\n print(\"In get of shopping cart, user id :\"+userId)\n items = myCart.find({\"userId\":userId},{\"_id\":0})\n data = dumps(items)\n print(str(items))\n stats = myCart.aggregate(\n [\n { \"$match\" : { \"userId\" : userId} },\n { \"$group\": \n { \n \"_id\": { \"userId\": \"$userId\" },\n \"totalAmount\": \n { \"$sum\": \n { \"$multiply\": [ \"$price\", \"$quantity\" ] }\n },\n \"totalQuantity\": { \"$sum\": \"$quantity\" } }\n }\n \n ]\n )\n statistics = dumps(stats)\n return jsonify({\"Status\" : \"OK\", \"data\" : data, \"stats\":statistics})\n except Exception, e:\n return jsonify(status='ERROR',message=str(e),userId=userId)", "async def list(self, ctx, user=None, date=None):\n if not user:\n user = ctx.message.author\n else:\n user = util.GetUserFromNameStr(ctx.message.server.members, user)\n change = GetPortfolioChange(user.id)\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n await self.bot.say(\n '```%s\\'s portfolio:\\n'\n 'Total Value: $%s (%.2f%s) \\n'\n '%s```' % (user, portfolio.Value(), change, \"%\", portfolio.AsTable()))", "def history():\n\n user = session.get(\"user_id\")\n rows = db.execute(\"Select TransDate as Date, Stock, Price, case when Num < 0 then 'Sell' else 'Buy' end as Type, Num as Quantity from portfolio where User = :User order by Date asc\", User = session.get(\"user_id\"))\n\n\n return render_template(\"hist.html\", rows = rows)", "def portfolio():\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n \n #Create list of comanies user owns stock in\n companies = []\n for t in trans:\n if t.symbol not in companies:\n companies.append(t.symbol)\n\n #Create list of current stock dictionaries and total their values\n total = 0\n stocks = []\n for company in companies:\n trans = Transactions.query.filter_by(owner=session['user_id'], symbol=company).all()\n stock = {}\n stock['shares'] = 0\n for t in trans:\n stock['shares'] += t.shares\n if stock['shares'] > 0:\n stock['symbol'] = company\n stock['name'] = lookup(company)['name']\n stock['price'] = lookup(company)['price']\n stock['total'] = stock['shares'] * stock['price']\n stock['price'] = usd(stock['price'])\n stock['total'] = 
usd(stock['total'])\n total += float(stock['total'][1:].replace(',', ''))\n stocks.append(stock)\n\n #Set user cash and total values\n value = {}\n value['cash'] = usd(Users.query.filter_by(id=session['user_id']).first().cash)\n value['total'] = usd(total + float(value['cash'][1:].replace(',', '')))\n\n #Add values to list\n stocks.append(value)\n\n #Return list of dictionaries\n return stocks", "def get_item_prices(user_data):\n res = requests.get(url='http://127.0.0.1:5000/get_item_prices', json=user_data)\n return res.text", "def retrieve_equities(self, sids):\n return self._retrieve_assets(sids, self.equities, Equity)", "def get_transactions_for_user(user_id: int):\n transactions = get_equity_orders_by_user_id(user_id)\n response = jsonify(\n {\n \"user_id\": user_id,\n \"transactions\": [\n {\n \"ticker\": transaction.ticker.ticker,\n \"order_type\": transaction.order_type,\n \"price\": transaction.price,\n \"quantity\": transaction.quantity,\n \"time\": transaction.created_at,\n }\n for transaction in transactions\n ],\n }\n )\n response.status_code = 200\n return response", "def add_availability_week(date, user):\n diversions = diversion_for_week(date)\n for diversion in diversions:\n unavailable = diversion['Unavailable']\n if user in unavailable:\n unavailable.remove(user)\n available = diversion['Available']\n if user not in available:\n available.append(user)\n resp = table.update_item(\n Key={\"WeekOf\": date, \"Paper\": diversion['Paper']},\n ExpressionAttributeNames={\n \"#available\": \"Available\",\n \"#unavailable\": \"Unavailable\"\n },\n ExpressionAttributeValues={\n \":available\": available,\n \":unavailable\": unavailable\n },\n UpdateExpression=\"SET #available = :available, #unavailable = :unavailable\"\n )", "def get_stock_data(company, start_date_inc, stop_date_inc):\n\n api_key = 'Bo9P_cJnmf5EsQPp1Bdp'\n desired_cols = 'date,close'\n\n# ticker = 'FB'\n# start_date_inc = '20170801'\n# end_date_inc = '20170831'\n\n # format and send the request\n payload = {\n 'date.gte': start_date_inc,\n 'date.lte': stop_date_inc,\n 'ticker': company,\n 'qopts.columns': desired_cols,\n 'api_key': api_key\n }\n meta_url = r'https://www.quandl.com/api/v3/datatables/WIKI/PRICES'\n r = requests.get(meta_url, params=payload)\n\n # convert to a pandas dataframe\n df = pd.DataFrame(r.json()['datatable']['data'])\n if not df.empty:\n df.columns = ['date', 'price']\n df['date'] = pd.to_datetime(df['date'])\n\n return df", "def get_product_available(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n\n location_obj = self.pool.get('stock.location')\n warehouse_obj = self.pool.get('stock.warehouse')\n shop_obj = self.pool.get('sale.shop')\n\n user_obj = self.pool.get('res.users').browse(cr, 1, uid)\n\n states = context.get('states',[])\n what = context.get('what',())\n if not ids:\n ids = self.search(cr, uid, [])\n res = {}.fromkeys(ids, 0.0)\n if not ids:\n return res\n\n if context.get('shop', False) and context['shop']:\n warehouse_id = shop_obj.read(cr, 1, int(context['shop']), ['warehouse_id'])['warehouse_id'][0]\n if warehouse_id:\n context['warehouse'] = warehouse_id\n\n if context.get('warehouse', False) and context['warehouse']:\n lot_id = warehouse_obj.read(cr, 1, int(context['warehouse']), ['lot_stock_id'])['lot_stock_id'][0]\n if lot_id:\n context['location'] = lot_id\n\n if context.get('location', False) and context['location']:\n if type(context['location']) == type(1):\n location_ids = [context['location']]\n elif type(context['location']) in (type(''), 
type(u'')):\n location_ids = location_obj.search(cr, 1, [('name','ilike',context['location'])], context=context)\n else:\n location_ids = context['location']\n else:\n location_ids = []\n #wids = warehouse_obj.search(cr, uid, [], context=context)\n #for w in warehouse_obj.browse(cr, uid, wids, context=context):\n # location_ids.append(w.lot_stock_id.id)\n lids = location_obj.search(cr, 1, [])\n #print(lids, 'todas os locais', user_obj.company_id.id)\n for lo in location_obj.browse(cr, 1, lids, context=context):\n #print(lo.id, lo.company_id, lo.company_ids, user_obj.company_id.id)\n if lo.company_id and user_obj.company_id.id == lo.company_id.id:\n location_ids.append(lo.id)\n else:\n for co in lo.company_ids:\n if user_obj.company_id.id == co.id:\n location_ids.append(lo.id)\n\n # build the list of ids of children of the location given by id\n if context.get('compute_child', True):\n if len(location_ids) == 0:\n raise osv.except_osv(u'Atenção!', u'Não há local de estoque definido para a empresa/unidade!')\n\n child_location_ids = location_obj.search(cr, 1, [('location_id', 'child_of', location_ids)])\n location_ids = child_location_ids or location_ids\n\n # this will be a dictionary of the UoM resources we need for conversion purposes, by UoM id\n uoms_o = {}\n # this will be a dictionary of the product UoM by product id\n product2uom = {}\n for product in self.browse(cr, 1, ids, context=context):\n product2uom[product.id] = product.uom_id.id\n uoms_o[product.uom_id.id] = product.uom_id\n\n results = []\n results2 = []\n\n from_date = context.get('from_date',False)\n to_date = context.get('to_date',False)\n date_str = False\n date_values = False\n where = [tuple(location_ids),tuple(location_ids),tuple(ids),tuple(states)]\n if from_date and to_date:\n date_str = \"date>=%s and date<=%s\"\n where.append(tuple([from_date]))\n where.append(tuple([to_date]))\n elif from_date:\n date_str = \"date>=%s\"\n date_values = [from_date]\n elif to_date:\n date_str = \"date<=%s\"\n date_values = [to_date]\n if date_values:\n where.append(tuple(date_values))\n\n prodlot_id = context.get('prodlot_id', False)\n prodlot_clause = ''\n if prodlot_id:\n prodlot_clause = ' and prodlot_id = %s '\n where += [prodlot_id]\n elif 'prodlot_id' in context and not prodlot_id:\n prodlot_clause = 'and prodlot_id is null '\n\n # TODO: perhaps merge in one query.\n if 'in' in what:\n # all moves from a location out of the set to a location in the set\n cr.execute(\n 'select sum(product_qty), product_id, product_uom '\\\n 'from stock_move '\\\n 'where location_id NOT IN %s '\\\n 'and location_dest_id IN %s '\\\n 'and product_id IN %s '\\\n 'and state IN %s ' + (date_str and 'and '+date_str+' ' or '') +' '\\\n + prodlot_clause +\n 'group by product_id,product_uom',tuple(where))\n results = cr.fetchall()\n if 'out' in what:\n # all moves from a location in the set to a location out of the set\n cr.execute(\n 'select sum(product_qty), product_id, product_uom '\\\n 'from stock_move '\\\n 'where location_id IN %s '\\\n 'and location_dest_id NOT IN %s '\\\n 'and product_id IN %s '\\\n 'and state in %s ' + (date_str and 'and '+date_str+' ' or '') + ' '\\\n + prodlot_clause +\n 'group by product_id,product_uom',tuple(where))\n results2 = cr.fetchall()\n\n # Get the missing UoM resources\n uom_obj = self.pool.get('product.uom')\n uoms = map(lambda x: x[2], results) + map(lambda x: x[2], results2)\n if context.get('uom', False):\n uoms += [context['uom']]\n uoms = filter(lambda x: x not in uoms_o.keys(), uoms)\n if uoms:\n uoms = 
uom_obj.browse(cr, 1, list(set(uoms)), context=context)\n for o in uoms:\n uoms_o[o.id] = o\n\n #TOCHECK: before change uom of product, stock move line are in old uom.\n context.update({'raise-exception': False})\n # Count the incoming quantities\n for amount, prod_id, prod_uom in results:\n amount = uom_obj._compute_qty_obj(cr, 1, uoms_o[prod_uom], amount,\n uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)\n res[prod_id] += amount\n # Count the outgoing quantities\n for amount, prod_id, prod_uom in results2:\n amount = uom_obj._compute_qty_obj(cr, 1, uoms_o[prod_uom], amount,\n uoms_o[context.get('uom', False) or product2uom[prod_id]], context=context)\n res[prod_id] -= amount\n\n for prod_id in res:\n if isinstance(res[prod_id], D):\n res[prod_id] = float(res[prod_id])\n return res", "def get_queryset(self):\r\n username = self.kwargs['username']\r\n return models.Education.objects.filter(username = username) .order_by('-startdate')", "def _getdata(self, data):\n lines = []\n start_date = str(data['form']['start_date'])\n end_date = str(data['form']['end_date'])\n department_ids = data['form']['department_ids']\n\n vehicles_ids = self.pool.get('fleet.vehicle').search(self.cr, self.uid,\\\n [('department_id', 'in', department_ids)], context=self.context)\n\n fuel_qty_line_obj = self.pool.get('fuel.qty.line')\n\n sdate = datetime.strptime(start_date, \"%Y-%m-%d\")\n syear = sdate.year\n smonth = sdate.month\n edate = datetime.strptime(end_date, \"%Y-%m-%d\")\n eyear = edate.year\n emonth = edate.month\n\n fuel_qty_line_ids = fuel_qty_line_obj.search(self.cr, self.uid,\\\n [('vehicles_id', 'in', vehicles_ids)], context=self.context)\n\n\n\n counter = 1\n for qty_line in fuel_qty_line_obj.browse(self.cr, self.uid, \\\n fuel_qty_line_ids, context=self.context):\n current_m = int(qty_line.month)\n current_y = int(qty_line.year)\n start = current_m >= smonth and current_y >= syear\n end = current_m <= emonth and current_y <= eyear\n if start and end:\n line = {'type':str(counter)+\" : \"+\\\n qty_line.vehicles_id.type.name}\n line['vehicle_no'] = qty_line.vehicles_id.vin_sn\n line['spent'] = qty_line.spent_qty\n line['counter_no'] = str(qty_line.vehicles_id.odometer)+\" \"+\\\n qty_line.vehicles_id.odometer_unit\n line['date'] = qty_line.month+\"/\"+qty_line.year\n lines.append(line)\n counter += 1\n return lines", "def get_cart_info(user_name):\n\n user_name = auth.get_username_from_hash(user_name)\n cartDTO = user_handler.get_cart_info(user_name)\n cartDTO.sum = store_handler.calculate_cart_sum(cartDTO)\n return cartDTO", "def index():\n inventory = db.execute(\"SELECT symbol,quantity FROM inventory WHERE userid = :uid\", uid=session[\"user_id\"])\n cash = float(db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session[\"user_id\"])[0][\"cash\"])\n total = cash\n for i in inventory:\n stock = lookup(i[\"symbol\"])\n i[\"price\"] = stock[\"price\"]\n i[\"name\"] = stock[\"name\"]\n i[\"total\"] = usd(stock[\"price\"] * i[\"quantity\"])\n total += stock[\"price\"] * i[\"quantity\"]\n return render_template(\"index.html\", context={\"inventory\":inventory,\"total\":usd(total),\"cash\":usd(cash)})", "def filter_users_by_transaction_date(self, request):\n users = User.objects.filter(transactions__date=request.data[\"date\"])\n serializer = UserSerializer(users, many=True)\n return Response(serializer.data)", "def index():\n\n rows = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n print(\"rows= \" , rows)\n\n cash = rows[0] 
[\"cash\"]\n\n stocks = db.execute(\"SELECT * FROM transactions WHERE user_id = :user_id\", user_id=session[\"user_id\"])\n print(\"Stocks= \", stocks)\n\n holdings = 0\n for stock in stocks:\n print(stock[\"stock_code\"])\n stockDetail = lookup(stock[\"stock_code\"])\n print(\"StockDetail: \", stockDetail)\n stock_name = stockDetail[\"name\"]\n print(\"Stock Name: \", stock_name)\n\n if stockDetail == None:\n return apology(\"Not able to determine stock value\", 403)\n\n else:\n stockPrice = stockDetail[\"price\"]\n print(\"price of stock\", stockPrice)\n stock_name = stockDetail[\"name\"]\n # total value of each stock the user owns\n stock_value = stock[\"stock_quantity\"] * stockPrice\n holdings = holdings + stock_value\n stock[\"stock_name\"] = stock_name\n stock[\"stock_price\"] = usd(stockPrice)\n stock[\"stock_value\"] = usd(stock_value)\n print(\"Total value of each stock: \", stock_value)\n\n return render_template(\"index.html\", stocks=stocks,cash=usd(cash),total=usd(holdings+cash))", "def before_trading_start(context, data):\r\n # These are the securities that we are interested in trading each day.\r\n context.output = pipeline_output('my_pipeline')\r\n context.equities = context.output.index.tolist()\r\n log.info(\"Stocks today\") \r\n print(context.equities)", "def getCartDetails():\n try:\n result = json.loads(request.get_data(as_text=True))\n userId = request.json['userId']\n print(\"user id is:\"+userId)\n items = myCart.find({\"userId\":userId})\n data = dumps(items)\n print(str(items))\n stats = myCart.aggregate(\n [\n #{ \"$match\" : { \"userId\" : \"88041fab-078c-4e34-8f03-1dadbe1c537a\"} },\n { \"$match\" : { \"userId\" : userId} },\n { \"$group\": \n { \n \"_id\": { \"userId\": \"$userId\" },\n \"totalAmount\": \n { \"$sum\": \n { \"$multiply\": [ \"$price\", \"$quantity\" ] }\n },\n \"totalQuantity\": { \"$sum\": \"$quantity\" } }\n }\n \n ]\n )\n statistics = dumps(stats)\n return jsonify({\"Status\" : \"OK\", \"data\" : data, \"stats\":statistics})\n except Exception, e:\n return jsonify(status='ERROR',message=str(e))", "def get_shipments_by_date(auth, date, base_url='https://api.cratejoy.com/v1/'):\n \n shipment_endpoint = '{}shipments/?batch.end__lt={}T00:00:00Z'.format(base_url, date)\n\n resp = requests.get(\n shipment_endpoint,\n auth=auth\n )\n\n print('GET request to {} responded with status '\n 'code: {}'.format(shipment_endpoint,\n resp.status_code))\n print(resp.content)", "def history():\n if request.method == \"GET\":\n \n user_id = int(session.get('user_id'))\n user_data = db.execute('''SELECT * FROM history WHERE user_id = :user_id''', user_id = user_id)\n \n if not user_data:\n return render_template('quote.html')\n \n #create lists of values for sake of returning them to F2E\n portfolio = []\n \n for i in user_data:\n #getting data from table\n date = i.get('date')\n symbol = i.get('symbol')\n name = i.get('stock_name')\n quantity = i.get('quantity')\n price = round(float(i.get('price')), 2)\n action = str(i.get('deal'))\n \n #inserting data into a list\n a_dict = {\n 'date': date, 'symbol': symbol, \n 'name': name, 'price': price, \n 'quantity': quantity, 'action': action\n }\n portfolio.append(a_dict)\n \n return render_template('history.html',\n portfolio=portfolio)\n else:\n return render_template('index.html')" ]
[ "0.61054766", "0.5950331", "0.5886843", "0.56959987", "0.5685352", "0.54649276", "0.52916914", "0.5280743", "0.52630013", "0.52083814", "0.51895857", "0.5186475", "0.5161704", "0.51446867", "0.5128957", "0.5113111", "0.5106592", "0.51013273", "0.50893575", "0.5071267", "0.50320065", "0.50239927", "0.50229645", "0.5004244", "0.49876738", "0.4955896", "0.4949578", "0.49477807", "0.49429354", "0.49372277" ]
0.63688445
0
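A minimal, self-contained sketch of the portfolio-valuation pattern the record above captures — sum each holding's market value at its quoted price, then add the user's cash. The hard-coded quote table and the function names here are illustrative assumptions, not part of the dataset, which uses a CS50-style lookup helper and a SQL store.

def lookup(symbol):
    # Hypothetical fixed quotes so the sketch runs without a price API.
    quotes = {"AAPL": {"name": "Apple", "price": 150.0},
              "MSFT": {"name": "Microsoft", "price": 300.0}}
    return quotes.get(symbol)

def portfolio_value(holdings, cash):
    # holdings: list of {"symbol": str, "quantity": int}; returns the grand total.
    total = cash
    for h in holdings:
        quote = lookup(h["symbol"])
        if quote is None:
            continue  # skip symbols the quote helper does not know
        total += quote["price"] * h["quantity"]
    return total

print(portfolio_value([{"symbol": "AAPL", "quantity": 2}], cash=1000.0))  # 1300.0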
Adds a positive entry into the database to account for 'buying' a particular holding on a particular date only if the user exists and the price total (quantity × price) does not exceed the account balance
def add_to_portfolio(username): user_obj = User.query.filter(User.username == username).first() ticker = request.form.get('ticker') date = request.form.get('date') qty = request.form.get('qty') if user_obj is None: return util.build_json_response('User does not exist') if (len(ticker) == 0) or (not util.is_valid_date_string(date)): return util.build_json_response("No ticker or valid date of the form YYYY-MM-DD") if not qty.isnumeric() or int(qty) < 0: return util.build_json_response("Quantity is not valid") price = int(qty) * market_data.get_stock_price(ticker, date, 'low') if price > user_obj.balance: return util.build_json_response("Cost exceeds balance") new_balance = user_obj.balance - price date = datetime.fromisoformat(date) holding = Portfolio.query.\ filter(Portfolio.user_id == user_obj.id)\ .filter(Portfolio.transaction_date == date)\ .filter(Portfolio.ticker == ticker)\ .filter(Portfolio.transaction_type == 'BUY').first() try: db.session.execute( update(User) .values(balance=new_balance) .where(User.id == user_obj.id) ) if holding is None: holding = Portfolio(ticker, date, 'BUY', int(qty), user_obj.id) db.session.add(holding) else: db.session.execute( update(Portfolio) .values(quantity=holding.quantity + int(qty)) .where(Portfolio.user_id == user_obj.id) .where(Portfolio.transaction_date == date) .where(Portfolio.ticker == ticker) .where(Portfolio.transaction_type == 'BUY') ) db.session.commit() except: return util.build_json_response("Failure to update DB") return util.build_json_response("Stock added to portfolio", stock=request.form, balance=user_obj.balance)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buy_item(self, item):\n if self.amount < item.price:\n custom_log(\"Insufficient amount. Insert more coins.\", MSG_ERROR)\n else:\n self.amount = round((self.amount - item.price), 2)\n item._buy()\n custom_log(f\"You bought - {item.name}, remaining cash - €{self.amount}\", MSG_DEBUG)", "def withdrawal(self, amount):\n if self.balance - amount < self.minimum_balance:\n print \"This would take you below your minimum balance.\"\n return\n else:\n self.balance -= amount\n print \"Please take your cash.\"\n print \"Your balance is now $%d.\" % self.balance\n self.transactions.append((\"Withdrawal\", amount))", "def buy():\n\n if request.method == \"POST\":\n symbol = request.form.get('symbol')\n shares = int(request.form.get(\"shares\"))\n quote = lookup(symbol)\n userid = session[\"user_id\"]\n\n if quote is None:\n return apology(\"Incorrect symbol, try again\", 400)\n else:\n rows = db.execute(\"SELECT cash FROM users WHERE id = :userid\",\n userid=userid)\n cash = rows[0][\"cash\"]\n price = quote[\"price\"]\n tot = price * shares\n\n if cash < tot:\n return apology(\"you can't afford this stock\")\n else:\n db.execute(\"UPDATE users SET cash = cash - :tot WHERE id = :userid\", tot=tot, userid=userid)\n db.execute(\"\"\"INSERT INTO purchase (userid, symbol, shares, tot)\n VALUES (:userid, :symbol, :shares, :tot)\"\"\", userid=userid,\n symbol=symbol, shares=shares, tot=tot)\n flash(\"Bought!\")\n return redirect(\"/\")\n else:\n return render_template(\"buy.html\")", "def buy():\n\n def price_check(cash, price, shares):\n \"\"\"check affordability of stock vs cash on hand\"\"\"\n affordable = (cash - (price * shares)) > 0\n\n if affordable:\n return affordable\n\n else:\n return False\n\n if request.method == \"POST\":\n\n stock = lookup(request.form.get(\"symbol\"))\n\n # check symbol and share # are valid\n if not stock:\n return apology(\"Missing or Incorrect Symbol\", 400)\n\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"Input at least 1 share\", 400)\n\n if shares < 0:\n return apology(\"Input at least 1 share\", 400)\n\n\n # cast shares to int & fetch users cash on hand\n shares = int(request.form.get(\"shares\"))\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])[0][\"cash\"]\n\n if price_check(user_cash, stock[\"price\"], shares) == False:\n return apology(\"Sorry, you can't afford this purchase.\", 400)\n\n else:\n # define variables for inserting into transactions table\n purchase_date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # update user cash\n user_cash = user_cash - (stock[\"price\"]*shares)\n db.execute(\"UPDATE users SET cash = :user_cash WHERE id = :user_id\", user_id=session[\"user_id\"], user_cash=user_cash)\n\n # update transactions table with most recent transaction\n db.execute(\"\"\"\n INSERT INTO transactions(user_id, date, symbol, shares, price)\n VALUES(:user_id, :date, :symbol, :shares, :price)\n \"\"\",\n user_id=session[\"user_id\"],\n date=purchase_date,\n symbol=stock[\"symbol\"],\n shares=shares,\n price=stock[\"price\"]\n )\n\n return redirect(\"/\")\n\n else:\n return render_template(\"buy.html\")", "def save(self, *args, **kwargs): #pylint: disable=W0221\n created = not self.uuid\n if created:\n existing_balance = self.account.calculated_balance\n else:\n all_other_active_transactions = [x for x in self.account.transactions.all() if x != self and x.active]\n existing_balance = sum(x.amount for x in all_other_active_transactions)\n\n if not 
self.active:\n pass\n elif (existing_balance + self.amount) < 0:\n raise AccountBalanceError(\n 'Balance of account {} would be brought below 0'.format(self.account)\n )\n\n instance = super().save(*args, **kwargs)\n self.account.update_balance()\n return instance", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n else:\n #Get symbol & number to buy - ok\n #Get price - ok\n #Ensure user can afford to buy - ok\n #Remove cash from user table - ok\n #Add row to stocks table - ok\n #Add row to history table - ok\n\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n\n if not symbol:\n return apology(\"Pick a company\")\n\n if not shares:\n return apology(\"Pick a number > 0\")\n elif int(shares) < 1:\n return apology(\"sneaky\")\n else:\n shares = int(shares)\n\n\n sDict = lookup(symbol)\n row = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid = session[\"user_id\"])\n\n if not sDict:\n return apology(\"Invalid Symbol\")\n\n if shares * sDict[\"price\"] > row[0][\"cash\"]:\n return apology(\"You broke, son\")\n\n #Calculate remaining cash and update users table\n rmcash = row[0][\"cash\"] - shares * sDict[\"price\"]\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :userid\", cash = rmcash, userid = session[\"user_id\"])\n\n #Update stocks table with new holding\n curStock = db.execute(\"SELECT amount FROM stocks WHERE user_id = :userid AND symbol = :symbol\", userid=session[\"user_id\"], symbol=sDict[\"symbol\"])\n\n if len(curStock) == 0:\n db.execute(\"INSERT INTO stocks (user_id, symbol, amount) VALUES (:userid, :symbol, :amount)\", userid=session[\"user_id\"], symbol=sDict[\"symbol\"], amount=shares)\n\n else:\n db.execute(\"UPDATE stocks SET amount = :amount WHERE user_id = :userid and symbol = :symbol\", amount=curStock[0][\"amount\"]+shares, userid=session[\"user_id\"], symbol=sDict[\"symbol\"])\n\n \"\"\"Update history table with transaction\"\"\"\n time = datetime.now()\n db.execute(\"INSERT INTO history(user_id, symbol, amount, price, date) VALUES (:user_id, :symbol, :amount, :price, :date)\",\n user_id=session[\"user_id\"], symbol=sDict[\"symbol\"], price=sDict[\"price\"], amount=shares, date=time)\n\n flash(\"Bought!\")\n return redirect('/')\n\n return apology(\"Unknown Error\")", "def create_deposit_bonus(sender, instance, created, **kwargs):\n if created:\n instance.wallet.value += Decimal(instance.value)\n instance.wallet.save()\n if instance.value >= Decimal('100.00'):\n user = instance.wallet.user\n bonus_wallet = BonusWallet.objects.filter(user=user)\n if not bonus_wallet.exists():\n bonus_wallet = BonusWallet.objects.create(user=user)\n bonus_wallet.save()\n else:\n bonus_wallet = bonus_wallet[0]\n\n deposit_bonus = DepositBonus.objects.create(wallet=bonus_wallet)\n bonus_wallet.value += Decimal(deposit_bonus.value)\n bonus_wallet.save()", "def buy():\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n else:\n stock = lookup(request.form.get(\"symbol\"))\n\n if stock == None:\n return apology(\"Symbol not found. Please re-check the symbol and try again!\")\n\n shares = int(request.form.get(\"shares\"))\n if not shares or int(shares) <= 0:\n return apology(\"Invalid shares. 
Please re-check and try again!\")\n\n company_name = stock[\"name\"]\n price = float(stock[\"price\"])\n symbol = stock[\"symbol\"]\n userid = session[\"user_id\"]\n available_cash = (db.execute(\"SELECT cash FROM users WHERE id=:id\", id = userid))[0].get(\"cash\")\n total = shares*price\n if total > available_cash:\n return apology(\"Sorry! You do not have sufficient balance\")\n else:\n check = (db.execute(\"SELECT symbol FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol, uid=userid))\n dt = datetime.now(timezone(timedelta(hours=6)))\n dt = dt.strftime(\"%d-%m-%Y %H:%M:%S\")\n db.execute(\"INSERT INTO history (id, symbol, shares, price, time) VALUES (:userid, :symbol, :shares, :price, :time)\", userid=userid, symbol=symbol,shares=shares,price=price, time=dt)\n db.execute(\"UPDATE users SET cash=:cash WHERE id=:uid\", cash=available_cash-shares*price, uid=userid)\n\n # check = (db.execute(\"SELECT symbol FROM history WHERE symbol=:symbol\", symbol=symbol))[0].get(\"symbol\")\n print(len(check))\n if len(check) == 0:\n db.execute(\"INSERT INTO purchase (id, symbol, name, shares) VALUES (:userid, :symbol, :name, :shares)\", userid=userid, symbol=symbol, name=company_name, shares=shares)\n else:\n exshares = int((db.execute(\"SELECT shares FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol,uid=userid))[0].get(\"shares\"))\n # print(exshares+\" \"+type(exshares))\n extotal = float((db.execute(\"SELECT total FROM purchase WHERE symbol=:symbol AND id=:uid\", symbol=symbol,uid=userid))[0].get(\"total\"))\n db.execute(\"UPDATE purchase SET shares=:newshares WHERE symbol=:symbol AND id=:uid\", newshares=shares+exshares, symbol=symbol, uid=userid)\n return render_template(\"bought.html\", company_name=company_name, shares=shares, symbol=symbol, usd=usd(shares*price), balance=usd(available_cash-shares*price))", "def buy():\n current_cash= db.execute(\"select cash from users where id = \" + str(session[\"user_id\"]))[0]['cash']\n\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n shares = request.form.get(\"shares\")\n\n\n x=lookup(symbol)\n if x == None:\n return apology(\"invalid symbol\", 400)\n\n price = int(shares)*x['price']\n new_cash = current_cash - price\n\n\n #print(\"insert into users (cash) values (?)\", new_cash + \" where id = \"+ str(session[\"user_id\"]))\n\n db.execute(\"UPDATE users SET cash = \"+ str(new_cash) +\" WHERE id = \"+ str(session[\"user_id\"]) +\";\")\n db.execute(\"insert into purchases (user_id, shares, symbol, price_total, price_per_shares) values (?, ?, ?, ?,? 
)\", session[\"user_id\"], shares, symbol, price, x['price'])\n db.execute(\"insert into history (user_id, type, amount, time, shares, name) values (?,?,?,?,?,?)\",str(session[\"user_id\"]), \"buy\", str(price), str(datetime.now()), str(shares), symbol)\n return redirect(\"/\")\n\n return render_template(\"buy.html\")", "def buy():\n\n if request.method == \"POST\":\n numShares = 0\n try:\n numShares = float(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"Enter a numerical value!\", 400)\n if numShares % 1 != 0:\n return apology(\"Fractional Shares not allowed!\", 400)\n if numShares <= 0:\n return apology(\"Enter a number greater than 0!\", 400)\n if not request.form.get(\"symbol\"):\n return apology(\"Enter a symbol!\", 400)\n if not request.form.get(\"shares\"):\n return apology(\"Enter a number of shares!\", 400)\n\n company = lookup(request.form.get(\"symbol\"))\n if not company:\n return apology(\"Invalid ticker symbol\", 400)\n price = float(company[\"price\"])\n total = float(price * numShares)\n symbl = company[\"symbol\"]\n userRows = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid=session.get(\"user_id\"))\n remainingCash = float(userRows[0][\"cash\"])\n if total > remainingCash:\n return apology(\"You cannot afford the stock(s)!\", 400)\n else:\n currentUser = session.get(\"user_id\")\n purchased = db.execute(\"INSERT INTO portfolio (UserID, Symbol, Company, NumberOfShares, UnitPrice, TotalPrice) VALUES(:userid, :symbol, :name, :shares, :unitPrice, :totalPrice)\", userid=currentUser, symbol=symbl, name=company['name'], shares=numShares, unitPrice=price, totalPrice=total)\n\n\n if not purchased:\n return apology(\"Unable to purchase\", 400)\n else:\n remainingCash = remainingCash - total\n db.execute(\"UPDATE users set cash=:balance WHERE id=:userid\", balance=remainingCash, userid=currentUser)\n '''Update history'''\n dateNow = datetime.datetime.now()\n historized = db.execute(\"INSERT INTO history (Symbol, Shares, Price, Date, UserID) VALUES(:symbol, :shares, :price, :date, :userid)\", symbol = symbl, shares = numShares, price = total, date = dateNow, userid = session.get(\"user_id\"))\n '''Update history end'''\n return redirect(\"/\")\n\n\n else:\n return render_template(\"buy.html\")", "def deposit(self, amount):\n connection = sqlite3.connect('/home/BorneAgain/Desktop/flasktest/accounts.db')\n\n cursor = connection.cursor()\n\n if self.getBalance() + amount > 0:\n cursor.execute(\"\"\"update accounts set amount=? where name =?;\"\"\", (amount+self.getBalance(), self.name))\n cursor.execute(\"\"\"insert into history (username,dt,amount) values (?,?,?);\"\"\", (self.name, datetime.utcnow(), amount))\n else:\n \n cursor.execute(\"\"\"update accounts set amount=? 
where name =?;\"\"\", (0, self.name))\n\n cursor.execute(\"\"\"insert into history (username,dt,amount) values (?,?,?);\"\"\", (self.name, datetime.utcnow(), amount))\n connection.commit()", "def buy():\n \n if request.method == \"POST\":\n \n time = str(datetime.now())\n \n quantity = int(request.form.get(\"quantity\"))\n \n if quantity < 1:\n return apology(\"you need to provide right quantity\")\n \n # get user's cash\n user_id = int(session.get('user_id'))\n \n data_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id = user_id)\n \n convert = data_cash[0]\n cash = convert.get('cash')\n \n # getting stock request data\n quote = session['quote']\n \n symbol, name, price = quote['symbol'], quote['name'], float(quote['price'])\n total = price * quantity\n \n #check if user can afford so much stock\n \n if total > cash:\n return apology('you don\\'t have enough money')\n \n #INSERT bought stock into history table\n db.execute('''INSERT INTO history (date, user_id, stock_name, symbol, quantity, price, deal) \n VALUES (:date, :user_id, :stock_name, :symbol, :quantity, :price, :deal)''',\n date = time,\n user_id = user_id,\n stock_name = name,\n symbol = symbol,\n quantity = quantity,\n price = total,\n deal = 'buy')\n #update portfolio\n #check if user has bought this stock before\n symbol_check = db.execute('''SELECT symbol FROM portfolio WHERE user_id = :user_id''',\n user_id = user_id)\n \n if [x for x in symbol_check if x['symbol'] == symbol]:\n #update stock if user has bought such shares before\n db.execute('''UPDATE portfolio \n SET quantity = quantity + :quantity \n WHERE (user_id = :user_id AND symbol = :symbol)''', \n quantity = quantity, user_id = user_id, symbol = symbol)\n \n else:\n #add new shares to portfolio\n db.execute('''INSERT INTO portfolio VALUES (:user_id, :symbol, :quantity)''',\n user_id = user_id, symbol = symbol, quantity = quantity)\n \n #update cash\n db.execute('UPDATE users SET cash = cash - :total WHERE id = :user_id', total = total, user_id = user_id)\n \n return redirect(url_for(\"index\"))\n \n else:\n return redirect(url_for(\"quote\"))", "def do_withdraw(self, args):\n \n amount = float(input(\"How much? \"))\n \n balance = self.cur.execute(\"SELECT * FROM balance ORDER BY date DESC\").fetchone()[2]\n if amount > balance:\n print(\"Insufficient funds! Withdrawl canceled.\")\n print(\"Use the `balance` command to check your account balance\")\n return\n \n balance -= amount\n now = time()\n self.cur.execute(\"INSERT INTO withdrawls VALUES (?,?)\", (now, amount))\n self.cur.execute(\"INSERT INTO balance VALUES (?,?,?)\", (now, 0.0, balance))\n self.db.commit()\n print(\"Withdrawl complete. 
Your new balance is $%.2f\" % balance)", "def buy():\n if request.method == \"POST\":\n\n if not request.form.get(\"shares\"):\n return apology(\"gimme share\", 400)\n if not lookup(request.form.get(\"symbol\")):\n return apology(\"not correct stock\", 400)\n if not request.form.get(\"shares\").isdigit():\n return apology(\"sorry bro\", 400)\n\n quote = lookup(request.form.get(\"symbol\"))\n\n money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = money_list[0][\"cash\"]\n\n total_price = int(request.form.get(\"shares\")) * float(quote[\"price\"])\n\n if available_money < total_price:\n return apology(\"no money bro\", 400)\n\n insertion = db.execute(\"INSERT INTO transactions (id, stock, units, price, time, type) VALUES (:current_id, :stock, :units, :price, :now, :type)\",\n current_id=session[\"user_id\"], stock=request.form.get(\"symbol\"), units=request.form.get(\"shares\"), price=float(quote[\"price\"]), now=datetime.datetime.now(), type=\"B\")\n updating = db.execute(\"UPDATE users SET cash = cash - :upd_price WHERE id = :current_id\",\n upd_price=total_price, current_id=session[\"user_id\"])\n\n money_upd_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money_upd = money_upd_list[0][\"cash\"]\n\n return render_template(\"buy_result.html\",\n shares=request.form.get(\"shares\"),\n symbol=request.form.get(\"symbol\"),\n price=usd(total_price),\n cash=usd(available_money_upd))\n else:\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n elif not request.form.get(\"shares\"):\n return apology(\"must provide shares\", 400)\n\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must be integer\",400)\n\n elif int(request.form.get(\"shares\"))<1 :\n return apology(\"must be positive integer\", 400)\n\n elif lookup(request.form.get(\"symbol\"))==None:\n return apology(\"Must be a valid symbol\",400)\n\n #ensure money>price\n quote=lookup(request.form.get(\"symbol\"))\n shares=request.form.get(\"shares\")\n cash=db.execute(\"SELECT cash FROM users WHERE id=?\",session[\"user_id\"])\n if cash[0][\"cash\"]<int(quote[\"price\"])*int(shares):\n return apology(\"You can't affort this/these\",400)\n\n #BUY, STORE DATA IN REPOSITORY AND RECORD\n\n #record this transaction\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\",session[\"user_id\"],int(shares),quote[\"symbol\"],float(quote[\"price\"]))\n\n #deduct the cash\n total=int(quote[\"price\"])*int(shares)\n db.execute(\"UPDATE users SET cash=cash- (?) 
WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n return render_template(\"buy.html\")", "def buy():\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # Ensure the user inputs a symbol\n symbol = request.form.get(\"symbol\").upper()\n if not symbol:\n return apology(\"must provide a symbol\", 403)\n\n # ensure number of shares is submitted\n shares = request.form.get(\"shares\")\n if not shares:\n return apology(\"must provide number of shares\", 403)\n\n\n # do a try except for handling negative values or empty spaces in shares input box\n try:\n shares = int(shares)\n if shares < 0:\n return apology(\"Enter a positive integer for shares\", 403)\n except ValueError:\n return apology(\"No empty spaces allowed enter a positive integer\", 403)\n\n # call lookup in helpers.py to look up a stock’s current price.\n stockPriceDetail = lookup(symbol)\n\n # render apology for invalid symbol input by user\n if stockPriceDetail == None:\n return apology(\"Invalid symbol\", 403)\n else:\n price = stockPriceDetail[\"price\"]\n\n # calculate the total price of the number of shares\n totalCost = price * shares\n print(totalCost)\n\n\n # based on user's input check if they have enough cash to buy stocks\n rows = db.execute(\"SELECT cash FROM users WHERE id = :id\", id=session[\"user_id\"])\n print(\"rows= \" , rows)\n\n cash = rows[0] [\"cash\"]\n\n # Check for sufficient cash\n if cash < totalCost:\n return apology(\"you have insufficient cash balance\", 403)\n\n balance = cash - totalCost\n\n # insert row in transactions table\n result = db.execute(\"\"\"insert into transactions\n (user_id,stock_code,stock_quantity,stock_price,\n start_balance,end_balance,transaction_type)\n values(:userid, :symbol, :shares, :price, :cash,\n :balance,:ttype)\"\"\",\n userid=session[\"user_id\"],shares=shares,\n symbol=symbol,price=price,\n cash=cash,balance=balance,ttype=\"BOUGHT\")\n\n # update users balance\n result = db.execute(\"update users set cash = :balance where id = :userid\",\n userid=session[\"user_id\"],balance=balance)\n\n # Redirect user to index page\n return redirect(\"/\")\n\n else:\n symbol = request.args.get('symbol')\n return render_template(\"buy.html\",symbol=symbol)", "def pay_for_item(self, item):\n while self.amount < item.price:\n paid_amount = float(input(f\"Pay €{round((item.price - self.amount), 2)} : \"))\n if paid_amount <= 0:\n custom_log(\"Invalid amount entered.\", MSG_ERROR)\n continue\n self.amount = self.amount + paid_amount", "def buy():\n if request.method == \"POST\":\n\n # Ensure buy order\n if not request.form.get(\"symbol\"):\n return apology(\"must provide valid order info\", 400)\n\n # Ensure buy order\n elif not request.form.get(\"shares\"):\n return apology(\"must provide valid order info\", 400)\n\n # Ensure stock is balid else display an apology\n elif lookup(request.form.get(\"symbol\")) == None:\n return apology(\"invalid stock\", 400)\n\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"shares must be a positive integer\", 400)\n\n\n # Check if its negative\n #elif int(request.form.get(\"shares\")) < 1:\n # return apology(\"must provide valid order info\", 400)\n\n\n # Add stock to user's portfolio\n\n stock = lookup(request.form.get(\"symbol\"))['name']\n num = request.form.get(\"shares\")\n price = (lookup(request.form.get(\"symbol\"))['price'])\n user = session.get(\"user_id\")\n amount = (float(request.form.get(\"shares\")) * 
float(lookup(request.form.get(\"symbol\"))['price']))\n\n # check if they have enough cash\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE id = :id\", id = session.get(\"user_id\"))\n rows = float(rows[0][\"cash\"])\n\n\n # Add trasnaction to portfolio if user has enough cash\n if (float(num) * float(price)) <= rows:\n result = db.execute(\"INSERT INTO portfolio (User, Stock, Price, Num) VALUES(:User, :Stock, :Price, :Num)\", User = session.get(\"user_id\"), Stock = stock, Price = usd(price), Num = num)\n if not result:\n return apology(\"TX did not recrod\", 400)\n# Update cash\n result = db.execute(\"UPDATE users set cash = cash - :amount where id = :User \", User = session.get(\"user_id\"), amount = amount)\n if not result:\n return apology(\"Cash did not update\", 400)\n\n # Redirect user to home page\n return redirect(\"/\")\n else:\n\n return apology(\"Not enough Cash\", 403)\n else:\n return render_template(\"buy.html\")", "def buy():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\") or not lookup(request.form.get(\"symbol\")):\n return apology(\"must provide valid symbol\",400)\n if not request.form.get(\"shares\") or int(request.form.get(\"shares\")) <= 0:\n return apology(\"shares must be positive integer!\",400)\n row=db.execute(\"SELECT * FROM users WHERE id=:s\",s=session[\"user_id\"])\n dict=lookup(request.form.get(\"symbol\"))\n cost=dict[\"price\"]* int(request.form.get(\"shares\"))\n if row[0][\"cash\"]>cost:\n db.execute(\"INSERT INTO history(symbol,shares,price,transacted,user_id,status) VALUES (:s,:sh,:p,:t,:u_i,:status)\",s=dict[\"symbol\"],sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=time.asctime( time.localtime(time.time())),u_i=session[\"user_id\"],status='bought')\n row[0][\"cash\"]=row[0][\"cash\"]-cost\n db.execute(\"UPDATE users SET cash = :cash WHERE id=:s\",cash=row[0][\"cash\"],s=session[\"user_id\"])\n exist=db.execute(\"SELECT * FROM portofolio WHERE symbol=:s AND user_id=:u_i\",s=dict[\"symbol\"],u_i=session[\"user_id\"])\n if len(exist) == 0 :\n db.execute(\"INSERT INTO portofolio(symbol,name,shares,price,total,user_id) VALUES (:s,:n,:sh,:p,:t,:u_i)\",s=dict[\"symbol\"],n=dict[\"name\"],sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=cost,u_i=session[\"user_id\"])\n else:\n db.execute(\"UPDATE portofolio SET shares =shares+:sh, price=:p, total=total+:t WHERE symbol=:s AND user_id=:u_i\",sh=int(request.form.get(\"shares\")),p=dict[\"price\"],t=dict[\"price\"] * int(request.form.get(\"shares\")),s=dict[\"symbol\"],u_i=session[\"user_id\"])\n else:\n return apology(\"Can't afford!\",400)\n\n\n\n return redirect(\"/\")\n else:\n return render_template(\"buy.html\")", "def buy_stock (self, ticker, buy_date, sell_date, amount):\n\n if self.__buy_stock_init__(ticker, buy_date, sell_date, amount) == False:\n return\n\n if self.__get_hist__() == False:\n return\n\n self.__calc_no_shares_to_buy__()\n self.__update_buy_amount__() \n self.__save_buy__()", "def checkin(self):\n folio = self.folio_id\n if folio.payment_deposits <= 0:\n raise UserError(_(\"\"\"No record of security deposit found on folio {}\n \"\"\".format(folio.name)))\n if folio.state != 'on_queue':\n raise UserError(_(\n 'Folio {} is not yet to be processed'.format(self.folio_id.name)))\n hours, minutes = decimal_to_time(self.env.user.company_id.checkin_hour)\n can_check_in = datetime.combine(\n date.today(), tm(hours, minutes)) < datetime.now()\n if not can_check_in:\n raise UserError(\n 'Guest(s) cannot be checked in earlier 
than {}'.format(\n self.env.user.company_id.checkin_hour))\n if self.folio_id.room_id.occupy():\n self.folio_id.write({'state': 'checkin'})", "def pay(self, amount):\n if amount > self.balance:\n print(f\"Not enough balance! Only ${self.balance} left.\")\n return False\n self.balance -= amount\n return True", "def withdraw(self, user_id, money, **kwargs):\n user = User.objects(user_id=user_id).first()\n\n if money > 0:\n if user.balance >= money:\n print('Cantidad retirada: ', money)\n user.balance = float(user.balance) - float(money)\n user.save()\n else:\n print('No hay fondos suficientes para realizar el retiro.')\n else:\n print('No es posible retirar valores negativos.')", "def buy():\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n if not request.form.get(\"stock\"):\n return apology(\"must provide stock\", 403)\n\n if not request.form.get(\"amount\"):\n return apology(\"must provide amount\", 403)\n\n amount = int(request.form.get(\"amount\"))\n\n if amount <= 0:\n return apology(\"must provide a positive value\", 403)\n\n quote_input = request.form.get(\"quote\")\n quote_info = lookup(quote_input)\n\n if not quote_info:\n return apology(\"The quote you are looking for is not available\", 403)\n\n symbol = quote_info['symbol']\n price = quote_info['price']\n\n total_order = float(amount) * float(price)\n\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id = session[\"user_id\"])\n\n if total_order > cash:\n return apology(\"Your funds are insufficient\", 403)\n\n else:\n remaining_cash = cash - total_order\n\n db.execute(\"UPDATE users SET cash = remaining_cash WHERE id = :id\", id = session[\"user_id\"])\n\n username = db.execute(\"SELECT username FROM users WHERE id = :id\", id = session[\"user_id\"])\n\n date = str(datetime.datetime.today()).split()[0]\n\n time = datetime.datetime.time(datetime.datetime.now())\n\n db.execute(\"INSERT INTO transaction (id, username, stock, amount, price, total_amount, date, time) VALUES(:id, :username, :stock, :amount, :price, :total_amount, :date, :time)\"\n , id = session[\"user_id\"], username=username, stock=quote_info['symbol'], amount=amount, price=quote_info['price'], total_order=total_order, date = date, time = time)\n\n return redirect(\"/\")", "def buy():\n\n # Defaul to the template\n if request.method == \"GET\":\n return render_template(\"buy.html\")\n\n # Hoisting\n amount=int(request.form.get(\"amount\"))\n symbol=lookup(request.form.get(\"symbol\"))['symbol']\n user = session[\"user_id\"]\n\n # Check validity\n if not lookup(symbol):\n return render_template(\"buy.html\", error=True, messate=\"This is not a valid stock\")\n\n # Check the amount to be spent\n price=lookup(symbol)['price']\n cash = db.execute(\"SELECT cash FROM users WHERE id = :user\", user=user)[0]['cash']\n wallet = cash - price * float(amount)\n\n # Check the wallet\n if wallet < 0:\n return render_template(\"buy.html\", error=True, message=\"Sorry, there is not enough money to complete this operation\")\n\n # Check the current amount of the stock\n stock = db.execute(\"SELECT amount FROM stocks WHERE user_id = :user AND symbol = :symbol\", user=user, symbol=symbol)\n if not stock:\n db.execute(\"INSERT INTO stocks(user_id, symbol, amount) VALUES (:user, :symbol, :amount)\", user=user, symbol=symbol, amount=amount)\n else:\n amount += stock[0]['amount']\n db.execute(\"UPDATE stocks SET amount = :amount WHERE user_id = :user AND symbol = :symbol\", user=user, symbol=symbol, amount=amount)\n\n # 
Update wallet\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :user\", cash=wallet, user=user)\n db.execute(\"INSERT INTO transactions(user_id, symbol, amount, value) VALUES (:user, :symbol, :amount, :value)\",\n user=user, symbol=symbol, amount=amount, value=round(price * float(amount)))\n\n return redirect(\"/\")", "def test_add_with_negative_amount(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"30\", \"-40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def buy():\n if request.method == \"POST\":\n if not request.form.get(\"symbol\"):\n return apology(\"Please specify which stock to buy\", 403)\n if not request.form.get(\"nos\"):\n return apology(\"Please specify how many stocks you want to buy\", 403)\n if int(request.form.get(\"nos\")) < 1:\n return apology(\"Please input a positive integer\", 403)\n if request.form.get(\"nos\").isnumeric() != True:\n return apology(\"Please input a positive integer\", 403)\n symbol = request.form.get(\"symbol\")\n if not lookup(symbol):\n return apology(\"Invalid symbol\", 403)\n cost = (lookup(symbol)[\"price\"]) * int(request.form.get(\"nos\"))\n bro = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n money = bro[0][\"cash\"]\n if cost > money:\n return apology(\"Cannot afford\", 400)\n money = money - cost\n bef = db.execute(\"SELECT COUNT (?) FROM ind WHERE user_id = ?\", lookup(symbol)[\"symbol\"], session[\"user_id\"])\n if len(bef):\n tot = 0\n nob = 0\n tota = cost\n\n else:\n tot = db.execute(\"SELECT total FROM ind where symbol = ?\", lookup(symbol)[\"symbol\"])\n no = db.execute(\"SELECT nos FROM ind where symbol = ?\", lookup(symbol)[\"symbol\"])\n nob = no[0][\"nos\"]\n tota = tot[0][\"total\"] - cost\n\n\n\n\n nos = int(request.form.get(\"nos\"))\n db.execute(\"UPDATE users SET cash = ? 
WHERE id = ?\", money, session[\"user_id\"])\n db.execute(\"CREATE TABLE IF NOT EXISTS buys (user_id INTEGER NOT NULL, symbol TEXT NOT NULL, name TEXT NOT NULL, price NUMERIC NOT NULL, nos INTEGER NOT NULL, cost NUMERIC NOT NULL, time datetime NOT NULL, FOREIGN KEY(user_id) REFERENCES users(id))\")\n db.execute(\"INSERT INTO hist(user_id, typ, symbol, name, price, nos, cost, time) VALUES (:user_id, :typ, :symbol, :name, :price, :nos, :cost, :time)\", user_id = session[\"user_id\"], typ = \"BOUGHT\", symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], nos = nos, cost = cost, time = datetime.datetime.now())\n db.execute(\"INSERT INTO buys(user_id, symbol, name, price, nos, cost, time) VALUES (:user_id, :symbol, :name, :price, :nos, :cost, :time)\", user_id = session[\"user_id\"], symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], nos = nos, cost = cost, time = datetime.datetime.now())\n bef = db.execute(\"SELECT symbol FROM ind WHERE symbol=:symbol AND user_id=:id\", symbol=lookup(symbol)[\"symbol\"], id=session[\"user_id\"])\n\n # add to portfolio database\n # if symbol is new, add to portfolio\n if not bef:\n db.execute(\"INSERT INTO ind (symbol, name, nos, user_id, price, total) VALUES (:symbol, :name, :nos, :id, :price, :total)\",\n name = lookup(symbol)[\"name\"], symbol=lookup(symbol)[\"symbol\"], nos=int(request.form.get(\"nos\")), id = session[\"user_id\"], price = lookup(symbol)[\"price\"], total = cost)\n\n # if symbol is already in portfolio, update quantity of shares and total\n else:\n db.execute(\"UPDATE ind SET nos=nos+:nos WHERE symbol=:symbol AND user_id=:id\",\n nos=int(request.form.get(\"nos\")), symbol=lookup(symbol)[\"symbol\"], id = session[\"user_id\"]);\n return redirect(\"/\")\n\n\n else:\n return render_template(\"buy.html\")", "def buy():\n username = session.get(\"username\")\n # print(f'username: {username}')\n\n if request.method==\"POST\":\n symbol = request.form.get(\"symbol\")\n quantity = request.form.get(\"shares\")\n if not quantity.isdigit() or int(quantity)<=0:\n return apology(\"Quantity must be a positive integer\", 400)\n quantity = int(quantity)\n price = 0\n message = \"\"\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n response = lookup(symbol)\n if not response:\n return apology(\"Invalid symbol\", 400)\n\n price = response[\"price\"]\n name = response[\"name\"]\n cash = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n cost = price * float(quantity)\n status = \"bought\"\n if cash >= cost:\n cash -= cost\n db.execute(\"UPDATE users SET cash=:cash WHERE username=:username\", cash=cash, username=username)\n db.execute(\"INSERT INTO history (username, stock_symbol, unit_price, time, quantity, stock_name, status) VALUES (:username, :stock_symbol, :unit_price, :time, :quantity, :name, :status)\",\n username = username, stock_symbol=symbol, unit_price=price, time=time, quantity=quantity, name=name, status=status)\n message = f'Recorded purchase {quantity} share(s) of {name} for total of {usd(cost)}, your remaining cash is {usd(cash)}'\n return render_template(\"buy.html\", message=message)\n else:\n return apology(\"Not enough cash\", 400)\n else:\n return render_template(\"buy.html\")", "def remove_from_portfolio(username):\n user_obj = User.query.filter(User.username == username).first()\n ticker = request.form.get('ticker')\n date = request.form.get('date')\n qty = 
request.form.get('qty')\n\n if user_obj is None:\n return util.build_json_response('User does not exist')\n\n if (len(ticker) == 0) or (not util.is_valid_date_string(date)):\n return util.build_json_response(\"No ticker or valid date of the form YYYY-MM-DD\")\n\n if not qty.isnumeric() or int(qty) < 0:\n return util.build_json_response(\"Quantity is not valid\")\n\n following_date = util.add_days_to_date(date, 1)\n qty_held = db.session.query(func.sum(Portfolio.quantity))\\\n .filter(Portfolio.user_id == user_obj.id) \\\n .filter(Portfolio.transaction_date <= following_date) \\\n .group_by(Portfolio.ticker).first()[0]\n\n if qty_held < int(qty): \n return util.build_json_response(\"Cannot sell more than you hold\")\n\n price = int(qty) * market_data.get_stock_price(ticker, date, 'low')\n new_balance = user_obj.balance + price\n date = datetime.fromisoformat(date)\n holding = Portfolio.query\\\n .filter(Portfolio.user_id == user_obj.id)\\\n .filter(Portfolio.transaction_date == date)\\\n .filter(Portfolio.ticker == ticker)\\\n .filter(Portfolio.transaction_type == 'SELL').first()\n\n try: \n db.session.execute(\n update(User)\n .values(balance=new_balance)\n .where(User.id == user_obj.id)\n )\n\n if holding is None:\n holding = Portfolio(ticker, date, 'SELL', -int(qty), user_obj.id)\n db.session.add(holding)\n else:\n db.session.execute(\n update(Portfolio)\n .values(quantity=holding.quantity - int(qty))\n .where(Portfolio.user_id == user_obj.id)\n .where(Portfolio.transaction_date == date)\n .where(Portfolio.ticker == ticker)\n .where(Portfolio.transaction_type == 'SELL')\n )\n db.session.commit()\n except:\n return util.build_json_response(\"Failure to update DB\")\n\n return util.build_json_response(\"Stock sold\", stock=request.form, balance=user_obj.balance)", "def buy():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n if not symbol:\n return apology(\"Must enter a symbol\", 400)\n num_shares = request.form.get(\"shares\")\n if not num_shares:\n return apology(\"Must enter some number of shares to buy\", 400)\n company_quote = lookup(symbol)\n if company_quote == None:\n return apology(\"Invalid Symbol\", 400)\n num_shares = int(num_shares)\n if num_shares <= 0:\n return apology(\"Must enter a positve number of shares to buy\", 400)\n balance = db.execute(\"SELECT cash FROM users WHERE id = :id\",\n id=session['user_id'])\n balance = balance[0][\"cash\"]\n cost = num_shares * company_quote[\"price\"]\n if balance < cost:\n return apology(\"Insufficient cash\", 400)\n else:\n new_balance = balance - cost\n date_time = datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n # Update history in history table\n return_val = db.execute(\"INSERT INTO 'history' (id, symbol, shares, price, transacted) VALUES (:id, :symbol, :shares, :price, :transacted)\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"], shares=num_shares, price=company_quote[\"price\"], transacted = date_time)\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n\n #Update total number and value of each shares (symbol) held in totalshares table\n rows = db.execute(\"SELECT id, symbol, numshares, totalvalue FROM totalshares WHERE id = :id AND symbol = :symbol\",\n id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if len(rows) != 1: #if nothing is returned i.e id and symbol combination does not already exist, insert it\n return_val = db.execute(\"INSERT INTO totalshares (id, symbol, numshares, totalvalue) VALUES (:id, :symbol, :numshares, :totalvalue)\",\n 
id=session[\"user_id\"], symbol=company_quote[\"symbol\"], numshares=num_shares, totalvalue=cost)\n if return_val == None:\n return apology(\"something went wrong\", 403)\n else: #if id, symbol combination exists already, update numshares and totalvalue\n new_numshares = rows[0][\"numshares\"] + num_shares\n new_totalvalue = rows[0][\"totalvalue\"] + cost\n return_val = db.execute(\"UPDATE totalshares SET numshares = :new_numshares, totalvalue = :new_totalvalue WHERE id = :id AND symbol = :symbol\",\n new_numshares=new_numshares, new_totalvalue=new_totalvalue, id=session[\"user_id\"], symbol=company_quote[\"symbol\"])\n if return_val == None:\n return apology(\"something went wrong\", 403)\n\n #Update balance in users table\n return_val = db.execute(\"UPDATE users SET cash = :cash WHERE id = :id\", cash=new_balance, id=session[\"user_id\"])\n if return_val != None:\n return redirect(\"/\")\n else:\n return apology(\"something went wrong\", 403)\n\n else:\n return render_template(\"buy.html\")" ]
[ "0.62969047", "0.6211707", "0.6173486", "0.6135983", "0.61165524", "0.6044539", "0.6012283", "0.6010995", "0.60059714", "0.6000789", "0.5987403", "0.59842676", "0.59677374", "0.5959572", "0.5948626", "0.5947851", "0.59408027", "0.59256536", "0.5917937", "0.59175825", "0.5896044", "0.589372", "0.5889756", "0.5889432", "0.58803135", "0.5870898", "0.5869341", "0.58596563", "0.58577543", "0.58557516" ]
0.6369183
0
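A minimal, self-contained sketch of the affordability rule the 'buy' record above enforces — reject the purchase when quantity × unit price exceeds the account balance, otherwise debit the balance and append the lot. The Account class and function names are illustrative assumptions; the dataset's own document uses Flask-SQLAlchemy models and an external price lookup instead.

from dataclasses import dataclass, field
from datetime import date

@dataclass
class Account:
    balance: float
    holdings: list = field(default_factory=list)

def buy(account, ticker, qty, unit_price, on_date):
    # Reject non-positive quantities, then check affordability before debiting.
    if qty <= 0:
        raise ValueError("quantity must be a positive integer")
    cost = qty * unit_price
    if cost > account.balance:
        raise ValueError("cost exceeds balance")
    account.balance -= cost
    account.holdings.append({"ticker": ticker, "qty": qty,
                             "date": on_date, "type": "BUY"})
    return account.balance

acct = Account(balance=500.0)
print(buy(acct, "IBM", 3, 100.0, date(2021, 1, 4)))  # 200.0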
Adds a negative entry into the database to account for 'selling' a particular holding on a particular date only if the user exists and the quantity being sold does not exceed the quantity held up to that date
def remove_from_portfolio(username): user_obj = User.query.filter(User.username == username).first() ticker = request.form.get('ticker') date = request.form.get('date') qty = request.form.get('qty') if user_obj is None: return util.build_json_response('User does not exist') if (len(ticker) == 0) or (not util.is_valid_date_string(date)): return util.build_json_response("No ticker or valid date of the form YYYY-MM-DD") if not qty.isnumeric() or int(qty) < 0: return util.build_json_response("Quantity is not valid") following_date = util.add_days_to_date(date, 1) qty_held = db.session.query(func.sum(Portfolio.quantity))\ .filter(Portfolio.user_id == user_obj.id) \ .filter(Portfolio.transaction_date <= following_date) \ .group_by(Portfolio.ticker).first()[0] if qty_held < int(qty): return util.build_json_response("Cannot sell more than you hold") price = int(qty) * market_data.get_stock_price(ticker, date, 'low') new_balance = user_obj.balance + price date = datetime.fromisoformat(date) holding = Portfolio.query\ .filter(Portfolio.user_id == user_obj.id)\ .filter(Portfolio.transaction_date == date)\ .filter(Portfolio.ticker == ticker)\ .filter(Portfolio.transaction_type == 'SELL').first() try: db.session.execute( update(User) .values(balance=new_balance) .where(User.id == user_obj.id) ) if holding is None: holding = Portfolio(ticker, date, 'SELL', -int(qty), user_obj.id) db.session.add(holding) else: db.session.execute( update(Portfolio) .values(quantity=holding.quantity - int(qty)) .where(Portfolio.user_id == user_obj.id) .where(Portfolio.transaction_date == date) .where(Portfolio.ticker == ticker) .where(Portfolio.transaction_type == 'SELL') ) db.session.commit() except: return util.build_json_response("Failure to update DB") return util.build_json_response("Stock sold", stock=request.form, balance=user_obj.balance)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_with_negative_amount(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"30\", \"-40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_check_user_quantity_stocks_for_given_item_with_greater_quantity(\n offer_sell_instance,\n):\n\n result = check_user_quantity_stocks_for_given_item(\n user_id=offer_sell_instance.user.id,\n item_id=offer_sell_instance.item.id,\n quantity=\"970\",\n )\n\n assert result == False", "def needRestock(self):\n # TODO check if the quantity<threshold and return true if it is\n # we'll set for now the threshold at *five* items\n # so we need to check if self.quantity is less than five.\n\n threshold = 5\n if self.quantity < threshold or PerishableStockItem.pastSellByDate(self):\n return True\n else:\n return False", "def test_add_with_negative_price(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def deduct(self, user: User, quantity: int) -> int:\n from .exceptions import NotEnoughStockError\n\n # Assert that quantity isn't negative\n if quantity < 0:\n raise ValueError('\"quantity\" must be a positive value')\n\n # Perform the deduction and assert that the remaining stock is valid\n new_stock: int = self.on_hand - quantity\n if new_stock < 0:\n raise NotEnoughStockError(self, quantity)\n\n # Adjust the stock\n self.update(user, on_hand=new_stock)\n\n # Return the new stock value\n return new_stock", "def sell():\n \n render_template(\"sell.html\")\n \n currency = request.form.get(\"currency\")\n quantity = request.form.get(\"quantity\")\n \n if request.method == \"POST\":\n\n if currency == \"\" or quantity == \"\" or currency == None or quantity == None:\n return render_template(\"sell.html\")\n else:\n currency = str(currency).upper()\n currency_value = lookup(currency)\n test = db.execute(\"SELECT SUM(quantity) FROM transactions WHERE product = :currency GROUP BY product\", currency = currency)\n \n for e in test:\n for key, value in e.items():\n value = int(value)\n if value < int(quantity):\n return apology(\"not ennough stock\")\n \n if currency_value == None:\n return apology(\"NOT A REAL STOCK\")\n \n else:\n ## access the price key of the stock\n stock_price = currency_value['price']\n ## convert quantity to float to enable multiplication\n quantity = int(quantity)\n if quantity < 0:\n return apology(\"please enter positive numbers only\")\n total_cost = stock_price * quantity\n sale = db.execute(\"UPDATE users SET cash = cash + (:quantity * :stock_price) WHERE id = :user\", quantity = quantity, stock_price = stock_price, user = session[\"user_id\"] )\n \n ## sell stock adds negative sales to the table so that the total quantities will be adding a -1\n sell_stock = db.execute('''INSERT INTO Transactions (user_id, product, quantity, total_cost, stock_price) \n VALUES (:user, :currency, :quantity, :total_cost, :stock_price)''', user = session[\"user_id\"], \n currency = currency, quantity = -quantity, total_cost = -total_cost , stock_price = stock_price)\n return index()\n else:\n return render_template(\"sell.html\")", "def sell():\n\n if request.method == \"POST\":\n\n # define stock variables\n symbol = request.form.get(\"symbol\")\n stock = lookup(request.form.get(\"symbol\"))\n\n # error checking\n if not stock:\n return apology(\"Missing or Incorrect Symbol\", 400)\n\n # check if stock is owned\n try:\n 
sold_stock = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id AND symbol = :symbol GROUP BY symbol\", user_id=session[\"user_id\"], symbol=symbol)[0]\n except IndexError:\n return apology(\"Stock not owned\", 400)\n\n # check for shares input\n try:\n shares = int(request.form.get(\"shares\"))\n except ValueError:\n return apology(\"Input at least 1 share\", 400)\n\n if shares < 0:\n return apology(\"Input at least 1 Share\", 400)\n\n if int(sold_stock[\"shares\"]) < shares:\n return apology(\"Not enough shares to sell\", 400)\n\n else:\n # define variables for inserting into transactions table and updating cash\n purchase_date = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # update user cash\n user_cash = db.execute(\"SELECT cash FROM users WHERE id = :user_id\", user_id=session[\"user_id\"])[0][\"cash\"]\n user_cash = user_cash + (stock[\"price\"]*shares)\n db.execute(\"UPDATE users SET cash = :user_cash WHERE id = :user_id\", user_id=session[\"user_id\"], user_cash=user_cash)\n\n # update transactions table with selling transaction\n db.execute(\"\"\"\n INSERT INTO transactions(user_id, date, symbol, shares, price)\n VALUES(:user_id, :date, :symbol, :shares, :price)\n \"\"\",\n user_id=session[\"user_id\"],\n date=purchase_date,\n symbol=stock[\"symbol\"],\n shares=-shares,\n price=stock[\"price\"]\n )\n\n flash(\"You paper-handed that one!\")\n return redirect(\"/\")\n\n else:\n # query db for current holdings\n stocks = db.execute(\n \"SELECT symbol, SUM(shares) AS shares, price FROM transactions WHERE user_id = :user_id GROUP BY symbol\", user_id=session[\"user_id\"])\n stocks[:] = [stock for stock in stocks if stock.get('shares') > 0]\n return render_template(\"sell.html\", stocks=stocks)", "def sell_stock (self, ticker, sell_date):\n \n self.__validate_sell__() \n self.__get_sell_share_price__(ticker, sell_date)\n self.__calc_profit_from_sales__() \n self.__update_sell_delta_amount__()\n self.__save_sell__()\n\n del self.invested[ticker]", "def sell():\n username = session.get(\"username\")\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n req_quantity = request.form.get(\"shares\")\n if not req_quantity.isdigit() or int(req_quantity)<=0:\n return apology(\"Quantity must be positive integer\", 400)\n req_quantity = int(req_quantity)\n status = \"sold\"\n\n time = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n\n owned_stock = db.execute(\"SELECT SUM(quantity) FROM history WHERE username=:username GROUP BY stock_symbol HAVING stock_symbol=:symbol\",\n username=username, symbol=symbol)\n if owned_stock:\n owned_quantity = owned_stock[0][\"SUM(quantity)\"]\n stock = lookup(symbol)\n price = stock[\"price\"]\n name = stock[\"name\"]\n else:\n owned_quantity = 0\n if owned_quantity>=req_quantity:\n total_value = req_quantity * price\n db.execute(\"INSERT INTO history (username, stock_symbol, unit_price, time, quantity, stock_name, status) VALUES (:username, :symbol, :price, :time, :quantity, :name, :status)\",\n username=username, symbol=symbol, price=price, time=time, quantity=-req_quantity, name=name, status=status)\n db.execute(\"UPDATE users SET cash = cash+:total_value WHERE username=:username\",\n total_value=total_value, username=username)\n cash = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n message = f\"Recorded sold {req_quantity} share(s) of {name} total {usd(total_value)}, your new cash balance is {usd(cash)}\"\n return 
render_template(\"sell.html\", message = message)\n else:\n return apology(\"Insufficient shares\", 400)\n # if db.execute()\n else:\n stock_options = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n stock_options = [s[\"stock_symbol\"] for s in stock_options]\n\n # print(f\"Stock options: {stock_options}\")\n return render_template(\"sell.html\", options = stock_options)", "def test_cannot_sell_more_than_stock(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":15\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Only 10 NY_denims available right now!')\n self.assertEqual(resp.status_code, 400)", "def test_add_with_not_right_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-30\", \n \"-14\", \"2020-12-30\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def add_unavailability_week(date, user):\n diversions = diversion_for_week(date)\n for diversion in diversions:\n available = diversion['Available']\n if user in available:\n available.remove(user)\n unavailable = diversion['Unavailable']\n if user not in unavailable:\n unavailable.append(user)\n resp = table.update_item(\n Key={\"WeekOf\": date, \"Paper\": diversion['Paper']},\n ExpressionAttributeNames={\n \"#available\": \"Available\",\n \"#unavailable\": \"Unavailable\"\n },\n ExpressionAttributeValues={\n \":available\": available,\n \":unavailable\": unavailable\n },\n UpdateExpression=\"SET #available = :available, #unavailable = :unavailable\"\n )", "def userBuyShipObj(self, user : bbUser.bbUser, requestedShip : bbShip.bbShip):\n if self.userCanAffordItemObj(user, requestedShip):\n self.shipsStock.removeItem(requestedShip)\n user.credits -= requestedShip.getValue()\n user.inactiveShips.addItem(requestedShip)\n else:\n raise RuntimeError(\"user \" + str(user.id) + \" attempted to buy ship \" + requestedShip.name + \" but can't afford it: \" + str(user.credits) + \" < \" + str(requestedShip.getValue()))", "def add_to_portfolio(username):\n user_obj = User.query.filter(User.username == username).first()\n ticker = request.form.get('ticker')\n date = request.form.get('date')\n qty = request.form.get('qty')\n\n if user_obj is None:\n return util.build_json_response('User does not exist')\n\n if (len(ticker) == 0) or (not util.is_valid_date_string(date)):\n return util.build_json_response(\"No ticker or valid date of the form YYYY-MM-DD\")\n\n if not qty.isnumeric() or int(qty) < 0:\n return util.build_json_response(\"Quantity is not valid\")\n\n price = int(qty) * market_data.get_stock_price(ticker, date, 'low')\n\n if price > user_obj.balance:\n return util.build_json_response(\"Cost exceeds balance\")\n\n new_balance = user_obj.balance 
- price\n date = datetime.fromisoformat(date)\n holding = Portfolio.query.\\\n filter(Portfolio.user_id == user_obj.id)\\\n .filter(Portfolio.transaction_date == date)\\\n .filter(Portfolio.ticker == ticker)\\\n .filter(Portfolio.transaction_type == 'BUY').first()\n\n try: \n db.session.execute(\n update(User)\n .values(balance=new_balance)\n .where(User.id == user_obj.id)\n )\n\n if holding is None:\n holding = Portfolio(ticker, date, 'BUY', int(qty), user_obj.id)\n db.session.add(holding)\n else:\n db.session.execute(\n update(Portfolio)\n .values(quantity=holding.quantity + int(qty))\n .where(Portfolio.user_id == user_obj.id)\n .where(Portfolio.transaction_date == date)\n .where(Portfolio.ticker == ticker)\n .where(Portfolio.transaction_type == 'BUY')\n )\n db.session.commit()\n except:\n return util.build_json_response(\"Failure to update DB\")\n\n return util.build_json_response(\"Stock added to portfolio\", stock=request.form, balance=user_obj.balance)", "def save(self, *args, **kwargs):\n if not self.pk:\n self.start_time_booking = datetime.date.today()\n self.end_time_booking = self.start_time_booking + datetime.timedelta(days=5)\n self.cars.quantity -= 1\n self.cars.save()\n return super(Reservation, self).save(*args, **kwargs)", "def sell():\n # Moved userID outside of 'if' as could not be accessed in 'else' for html.\n userID = session[\"user_id\"]\n\n if request.method == \"POST\":\n\n user = db.execute(\"SELECT * FROM users WHERE id = :id\", id=userID)\n cash = user[0][\"cash\"]\n\n stock = lookup(request.form.get(\"symbol\"))\n\n numOfShares = float(request.form.get(\"shares\"))\n if not request.form.get(\"symbol\"):\n return apology(\"You haven't typed a symbol\")\n if stock is None:\n return apology(\"This doesn't seem to be a valid symbol, try again\")\n if numOfShares < 0:\n return apology(\"You must state how many shares you want to sell\")\n\n salePrice = stock[\"price\"] * numOfShares\n date_time = datetime.now().strftime('%d-%m-%Y %H:%M:%S')\n\n stockOwned = db.execute(\"SELECT * FROM portfolio WHERE id=:userID AND symbol=:symbol\", userID=userID, symbol=stock[\"symbol\"])\n if not stockOwned:\n return apology(\"You don't own any of this stock\")\n if stockOwned[0][\"numOwned\"] < numOfShares:\n return apology(\"You are trying to sell more shares than you own!\")\n else:\n newNumOwned = float(stockOwned[0][\"numOwned\"]) - numOfShares\n newTotalValue = newNumOwned * stock[\"price\"]\n db.execute(\"UPDATE users SET cash=cash+:salePrice WHERE id=:userID\", salePrice=salePrice, userID=userID)\n db.execute(\"INSERT INTO transactions (id, symbol, num_shares, price_ps, date_time, buy_or_sell) VALUES (:userID, :symbol, :num_shares, :price_ps, :date_time, :buy_or_sell)\",\n userID=userID, symbol=stock[\"symbol\"], num_shares=numOfShares, price_ps=stock[\"price\"], date_time=date_time, buy_or_sell=\"SELL\")\n db.execute(\"UPDATE portfolio SET numOwned=:newNumOwned, totalValue=:newTotalValue WHERE id=:userID AND symbol=:symbol\",\n newNumOwned=newNumOwned, newTotalValue=newTotalValue, userID=userID, symbol=stock[\"symbol\"])\n\n return redirect(\"/\")\n else:\n symbols = db.execute(\"SELECT symbol FROM portfolio WHERE id=:userID\", userID=userID)\n return render_template(\"sell.html\", symbols=symbols)", "def _check_date(self):\n\n limit_hours = self.env.user.company_id.product_rejected_limit_hours\n if limit_hours > 0:\n\n for record in self:\n\n if record.partner_id:\n last_record = self.search([\n ('id', '!=', record.id),\n ('product_id', '=', record.product_id.id),\n 
('partner_id', '=', record.partner_id.id),\n ('company_id', '=', self.env.user.company_id.id),\n ], order='date')\n\n if last_record:\n last_record_date = last_record[-1].date\n last_record_datetime = datetime.strptime(\n last_record_date, '%Y-%m-%d %H:%M:%S')\n record_date = datetime.strptime(\n record.date, '%Y-%m-%d %H:%M:%S')\n\n diff = record_date - last_record_datetime\n hours_diff = (diff.seconds / 60.0) / 60\n\n if hours_diff <= limit_hours:\n raise ValidationError(\n _('This product rejected has already been registered for this same partner in the last %s hours.' % limit_hours)\n )", "def test_add_with_end_shelf_life(self):\n good = GoodInfo(\"яйцо 1 кат.\", \"-30\", \"40\", \"2020-12-1\", \n \"3\", \"2020-12-1\")\n check_product_data = self.database.add(good)\n\n self.assertFalse(check_product_data)", "def test_report_out_of_stock_second_time_more_then_7_days(self, *args):\n ReportOutOfStock.objects.create(menu_item=self.menu, user=self.user,\n start_timer=timezone.now()-timezone.timedelta(days=8))\n self.client.force_login(self.user)\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(ReportOutOfStock.objects.count(), 2)\n\n report = ReportOutOfStock.objects.last()\n self.assertEqual(report.user, self.user)\n self.assertEqual(report.menu_item, self.menu)\n\n self.menu.refresh_from_db()\n self.assertTrue(self.menu.in_stock)", "def sell():\n\n user = session[\"user_id\"]\n\n # If GET just view\n if request.method == \"GET\":\n # view transactions\n rows = db.execute(\"SELECT symbol, amount FROM stocks WHERE user_id = :user\", user=user)\n\n # Create dictionary for stocks data owned\n stocks = {}\n for row in rows:\n stocks[row['symbol']] = row['amount']\n\n return render_template(\"sell.html\", stocks=stocks)\n\n # I case of POST\n amount=int(request.form.get(\"amount\"))\n symbol=request.form.get(\"symbol\")\n price=lookup(symbol)[\"price\"]\n value=round(price * float(amount))\n\n # Update stocks table\n stocks_before = db.execute(\"SELECT amount FROM stocks WHERE user_id = :user AND symbol = :symbol\", symbol=symbol, user=user)[0]['amount']\n stocks_after = stocks_before - amount\n\n # not enough\n if stocks_after < 0:\n return render_template(\"sell.html\", error=True, message=\"You can't sell more than you have\")\n\n # delete stock\n elif stocks_after == 0:\n db.execute(\"DELETE FROM stocks WHERE user_id = :user AND symbol = :symbol\", symbol=symbol, user=user)\n\n # or update it\n else:\n db.execute(\"UPDATE stocks SET amount = :amount WHERE user_id = :user AND symbol = :symbol\", symbol=symbol, user=user, amount=stocks_after)\n\n # update cash and history\n cash = db.execute(\"SELECT cash FROM users WHERE id = :user\", user=user)[0]['cash']\n cash_after = cash + price * float(amount)\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :user\", cash=cash_after, user=user)\n db.execute(\"INSERT INTO transactions(user_id, symbol, amount, value) VALUES (:user, :symbol, :amount, :value)\",\n user=user, symbol=symbol, amount=-amount, value=value)\n\n # If success redirect\n return redirect(\"/\")", "def sell():\n if request.method == \"POST\":\n # Ensure data is inputted\n if not request.form.get(\"symbol\"):\n return apology(\"Insert symbol\", 403)\n \n if not request.form.get(\"shares\"):\n return apology(\"Insert number of shares to sell\", 403)\n \n # Ensure shares value is valid\n try:\n if not int(request.form.get(\"shares\")) > 0:\n return apology(\"invalid value\", 403)\n except ValueError:\n 
return apology(\"invalid value\", 403)\n \n # Ensure there's enough shares to sell \n share_count_dict = db.execute(\"SELECT share_count FROM shares WHERE user_id=:usid AND share=:share\", usid=session[\"user_id\"], share=request.form.get(\"symbol\").upper())\n share_count = int(share_count_dict[0][\"share_count\"])\n \n if int(request.form.get(\"shares\")) > share_count:\n return apology(\"You don't own enough shares\", 403)\n \n # Create variables\n symbol = request.form.get(\"symbol\").upper()\n quantity = int(request.form.get(\"shares\"))\n \n # Add cash to user data\n new_cash = float(lookup(symbol)[\"price\"]) * quantity\n db.execute(\"UPDATE users SET cash= cash + :cash WHERE id=:usid\", cash=new_cash, usid=session[\"user_id\"]) \n \n # Remove shares of user data\n db.execute(\"UPDATE shares SET share_count = share_count - :shares WHERE user_id=:usid AND share = :share\", shares=quantity,share=symbol, usid=session[\"user_id\"])\n db.execute(\"DELETE FROM shares WHERE user_id=:usid AND share_count = :shares\", usid=session[\"user_id\"], shares=0)\n \n # Record transaction\n db.execute(\"INSERT INTO history (user_id, symbol, shares, time, price) VALUES (:usid, :symbol, :shares, :time, :price)\", usid=session[\"user_id\"], symbol=symbol, shares='-' + str(quantity), time=str(db.execute(\"SELECT CURRENT_TIMESTAMP\")[0][\"CURRENT_TIMESTAMP\"]), price=str(lookup(symbol)[\"price\"]))\n \n return redirect(\"/\")\n \n else:\n # Create list with purchased symbols\n symbol_dicts = db.execute(\"SELECT share FROM shares WHERE user_id=:usid\", usid=session[\"user_id\"])\n symbol_list = [None] * len(symbol_dicts)\n \n # Insert symbols into list\n for i in range(len(symbol_dicts)):\n symbol_list[i] = symbol_dicts[i][\"share\"]\n \n return render_template(\"sell.html\", longitude=len(symbol_dicts), symbols=symbol_list)", "def ingredient_used(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.kitchen.id)]\n , order=[('batch_number', 'ASC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def sell():\n if request.method == \"POST\":\n bef = db.execute(\"SELECT symbol FROM ind WHERE user_id = ?\", session[\"user_id\"])\n if not request.form.get(\"symbol\"):\n return apology(\"Please specify which valid 
stock to sell\", 403)\n symbol = request.form.get(\"symbol\")\n p = db.execute(\"SELECT COUNT(symbol) FROM ind WHERE user_id = ?\", session[\"user_id\"])\n q = 0\n\n for i in range(int(p[0][\"COUNT(symbol)\"])):\n if symbol == bef[i][\"symbol\"]:\n q = 1\n if q == 0:\n return apology(\"Please specify which valid stock to sell\", 403)\n if not request.form.get(\"shares\"):\n return apology(\"Please specify how many stocks you want to sell\", 403)\n if int(request.form.get(\"shares\")) < 1:\n return apology(\"Please input a positive integer\", 403)\n if request.form.get(\"shares\").isnumeric() != True:\n return apology(\"Please input a positive integer\", 403)\n hav = db.execute(\"SELECT nos FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n if int(hav[0][\"nos\"]) < int(request.form.get(\"shares\")):\n return apology(\"You do not own that many shares\", 403)\n shares = int(request.form.get(\"shares\"))\n db.execute(\"CREATE TABLE IF NOT EXISTS sells (user_id INTEGER NOT NULL, symbol TEXT NOT NULL, name TEXT NOT NULL, price NUMERIC NOT NULL, shares INTEGER NOT NULL, cost NUMERIC NOT NULL, time datetime NOT NULL, FOREIGN KEY(user_id) REFERENCES users(id))\")\n bro = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n cost = (lookup(symbol)[\"price\"]) * int(request.form.get(\"shares\"))\n money = bro[0][\"cash\"]\n money = money + cost\n db.execute(\"UPDATE users SET cash = ? WHERE id = ?\", money, session[\"user_id\"])\n db.execute(\"INSERT INTO sells(user_id, symbol, name, price, shares, cost, time) VALUES (:user_id, :symbol, :name, :price, :shares, :cost, :time)\", user_id = session[\"user_id\"], symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], shares = shares, cost = cost, time = datetime.datetime.now())\n db.execute(\"INSERT INTO hist(user_id, typ, symbol, name, price, nos, cost, time) VALUES (:user_id, :typ, :symbol, :name, :price, :nos, :cost, :time)\", user_id = session[\"user_id\"], typ = \"SOLD\", symbol = lookup(symbol)[\"symbol\"], name = lookup(symbol)[\"name\"], price = lookup(symbol)[\"price\"], nos = shares, cost = cost, time = datetime.datetime.now())\n\n db.execute(\"UPDATE ind SET nos = ? WHERE symbol = ? AND user_id = ?\", int(hav[0][\"nos\"]) - shares, request.form.get(\"symbol\"), session[\"user_id\"])\n hav = db.execute(\"SELECT nos FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n if int(hav[0][\"nos\"]) == 0:\n db.execute(\"DELETE FROM ind WHERE symbol = ? AND user_id = ?\", request.form.get(\"symbol\"), session[\"user_id\"])\n return redirect(\"/\")\n\n else:\n stocks = db.execute(\"SELECT * FROM ind WHERE user_id = ?\", session[\"user_id\"])\n\n return render_template(\"sell.html\", stocks = stocks)", "def sell():\n\n # User submits information\n if request.method == \"POST\":\n\n # Ensure user entered a stock\n if not request.form.get(\"symbol\"):\n return apology(\"must choose a stock\")\n\n # Get stock selected\n symbol = request.form.get(\"symbol\")\n \n # Ensure is a valid stock symbol\n if not lookup(symbol):\n return apology(\"Invalid stock symbol\")\n\n # Ensure user owns the stock requested\n test = db.execute(\"SELECT * FROM portfolios WHERE user_id = ? AND stocks = ?\", session[\"user_id\"], symbol)\n\n if not test:\n return apology(\"you have 0 shares of this stock\")\n\n owns = db.execute(\"SELECT * FROM portfolios WHERE user_id = ? 
AND stocks = ?\", session[\"user_id\"], symbol)\n\n # Ensure user entered a number in shares\n if not request.form.get(\"shares\") or not isinstance(request.form.get(\"shares\"), int):\n return apology(\"must enter postive whole number of shares\")\n\n shares = request.form.get(\"shares\")\n\n # Ensure number is positive\n if shares <= 0:\n return apology(\"must enter a positive number\")\n\n # Ensure user owns the amount of stock entered to sell\n if shares > owns[0]['shares']:\n return apology(\"you don't own that much of this stock\")\n\n # Get date and time for transaction\n day = datetime.now()\n time = datetime.now().time()\n\n # Get total and stock name for transaction\n price = lookup(symbol)['price']\n total = price * shares\n name = lookup(symbol)['name']\n\n # Sell shares of the stock and add to transactions history\n db.execute(\"INSERT INTO transactions (user_id, date, time, price, shares, total, stock, name, type) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)\",\n session[\"user_id\"], day, time, price, shares * -1, total, symbol, name, \"sell\")\n\n # Update portfolios table\n db.execute(\"UPDATE portfolios SET shares = shares - ? WHERE user_id = ? AND stocks = ?\", shares, session[\"user_id\"], symbol)\n\n # If stock shares is 0, delete from portfolio\n db.execute(\"DELETE FROM portfolios WHERE shares = ? \", 0)\n\n return redirect(\"/\")\n\n # If user reached page via link or redirect\n else:\n\n # Get list of stocks owned\n owns = db.execute(\"SELECT stocks FROM portfolios WHERE user_id = ? ORDER BY stocks\", session[\"user_id\"])\n\n return render_template(\"sell.html\", owns=owns)", "def userSellShipObj(self, user : bbUser.bbUser, ship : bbShip.bbShip):\n user.credits += ship.getValue()\n self.shipsStock.addItem(ship)\n user.inactiveShips.removeItem(ship)", "def sell():\n if request.method == \"POST\":\n symbol = request.form.get(\"symbol\")\n sharesToSell = int(request.form.get(\"shares\"))\n if sharesToSell < 0:\n return apology(\"Shares to sell cannot be negative\", 400)\n\n sharesRows = db.execute(\"SELECT * FROM portfolio WHERE UserID = :userid AND Symbol = :enteredSymbol\",\n userid=session.get(\"user_id\"), enteredSymbol = symbol)\n\n numSharesOwned = 0\n for row in sharesRows:\n numSharesOwned += row[\"NumberOfShares\"]\n\n if numSharesOwned < sharesToSell:\n return apology(\"You don't own that many shares!\", 400)\n\n remainingSharesToSell = sharesToSell\n for row in sharesRows:\n numShares = row[\"NumberOfShares\"]\n if remainingSharesToSell >= numShares:\n '''delete row'''\n delete = db.execute(\"DELETE FROM portfolio WHERE id = :rowid\", rowid = row[\"id\"])\n remainingSharesToSell -= numShares\n else:\n '''update row'''\n updatedShares = numShares - remainingSharesToSell\n update = db.execute(\"UPDATE portfolio SET NumberOfShares = :numshares, TotalPrice = :tp WHERE id = :rowid\",\n numshares = updatedShares, tp = updatedShares * row[\"UnitPrice\"], rowid = row[\"id\"])\n remainingSharesToSell = 0\n\n if remainingSharesToSell == 0:\n break;\n\n quote = lookup(symbol)\n cashToReturn = quote[\"price\"] * sharesToSell\n userRows = db.execute(\"SELECT cash FROM users WHERE id = :userid\", userid = session.get(\"user_id\"))\n usersCurrentCash = userRows[0][\"cash\"]\n\n updatedBalance = usersCurrentCash + cashToReturn\n db.execute(\"UPDATE users SET cash = :cash WHERE id = :userid\", cash = updatedBalance, userid = session.get(\"user_id\"))\n '''Update history'''\n dateNow = datetime.datetime.now()\n db.execute(\"INSERT INTO history (Symbol, Shares, Price, Date, UserID) 
VALUES(:symbl, :shares, :price, :date, :userid)\", symbl = symbol, shares = -1 * sharesToSell, price = -1 * cashToReturn, date = dateNow, userid = session.get(\"user_id\"))\n '''Update history end'''\n return redirect(\"/\")\n\n else:\n symbolRows = db.execute(\"SELECT Symbol FROM portfolio WHERE UserID = :userid GROUP BY Symbol\", userid=session.get(\"user_id\"))\n symbls = []\n for row in symbolRows:\n symbls.append(row[\"Symbol\"])\n\n return render_template(\"sell.html\", symbols=symbls)", "def sell():\n\n if request.method == \"POST\":\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user GROUP BY symbol HAVING SUM(shares) > 0\",\n user=user)\n symbol = request.form.get(\"symbol\")\n shares = int(request.form.get(\"shares\"))\n\n if not symbol:\n return apology(\"please select a valid symbol\")\n\n target_stock = db.execute(\"SELECT *, sum(shares) FROM transactions WHERE user=:user AND symbol=:symbol\",\n user=user, symbol=symbol)\n print(target_stock)\n if not shares:\n return apology(\"must provide how many shares to sell\")\n\n elif shares > target_stock[0]['sum(shares)'] or shares < 1:\n return apology(\"shares must be more than 0 and less than \" + str(target_stock[0]['shares']))\n\n query = lookup(symbol)\n price = query['price']\n name = query['name']\n cash = entry[0]['cash']\n\n db.execute(\"INSERT INTO transactions (id, user, symbol, name, price, shares) VALUES(NULL, :user, :symbol, :name, :price, :shares)\",\n user=user, symbol=symbol, name=target_stock[0]['name'], price=price, shares=-int(shares))\n db.execute(\"UPDATE users SET cash=:cash WHERE id = :id\",\n cash=cash+price*shares, id=session['user_id'])\n\n return redirect(url_for(\"index\"))\n\n else:\n entry = db.execute(\"SELECT * FROM users WHERE id=:id\",\n id=session['user_id'])\n user = entry[0]['username']\n owned = db.execute(\"SELECT * FROM transactions WHERE user=:user GROUP BY symbol HAVING SUM(shares) > 0\",\n user=user)\n\n return render_template(\"sell.html\", stocks=owned)", "def sell():\n\n if request.method == \"POST\":\n sellstock = request.form.get(\"symbol\")\n sellq = int(request.form.get(\"shares\"))\n if sellstock == None:\n return apology(\"Please select a stock symbol to sell.\")\n if sellq < 0:\n return apology(\"Please enter a valid quantity of stocks to sell\")\n invq = db.execute(\"SELECT quantity FROM inventory WHERE userid = :uid AND symbol = :sy\",\n {\"uid\":session[\"user_id\"],\"sy\":sellstock})[0][\"quantity\"]\n if sellq > invq:\n return apology(\"You don't have enough shares.\")\n stock = lookup(sellstock)\n cost = round(sellq*stock[\"price\"], 2)\n db.execute(\"INSERT INTO shares (stock,symbol,value,quantity,cost,userid) VALUES(:st,:sy,:va,:qu,:co,:uid)\",\n {\"st\":stock[\"name\"],\"sy\":sellstock,\"va\":stock[\"price\"],\"qu\":sellq,\"co\":cost,\"uid\":session[\"user_id\"]})\n db.execute(\"UPDATE inventory SET quantity = :qu WHERE userid =:uid AND symbol = :sy\",\n {\"qu\":(invq-sellq),\"uid\":session[\"user_id\"],\"sy\":sellstock})\n db.execute(\"UPDATE users SET cash = cash + :cash WHERE id =:uid\", {\"cash\":cost,\"uid\":session[\"user_id\"]})\n flash(\"Shares successfully sold!\")\n return redirect(\"/\")\n inventory = db.execute(\"SELECT symbol FROM inventory WHERE userid = :uid\", uid=session[\"user_id\"])\n return render_template(\"sell.html\", context = inventory)", "def test_NegativePriceCheck(self):\n # Basic price check\n self.log.info(\"Price 
checking Negative Item via speedkey\")\n pos.click(\"Price Check\")\n pos.click_speed_key(\"Negative Item\")\n \n # Confirm the right item, at the right price\n # NOTE: Price check returns negative prices as possitive. Legacy defect deemed 'Will Not Fix'\n self.read_price_check(\"Negative Item\", \"$5.00\")\n # Add the item\n pos.click(\"Sell Item\")\n \n # Confirm we are in a transaction\n if not self.in_transaction():\n self.tc_fail(\"POS did not start a transaction; can not confirm item was added\")\n else:\n self.log.info(\"Confirmed we are in a transaction\")\n \n # Confirm we added the item, and that it was negative\n ret = self.confirm_line(-1, \"Negative Item\", \"-$5.00\")\n if ret == True:\n self.log.info(\"Confirmed item added\")\n else:\n self.tc_fail(ret)\n \n # Setup for next test\n self.recover()", "def ingredient_used_canceled(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used canceled initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.used.id)]\n , order=[('batch_number', 'DESC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n # pdb.set_trace()\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False" ]
[ "0.608748", "0.59747124", "0.5912512", "0.5866839", "0.5833634", "0.5832183", "0.5770894", "0.5730225", "0.57119876", "0.569739", "0.56778073", "0.5667274", "0.5618804", "0.56061447", "0.556615", "0.55538535", "0.55318004", "0.55185896", "0.5517954", "0.5495279", "0.54871625", "0.54671174", "0.5460408", "0.5455049", "0.54511726", "0.5435943", "0.5432113", "0.54305625", "0.5406039", "0.540274" ]
0.62343097
0
Clear all holdings for a particular user, if they exist, and reset the balance back to the default balance amount
def clear_holdings(username):
    user_obj = User.query.filter(User.username == username).first()
    if user_obj is None:
        return util.build_json_response('User does not exist')
    try:
        db.session.execute(
            update(User)
            .values(balance=Config.DEFAULT_BALANCE)
            .where(User.id == user_obj.id)
        )
        db.session.execute(
            delete(Portfolio)
            .where(Portfolio.user_id == user_obj.id)
        )
        db.session.commit()
    except:
        return util.build_json_response("Failure accessing DB")
    return util.build_json_response("Portfolio cleared")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n for key in self.portfolio.keys():\n self.portfolio[key] = {'holdings': 0}\n self.buys[key] = 0\n self.portfolio['balance'] = 2500000.0", "def reset(cls):\n GrandChallenge.objects.all().delete()\n GrandChallengeUser.objects.update(lost=0, last_round=0)\n cls.set_current_round(0)", "def reset(self) -> None:\n self.cash_balance = self.initial_cash_balance()", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def clear_blockages(self):\n debug.info(3,\"Clearing all blockages\")\n self.rg.clear_blockages()", "def reset_players(self):\n self.dealer.reset()\n for player in self.players:\n player.reset()\n if player.bank <= 500:\n player.set_bank(1000)", "def empty(self):\n Bid.objects.filter(bid_busket=self, is_locked=False).delete()\n self.save()", "def unfreeze_hold(session, hold_id):\n query = {\n 'freeze': False\n }\n headers = {'content-type': 'application/json'}\n data = json.dumps(query)\n r = session.put(api_url_base + '/patrons/holds/{}'.format(str(hold_id)),\n data=data, headers=headers)\n return r", "def reset() -> None:\n RwLocks.by_name = {}\n RwLocks.lockers = {}", "def unblock_all(t):\n blocked_count = 0\n\n while True:\n blocked_user_ids = t.blocks.ids()[\"ids\"]\n if not blocked_user_ids:\n print(\"No more IDs to unblock\")\n break\n\n for user_id in blocked_user_ids:\n blocked_count = blocked_count + 1\n print(f\"{blocked_count}: {user_id}\")\n try:\n t.blocks.destroy(user_id=user_id, include_entities=False, skip_status=True)\n except:\n print(\"error\")", "def reset_user(self):\n\n if self.resin.auth.is_logged_in():\n self.wipe_application()\n self.resin.models.key.base_request.request(\n 'user__has__public_key', 'DELETE',\n endpoint=self.resin.settings.get('pine_endpoint'), login=True\n )", "def round_reset(self):\n\t\tfor p in self.players: p.reset_state()\n\t\t#self.player = player(self.player.name, self.player.cards)\n\t\tself.small_blind.money -= self.blind // 2\n\t\tself.small_blind.stake += self.blind // 2\n\t\tself.big_blind.money -= self.blind\n\t\tself.big_blind.stake += self.blind\n\t\tself.last_bet_by = self.big_blind\n\t\tself.bank = self.blind\n\t\tself.bank_part = self.blind\n\t\tself.table_cards = []\n\t\tself.stage = stages.nocards", "def clear_user_module_score(self, user):\r\n self.set_user_module_score(user, None, None)", "def clear_lockout_counter(cls, user):\r\n try:\r\n entry = LoginFailures.objects.get(user=user)\r\n entry.delete()\r\n except ObjectDoesNotExist:\r\n return", "def clear_pins(self):\n self.pins = {}\n self.all_pins = set()\n self.pin_groups = {} \n # DO NOT clear the blockages as these don't change\n self.rg.reinit()", "def clearTroves(self):\n self.primaryTroveList.thaw(\"\")\n self.newTroves.thaw(\"\")\n self.oldTroves.thaw(\"\")", "def reset_auctioneer(self):\n self.bidders.clear()\n self._highest_bid = 0\n self._highest_bidder = None", "def unblock(self):\n self.failed_logins = 0\n self.blocked = False", "def reset(self):\n self.book = {}\n self.book[Trade.WAY_BUY] = []\n self.book[Trade.WAY_SELL] = []", "def removeall(self):\n\n # If there used to be a key, there must exist an old value blob somewhere in the database. 
It should be deallocated after a successful commit to disk.\n for key in self.keys:\n if self.keys[key] is not None:\n punchat,punchlen = self.keys[key]\n self.awaitingpunch.append((punchat, punchlen))\n \n self.keys = {}\n self.buffered = {}\n self.cache = {}\n \n if self.autocommit:\n commit()", "def reset_all_users():\n for user in User.objects.all():\n user.delete()", "def refresh(self):\n self._accounts = None", "def reset(self):\n self.state = {\n \"price\" : self.history[0]['price'],\n \"timestamp\" : self.history[0]['timestamp'],\n \"prev_price\" : 0,\n \"imm_prev_transaction\" : 0,\n \"prev_transaction\" : 0,\n \"day\" : self.history[0]['day'],\n \"hour\" : self.history[0]['hour'],\n \"minute\" : self.history[0]['minute'],\n \"posture\" : \"\",\n \"balance\" : self.balance,\n \"bag\" : self.balance,\n \"value\" : self.balance,\n \"transaction\" : 0\n }\n self.posture = 1\n self.states = []\n self.transactions = []\n self.balance = 100\n self.bag = 100\n self.pointer = 0\n self.profit = 0\n self.value = 0\n self.initial_value = self.calculate_value()\n self.trade(1).next()", "def reset(self):\n for lane in self.lanes.values():\n lane.puck_area.clear_widgets()\n lane.patrons = list()\n lane.disabled = False\n lane.beers = list()\n\n self.message_holder.remove_widget(self.you_lose_label)\n self.message_holder.remove_widget(self.you_win_label)", "async def clear(self, ctx, amount: int, user: discord.Member = None):\n amount += 1\n\n def clear_x(m):\n return m.author == user\n if not user:\n everyone = True\n else:\n everyone = False\n if amount <= 101:\n if not everyone:\n await ctx.channel.purge(limit=amount, check=clear_x, bulk=True)\n elif everyone:\n await ctx.channel.purge(limit=amount, bulk=True)\n log.console(f\"Pruned {amount} messages from {ctx.channel.id}\")\n if amount >= 102:\n if amount > 1000:\n amount = 1000\n number = (amount // 100)\n await ctx.send(\n f\"> **{amount}** messages will be deleted in 5 seconds and will be split in intervals of 100.\")\n for _ in range(number):\n await asyncio.sleep(0)\n if not everyone:\n await ctx.channel.purge(limit=100, check=clear_x, bulk=True)\n elif everyone:\n await ctx.channel.purge(limit=100, bulk=True)\n log.console(f\"Pruned 100 messages from {ctx.channel.id}\")\n await ctx.send(f\"> **{amount}** messages have been pruned from {ctx.channel.id}.\")", "def reset(self):\n self.last_round = False\n self.last_player = None\n self.scores = [0] * self.num_players\n self.current_player = 0\n self.turn = 0\n self.roll = None", "def resetPlayerBetAmount(self, players):\n\t\tfor x in players:\n\t\t\tx.betAmount = []", "def unfreeze(self,):\n if self.frozen and self.id_lock.locked():\n self.id_lock.release()\n self.loglocker.release()\n self.frozen = False", "def reset(self) -> None:\n self.logger.info(\"Reset\")\n\n self._has_bob = False\n self._has_single = False\n self._index = 0\n self._row = self.rounds()", "def finalize_memberships():\n memberships = Membership.objects.all()\n for membership in memberships:\n membership.available_days -= 1\n membership.save()\n if membership.available_days == 0:\n membership.delete()\n profile = membership.user.profile\n profile.is_active = False\n profile.save()" ]
[ "0.64368814", "0.6183782", "0.60984427", "0.60094863", "0.59135014", "0.5843152", "0.58169794", "0.5763059", "0.57237643", "0.57175833", "0.5624603", "0.5614059", "0.5593863", "0.5558275", "0.5531175", "0.55167025", "0.5496675", "0.5473267", "0.5462998", "0.54517573", "0.5424972", "0.5388487", "0.53616", "0.52948785", "0.5280318", "0.52736914", "0.52449334", "0.523853", "0.52331233", "0.5228" ]
0.66361046
0
Retrieve price for a particular stock ticker on a particular date of the form YYYYMMDD (default price type to 'low')
def get_price(ticker):
    date = request.args.get('date')
    if (date is None) or (not util.is_valid_date_string(date)):
        return util.build_json_response("No date selected or not in the form YYYY-MM-DD")
    price = market_data.get_stock_price(ticker, date, 'low')
    if price is None:
        return util.build_json_response("No Data Found")
    return util.build_json_response("Stock found", ticker=ticker, date=date, price=price)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_stock_price(stock):\n pass", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. Symbol'])\n counter += 1\n\n return info, symbols", "def get_price_on_or_before_date(date, prices):\n for i in range(6):\n current_date = date - timedelta(days=i)\n if current_date in prices:\n return float(prices[current_date]), i\n return (None, None)", "def get_stock_price(self):\n day = request.args[\"day\"]\n return jsonify({'price': float(self.market.get_stock_price(int(day)))})", "def get_price_data(ticker, days_befoure):\r\n #config_file=raw_input('config file: ')\r\n config_file=\"d:/tmp/moex.json\" \r\n try:\r\n with open(config_file) as config_file: \r\n conn_data = json.load(config_file)\r\n except:\r\n print \"Error: Unable to read config file. \"\r\n sys.exit(1)\r\n\r\n username = conn_data['username']\r\n password = conn_data['password']\r\n my_config = Config(user=username, password=password, proxy_url='')\r\n\r\n my_auth = MicexAuth(my_config)\r\n date = datetime.datetime.now() - datetime.timedelta(days_befoure)\r\n \r\n #ticker = 'SBER' # for tesing...\r\n \r\n if my_auth.is_real_time():\r\n iss = MicexISSClient(my_config, my_auth, MyDataHandler, MyData)\r\n iss.get_history_securities('stock',\r\n 'shares',\r\n 'tqbr',\r\n ticker, \r\n date.strftime(\"%Y-%m-%d\")\r\n #here to be start end dates\r\n )\r\n #print iss.handler.data.history\r\n return iss.handler.data.as_dataframe()", "def get_price(horizon_host, pair):\n print \"fetching latest price for:\" + pair[\"name\"]\n params = make_trade_params(pair)\n res = requests.get(horizon_host + \"/trades\", params).json()\n try:\n trade_record = res[\"_embedded\"][\"records\"][0]\n except IndexError:\n return DatedPrice(date=datetime.utcfromtimestamp(0), price=0)\n price = float(trade_record[\"price\"][\"n\"]) / float(trade_record[\"price\"][\"d\"])\n timestamp = parser.parse(trade_record[\"ledger_close_time\"])\n return DatedPrice(date=timestamp, price=price)", "def get_daily_data_from_stooq(ticker_symbol, start_date, end_date):\n # check whether the start_date and end_date are strings\n if isinstance(start_date, str) and isinstance(end_date, str):\n pass\n else:\n raise ValueError(\"Dates passed to the function are not strings!!!\")\n # validate formats of dates passed to the function\n validate_date_format_yyy_mm_dd(start_date)\n print(\"Validation of start_date format result: positive...\")\n validate_date_format_yyy_mm_dd(end_date)\n print(\"Validation of end_date format result: positive...\")\n d_1 = start_date.replace(\"-\", \"\")\n d_2 = end_date.replace(\"-\", \"\")\n temp_url = \"https://stooq.com/q/d/l/?s=\" + ticker_symbol + \"&d1=\" \\\n + d_1 + \"&d2=\" + d_2 + \"&i=d\"\n print(\"Getting data from URL: \", temp_url)\n # try-except block to catch the cases when the ticker symbol is nonexistent\n try:\n data_in = pd.read_csv(temp_url, usecols=['Date', 'Close'],\n parse_dates=[0])\n except ValueError:\n print(\"ValueError occurred! Probably a nonexistent ticker has been\"\n \" passed to the function\")\n except Exception:\n print(\"General error has occurred! 
Please check function arguments...\")\n else:\n # if data is obtained, rename \"Close\" ===> ticker name\n data_in.rename(columns={\"Close\": ticker_symbol}, inplace=True)\n return data_in", "def prices(symbol):\n to = date.today().strftime(\"%Y%m%d\")\n c = db.cursor()\n c.execute(\"SELECT DATE_ADD(max(date), INTERVAL 1 DAY) FROM quote where symbol = %s\",\n (symbol))\n (_from, ) = c.fetchone()\n if _from == date.today():\n print \"Skipping %s\" % symbol\n return\n print \"Downloading %s\" % symbol\n if _from is None: \n _from = start_date\n else:\n _from = _from.strftime(\"%Y%m%d\")\n prices = stockquote.get_historical_prices(symbol, _from, to)\n headers = prices[0]\n try:\n close = get_idx(headers, 'Close')\n date_ = get_idx(headers, 'Date')\n open = get_idx(headers, 'Open')\n high = get_idx(headers, 'High')\n low = get_idx(headers, 'Low')\n quotes = prices[1:]\n for l in quotes:\n #print \"%s %s\" % (l[date_], l[close])\n try:\n insert(symbol, l[date_], l[close], l[high], l[low], l[open])\n except Exception, e:\n print \"Could not insert %s:%s\" % (symbol, e)\n print \"Inserted %s new quotes for %s\" % (len(quotes), symbol)\n except Exception, e:\n print \"Could not download %s\" % symbol\n print e", "def getDatePrice(self):\n return self.getHistorical().ix[:,[0,5]]", "def get_price(data):\n return data[\"summaryDetail\"][\"regularMarketPreviousClose\"][\"raw\"]", "def get_stock_prices(ticker, start_date, end_date=None):\n if end_date is None:\n end_date = dt.date.today()\n\n shares = Share(ticker)\n df = pd.DataFrame(shares.get_historical(start_date.isoformat(),\n end_date.isoformat()))\n return df.set_index(\"Date\", drop=True) \\\n .drop(\"Symbol\", axis=1) \\\n .astype(float) \\\n .sort_index()", "def get_stock_prices(ticker_symbol, start_date, finnhub_client):\n end_date = pd.Timestamp(pd.Timestamp.today().date())\n end_unix = get_unix_time(end_date)\n start_unix = get_unix_time(start_date)\n\n # Pause shortly\n time.sleep(1)\n\n # Stock candles\n res = finnhub_client.stock_candles(ticker_symbol, 'D', start_unix, end_unix)\n if res[\"s\"] == \"no_data\":\n return pd.DataFrame()\n # Convert to Pandas Dataframe\n df_finnhub = pd.DataFrame(res)\n timestamp_index = df_finnhub[\"t\"].apply(lambda x: pd.Timestamp(pd.to_datetime(x, unit='s', origin='unix').date()))\n df_ticker = pd.DataFrame(df_finnhub[\"o\"].values, index=timestamp_index.values)\n return df_ticker", "def stock_data(ticker, start,today=date.today()):\n df= web.DataReader(ticker,'yahoo',start,today)\n return df", "def stock():\n stock=stock_data('AAPL',start(2019,12,1))\n return stock", "def get_price(ticker_symbol, page=None):\n if page is None:\n page = scrape_page(BASE_URL + ticker_symbol)\n\n sentiment = page.xpath(PRICE_XPATH)\n\n if not sentiment:\n return None\n else:\n return sentiment[0].replace(\"\\n\", \"\")", "def getprice():\n\n print(\"Get price\")\n latest_price = get_latest_price(item_code)\n return latest_price", "def get_price(self, pair='XBTZAR'):\n data = {'pair': pair}\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, None, 'ticker', query_string))\n if r.status_code == 200:\n return r.json()", "def get_price_quote(self, d=None, column='adj_close'):\n quote = namedtuple('Quote', 'price time')\n if d is None:\n df = web.get_quote_yahoo(self.ticker)\n d = date.today()\n time = dt_utils.parse_date(df['time'][0]).time()\n dt = datetime.combine(d, time=time)\n return quote(price=df['last'], time=dt)\n else:\n price = self.ohlcv.ix[d][column][0]\n return 
quote(price=price, time=d)", "def daily_price():\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']", "def getData(symbol, dataKind):\n try:\n link = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol='+symbol+'&outputsize=compact&apikey=ENTER_KEY'\n htmltext = urllib.request.urlopen(link)\n data = json.load(htmltext)\n myDict = {}\n print(type(data))\n price_data = data['Time Series (Daily)']\n for key, value in price_data.items():\n date_num = datetime.strptime(key,\"%Y-%m-%d\")\n price = value[dataKind]\n myDict[date_num] = float(price)\n masterDF = pd.DataFrame.from_dict(myDict, orient = 'index')\n masterDF.index.name = \"Time\"\n masterDF.columns = [symbol]\n return masterDF\n\n except:\n print('Error occured when fetching data.')\n exit(0)", "def gettickerdata(tickername):\n\n r = requests.get(constants.bloomurl + getticker(tickername) + ':US')\n soup = BeautifulSoup(r.text, 'html.parser')\n results = soup.find_all('div', class_=\"price\")\n return (\"$\" + results[0].text)", "def get_stock_daily(id: int, db: Session = Depends(get_db)):\n return crud.get_stock(id, db)", "def get_price_for_volume_series(conn, sticker, limit_price, volume, is_back):\n ticks = get_sticker_odds(conn, sticker)\n rets = get_volume_at_price(ticks, limit_price, volume, is_back)\n return rets", "def get_data(ticker, tickers):\n \n print(ticker)\n ## Date setting\n today = datetime.today()\n days_ago_90 = today - timedelta(days = 90)\n today = today.strftime(\"%Y-%m-%d\")\n days_ago_90 = days_ago_90.strftime(\"%Y-%m-%d\")\n \n df_ticker = web.DataReader(ticker, 'yahoo', start = days_ago_90, end = today)\n \n ## To get prices, iloc is used. It's because shifting by timedetlas will result in error in cases where some holidays occured \n price_most_recent = df_ticker.iloc[-1, 5]\n price_7_days_ago = df_ticker.iloc[-7, 5]\n price_21_days_ago = df_ticker.iloc[-21, 5]\n price_30_days_ago = df_ticker.iloc[-30, 5]\n price_90_days_ago = df_ticker.iloc[0,5]\n \n ## Getting price change\n price_change_7_days = price_change(price_most_recent, price_7_days_ago)\n price_change_21_days = price_change(price_most_recent, price_21_days_ago)\n price_change_30_days = price_change(price_most_recent, price_30_days_ago)\n price_change_90_days = price_change(price_most_recent, price_90_days_ago)\n \n ## Checking for constant price drop\n constant_price_drop_7 = constant_price_drop_detector(df_ticker, 7)\n ## Only if price drops constantly for 7 days it makes sense to check for this pattern in 21 days period\n if constant_price_drop_7 == \"YES\":\n constant_price_drop_21 = constant_price_drop_detector(df_ticker, 21)\n else:\n constant_price_drop_21 = \"NO\"\n \n ## Now creating the final df to return\n df_prices = df_ticker[['Adj Close']].T\n df_prices.index = [ticker]\n df_prices.reset_index(inplace = True)\n \n full_name = tickers.loc[tickers[\"Ticker\"] == ticker, 'Full Name'].values[0]\n df_prices['company_name'] = full_name\n df_prices['price_90_days_ago'] = price_90_days_ago\n df_prices['price_30_days_ago'] = price_30_days_ago\n df_prices['price_21_days_ago'] = price_21_days_ago\n df_prices['price_7_days_ago'] = price_7_days_ago\n df_prices['price_most_recent'] = price_most_recent\n \n df_prices['price_change_7_days'] = price_change_7_days\n df_prices['price_change_21_days'] = price_change_21_days\n df_prices['price_change_30_days'] = price_change_30_days\n df_prices['price_change_90_days'] = price_change_90_days\n \n df_prices['constant_price_drop_7'] = constant_price_drop_7\n 
df_prices['constant_price_drop_21'] = constant_price_drop_21\n \n df_prices.fillna(\"None\", inplace = True)\n \n return df_prices", "def get_price_for_volume_at(conn, sticker, limit_price, volume, is_back, timestamp):\n tick = get_last_tick_before(conn, sticker, timestamp)\n rets = get_volume_at_price([tick], limit_price, volume, is_back)\n return rets[0]", "def lookup(symbol):\n\n # Contact API\n try:\n api_key = os.environ.get(\"API_KEY\")\n response = requests.get(f\"https://cloud-sse.iexapis.com/stable/stock/{urllib.parse.quote_plus(str(symbol))}/quote?token={api_key}\")\n response.raise_for_status()\n except requests.RequestException:\n flash(\"Please set API_KEY\", 'danger')\n return None\n\n # Parse response\n try:\n quote = response.json()\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"],\n \"change\": quote[\"change\"],\n \"changePercent\": quote[\"changePercent\"],\n \"volume\": quote[\"volume\"],\n \"week52High\": quote[\"week52High\"],\n \"week52Low\": quote[\"week52Low\"],\n \"open\" :quote[\"open\"],\n \"high\" :quote['high'],\n \"low\" : quote[\"low\"]\n }\n except (KeyError, TypeError, ValueError):\n return None", "def get_rates_for(currency: str, date: str):\n baseurl = f\"https://openexchangerates.org/api/historical/{date}.json\"\n params = {\"app_id\": OEG_APP_ID, \"symbols\": currency, \"base\": \"USD\"}\n return make_request(baseurl=baseurl, params=params)", "def get_call_data(stock_name, expire_time, strike_price):\n date = time.mktime(datetime.datetime.strptime(expire_time, \"%d/%m/%Y\").timetuple())+(16*3600)\n url = 'https://finance.yahoo.com/quote/'+stock_name+'/options?date='+str(int(date))+'&p='+stock_name\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n values = soup.findAll(\"td\" )\n\n for i in range(2,len(values),11):\n x = float(str(values[i].contents[0].contents[0]))\n if x == float(strike_price):\n option_link = 'https://finance.yahoo.com/'+str(values[i-2].contents[0])[61:109]\n bid = float(values[i+2].contents[0])\n ask = float(values[i+3].contents[0])\n return bid, ask", "def get_current_price(self, tickers: Union[Ticker, Sequence[Ticker]],\n frequency: Frequency = None) -> Union[float, QFSeries]:\n frequency = frequency or self.fixed_data_provider_frequency or Frequency.MIN_1\n\n if frequency <= Frequency.DAILY:\n raise ValueError(\"The Intraday Data Handler can be used only with the Intraday Frequency\")\n\n tickers, was_single_ticker_provided = convert_to_list(tickers, Ticker)\n # if an empty tickers list was supplied then return an empty result\n if not tickers:\n return QFSeries()\n\n current_datetime = self.timer.now()\n\n # Check if the current time is at the market open, if so - take the Open price of the time range, starting\n # at current datetime\n if current_datetime + MarketOpenEvent.trigger_time() == current_datetime:\n time_range_start = current_datetime\n field = PriceField.Open\n else:\n time_range_start = current_datetime - frequency.time_delta()\n field = PriceField.Close\n\n prices_data_array = self.data_provider.get_price(tickers,\n field,\n time_range_start,\n time_range_start + frequency.time_delta(),\n frequency)\n try:\n # Below, the loc[time_range_start] is used instead of iloc[0], in order to return the price exactly from the\n # time_range_start, and not from the range between time_range_start and time_range_start +\n # frequency.time_delta()\n prices_series = prices_data_array.loc[time_range_start]\n except 
KeyError:\n prices_series = QFSeries(index=tickers)\n\n prices_series.name = \"Current asset prices\"\n\n prices_series = cast_series(prices_series, QFSeries)\n if was_single_ticker_provided:\n return prices_series[0]\n else:\n return prices_series", "def new_get_historical_price(base, target, date):\n if base == \"BTC\" and target == \"EUR\":\n return {\"BTC\": {\"EUR\": 10000}}\n elif base == \"EUR\" and target == \"BTC\":\n return {\"EUR\": {\"BTC\": 0.00012}}\n elif base == \"LTC\" and target == \"BTC\":\n return {\"LTC\": {\"BTC\": 0.02}}\n elif base == \"LTC\" and target == \"EUR\":\n return {\"LTC\": {\"EUR\": 250}}" ]
[ "0.6860741", "0.64612657", "0.6285231", "0.627891", "0.6268929", "0.62418354", "0.6219314", "0.6187929", "0.61725336", "0.6109318", "0.6077271", "0.6072425", "0.6063008", "0.6049149", "0.6006757", "0.6006257", "0.5955611", "0.5879088", "0.5806045", "0.5804109", "0.57986856", "0.57555187", "0.5747502", "0.5729375", "0.57039315", "0.5694835", "0.5692848", "0.56921446", "0.56883544", "0.5680205" ]
0.76464707
0
Return the repository path from a CVS path.
    >>> getrepopath(b'/foo/bar')
    '/foo/bar'
def getrepopath(cvspath):
    # According to CVS manual, CVS paths are expressed like:
    # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
    #
    # CVSpath is splitted into parts and then position of the first occurrence
    # of the '/' char after the '@' is located. The solution is the rest of the
    # string after that '/' sign including it
    parts = cvspath.split(b':')
    atposition = parts[-1].find(b'@')
    start = 0
    if atposition != -1:
        start = atposition
    repopath = parts[-1][parts[-1].find(b'/', start) :]
    return repopath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repo_path(repo, *path):\n return os.path.join(repo.vcsdir, *path)", "def _branchPath(self, path):\n assert self.branch_dir is not None\n return os.path.join(self.branch_dir, path)", "def path(src, name='default'):\n try:\n return get_output(['hg', 'path', name], cwd=src).strip()\n except subprocess.CalledProcessError:\n return None", "def repo_path(repo, *path):\n return os.path.join(repo.gitdir, *path)", "def find_git_repository(self, path):\n while path is not None:\n git_path = os.path.join(path,'.git')\n if os.path.exists(git_path) and os.path.isdir(git_path):\n return path\n path = os.path.dirname(path)\n return None", "def BrocCVSPath(self):\n return self._module.broc_cvspath", "def svn_fs_path(*args):\r\n return _fs.svn_fs_path(*args)", "def GcsPath(*path_components):\n return os.path.join(*path_components)", "def ConvertToCygpath(path):\n if sys.platform == 'cygwin':\n p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)\n path = p.communicate()[0].strip()\n return path", "def BrocPath(self):\n return os.path.join(self._module.workspace, self._module.broc_cvspath)", "def getGitPath() -> osp:\n current_dir = osp.dirname(osp.realpath(__file__))\n git_dir = osp.dirname(osp.dirname(current_dir))\n return git_dir", "def get_current_path(self, cvs_path, lod):\n\n node = self.get_current_lod_directory(lod)\n\n for sub_path in cvs_path.get_ancestry()[1:]:\n node = node[sub_path]\n\n return node", "def repositoryPathToURI( path ):\n return \"pbi://secondary/references/%s\" % os.path.basename( path )", "def svn_fs_berkeley_path(*args):\r\n return _fs.svn_fs_berkeley_path(*args)", "def svn_branch():\n return svn_url().split('/')[-1]", "def getpath(self, path):\n return self._join(path)", "def rpath(path):\n if path.startswith('/'):\n path = path[1:]\n return path", "def svn_client_url_from_path(char_url, char_path_or_url, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def get_path(self, path):\n if path.startswith('/') and not path.startswith('~/'):\n return os.getcwd() + '/' + path\n else:\n return path", "def get_git_repo_url(path: str) -> Optional[str]:\n try:\n from git import Repo\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the Git executable is probably not on your PATH),\"\n \" so Git SHA is not available. 
Error: %s\",\n e,\n )\n return None\n\n try:\n repo = Repo(path, search_parent_directories=True)\n return next((remote.url for remote in repo.remotes), None)\n except Exception:\n return None", "def _getEncodedUri(self, path):\n\n return self._repositoryUri + pepareSvnPath(path)", "def pretty_path(path):\n return path.replace(REPO_DIR + '/', '')", "def trim_repo_path(self, path):\n # get the repo first\n repo = self.find_repo(path)\n\n if not repo:\n return path\n\n # then try to trim the path\n if path.startswith(repo.path):\n return path[len(repo.path) :]\n elif path.startswith(repo.windows_path):\n return path[len(repo.windows_path) :]\n elif path.startswith(repo.linux_path):\n return path[len(repo.linux_path) :]\n elif path.startswith(repo.osx_path):\n return path[len(repo.osx_path) :]\n return path", "def get_path(self, path):\n return abspath(join(self.origin, *path))", "def _git_path(request, wiki):\n\n path = request.path.split(u'/{0}/'.format(wiki))[1]\n\n # Remove slashes\n while path and path[0] == u'/':\n path = path[1:]\n\n while path and path[-1] == u'/':\n path = path[:-1]\n\n return path", "def get_url(path, repo=None, rev=None, remote=None):\n with _make_repo(repo, rev=rev) as _repo:\n _require_dvc(_repo)\n out = _repo.find_out_by_relpath(path)\n remote_obj = _repo.cloud.get_remote(remote)\n return str(remote_obj.checksum_to_path_info(out.checksum))", "def _fullpath(self, path):\n splitpath = path.split(self._baseurl, 2)\n if len(splitpath) == 1:\n result = os.path.join(self._baseurl, path)\n else:\n result = path # path contains baseurl already\n return result", "def getPath(self, uri):\n if os.path.isdir(uri):\n return uri\n else:\n raise RepoException(\"The repo path does not exist: %s\" % uri)", "def get_bucket_and_path_from_uri(path):\n parsed_url = urlparse(path)\n return parsed_url.netloc, parsed_url.path.lstrip('/')", "def get_realpath(cls, path_str):\n if path_str.startswith('/'):\n return path_str\n return os.path.abspath(os.path.join(cls.apollo_root, path_str))" ]
[ "0.61027926", "0.6061968", "0.5992272", "0.59105736", "0.5753857", "0.569133", "0.56849474", "0.56822276", "0.56474394", "0.56140965", "0.5577763", "0.5547634", "0.5543549", "0.5512189", "0.54327875", "0.5401934", "0.5398828", "0.53873706", "0.53672487", "0.53585917", "0.5331998", "0.53196496", "0.5312679", "0.52800715", "0.5256072", "0.52273744", "0.5223478", "0.5213586", "0.5210158", "0.5210009" ]
0.811643
0
Returns the project name and the version name
def fullname(self): return "{project}/{version}".format( project=self.project.name, version=self.name )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getProjectName():", "def project_name(self):\n pass", "def name(self):\r\n return self.setuptools_requirement.project_name", "def get_project_name(self):\n return self.line_edit.text()", "def get_package_name(self):\n return self.name + '-' + self.version", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def project(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"project\")", "def get_package_name(self):\n return self.name + '-' + self.version + '-' + self.release", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def project(self) -> str:\n return pulumi.get(self, \"project\")", "def version_name(self) -> str:\n return pulumi.get(self, \"version_name\")", "def _project_name(self):\n name = getattr(self._req.req, 'project_name', '')\n if name:\n return name\n raise ValueError('Requirement has no project_name.')", "def get_project_name(self):\n remote = self.get_gitlab_remote()\n return self.get_project_name_from_url(remote.url)", "def getVersionString():\n return str(version_gen.major) + \".\" + str(version_gen.minor) + \".\" + str(version_gen.compilation)", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")" ]
[ "0.811286", "0.7547395", "0.72261554", "0.7059194", "0.7008339", "0.6974289", "0.6974289", "0.6974289", "0.6974289", "0.6974289", "0.6974289", "0.6974289", "0.6974289", "0.6974289", "0.6974289", "0.6974289", "0.6974289", "0.6974289", "0.69604677", "0.69451076", "0.69451076", "0.69451076", "0.69451076", "0.69451076", "0.69451076", "0.6932376", "0.6856724", "0.6839156", "0.68181694", "0.6816193" ]
0.7832595
1
Gets a list of projectversions which are recursive dependencies of the given projectversion.
def get_projectversion_deps(projectversion_id, session): query = """ WITH RECURSIVE getparents(projectversion_id, dependency_id) AS ( SELECT projectversion_id, dependency_id FROM projectversiondependency WHERE projectversion_id = :projectversion_id UNION ALL SELECT s2.projectversion_id, s2.dependency_id FROM projectversiondependency s2, getparents s1 WHERE s2.projectversion_id = s1.dependency_id ) SELECT projectversion_id, dependency_id FROM getparents; """ result = session.execute(query, {"projectversion_id": projectversion_id}) projectversion_ids = [] for row in result: projectversion_ids.append(row[1]) return projectversion_ids
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dependencies(self, revision: Dict) -> List[Dict]:\n dependency_ids = revision['auxiliary']['phabricator:depends-on']\n revisions = self.get_revisions(phids=dependency_ids)\n result = []\n for r in revisions:\n result.append(r)\n sub = self.get_dependencies(r)\n result.extend(sub)\n return result", "def get_revision_dependencies(self, revision_name: str) -> List[Set[str]]:\n revisions: Dict[str, DBRevision] = self.load_revisions()\n revision_layers: List[Set[str]] = [{revision_name}]\n\n while True:\n new_layer: Set[str] = set()\n for rev in revision_layers[-1]:\n new_layer = new_layer.union(revisions[rev].dependencies)\n\n if len(new_layer) == 0:\n break\n\n revision_layers.append(new_layer)\n revision_layers.reverse()\n return revision_layers", "def _get_dependencies(self, requirement_name, version):\n pkg_metadata = self._get_metadata(requirement_name)\n versions = pkg_metadata.get('versions', dict())\n version = versions.get(str(version), dict())\n return sorted(version.get('dependencies', dict()).items())", "def dependent_projects(self):\n projects = self._metadata.get('dependent_projects', [])\n projects = map(lambda p: os.path.join(self.path, p) if not os.path.isabs(p) else p, projects)\n return list(projects)", "def dependencies(project_name):\n deps = []\n logging.info('Locating {}'.format(project_name))\n located = distlib.locators.locate(project_name, prereleases=True)\n if located is None:\n logging.warn('{} not found'.format(project_name))\n return []\n for dep in located.run_requires:\n # Drop any version details from the dependency name.\n deps.append(just_name(dep))\n return deps", "def versions(self) -> List['RadsProjectVersion']:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\")\n return [RadsProjectVersion(self, RadsVersion(l)) for l in listing.splitlines()]", "def getDependenciesList(self) -> List[Mapping[Any, Any]]:\n if self._dependencyList is not None:\n return self._dependencyList\n\n chartfile = self.getChartFile()\n if chartfile['apiVersion'] == 'v2':\n if 'dependencies' in chartfile:\n self._dependencyList = chartfile['dependencies']\n else:\n self._dependencyList = []\n elif chartfile['apiVersion'] == 'v1':\n self.readArchiveFiles()\n if self._archiveFiles is not None and 'requirements.yaml' in self._archiveFiles:\n self._dependencyList = self._getFile('requirements.yaml')['dependencies']\n else:\n self._dependencyList = []\n else:\n raise ConfigurationError('Unknown chart file version: {}'.format(chartfile))\n return self._dependencyList", "def get_project_versions(self, package):\n with self._conn.begin():\n return [\n ProjectVersionsRow(*row)\n for row in self._conn.execute(\n \"SELECT version, yanked, released, skip, builds_succeeded, \"\n \"builds_failed \"\n \"FROM get_project_versions(%s)\", (package,)\n )\n ]", "def find_dependants_recurse(key, rev_tree, previous=None):\n if previous is None:\n previous = set()\n if not key in rev_tree:\n return []\n this_level_dependants = set(rev_tree[key])\n next_level_dependants = set()\n for dependant in this_level_dependants:\n if dependant in previous:\n continue\n tmp_previous = previous.copy()\n tmp_previous.add(dependant)\n next_level_dependants.update(\n find_dependants_recurse(dependant, rev_tree,\n previous=tmp_previous,\n ))\n # ensures reloading order on the final list\n # by postponing the reload of modules in this level\n # that also appear later on the tree\n dependants = (list(this_level_dependants.difference(\n 
next_level_dependants)) +\n list(next_level_dependants))\n return dependants", "def projects(self, langs=True) -> List['RadsProjectVersion']:\n dependencies = self.dependencies()\n if langs is False:\n return dependencies[None]\n elif langs is True:\n return list({pv for pvs in dependencies.values() for pv in pvs})\n elif isinstance(langs, Language):\n return dependencies[langs]\n else:\n return list({pv for lang in langs for pv in dependencies[lang]})", "def project_dependents(self, manager: str, package: str) -> Any:\n\n return search_api(\"pproject_dependents\", manager, package)", "def project_dependencies(self, manager: str, package: str) -> Any:\n\n return search_api(\"pproject_dependencies\", manager, package)", "def select_versions(self):\n return []", "def get_dependencies(self, recursive=False):\n dependencies = set()\n for reference in self.references:\n if isinstance(reference.ref_cell, Cell):\n if recursive:\n dependencies.update(reference.ref_cell.get_dependencies(True))\n dependencies.add(reference.ref_cell)\n return dependencies", "def get_linked_versions(version='current'):\n version = check_version_str(version)\n chapters = [10, 9, 8]\n version_page = 'https://research.cs.wisc.edu/htcondor/manual/{ver}/{chapter}_Version_History.html'\n r = requests.get(version_page.format(ver=version, chapter=chapters[0]))\n if r.status_code == 404:\n # Try different chapter numbers, as it changes for different versions\n i = 1\n while r.status_code == 404 and i < len(chapters):\n r = requests.get(version_page.format(ver=version, chapter=chapters[i]))\n i += 1\n if r.status_code == 404:\n return []\n soup_vers = bs4.BeautifulSoup(r.text, 'lxml')\n versions = [x.text.replace('Version ', '')\n for x in soup_vers.find_all('a')\n if x.text.startswith('Version')]\n return versions", "def get_pkg_recursive_deps(self, pkg):\n return self.recursive_pkg_deps[pkg].union(\n *[self.recursive_pkg_deps[test_import] for test_import in self.test_imports.get(pkg, ())])", "def get_dependency_configurations(self):\n deps = []\n\n for variant in self.resolve_variants():\n # Note: the variants have already been resolved\n # This for loop simply needs to resolve the dependencies one\n # by one, potentially overwriding earlier ones\n name, value = next(iter(variant.items()))\n if 'requires' in value and value['requires'] is not None:\n requires = value['requires']\n for req_name, req_config in requires.items():\n deps.append((req_name, req_config['version']))\n\n return deps", "def list_dependencies(self, value):\n try:\n self.dependency_re = self.dependency_re or re.compile(r\"\\${\\w*}\")\n matched = self.dependency_re.findall(value)\n if matched:\n dependencies = [match[2:-1] for match in matched if match[2:-1] != self.name]\n return list(set(dependencies))\n except:\n pass\n return []", "def check_referenced_versions(self, pdm=None):\n if not pdm:\n pdm = ProgressManagerFactory.get_progress_manager()\n\n caller = pdm.register(\n 3, \"%s.check_referenced_versions() prepare data\" % self.__class__.__name__\n )\n\n # deeply get which file is referencing which other files\n self.deep_version_inputs_update()\n if caller:\n caller.step()\n\n from anima.dcc import empty_reference_resolution\n\n reference_resolution = empty_reference_resolution(\n root=self.get_referenced_versions()\n )\n\n if caller:\n caller.step()\n\n # reverse walk in DFS\n dfs_version_references = []\n\n version = self.get_current_version()\n if not version:\n return reference_resolution\n\n for v in version.walk_inputs():\n 
dfs_version_references.append(v)\n\n if caller:\n caller.step()\n\n # pop the first element which is the current scene\n dfs_version_references.pop(0)\n\n caller.end_progress()\n\n # register a new caller\n caller = pdm.register(\n len(dfs_version_references),\n \"%s.check_referenced_versions()\" % self.__class__.__name__,\n )\n\n # iterate back in the list\n for v in reversed(dfs_version_references):\n # check inputs first\n to_be_updated_list = []\n for ref_v in v.inputs:\n if not ref_v.is_latest_published_version():\n to_be_updated_list.append(ref_v)\n\n if to_be_updated_list:\n action = \"create\"\n # check if there is a new published version of this version\n # that is using all the updated versions of the references\n latest_published_version = v.latest_published_version\n if latest_published_version and not v.is_latest_published_version():\n # so there is a new published version\n # check if its children needs any update\n # and the updated child versions are already\n # referenced to the this published version\n if all(\n [\n ref_v.latest_published_version\n in latest_published_version.inputs\n for ref_v in to_be_updated_list\n ]\n ):\n # so all new versions are referenced to this published\n # version, just update to this latest published version\n action = \"update\"\n else:\n # not all references are in the inputs\n # so we need to create a new version as usual\n # and update the references to the latest versions\n action = \"create\"\n else:\n # nothing needs to be updated,\n # so check if this version has a new version,\n # also there could be no reference under this referenced\n # version\n if v.is_latest_published_version():\n # do nothing\n action = \"leave\"\n else:\n # update to latest published version\n action = \"update\"\n\n # before setting the action check all the inputs in\n # resolution_dictionary, if any of them are update, or create\n # then set this one to 'create'\n if any(\n rev_v in reference_resolution[\"update\"]\n or rev_v in reference_resolution[\"create\"]\n for rev_v in v.inputs\n ):\n action = \"create\"\n\n # so append this v to the related action list\n reference_resolution[action].append(v)\n\n # from stalker import Version\n # assert isinstance(v, Version)\n caller.step(message=v.nice_name)\n\n caller.end_progress()\n\n return reference_resolution", "def find_files_for_packageversion(self, packageversion, absolute_path=False):\n package_files = []\n for attr in ('binary_packages', 'source_packages'):\n if hasattr(packageversion, attr):\n for bp in getattr(packageversion, attr):\n for files in bp.package_files:\n if not files.filename in package_files:\n package_files.append(files.filename if not absolute_path\n else pylons.config['debexpo.repository'] + files.filename)\n return package_files", "def show_rev_deps(self, package):\n return self.show_deps(package, \"show-rev-deps\")", "def getDeps(self):\n return self._depth.copy()", "def find_branches(versions):\n\n versions = map(LooseVersion, versions)\n\n # group versions by (major, minor) parts\n major_minor = lambda item: item.version[:2]\n versions.sort()\n tip = last(versions)\n grouped = groupby(versions, key=major_minor)\n\n chunks = (tuple(value) for key, value in grouped)\n\n # we only take versions which has patches\n chunks = (versions for versions in chunks if len(versions) > 1)\n\n # and we only need latest patch releases\n result = map(last, chunks)\n\n # we also add the last version bacause it is a tip\n if last(result) is not tip:\n result.append(tip)\n\n return [item.vstring for 
item in result]", "def list_project_ref_hierarchy(self, entity):\n\n refs = []\n\n for ref in self.cache.list_project_refs(entity.objects['project'], self.tagRefs):\n if ref.name.startswith(entity.objects['refPrefix']):\n remainingRefName = pathlib.Path(ref.name).relative_to(pathlib.Path(entity.objects['refPrefix'])).parts[0]\n refs.append(remainingRefName)\n\n return refs", "def getDependencyList(self):\n return self.getDocumentedObject().getDependencyList()", "def get_all_dependencies_for_task(task):\n from pybuilder.reactor import Reactor\n task_name = task.__name__\n execution_manager = Reactor.current_instance().execution_manager\n task_and_all_dependencies = execution_manager.collect_all_transitive_tasks([task_name])\n return [dependency for dependency in task_and_all_dependencies if dependency.name != task_name]", "def versions(self, stored=False) -> List['RadsSolutionVersion']:\n\n if stored:\n fspath = self.storage.fspath(self.path)\n if not os.path.isdir(fspath):\n return [] # solution not in storage\n listing = []\n for path in os.listdir(fspath):\n if not os.path.isdir(os.path.join(fspath, path)):\n continue\n listing.append(path)\n else:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\").splitlines()\n return sorted(RadsSolutionVersion(self, RadsVersion(l)) for l in listing)", "def __gitSubmodulesList(self):\n self.vcs.gitSubmoduleList(self.project.getProjectPath())", "def get_stack_versions(stack_root):\n stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)\n code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))\n versions = []\n if 0 == code:\n for line in out.splitlines():\n versions.append(line.rstrip('\\n'))\n if not versions:\n versions = get_versions_from_stack_root(stack_root)\n return versions", "def list_projects(self) -> List['RadsProject']:\n ret = []\n base = self.fspath(\"projects\")\n for name in os.listdir(base):\n if os.path.isdir(f\"{base}/{name}/releases\"):\n ret.append(RadsProject(self, name))\n return ret" ]
[ "0.6127279", "0.6023296", "0.58947617", "0.58304703", "0.5793256", "0.5770767", "0.57204896", "0.5594969", "0.55554485", "0.5517041", "0.54677266", "0.54007363", "0.5350627", "0.5348529", "0.5333446", "0.5318038", "0.5244087", "0.52239704", "0.5212116", "0.520666", "0.5197854", "0.5171073", "0.5161903", "0.51600355", "0.5158778", "0.5155784", "0.51436764", "0.5136931", "0.5095952", "0.50672466" ]
0.77341354
0
Initializes grid to be empty, takes height and width of grid as parameters. Indexed by rows (left to right), then by columns (top to bottom)
def __init__(self, grid_height, grid_width): self._grid_height = grid_height self._grid_width = grid_width self._cells = [[EMPTY for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_grid(self):\r\n for i in range(self.height):\r\n for j in range(self.width):\r\n self.grid[i][j] = 0\r\n \r\n # fill up unvisited cells\r\n for r in range(self.height):\r\n for c in range(self.width):\r\n if r % 2 == 0 and c % 2 == 0:\r\n self.unvisited.append((r,c))\r\n\r\n self.visited = []\r\n self.path = dict()\r\n self.generated = False", "def initialize_grid(self) -> None:\n for i in range(self.grid_size[0]):\n for j in range(self.grid_size[1]):\n self.set(i, j, self.base_color)", "def create_empty_grid(width, height):\n return [[None] * width for _ in range(height)]", "def initialize_grid(self):\n self.grid = np.zeros([self.N, self.N, self.N])\n return self.grid", "def __init__(self, grid, x, y, cols):\n self.grid = grid\n self.x = x\n self.y = y\n self.cols = cols", "def fill_grid(self):\n\n for row_margin, row in enumerate(range(self.rows)):\n self.grid.append([])\n\n for col_margin, col in enumerate(range(self.cols)):\n x = col*self.cell_size + col_margin\n y = row*self.cell_size + row_margin\n\n rect = pygame.Rect(x, y, self.cell_size, self.cell_size)\n\n cell = Cell(row, col, rect)\n\n if row == 7 and col == 3:\n cell.root = True\n self.root = cell\n elif row == 7 and col == 16:\n cell.goal = True\n self.goal = cell\n\n self.grid[row].append(cell)", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\r\n self._height = puzzle_height\r\n self._width = puzzle_width\r\n self._grid = [[col + puzzle_width * row\r\n for col in range(self._width)]\r\n for row in range(self._height)]\r\n\r\n if initial_grid != None:\r\n for row in range(puzzle_height):\r\n for col in range(puzzle_width):\r\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid = None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, grid):\n self.grid = grid\n (self.H, self.W) = self.grid.shape\n\n # Store the empty cells to simplify `random_state`\n self.empty_cells = set()\n for y, row in enumerate(grid):\n for x, is_wall in enumerate(row):\n if not is_wall:\n 
self.empty_cells.add((x, y))\n # Getting random empty cells uses a list.\n self.empty_cell_list = list(self.empty_cells)", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "def __init__(self, puzzle_height, puzzle_width, initial_grid=None):\n self._height = puzzle_height\n self._width = puzzle_width\n self._grid = [[col + puzzle_width * row\n for col in range(self._width)]\n for row in range(self._height)]\n\n if initial_grid != None:\n for row in range(puzzle_height):\n for col in range(puzzle_width):\n self._grid[row][col] = initial_grid[row][col]", "def reset(self):\n # replace with your code\n dummy_row = self._grid_height\n dummy_col = self._grid_width\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \n for dummy_row in range(self._grid_height)]\n \n self.new_tile()\n self.new_tile()", "def initialize(self, height, width,):\n grid = list()\n for x in xrange(height):\n grid.append(list())\n for y in xrange(width):\n grid[x].append(Node(x, y))\n return grid", "def __init__(self, init_grid=None):\n\n self.height = len(init_grid)\n self.width = len(init_grid[0])\n\n self.grid = [[Cell(self, c) for c in row]\n for row in init_grid]\n\n self.g = nx.Graph()\n self.tangle()", "def reset(self):\r\n # creating the grid with the values all initialized to zero\r\n \r\n self._grid = [[ 0 for dummy_col in range(self._width)]\r\n for dummy_row in range(self._height)]\r\n # introducing the two initial tiles\r\n self.new_tile()\r\n self.new_tile()\r\n #for testing purposes\r\n #print self.grid\r\n #print self\r", "def __init__(self, row=4, col=4, initial=2):\n self.grid = Grid(row, col, initial)", "def create_grid(grid):\r\n for i in range (4):\r\n grid.append ([])\r\n for j in range (4):\r\n grid[i].append (0)", "def reset(self):\n # self.grid = [[0] * self.grid_width] * self.grid_height\n self.grid = []\n for dummy_row in range(self.grid_height):\n new_row = []\n for dummy_col in range(self.grid_width):\n new_row.append(0)\n self.grid.append(new_row)\n self.new_tile()\n self.new_tile()", "def __init__(self) -> None:\n self.row = 6\n self.col = 7\n self.grid = []\n\n for y in range(self.row):\n temp_row = []\n for x in range(self.col):\n temp_row.append(\" \")\n self.grid.append(temp_row)", "def __init__(self, width = 7, height = 7):\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]", "def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])", "def create_grid(self):\n return [[0] * self.width for _ in range(self.height)]", "def init_grid(self):\n grid = []\n for i in range(self.settings['grid_size']):\n grid.append([])\n for j in range(self.settings['grid_size']):\n if [j, i] in self.settings['walls']:\n grid[i].append(g.WALL)\n else:\n grid[i].append(g.EMPTY)\n return grid", "def __init__(self):\n self._grid = [[None]]", "def __init__(self, rows=9, columns=26):\n self.rows = rows\n self.columns = columns\n self.grid = [[Cell(j, i, constants.EMPTY_CELL_MARK) for i in range(rows)] for j in range(\n columns)]\n self.total_ships = 0\n self.active_ships = 0\n self.destroyed_ships = 0", "def reset(self):\n self._grid = [[0 for dummy_col in range(self._width)]\n for dummy_row in range(self._height)]\n self.new_tile()\n 
self.new_tile()", "def _create_grid_with_cells(self, width, height):\n grid = []\n for row in range(height):\n grid.append([])\n for column in range(width):\n if column % 2 == 1 and row % 2 == 1:\n grid[row].append(TILE_EMPTY)\n elif (\n column == 0 or row == 0 or column == width - 1 or row == height - 1\n ):\n grid[row].append(TILE_CRATE)\n else:\n grid[row].append(TILE_CRATE)\n grid[-2][-3] = TILE_EMPTY\n grid[1][0] = TILE_EMPTY\n return grid" ]
[ "0.7630782", "0.74650055", "0.7415772", "0.73265195", "0.7318922", "0.72002435", "0.7185097", "0.7185097", "0.7185097", "0.7169826", "0.7161286", "0.71592313", "0.71498895", "0.71498895", "0.7091087", "0.70734245", "0.7069152", "0.7042371", "0.70330805", "0.7016255", "0.69839865", "0.69783366", "0.69602436", "0.6935206", "0.6934083", "0.691766", "0.69034487", "0.6890137", "0.68800783", "0.68772596" ]
0.81462014
0
Clears grid to be empty
def clear(self): self._cells = [[EMPTY for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n self._grid = [[None]]", "def clear(self):\r\n\t\tself.grid.fill(False)", "def clear(self):\n board.change_grid(self.x, self.y, 0)", "def reset(self):\n # self.grid = [[0] * self.grid_width] * self.grid_height\n self.grid = []\n for dummy_row in range(self.grid_height):\n new_row = []\n for dummy_col in range(self.grid_width):\n new_row.append(0)\n self.grid.append(new_row)\n self.new_tile()\n self.new_tile()", "def reset(self):\n\n for _ in range(self.grid_height):\n row = [0 for _ in range(self.grid_width)]\n self.grid.append(row)", "def reset(self):\r\n self.grid = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n self.new_tile()\r\n self.new_tile()", "def reset(self):\n self._grid = [[0 for dummy_col in range(self._width)]\n for dummy_row in range(self._height)]\n self.new_tile()\n self.new_tile()", "def reset(self):\n self._grid = [[0] * self._width for _ in range(self._height)]\n self.new_tile()\n self.new_tile()", "def reset(self):\n # replace with your code\n dummy_row = self._grid_height\n dummy_col = self._grid_width\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \n for dummy_row in range(self._grid_height)]\n \n self.new_tile()\n self.new_tile()", "def clear(self):\n\n for cell in self.cells:\n cell.clear()", "def reset(self):\n # replace with your code\n self._grid = [[0 for dummy_column in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n for dummy_num in range(2):\n self.new_tile()", "def reset(self):\r\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \r\n for dummy_row in range(self._grid_height) ]\r\n \r\n \r\n self.new_tile()\r\n self.new_tile()", "def reset(self):\r\n # replace with your code\r\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\r\n self.new_tile()\r\n self.new_tile()", "def reset(self):\n # replace with your code\n self._grid = [[0] * self._width for _ in xrange(self._height)]\n self.new_tile()\n self.new_tile()", "def clear(self):\n for key in self.__columns:\n self.__widths[key] = 0\n self.__data = []\n self.__selectedRow = -1\n self.__formatString = \"\"\n self._window.clear()\n self.drawBorder()", "def reset(self):\r\n # creating the grid with the values all initialized to zero\r\n \r\n self._grid = [[ 0 for dummy_col in range(self._width)]\r\n for dummy_row in range(self._height)]\r\n # introducing the two initial tiles\r\n self.new_tile()\r\n self.new_tile()\r\n #for testing purposes\r\n #print self.grid\r\n #print self\r", "def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()", "def emptyGrid(self, gameGrid=None, emptyValue=0):\n if not gameGrid:\n gameGrid = self.gameGrid\n for r, c in gameGrid:\n self.emptyCell(r, c, gameGrid=gameGrid, emptyValue=emptyValue)", "def clear(self):\n self.gridLayout.setRowStretch(self.gridLayout.rowCount()-1, 0)\n for i in reversed(range(self.gridLayout.count())):\n item = self.gridLayout.itemAt(i)\n axis = item.widget()\n if axis:\n self.gridLayout.removeWidget(axis)\n axis.hide()\n axis.deleteLater()\n else:\n self.gridLayout.removeItem(item)\n \n self.axisWidgets = []\n self.axesNames = []", "def clear(self):\r\n poc_grid.Grid.clear(self)\r\n self._zombie_list = []\r\n self._human_list = []", "def clear(self):\n poc_grid.Grid.clear(self)\n 
self._zombie_list = []\n self._human_list = []\n # need proof it works", "def reset(self):\r\n self.grid = np.array([[' '] * self.width for row in range(self.height)])\r\n self.num_checkers = 0", "def clear(self):\n poc_grid.Grid.clear(self)\n self._fire_boundary.clear()", "def clear_board(self):\n pygame.draw.rect(self.display, self.white, pygame.Rect(0, 0, self.window_x, self.window_y))\n self.draw_grid()", "def clearPlayground(self):\n\n for cell in self.cells:\n cell.delete()\n self.cells = []\n self.generation = 0", "def reset(self):\n width = len(self.cell)\n height = len(self.cell[0])\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]", "def clear_tiles(self):\n for y in range(Settings.SIZE_Y):\n for x in range(Settings.SIZE_X):\n self.__tile_grid[y][x].configure(\n image=self.__marker_images[MarkerType.NONE])", "def reset(self):\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n self.new_tile()\n self.new_tile()\n #return self._cells", "def clear(self):\n for i in range(self.height):\n for j in range(self.width):\n self.data[i][j] = \" \"", "def reset(self):\n self._last_item = None\n self._connected_items = []\n\n self._title_label.deleteLater()\n\n for item in self._items:\n item.deleteLater()\n\n for i in range(self._column_span):\n self._grid.setColumnStretch(self._column_id + i, 0)\n\n self._items = []\n self._row_index = 0" ]
[ "0.887634", "0.8503257", "0.8120353", "0.7833024", "0.7820857", "0.7812106", "0.7717571", "0.76588106", "0.7652856", "0.7600189", "0.7595094", "0.7563524", "0.7540327", "0.7539779", "0.7524181", "0.75000095", "0.7498649", "0.7464991", "0.74193263", "0.7418663", "0.73871803", "0.73763674", "0.7360471", "0.73443204", "0.7341187", "0.7333545", "0.73220515", "0.7281629", "0.7270584", "0.7238458" ]
0.8566958
1
Set cell with index (row, col) to be empty
def set_empty(self, row, col): self._cells[row][col] = EMPTY
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n self._cells = [[EMPTY for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]", "def reset(self):\n width = len(self.cell)\n height = len(self.cell[0])\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]", "def emptyCell (self, row, column, gameGrid=None, emptyValue=0):\n if not gameGrid:\n gameGrid = self.gameGrid\n self.addObject(emptyValue, row, column, gameGrid=gameGrid)", "def clearCell(self, (xIndex, yIndex)):\n changed = self.grid[xIndex][yIndex] == True\n self.grid[xIndex][yIndex] = False\n if changed:\n self.drawSquare((xIndex, yIndex))", "def reset(self):\r\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \r\n for dummy_row in range(self._grid_height) ]\r\n \r\n \r\n self.new_tile()\r\n self.new_tile()", "def clear(self):\n for row in range(self.rows):\n for col in range(self.cols):\n self.data[row][col] = '.'", "def clear(self):\n for row in range(self.rows):\n for col in range(self.cols):\n self.data[row][col] = '.'", "def clear(self):\n\n for cell in self.cells:\n cell.clear()", "def reset(self):\r\n # replace with your code\r\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\r\n self.new_tile()\r\n self.new_tile()", "def clear(self):\n self._grid = [[None]]", "def setBlank(self, pos):\n self.tiles[-1] = pos", "def reset(self):\n # replace with your code\n dummy_row = self._grid_height\n dummy_col = self._grid_width\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \n for dummy_row in range(self._grid_height)]\n \n self.new_tile()\n self.new_tile()", "def empty_cell(cls):\n return SPACE", "def clear_cell(self, row=None, col=None):\n if row is not None:\n if row < 0 or row >= self.rows:\n raise ValueError('%d is not a valid row' % row)\n if col < 0 or col >= self.columns:\n raise ValueError('%d is not a valid column' % col)\n cellid = self._get_cell_id(row, col)\n else:\n cellid = None\n self._clear_component(cellid)", "def reset(self):\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n self.new_tile()\n self.new_tile()\n #return self._cells", "def emptyGrid(self, gameGrid=None, emptyValue=0):\n if not gameGrid:\n gameGrid = self.gameGrid\n for r, c in gameGrid:\n self.emptyCell(r, c, gameGrid=gameGrid, emptyValue=emptyValue)", "def clear(self):\n for i in range(self.height):\n for j in range(self.width):\n self.data[i][j] = \" \"", "def clear(self):\r\n\t\tself.grid.fill(False)", "def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()", "def clear(self):\n row, col = self.selected\n if self.cubes[row][col].value == 0:\n self.cubes[row][col].set_temp(0)", "def clear(self):\n board.change_grid(self.x, self.y, 0)", "def set_full(self, row, col):\n self._cells[row][col] = FULL", "def clear(self) -> None:\n for y in range(self.width):\n for x in range(self.height):\n self.set_value(Point(y, x), FieldState.EMPTY)", "def reset(self):\n for rows in range(self.height):\n for col in range(self.width):\n self.slots[rows][col] = ' '", "def clear_cell(self, x, y):\n r = self.rect_area(x, y)\n background = pygame.Surface((75, 75)) # creates a white surface\n background.fill((255, 255, 255))\n self.screen.blit(background, (x * 80 + 3, 80 + y * 80 + 3)) # draw\n pygame.display.update(r) # 
update screen to showcase changes", "def _empty_cell(self, i_row, i_col):\n return self._board[i_row][i_col] == \" \"", "def reset(self):\n\n for _ in range(self.grid_height):\n row = [0 for _ in range(self.grid_width)]\n self.grid.append(row)", "def clear(self):\n self.fill(None)", "def is_empty(self, row, col):\n return self._cells[row][col] != FULL", "def get_empty_cells(grid):\n\tempty = []\n\tfor j,row in enumerate(grid):\n\t\tfor i,val in enumerate(row):\n\t\t\tif not val:\n\t\t\t\tempty.append((j,i))\n\treturn empty" ]
[ "0.78295135", "0.7669678", "0.76374394", "0.7258782", "0.71456724", "0.71178824", "0.71178824", "0.7110621", "0.71029586", "0.7092802", "0.70917785", "0.7086161", "0.7058204", "0.7033031", "0.6960891", "0.68685335", "0.6855809", "0.68292814", "0.6816586", "0.6803696", "0.67927927", "0.67713714", "0.6706634", "0.668237", "0.66504395", "0.6646695", "0.6632732", "0.6611775", "0.65792096", "0.657367" ]
0.8645582
0
Set cell with index (row, col) to be full
def set_full(self, row, col): self._cells[row][col] = FULL
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_empty(self, row, col):\n self._cells[row][col] = EMPTY", "def reset(self):\n width = len(self.cell)\n height = len(self.cell[0])\n self.cell = [ [EMPTY for r in range(height)] for c in range(width) ]", "def setBlank(self, pos):\n self.tiles[-1] = pos", "def set_tile(self, row, col, value):\r\n # replace with your code\r\n self._cells[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._cells[row][col] = value", "def reset(self):\r\n # replace with your code\r\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\r\n self.new_tile()\r\n self.new_tile()", "def reset(self):\r\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \r\n for dummy_row in range(self._grid_height) ]\r\n \r\n \r\n self.new_tile()\r\n self.new_tile()", "def set_cell_by_index(self, column_index, cell):\n while len(self) <= column_index:\n self.append(None)\n self[column_index] = cell", "def reset(self):\n # replace with your code\n dummy_row = self._grid_height\n dummy_col = self._grid_width\n self._cells = [ [0 for dummy_col in range(self._grid_width)] \n for dummy_row in range(self._grid_height)]\n \n self.new_tile()\n self.new_tile()", "def clear(self):\n self._cells = [[EMPTY for dummy_col in range(self._grid_width)]\n for dummy_row in range(self._grid_height)]", "def set_tile(self, row, col, value):\n # replace with your code\n if col < self.grid_height and row < self.grid_width:\n self.board[row][col] = value", "def set_tile(self, row, col, value):\r\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n if row >= 0 and row < self.get_grid_height():\n if col >= 0 and col < self.get_grid_width():\n # Only set if the row and column are ok\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value", "def set_tile(self, row, col, value):\r\n self._cells[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid_2048[row][col] = value", "def set_tile(self, row, col, value):\n # replace with your code\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\r\n self._grid[row][col]=value", "def set_tile(self, row, col, value):\r\n del self.board[row][col]\r\n self.board[row].insert(col,value)\r\n return self.board", "def set_tile(self, row, col, value):\n # replace with your code\n self._grid[row][col] = value;", "def set_cell(self, x, y, val):\n pass", "def set_cell(self, x, y, val):\n pass", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n self.grid[row][col] = value", "def set_tile(self, row, col, value):\n self._cells[row][col] = value", "def reset(self):\n self._cells = [[0 for dummy_col in range(self._grid_width)] for dummy_row in range(self._grid_height)]\n self.new_tile()\n self.new_tile()\n #return self._cells", "def emptyCell (self, row, column, gameGrid=None, emptyValue=0):\n if not gameGrid:\n gameGrid = self.gameGrid\n self.addObject(emptyValue, row, column, gameGrid=gameGrid)", "def reset(self):\r\n # replace with your code\r\n for row in range(0, self._grid_height):\r\n for col in range(0, self._grid_width):\r\n self._grid_tile[row][col] = 0\r\n # at this step, all cells should be available\r\n self.new_tile()\r\n self.new_tile()", "def setCell(self, (xIndex, yIndex)):\n changed = 
self.grid[xIndex][yIndex] == False\n self.grid[xIndex][yIndex] = True\n if changed:\n self.drawSquare((xIndex, yIndex))" ]
[ "0.74354434", "0.6868537", "0.67742896", "0.66115975", "0.65451497", "0.6479824", "0.64311063", "0.6417367", "0.64082235", "0.6407858", "0.64059615", "0.6401799", "0.6378078", "0.6373002", "0.6373002", "0.6368696", "0.6360975", "0.6360384", "0.6356319", "0.63451356", "0.63406634", "0.6312832", "0.6312832", "0.62957793", "0.62957793", "0.6255554", "0.6250467", "0.62484175", "0.62375313", "0.6229911" ]
0.81318814
0
Checks whether cell with index (row, col) is empty
def is_empty(self, row, col): return self._cells[row][col] != FULL
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _empty_cell(self, i_row, i_col):\n return self._board[i_row][i_col] == \" \"", "def is_empty(self, row, col):\n return self.field[row, col] == '-'", "def check_empty(cell):\n return pd.isna(cell)", "def testEmptyCell(self, row, column, gameGrid=None, emptyValue=0):\n if not gameGrid:\n gameGrid = self.gameGrid\n row = self.limitValue(row, 0, self.rows-1)\n column = self.limitValue(column, 0, self.columns-1)\n if gameGrid.getItem(row, column) == emptyValue:\n return True\n else:\n return False", "def is_empty(self, row, column):\n\n return self.board[row][column] == placeholder", "def is_empty(self):\n return (self.number_of_rows() == 0 and\n self.number_of_columns() == 0)", "def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY", "def is_empty(self, x, y):\n if x in range(self.nx) and y in range(self.ny):\n if self.grid[y][x] == ' ':\n return True\n return False", "def is_empty(self):\n return self.idx == 0", "def cellIsEmpty(self, x, y):\n\n\n #If x and y are out of bounds, we return False\n if x < 0 or y < 0:\n return False\n\n #Else we get the cell that interests us\n else:\n cell = self._get_grille()[y][x]\n\n\n if cell.element is None:\n print(\"Cell element is None\")\n return True\n \n #If it is a box, depart, arrivee, wall or spikes. The cell is not empty\n elif cell.element.name == \"box\" or cell.element.name == \"depart\" or cell.element.name == \"arrivee\" or cell.element.name == \"spikes\" or cell.element.name == \"mur\":\n print(\"Cell element : \"+cell.element.name)\n return False\n \n #Else, it means there is nothing in the cell or an element the box can cross\n else:\n return True", "def checkEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return True\n return False", "def is_blank(self):\n return not any(self._1 in _row for _row in self._pixels)", "def is_full(self):\n return all(map(lambda x: x != self.CELL_EMPTY, self.__values))", "def at_least_one_cell_is_empty(cell_list):\n for cell in cell_list:\n if self.environment.grid.out_of_bounds(cell) or self.environment.grid.is_cell_empty(cell):\n return True\n return False", "def isEmpty(self, i,j):\n if (self.r_sheet.cell(i,j).ctype == XL_CELL_EMPTY or self.r_sheet.cell(i,j).ctype == XL_CELL_BLANK) or self.r_sheet.cell(i,j).value == '' :\n return True\n else :\n return False", "def _check_occupied(self, col, row):\n if self.board[row - 1][col - 1] == EMPTY:\n return False\n else:\n return True", "def check_empty_neighbours(self, cell):\n\t\tneighbours = self.get_neighbours(cell)\n\t\tflag = True\n\t\tfor neighbour in neighbours:\n\t\t\tif neighbour.state != 0:\n\t\t\t\tflag = False\n\t\treturn flag", "def isEmptyRow(self, i, colns):\n for j in range(0,colns) :\n if not self.isEmpty(i,j):\n return False\n return True", "def _find_empty_cell(self):\n\n for r, row in enumerate(self._board):\n for c, cell in enumerate(row):\n if cell is None:\n return r, c", "def has_cells(self):\n return len(self._cells) > 0", "def any_empty_tiles(self):\n for i in range(self.TILES_PER_ROW):\n for j in range(self.TILES_PER_ROW):\n if self.main_grid_values[i][j] == 0:\n return True\n\n return False", "def isEmptyColumn(self, j, rowns ):\n for i in range(0,rowns) :\n if not self.isEmpty(i,j):\n return False\n return True", "def find_empty(grid):\n for i in range(LEN_GRID):\n for j in range(LEN_GRID):\n if grid[i][j] == 0:\n return (i, j) # row, col\n return None", "def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is 
None:\n return False\n return True", "def _position_is_empty_in_board(position, board):\n return board[position[0]][position[1]] == \"-\"", "def get_blank(self):\n return self.cell.value == ''", "def is_empty(self):\n return self.n==0", "def is_empty(self, square: Square):\n return self.state[0][square.row][square.col] == 0 and self.state[1][square.row][square.col] == 0", "def is_empty(self) -> bool:\n return self.num_grna() == 0", "def get_empty_cells(grid):\n\tempty = []\n\tfor j,row in enumerate(grid):\n\t\tfor i,val in enumerate(row):\n\t\t\tif not val:\n\t\t\t\tempty.append((j,i))\n\treturn empty" ]
[ "0.8279983", "0.8197386", "0.81066924", "0.8022967", "0.80025375", "0.79176986", "0.76810986", "0.7655337", "0.7567471", "0.7562856", "0.7515833", "0.75053334", "0.74276817", "0.7426374", "0.7417211", "0.7400333", "0.7385057", "0.73581314", "0.73380595", "0.73341995", "0.72937787", "0.7290146", "0.72831243", "0.72126245", "0.7193134", "0.7119489", "0.7095366", "0.7094175", "0.7083125", "0.7074627" ]
0.87621516
0
Takes point in screen coordinates and returns index of containing cell
def get_index(self, point, cell_size): return (point[1] / cell_size, point[0] / cell_size)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_index(self):\n current = self.from_grid\n #find index of \"*\"\n for x in range(len(current)):\n for y in range(len(current[x])):\n if current[x][y] == \"*\":\n index = (x,y)\n return index", "def cell_index(self, coord):\n\n for x in range(len(self.cell_x)):\n if coord[0] >= self.cell_x[x] and coord[0] <= self.cell_x[x] + self.cell_size:\n i = x\n\n for y in range(len(self.cell_y)):\n if coord[1] >= self.cell_y[y] and coord[1] <= self.cell_y[y] + self.cell_size:\n j = y\n\n return [i, j]", "def which_cell(loc_x, loc_y):\n column = int(math.ceil((loc_x - LEFT_MARGIN) / CELL_SIZE))\n row = int(math.ceil((loc_y - TOP_MARGIN) / CELL_SIZE))\n cell_id = (row - 1) * CELL_COLUMN + column\n return cell_id", "def cell_containing(self,xy,neighbors_to_test=4): \n hit = self.select_cells_nearest(xy, count=neighbors_to_test, inside=True)\n if hit is None:\n return -1\n else:\n return hit", "def __get_cell_index(self, x, y) -> int:\n # \"The map data, in row-major order, starting with (0,0)\"\n return x + y * self.occupancy_map.info.width", "def grid_to_index(mapdata, x, y):\n i = (y * mapdata.info.width) + x\n return int (i)", "def getIndex(x, y, rows, cols):\n x = cols-x-1\n if x % 2 != 0:\n return (x*rows)+y\n else:\n return (x*rows)+(rows-1-y)", "def get_cell_idx(max_coord, min_coord, separator, x_current):\n lenght = max_coord - min_coord\n return max(0, min(int((x_current - min_coord) * separator / lenght), separator - 1))", "def get_cell(self, point):\n return self._grid[point.x][point.y]", "def xy_to_index(x, y):\n index = y * columns + x\n return index", "def get_index_under_point(self, event):\r\n xy = np.asarray(list(zip(self.xs, self.ys)))\r\n xyt = self.line.get_transform().transform(xy)\r\n xt, yt = xyt[:, 0], xyt[:, 1]\r\n d = np.sqrt((xt - event.x) ** 2 + (yt - event.y) ** 2)\r\n pt_idx = np.argmin(d)\r\n if d[pt_idx] >= self.max_pixels_from_vertex:\r\n pt_idx = None\r\n return pt_idx", "def position_index(x, y):\r\n position_action_idx = x + y*8\r\n return position_action_idx", "def get_index(self, row, col):\n return (row * self.cols) + col", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def get_position(self, number):\n for rowidx, row in enumerate(self.numbers):\n for colidx, num in enumerate(row):\n if num == number:\n return rowidx, colidx", "def get_pos_index(self):\n return [self.row-1, self.col-1]", "def GetTileIndex(self, pos):\r\n #pixel = rpg_image.GetPixel(self.image, pos)\r\n try:\r\n pixel = self.image_buffer[pos[0]][pos[1]]\r\n except IndexError, e:\r\n pixel = -1\r\n \r\n return pixel", "def window(self, point):\n\n # Offsets from index pixel\n ref_x, ref_y = point\n index_row, index_col = self.index\n x_min = ref_x - (index_col * self.cell_size)\n y_max = ref_y + (index_row * self.cell_size)\n return x_min, y_max, self.n_cols, self.n_rows", "def i_index(self, coord):\n return coord + 1 if coord + 1 > self.dimensions - 1 else 0", "def getCellpos(self, event):\n e = event.widget\n cx, cy = cart(e.canvasx(event.x), e.canvasy(event.y))\n cellx = int(cx) // self.cell_width\n celly = int(cy) // self.cell_height\n return cellx, celly", "def cell(self, point):\n x = self.clampx((point[0] - self._origin.x) // self._cell_size[0])\n y = self.clampy((point[1] - 
self._origin.y) // self._cell_size[1])\n index = y * self._cell_count[0] + x\n cell = self._cells[index]\n if cell is None:\n return None\n else:\n return list(cell)", "def get_position(self, cell) -> tuple:\n for i, row in enumerate(self.cells):\n if cell in row:\n return row.index(cell), i\n if not isinstance(cell, Cell):\n raise TypeError(f\"Argument should be of type 'Cell', not '{cell.__class__.__name__}'.\")\n raise ValueError(\"The given cell is not a part of the grid.\")", "def get_cell_coords(self, pt):\n\n\t return int(pt[0] // self.a), int(pt[1] // self.a)", "def _get_position_grid_column(position, grid_row):\n \n for (box, grid_col_index) in zip(grid_row, range(len(grid_row))):\n if box.contains_point((position.x, position.y)):\n return grid_col_index\n return None", "def getContainingGrid( self, point ):\n idx = self.indxHash( point );\n return self.mGrids[idx];", "def GetPosition(board):\n\tfor i in range(len(board.matrix)):\n\t\tfor j in range(len(board.matrix[i])):\n\t\t\tif board.matrix[i][j]==\"X\":\n\t\t\t\treturn i,j", "def click(self, x, y):\n row = int((x - self.x)/self.cell_size)\n col = int((y - self.y)/self.cell_size)\n if 0 <= row < ROWS and 0 <= col < COLS:\n return row, col\n return None", "def locate(x, y):\n position(x * 6, y)", "def clickCell(self, event):\n position = self.input.checkMouseInput(event)\n if not position:\n return None\n x = math.floor(position[0] / self.imageWidth)\n y = math.floor(position[1] / self.imageHeight)\n return (int(x), int(y))" ]
[ "0.76455384", "0.7513032", "0.74592626", "0.73822683", "0.7260477", "0.72356343", "0.7188108", "0.7178427", "0.71581024", "0.71462774", "0.6948103", "0.69273347", "0.6926569", "0.6914826", "0.6914826", "0.69116056", "0.68796355", "0.6856874", "0.6826299", "0.67381704", "0.6735695", "0.6697084", "0.6694946", "0.6671088", "0.66654044", "0.6645551", "0.6628175", "0.6617249", "0.6612605", "0.66111785" ]
0.7794232
0
Return true if the tree element is a widget with typeId menumux
def is_menumux(element): return element.tag == 'widget' and \ element.get('typeId', default=MISSING) == MENU_MUX_ID
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_active(widget):\n return widget in active_widgets or widget in layer_widgets", "def has(self, component_type):\n return component_type in self._children", "def isNodeType(self, t):\n return isinstance(self, t)", "def isTree(self, t):\n\n if type(t) != tree:\n return False;\n if not hasattr(t, \"label\"):\n return False;\n if type(t.label) != str:\n return False;\n if not hasattr(t, \"children\"):\n return False;\n if type(t.children) != list:\n return False;\n return all([type(c) == tree for c in t.children])", "def check_tree_type(tree):\n return tree.type in ref", "def exists_type(self, type):\n for i in range(1, self.grid_size - 1):\n for j in range(1, self.grid_size - 1):\n obj = self.grid.get(i, j)\n if obj and obj.type == type:\n return True\n return False", "def has_focus(widget):\n for group in widget.groups():\n if isinstance(group, Focus):\n return True\n return False", "def _is_name_type(self, type_id):\n return type_id == self.name_type", "def content_widget_exists():\n return ContentWidget.query.first()", "def is_orphan_widget(filename, tree, root, obj, orphan, orphan_root, doprint = False):\n global warnexists\n if obj.tag != 'object':\n return False\n\n oid = obj.attrib.get('id')\n klass = obj.attrib.get('class')\n\n # \"Don't care\" special case\n if klass in widgets_ignored:\n return False\n for suffix in widgets_suffixignored:\n if klass[-len(suffix):] == suffix:\n return False\n\n # Widgets usual do not strictly require a label, i.e. a labelled parent\n # is enough for context, but some do always need one.\n requires_label = klass in widgets_needlabel\n\n labelled_by = obj.findall(\"accessibility/relation[@type='labelled-by']\")\n labelled_by += obj.findall(\"accessibility/relation[@name='labelled-by']\")\n\n label = obj.findall(\"accessibility/property[@name='label']\")\n\n label = obj.findall(\"accessibility/property[@name='label']\")\n\n # Labels special case\n if klass in widgets_labels:\n return False\n\n # Case 1: has an explicit <child internal-child=\"accessible\"> sub-element\n children = obj.findall(\"child[@internal-child='accessible']\")\n if len(children) > 1 and doprint:\n err(filename, tree, obj, \"multiple-accessible\", \"has multiple <child internal-child='accessible'>\"\n \"%s\" % elms_lines(children))\n if len(children) >= 1:\n return False\n\n # Case: has an accessibility label\n if len(label) > 0:\n return False\n\n # Case 2: has an <accessibility> sub-element with a \"labelled-by\"\n # <relation> pointing to an existing element.\n if len(labelled_by) > 0:\n return False\n\n # Case 3: has a label-for\n if oid in label_for_elm:\n return False\n\n # Case: has a description-for\n if oid in description_for_elm:\n return False\n\n # Case 4: has a mnemonic\n if oid in mnemonic_for_elm:\n return False\n\n # Case 5: Has a <property name=\"tooltip_text\">\n tooltips = obj.findall(\"property[@name='tooltip_text']\") + \\\n obj.findall(\"property[@name='tooltip-text']\")\n if len(tooltips) > 1 and doprint:\n err(filename, tree, obj, \"multiple-tooltip\", \"has multiple tooltip_text properties\")\n if len(tooltips) >= 1 and klass != 'GtkCheckButton':\n return False\n\n # Case 6: Has a <property name=\"placeholder_text\">\n placeholders = obj.findall(\"property[@name='placeholder_text']\") + \\\n obj.findall(\"property[@name='placeholder-text']\")\n if len(placeholders) > 1 and doprint:\n err(filename, tree, obj, \"multiple-placeholder\", \"has multiple placeholder_text properties\")\n if len(placeholders) >= 1:\n return False\n\n # Buttons usually 
don't need an external label, their own is enough, (but they do need one)\n if klass in widgets_buttons:\n\n labels = obj.findall(\"property[@name='label']\")\n if len(labels) > 1 and doprint:\n err(filename, tree, obj, \"multiple-label\", \"has multiple label properties\")\n if len(labels) >= 1:\n # Has a <property name=\"label\">\n return False\n\n actions = obj.findall(\"property[@name='action_name']\")\n if len(actions) > 1 and doprint:\n err(filename, tree, obj, \"multiple-action_name\", \"has multiple action_name properties\")\n if len(actions) >= 1:\n # Has a <property name=\"action_name\">\n return False\n\n gtklabels = obj.findall(\".//object[@class='GtkLabel']\") + obj.findall(\".//object[@class='GtkAccelLabel']\")\n if len(gtklabels) >= 1:\n # Has a custom label\n return False\n\n # no label for a button, warn\n if doprint:\n warn(filename, tree, obj, \"button-no-label\", \"does not have its own label\");\n if not is_enabled(obj, \"button-no-label\", enables, True):\n # Warnings disabled\n return False\n (_, suppr) = elm_suppr(filename, tree, obj, \"button-no-label\", False)\n if suppr in false_positives:\n # That was actually expected\n return False\n if suppr in suppressions:\n # Warning suppressed for this widget\n if suppressions[suppr]:\n warnexists += 1\n suppressions[suppr] = False\n return False\n return True\n\n # GtkImages special case\n if klass == \"GtkImage\":\n uses = [u for u in tree.iterfind(\".//object/property[@name='image']\") if u.text == oid]\n if len(uses) > 0:\n # This image is just used by another element, don't warn\n # about the image itself, we probably want the warning on\n # the element instead.\n return False\n\n if find_button_parent(root, obj) is not None:\n # This image is part of a button, we want the warning on the button\n # instead, if any.\n return False\n\n # GtkEntry special case\n if klass == 'GtkEntry' or klass == 'GtkSearchEntry':\n parent = elm_parent(root, obj)\n if parent is not None:\n if parent.tag == 'child' and \\\n parent.attrib.get('internal-child') == \"entry\":\n # This is an internal entry of another widget. 
Relations\n # will be handled by that widget.\n return False\n\n # GtkShortcutsShortcut special case\n if klass == 'GtkShortcutsShortcut':\n children = obj.findall(\"property[@name='title']\")\n if len(children) >= 1:\n return False\n\n\n # Really no label, perhaps emit a warning\n if not is_enabled(obj, \"no-labelled-by\", enables, True):\n # Warnings disabled for this class of widgets\n return False\n (_, suppr) = elm_suppr(filename, tree, obj, \"no-labelled-by\", False)\n if suppr in false_positives:\n # That was actually expected\n return False\n if suppr in suppressions:\n # Warning suppressed for this widget\n if suppressions[suppr]:\n warnexists += 1\n suppressions[suppr] = False\n return False\n\n if not orphan:\n # No orphan label, so probably the labelled parent provides enough\n # context.\n if requires_label:\n # But these always need a label.\n if doprint:\n warn(filename, tree, obj, \"no-labelled-by\", \"has no accessibility label\")\n return True\n return False\n\n if doprint:\n context = elm_name(orphan_root)\n if context:\n context = \" within \" + context\n warn(filename, tree, obj, \"no-labelled-by\", \"has no accessibility label while there are orphan labels\" + context)\n return True", "def _valid_typable_object(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys()\n else:\n assert False, 'Wrong Platform'", "def _is_run_type(cls, object_):\n # Do a string comparison instead of using isinstance() to avoid needing\n # to import lyse or other modules with these classes.\n return (type(object_).__name__ in cls._RUN_TYPES)", "def isWidgetSelected(self, QWidget): # real signature unknown; restored from __doc__\n return False", "def explore_type(name, datatype, is_child):\n target_type = datatype.target()\n Explorer.explore_type(name, target_type, is_child)\n return False", "def is_type(self, typ):\n return typ == self.__class__.__name__", "def _valid_typable_object_with_name(ui_object, platform=Platform.ANDROID):\n if platform == Platform.ANDROID:\n return (ui_object.obj_type in _TYPABLE_OBJECT_DESC.keys() and\n _valid_object_with_name(ui_object))\n else:\n assert False, 'Wrong Platform'", "def is_button(widget):\n # CEBALERT: document why try/except is needed\n try:\n button = 'command' in widget.config() and not hasattr(widget,'toggle')\n except T.TclError:\n button = False\n return button", "def is_etree_element(obj: Any) -> bool:\n return hasattr(obj, 'append') and hasattr(obj, 'tag') and hasattr(obj, 'attrib')", "def isTextWidget(self, w: Wrapper) -> bool:\n if Qsci:\n return isinstance(w, (Qsci.QsciScintilla, QtWidgets.QTextEdit))\n return isinstance(w, QtWidgets.QTextEdit)", "def is_labelled_parent(elm):\n klass = elm.attrib.get('class')\n if klass in widgets_toplevel:\n return True\n if klass == 'GtkShortcutsGroup':\n children = elm.findall(\"property[@name='title']\")\n if len(children) >= 1:\n return True\n if klass == 'GtkFrame' or klass == 'GtkNotebook':\n children = elm.findall(\"child[@type='tab']\") + elm.findall(\"child[@type='label']\")\n if len(children) >= 1:\n return True\n return False", "def isAlive(self):\n\n import wx\n\n if not fwidgets.isalive(self.parent):\n return False\n\n if isinstance(self.widget, wx.MenuItem):\n return fwidgets.isalive(self.menu)\n\n else:\n return fwidgets.isalive(self.widget)", "def explore_type(name, datatype, is_child):\n actual_type = datatype.strip_typedefs()\n if is_child:\n print (\"The type of %s is a typedef of type '%s'.\" %\n (name, 
str(actual_type)))\n else:\n print (\"The type '%s' is a typedef of type '%s'.\" %\n (name, str(actual_type)))\n\n Explorer.explore_type(name, actual_type, is_child)\n return False", "def hasChildren():", "def isContainedIn(self, t):\n if self.parent is None:\n return False\n if self.parent.getClassName() == t:\n return True\n return self.parent.isContainedIn(t)", "def isItemSelectable(self, itemName, touchType=True, contentType=None, index=1, containerObject=None, relatedAreaEnd=None):\r\n item=0\r\n result = None\r\n item=self.searchItem(itemName, touchType, contentType, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n\r\n if item:\r\n assert item.getAttribute('center-x') and \\\r\n item.getAttribute('center-y') and \\\r\n item.getAttribute('width') and \\\r\n item.getAttribute('heigth'), \\\r\n 'Attributes missing from widget dump xml!'\r\n\r\n x_center = int(item.getAttribute('center-x'))\r\n y_center = int(item.getAttribute('center-y'))\r\n w = int(item.getAttribute('width'))\r\n h = int(item.getAttribute('heigth'))\r\n\r\n # use bottom y value when image-widget has swipe* in handled-touch-events\r\n if item.getName() == 'image-widget' and 'swipe' in item.getAttribute('handled-touch-events') and \\\r\n item.getAttribute('bottom'):\r\n y_center = int(item.getAttribute('bottom'))-1\r\n\r\n if item.getAttribute('visible')=='hidden':\r\n result = (self.HIDDEN,(x_center,y_center), item)\r\n\r\n # if item is in statusbar\r\n elif self.isItemInStatusBar(itemName):\r\n # if statusbar is outside screen in horizontal level, it's hidden and can be revealed\r\n if ( x_center < 0 or x_center > self.getScreenWidth() ) and (y_center > 0 or y_center < self.statusbarHeight ):\r\n result = (self.HIDDEN,(x_center,y_center), item)\r\n # if item is for some reason outside the statusbar area, it is not visible and cannot be revealed\r\n elif ( x_center > 0 or x_center < self.getScreenWidth() ) and (y_center < 0 or y_center > self.statusbarHeight ):\r\n result = (self.NOT_FOUND,(0,0), item)\r\n else:\r\n result = (self.VISIBLE,(x_center,y_center), item)\r\n else:\r\n # check if statusbar is on the screen\r\n if self.statusbarHeight > 0:\r\n # statusbar will get the screen touches in height + 10 pixel area\r\n untouchableAreaHeight = self.statusbarHeight + 10\r\n else:\r\n untouchableAreaHeight = 0 # if no statusbar, then whole area is touchable\r\n\r\n if x_center < 0 or x_center > self.getScreenWidth() or y_center < untouchableAreaHeight or y_center > self.getScreenHeight()-1:\r\n result = (self.HIDDEN,(x_center,y_center), item)\r\n else:\r\n if not item.getAttribute('is-in-tab-area')=='true':\r\n result = self.__checkCenterVisibility(item, x_center, y_center)\r\n else:\r\n result = (self.VISIBLE,(x_center,y_center))\r\n else:\r\n result = (self.NOT_FOUND,(0,0), item)\r\n\r\n return result", "def _isinstancetype(an_obj):\n if an_obj is None: return False\n if not PY3K:\n return isinstance(an_obj, types.InstanceType)\n typstr = str(type(an_obj))\n # the following logic works, as PyRAF users expect, in both v2 and v3\n return typstr==\"<type 'instance'>\" or \\\n (typstr.startswith(\"<class '\") and ('.' 
in typstr))", "def is_container(self):\n return (self.__type & NODE_TAG) and self.children", "def ismarker(typename, tree):\n if type(tree) is not With or len(tree.items) != 1:\n return False\n ctxmanager = tree.items[0].context_expr\n return type(ctxmanager) is Name and ctxmanager.id == typename", "def isinstance_blender_object(self, b_obj):\n # lame and slow, but functional\n return b_obj in Blender.Object.Get()", "def is_registered(self, type):\n attr = self._type_to_attr(type)\n return getattr(self, attr, None) is not None" ]
[ "0.59847116", "0.59238446", "0.59065753", "0.58840156", "0.58742225", "0.5860729", "0.58427274", "0.58137167", "0.57992494", "0.5782972", "0.5708701", "0.5702631", "0.5700949", "0.5691066", "0.5682201", "0.5667302", "0.5559833", "0.55532175", "0.55429095", "0.55364543", "0.5534331", "0.5509618", "0.5504189", "0.5493479", "0.54908955", "0.5467665", "0.5443251", "0.5429089", "0.54234225", "0.5415668" ]
0.722235
0
Recursively find all MenuMux symbols from root node. Return a list of (name, first_value).
def find_mm_symbols(node): symbols = {} if len(node) == 0: return symbols else: for child in node: if is_menumux(child): try: # grab the number of sets of target-values defined num_sets = child.find('num_sets') for set_idx in range(0, int(num_sets.text)): target = child.find("target%d" % set_idx) values = child.find("values%d" % set_idx) if LOC_PREFIX in target.text: log.info("Skipping already updated target %s", target.text) else: first_value = values.find('s') symbols[target.text] = first_value.text # Update the target text to be a (private) locPV target.text = create_loc_pv(target.text) except AttributeError as e: log.warn("Error parsing MenuMux: %s", e) else: symbols.update(find_mm_symbols(child)) return symbols
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAllNames(self):\n result = []\n node = self\n while not node.isRoot():\n result.insert(0, node.getName())\n node = node.getParent()\n result.insert(0, node.getName())\n return result", "def fn(node):\n ans, stack = [], []\n while stack or node: \n if node: \n stack.append(node)\n node = node.left\n else: \n node = stack.pop()\n ans.append(node.val)\n node = node.right \n return ans", "def items(self, _prec=\"\"):\n if self.isLeaf:\n yield (_prec + self.ch, self.value)\n\n for chld in self.children.values():\n yield from chld.items(_prec + self.ch)", "def keys(self, _prec=\"\"):\n if self.isLeaf:\n yield _prec + self.ch\n\n for chld in self.children.values():\n yield from chld.keys(_prec + self.ch)", "def _traverse_with_names(tree):\n if dataclasses.is_dataclass(tree):\n tree = flax.serialization.to_state_dict(tree)\n if isinstance(tree, dict) or isinstance(tree, flax.core.FrozenDict):\n keys = sorted(tree.keys())\n for key in keys:\n for path, v in _traverse_with_names(tree[key]):\n yield (key + '/' + path).rstrip('/'), v\n else:\n yield '', tree", "def collect(self, node, key):\n if node is None:\n return\n # go over the left subtree\n for item in self.collect(node.left, key):\n yield item\n if node.has_value:\n yield (key + chr(node.char), node.value)\n # go over the middle subtree and add the current node char to the key variable\n for item in self.collect(node.middle, key + chr(node.char)):\n yield item\n # go over the right subtree\n for item in self.collect(node.right, key):\n yield item", "def get_strings(self, prefix=''):\n yield prefix + str(self.value) + ':'\n\n for child in self.children:\n for s in child.get_strings(prefix + ' '):\n yield s", "def getVisitableNodesNamed(self):\n\n return (\n (\"list_arg\", self.subnode_list_arg),\n (\"value\", self.subnode_value),\n )", "def get_global_roots(roots_list_name):\n roots_list = get_value_safe(roots_list_name)\n ret = []\n if roots_list is None or roots_list == 0:\n return ret\n\n global_root = roots_list['forward'].dereference()\n while global_root != 0:\n root = global_root.dereference()['root'].dereference()\n ret.append((root, root+1, roots_list_name))\n global_root = global_root.dereference()['forward'].dereference()\n return ret", "def level_order_1(root: Node):\n if not root:\n return\n temp = root\n que = [temp]\n while len(que) > 0:\n print(que[0].data, end=\" \")\n temp = que.pop(0)\n if temp.left:\n que.append(temp.left)\n if temp.right:\n que.append(temp.right)\n return que", "def get_all_menu():", "def preorder(self, root):\n if not root:\n return []\n nodes, stack = [], [root]\n while stack:\n node = stack.pop()\n nodes.append(node.val)\n for child in reversed(node.children):\n stack.append(child)\n return nodes", "def node_name_list(self):\n return list(self._node_reg.keys())", "def dfs(root, lvl=0):\n vals = []\n if root:\n vals.append([root.data, lvl])\n if root.left:\n vals += dfs(root.left, lvl + 1)\n if root.right:\n vals += dfs(root.right, lvl + 1)\n return vals", "def search_prefix(self, key, record_limit):\n keys = []\n\n node = self._search_prefix(key, self.root)\n\n if node:\n if node.real:\n keys.append(node.value)\n self.get_nodes(node, keys, record_limit)\n\n return keys", "def getVisitableNodesNamed(self):\n\n return (\n (\"set_arg\", self.subnode_set_arg),\n (\"value\", self.subnode_value),\n )", "def get_rootnodes(self) -> List[RootNode]:\n\t\treturn sorted(self.root_nodes, key=lambda x: x.name.lower())", "def pre_order_search_stack(self, root):\n if root is None:\n return\n myStack = []\n 
all_nodes = []\n node = root\n while node or myStack:\n while node: # 从根节点开始,一直找它的左子树\n # print(node.elem)\n all_nodes.append(node.elem)\n myStack.append(node)\n node = node.lchild\n node = myStack.pop() # while结束表示当前节点node为空,即前一个节点没有左子树了\n node = node.rchild # 开始查看它的右子树\n return all_nodes", "def getVisitableNodesNamed(self):\n\n return (\n (\"value\", self.subnode_value),\n (\"dict_arg\", self.subnode_dict_arg),\n (\"key\", self.subnode_key),\n )", "def node_names(self):\n\n for node_name in self.nodes.keys():\n\n yield node_name", "def get_trees(self, data, showerrors = False): # -> list:\r\n if showerrors:\r\n raise NotImplementedError(\"This parser doesn't implement errors\")\r\n self.data = data\r\n self.index = 0\r\n try:\r\n return [self.__aux_parser(self._productionset.initialsymbol)]\r\n except (IndexError, ParseError):\r\n return []", "def getAllKeyValuePair(self,root,key):\n\n if root==None:\n return []\n \n node = root\n result = []\n\n for index,child in enumerate(node.children):\n if(child!=None):\n if(child.value!=None):\n result.append((key+str(index),child.value.value))\n \n result += self.getAllKeyValuePair(child,key+str(index))\n\n return result", "def levelOrder(self, root: 'Node') -> List[List[int]]:\n if not root: return []\n level = []\n waiting = []\n result = []\n level.append(root)\n while level:\n current = []\n while level:\n tmp = level.pop(0)\n if not tmp:\n continue\n current.append(tmp.val)\n waiting.append(tmp)\n if len(current) > 0:\n result.append(current)\n while waiting:\n tmp = waiting.pop(0)\n for ch in tmp.children:\n level.append(ch)\n return result", "def nodes(evt, node=None):\n nodenames = []\n\n if node is None:\n root = evt.retrieveObject('')\n node = root.registry()\n\n if node.object():\n nodenames.append(node.identifier())\n for l in evt.leaves(node):\n # skip a location that takes forever to load\n # XXX How to detect these automatically??\n if 'Swum' in l.identifier():\n continue\n \n temp = evt[l.identifier()]\n nodenames += nodes(evt, l)\n else:\n nodenames.append(node.identifier())\n\n return nodenames", "def get_full_name_definition(node):\n\n def _is_end(node):\n return isinstance(node, tree.Operator) and node.value == \"]\"\n\n nodes = []\n current = node\n\n while not _is_end(current):\n nodes.append(current)\n current = current.get_next_leaf()\n\n if current not in nodes:\n nodes.append(current)\n\n return nodes", "def symbify(nodes):\n if not nodes: return None\n \n root, children = nodes\n return [(unique_id(root), node_type(root)), \n [symbify(child) for child in children]]", "def build_tree(text):\n if text[0] == \"-\":#Application\n l = [None, None]\n l[0], text = build_tree(text[1:])#use build_tree recursively to allow nested stuff, return text so that the next function can continue where the nested one left off.\n l[1], text = build_tree(text[1:])\n elif text[0] == \"*\":#Abstraction\n l = [text[0:2], None]\n text = text[1:]#the variable has to be removed before proceeding\n l[1], text = build_tree(text[1:])#same as above\n l = (l[0], l[1])\n else:\n l = text[0]\n return l, text", "def lutList():\n sessionLuts = nuke.Root()[\"luts\"]\n luts = re.findall('[a-zA-Z0-9.*]+', sessionLuts.toScript())\n return luts", "def getVisitableNodesNamed(self):\n\n return ((\"module\", self.subnode_module),)", "def _get_tree(root: spacy.tokens.Token, depth: int, token_filter: types.FunctionType) -> [spacy.tokens.Token]:\n if depth == 0:\n return [root] if token_filter(root) else []\n\n result = []\n # for tokens on the left of the root, 
whose head is root\n for child in filter(token_filter, root.lefts):\n result += SpacyEventExtractor._get_tree(child, depth - 1, token_filter)\n result.append(root)\n # for tokens on the right of the root, whose head is root\n for child in filter(token_filter, root.rights):\n result += SpacyEventExtractor._get_tree(child, depth - 1, token_filter)\n return result" ]
[ "0.509325", "0.4992512", "0.49513537", "0.493068", "0.4924768", "0.4924014", "0.48970065", "0.48819217", "0.48680574", "0.48541766", "0.48541066", "0.48351964", "0.48308712", "0.47602692", "0.47494695", "0.47325057", "0.4718129", "0.4705719", "0.46953857", "0.4692597", "0.4669213", "0.4664451", "0.46616474", "0.46607134", "0.4659847", "0.46594065", "0.46572945", "0.46567664", "0.46311453", "0.46253222" ]
0.58911955
0
Recursively replace any instance of a symbol with a local PV.
def replace_symbols(node, symbols): warning = False if len(node) == 0: if node.text is not None and not node.text.isspace(): if '$' in node.text and not (node.tag in EXCLUDED_TAGS): node.text = try_replace(node.text, symbols) if node.tag in NON_PV_TAGS: warning = True else: for child in node: if replace_symbols(child, symbols): warning = True return warning
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recursiveSearchReplace(x, s, r):\n for k, v in x.items():\n if type(v) is dict:\n recursiveSearchReplace(v, s, r)\n else:\n if v == s:\n x[k] = r", "def resolve_symbol(self, symbol):\n if symbol in self.labels:\n return self.labels[symbol]\n if symbol in PREDEFINED_MEM:\n return PREDEFINED_MEM[symbol]\n\n return self.variables[symbol] # autoincrement default", "def in_place_substitute(self):\r\n if self.substitute is not None:\r\n node = self.convert_type()\r\n self.leaf_replace(node) # for internals only\r\n self.root_replace(node)", "def try_replace(text, symbols):\n\n updated = text\n matched = False\n\n for sym, value in symbols.iteritems():\n sym_rep = '$(%s)' % sym\n sym_loc = create_loc_pv(sym, value)\n sym_loc_ts = \"toString('%s')\" % sym_loc\n\n if sym_rep == text:\n matched = True\n updated = sym_loc\n # can abort here as we've finished\n break\n\n elif sym_rep in updated:\n\n matched = True\n # Three cases:\n # i) the pattern is at the start of the string\n # ii) the pattern is at the end of the string\n # iii) the pattern is somewhere in the middle\n length = len(sym_rep)\n index = updated.index(sym_rep)\n\n if index == 0: # match at the start\n body = '%s, \"%s\"' % (sym_loc_ts, updated[length:])\n elif index + length == len(updated): # match at the end\n body = '\"%s\", %s' % (updated[:-length], sym_loc_ts)\n else: # match in the middle\n body = '\"%s\", %s, \"%s\"' % (updated[:index], sym_loc_ts, updated[index+length:])\n\n if updated.startswith('concat('):\n updated = body[1:-1] # strip start/end quotes\n else:\n updated = 'concat(%s)' % body\n\n ## remove any empty strings\n updated = updated.replace(', \"\"', '')\n updated = updated.replace('\"\", ', '')\n\n if matched:\n # A concat() function does not need to be in quotes\n # in the pv() function; a local PV does.\n if updated.startswith(\"loc\"):\n updated = \"'%s'\" % updated\n updated = '=pv(%s)' % updated\n log.info(\"Converted %s to %s\", text, updated)\n return updated", "def __setitem__(self, name, symbol):\n self.current_scope[name] = symbol", "def as_dummy(self):\n from .symbol import Dummy, Symbol\n def can(x):\n # mask free that shadow bound\n free = x.free_symbols\n bound = set(x.bound_symbols)\n d = {i: Dummy() for i in bound & free}\n x = x.subs(d)\n # replace bound with canonical names\n x = x.xreplace(x.canonical_variables)\n # return after undoing masking\n return x.xreplace({v: k for k, v in d.items()})\n if not self.has(Symbol):\n return self\n return self.replace(\n lambda x: hasattr(x, 'bound_symbols'),\n can,\n simultaneous=False)", "def _replace_path_with_type_symbol(cost_trees):\n cost_trees_ = {}\n tmp_dict = {}\n for k, v in cost_trees.items():\n if k == 'subcosts':\n for kk, vv in v.items():\n type_symbol = vv.pop('_type_symbol')\n d = {type_symbol: vv}\n if tmp_dict.get(k) is not None:\n tmp_dict[k].update(d)\n else:\n tmp_dict[k] = d\n elif type(v) == dict:\n for kk, vv in v.items():\n type_symbol = vv.pop('_type_symbol')\n d = {type_symbol: _replace_path_with_type_symbol(vv)}\n if cost_trees_.get(k) is not None:\n cost_trees_[k].update(d)\n else:\n cost_trees_[k] = d\n else:\n tmp_dict[k] = v\n cost_trees_.update(tmp_dict)\n return cost_trees_", "def promote_live_variables(paths):\n for path in paths:\n symbol_table = {} # We build a new symbol table for each path\n for block in path:\n if isinstance(block, BasicBlock):\n new_statements = []\n for statement in block.statements:\n # Replace any symbols currently in the symbol table\n statement = replace_symbols(statement, 
symbol_table, ctx=ast.Load)\n # Fold constants\n statement = constant_fold(statement)\n # Update symbol table if the statement is an assign\n if is_assign_to_name(statement):\n symbol_table[statement.targets[0].id] = statement.value\n new_statements.append(statement)\n block.statements = new_statements\n elif isinstance(block, Branch):\n # For branches we just promote in the condition\n block.cond = replace_symbols(block.cond, symbol_table, ctx=ast.Load)\n block.cond = constant_fold(block.cond)\n return paths", "def mineval(expr, ctx):\n for k, v in ctx.items():\n if k in expr:\n expr = re.sub(k, str(v), expr)\n return evaluateRPN(expr)", "def replace(smap):\n def _replace_xducer(step):\n def _replace_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if x in smap:\n return step(r, smap[x])\n else:\n return step(r, x)\n return _replace_step\n return _replace_xducer", "def replace_symbol(text, replacement_text=\"\"):\n\n return __RE_SYMBOL.sub(replacement_text, text)", "def test_rebind_prefix_replace(tmp_path: Path, store_name: str, override: bool) -> None:\n graph = make_graph(tmp_path, store_name)\n graph.bind(\"egsub\", EGNSSUB_V0)\n if override:\n graph.bind(\"egsub\", EGNSSUB_V1, override=override, replace=True)\n check_ns(graph, {\"egsub\": EGNSSUB_V1})\n else:\n graph.bind(\"egsub\", EGNSSUB_V1, override=override, replace=True)\n check_ns(graph, {\"egsub\": EGNSSUB_V0})", "def test_tensors_can_substitute_symbols_simultaneously(\n free_alg, full_balance, full_simplify\n):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n alpha = Symbol('alpha')\n beta = IndexedBase('beta')\n i, j, k = p.i, p.j, p.k\n v = p.v\n\n orig = dr.einst(alpha ** 2 * x[i] * v[i])\n alpha_def = dr.einst(alpha * beta[i, i])\n assert alpha_def.n_terms == 1\n assert len(alpha_def.local_terms[0].sums) == 1\n\n dr.full_simplify = full_simplify\n res = orig.subst(alpha, alpha_def, full_balance=full_balance).simplify()\n dr.full_simplify = True\n\n expected = dr.einst(\n alpha ** 2 * beta[i, i] * beta[j, j] * x[k] * v[k]\n ).simplify()\n assert res == expected", "def deep_replace(obj, s, r):\n if isinstance(obj, list) or isinstance(obj, tuple):\n obj = map(lambda o: deep_replace(o, s, r), obj)\n elif isinstance(obj, dict):\n obj = dict(map(lambda o: deep_replace(o, s, r), obj.items()))\n elif isinstance(obj, str):\n if obj == s:\n obj = str(r)\n else:\n obj = obj.replace(s, str(r))\n else:\n obj = deepcopy(obj)\n return obj", "def goto(fixed_pc: int):\n\n def _goto(state: State) -> State:\n return state._replace(pc=fixed_pc)\n\n return _goto", "def _revert_encoded_reg_name(self, vdef):\n if vdef.find(\"%\") != -1:\n for (o_reg, re_reg) in self.arch.reg_rename_tbl.items():\n vdef = vdef.replace(re_reg, o_reg)\n return vdef", "def replace_symbol(text, replacement_text=\"\"):\n\n if not is_not_null_string(text):\n raise TypeError(\"text must be a string\")\n\n return __RE_SYMBOL.sub(replacement_text, _preprocess(text))", "def process_symbol(self, symbol):\n try:\n return self._discretised_symbols[symbol]\n except KeyError:\n discretised_symbol = self._process_symbol(symbol)\n self._discretised_symbols[symbol] = discretised_symbol\n discretised_symbol.test_shape()\n\n # Assign mesh as an attribute to the processed variable\n if symbol.domain != []:\n discretised_symbol.mesh = self.mesh[symbol.domain]\n else:\n discretised_symbol.mesh = None\n\n # Assign secondary mesh\n if symbol.domains[\"secondary\"] != []:\n discretised_symbol.secondary_mesh = self.mesh[\n 
symbol.domains[\"secondary\"]\n ]\n else:\n discretised_symbol.secondary_mesh = None\n return discretised_symbol", "def recursive_search_replace(x, s, r):\n\n # go through each of the items in the dictionary\n for k, v in x.items():\n\n # if value is a dictionary do search replace on that\n if type(v) is dict:\n recursive_search_replace(v, s, r)\n\n # otherwise if the value matches the search string\n # replace it with replacement\n else:\n if v == s:\n x[k] = r", "def root_replace(self,node):\r\n self.feature_index = node.feature_index\r\n self.threshold = node.threshold\r\n self.label = node.label\r\n self.left = node.left\r\n self.right = node.right\r\n self.substitute = node.substitute\r\n if node.left is not None and node.right is not None:\r\n node.left.parents.remove(node) if node in node.left.parents else node.left.parents\r\n node.left.parents.append(self) if self not in node.left.parents else node.left.parents\r\n node.right.parents.remove(node) if node in node.right.parents else node.right.parents\r\n node.right.parents.append(self) if self not in node.right.parents else node.right.parents", "def _parname_override(self, parname, pvname):\n self._pv_cache[parname] = Pv.Pv(pvname, initialize=True, monitor=True)", "def _subs(self, exp, p, seen):\n if id(self) in seen:\n return (seen[id(self)], False)\n seen[id(self)] = p\n if self._has(\"p\") and not p._has(\"p\"):\n p._.p = self._.p.subs(*exp)\n if self._has(\"q\") and not p._has(\"q\"):\n p._.q = self._.q.subs(*exp)\n if self._has(\"P\") and not p._has(\"P\"):\n p._.P = self._.P.subs(*exp)\n if self._has(\"Q\") and not p._has(\"Q\"):\n p._.Q = self._.Q.subs(*exp)\n for k, v in self._.triple.items():\n p._.triple[k] = v.subs(*exp)\n for k, v in self._.quadruple.items():\n p._.quadruple[k] = v.subs(*exp)\n for par, part in self._.subschemes.items():\n try:\n p.add_subscheme(par.subs(*exp, seen=seen), part)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=part)\n for par, part in self._.fusion_schemes.items():\n try:\n p.add_subscheme(par.subs(*exp, seen=seen), part)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=part)\n for h, s in enumerate(self._.subconstituents):\n if s is None:\n continue\n s, refs = s\n name = self._subconstituent_name(h)\n try:\n p._.subconstituents[h] = (p.add_subscheme(\n s.subs(*exp, seen=seen), name), refs)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=name)\n if self._has(\"complement\") and not p._has(\"complement\"):\n try:\n p._.complement = self._.complement.subs(*exp, seen=seen)\n except (InfeasibleError, AssertionError) as ex:\n raise InfeasibleError(ex, part=\"complement\")\n return (p, True)", "def _kv_resolve_symbolic(kv,\n keys,\n input_values = None,\n intermediate_values = None):\n for k, v in kv.items():\n if k in keys:\n kv[k] = _resolve_symbolic(v, input_values, intermediate_values)\n return kv", "def do_subs(self, e):\n for expr, var in self.items():\n e = e.xreplace({var: expr})\n return e", "def push_local_ns(self, name, value):\n self.interpreter.locals[name] = value", "def update_for_ssa(self, ast, symbol_table):\n # Print dominance frontier\n if debug:\n print(\"Dominance frontier:\")\n for block in self.blocks:\n print(('DF(%d) = %s' % (block.id, block.dominance_frontier)))\n\n argnames = [name.id for name in ast.args.args]\n\n #\n ### 1) Insert phi nodes in the right places\n #\n for name, variable in symbol_table.iteritems():\n if not variable.renameable:\n continue\n\n 
defining = []\n for b in self.blocks:\n if variable in b.gen:\n defining.append(b)\n\n for defining_block in defining:\n for f in defining_block.dominance_frontier:\n phi = f.phis.get(variable, None)\n if phi is None:\n phi = PhiNode(f, variable)\n f.phis[variable] = phi\n defining.append(f)\n\n #\n ### 2) Reaching definitions and variable renaming\n #\n\n # Set originating block for each variable (as if each variable were\n # initialized at the start of the function) and start renaming of\n # variables\n symbol_table.counters = dict.fromkeys(symbol_table, -1) # var_name -> counter\n self.blocks[0].symtab = symbol_table\n for var_name, var in symbol_table.items():\n if var.renameable:\n new_var = symbol_table.rename(var, self.blocks[0])\n new_var.uninitialized = var.name not in argnames\n\n self.rename_assignments(self.blocks[0])\n\n for block in self.blocks[1:]:\n block.symtab = symtab.Symtab(parent=block.idom.symtab)\n for var, phi_node in block.phis.iteritems():\n phi_node.variable = block.symtab.rename(var, block)\n phi_node.variable.name_assignment = phi_node\n phi_node.variable.is_phi = True\n\n self.rename_assignments(block)\n\n #\n ### 3) Update the phis with all incoming entries\n #\n for block in self.blocks:\n # Insert phis in AST\n block.phi_nodes = block.phis.values()\n for variable, phi in block.phis.iteritems():\n for parent in block.parents:\n incoming_var = parent.symtab.lookup_most_recent(variable.name)\n phi.incoming.add(incoming_var)\n\n phi.variable.uninitialized |= incoming_var.uninitialized\n\n # Update def-use chain\n incoming_var.cf_references.append(phi)", "def use_variable(self, v):\n self.resolve(v)", "def psi_inplace(a):", "def replace(name, newobject):", "def resolve(self, expr: loxExprAST.Expr, depth: int) -> None:\n self.locals[expr] = depth" ]
[ "0.52873677", "0.5232028", "0.51239073", "0.5026712", "0.5018338", "0.5013244", "0.49680254", "0.49638858", "0.49216217", "0.4905777", "0.48270255", "0.48172233", "0.47987476", "0.4792203", "0.47846404", "0.47664478", "0.47544852", "0.47457287", "0.47394028", "0.47348565", "0.4681551", "0.4667931", "0.46644187", "0.46510038", "0.4647129", "0.46441367", "0.4631544", "0.46081632", "0.46079522", "0.45794365" ]
0.61905044
0
Use factory mode to create the backbone. The backbone is ResNet.
def create_backbone(cfg): return backbone_factory[cfg.model.backbone.name](cfg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model(self):\n pass", "def create_model(self):\n pass", "def get_factory():", "def viewfactory(self):\n raise NotImplementedError()", "def __init__(self, backbone_name, config):\n\n backbone_config = Schema(\n {\n Required(\"input_shape\"): Schema((int, int, int)),\n Required(\"include_top\"): bool,\n Required(\"weights\"): str,\n Optional(\"alpha\"): float,\n }\n )\n\n config = backbone_config(config)\n\n if backbone_name == \"MobileNetV2\":\n self.model = tf.keras.applications.MobileNetV2(**config)\n elif backbone_name == \"ResNet50\":\n self.model = tf.keras.applications.ResNet50(**config)\n elif backbone_name == \"InceptionV3\":\n self.model = tf.keras.applications.InceptionV3(**config)\n\n # Remove Layers until Conv4\n for i, layer in enumerate(reversed(self.model.layers)):\n if backbone_name == \"ResNet50\" and layer._name == \"conv4_block6_out\":\n break\n elif (\n backbone_name == \"MobileNetV2\" and layer._name == \"block_13_expand_relu\"\n ):\n break\n else:\n self.model._layers.pop()\n\n self.model.layers[-1]._name = \"feature_map\"\n\n self.model = Model(\n self.model.input, self.model.layers[-1].output, name=\"Backbone\"\n )", "def create_models( self ):", "def make(self):\n\t\tif RENDER_VIEWS > 1:\n\t\t\tself._make()", "def create_detr_backbone(model_name: str, pretrained: str = None,):\n if(model_name == \"resnet50\"):\n backbone = torch.hub.load('facebookresearch/detr', 'detr_resnet50', pretrained=False)\n\n elif(model_name == \"resnet101\"):\n backbone = torch.hub.load('facebookresearch/detr', 'detr_resnet101', pretrained=False)\n\n elif(model_name == \"resnet50_dc5\"):\n backbone = torch.hub.load('facebookresearch/detr', 'detr_resnet50_dc5', pretrained=False)\n\n elif(model_name == \"resnet101_dc5\"):\n backbone = torch.hub.load('facebookresearch/detr', 'detr_resnet101_dc5', pretrained=False)\n\n else:\n raise ValueError(\"Unuspported backbone\")\n\n if pretrained is not None:\n checkpoint = _load_pretrained_weights(detr_weights_dict, model_name, pretrained=pretrained,)\n backbone.load_state_dict(checkpoint[\"model\"])\n return backbone\n\n return backbone", "def factory(self):\n raise NotImplementedError()", "def factory(self):\n return self._factory", "def factory(self):\n return self._factory", "def build_backbone(self):\n backbone = self.arch.backbone\n self.backbone = build_blocks(backbone, 'backbone')", "def build_backbone(cfg, input_shape=None):\n if input_shape is None:\n input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))\n\n backbone_name = cfg.MODEL.BACKBONE.NAME\n backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape)\n assert isinstance(backbone, Backbone)\n return backbone", "def get_backbone(self):\n print('sending you my backbone')\n return", "def create_reid_model(name, *args, **kwargs):\r\n if name not in __factory:\r\n raise KeyError(\"Unknown model:\", name)\r\n return __factory[name](*args, **kwargs)", "def modelfactory_factory(model: Type[Model], **kwargs: Any) -> Type[MIZModelFactory]:\n model_name, label = model._meta.model_name, model._meta.label\n # Check the cache for a factory with that label.\n if label in _cache:\n return _cache[label]\n # Check this module and the factory's base module for a factory\n # matching the default factory name.\n # Note that a factory's name may not be unique across multiple apps; need\n # to verify that any factory matching the name is a factory for the\n # requested model.\n factory_name = model_name.capitalize() + 'Factory'\n if hasattr(sys.modules[__name__], factory_name):\n 
modelfac = getattr(sys.modules[__name__], factory_name)\n if modelfac._meta.model == model:\n return modelfac\n # TODO: is it safe to use sys.modules to check for contents of a external\n # module, or should an import be attempted instead?\n # -- ALSO: why even bother checking factory.base?\n if hasattr(sys.modules['factory.base'], factory_name):\n modelfac = getattr(sys.modules['factory.base'], factory_name)\n if modelfac._meta.model == model:\n return modelfac\n # Create a new factory class:\n if 'Meta' not in kwargs:\n kwargs['Meta'] = type('Options', (MIZDjangoOptions,), {'model': model})\n modelfac = type(factory_name, (MIZModelFactory,), kwargs)\n _cache[label] = modelfac\n return modelfac", "def FactoryFactory():\n return tornado.web.Application([(r'/factory', FactoryHandler)])", "def backbone_generator(params):\n backbone_name = params.model_params.architecture.backbone.name\n if params.model in (\"RetinaNet\", \"MaskRCNN\"):\n if backbone_name == \"resnet\":\n resnet_params = params.model_params.architecture.backbone.params\n backbone_fn = resnet.Resnet(\n resnet_depth=resnet_params.depth,\n activation=params.model_params.norm_activation.activation,\n norm_activation=norm_activation_generator(params.model_params.norm_activation),\n )\n else:\n raise ValueError(\"Backbone {} is not supported for {} model.\".format(backbone_name, params.model))\n elif params.model == \"YOLOv4\":\n if backbone_name == \"darknet\":\n backbone_fn = darknet.CSPDarknet53()\n else:\n raise ValueError(\"Backbone {} is not supported for {} model.\".format(backbone_name, params.model))\n else:\n raise ValueError(\"Model {} is not supported.\".format(params.model))\n\n return backbone_fn", "def create():", "def create():", "def _init_model(self, forrest):\n rels = self.get_rels(forrest)\n self._model = RDPModel(rels)", "def factory_method(self):\n pass", "def factory_method(self):\n pass", "def create():\n\n return App()", "def MakeModel(self):\n pass", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def build_backbone(config):\n assert config.MODEL.BACKBONE in ['resnet50', 'resnet101'], \"backbone name is not supported!\"\n backbone_name = config.MODEL.BACKBONE\n dilation = False\n train_backbone = not config.EVAL\n return_interm_layers = False #TODO: impl case True for segmentation\n\n position_embedding = build_position_encoding(config.MODEL.TRANS.HIDDEN_SIZE)\n backbone = Backbone(backbone_name, train_backbone, return_interm_layers, dilation)\n model = Joiner(backbone, position_embedding)\n model.num_channels = backbone.num_channels\n\n return model", "def __create_model(self, classes):\r\n # self._model = model_zoo.get_model(model_name, classes=classes, pretrained_base=True)\r\n # self._model = model_zoo.get_model(model_name, classes=classes, pretrained=True)\r\n # self._model.reset_class(classes, reuse_weights=[cname for cname in classes if cname in self._model.classes])\r\n if self._model is None or classes != self.classes:\r\n model_name = 'ssd_{}_{}_custom'.format(self.img_size, self.backbone)\r\n self._model = model_zoo.get_model(model_name, classes=classes, pretrained=False, pretrained_base=True,\r\n root=self.temp_path)\r\n with warnings.catch_warnings(record=True):\r\n warnings.simplefilter(\"always\")\r\n self._model.initialize()\r\n self._model.collect_params().reset_ctx(self.ctx)\r\n _, _, _ = self._model(mx.nd.zeros((1, 3, self.img_size, self.img_size), self.ctx))\r\n\r\n self._model.reset_class(classes)\r\n self.classes = classes", "def 
make(self):\n pass", "def creator():\n return SeamlessFkIk()" ]
[ "0.63126415", "0.63126415", "0.6265553", "0.6215484", "0.61930734", "0.618915", "0.6162533", "0.6112772", "0.60765994", "0.59768045", "0.59768045", "0.5896723", "0.5890227", "0.5889007", "0.58806", "0.58799183", "0.58720887", "0.58451647", "0.5835862", "0.5835862", "0.5824702", "0.5795206", "0.5795206", "0.57787764", "0.57764673", "0.5740793", "0.5720503", "0.56928563", "0.5689456", "0.5680575" ]
0.74776757
0
Register multiple models with the same arguments. Calls register for each argument passed along with all keyword arguments.
def register_models(self, *models, **kwargs): for model in models: self.register(model, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register(self, *model):\n for m in model:\n m.Register()\n self.models.append(m)", "def register_admin_models(*args, **kwargs):\n for model in args:\n admin.register(model, session=kwargs['session'])\n return None", "def register(self, model_or_iterable, handler_class, **kwargs):\n if isinstance(model_or_iterable, ModelBase):\n model_or_iterable = [model_or_iterable]\n for model in model_or_iterable:\n if model in self._registry:\n try:\n model_name = model._meta.model_name\n except AttributeError:\n # Django < 1.6\n model_name = model._meta.module_name\n raise ModelAlreadyRegistered(\n \"The model {} is already registered.\".format(model_name))\n handler = get_handler_instance(model, handler_class, kwargs)\n self._registry[model] = handler\n contribute_to_class(model)", "def register(self, models, keys, signals=post_save, **kwargs):\n if not isinstance(signals, (list, tuple)):\n signals = [signals]\n for signal in signals:\n if settings.DEBUG:\n err = \"{} is not a valid Signal subclass.\".format(signal)\n assert isinstance(signal, Signal), err\n self._registry.setdefault(signal, {})\n if not isinstance(models, (list, tuple)):\n models = [models]\n for model in models:\n if settings.DEBUG:\n err = \"{} is not a valid ModelBase subclass.\".format(model)\n assert isinstance(model, ModelBase), err\n self._registry.get(signal).setdefault(model, set())\n if not isinstance(keys, (list, tuple)):\n keys = [keys]\n for key in keys:\n self._registry.get(signal).get(model).add(key)", "def wrap(self, *args, **kwargs):\n registers = {}\n for name, reg in zip(self.names, args):\n registers[name] = reg\n\n duplicate = registers.keys() & kwargs.keys()\n if duplicate:\n raise Exception(\"Multiple values recieved for registers {}.\".format(duplicate))\n\n registers.update(kwargs)\n\n names = registers.keys()\n missing = set(self.names) - names\n if missing:\n raise Exception(\"No value provided for registers {}.\".format(missing))\n extra = names - set(self.names)\n if extra:\n raise Exception(\"Value provided for unknown registers {}.\".format(extra))\n values = [registers[name] for name in self.names]\n return concat(values)", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_arguments(self):\n pass", "def register_models(self, app_label, *models):\n for model in models:\n # Store as 'name: model' pair in a dictionary\n # in the app_models dictionary\n model_name = model._meta.model_name\n model_dict = self.app_models.setdefault(app_label, SortedDict())\n if model_name in model_dict:\n # The same model may be imported via different paths (e.g.\n # appname.models and project.appname.models). 
We use the source\n # filename as a means to detect identity.\n fname1 = os.path.abspath(upath(\n sys.modules[model.__module__].__file__))\n fname2 = os.path.abspath(upath(\n sys.modules[model_dict[model_name].__module__].__file__))\n # Since the filename extension could be .py the first time and\n # .pyc or .pyo the second time, ignore the extension when\n # comparing.\n if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:\n continue\n model_dict[model_name] = model\n self._get_models_cache.clear()", "def register_all(models, admin_class=admin.ModelAdmin):\n for attr in dir(models):\n attr = getattr(models, attr, None)\n if isinstance(attr, type):\n if issubclass(attr, Model) and not attr._meta.abstract:\n try:\n admin.site.register(attr, admin_class)\n except admin.sites.AlreadyRegistered:\n pass", "def register(self, *, model, optimizer, criterion, **kwargs):\n raise NotImplementedError()", "def register(cls, model):\n cls.models[model] = True", "def register_model(name: str, model=None):\n global REGISTRY\n if model is not None:\n REGISTRY[name] = model\n return model\n\n def do_registration(model):\n REGISTRY[name] = model\n return model\n\n return do_registration", "def register(self, model_or_iterable, admin_class=None, **options):\n models = model_or_iterable\n if not isinstance(model_or_iterable, (tuple, list)):\n models = tuple([model_or_iterable])\n\n for model in models:\n if admin_class:\n admin_class = type(\n str('DynamicAdminRestModelAdmin'),\n (admin_class, AdminRestModelAdmin),\n dict(admin_class.__dict__))\n else:\n admin_class = AdminRestModelAdmin\n\n super(AdminRestAdminSite, self).register(\n [model], admin_class, **options)", "def register(*models, site=None):\n\n def _model_admin_wrapper(admin_class):\n if not models:\n raise ValueError(\"At least one model must be passed to register.\")\n\n admin_site = site or default_site\n\n if not isinstance(admin_site, AdminSite):\n raise ValueError(\"site must subclass AdminSite\")\n\n if not issubclass(admin_class, ModelAdmin):\n raise ValueError(\"Wrapped class must subclass ModelAdmin.\")\n\n admin_site.register(models, admin_class=admin_class)\n\n return admin_class\n\n return _model_admin_wrapper", "def register_all_models(module=None,path=None):\n if module is None:\n module='models'\n if path is None:\n path=os.path.dirname(os.path.abspath(__file__))\n classes = pyclbr.readmodule(module,[path])\n elif type(path) is str:\n classes = pyclbr.readmodule(module,[path])\n else:\n classes = pyclbr.readmodule(module,path)\n for model in classes:\n # now the dirty part, check that the models are classes that inherit from models.Model\n # if this inhertance is not explicit in the class call it will not be registered\n for superclass in classes[model].super:\n if re.search('models.Model',superclass):\n # this could be a from module import * above this loop\n exec('from %s import %s'%(module,classes[model].name))\n exec('admin.site.register(%s)'%classes[model].name)", "def register(cls, L):\r\n ...", "def register_inputs(self, args_):\n # TODO Should we be able to rebuild?\n def traversal_function(obj):\n if obj.id.value not in self.placeholders:\n self.placeholders[obj.id.value] = obj\n self.input_placeholder_ids.append(obj.id.value)\n\n self.input_placeholder_ids = []\n Role.nested_object_traversal(args_, traversal_function, PlaceHolder)\n self.input_placeholder_ids = tuple(self.input_placeholder_ids)", "def addAll(self, *args):\n pass", "def addAll(self,*args, **kwargs):\n pass", "def register_models(self):\n models 
= self.configs_['pipeline_models']\n\n for i in range(0, len(models)):\n new_model = PipelineModel(models[i], self.score_method_,\n self.predict_as_probability_)\n\n if not new_model.init():\n print 'Error registering model', models[i]['model']['id'],\\\n 'in ensemble'\n return False\n print 'Model', new_model.get_name(), 'registered in ensemble'\n self.models_.append((models[i]['model']['id'],\n new_model.get_sklearn_pipeline()))\n\n if 'weight' not in models[i]:\n self.model_weights_.append(1)\n else:\n self.model_weights_.append(models[i]['weight'])\n return True", "def register(self, model_or_iterable, moderation_class):\n if isinstance(model_or_iterable, ModelBase):\n model_or_iterable = [model_or_iterable]\n for model in model_or_iterable:\n if model in self._registry:\n raise AlreadyModerated(\"The model '%s' is already being moderated\" % model._meta.model_name)\n self._registry[model] = moderation_class(model)", "def register_arguments(self, mandatory = [], optional = {}, bind_to_vectors = ''):\n\n self.args_mandatory = mandatory\n self.args_optional = optional.copy()\n\n # Arguments in session has more priority than registered variables\n optional.update(self.session[self.name]['stored_args'])\n self.session[self.name]['stored_args'] = optional\n\n self.bind_to_vectors = bind_to_vectors", "def _RegisterInputs(self):\n args = []\n for source in ['FcA', 'FcB']:\n gps_type = self._gps_type_per_source[source]\n if gps_type == 'Septentrio':\n args += [\n self._Arg('SeptentrioSolution', source, 'pvt_cartesian.x'),\n self._Arg('SeptentrioSolution', source, 'pvt_cartesian.y'),\n self._Arg('SeptentrioSolution', source, 'pvt_cartesian.z'),\n self._Arg('SeptentrioSolution', source, 'pvt_cartesian.mode'),\n self._Arg('SeptentrioSolution', source,\n 'pvt_cartesian.timestamp.tow'),\n ]\n elif gps_type == 'NovAtel':\n args += [\n self._Arg('NovAtelSolution', source, 'best_xyz.pos_x'),\n self._Arg('NovAtelSolution', source, 'best_xyz.pos_y'),\n self._Arg('NovAtelSolution', source, 'best_xyz.pos_z'),\n self._Arg('NovAtelSolution', source, 'best_xyz.pos_type'),\n self._Arg('NovAtelSolution', source, 'best_xyz.timestamp.tow'),\n ]\n else:\n assert False\n return args", "def register_model(self, model_name: str, model: Any, training_columns: List[str]):\n self.models[model_name] = (model, training_columns)" ]
[ "0.79722357", "0.67167324", "0.66146886", "0.6543116", "0.6176708", "0.600071", "0.600071", "0.600071", "0.600071", "0.600071", "0.600071", "0.600071", "0.600071", "0.59464824", "0.58884686", "0.5851817", "0.58074623", "0.5693294", "0.5667267", "0.5585553", "0.55767953", "0.5563988", "0.5536636", "0.55274564", "0.55225456", "0.5514957", "0.5502708", "0.5494994", "0.5450415", "0.5399668" ]
0.7950732
1
This will return a string that can be used as a prefix for django's cache key. Something like key.1 or key.1.2 If a version was not found '1' will be stored and returned as the number for that key. If extra is given a version will be returned for that value. Otherwise the major version will be returned.
def get_version(self, extra=None): if extra: key = self._get_extra_key(extra) else: key = self.key v = self._get_cache(key).get(key) if v == None: v = self._increment_version(extra=extra) return "%s.%s" % (key, v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cache_key(prefix):\n return '%s' % (prefix)", "def _make_key(self, extra_prefix, key):\n if extra_prefix:\n return \"-\".join((self.prefix, extra_prefix, key))\n else:\n return \"-\".join((self.prefix, key))", "def _make_key(self, extra_prefix, key):\n if extra_prefix:\n return \"-\".join((self.prefix, extra_prefix, key))\n else:\n return \"-\".join((self.prefix, key))", "def get_cache_key(self, extra_args='', version=None):\r\n query, params = self.query.get_compiler(using=self.db).as_sql()\r\n query_string = (query % params).strip().encode(\"utf-8\")\r\n base_key = md5_constructor('.'.join((query_string, extra_args))).hexdigest()\r\n return cache.make_key('.'.join((self.model._meta.db_table, 'cachebot.results', base_key)), version=version)", "def cache_key_part(self) -> str:\n return self.name", "def safe_key(key, key_prefix, version):\r\n\r\n # Clean for whitespace and control characters, which\r\n # cause memcache to raise an exception\r\n key = cleaned_string(key)\r\n key_prefix = cleaned_string(key_prefix)\r\n version = cleaned_string(version)\r\n\r\n # Attempt to combine the prefix, version, and key\r\n combined = \":\".join([key_prefix, version, key])\r\n\r\n # If the total length is too long for memcache, hash it\r\n if len(combined) > 250:\r\n combined = fasthash(combined)\r\n\r\n # Return the result\r\n return combined", "def prefix(self) -> Optional[str]:\n return RE_VERSION.match(str(self._version)).group(1)", "def get_version_key(self, version):\n if self._generic_only:\n return GENERIC_VERSION\n else:\n self.check_version_exists(version)\n return version", "def _make_cache_key(key_prefix):\n if callable(key_prefix):\n cache_key = key_prefix()\n elif '%s' in key_prefix:\n cache_key = key_prefix % request.path\n else:\n cache_key = key_prefix\n\n cache_key = cache_key.encode('utf-8')\n\n return cache_key", "def verPrefix(version, versionPattern=''):\n if not versionPattern:\n versionPattern = os.environ.get('KOMBI_VERSION_PATTERN', DEFAULT_VERSION_PATTERN)\n\n patternParts = __splitVersionPattern(versionPattern)\n return str(version)[:len(patternParts['prefix'])]", "def _cache_key(cls, pk, db):\r\n key_parts = ('o', cls._meta, pk, db)\r\n return ':'.join(map(encoding.smart_unicode, key_parts))", "def build_flattened_key(prefix, key):\n return key if not prefix else prefix + \".\" + key", "def generate_cache_key(cached, **kwargs):\r\n\r\n if isinstance(cached, QuerySet):\r\n key = str(cached.query)\r\n\r\n elif isinstance(cached, (Model, ModelBase)):\r\n key = '%s.%s:%s' % (cached._meta.app_label,\r\n cached._meta.module_name,\r\n ','.join('%s=%s' % item for item in kwargs.iteritems()))\r\n\r\n else:\r\n raise AttributeError(\"Objects must be queryset or model.\")\r\n\r\n if not key:\r\n raise Exception('Cache key cannot be empty.')\r\n\r\n key = clean_cache_key(key)\r\n return key", "def make_key(k, with_locale=True):\r\n key = encoding.smart_str('%s:%s' % (CACHE_PREFIX, k))\r\n if with_locale:\r\n key += encoding.smart_str(translation.get_language())\r\n # memcached keys must be < 250 bytes and w/o whitespace, but it's nice\r\n # to see the keys when using locmem.\r\n return hashlib.md5(key).hexdigest()", "def _pre_key_for(self, *objects):\n obj_type = objtype(objects[0])\n return \"{}/{}/%s/{}\".format(self.cache_prefix, obj_type, str(self.pk))", "def memoize_key(prefix, *args, **kwargs):\n key = hashlib.md5()\n for arg in itertools.chain(args, sorted(kwargs.items())):\n key.update(str(arg))\n return '%s:memoize:%s:%s' % (settings.CACHE_PREFIX,\n prefix, 
key.hexdigest())", "def GetKey(self, version_number):\n return self.dict[str(version_number)]", "def key_version(self) -> Optional[str]:\n return pulumi.get(self, \"key_version\")", "def kms_key_version_name(self) -> str:\n return pulumi.get(self, \"kms_key_version_name\")", "def getPrefix(self):\n return \"20gig\"", "def build_key(key):\n return os.path.join(PREFIX, key)", "def format_prefix(meta):\n ts = meta.time.strftime('%H:%M:%S.%f')[:-3]\n if meta.comm and meta.pid:\n return \"%s %s[%d]: \" % (ts, meta.comm, meta.pid)\n else:\n return ts + \": \"", "def _get_cache_key(self):\n\n return '__CACHED__{method}__'.format(\n method=function_utils.get_fully_qualified_name(self.fget).upper())", "def generate_discovery_cache_key(name, ext):\n\n return 'wopi_' + name + '_' + ext", "def cache_key(self):\n return self.__class__.create_cache_key(self.key, **self.get_kwargs())", "def generate_cache_key(self, *args, **kwargs):\n #smooshed = [\"%s=%s\" % (key, value) for key, value in kwargs.items()]\n smooshed = urlencode(kwargs)\n\n # Use a list plus a ``.join()`` because it's faster than concatenation.\n return \"%s:%s:%s:%s\" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), smooshed)", "def getPrefix(self):\n return( self.id.split('.')[0] )", "def cache_key(self):\n\n return \"{}.json\".format(self.path)", "def __buildVersion(version, versionPattern):\n patternParts = __splitVersionPattern(versionPattern)\n return patternParts['prefix'] + str(version).zfill(len(patternParts['padding'])) + patternParts['suffix']", "def version_get(self, string, prefix):\n\n regex = r\"[/_.]{}\\d+\".format(prefix)\n matches = re.findall(regex, string, re.IGNORECASE)\n\n if not len(matches):\n msg = \"No '_{}#' found in '{}'\".format(prefix, string)\n raise ValueError(msg)\n return matches[-1:][0][1], re.search(r\"\\d+\", matches[-1:][0]).group()" ]
[ "0.7012189", "0.6505339", "0.6505339", "0.64505917", "0.6364331", "0.6190883", "0.6112342", "0.6106189", "0.5995421", "0.59854543", "0.59607506", "0.58653015", "0.5854469", "0.5810253", "0.58066434", "0.5804502", "0.57891154", "0.57810754", "0.5778898", "0.56279063", "0.5620915", "0.56036514", "0.55848706", "0.5574617", "0.5564301", "0.55453587", "0.55222094", "0.5518352", "0.55139476", "0.5512786" ]
0.755687
0
Reduce the bond universe to bonds that are in any one grid
def reduceUniverse(self): self.bondList = list(set([bond for grid in self.parent.gridList for bond in grid.bondList]))#set removes duplicates self.df = self.df.reindex(self.bondList) self.df = self.df[pandas.notnull(self.df['ISIN'])] self.rfbonds = list(self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers)].index) self.embondsisins = self.df.loc[~self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN'] self.rfbondsisins = self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ignore_biasbn(directions):\n for d in directions:\n if d.dim() <= 1:\n d.fill_(0)", "def _remove_dangling_bonds(self) -> None:\n for residue in self.residues:\n bonds, impropers, cross_maps, ics = [], [], [], []\n for bond in residue.bonds:\n for atom_id in bond:\n if atom_id not in self._id_to_index:\n break\n else:\n bonds.append(bond)\n for improper in residue.impropers:\n for atom_id in improper:\n if atom_id not in self._id_to_index:\n break\n else:\n impropers.append(improper)\n for cross_map in residue.cross_maps:\n for atom_id in cross_map:\n if atom_id not in self._id_to_index:\n break\n else:\n cross_maps.append(cross_map)\n for ic in residue.ics:\n for res_index, atom_name in ic[:4]:\n if atom_name.replace(\"*\", \"\") not in self._id_to_index:\n break\n else:\n ics.append(ic)\n residue.bonds = bonds\n residue.impropers = impropers\n residue.cross_maps = cross_maps\n residue.ics = ics", "def build_bonds(self):\n shape_prime = np.array([self.shape[0]-1,self.shape[1]-1,self.shape[2]-1])\n zeros = np.array([0,0,0])\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n for k in range(self.shape[2]):\n for b,bond in enumerate(self.cell.bonds):\n newbond = copy.deepcopy(bond)\n newbond.cell1 += [i,j,k]\n newbond.cell2 += [i,j,k]\n #ToDo make a function to shorten those lines\n if np.prod(newbond.cell1 <= shape_prime) and np.prod(newbond.cell2<=shape_prime) and np.prod(zeros <=newbond.cell1) and np.prod(zeros <= newbond.cell2):\n newbond.coordinate1 = self.sites[newbond.cell1[0],newbond.cell1[1],newbond.cell1[2],newbond.site1].coordinate\n newbond.coordinate2 = self.sites[newbond.cell2[0],newbond.cell2[1],newbond.cell2[2],newbond.site2].coordinate\n self.bonds.append(newbond)", "def GetBonds(Bonds):\n b = sorted([(min(x), max(x)) for x in Bonds])\n Bonds13, Bonds14 = [], []\n for (a1,b1) in b:\n #check for bonds with a1 at the center of a 1-3 interaction,\n #letting b1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == a1 and b2 < b1] + \\\n [a2 for (a2,b2) in b if b2 == a1 and a2 < b1]\n Bonds13.extend([(min(c,b1), max(c,b1)) for c in clist])\n #check for bonds with b1 at the center of a 1-3 interaction,\n #letting a1 be the higher number of the two flanking\n clist = [b2 for (a2,b2) in b if a2 == b1 and b2 < a1] + \\\n [a2 for (a2,b2) in b if b2 == b1 and a2 < a1]\n Bonds13.extend([(min(c,a1), max(c,a1)) for c in clist])\n #find atoms connected to a1\n clist = [b2 for (a2,b2) in b if a1==a2 and not b1==b2] +\\\n [a2 for (a2,b2) in b if a1==b2 and not b1==a2]\n #find atoms connected to b1\n dlist = [a2 for (a2,b2) in b if b1==b2 and not a1==a2] +\\\n [b2 for (a2,b2) in b if b1==a2 and not a1==b2]\n Bonds14.extend([(min(c,d), max(c,d)) for c in clist for d in dlist])\n Bonds1213 = b + Bonds13\n #sort\n Bonds1213.sort()\n Bonds14.sort()\n #get unique values in case of loops\n Bonds1213 = [x for (i,x) in enumerate(Bonds1213) if i == 0 or x != Bonds1213[i-1]]\n Bonds14 = [x for (i,x) in enumerate(Bonds14) if i == 0 or x != Bonds14[i-1]]\n #convert to arrays \n Bonds1213 = array(Bonds1213, int)\n Bonds14 = array(Bonds14, int)\n return Bonds1213, Bonds14", "def _shipCollide(self):\n for s in range(self.getLengthAlien()):\n for t in range(len(self._aliens[0])):\n for b in self._bolts:\n if self._aliens[s][t] != None and + \\\n self._aliens[s][t].collides(b):\n self._aliens[s][t] = None\n self._bolts.remove(b)\n self._key = False", "def reduce_possibilities_by_column(self):\n y = self.targetCell.y\n for i in range(1,10): #content\n for n in 
range(9): #x-coord adjacent cells\n neighbour_cell = self.puzzleGrid.grid[n][y]\n if self.targetCell != neighbour_cell:\n self.targetCell.column_neighbour_possibilities.append( neighbour_cell.possibilities)\n if str(i) == neighbour_cell.finalNumber:\n self.RemovePossiblityFromTargetCell(i)\n self.targetCell.column_neighbour_possibilities = flatten_list(self.targetCell.column_neighbour_possibilities)", "def island_migration(self):\n for y in self.island_map:\n for cell in y:\n cell.migration()\n\n for y in self.island_map:\n for cell in y:\n for animal in cell.population:\n animal.has_moved = False", "def removeDoubleUnbondedAtoms (self):\r\n atomsToRemove = [] # Stores index of atoms we will need to remove\r\n \r\n # Go through each mol\r\n for i in range(len(self.mol)):\r\n # Atom is disconnected if number of unbonded spikes is equal to the number of spikes in the atom\r\n numUnbondedSpikes = 0\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.mol[i].spikeArray[j].bonded == False:\r\n # Spike not bonded so increment counter\r\n numUnbondedSpikes += 1\r\n # If atom disconnected then need to check to see if dangling nodes or tails are bonded\r\n if numUnbondedSpikes == len(self.mol[i].spikeArray):\r\n print (\"Atom: \" + str(self.mol[i].rbnNumber) + \" is being removed \\n\")\r\n anyBondedDanglingNodes = False\r\n for j in range(len(self.mol[i].spikeArray)):\r\n if self.isUnbondedAtomConnected(self.mol[i].spikeArray[j]) == True:\r\n anyBondedDanglingNodes = True\r\n # If atom has connected dangling nodes then need to convert atom to metaAtom, add metaAtom to metaMolecule and\r\n # remove atom from ring\r\n if anyBondedDanglingNodes == True:\r\n print (\"A new metaAtom is being created \\n\")\r\n newMetaAtom = self.convertUnbondedAtomToMetaAtom(self.mol[i])\r\n self.metaMolecule.addMetaAtom(newMetaAtom)\r\n atomsToRemove.append(i)\r\n \r\n # Now need to remove atoms\r\n print (\"Length of ring before removal: \" + str(len(self.mol)) + \"\\n\")\r\n for i in range(len(atomsToRemove)):\r\n self.mol.pop(atomsToRemove[i])\r\n print (\"Length of ring after removal: \" + str(len(self.mol)) + \"\\n\")\r\n # Finally need to update metaMolecule with new mol \r\n self.metaMolecule.updateListMols(self)", "def disconnect(self):\n self.costs = set()\n self.disconnectedHouses = []\n self.nthChoiceHouses = []\n self.allConnected = True\n for house in self.houses:\n house.connection = set()\n house.distance = 1000\n house.possible_connections = []\n for battery in self.batteries:\n battery.connectedHouses = []\n battery.capacity = battery.maxCapacity\n battery.totalDistance = set()", "def fL():\n for n in b.allNodes():\n n.autoplace()", "def simplify(self):\n \n added_clumps = []\n staying_tunnels = []\n removed_clumps = set()\n \n for tunnel in self.tunnels:\n tunnel_end_distance = self.get_distance(tunnel.start, tunnel.end)\n if tunnel_end_distance - tunnel.start.distance_from_wall < 0 or \\\n tunnel_end_distance - tunnel.end.distance_from_wall < 0:\n removed_clumps.add(tunnel.start.node)\n removed_clumps.add(tunnel.end.node)\n new_node = tunnel.merge_endpoints()\n added_clumps.append(new_node)\n else:\n staying_tunnels.append(tunnel)\n #print removed_clumps\n \n new_clumps = []\n \n for clump in list(self.clumps) + added_clumps:\n if clump not in removed_clumps:\n new_clumps.append(clump)\n else:\n removed_clumps.remove(clump)\n\n if removed_clumps:\n raise Exception(\"Some removed clumps couldn't be found in the main set and I'm scared\")\n \n self.clumps = new_clumps\n self.tunnels = 
staying_tunnels", "def hole_cleanup(atom_list): \n joey = atom_list.copy()\n while (len(joey) != 0):\n for atom in joey:\n takein = [atom]\n source_update = takein.copy()\n check = 1\n while (check == 1):\n source = source_update.copy()\n source_update = []\n c = len(takein)\n for element in source:\n bonds = [bond[0] for bond in identify_bonds(element, joey) if bond[0] not in takein]\n for h in bonds:\n takein.append(h)\n source_update.append(h)\n if ((len(takein) == c) and (len(takein) < 6)):\n check = 0\n for element in takein:\n atom_list.remove(element)\n elif (len(takein) == c):\n check = 0\n for element in takein:\n joey.remove(element)\n return atom_list", "def _remove_vacuumcoupling(self):\n if len(self.coupling) == 1 or \\\n [ll.vacuum for ll in self.indexes].count(True) == 0:\n return False\n\n flag = False\n for vac in self.indexes:\n if vac.vacuum:\n id0, id1 = self.coupling_id(vac)\n vac_coupling = self.coupling[id0]\n fid, sid = [x for x in range(3) if x != id1]\n fleg, sleg = vac_coupling[fid], vac_coupling[sid]\n\n # Other legs do not have the same flow\n if fleg[1] is not sleg[1]:\n flag = True\n break\n\n # No suitable vacuum found to remove.\n if not flag:\n return False\n\n def mappingf(okey):\n assert okey[id0][fid] == okey[id0][sid]\n yield tuple(c for ii, c in enumerate(okey) if ii != id0)\n\n prefdict = sls._prefremovevac(id1, [x[1] for x in vac_coupling])\n\n def prefactorf(okey, nkey):\n # return 1. for empty array\n return np.prod([prefdict.get(ss, lambda x: 1.)(okey[id0][fid][ii])\n for ii, ss in enumerate(self.symmetries)])\n\n self._manipulate_coupling(mappingf, prefactorf)\n\n # fleg or sleg (one of the other legs in the coupling) is internal\n # Or both. remove either sleg or fleg and substitute it by the other\n # everywhere in the couplings\n if fleg[0] in self._internallegs:\n to_rm, to_swap = fleg[0], sleg[0]\n else:\n to_rm, to_swap = sleg[0], fleg[0]\n\n # Substitute the internal by the other leg\n temp = self.substitutelegs([to_rm], [to_swap])\n # Remove the coupling\n self._coupling = tuple(c for ii, c in enumerate(temp) if ii != id0)\n\n # Remove vacuum from index\n ii = self._indexes.index(vac)\n self._indexes.remove(vac)\n\n self._internallegs.remove(to_rm)\n\n for k, v in self.items():\n self[k] = np.squeeze(v, axis=ii)\n return True", "def unsat(self):\n\n\t\tnearlist = residuesAroundAtoms(self.design.ligand.atom, self.design,12.0)\n\t\tHBN = HBnetwork()\t\n\t\tHBN.createNetwork(reslist=nearlist)\n\t\tHBN.findHbonds()\n\t\tunsat = HBN.unsatisfiedHbonds()\n\n\t\tids = []\n\t\tTaken = {}\n\t\tmysel = cmd.get_model(\"nearby\")\n\t\tnatom = len(mysel.atom)\n\t\tfor i in range(natom):\n\t\t\tTaken[mysel.atom[i].resi] = True\n\n\t\t\tmylist = []\n\t\t\tfor i in Taken.keys():\n\t\t\t\tmylist.append(int(i))\n\n\t\tmylist.sort()\n\n\t\tget_surface_area(self.designFile, \"surf\")\n\t\tsurfMol = Molecule()\n\t\tsurfMol.readPDB(\"surf.asa\")\n\t\tif len(surfMol.chain) == 0:\n\t\t\tprint \"surface area script not working ...\"\n\t\t\tprint \"aborting\"\n\t\t\treturn\n\n\t\tfor atm in unsat:\n\t\t\tprint atm\n\t\t\tif int(atm.resi) in mylist:\n\t\t\t\tmyres = surfMol.getResidue(atm.resi)\n\t\t\t\tmyatm = myres.getAtom(atm.name)\n\t\t\t\tif myatm.occupancy < 4.0:\n\t\t\t\t\tif not HBN.containsAtom(atm):\n\t\t\t\t\t\tids.append(str(int(atm.file_id)))\n\n\t\t\t\t\t\tif len(ids) > 100:\n\t\t\t\t\t\t\tprint \"over 100\"\n\t\t\t\t\t\t\tbreak\n\n\t\tids = string.join(ids, \",\")\t\n\t\tmysel = \"designed & (id \" + ids + \")\"\n\t\tcmd.create(\"_unsatisfied\", 
mysel)\n\t\tcmd.set(\"sphere_scale\", 0.25, \"_unsatisfied\")\n\t\tcmd.show(\"spheres\", \"_unsatisfied\")\n\t\tcmd.color(\"hotpink\", \"_unsatisfied\")\n\n\t\tHBN.clear()", "def clean_edges(self):", "def pruned_active_bonds(reactant, fbonds, bbonds):\n logger.info('Pruning active bonds for special cases')\n\n # Flat list of all the atom indexes in the breaking/forming bonds\n a_atoms = [bond.atom_indexes for bond in fbonds + bbonds]\n\n coords = reactant.coordinates\n\n if len(fbonds) == 1 and len(bbonds) == 2 and len(set(a_atoms)) == 3:\n logger.info('Found 3-membered ring with 2 breaking & 1 forming bonds')\n\n f_i, f_j = fbonds[0].atom_indexes\n f_vec = coords[f_i] - coords[f_j]\n f_vec /= np.linalg.norm(f_vec)\n\n b0_i, b0_j = bbonds[0].atom_indexes\n b0_projection = np.dot((coords[b0_i] - coords[b0_j]), f_vec)\n\n b1_i, b1_j = bbonds[1].atom_indexes\n b1_projection = np.dot((coords[b1_i] - coords[b1_j]), f_vec)\n\n if b0_projection > b1_projection:\n logger.info(f'Excluding {bbonds[0]}')\n bbonds.pop(0)\n else:\n logger.info(f'Excluding {bbonds[1]}')\n bbonds.pop(1)\n\n if any(bond.dr < 0 for bond in bbonds):\n logger.info('Found at least one breaking bond where the final distance'\n ' is shorter than the initial - removing')\n \"\"\"\n Counterintuitively, this is possible e.g. metallocyclobutate formation\n from a metalocyclopropane and a alkylidene (due to the way bonds are \n defined)\n \"\"\"\n bbonds = [bond for bond in bbonds if bond.dr > 0]\n\n return fbonds + bbonds", "def cut_bonds_z_random(xy, NL, KL, BL, target_z, min_coord=2, bulk_determination='Triangulation', check=False):\n print ' Cutting bonds z...'\n NP = len(xy)\n NN = np.shape(NL)[1]\n\n # Identify boundary pts, bulk pts\n print ' cut_bonds_z : extract boundary...'\n boundary = extract_boundary(xy, NL, KL, BL)\n # print 'boundary = ', boundary\n bulk = np.setdiff1d(np.arange(NP), boundary)\n NP_bulk = len(bulk)\n NP_bound = len(np.unique(boundary))\n print 'NP_bound = ', NP_bound\n print 'NP_bulk = ', NP_bulk\n\n if bulk_determination == 'Triangulation':\n # Form indices of BL in bulk. 
Bulk bonds appear in two simplices.\n # CHANGE THIS TO TEST IF BOND TWO SIMPLICES\n TRI = BL2TRI(BL, xy)\n Binds_list = []\n for ii in range(len(BL)):\n row = BL[ii]\n # get rows of TRI where each elem of row lives\n is_a = np.where(TRI == row[0])[0]\n is_b = np.where(TRI == row[1])[0]\n # The intersection of those rows gives where both live\n simplices = np.intersect1d(is_a, is_b)\n # print 'simplices = ', simplices\n # print 'np.size(simplices) = ', np.size(simplices)\n # If more than one simplex, bulk bond\n if np.size(simplices) < 2:\n # add to boundary list\n Binds_list.append(ii)\n # print ' --> Binds = ', Binds_list\n\n Binds = np.array(Binds_list).ravel()\n # Get the BL indices of bulk bonds --> (binds)\n binds = np.setdiff1d(np.arange(len(BL)), Binds)\n\n elif bulk_determination == 'Endpts':\n # Define bulk bonds as connecting at least one bulk particle\n is_a = np.in1d(BL[:, 0], bulk)\n is_b = np.in1d(BL[:, 1], bulk)\n binds = np.where(np.logical_or(is_a, is_b))[0]\n Binds = np.setdiff1d(np.arange(len(BL)), binds)\n else:\n raise RuntimeError('ERROR: argument <bulk_determination> did not match known method!')\n\n # print 'binds = ', binds\n # print 'Binds = ', Binds\n print 'len(binds) = ', len(binds)\n print 'len(Binds) = ', len(Binds)\n\n # Check\n if check:\n # plt.triplot(xy[:,0], xy[:,1], TRI, 'bo-')\n for bii in binds:\n XX = xy[BL[bii], 0]\n YY = xy[BL[bii], 1]\n plt.plot(XX, YY, 'b-')\n for Bii in Binds:\n XX = xy[BL[Bii], 0]\n YY = xy[BL[Bii], 1]\n plt.plot(XX, YY, 'r-')\n # for i in range(len(xy)):\n # plt.text(xy[i,0]+0.2,xy[i,1],str(i))\n plt.gca().set_aspect('equal')\n plt.show()\n\n # Compute the starting z in the bulk\n countKL = [KL[jj] for jj in bulk]\n # print 'found = ', np.count_nonzero(countKL), ' connections for ', NP_bulk, ' bulk particles...'\n z_start = float(np.count_nonzero(countKL)) / float(NP_bulk)\n print 'z_start = ', z_start\n print 'target_z = ', target_z\n\n # number of bonds to cut in the bulk\n # Be sure to divide the number of bonds by 2, since each bond double counts\n nbulk2cut = int(max([0, round((z_start - target_z) * 0.5 * float(NP_bulk))]))\n print 'nbulk2cut = ', nbulk2cut\n # number of bonds to cut in the boundary = nbulk2cut * (# boundary bonds)/(#bulk bonds)\n nB2cut = int(round(nbulk2cut * float(len(Binds)) / float(len(binds))))\n print 'nB2cut = ', nB2cut\n\n # CUT RANDOM BONDS\n\n ############################################\n ## DO BOUNDARY FIRST --> to avoid dangling particles\n # Choose nB2cut randomly from bulk\n # Shuffle bulk in-place\n np.random.shuffle(Binds)\n # Now work slowly towards selecting nbulk2cut: of the bonds,\n # but ensure that never leave a particle dangling without bonds\n done_cutting = False\n dmyi = 0\n # Set up mask for BL\n mask = np.ones(len(BL), dtype=bool)\n\n #################################\n # # Check :\n # plt.figure()\n # plt.gca().set_aspect('equal')\n # for ii in range(len(BL)):\n # XX = xy[BL[ii],0]\n # YY = xy[BL[ii],1]\n # plt.plot(XX, YY, 'b-')\n # plt.text(np.mean(XX), np.mean(YY), str(ii))\n # plt.show()\n #################################\n\n while not done_cutting:\n if len(np.where(mask == False)[0]) == nB2cut:\n done_cutting = True\n else:\n if np.mod(dmyi, 200) == 1:\n print 'cutting boundary bond: pass ', dmyi, ' (need to cut', nB2cut, ')'\n # consider adding dmyi element of bind to cut (make a test list)\n test = copy.deepcopy(mask)\n test[Binds[dmyi]] = False\n BLtmp = BL[test]\n # Check that BL leads to no dangling particles\n KLtmp = BL2KL(BLtmp, NL)\n # if all the rows in 
KLtmp have at least one nonzero bond, add dmyi to cut\n # print 'KLtmp.any(axis=1) = ', KLtmp.any(axis=1)\n if (np.where(~KLtmp.any(axis=1))[0]).size > 0:\n dmyi += 1\n else:\n mask[Binds[dmyi]] = False\n dmyi += 1\n\n ############################################\n # Choose nbulk2cut randomly from bulk\n # Shuffle bulk in-place\n np.random.shuffle(binds)\n # print 'binds = ', binds\n # Now work slowly towards selecting nbulk2cut: of the bonds,\n # but ensure that never leave a particle dangling without bonds\n done_cutting = False\n dmyi = 0\n while not done_cutting:\n if len(np.where(mask == False)[0]) == nB2cut + nbulk2cut:\n done_cutting = True\n else:\n if np.mod(dmyi, 200) == 1:\n print 'cutting bulk bond: pass ', dmyi, ' (need to cut', nbulk2cut, ')'\n # consider adding dmyi element of bind to cut (make a test list)\n test = copy.deepcopy(mask)\n test[binds[dmyi]] = False\n BLtmp = BL[test]\n # Check that BL leads to no dangling particles\n KLtmp = BL2KL(BLtmp, NL)\n # print 'KL = ', KLtmp\n # print 'np.where(~KLtmp.any(axis=1))[0] = ', np.where(~KLtmp.any(axis=1))[0]\n # if all the rows in KLtmp have at least one nonzero bond, add dmyi to cut\n if (np.where(~KLtmp.any(axis=1))[0]).size > min_coord - 1:\n dmyi += 1\n else:\n mask[binds[dmyi]] = False\n dmyi += 1\n\n # drop the nbulk2cut + nB2cut rows from total Bond List\n BL = BL[mask]\n # print 'BLout = ', BLout\n NL, KL = BL2NLandKL(BL, NN=NN)\n if check:\n display_lattice_2D(xy, BL)\n\n print '\\nReturning lattice with ', len(BL), ' bonds for ', NP, ' particles...'\n print 'KL[bulk] = ', KL[bulk]\n\n return NL, KL, BL", "def _adjust_constraints(self, point):\n logger.info(f'Adjusting constraints on point {len(self)}')\n\n # Flat list of all the atom indexes involved in the bonds\n atom_idxs = [i for bond in self.bonds for i in bond]\n\n max_step, min_step = ade.Config.max_step_size, ade.Config.min_step_size\n\n for bond in self.bonds:\n (i, j), coords = bond.atom_indexes, self[-1].species.coordinates\n\n # Normalised r_ij vector\n vec = coords[j] - coords[i]\n vec /= np.linalg.norm(vec)\n\n # Calculate |∇E_i·r| i.e. the gradient along the bond. 
Positive\n # values are downhill in energy to form the bond and negative\n # downhill to break it\n gradi = np.dot(self[-1].grad[i], vec) # |∇E_i·r| bond midpoint\n gradj = np.dot(self[-1].grad[j], -vec)\n\n # Exclude gradients from atoms that are being substituted\n if atom_idxs.count(i) > 1:\n grad = gradj\n elif atom_idxs.count(j) > 1:\n grad = gradi\n else:\n grad = np.average((gradi, gradj))\n\n logger.info(f'|∇E_i·r| = {grad:.4f} on {bond}')\n\n # Downhill in energy to break/form this breaking/forming bond\n if grad * np.sign(bond.dr) > 0:\n dr = np.sign(bond.dr) * ade.Config.max_step_size\n\n # otherwise use a scaled value, depending on the gradient\n # large values will have small step sizes, down to min_step Å\n else:\n dr = (max_step - min_step) * np.exp(-(grad/0.05)**2) + min_step\n dr *= np.sign(bond.dr)\n\n new_dist = point.species.distance(*bond.atom_indexes) + dr\n\n # No need to go exceed final distances on forming/breaking bonds\n if bond.forming and new_dist < bond.final_dist:\n new_dist = bond.final_dist\n\n elif bond.breaking and new_dist > bond.final_dist:\n new_dist = bond.final_dist\n\n else:\n logger.info(f'Using step {dr:.3f} Å on bond: {bond}')\n\n point.constraints[bond.atom_indexes] = new_dist\n\n return None", "def fixN5(self):\n smi = None\n _bom = self.bom.copy()\n _chgs = self.chgs.copy()\n for ia in self.iasN5:\n ai = self.atoms[ia]\n for bi in ai.GetBonds():\n ia1, ja1 = bi.GetBeginAtomIdx(), bi.GetEndAtomIdx()\n ja = ia1 if ja1 == ia else ja1\n aj = self.atoms[ja]\n assert ja != ia\n if _bom[ia,ja] == 2: # re-assign BO to 1 for the first double bond found\n _bom[ia,ja] = _bom[ja,ia] = 1\n _chgs[ia] = 1\n _chgs[ja] = -1\n bi.SetBondType( bo2bt['1.0'] )\n ai.SetFormalCharge(1)\n aj.SetFormalCharge(-1)\n break\n self._bom = _bom\n self._chgs = _chgs\n if self.i_remove_isotope:\n self.remove_isotope()\n try:\n Chem.SanitizeMol(self.m)\n smi = Chem.MolToSmiles(self.m, canonical=T)\n except:\n raise Exception(':: fixN5() failed??')\n self.smiles = smi", "def get_bridges(edges_list):\n\n # print(\"all edges:\", edges_list)\n\n # make a temporary graph\n temp_G = nx.Graph()\n\n # add all current edges to the graph\n for edge in edges_list:\n edge_node_1, edge_node_2 = edge\n temp_G.add_edge(edge_node_1, edge_node_2)\n\n # get all_bridges in temp graph\n bridges_all = list(nx.bridges(temp_G))\n\n # get set of edges with two traversals left (only want one of each, so use set)\n mult_trav_remaining = set([])\n\n for edge in edges_list:\n\n num_trav_remaining = edges_list.count(edge)\n\n if num_trav_remaining > 1:\n\n mult_trav_remaining.add(edge)\n\n mult_trav_remaining = list(mult_trav_remaining)\n\n # remove mult traversal edges from bridges list\n\n # print(\"bridges_ all:\", bridges_all)\n # print(\"\\nmult_trav_remaining:\", mult_trav_remaining)\n\n # make a new bridges list that contains only edges that don't have mult traversals left\n\n bridges_reduced = []\n\n for edge in bridges_all:\n # print(\"\\n\\nedge:\", edge)\n # print()\n if edge in mult_trav_remaining:\n continue\n # print()\n # print(f\"bridge {edge} is in {mult_trav_remaining}\")\n elif edge[::-1] in mult_trav_remaining:\n continue\n # print()\n # print(f\"bridge {edge} REVERSED is in {mult_trav_remaining}\")\n else:\n # print(f\"bridge {edge} is NOT in {mult_trav_remaining}\")\n\n bridges_reduced.append(edge)\n\n # return a list of true bridges\n return bridges_reduced", "def get_conjugated_nodes(self):\n sets = []\n self.get_backbone()\n m = self.mbb\n for bi in m.GetBonds():\n #print ' -- idx 
= ', bi.GetIdx()\n n = len(sets)\n iconj = bi.GetIsConjugated()\n ins = ( bt2bo[ bi.GetBondType() ] > 1 ) # is non-single bond?\n if iconj or ins:\n ia1, ia2 = bi.GetBeginAtomIdx(), bi.GetEndAtomIdx()\n set_i = set([ia1, ia2])\n if n == 0:\n sets.append( set_i )\n else:\n for j, set_j in enumerate(sets):\n if set_i.intersection( set_j ) > set([]):\n sets[j].update( set_i )\n else:\n if set_i not in sets: sets.append( set_i )\n #print '-- sets = ', sets\n sets_u = cim.merge_sets(sets)\n return sets_u", "def remove_charge_and_bond_order_from_guanidinium(offmol):\n for atom in offmol.atoms:\n if atom.element.symbol != \"C\":\n continue\n nitrogen_neighbors = 0\n for neighbor in atom.bonded_atoms:\n if neighbor.element.symbol == \"N\":\n nitrogen_neighbors += 1\n if nitrogen_neighbors != 3:\n continue\n atom.formal_charge = 0\n for neighbor in atom.bonded_atoms:\n neighbor.formal_charge = 0\n for bond in atom.bonds:\n # Set bond order 4, which will produce a \"$\" character. We later replace this with \"~\".\n bond.bond_order = 4", "def run_removing_edges(self):\n indices = np.where(self.X==1)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)", "def neutralise_raw(self):\n # kekulization has to be done, otherwise u will encounter\n # issues when assigning bond types later\n Chem.Kekulize(self.m)\n\n # get pairs of charged atoms\n self.get_charged_pairs()\n\n # eliminate the charges by rebuilding the molecule\n m = Chem.Mol()\n mc = Chem.EditableMol(m)\n for i, az in enumerate(self.zs):\n ai = Chem.Atom( az )\n ci = self.charges[i]\n if ci != 0:\n if ci == 1:\n filt = (self.cpairs[:,0] == i)\n if np.any(filt):\n ai.SetFormalCharge( 1 )\n elif ci == -1:\n filt = (self.cpairs[:,1] == i)\n if np.any(filt): ai.SetFormalCharge( -1 )\n else:\n print((' -- i, charges[i] = ', i, self.charges[i]))\n print(' #ERROR: abs(charge) > 1??')\n raise\n mc.AddAtom( ai )\n\n ijs = np.array( np.where( np.triu(self.bom) > 0 ) ).astype(np.int)\n nb = ijs.shape[1]\n for i in range(nb):\n i, j = ijs[:,i]\n mc.AddBond( i, j, bo2bt[ '%.1f'%self.bom[i,j] ] )\n\n m = mc.GetMol()\n m2 = assign_coords(m, self.coords)\n self.m = m2", "def get_core_bonds(core_xyz, inp):\n core_bonds = []\n\n if inp.core_en:\n dists = cdist(core_xyz, core_xyz)\n if inp.core_shape != \"shell\":\n logger.info(\"\\tBuilding elastic network based on first neighbors...\")\n close_dists = dists <= (2*inp.bead_radius+0.01)\n for i in range(len(dists)):\n ndx1 = i*1\n close_ndxs = np.where(close_dists[i])[0]\n if len(close_ndxs) == 1:\n dists_sorted = np.argsort(dists[i])\n close_ndxs = dists_sorted[[1,2,3,4,5,6]]\n for ndx2 in close_ndxs:\n if ndx2 != i and [ndx1, ndx2] not in core_bonds and [ndx2, ndx1] not in core_bonds:\n core_bonds.append([ndx1, ndx2])\n\n else:\n logger.info(\"\\tBuilding elastic network based on six nearest neighbours and one farthest neighbour...\")\n neighboring_bonds = []\n antipodal_bonds = []\n dists_sorted = np.argsort(dists, axis=1)\n for i in range(len(dists)):\n ndx1 = i*1\n close_ndxs = dists_sorted[i,[1,2,3,4,5,6]]\n for ndx2 in close_ndxs:\n if ndx2 != i and [ndx1, ndx2] not in core_bonds and [ndx2, ndx1] not in core_bonds:\n neighboring_bonds.append([ndx1, ndx2])\n antipodal_ndx = dists_sorted[i,-1]\n if antipodal_ndx != i and [ndx1, antipodal_ndx] not in core_bonds and [antipodal_ndx, ndx1] not in core_bonds:\n antipodal_bonds.append([ndx1, antipodal_ndx, \"antipodal\"])\n core_bonds = neighboring_bonds + 
antipodal_bonds\n\n return core_bonds", "def BlockNaturalDrop(self):\n # Checks for collision\n for i in self.coords:\n if i[1] == self.height - 1:\n for i in self.coords:\n self.matrix[i[1]][i[0]] = 1\n return() \n if self.matrix[i[1] + 1][i[0]] == 1:\n for i in self.coords:\n self.matrix[i[1]][i[0]] = 1\n return()\n self.coords = [(i[0], i[1] + 1) for i in self.coords]\n Grid.BlockToMatrix(self)", "def conj_inplace(a):", "def neighbors(i , j) :\n ns = []\n # vector de direction\n dx = [+1, +1, 0, 1]\n dy = [0, +1, 1, -1]\n for d in range(4) :\n ns.append((i + dx[d], j + dy[d]))\n #remove neagative element\n ns = [i for i in ns if i[0] >= 0 and i[1] >= 0]\n return ns", "def remove_neighbor(self):\n self.fono -= 1", "def grid_inflation(self):\n for obs in self.obstacle_list:\n\n inflation_x1 = round((obs[0][0]-self._inflation_radius)/self.step_size)\n\n inflation_y2 = round((obs[0][1] + obs[2] +self._inflation_radius)/self.step_size)\n\n inflation_x2 = round((obs[0][0] + obs[1] +self._inflation_radius)/self.step_size)\n\n inflation_y1 = round((obs[0][1] -self._inflation_radius)/self.step_size)\n\n self.grid[1, inflation_x1:inflation_x2+1,\n inflation_y1:inflation_y2+1] = INFLATION_COST\n\n # border inflation\n self.grid[1, 0:self.gridwidth+1, 0:round(self._inflation_radius/self.step_size)+1] = INFLATION_COST\n self.grid[1, 0:self.gridwidth+1, self.gridheight-round(self._inflation_radius / self.step_size):self.gridheight+1] = INFLATION_COST\n self.grid[1, 0:round(self._inflation_radius/self.step_size)+1, 0:self.gridheight+1] = INFLATION_COST\n self.grid[1, self.gridwidth-round(self._inflation_radius/self.step_size):self.gridwidth+1, 0:self.gridheight+1] = INFLATION_COST\n\n # if NEED_DRAW_INFLATED_GRID:\n # for i in range(self.gridwidth):\n # plt.scatter(i,0)\n # plt.scatter(i,self.gridheight)\n # for j in range(self.gridheight):\n # plt.scatter(0,j)\n # plt.scatter(self.gridwidth,j)\n # if self.grid[i, j] != 0:\n # plt.scatter(i,j)\n # plt.show()\n\n return self.grid" ]
[ "0.612162", "0.60860974", "0.60171133", "0.5589549", "0.55667216", "0.54495823", "0.543323", "0.54215306", "0.54030997", "0.53889775", "0.5369146", "0.5312899", "0.53013843", "0.52954197", "0.5294541", "0.52723455", "0.5264474", "0.5238258", "0.52358246", "0.5229589", "0.5214818", "0.5213973", "0.5210574", "0.52088845", "0.51846147", "0.51536304", "0.5149011", "0.51469445", "0.5142906", "0.512637" ]
0.62369883
0
Fills positions if trade history data is available
def fillPositions(self): if self.th is not None: self.df['POSITION'] = self.th.positions['Qty'] self.df['REGS'] = self.th.positions['REGS'] self.df['144A'] = self.th.positions['144A'] self.df['POSITION'].fillna(0, inplace=True) self.df['REGS'].fillna(0, inplace=True) self.df['144A'].fillna(0, inplace=True) self.df['RISK'] = -self.df['RISK_MID'] * self.df['POSITION'] / 10000.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def openPositionHistory(self):\n dt_only, tm_only = self.getDatetimeSplit()\n\n # GET OPEN POSITIONS\n open_positions_found = self.open_positions_history.find_one(\n {\"Date\": dt_only, \"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n open_positions = self.open_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n obj = {}\n\n for position in open_positions:\n\n strategy = position[\"Strategy\"]\n\n buy_price = position[\"Buy_Price\"]\n\n last_price = position[\"Last_Price\"]\n\n qty = position[\"Qty\"]\n\n change = (last_price * qty) - (buy_price * qty)\n\n if strategy not in obj:\n\n obj[strategy] = {\"chng\": 0}\n\n obj[strategy][\"chng\"] += change\n\n if not open_positions_found:\n\n self.open_positions_history.insert_one({\n \"Trader\": self.user[\"Name\"],\n \"Date\": dt_only,\n \"Asset_Type\": self.asset_type,\n \"Account_ID\": self.account_id,\n \"Open_Position_Strategies\": obj\n })", "def _get_positions(self):\n pos_url = self.pos_url % (self.date, self.instrument, self.exchange)\n self.positions = pd.read_csv(pos_url, parse_dates=[0],\n date_parser=lambda t: pd.to_datetime(str(t), format='%Y%m%dT%H%M%S'))\n self.positions.fillna(np.nan)\n self.positions.index = pd.to_datetime(self.positions.time, unit='s')\n self.positions.columns = ['time', 'bid', 'bid_depth', 'bid_depth_total', 'ask', 'ask_depth', 'ask_depth_total']\n self.positions = self.positions[self.exchange_pre:self.exchange_post]", "def closedPositionHistory(self):\n dt_only, tm_only = self.getDatetimeSplit()\n\n # GET CURRENT POSITIONS COUNT\n closed_positions_found = self.closed_positions_history.find_one(\n {\"Date\": dt_only, \"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n closed_positions = self.closed_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n obj = {}\n\n for position in closed_positions:\n\n strategy = position[\"Strategy\"]\n\n buy_price = position[\"Buy_Price\"]\n\n sell_price = position[\"Sell_Price\"]\n\n qty = position[\"Qty\"]\n\n change = (sell_price * qty) - (buy_price * qty)\n\n if strategy not in obj:\n\n obj[strategy] = {\"chng\": 0}\n\n obj[strategy][\"chng\"] += change\n\n if not closed_positions_found:\n\n self.closed_positions_history.insert_one({\n \"Trader\": self.user[\"Name\"],\n \"Date\": dt_only,\n \"Asset_Type\": self.asset_type,\n \"Account_ID\": self.account_id,\n \"Closed_Position_Strategies\": obj\n })", "def _fill_market_order(self, order_event):\n if order_event.quantity == 0:\n return\n fill_time = self._get_fill_time(order_event.order_time, order_event.symbol)\n sym_data = self.curr_day_data[order_event.symbol]\n direction = self._get_order_direction(order_event)\n if direction == 1:\n fill_price = sym_data['level_1_price_sell'].asof(fill_time)\n self.create_fill_event(order_event, fill_price, fill_time)\n elif direction == -1:\n fill_price = sym_data['level_1_price_buy'].asof(fill_time)\n self.create_fill_event(order_event, fill_price, fill_time)", "def update_past_orders(self):\n\n #TODO: Implement a method to grab the order history for just one stock\n all_past_orders = self.portfolio.all_past_orders() #This is REALLY inefficient (takes forever)\n\n #Now pre-parse into commonly used categories\n self.past_orders = all_past_orders[all_past_orders['symbol']==self.symbol] #Past orders for only this stock\n 
self.filled_orders = self.past_orders[self.past_orders['state']=='filled'] #Only orders that were filled (not canceled)\n\n return True", "def update_positions_from_fill(self, fill):\n # Check whether the Fill is a Buy or a Sell\n fill_dir = 0\n if fill.direction == 'BUY':\n fill_dir = 1\n if fill.direction == 'SELL':\n fill_dir = -1\n \n # Update positions list with new quantities\n self.current_positions[fill.symbol] += fill_dir*fill.quantity\n print('positions: ', self.current_positions)", "def init_data_for_positions(db_data):\n positions = db_data.get('position')\n if positions is not None:\n rows = positions.get('data')\n for row in rows:\n position = Position(name=row)\n db_add_and_commit(db, position)", "def __populate_historical_trade_data(self):\n\n trade_data = self.__transactions.pivot_table(\n index=\"Date\",\n columns=[\"Ticker\"],\n values=[\n \"Quantity\",\n \"Investment\",\n ],\n aggfunc={\"Quantity\": np.sum, \"Investment\": np.sum},\n )\n\n # Make historical prices columns a multi-index. This helps the merging.\n self.portfolio_historical_prices.columns = pd.MultiIndex.from_product(\n [[\"Close\"], self.portfolio_historical_prices.columns]\n )\n\n trade_data = pd.merge(\n trade_data,\n self.portfolio_historical_prices,\n how=\"outer\",\n left_index=True,\n right_index=True,\n )\n\n trade_data[\"Close\"] = trade_data[\"Close\"].fillna(method=\"ffill\")\n trade_data.fillna(0, inplace=True)\n\n trade_data[\"Quantity\"] = trade_data[\"Quantity\"].cumsum()\n trade_data[\"Investment\"] = trade_data[\"Investment\"].cumsum()\n trade_data[\"Investment\", \"Total\"] = trade_data[\"Investment\"].sum(axis=1)\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Investment delta\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"Investment\"].diff(periods=1).fillna(trade_data[\"Investment\"]))\n\n # End Value = Quantity * Close\n trade_data[pd.MultiIndex.from_product([[\"End Value\"], self.tickers_list])] = (\n trade_data[\"Quantity\"][self.tickers_list]\n * trade_data[\"Close\"][self.tickers_list]\n )\n\n trade_data.loc[:, (\"End Value\", \"Total\")] = trade_data[\"End Value\"][\n self.tickers_list\n ].sum(axis=1)\n\n # Initial Value = Previous End Value + Investment changes\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Initial Value\"], self.tickers_list + [\"Total\"]]\n )\n ] = 0\n\n trade_data[\"Initial Value\"] = trade_data[\"End Value\"].shift(1) + trade_data[\n \"Investment\"\n ].diff(periods=1)\n\n # Set first day Initial Value as the Investment (NaNs break first period)\n for t in self.tickers_list + [\"Total\"]:\n trade_data.at[trade_data.index[0], (\"Initial Value\", t)] = trade_data.iloc[\n 0\n ][\"Investment\"][t]\n\n trade_data = trade_data.reindex(\n columns=[\n \"Quantity\",\n \"Investment\",\n \"Investment delta\",\n \"Close\",\n \"Initial Value\",\n \"End Value\",\n ],\n level=0,\n )\n self.historical_trade_data = trade_data", "def test_transact_position_quantity_zero():\n # Create the PositionHandler, Transaction and\n # carry out a transaction\n ph = PositionHandler()\n asset = 'EQ:AMZN'\n dt = pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC)\n new_dt = pd.Timestamp('2015-05-06 16:00:00', tz=pytz.UTC)\n\n transaction_long = Transaction(\n asset,\n quantity=100,\n dt=dt,\n price=960.0,\n order_id=123, commission=26.83\n )\n ph.transact_position(transaction_long)\n\n transaction_close = Transaction(\n asset,\n quantity=-100,\n dt=new_dt,\n price=980.0,\n order_id=234,\n commission=18.53\n )\n ph.transact_position(transaction_close)\n\n # Go long and then 
close, then check that the\n # positions OrderedDict is empty\n assert ph.positions == OrderedDict()", "def slot_fullhistory(self, dummy_sender, data):\r\n (history) = data\r\n\r\n if not len(history):\r\n self.debug(\"### history download was empty\")\r\n return\r\n\r\n def get_time_round(date):\r\n \"\"\"round timestamp to current candle timeframe\"\"\"\r\n return int(date / self.timeframe) * self.timeframe\r\n\r\n #remove existing recent candle(s) if any, we will create them fresh\r\n date_begin = get_time_round(int(history[0][\"date\"]))\r\n while len(self.candles) and self.candles[0].tim >= date_begin:\r\n self.candles.pop(0)\r\n\r\n new_candle = OHLCV(0, 0, 0, 0, 0, 0) #this is a dummy, not actually inserted\r\n count_added = 0\r\n for trade in history:\r\n date = int(trade[\"date\"])\r\n price = int(trade[\"price_int\"])\r\n volume = int(trade[\"amount_int\"])\r\n time_round = get_time_round(date)\r\n if time_round > new_candle.tim:\r\n if new_candle.tim > 0:\r\n self._add_candle(new_candle)\r\n count_added += 1\r\n new_candle = OHLCV(\r\n time_round, price, price, price, price, volume)\r\n new_candle.update(price, volume)\r\n\r\n # insert current (incomplete) candle\r\n self._add_candle(new_candle)\r\n count_added += 1\r\n self.debug(\"### got %d updated candle(s)\" % count_added)\r\n self.ready_history = True\r\n self.signal_fullhistory_processed(self, None)\r\n self.signal_changed(self, (self.length()))", "def set_filled_orders(self, filled_orders):\n # set to class property\n self._filled_orders = filled_orders\n\n # start spread analysis\n spread = Spread(filled_orders=filled_orders)\n spread.get_name()\n spread.get_spread()\n\n filled_order = self._filled_orders[0]\n \"\"\":type: FilledOrder\"\"\"\n\n underlying = filled_order.underlying\n future = filled_order.future\n forex = filled_order.forex\n\n # primary field\n if self.position_set.name == '':\n self.position_set.name = spread.name\n\n if self.position_set.spread == '':\n self.position_set.spread = spread.spread\n\n self.position_set.status = 'OPEN'\n if not self.position_set.start_date:\n self.position_set.start_date = filled_order.trade_summary.date\n\n self.position_set.underlying = underlying\n self.position_set.future = future\n self.position_set.forex = forex\n\n # position stages\n if spread.get_spread() != 'CUSTOM':\n stage_module = import_module(\n '%s.%s' % (self.stage_path, spread.get_name(module=True, lower=True))\n )\n class_name = 'Stage%s' % spread.get_spread_module()\n class_obj = getattr(stage_module, class_name)\n self.stages = class_obj(filled_orders=filled_orders).create_stages()", "def FreshStart(self):\n # Create a vector holding historical data for the purpose of plotting.\n # The length may vary because the sampling speed of different are\n # sensors may vary.\n\n self.history = {'time': collections.deque( [], self.history_length ),\\\n 'data': collections.deque( [], self.history_length )\n }", "def before_run(self):\n self.trade_data = []\n return self", "def test_account_positions(self):\n pt: PerpetualTrading = PerpetualTrading()\n aPos: Position = Position(\"market1\", PositionSide.LONG, Decimal(\"0\"), Decimal(\"100\"), Decimal(\"1\"), Decimal(\"5\"))\n pt._account_positions[\"market1\"] = aPos\n self.assertEqual(len(pt.account_positions), 1)\n self.assertEqual(pt.account_positions[\"market1\"], aPos)\n self.assertEqual(pt.get_position(\"market1\"), aPos)\n self.assertEqual(pt.get_position(\"market2\"), None)", "def on_order(self, order: OrderData):\n 
self.position_calculator.update_position(order)\n\n self.current_pos = self.position_calculator.pos\n self.avg_price = self.position_calculator.avg_price\n\n if order.status == Status.ALLTRADED and order.vt_orderid in (self.long_orders + self.short_orders):\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n if order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n self.last_filled_order = order\n\n for ids in (self.long_orders + self.short_orders + self.profit_orders):\n self.cancel_order(ids)\n\n if abs(self.position_calculator.pos) < self.fixed_size:\n return\n\n step = self.get_step()\n\n # tick 存在且仓位数量还没有达到设置的最大值.\n if self.tick and abs(self.position_calculator.pos) < self.max_pos_size * self.fixed_size:\n buy_price = order.price - step * self.grid_step\n sell_price = order.price + step * self.grid_step\n\n buy_price = min(self.tick.bid_price_1 * (1 - 0.0001), buy_price)\n sell_price = max(self.tick.ask_price_1 * (1 + 0.0001), sell_price)\n\n long_ids = self.buy(buy_price, self.fixed_size)\n short_ids = self.sell(sell_price, self.fixed_size)\n\n self.long_orders.extend(long_ids)\n self.short_orders.extend(short_ids)\n\n if order.status == Status.ALLTRADED and order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n if abs(self.position_calculator.pos) < self.fixed_size:\n self.cancel_all()\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.stop_orders:\n self.stop_orders.remove(order.vt_orderid)\n\n self.put_event()", "def onUpdate(self):\n\t\t#fill all the order that satisfied\n\t\tfor x in filter(self.isFilled,self().keys()):\n\t\t\tself[x].fill()", "def update_historical_data():\n print('updating historical data')\n for sp in SupplyPoint.objects.filter(supplypointwarehouserecord__isnull=True).exclude(type__code=SupplyPointCodes.ZONE):\n update_historical_data_for_supply_point(sp)", "def update_positions_from_fill(self, fill):\r\n\r\n fill_dir = 0\r\n if fill.direction == 'BUY':\r\n fill_dir = 1\r\n if fill.direction == 'SELL':\r\n fill_dir = -1\r\n\r\n self.current_positions[fill.symbol] += fill_dir*fill.quantity", "def _get_future_stock_forecast_temp(self):\n # for scrapping_item in self: \n scrap_qty =0\n # self.compute_stock_at_once()\n for data in self:\n for quant in data.quant_ids:\n if quant.location_id.id == 23:\n scrap_qty= quant.qty \n _logger.error(scrap_qty)\n data.stock_forecast_temp = scrap_qty", "def update_positions_from_fill(self, fill):\n # Check whether the Fill is a Buy or a Sell\n fill_dir = 0\n if fill.direction == 'BUY':\n fill_dir = 1\n if fill.directoin == 'SELL':\n fill_dir = -1\n \n # Update positions list with new quantities\n self.current_positions[fill.symbol] += fill_dir*fill.quantity", "def update_positions_from_fill(self, fill):\n fill_dir = 0\n if fill.direction == 'BUY':\n fill_dir = 1\n if fill.direction == 'SELL':\n fill_dir = -1\n\n self.current_positions[fill.symbol] += fill_dir*fill.quantity", "def do(self, market_data):\r\n self.data.history = self.data.history + market_data", "def liquidate(self) -> None:\n if self.position.is_close:\n return\n\n if self.position.pnl > 0:\n self.take_profit = self.position.qty, self.price\n else:\n self.stop_loss = 
self.position.qty, self.price", "async def on_position_updated(self, position: MetatraderPosition):\n for i in range(len(self._positions)):\n if self._positions[i]['id'] == position['id']:\n self._positions[i] = position\n break\n else:\n self._positions.append(position)", "def update_foreign_keys(self, date=''):\n position_instruments = list()\n position_futures = list()\n position_forexs = list()\n profits_losses = list()\n profit_loss_query = None\n\n position_summary_query = Q()\n account_summary_query = Q()\n if date != '':\n position_summary_query = Q(position_summary__date=date)\n account_summary_query = Q(account_summary__date=date)\n\n if self.position_set.id is None:\n raise ValueError('Invalid position_set id, please save before running update_fk.')\n\n if self.position_set.status == 'CLOSE':\n raise ValueError('Position_set already closed, please use another position_set.')\n\n # update filled orders\n for filled_order in self.filled_orders:\n filled_order.position_set = self.position_set\n filled_order.save()\n\n if self.position_set.underlying:\n # update position instrument and profit loss\n PositionInstrument.objects.filter(\n Q(underlying=self.position_set.underlying) & Q(position_set=None)\n ).filter(position_summary_query).update(position_set=self.position_set)\n\n position_instruments = PositionInstrument.objects.filter(\n position_set=self.position_set\n ).filter(position_summary_query)\n\n profit_loss_query = Q(underlying=self.position_set.underlying)\n\n elif self.position_set.future:\n # update position future and profit loss\n PositionFuture.objects.filter(\n Q(future=self.position_set.future) & Q(position_set=None)\n ).filter(position_summary_query).update(position_set=self.position_set)\n\n position_futures = PositionFuture.objects.filter(\n position_set=self.position_set\n ).filter(position_summary_query)\n\n profit_loss_query = Q(future=self.position_set.future)\n\n elif self.position_set.forex:\n # update position forex\n PositionForex.objects.filter(\n Q(forex=self.position_set.forex) & Q(position_set=None)\n ).filter(position_summary_query).update(position_set=self.position_set)\n\n position_forexs = PositionForex.objects.filter(\n position_set=self.position_set\n ).filter(position_summary_query)\n\n if profit_loss_query:\n ProfitLoss.objects.filter(profit_loss_query & Q(position_set=None)).filter(\n account_summary_query).update(position_set=self.position_set)\n\n profits_losses = ProfitLoss.objects.filter(\n profit_loss_query & Q(position_set=self.position_set)\n ).filter(account_summary_query)\n\n return dict(\n filled_orders=self.filled_orders,\n position_instruments=position_instruments,\n position_futures=position_futures,\n position_forexs=position_forexs,\n profits_losses=profits_losses,\n )", "def reset(self,\n prices,\n offset):\n assert isinstance(prices, data.Prices)\n assert offset >= self.bars_count - 1\n self.have_position = False\n self.open_price = 0.0\n self._prices = prices\n self._offset = offset", "def place_orders(self):\n buy_orders = []\n sell_orders = []\n buy_stop_order = {}\n sell_stop_order = {}\n order_status = 0\n \"\"\"order_status参数说明\n 0: running_qty为0, 维持原样\n 1: self.running_qty > 0, 买卖都变化, 买单按照offset2, 卖单按照offset3\n 2: 买单维持不变, 卖单按照offset3\n 3: self.running_qty < 0, 买卖都变化, 买单按照offset3, 卖单按照offset2\n 4: 卖单维持不变, 买单按照offset3\n 5: 追加指定订单\n 6: 取消指定订单\n 7: self.running_qty > 0, 买单按照offset2, 卖单不变\n 8: self.running_qty < 0, 买单不变, 卖单按照offset2\n \"\"\"\n # Create orders from the outside in. 
This is intentional - let's say the inner order gets taken;\n # then we match orders from the outside in, ensuring the fewest number of orders are amended and only\n # a new order is created in the inside. If we did it inside-out, all orders would be amended\n # down and a new order would be created at the outside.\n position_grade = self.get_position_grade()\n avgCostPrice = self.exchange.get_position()['avgCostPrice']\n print ('position_grade: %s ' % position_grade)\n print ('running_qty: %s ' % self.running_qty)\n print ('ORDER_START_SIZE: %s ' % self.ORDER_START_SIZE)\n schedule.run_pending()\n\n if(self.countdown == True): #设置倒数计时, 60秒后delay_order_check设为True, 可以重新挂非清仓方向的价格\n self.cycleclock = self.cycleclock - 1\n if(self.cycleclock <= 0):\n if(self.check_last_price_upordown() == True):\n self.cycleclock = 5\n else:\n self.countdown = False\n self.delay_order_check = True\n\n if(self.get_ticker()['last'] > STOP_PRICE and self.buy_only_flag == False):\n self.buy_only_flag = True\n if(self.running_qty < 0):\n self.clear_position(buy_orders, sell_orders)\n return self.converge_orders(buy_orders, sell_orders, order_status)\n\n if(self.get_5th_max_MA15_defference(getmessage = 1) > 100):\n self.stop_market_maker_flag = True\n self.cancel_all_orders_flag = True\n self.buy_only_flag = False\n self.sell_only_flag = False\n tg_important_message('上涨差值超过100,暂停交易')\n\n if(self.stop_market_maker_flag == True and self.cancel_all_orders_flag == True):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Cancel all orders\")\n elif(self.stop_market_maker_flag == True and self.clear_position_flag == True):\n if(self.running_qty != 0):\n self.clear_position(buy_orders, sell_orders)\n else:\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Market_maker has stopped. No orders, no positions now\")\n elif(self.stop_market_maker_flag == True):\n if(self.running_qty > 0):\n if avgCostPrice != None:\n sell_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice - STOP_SIZE, self.instrument['tickSize']), \"Sell\", abs(self.running_qty))\n order_status = 4\n elif(self.running_qty < 0):\n if avgCostPrice != None:\n buy_stop_order = self.prepare_stop_order(math.toNearest(avgCostPrice + STOP_SIZE, self.instrument['tickSize']), \"Buy\", abs(self.running_qty))\n order_status = 2\n elif(self.running_qty == 0 and self.last_running_qty == 0):\n if (len(self.exchange.get_orders()) != 0):\n self.exchange.cancel_all_orders()\n logger.info(\"Market_maker has stopped. 
No orders, no positions now\")\n\n elif(self.running_qty == 0 and self.restart_flag == False):\n if(self.check_last_price_upordown() == True):\n self.restart_flag = True\n self.countdown_restart = 5\n return\n self.ORDER_START_SIZE = self.start_XBt // 1000000 * START_SIZE_MAGNIFICATION #新算法, 每次初始交易重新设定ORDER_START_SIZE\n order_status = 0\n if not(self.sell_only_flag == True):\n buy_orders.append(self.prepare_order(-1, order_status))\n if not(self.buy_only_flag == True):\n sell_orders.append(self.prepare_order(1, order_status))\n self.countdown = False\n self.restart_flag = True\n self.countdown_restart = 30\n\n elif(self.running_qty == 0 and self.restart_flag == True):\n self.countdown_restart = self.countdown_restart - 1\n if(self.countdown_restart <= 0):\n self.restart_flag = False\n return\n\n elif(self.running_qty != 0 and self.running_qty != self.last_running_qty): #仓位变动后开始倒计时60秒, 60秒后delay_order_check为True, 可以重新挂非清仓方向的价格\n if(self.running_qty > 0):\n order_status = 2\n sell_orders.append(self.prepare_order(1, order_status))\n elif(self.running_qty < 0):\n order_status = 4\n buy_orders.append(self.prepare_order(-1, order_status))\n self.cycleclock = 60\n self.countdown = True\n self.restart_flag = False\n self.delay_order_check = False\n\n elif(self.running_qty != 0 and self.running_qty == self.last_running_qty and self.delay_order_check == True): #可以重新挂非清仓方向的价格\n i = abs(self.running_qty) // (self.ORDER_START_SIZE//4) + 1\n if(self.running_qty > 0):\n order_status = 7\n if(i <= 3):\n buy_orders.append(self.prepare_order(-i, order_status))\n if(self.running_qty < 0):\n order_status = 8\n if(i <= 3):\n sell_orders.append(self.prepare_order(i, order_status))\n self.cycleclock = 30\n self.countdown = True\n self.delay_order_check = False\n\n else:\n if(self.running_qty > 0):\n order_status = 2\n sell_orders.append(self.prepare_order(1, order_status))\n elif(self.running_qty < 0):\n order_status = 4\n buy_orders.append(self.prepare_order(-1, order_status))\n\n if(self.last_running_qty != self.running_qty):\n self.send_tg_message()\n self.last_running_qty = self.running_qty\n self.reset = False\n buy_orders = list(filter(None.__ne__, buy_orders)) #去除None\n sell_orders = list(filter(None.__ne__, sell_orders)) #去除None\n print('BXBT_MA15: %s' % self.get_BXBT_MA15())\n print(buy_orders)\n print(sell_orders)\n if((self.last_buy_orders == buy_orders and self.last_sell_orders == sell_orders) or (buy_orders == [] and sell_orders == [])):\n print('order no change, return')\n return\n else:\n self.last_buy_orders = buy_orders\n self.last_sell_orders = sell_orders\n self.converge_stop_order(buy_stop_order, sell_stop_order)\n return self.converge_orders(buy_orders, sell_orders, order_status)", "async def _update_live(self, trade: Dict[str, Any]):\n\n order = await self.api.get_order(trade['pair'], trade['order_id'])\n if order is None:\n self.log.error(\"Could not update trade {}.\", trade['order_id'])\n return\n\n is_open = order['open']\n quantity = order['quantity']\n remaining = order['remaining']\n unit_value = order['value']\n fees = order['fees']\n\n trade['filled'] = not is_open\n trade['quantity'] = quantity\n trade['remaining'] = remaining\n\n if trade['filled'] and unit_value is not None:\n base_mult = await self.market.get_pair_base_mult(config['trade_base'], trade['pair'])\n adjusted_value = unit_value * base_mult\n trade['open_value'] = adjusted_value\n trade['base_value'] = base_mult\n trade['fees'] = fees * base_mult\n\n self.log.info(\"Updated trade {}: filled {}, quantity {}, 
remaining {}.\",\n trade['order_id'], trade['filled'], quantity, remaining)", "def reset(self):\n self.state = {\n \"price\" : self.history[0]['price'],\n \"timestamp\" : self.history[0]['timestamp'],\n \"prev_price\" : 0,\n \"imm_prev_transaction\" : 0,\n \"prev_transaction\" : 0,\n \"day\" : self.history[0]['day'],\n \"hour\" : self.history[0]['hour'],\n \"minute\" : self.history[0]['minute'],\n \"posture\" : \"\",\n \"balance\" : self.balance,\n \"bag\" : self.balance,\n \"value\" : self.balance,\n \"transaction\" : 0\n }\n self.posture = 1\n self.states = []\n self.transactions = []\n self.balance = 100\n self.bag = 100\n self.pointer = 0\n self.profit = 0\n self.value = 0\n self.initial_value = self.calculate_value()\n self.trade(1).next()", "def set_filled_order(self):\n self.set_values(\n start_phrase='Filled Orders',\n end_phrase=None,\n start_with=2,\n end_until=-1,\n prop_keys=self.filled_order_keys,\n prop_name='filled_order'\n )\n\n self.filled_order = map(self.del_empty_keys, self.filled_order)\n self.fillna_dict_with_exists(\n self.filled_order,\n 'exec_time',\n ('exec_time', 'spread', 'order')\n )\n\n self.replace_nan(self.filled_order)\n self.convert_type(self.filled_order, 'exec_time', self.convert_datetime, 0)\n\n self.convert_type(self.filled_order, 'quantity', int, 0)\n self.convert_type(self.filled_order, 'strike', float, 0.0)\n self.convert_type(self.filled_order, 'price', float, 0.0)\n self.convert_type(self.filled_order, 'net_price', float, 0.0)\n self.convert_type(self.filled_order, 'expire_date', str, '')" ]
[ "0.6459351", "0.6103853", "0.6029683", "0.59873354", "0.59233195", "0.57485735", "0.5711293", "0.5682093", "0.56430924", "0.56382126", "0.5636288", "0.5630813", "0.56141084", "0.5610457", "0.5599238", "0.55738753", "0.5553225", "0.5523361", "0.551922", "0.55105466", "0.54689604", "0.5459916", "0.5435554", "0.54247314", "0.5417075", "0.54049945", "0.54043156", "0.5376626", "0.53614295", "0.5321425" ]
0.6728969
0
Starts live feed from Bloomberg.
def startUpdates(self): # Analytics stream self.blptsAnalytics = blpapiwrapper.BLPTS() self.streamWatcherAnalytics = StreamWatcher(self, BloombergQuery.ANALYTICS) self.blptsAnalytics.register(self.streamWatcherAnalytics) # Price only stream self.blptsPriceOnly = blpapiwrapper.BLPTS() self.streamWatcherPriceOnly = StreamWatcher(self, BloombergQuery.PRICEONLY) self.blptsPriceOnly.register(self.streamWatcherPriceOnly) # Price change subscription self.streamWatcherBID = StreamWatcher(self,BloombergQuery.BID) self.bbgstreamBIDEM = blpapiwrapper.BLPStream(list((self.embondsisins + BBGHand + ' Corp').astype(str)), 'BID', 0) self.bbgstreamBIDEM.register(self.streamWatcherBID) self.bbgstreamBIDEM.start() # Risk free bonds: no streaming as too many updates - poll every 15 minutes rfRequest = blpapiwrapper.BLPTS(list((self.rfbondsisins + '@CBBT' + ' Corp').astype(str)), self.bbgPriceRFQuery) self.RFtimer = RFdata(900, rfRequest, self) self.BDMdata = BDMdata(900, self) #15 MINUTES self.BDMEODsave = BDMEODsave(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start():\n print('Running...')\n with Feed(Config.database) as feed:\n feed.refresh()", "def start(self):\r\n self.debug(\"### starting gox streaming API, trading %s%s\" %\r\n (self.curr_base, self.curr_quote))\r\n self.client.start()", "def feed() -> None:\n ...", "def _start_live(self):\n StartLiveData(FromNow=False, FromTime=False, FromStartOfRun=True, UpdateEvery=1.0,\n Instrument=\"ISIS_Kafka_Event\", RunTransitionBehavior=\"Stop\", OutputWorkspace=self.outputWs,\n Address=self.listenerHost, PreserveEvents=True, AccumulationMethod=\"Add\",\n InstrumentName=self.instrumentName)\n\n # Grab most recent live data algorithm handle\n self._monitorLiveDataHandle = api.AlgorithmManagerImpl.Instance().newestInstanceOf(\"MonitorLiveData\")", "def run_rss(self):\n\n pass", "def start(self):\n\t\tself.stream.start_stream()", "def start(self):\n self.stream.start()\n self.running = True\n self.update()", "def start(self) -> None:\n\n self.bus.subscribe(\"server:ready\", self.setup)\n self.bus.subscribe(\"bookmarks:find:id\", self.find_id)\n self.bus.subscribe(\"bookmarks:find:url\", self.find_url)\n self.bus.subscribe(\"bookmarks:add\", self.add)\n self.bus.subscribe(\"bookmarks:add:fulltext\", self.add_full_text)\n self.bus.subscribe(\"bookmarks:domaincount\", self.domain_count)\n self.bus.subscribe(\"bookmarks:search\", self.search)\n self.bus.subscribe(\"bookmarks:prune\", self.prune)\n self.bus.subscribe(\"bookmarks:recent\", self.recent)\n self.bus.subscribe(\"bookmarks:tags:all\", self.all_tags)\n self.bus.subscribe(\"bookmarks:remove\", self.remove)\n self.bus.subscribe(\"bookmarks:repair\", self.repair)", "def feed(self) -> None:", "def start(self):\n while True:\n self.pull_accounts_rolls()\n sleep(PULL_FREQUENCY_SECONDS)", "def start(self):\n self.get(self.url)", "def start_stream(self):\n pass", "def _really_start_hb(self):\n if self._beating and not self.hb_stream.closed():\n self._hb_periodic_callback.start()", "def start(self):\n\t\tself.load_market_data = True\n\t\twhile(self.load_market_data):#TODO lock\n\t\t\tif(len(self.registered_symbol_list) > 0):\n\t\t\t\tOMSLogger.info(\"Start loading market data for registered symbols...\")\n\t\t\t\tself.__reload_market_data__(self.market_data_list, self.registered_symbol_list)\n\t\t\t\tOMSLogger.info(\"Successfully loaded market data for registered symbols!\")\n\t\t\ttime.sleep(10)\t\t\t\n\n\t\t# print self.market_data_list", "def public_market_data_feed(config, state):\n\n # Sleep until the next market event\n while not state.stopper.is_set():\n\n state.lock.acquire()\n while not state.event_queue.empty():\n\n # Get next event\n event = state.event_queue.get()\n\n # TODO: ugly\n if isinstance(event, dict):\n symbol = event['instrument']\n message_type = event['message-type']\n else:\n symbol = event.instrument\n message_type = event.message_type\n\n for client in state.get_market_data_clients():\n if client.handshaken and client.snapshot_sent:\n subscriptions = client.subscriptions\n if symbol in subscriptions:\n topics = client.subscriptions[symbol]\n if message_type in ['A', 'X', 'M']:\n if 'orderBookL2' in topics:\n if not isinstance(event, dict):\n message = event.get_message()\n messaging.send_data(client.socket, message, client.encoding)\n else:\n message = json.dumps(event)\n messaging.send_data(client.socket, message, client.encoding)\n\n elif message_type in ['E']:\n if 'trade' in topics:\n if not isinstance(event, dict):\n message = event.get_message()\n messaging.send_data(client.socket, message, client.encoding)\n 
else:\n message = json.dumps(event)\n messaging.send_data(client.socket, message, client.encoding)\n\n state.get_current_lob_state(event['instrument']).print()\n\n state.lock.release()\n\n print('Market data dispatching stopped.')", "def start_stream(self):\n self.handle = lt.add_magnet_uri(self.lt_ses, self.queue[0].magnet_link, # pylint: disable=no-member\n self.params)\n self.handle.set_sequential_download(True)\n\n self.stream_thread = threading.Thread(target=self._stream,\n name='stream')\n self.stream_thread.start()", "def firstPass(self, priorityBondList=[]):\r\n self.lastRefreshTime = datetime.datetime.now()\r\n if priorityBondList == []:\r\n emptyLines = list(self.df.index)\r\n isins = self.embondsisins + BBGHand + ' Corp'\r\n else:\r\n emptyLines = priorityBondList\r\n isins = self.df.loc[priorityBondList, 'ISIN'] + BBGHand + ' Corp'\r\n isins = list(isins.astype(str))\r\n blpts = blpapiwrapper.BLPTS(isins, self.bbgPriceLongQuery)\r\n blptsStream = StreamWatcher(self,BloombergQuery.FIRSTPASS)\r\n blpts.register(blptsStream)\r\n blpts.get()\r\n blpts.closeSession()\r\n\r\n isins = self.rfbondsisins + ' @CBBT Corp'\r\n isins = list(isins.astype(str))\r\n blpts = blpapiwrapper.BLPTS(isins, self.bbgPriceRFQuery)\r\n blptsStream = StreamWatcher(self, BloombergQuery.FIRSTPASS)\r\n blpts.register(blptsStream)\r\n blpts.get()\r\n blpts.closeSession()\r\n\r\n specialBondList = list(set(emptyLines) & set(SPECIALBONDS))\r\n specialIsins = map(lambda x:self.df.at[x,'ISIN'] + BBGHand + ' Corp',specialBondList)\r\n blpts = blpapiwrapper.BLPTS(specialIsins, self.bbgPriceLongSpecialQuery)\r\n specialbondStream = StreamWatcher(self,BloombergQuery.FIRSTPASS)\r\n blpts.register(specialbondStream)\r\n blpts.get()\r\n blpts.closeSession()\r\n\r\n for bond in emptyLines:\r\n self.updateStaticAnalytics(bond) # This will update benchmarks and fill grid. 
Has to be done here so all data for benchmarks is ready.\r", "def start(self) -> None:\n self.bus.subscribe(\"cache:ready\", self.revive)\n self.bus.subscribe(\"scheduler:add\", self.add)\n self.bus.subscribe(\"scheduler:persist\", self.persist)\n self.bus.subscribe(\"scheduler:remove\", self.remove)\n self.bus.subscribe(\"scheduler:upcoming\", self.upcoming)\n self.scheduler = sched.scheduler(time.time, time.sleep)\n cherrypy.process.plugins.Monitor.start(self)", "def run(self):\n self.arbiter.start()", "def start(self):\n\n # Start listening for records\n self._run_loop(True)\n # There might still be records in the queue.\n self._run_loop(False)", "def start(self):\r\n if self._ready:\r\n return\r\n\r\n self._start()\r\n self._ready = True", "def start_updater(self, interval, clbk):\n self._scheduler = BlockingScheduler(executors={\n 'default': {'type': 'threadpool', 'max_workers': 1}\n })\n\n def job():\n clbk(self.check_feeds())\n\n self._scheduler.add_job(job, trigger='interval', minutes=interval)\n self._scheduler.start()", "def start(self):\n self.open()\n #t = Thread(target=self._cache_update, args=())\n #t.daemon = True\n #t.start()", "def main():\n f = FeedHandler()\n f.add_feed(Coinbase(max_depth=10, channels=[L2_BOOK, TRADES, TICKER],\n symbols=['BTC-USD'],\n callbacks={TRADES: TradeMongo('coinbase', collection='trades'),\n L2_BOOK: BookMongo('coinbase', collection='l2_book'),\n TICKER: TickerMongo('coinbase', collection='ticker')\n }))\n\n f.run()", "def start(self):\n print \"starting to crawler qsbk's page(Enter Q or q to quit)\"\n print\n self.enable = True\n self.load_page()\n # a variabel to control counts\n nowpage = 0\n while self.enable:\n if len(self.stories) > 0:\n # get a page stories\n page_stories = self.stories[0]\n nowpage += 1\n del self.stories[0]\n # print stories\n self.print_one_story(page_stories, nowpage)", "def start(self) -> None:\n self._stream.start()", "def start_engine():\r\n traffic = TrafficCollector()\r\n weather = WeatherController()\r\n client = MongoClient()\r\n db = client.jam_forecaster\r\n\r\n scheduler = BlockingScheduler()\r\n scheduler.add_job(get_data, trigger='cron', hour='6-22', minute='*/5', second='0', max_instances=10, args=[traffic, weather, db])\r\n scheduler.start()", "def _start_acquisition(self):\n # escape\n if self._poll_interval is None or self._poll_interval == 0:\n return\n # setup polling memory and base date\n polling_memory = {}\n base_date = datetime.now(pytz.utc)\n # endless loop\n while self.comb_state == vars.COMB_STATE_ACTIVE:\n # prepare poll\n lst_vals = []\n # load data for all mappings\n for mapping in self._obsmappings:\n last_read_date = polling_memory.get(mapping.obs_uri, base_date)\n res = self._query(mapping, from_date=last_read_date)\n for row in res.to_list(): # list is: [[timestamp, value, ...], ...]\n # pub value: (obs_uri, value, timestamp)\n lst_vals.append(PublicationValue(mapping.obs_uri, row[1], row[0]))\n polling_memory[mapping.obs_uri] = row[0] # safe as last polled date\n # publish\n self.publish(lst_vals)\n # sleep\n time.sleep(self._poll_interval)", "def run(self):\n broker_channel = broker.init_broker_channel()\n\n print(\"Starting Twitter Stream Thread: %s\" % self.routing_key)\n while not self.shutdown_flag.is_set():\n tweet = 'Tweet {0}: {1}'.format(random.randint(1, 100), int(time.time()))\n lng = random.uniform(self.location['sw']['lng'], self.location['ne']['lng'])\n lat = random.uniform(self.location['sw']['lat'], self.location['ne']['lat'])\n\n cleaned_data = json.dumps({'tweet': 
tweet, 'lat': lat, 'lng': lng})\n\n broker_channel.basic_publish(exchange=broker.broker_exchange, routing_key=self.routing_key, body=cleaned_data)\n time.sleep(10+random.randint(0, 3))\n\n print(\"Exiting Twitter Stream Thread: %s\" % self.routing_key)", "def start(self):\r\n pass" ]
[ "0.6780575", "0.64053637", "0.625197", "0.62273294", "0.6178107", "0.6083856", "0.60136896", "0.6011202", "0.59738946", "0.59234995", "0.5886008", "0.5885424", "0.58506644", "0.58455986", "0.5831685", "0.57933074", "0.5747446", "0.5729477", "0.570291", "0.57021135", "0.5682732", "0.5680772", "0.5672942", "0.562363", "0.5622378", "0.5604707", "0.55509496", "0.55500233", "0.5547098", "0.5546865" ]
0.76409364
0