query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Set as target currently selected login on login list | def selected_login_event(self, event):
    cursor = self.logins_list.get(self.logins_list.curselection())
    target = cursor.split(':')[0]
    status = cursor.split(':')[1][1:]
    if target == None:
        return
    self.Target.config(text=target)
    self.client.target = target
    self.message_list.hide()
    if target == None:
        return
    if status == 'Online':
        if target not in self.client.buff_dict:
            self.client.startChatTo(target)
        elif self.client.buff_dict[target].status == False:
            self.client.startChatTo(target)
        print(target)
        self.message_list = self.client.message_list_dict[target]
        self.message_list.show()
    else:
        if target not in self.client.message_list_dict:
            self.client.message_list_dict[target] = Message_list(self.Message_box_frame)
        self.message_list = self.client.message_list_dict[target]
        self.message_list.show() | {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}} | [
"def switchToLogin(self):\n self.username.setText(\"\")\n self.password.setText(\"\")\n self.lastView = None\n self.currentView = 0\n self.stacked.setCurrentIndex(0)\n self.show()",
"def login(self, login):\n\n self._login = login",
"def _setstaff_login(self):\r\n GlobalStaff().add_users(self.user)\r\n self.client.login(username=self.user.username, password='foo')",
"def do_login(user):\n session[CURRENT_USER_KEY] = user.id",
"def click_login_button(self):",
"def set_LoginID(self, value):\n super(DownloadDocumentInputSet, self)._set_input('LoginID', value)",
"def loginAsManager(self):\n self.browser.open('http://nohost/plone/')\n self.browser.getLink('Log in').click()\n self.browser.getControl('Login Name').value = 'root'\n self.browser.getControl('Password').value = 'secret'\n self.browser.getControl('Log in').click()",
"def login(self):\n\t\treturn",
"def log_in(self):\n\t\tpass",
"def login(self):\n self.driver.find_element(*BaseLocators.PRIMARY_BUTTON).click()",
"def login(self):\n #raise NotImplementedError(\"This method must be overridden\")",
"def goto_login(self):\n self.driver.find_element(*BasePageLocators.MY_ACCOUNT_DROPDOWN).click()\n self.driver.find_element(*BasePageLocators.GO_LOGIN).click()\n return LoginPage(self.driver)",
"def login(self):",
"def login(self, user):\n #self.logger.debug(\"Login with user '%s'\", user['username'])\n self.username.text = user['username']\n self.password.text = user['password']\n time.sleep(5)\n self.login_button.click()\n time.sleep(10)\n\n return self\n #hover abover recruitment tab and make a cursor move to the vacancies tab",
"def change_user(self, login):\n self.task_storage.change_user_config(login)",
"def log_in_to_targets(self):\n result = 0\n for target in self.target_list:\n iscsi_adm_string = 'sudo iscsiadm --mode node --targetname %s --portal %s:%s --login' % (target.iqn,\n target.ip_address,\n target.port)\n self.iscsiadm_logger.info('logging into %s at %s:%s' % (target.iqn, target.ip_address, target.port))\n response = self.execute_bash_command(iscsi_adm_string)\n if response.returncode != 0:\n self.iscsiadm_logger.error('failed logging into at %s %s:%s' % (target.iqn, target.ip_address,\n target.port))\n result = 1\n else:\n self.iscsiadm_logger.info('logged into %s at %s:%s' % (target.iqn, target.ip_address, target.port))\n\n return result",
"def login(self):\n # Browse to login url\n self.browser.get('https://www.netflix.com/be/login')\n\n time.sleep(3)\n\n # define email and password input fields\n email = self.browser.find_element_by_id('id_userLoginId')\n password = self.browser.find_element_by_id('id_password')\n\n # clear the input fields\n email.clear()\n password.clear()\n\n # put in the login info\n email.send_keys(self.email)\n password.send_keys(self.password)\n\n # submit\n password.send_keys(Keys.RETURN)\n\n time.sleep(3)\n\n # Check profiles for the given user\n profiles = self.browser.find_elements_by_class_name('profile')\n user_found = False\n\n for profile in profiles:\n profile_name = str(\n profile.find_element_by_class_name('profile-name').text\n )\n\n if profile_name == self.username:\n user_found = True\n profile.find_element_by_class_name('profile-icon').click()\n\n time.sleep(3)\n\n self.browser.get('https://www.netflix.com/browse/my-list')\n\n time.sleep(3)\n\n my_list_items = self.browser.find_elements_by_class_name('fallback-text')\n return_arr = []\n\n for item in my_list_items:\n return_arr.append(str(item.text))\n\n # Fetch items on this user's list\n self.titles = list(set(return_arr))\n break\n\n if not user_found:\n print('%s is not a user of this account' % self.username)",
"def handleLogin(self):\n aVar = self.session.getAttribute(self.settings.authenvar)\n self.loggedin = False\n if not aVar:\n self.currenttemplate = self.settings.logintemplate \n self.logger.debug(\"Not logged in, Login-Mask activated.\")\n return\n\n self.loggedin = True\n self.logger.debug('Loged in as: \"{}\"'.format(aVar))",
"def login_iscsi_target(self, portal_config, target_config):\n ip = portal_config.get('ip')\n port = portal_config.get('port')\n iqn = target_config.get('iqn')\n if ip and port and iqn:\n command = 'iscsiadm -m node -l -T %s -p %s:%d' % (iqn, ip, port)\n self.cmd(command)",
"def user_login(change):\n return change()",
"def login_bot(self):\n pass",
"def change(login):\n try:\n manager = Actions()\n manager.change_user(login)\n except Exception as e:\n print(e)",
"def do_login(self, login):\n if not login:\n print('please supply a user name to login')\n return\n self._user = re.sub(r'\\W', '_', str(login).strip().lower())\n print('Logged in as:', self._user)",
"def click_login(self):\n self.login.click()\n return self.login",
"def on_login(self, username):",
"def on_login(self, username):",
"def setCurrentUser(self, provider):\n pass",
"def setCurrent(userip, foldername):\n userspace[session[userip]].folder = foldername",
"def set_active_target(self, target_name):\n if target_name not in self.session_targets: # target hasn't been used in this session. Instantiate it.\n target = self.load_target(target_name)\n self.session_targets[target_name] = target\n self.active_target = target\n\n else:\n self.active_target = self.session_targets.get(target_name, None)",
"def do_login(self, backend, user):"
] | [
"0.6249682",
"0.611241",
"0.60492665",
"0.59257644",
"0.5898063",
"0.58395225",
"0.5781492",
"0.572091",
"0.5709027",
"0.5698393",
"0.569639",
"0.5675576",
"0.56696826",
"0.5641832",
"0.56213915",
"0.5619973",
"0.5607311",
"0.56019825",
"0.5600737",
"0.55805004",
"0.5575723",
"0.55715966",
"0.55591094",
"0.5545418",
"0.5542298",
"0.5542298",
"0.5527655",
"0.549946",
"0.5488723",
"0.5468689"
] | 0.7160275 | 0 |
Thread parser, yielding items populated with thread details | def parse_thread(self, response):
    item = DataakForumItem()
    # xpath selectors
    thread = '//span[@class="active"]/text()'
    navpath = '//div[@class="navigation"]/a/text()'
    posts = '//div[@class="post "]'
    # author_not_admin = '//div[@class="author_information"]//a/text()'
    author = './/div[@class="author_information"]//a/text() | .//div[@class="author_information"]//em/text()'
    body = './/div[@class="post_body scaleimages"]/text() | .//div[@class="post_body scaleimages"]//*/text()'
    posts_selector = response.xpath(posts)
    for post in posts_selector:
        item['url'] = response.url
        # self.log(response.url)
        item['thread'] = response.xpath(thread).extract_first()
        # self.log("thread: %s" % response.xpath(thread).extract())
        # get the last item which is the forum name
        item['forum'] = response.xpath(navpath).extract()[-1]
        # self.log("nav path: %s" % response.xpath(navpath).extract())
        item['author'] = post.xpath(author).extract_first()
        # self.log("author: %s" % post.xpath(author).extract())
        item['body'] = post.xpath(body).extract()
        # self.log("body: %s" % post.xpath(body).extract())
        yield item | {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}} | [
"def process_thread(self):",
"def get_threads(subforum_soup):\n threads = subforum_soup.findAll('a', attrs={'id':lambda x:x and x.startswith('thread_title')}) #pulls out the thread links\n\n #page _ of _\n page = 1\n page_count = subforum_soup.find('td', attrs={'class':'vbmenu_control'})\n if page_count:\n page_count = page_count.getText()\n page_match = re.search(r'(\\d+) .+? (\\d+)', page_count)\n if page_match:\n page_count = int(page_match.group(2))\n page = int(page_match.group(1))\n logger.debug(\"get_threads: page_count = %d, page = %d\" % (page_count, page))\n else:\n page_count = 1\n page = 1\n\n thread_counts = subforum_soup.findAll('td', attrs={'class':'alt2', 'title':lambda x:x and re.match(r'.+?: \\d+?', x)})\n if len(threads) != len(thread_counts):\n logger.error('get_threads: thread-count mismatch. Threads = %d; thread_counts = %d' % (len(threads), len(thread_counts)))\n logger.debug('get_threads: threads = %s' % str(threads))\n\tlogger.debug('get_threads: thread_counts = %s' % str(thread_counts))\n threadlinks = []\n for i in range(min(len(threads), len(thread_counts))):\n t = threads[i]\n c = thread_counts[i]\n sanatized = c['title'].replace(',', '')\n count = int(re.search(r'.+?: (\\d+?) .+?: (\\d+?)',sanatized).group(1)) + 1\n text = t.getText()\n link = t['href']\n threadlinks.append({'name':text, 'link':link, 'count':count})\n return threadlinks, (page, page_count)",
"def read_thread(thread_num):\n pass# TODO",
"def task_parse_results():\n pass",
"def scan_thread(self, response):\n story_item = response.meta.get(\"story_item\")\n print(\"\\nscraping thread {0}\\n\".format(response.url))\n\n # div_tmarks is a list of all threadmarked posts on this story thread\n # ...at least on this PAGE of the story.\n div_tmarks = response.xpath(\"//li[contains(@class, 'hasThreadmark')]\")\n \n if div_tmarks is not None and len(div_tmarks) > 0:\n\n for div_tmark in div_tmarks:\n # story_seg = StorySegment()\n\n author = div_tmark.xpath(\"@data-author\").extract_first()\n\n author_seg, created = Author.objects.get_or_create(name=author)\n\n title = \"\".join(div_tmark.xpath(\"div/span/text()\").extract()).encode('utf-8')\n title = \" \".join(title.split())\n\n # Get the Date and clean it up/format it ======================================\n date = div_tmark.xpath(\".//span[@class='DateTime' and ../@class!='editDate']/@title\").extract_first()\n if date is None:\n date = div_tmark.xpath(\".//abbr[@class='DateTime']/text()\").extract_first()\n date_obj = datetime.strptime(date, \"%b %d, %Y at %I:%M %p\")\n date_obj = date_obj.replace(tzinfo=utc)\n # story_seg.published = date_obj\n # =============================================================================\n\n story_seg, seg_created = StorySegment.objects.get_or_create(story=story_item,\n title=title,\n published=date_obj)\n\n # If you want to include the formatting of the original page, change the following\n # line to ..... .//blockquote/node()\").extract()\n # As it stands, we don't necessarily need the <br /> tags and such.\n content = \"\".join(div_tmark.xpath(\".//blockquote//text()\").extract())\n story_seg.contents = content\n\n story_item.authors.add(author_seg)\n\n print(\"Title: {0} Author: {1}\".format(story_seg.title, author))\n print(\"date_time: {0}\".format(date_obj))\n print(\"content length: {0}\".format(len(content)))\n\n story_seg.save()\n story_item.save()\n\n div_next_tmark = div_tmarks[-1].xpath(\".//span[@class='next']\")\n\n # navigate to the next threadmark.\n if div_next_tmark is not None:\n next_mark = div_next_tmark.xpath(\"a/@href\").extract_first() \n print(\"Next url: {0}\".format(next_mark))\n next_mark_url = response.urljoin(next_mark)\n yield scrapy.Request(\n next_mark_url,\n callback=self.scan_thread,\n priority=2,\n meta={\"story_item\": story_item}\n )",
"def parse_sam_in_threads(remap_csv, nthreads, semaphore):\n pool = Pool(processes=nthreads)\n try:\n reads = pool.imap(parse_sam,\n iterable=matchmaker(remap_csv, semaphore),\n chunksize=100)\n for read in reads:\n yield read\n finally:\n pool.close()\n pool.join()",
"def parser_txt_file(self, content):\n ai_cpu_str = str(content.replace(b'\\n\\x00', b' ___ ').replace(b'\\x00', b' ___ '))[2:-1]\n ai_cpu_lines = ai_cpu_str.split(\" ___ \")\n result_list = list()\n ai_cpu_total_time_summary = 0\n # Node serial number.\n serial_number = 1\n for i in range(len(ai_cpu_lines) - 1):\n node_line = ai_cpu_lines[i]\n thread_line = ai_cpu_lines[i + 1]\n if \"Node\" in node_line and \"Thread\" in thread_line:\n # Get the node data from node_line\n result = self._get_kernel_result(\n serial_number,\n node_line.split(','),\n thread_line.split(',')\n )\n\n if result is None:\n continue\n\n result_list.append(result)\n # Calculate the total time.\n total_time = result[2]\n ai_cpu_total_time_summary += total_time\n # Increase node serial number.\n serial_number += 1\n elif \"Node\" in node_line and \"Thread\" not in thread_line:\n node_type_name = node_line.split(',')[0].split(':')[-1]\n logger.warning(\"The node type:%s cannot find thread data\", node_type_name)\n return ai_cpu_total_time_summary, result_list",
"def on_parse(\n self,\n ) -> AsyncIteratorOrIterator[None]: # pragma: no cover # pyright: ignore\n yield None",
"def handle_thread_execution(self, data, index):\n return {}",
"def __getThreads(self):\n try:\n threads = [each.findParent('tr') for each in self.soup.find\\\n ('div', id='messageList').findAll('div', 'MessageSubjectCell')]\n if not threads:\n log.info(self.log_msg('No threads are found for url %s'%\\\n self.currenturi))\n return False\n except:\n log.info(self.log_msg('exception while getting threads'))\n return False\n for thread in threads:\n if 'lia-list-row-float' in thread.get('class',''):\n log.info(self.log_msg('Its a Sticky Thread, Ignore it in\\\n the url %s'%self.currenturi))\n continue\n self.__total_threads_count += 1\n if self.__total_threads_count > self.__max_threads_count:\n log.info(self.log_msg('Reaching maximum post,Return false \\\n from the url %s'%self.currenturi))\n return False\n try:\n thread_time = datetime.strptime(re.sub('\\s+', ' ', \\\n stripHtml(thread.find('span', 'DateTime')\\\n .renderContents())), '%m-%d-%Y %I:%M %p')\n except:\n log.exception(self.log_msg('data not found in %s'%\\\n self.currenturi))\n continue\n if checkSessionInfo('Search', self.session_info_out, thread_time, \\\n self.task.instance_data.get('update')):\n log.info(self.log_msg('Session info Returns True for %s'%\\\n self.currenturi))\n return False\n self.__last_timestamp = max(thread_time, self.__last_timestamp)\n temp_task = self.task.clone() \n try:\n temp_task.instance_data[ 'uri' ] = self.__baseuri + thread\\\n .find('h2', 'message-subject').a['href'].split(';')[0]\n except:\n log.exception(self.log_msg('Cannot find the thread url \\\n in the uri %s'%self.currenturi))\n continue\n temp_task.pagedata['edate_last_post_date'] = datetime.\\\n strftime(thread_time,\"%Y-%m-%dT%H:%M:%SZ\")\n temp_task.pagedata['et_thread_last_post_author'] = stripHtml\\\n (thread.find('div', 'MessagePostDateAndAuthorCell')\\\n .renderContents()).splitlines()[-1].strip()\n try:\n temp_task.pagedata['et_author_name'] = stripHtml(thread.\\\n find('div', 'lia-user-name').renderContents())\n except:\n log.info(self.log_msg('Author name not found in the url\\\n %s'%self.currenturi))\n try:\n temp_task.pagedata['ei_thread_replies_count'] = int(stripHtml\\\n (thread.find('td', attrs={'class':re.compile\\\n ('repliesCountColumn')}).renderContents()))\n\n except:\n log.info(self.log_msg('Views count not found in the url\\\n %s'%self.currenturi)) \n self.linksOut.append(temp_task)\n return True",
"def get_thread_urls(self, response):\n\n print(\"scraping {0}\".format(response.url))\n url_stories = []\n\n # <li_tags> is a list of all the <li> tags in the html doc with a certain class value.\n # This corresponds to all threads that are NOT sticky.\n li_tags = response.xpath(\"//li[@class='discussionListItem visible ']\")\n\n for thread_tag in li_tags:\n\n author_name = thread_tag.xpath('@data-author').extract_first()\n\n # Get the last post date for a thread ========================================================\n last_post_date = thread_tag.xpath(\".//dl[@class='lastPostInfo']//abbr/text()\").extract_first()\n if last_post_date is not None:\n last_post_date = datetime.strptime(last_post_date, \"%b %d, %Y at %I:%M %p\").replace(tzinfo=utc)\n else:\n # fix with line continuation.\n last_post_date = thread_tag.xpath(\".//span[@class='DateTime']/@title\").extract_first()\n last_post_date = datetime.strptime(last_post_date, \"%b %d, %Y at %I:%M %p\").replace(tzinfo=utc)\n\n # ============================================================================================\n\n author, created = Author.objects.get_or_create(name=author_name)\n if created:\n author.save()\n\n title = thread_tag.xpath(\".//h3[@class='title']/a/text()\").extract_first().encode('utf-8')\n story, created = Story.objects.get_or_create(title=title)\n\n # if created is true, then it's a brand new story, so make sure to save it.\n if created:\n story.save()\n story.authors.add(author)\n\n a_node = thread_tag.xpath(\"div/div/h3/a\")\n thread_url = a_node.xpath(\"@href\").extract_first()\n\n cur_date = datetime.now(tz=utc)\n oldest_date = datetime.min.replace(tzinfo=utc)\n\n created = False\n \"\"\"\n Over here, I am attempting to either update an existing storyhost\n object, OR I am creating a new one. It looks redundant, but I found that\n if I just used get_or_create, I was forced to set last_date automatically.\n\n I didn't always want to create a brand new object, so this verbose code\n was necessary.\n \"\"\"\n try:\n # TRY TO UPDATE EXISTING object\n storyhost = StoryHost.objects.get(host=self.HOST, story=story, url=thread_url)\n storyhost.save()\n except StoryHost.DoesNotExist:\n\n # CREATE BRAND NEW STORYHOST OBJECT\n storyhost, created = StoryHost.objects.get_or_create(host=self.HOST,\n story=story,\n url=thread_url,\n last_scraped=oldest_date)\n\n storyhost.save()\n\n \"\"\"\n Check if the last post date is more recent than the\n storyhost's last scraped date. If it's not, skip it.\n\n If it is, update the last scraped date, and add it to the\n list of url_stories to be returned at the end of this function.\n \"\"\"\n\n last_seg_date = self.get_last_seg_date(story)\n if thread_url is not None:\n if last_post_date > storyhost.last_scraped or last_seg_date < last_post_date:\n storyhost.last_scraped = cur_date\n storyhost.save()\n thread_link = response.urljoin(thread_url)\n\n # Add this story to two separate lists, one for updating, one for just\n # scraping.\n if created:\n url_stories.append((thread_link, story))\n else:\n self.update_list.append((\"{0}threadmarks\".format(thread_link), story))\n else:\n print(\"Skipping {0}\".format(storyhost.url))\n\n return url_stories",
"def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)",
"def Threads():\n for i in range(0, idc.get_thread_qty()):\n yield idc.getn_thread(i)",
"def __getThreadPage( self ):\r\n threads = [x.findParent('tr') for x in self.soup.findAll('td','tbl_forum_thread')]\r\n if len(threads )==0:\r\n log.info(self.log_msg('No threads are found'))\r\n return False\r\n for thread in threads:\r\n self.total_posts_count = self.total_posts_count + 1\r\n try:\r\n thread_info = thread.find('td','tbl_forum_thread').findAll('a')\r\n date_str = stripHtml(thread_info[-1].next.next.__str__())\r\n thread_time = datetime.strptime(date_str,'on %B %d, %Y')\r\n except:\r\n log.exception(self.log_msg('Date cannot found, continue with other posts'))\r\n continue\r\n if self.total_posts_count > self.max_posts_count:\r\n log.info(self.log_msg('Reaching maximum post,Return false'))\r\n return False\r\n try:\r\n if checkSessionInfo('Search',self.session_info_out, thread_time,\\\r\n self.task.instance_data.get('update')):\r\n continue\r\n self.last_timestamp = max(thread_time , self.last_timestamp )\r\n temp_task=self.task.clone()\r\n try:\r\n title_tag = thread.find('h2')\r\n temp_task.pagedata['title']= stripHtml(title_tag.renderContents())\r\n temp_task.instance_data[ 'uri' ] = 'http://silverlight.net' + title_tag.find('a')['href']\r\n temp_task.pagedata['et_author_name'] = stripHtml(thread.find('p').find('a').renderContents())\r\n temp_task.pagedata['et_thread_last_post_author'] = stripHtml(thread_info[-1].renderContents())\r\n view_reply = {'ei_thread_num_replies':'tbl_forum_views','ei_thread_num_views':'tbl_forum_replies'}\r\n for each in view_reply.keys():\r\n temp_task.pagedata[each] = int(re.sub('[^\\d]','',stripHtml(thread.find('td',view_reply[each]).renderContents())))\r\n temp_task.pagedata['edate_last_post_date']= datetime.strftime(thread_time,\"%Y-%m-%dT%H:%M:%SZ\")\r\n except:\r\n log.exception(self.log_msg('Cannot find the uri'))\r\n continue\r\n try:\r\n temp_task.pagedata['ef_thread_rating']= float(re.search('\\[([^ ]+)',thread.find('span','ForumThreadRateControl star_ratings')['title']).group(1))\r\n except:\r\n log.info(self.log_msg('Thread rating not found'))\r\n self.linksOut.append( temp_task )\r\n log.info(self.log_msg('Task added'))\r\n except:\r\n log.info(self.log_msg('Cannot add the Task'))\r\n return True",
"def parse(self):\n gen = self.v6_gen() # read from workers\n gen = self.tuple_gen(gen) # convert v6->tuple\n gen = self.batch_gen(gen) # assemble into batches\n for b in gen:\n yield b",
"def __getThreads(self):\n threads = self.soup.find('div', 'fp_left').findAll('div', attrs={'class':re.compile('fp_topic_')}, recursive=False)\n if not threads:\n log.info(self.log_msg('No threads are found for url %s'%\\\n self.currenturi))\n return False\n for thread in threads[1:]:\n self.__current_thread_count += 1\n if self.__current_thread_count > self.__max_threads_count:\n log.info(self.log_msg('Reaching maximum post,Return false \\\n from the url %s'%self.currenturi))\n return False\n \n try:\n date_str = stripHtml(thread.find('span', 'fp_last_post_time').renderContents())\n thread_time = datetime.strptime(date_str, 'Last post: %m-%d-%Y %H:%M%p')\n except:\n log.exception(self.log_msg('Cannot fetch the date for the url\\\n %s'%self.currenturi))\n continue\n if checkSessionInfo('Search', self.session_info_out, thread_time,\\\n self.task.instance_data.get('update')):\n log.info(self.log_msg('Session info Returns True for url %s'%self.currenturi))\n return False\n self.__last_timestamp = max(thread_time , self.__last_timestamp )\n temp_task = self.task.clone() \n try:\n temp_task.instance_data['uri'] = thread.find('a', 'topictitle')['href']\n except:\n log.exception(self.log_msg('Cannot find the thread url \\\n in the uri %s'%self.currenturi))\n continue\n temp_task.pagedata['edate_last_post_date']= datetime.\\\n strftime(thread_time,\"%Y-%m-%dT%H:%M:%SZ\")\n last_post_author = thread.find('div', 'fp_topic_last_post_author', title=True)\n if last_post_author:\n temp_task.pagedata['et_thread_last_post_author'] = last_post_author['title']\n try:\n author_tag = thread.find('span', 'fp_topic_author')\n author_name_tag = author_tag.span.extract()\n temp_task.pagedata['et_author_name'] = stripHtml(author_name_tag.renderContents())\n except:\n log.info(self.log_msg('Author name not found in url %s'%self.currenturi))\n try:\n temp_task.pagedata['ei_thread_views_count'] = int(stripHtml(author_tag.renderContents()).replace(',', '').replace('views',''))\n except:\n log.info(self.log_msg('Views count not found in the url\\\n %s'%self.currenturi))\n try:\n replies_str = stripHtml(thread.find('div', 'fp_topic_content_replies').renderContents()).replace(',', '')\n if re.match('\\d+', replies_str):\n temp_task.pagedata['ei_thread_replies_count'] = int(replies_str)\n except:\n log.exception(self.log_msg('Replies count not found in the url\\\n %s'%self.currenturi))\n self.linksOut.append(temp_task)\n return True",
"def parse_task(k):\r\n return [stringify_children(xml_object.xpath(k)[i]) for i in xrange(0, len(xml_object.xpath(k)))]",
"def parse(self, parser, tokens):\n self.parser = parser\n self.bits = tokens.split_contents()\n self.tagname = self.bits.pop(0)\n self.kwargs = {}\n self.blocks = {}\n self.arguments = self.options.get_arguments()\n self.current_argument = None\n self.todo = list(self.bits)\n for bit in self.bits:\n self.handle_bit(bit)\n self.finish()\n self.parse_blocks()\n return self.kwargs, self.blocks",
"def get_info():\n with open('explorers.json', 'r') as file:\n block_expl_info = json.load(file)\n BLOCK_EXPL_INFO['block_explorers'] = [{'analytics': [None, None]} for i in range(len(block_expl_info))]\n analytic_thread = threading.Thread(target=get_analytics)\n analytic_thread.start()\n print(analytic_thread)\n counter_api = 0\n for elem in block_expl_info:\n print(counter_api, elem)\n api = search(block_expl_info[elem], \"api\")\n name, currency, url, best_height_key, timer = search(block_expl_info[elem], \"name\"), search(\n block_expl_info[elem], \"currency\"), search(block_expl_info[elem], \"url\"), search(block_expl_info[elem],\n \"best_height_key\"), search(\n block_expl_info[elem], \"api_limit\")\n if api:\n my_thread = threading.Thread(target=get_best_height,\n args=(name, currency, url, best_height_key, counter_api, timer))\n counter_api += 1\n my_thread.start()\n print(my_thread)\n else:\n latest_block = BLOCK_EXPL_INFO\n latest_block_list = latest_block['block_explorers']\n latest_block_list[counter_api][\"name\"] = name\n latest_block_list[counter_api][\"currency\"] = currency\n latest_block_list[counter_api][\"best_height\"] = best_height_key\n latest_block_list[counter_api][\"api\"] = None\n counter_api += 1",
"def parse(self, response):\n page_jobs=[]\n\n # Calling abstarct method get_jobs_list() and iterating...\n jobs_div_list=self.get_jobs_list(response)\n for div in jobs_div_list:\n \n # Calling abstarct method get_job_dict()\n job_dict=self.get_job_dict(div)\n\n if not job_dict['url'] or not job_dict['title'] :\n # At least url, title data is loaded from the list of job posting ...\n raise ValueError( \"Could not find valid job information ('url' and 'title') in data:\\n\" + \n str(div.get()) + \"\\nScraped infos:\\n\" + str(job_dict) + \"\\nReport this issue on github!\" )\n \n # Store source as the name of the spider aka website\n job_dict['source']=self.name\n page_jobs.append(job_dict)\n \n \"\"\"\n Load full job page only if:\n - it's a new job (not in database)\n - load_full_jobs=Yes\n - the method parse_full_job_page() has been re-wrote by the Scraper subclass\n \"\"\"\n if ( (not self.db or self.db.find_job(job_dict)==None)\n and self.load_full_jobs ):\n if type(self).parse_full_job_page != Scraper.parse_full_job_page:\n # load_full_jobs=Yes and it's supported by scraper\n # Call parse_full_job_page() with job URL\n\n # Handle SeleniumRequest if use_selenium=True\n if self.use_selenium:\n yield SeleniumRequest(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict),\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(url=job_dict['url'], \n callback=self.parse_full_job_page,\n cb_kwargs=dict(job_dict=job_dict))\n else:\n yield Job(job_dict)\n else:\n yield Job(job_dict)\n\n \"\"\" Just printing in one line \"\"\"\n if self.load_full_jobs:\n if type(self).parse_full_job_page == Scraper.parse_full_job_page:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True and load_all_new_pages=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraped {} jobs from {}. Scraper {} does not support load_full_jobs=True, some informations might be missing\".format(len(page_jobs), response.url, self.name))\n else:\n self.log.info(\"Scraping {} jobs from {}...\".format(len(page_jobs), response.url))\n else:\n if self.load_all_new_pages==False:\n self.log.info(\"Scraped {} jobs from {}. load_all_new_pages=False and load_full_jobs=False, some new job postings and job informations might be missing\".format(len(page_jobs), response.url))\n else:\n self.log.info(\"Scraped {} jobs from {}. load_full_jobs=False, some informations might be missing\".format(len(page_jobs), response.url))\n \n \"\"\"\n If all page jobs are new and \n The method get_next_page_url() has been re-wrote by the Scraper subclass\n Scrape next page\n \"\"\"\n if self.load_all_new_pages==True:\n if self.db and any( [self.db.find_job(job_dict)!=None for job_dict in page_jobs] ):\n # All new job postings loaded\n pass\n else:\n if self.get_next_page_url(response)!=None :\n # Loading next page...\n if self.use_selenium:\n yield SeleniumRequest(\n url=self.get_next_page_url(response),\n callback=self.parse,\n wait_time=self.selenium_wait_time, script=SCROLL_DOWN)\n else:\n yield response.follow(\n url=self.get_next_page_url(response),\n callback=self.parse)\n else:\n if type(self).get_next_page_url != Scraper.get_next_page_url:\n # Last page loaded\n pass\n else:\n self.log.info(\"Scraper {} does not support load_all_new_pages=True, some new job postings might be missing\".format(self.name))",
"def parse_cli():\n defaults = DefaultOptions()\n parser = CustomArgumentParser(usage=\"%(prog)s [OPTIONS] THREAD [THREAD]...\")\n\n parser.add_argument(\"thread\", nargs=\"*\", help=\"thread URL\")\n parser.add_argument(\n \"-l\", \"--list\", action=\"append\", type=valid_list, default=defaults.LIST\n )\n parser.add_argument(\n \"-q\", \"--quiet\", dest=\"verbosity\", action=\"store_const\",\n const=0, default=defaults.VERBOSITY\n )\n parser.add_argument(\"-p\", \"--path\", dest=\"base_dir\", default=defaults.PATH)\n parser.add_argument(\n \"-f\", \"--filenames\", dest=\"names\", action=\"store_true\",\n default=defaults.USE_NAMES\n )\n parser.add_argument(\n \"-a\", \"--archive\", dest=\"archive\", type=valid_archive,\n default=defaults.ARCHIVE\n )\n parser.add_argument(\n \"--connections\", type=positive_int, default=defaults.CONNECTIONS\n )\n parser.add_argument(\"--retries\", type=int, default=defaults.RETRIES)\n\n args = parser.parse_args()\n # Scan lists for thread links\n for l in args.list:\n with open(l, \"r\") as f:\n args.thread.extend([t.strip() for t in f if not t.startswith(\"#\")])\n # Make sure base_dir is an absolute path\n args.base_dir = os.path.abspath(args.base_dir)\n # Weed out clearly wrong thread URLs\n args.thread = set(fnmatch.filter(args.thread, \"*boards.4chan*.org/*/thread/*\"))\n\n return args",
"def spider(articles):\n\n article_links = []\n page = 1\n while len(article_links) < articles:\n url = 'https://www.theverge.com/games/archives/'+str(page)\n source_code = requests.get(url)\n plain_text = source_code.text\n soup = bs(plain_text, features=\"html.parser\")\n\n for article_link in soup.findAll('h2', class_=\"c-entry-box--compact__title\"):\n link = article_link.find('a')\n article_links.append(link.get('href'))\n if len(article_links) >= articles:\n break\n page += 1\n\n thread0 = threading.Thread(target=spider_thread(article_links[0]))\n thread0.start()\n thread1 = threading.Thread(target=spider_thread(article_links[1]))\n thread1.start()\n thread2 = threading.Thread(target=spider_thread(article_links[2]))\n thread2.start()\n thread3 = threading.Thread(target=spider_thread(article_links[3]))\n thread3.start()\n thread4 = threading.Thread(target=spider_thread(article_links[4]))\n thread4.start()\n thread5 = threading.Thread(target=spider_thread(article_links[5]))\n thread5.start()\n thread6 = threading.Thread(target=spider_thread(article_links[6]))\n thread6.start()\n thread7 = threading.Thread(target=spider_thread(article_links[7]))\n thread7.start()\n thread8 = threading.Thread(target=spider_thread(article_links[8]))\n thread8.start()\n thread9 = threading.Thread(target=spider_thread(article_links[9]))\n thread9.start()\n\n thread0.join()\n thread1.join()\n thread2.join()\n thread3.join()\n thread4.join()\n thread5.join()\n thread6.join()\n thread7.join()\n thread8.join()\n thread9.join()\n\n return article_links",
"def run(self, parsed):",
"def parse_jobs(self, response: scrapy.http.Response):\n hits = response.xpath('//div[@class=\"jobHit\"]')\n for hit in hits:\n job = self.default_job()\n job['queries'] = response.meta['queries']\n for i in MTADialogSpider.parse_job(hit, job):\n yield i",
"def parse(self):\n\n def parse_testcase(xml_object):\n testcase = xml_object\n\n tc_dict = {\n \"classname\": testcase.attrib.get(\"classname\", \"unknown\"),\n \"file\": testcase.attrib.get(\"file\", \"unknown\"),\n \"line\": int(testcase.attrib.get(\"line\", -1)),\n \"name\": testcase.attrib.get(\"name\", \"unknown\"),\n \"time\": float(testcase.attrib.get(\"time\", -1)),\n }\n\n # The following data is normally a subnode (e.g. skipped/failure).\n # We integrate it right into the testcase for better handling\n if hasattr(testcase, \"skipped\"):\n result = testcase.skipped\n tc_dict[\"result\"] = \"skipped\"\n tc_dict[\"type\"] = result.attrib.get(\"type\", \"unknown\")\n # tc_dict[\"text\"] = re.sub(r\"[\\n\\t]*\", \"\", result.text) # Removes newlines and tabs\n # result.text can be None for pytest xfail test cases\n tc_dict[\"text\"] = result.text or \"\"\n tc_dict[\"message\"] = result.attrib.get(\"message\", \"unknown\")\n elif hasattr(testcase, \"failure\"):\n result = testcase.failure\n tc_dict[\"result\"] = \"failure\"\n tc_dict[\"type\"] = result.attrib.get(\"type\", \"unknown\")\n # tc_dict[\"text\"] = re.sub(r\"[\\n\\t]*\", \"\", result.text) # Removes newlines and tabs\n tc_dict[\"text\"] = result.text\n tc_dict[\"message\"] = \"\"\n else:\n tc_dict[\"result\"] = \"passed\"\n tc_dict[\"type\"] = \"\"\n tc_dict[\"text\"] = \"\"\n tc_dict[\"message\"] = \"\"\n\n if hasattr(testcase, \"system-out\"):\n tc_dict[\"system-out\"] = testcase[\"system-out\"].text\n else:\n tc_dict[\"system-out\"] = \"\"\n\n return tc_dict\n\n def parse_testsuite(xml_object):\n testsuite = xml_object\n\n tests = int(testsuite.attrib.get(\"tests\", -1))\n errors = int(testsuite.attrib.get(\"errors\", -1))\n failures = int(testsuite.attrib.get(\"failures\", -1))\n\n # fmt: off\n skips = int(\n testsuite.attrib.get(\"skips\") or testsuite.attrib.get(\"skip\") or testsuite.attrib.get(\"skipped\") or -1\n )\n # fmt: on\n\n passed = int(tests - sum(x for x in [errors, failures, skips] if x > 0))\n\n ts_dict = {\n \"name\": testsuite.attrib.get(\"name\", \"unknown\"),\n \"tests\": tests,\n \"errors\": errors,\n \"failures\": failures,\n \"skips\": skips,\n \"passed\": passed,\n \"time\": float(testsuite.attrib.get(\"time\", -1)),\n \"testcases\": [],\n \"testsuite_nested\": [],\n }\n\n # add nested testsuite objects to\n if hasattr(testsuite, \"testsuite\"):\n for ts in testsuite.testsuite:\n # dict from inner parse\n inner_testsuite = parse_testsuite(ts)\n ts_dict[\"testsuite_nested\"].append(inner_testsuite)\n\n elif hasattr(testsuite, \"testcase\"):\n for tc in testsuite.testcase:\n new_testcase = parse_testcase(tc)\n ts_dict[\"testcases\"].append(new_testcase)\n\n return ts_dict\n\n # main flow starts here\n\n junit_dict = []\n\n if self.junit_xml_object.tag == \"testsuites\":\n for testsuite_xml_object in self.junit_xml_object.testsuite:\n complete_testsuite = parse_testsuite(testsuite_xml_object)\n junit_dict.append(complete_testsuite)\n else:\n complete_testsuite = parse_testsuite(self.junit_xml_object)\n junit_dict.append(complete_testsuite)\n\n return junit_dict",
"def __createTasksForThreads(self):\n self.__total_threads_count = 0\n self.__baseuri = 'http://forums.seagate.com'\n self.__last_timestamp = datetime(1980, 1, 1)\n #The Maximum No of threads to process, Bcoz, not all the forums get\n #updated Everyday, At maximum It will 100\n self.__max_threads_count = int(tg.config.get(path='Connector', key=\\\n 'seagateforums_maxthreads'))\n self.__setSoupForCurrentUri()\n while self.__getThreads():\n try:\n self.currenturi = self.__baseuri + self.soup.find('a', \\\n text='Next').findParent('a')['href'].split(';')[0]\n self.__setSoupForCurrentUri()\n except:\n log.info(self.log_msg('Next Page link not found for url \\\n %s'%self.currenturi))\n break\n #self.linksOut = []\n if self.linksOut:\n updateSessionInfo('Search', self.session_info_out, \\\n self.__last_timestamp , None, 'ForumThreadsPage', \\\n self.task.instance_data.get('update'))\n return True",
"def parse(self):\n logger=self.logger\n tokenizer=Tokenizer()\n self.scope=produtil.testing.parsetree.Scope()\n self.override(self.scope)\n self.parser=Parser(self.run_mode,logger,self.verbose)\n self.parser.requested_platform_name=self.platform_name\n morevars=self.make_vars()\n with open(self.inloc,'rt') as fileobj:\n self.parse_result=self.parser.parse(\n TokenizeFile(tokenizer,fileobj,self.inloc,1),self.scope,\n unique_id=self.unique_id,morevars=morevars)",
"def scrap_keywords():\n ParScr = ParallelScraper()\n ParScr.create_and_run_threads()\n return",
"def parse_items(self):",
"def do_threadedgen(self, args):\n\t\tfor lang in self.languages:\n\t\t\tt = threading.Thread(name = lang.name + \" hierarchies\", target = self.wrap_semaphore(getattr), args= (lang, \"hierarchyLengths\"))\n\t\t\tprint(\"spawning thread to generate hierarchies for \" + lang.name)\n\t\t\tt.start()\n\t\t\tself.generate_threads.append(t)"
] | [
"0.58861786",
"0.55657154",
"0.5539562",
"0.5480844",
"0.541937",
"0.5359861",
"0.53432167",
"0.53350884",
"0.5288622",
"0.5271926",
"0.52689695",
"0.5246764",
"0.52366966",
"0.52285147",
"0.52147704",
"0.52132064",
"0.5210715",
"0.51756674",
"0.5155512",
"0.51327235",
"0.5113877",
"0.51133704",
"0.51061064",
"0.510034",
"0.50876325",
"0.50807285",
"0.5076211",
"0.5040625",
"0.50199026",
"0.501901"
] | 0.6050263 | 0 |
Forum parser, yielding items populated with forum names | def parse_forum(self, response):
    item = ForumItem()
    item['forum'] = response.xpath('//span[@class="active"]/text()').extract_first()
    yield item | {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}} | [
"def parse_thread(self, response):\n item = DataakForumItem()\n\n # xpath selectors\n thread = '//span[@class=\"active\"]/text()'\n navpath = '//div[@class=\"navigation\"]/a/text()'\n posts = '//div[@class=\"post \"]'\n # author_not_admin = '//div[@class=\"author_information\"]//a/text()'\n author = './/div[@class=\"author_information\"]//a/text() | .//div[@class=\"author_information\"]//em/text()'\n body = './/div[@class=\"post_body scaleimages\"]/text() | .//div[@class=\"post_body scaleimages\"]//*/text()'\n\n posts_selector = response.xpath(posts)\n for post in posts_selector:\n item['url'] = response.url\n # self.log(response.url)\n\n item['thread'] = response.xpath(thread).extract_first()\n # self.log(\"thread: %s\" % response.xpath(thread).extract())\n\n # get the last item which is the forum name\n item['forum'] = response.xpath(navpath).extract()[-1]\n # self.log(\"nav path: %s\" % response.xpath(navpath).extract())\n\n item['author'] = post.xpath(author).extract_first()\n # self.log(\"author: %s\" % post.xpath(author).extract())\n\n item['body'] = post.xpath(body).extract()\n # self.log(\"body: %s\" % post.xpath(body).extract())\n\n yield item",
"def get_forum_names(self):\r\n return self.forum_set.values('slug', 'name')",
"def forums(self):\r\n return forums.Forums(self)",
"def forums(self):\n return forums.Forums(self)",
"def test_discussion_filter_forum(self):\n forum1 = ForumFactory(name=u'Forum 1')\n thread1 = ThreadFactory(forum=forum1, title=u'audio 1')\n PostFactory(thread=thread1)\n\n forum2 = ForumFactory(name=u'Forum 2')\n thread2 = ThreadFactory(forum=forum2, title=u'audio 2')\n PostFactory(thread=thread2)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for forum_id in (forum1.id, forum2.id):\n qs['forum'] = int(forum_id)\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(json.loads(response.content)['total'], 1)",
"def get_forums(forum_query, current_user=False):\n if not current_user:\n forum_query = [(item, None) for item in forum_query]\n\n forums = OrderedDict()\n for category in forum_query:\n if category[0].is_category:\n forums[category] = OrderedDict()\n\n for forum in forum_query:\n if forum[0].parent_id == category[0].id:\n forums[category][forum] = []\n\n for subforum in forum_query:\n if subforum[0].parent_id == forum[0].id:\n forums[category][forum].append(subforum)\n return forums",
"def suggestions(self):\r\n return suggestions.ForumSuggestions(self)",
"def get_threads(self):\n page = self.course.moodle.fetch(\n self._view_url % self.id,\n None\n )\n bs = bs4.BeautifulSoup(page.text, 'lxml')\n\n table = bs.find('table', class_='forumheaderlist')\n posts = table.find_all('tr', class_='discussion')\n\n data = []\n for p in posts:\n topiccell = p.find('td', class_='topic starter')\n topic = topiccell.find('a').text\n url = topiccell.find('a')['href']\n groupcell = p.find('td', class_=\"picture group\")\n if groupcell.text:\n groupname = groupcell.find('a').text\n else:\n groupname = None\n\n data.append((topic, url, groupname))\n\n return data",
"def init_data(cls):\n data=[{'forum':'General Discussion','group':0,'order':0,'note':''},\n {'forum':'Frequently Asked Questions','group':0,'order':1,'note':''},\n {'forum':'Rules and Policies','group':0,'order':2,'note':''},\n {'forum':'News and Announcements','group':1,'order':10,'note':''},\n {'forum':'Feedback and Suggestions','group':1,'order':11,'note':'Suggest ideas of improvement and new features'},\n {'forum':'Bug Reports','group':1,'order':12,'note':'Report problems of the web services'},\n {'forum':'Book Reviews','group':2,'order':20,'note':''},\n {'forum':'Artists Corner','group':2,'order':21,'note':'Discuss topics about art and artists'},\n {'forum':'Writers Corner','group':2,'order':22,'note':'Discuss topics about stories and writers'}\n ]\n for d in data:\n f = SuiForum(forum=d['forum'],note=d['note'],group=d['group'],order=d['order'])\n f.put()",
"def get_threads(subforum_soup):\n threads = subforum_soup.findAll('a', attrs={'id':lambda x:x and x.startswith('thread_title')}) #pulls out the thread links\n\n #page _ of _\n page = 1\n page_count = subforum_soup.find('td', attrs={'class':'vbmenu_control'})\n if page_count:\n page_count = page_count.getText()\n page_match = re.search(r'(\\d+) .+? (\\d+)', page_count)\n if page_match:\n page_count = int(page_match.group(2))\n page = int(page_match.group(1))\n logger.debug(\"get_threads: page_count = %d, page = %d\" % (page_count, page))\n else:\n page_count = 1\n page = 1\n\n thread_counts = subforum_soup.findAll('td', attrs={'class':'alt2', 'title':lambda x:x and re.match(r'.+?: \\d+?', x)})\n if len(threads) != len(thread_counts):\n logger.error('get_threads: thread-count mismatch. Threads = %d; thread_counts = %d' % (len(threads), len(thread_counts)))\n logger.debug('get_threads: threads = %s' % str(threads))\n\tlogger.debug('get_threads: thread_counts = %s' % str(thread_counts))\n threadlinks = []\n for i in range(min(len(threads), len(thread_counts))):\n t = threads[i]\n c = thread_counts[i]\n sanatized = c['title'].replace(',', '')\n count = int(re.search(r'.+?: (\\d+?) .+?: (\\d+?)',sanatized).group(1)) + 1\n text = t.getText()\n link = t['href']\n threadlinks.append({'name':text, 'link':link, 'count':count})\n return threadlinks, (page, page_count)",
"def bb_forum(hit):\n try:\n forum_slug = hit.group(1)\n f = Forum.objects.get(slug=forum_slug)\n return '<a href=\"%s\"><img src=\"%snewspaper.png\" alt=\"forum\" border=\"0\" /> %s</a>' % (f.get_absolute_url(), settings.MEDIA_URL, f)\n except:\n return \"[forum]%s[/forum]\" % (forum_slug)",
"def categories(self):\r\n return categories.ForumCategories(self)",
"def zrzutForum(dane):\n return tuple([daneForum(x) for x in (zupaForum(i))] for i in dane)",
"def process_item(self, item, spider):\n session = self.session()\n forumdb = ForumSection()\n\n forumdb.category = item[\"category\"]\n if item[\"sub_category\"] is None:\n forumdb.sub_category = None\n forumdb.sub_category = item[\"sub_category\"]\n forumdb.forum_link = item[\"forum_link\"]\n forumdb.forum_name = item[\"forum_name\"]\n forumdb.forum_description = item[\"forum_description\"]\n forumdb.threads_count = item[\"threads_count\"]\n forumdb.posts_count = item[\"posts_count\"]\n forumdb.forum_last_post = item[\"forum_last_post\"]\n\n try:\n session.add(forumdb)\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()\n\n return item",
"def forum(self, forum_id):\r\n return forums.Forum(self, forum_id)",
"def get_content(self, data):\n self.name = name = data['feed'].get('title')\n for feed in data['entries']:\n title = feed.get('title', 'Absence of title')\n link = feed.get('link', 'Absence of link')\n date = feed.get('published_parsed', 'Absence of date')\n img = get_img_container(link)\n summary_list = []\n links = []\n if feed.get('summary'):\n summary_list = [feed.get('summary')]\n if feed.get('links'):\n uncleaned_links = feed.get('links')\n links = string_handlers.get_links(uncleaned_links)\n img.extend(if_link_is_image(uncleaned_links))\n fields = 'name, title, link, date, img, content, links'\n item = namedtuple('item', fields)._make((name, title, link, date, img, summary_list, links))\n save_feed_into_cache(item)\n self.items.append(item)",
"def parse_items(self):",
"def get_forums(self, project_id, param=None):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/forums/'\n response = zoho_http_client.get(url, self.details, param)\n return parser.get_forums(response)",
"def testAddForum(self):\n board = self.board\n forum_id = 'forum'\n board.addForum(forum_id, 'title', 'description')\n self.failUnless(forum_id in board.objectIds())\n forum = getattr(board, forum_id)\n self.failUnless(IForum.providedBy(forum))",
"def parse(self):\n\t\tfor part in self.mail.walk():\n\t\t\tself.process_part(part)",
"def test_discussion_forum_with_restricted_forums(self):\n # This is a long test, but it saves us from doing the setup\n # twice.\n forum1 = ForumFactory(name=u'ou812forum')\n thread1 = ThreadFactory(forum=forum1, title=u'audio 2')\n PostFactory(thread=thread1)\n\n forum2 = RestrictedForumFactory(name=u'restrictedkeepout')\n thread2 = ThreadFactory(forum=forum2, title=u'audio 2')\n PostFactory(thread=thread2)\n\n self.refresh()\n\n # Get the Advanced Search Form as an anonymous user\n response = self.client.get(reverse('search.advanced'), {'a': '2'})\n eq_(200, response.status_code)\n\n # Regular forum should show up\n assert 'ou812forum' in response.content\n\n # Restricted forum should not show up\n assert 'restrictedkeepout' not in response.content\n\n u = UserFactory()\n g = GroupFactory()\n g.user_set.add(u)\n ct = ContentType.objects.get_for_model(forum2)\n PermissionFactory(\n codename='forums_forum.view_in_forum',\n content_type=ct,\n object_id=forum2.id,\n group=g)\n\n # Get the Advanced Search Form as a logged in user\n self.client.login(username=u.username, password='testpass')\n response = self.client.get(reverse('search.advanced'), {'a': '2'})\n eq_(200, response.status_code)\n\n # Both forums should show up for authorized user\n assert 'ou812forum' in response.content\n assert 'restrictedkeepout' in response.content",
"def extract_titles():\n \"\"\"\n The final data has the mapping post_title -> cat.\n This requires three relations:\n (pid, id) -> feed_url, feed_url -> blog_url, blog_url -> cat.\n Each file contains one raw feed with several titles, thus:\n (pid, id) -> list(post_title, cat)\n \"\"\"\n #(pid, id) -> feed_url\n idvals = cPickle.load(open(prefix + \"idvals.pickle\"))\n #blog_url -> cat\n cats = cPickle.load(open(prefix + \"blogcats.pickle\"))\n #feed_url -> blog_url\n urls = cPickle.load(open(prefix + \"blogurls.pickle\"))\n\n patt = re.compile('<title>(.*?)</title>')\n titles_success = 0\n titles_bad = 0\n successes = 0\n failures = 0\n #iterate through all raw feed HTML files.\n for infile in glob.glob(os.path.join(feeds_path, '*.xml')):\n info = infile.split('.')[0].split('/')[-1]\n pid, id = info.split('-')\n #(pid, id) -> blog\n blog = idvals[(int(pid), int(id))]\n cat = None\n try:\n # blog -> url -> cat\n cat = cats[urls[blog]]\n except KeyError:\n logging.info(\"Could not find category for blog %s. Skipping...\" % blog)\n continue\n try:\n root = etree.parse(infile)\n successes += 1\n except Exception:\n logging.info(\"Title extraction failed for %s.\" % infile)\n failures += 1\n continue\n\n #PARSE THE FILE\n #Get the encoding of the document (doesn't seem to work)\n enc = root.docinfo.encoding\n titles = root.xpath('/rss/channel/item/title') # titles should be here.\n OUT = open(prefix + \"meta/titles.dat\", \"a\")\n if len(titles) == 0: # didn't find titles using that xpath.\n IN = open(infile) # look for the title in HTML instead.\n content = IN.read()\n IN.close()\n titles = patt.findall(content)\n #for each found title, print it to the FINAL log used for research.\n for title in titles:\n if title is not None:\n try:\n print >> OUT, ','.join([blog, cat, str(info),\n title.strip().replace(\",\", \"\")])\n titles_success += 1\n except:\n try:\n print >> ','.join([OUT, blog, cat, str(info),\n title.strip().encode(enc).replace(\",\", \"\")])\n titles_success += 1\n except:\n titles_bad += 1\n logging.info(\"Character encoding failed in file %s.\" % infile)\n else:\n titles_bad += 1\n else:\n for title in titles:\n if title.text is None:\n titles_bad += 1\n continue\n try:\n print >> OUT, ','.join([blog, cat, str(info),\n title.text.strip().encode(enc).replace(\",\", \"\")])\n titles_success += 1\n except:\n logging.info(\"Character encoding failed in file %s.\" % infile)\n titles_bad += 1\n OUT.close()\n logging.info(\"Document Parse Successes: %d\" % successes)\n logging.info(\"Document Parse Failes Failures: %d\" % failures)\n logging.info(\"TOTAL TITLES FETCHED: %d (%d failed)\" %\n (titles_success, titles_bad))",
"def feed2fields(file):\r\n import feedparser\r\n d = feedparser.parse(file)\r\n for entry in d.entries:\r\n date = (time.strftime(\"%Y-%m-%d %H:%M\", entry.updated_parsed)\r\n if hasattr(entry, \"updated_parsed\") else None)\r\n author = entry.author if hasattr(entry, \"author\") else None\r\n tags = [e['term'] for e in entry.tags] if hasattr(entry, \"tags\") else None\r\n\r\n slug = slugify(entry.title)\r\n kind = 'article'\r\n yield (entry.title, entry.description, slug, date, author, [], tags,\r\n kind, \"html\")",
"def forum(self, forum_id):\n return forums.Forum(self, forum_id)",
"def __createTasksForThreads(self):\n try:\n \n self.__total_threads_count = 0\n self.__baseuri = 'http://broncosfreaks.com/forums/'\n self.__last_timestamp = datetime( 1980,1,1 )\n self.__max_threads_count = int(tg.config.get(path='Connector', key=\\\n 'broncosfreaks_maxthreads'))\n self.__setSoupForCurrentUri()\n while self.__processForumUrl():\n try:\n next_page_uri =self.__baseuri + self.soup.find('a', rel='next')['href']\n data_dict = dict(parse_qsl(next_page_uri.split('?')[-1]))\n if 's' in data_dict.keys():\n data_dict.pop('s')\n self.currenturi = self.__baseuri + 'forumdisplay.php?'+ urlencode(data_dict) \n self.__setSoupForCurrentUri()\n except:\n log.exception(self.log_msg('Next Page link not found for url \\\n %s'%self.currenturi))\n break \n \n #log.info(self.log_msg('LINKSOUT: ' + str(len(self.linksOut))))\n #self.linksOut = [] # To Remove\n if self.linksOut:\n updateSessionInfo('Search', self.session_info_out, \\\n self.__last_timestamp , None, 'ForumThreadsPage', \\\n self.task.instance_data.get('update'))\n return True \n except:\n log.info(log_msg('Exception while creating tasks for the url %s'\\\n %self.currenturi)) \n return False",
"def forum(request, forum_id):\n topics = Topic.objects.filter(forum=forum_id).order_by(\"-created\")\n topics = mk_paginator(request, topics, DJANGO_SIMPLE_FORUM_TOPICS_PER_PAGE)\n\n forum = get_object_or_404(Forum, pk=forum_id)\n\n return render_to_response(\"forum/forum.html\", add_csrf(request, topics=topics, pk=forum_id, forum=forum),\n context_instance=RequestContext(request))",
"def add(self, project_id, forum):\n url = base_url + 'portal/' + str(self.portal_id) + '/projects/' + str(project_id) + '/forums/' \n data = parser.to_json(forum)\n if forum.get_upload_file():\n file_list = []\n for value in forum.get_upload_file():\n attachment = {\n 'uploadfile': {\n 'filename': basename(value), \n 'content':open(value).read()\n } \n }\n file_list.append(attachment)\n else:\n file_list = []\n response = zoho_http_client.post(url, self.details, data, None, file_list)\n return parser.get_forums(response)[0]",
"def list_forum_members(request, course_id):\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n course = get_course_by_id(course_id)\r\n has_instructor_access = has_access(request.user, 'instructor', course)\r\n has_forum_admin = has_forum_access(\r\n request.user, course_id, FORUM_ROLE_ADMINISTRATOR\r\n )\r\n\r\n rolename = request.GET.get('rolename')\r\n\r\n # default roles require either (staff & forum admin) or (instructor)\r\n if not (has_forum_admin or has_instructor_access):\r\n return HttpResponseBadRequest(\r\n \"Operation requires staff & forum admin or instructor access\"\r\n )\r\n\r\n # EXCEPT FORUM_ROLE_ADMINISTRATOR requires (instructor)\r\n if rolename == FORUM_ROLE_ADMINISTRATOR and not has_instructor_access:\r\n return HttpResponseBadRequest(\"Operation requires instructor access.\")\r\n\r\n # filter out unsupported for roles\r\n if not rolename in [FORUM_ROLE_ADMINISTRATOR, FORUM_ROLE_MODERATOR, FORUM_ROLE_COMMUNITY_TA]:\r\n return HttpResponseBadRequest(strip_tags(\r\n \"Unrecognized rolename '{}'.\".format(rolename)\r\n ))\r\n\r\n try:\r\n role = Role.objects.get(name=rolename, course_id=course_id)\r\n users = role.users.all().order_by('username')\r\n except Role.DoesNotExist:\r\n users = []\r\n\r\n def extract_user_info(user):\r\n \"\"\" Convert user to dict for json rendering. \"\"\"\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }\r\n\r\n response_payload = {\r\n 'course_id': course_id.to_deprecated_string(),\r\n rolename: map(extract_user_info, users),\r\n }\r\n return JsonResponse(response_payload)",
"def __addThreadAndPosts(self):\n try:\n self.genre = \"Review\"\n self.__hierarchy = []\n self.__baseuri = 'http://broncosfreaks.com/forums/'\n self.__task_elements_dict = {\n 'priority':self.task.priority,\n 'level': self.task.level,\n 'last_updated_time':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n 'pickup_date':datetime.strftime(datetime.utcnow(),\"%Y-%m-%dT%H:%M:%SZ\"),\n 'connector_instance_log_id': self.task.connector_instance_log_id,\n 'connector_instance_id':self.task.connector_instance_id,\n 'workspace_id':self.task.workspace_id,\n 'client_id':self.task.client_id,\n 'client_name':self.task.client_name,\n 'versioned':False,\n 'category':self.task.instance_data.get('category',''),\n 'task_log_id':self.task.id }\n self.__setSoupForCurrentUri()\n self.__setParentPage()\n question_post = self.soup.find('div', id=re.compile('^edit.*?'))\n self.__addPost(question_post, True)\n self.__goToLastPage() \n while self.__iteratePosts():\n try:\n next_page_uri = self.soup.find('a', text='<',rel='prev').parent['href']\n data_dict = dict(parse_qsl(next_page_uri.split('?')[-1]))\n if 's' in data_dict.keys():\n data_dict.pop('s')\n self.currenturi = self.__baseuri + 'showthread.php?'+ urlencode(data_dict) \n self.__setSoupForCurrentUri()\n except:\n log.exception(self.log_msg('Next Page link not found for url \\\n %s'%self.currenturi))\n break\n return True\n \n\n \"\"\"#try: \n # question_post = self.soup.find('div', id=re.compile ('^edit\\d+'))\n #except:\n # log.info(self.log_msg('Question post cannot be added'))\n #main_page_soup = copy.copy(self.soup)\n # self.__addPost(question_post, True)\n except:\n log.info(self.log_msg('page not added'))\n self.__goToLastPage(main_page_soup)\n while True:\n main_page_soup = copy.copy(self.soup)\n #log.info(self.log_msg('Question post cannot be added'))\n if not self.__iteratePosts():\n break\n try:\n self.currenturi = main_page_soup.find('a',text='<').parent['href']\n log.info(self.log_msg('uri %s'%self.currenturi)) \n except:\n \n log.info(self.log_msg('No Previous URL found for url \\\n %s'%self.currenturi))\n break\n return True\n \"\"\" \n \n \n except:\n log.exception(self.log_msg('Exception while add the theread posts \\\n for the url %s'%self.currenturi))\n return False",
"def _populate_channels(self, channels):\n Channel = get_model('channels', 'Channel')\n\n for parser_chanel in channels:\n name = parser_chanel.find('h6').text\n channel, created = Channel.objects.get_or_create(\n name=name,\n slug=slugify(name),\n show_in_menu=True,\n published=True,\n user=self._user\n )\n\n # Catch the sub-channels\n sub_channels = parser_chanel.findAll(\n 'a', {'href': re.compile('^(?!(#))')})\n\n for parser_sub_chanel in sub_channels:\n name = parser_sub_chanel.text\n sub_channel, created = Channel.objects.get_or_create(\n name=name,\n slug=slugify(name),\n show_in_menu=True,\n published=True,\n user=self._user,\n parent=channel\n )\n self._populate_posts(sub_channel, parser_sub_chanel['href'])"
] | [
"0.71872187",
"0.63297033",
"0.62052065",
"0.6028845",
"0.5970876",
"0.5775043",
"0.5596723",
"0.55596906",
"0.5426935",
"0.5399257",
"0.53686744",
"0.5265473",
"0.5253417",
"0.52366894",
"0.51989734",
"0.5187849",
"0.5074167",
"0.5050157",
"0.50387317",
"0.49944046",
"0.49649176",
"0.49634778",
"0.4962644",
"0.49383414",
"0.4915675",
"0.49002692",
"0.48924467",
"0.48493248",
"0.48114127",
"0.4798814"
] | 0.7352012 | 0 |
header line associated with MAF block (first position of list) | def block_header(self):
return self._current_block[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def header(self):\n ...",
"def header(self) -> NoReturn:\n self.set_x(self.t_margin + self.b_margin)\n self.ln(self.line_height)",
"def __make_header__(self):\n header = lashead.Header(point_format=0)\n return header",
"def section_header(text):\n\n print \"---- %s ----\" % text",
"def header(self):\n return self[0]",
"def section_name_in_first_line(): # noqa: D416",
"def header(self):\r\n raise NotImplementedError",
"def headerFA(block_size,extended=True):\n if(extended):\n header =[\"Address\",\"Tag\",\"Real Address\"]\n else:\n header =[\"Address\"]\n for x in range(0,block_size):\n header.append(\"W%i\"%(x))\n header.append(\"Result\")\n return header",
"def test_first_line_amiramesh(self):\n self.assertEqual(self.header.designation.filetype, 'AmiraMesh')",
"def parse_header(self):",
"def getHeaders(self):\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tif len(self.line) == 7:\n\t\t\tself.header.kod = self.line[0]\n\t\t\tself.header.ver = self.line[1]\n\t\t\tpID_date = self.line[2]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_date)\n\t\t\tpID_time = self.line[3]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_time)\n\t\t\tself.header.knod = int(self.line[4])\n\t\t\tself.header.nps = int(self.line[5])\n\t\t\tself.header.rnr = int(self.line[6])\n\t\telif len(self.line) == 3:\n\t\t\tself.header.knod = int(self.line[0])\n\t\t\tself.header.nps = int(self.line[1])\n\t\t\tself.header.rnr = int(self.line[2])\n\t\t\t\n\n\t\tself.header.title = self.mctalFile.readline().strip()\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tself.header.ntal = int(self.line[1])\n\n\t\tif self.header.ntal == 0:\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mNo tallies in this MCTAL file. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tif len(self.line) == 4:\n\t\t\tself.header.npert = int(self.line[3])\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mMCTAL file with perturbation card. Not supported. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\twhile self.line[0].lower() != \"tally\":\n\t\t\tfor l in self.line: self.header.ntals = np.append(self.header.ntals,int(l))\n\t\t\tself.line = self.mctalFile.readline().split()",
"def _horizontal_header(self):\n return self.header()",
"def _horizontal_header(self):\n return self.header()",
"def rehydrate_atx_heading(self, next_token):\n\n self.block_stack.append(next_token)\n return next_token.extracted_whitespace + \"\".rjust(next_token.hash_count, \"#\")",
"def Show_Headers( self ):\r\n self.system.Change_Seq( \"Header\" )",
"def write_header(self, total_blocks):\n self.write_string('ASEF')\n self.write('2H', (1, 0))\n self.write('i', total_blocks)",
"def first(self):\n return self._make_position(self._header._next)",
"def header(self):\n\t\tthismsg = \"\\r\\n\"+self.ESC + \"0m \" +self.A220 + self.A220 + self.A220 + self.A220 + self.A220 +\" \" + self.ESC + \"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A220+self.A220+self.A219+self.A219+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A220+self.A220+self.ESC+\"0m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1m \"+self.ESC+\"31mS\"+self.ESC+\"0;31mAGA\"+self.ESC+\"37m \"+self.A219+self.A219+self.ESC+\"30;47mo\"+self.ESC+\"37;40m\"+self.A219+self.ESC+\"1;47m\"+self.A176+self.A176+self.A177+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1m \"+self.ESC+\"31mO\"+self.ESC+\"0;31mF THE\"+self.ESC+\"37m \"+self.A219+self.A219+self.ESC+\"30;47mO\"+self.ESC+\"37;40m\"+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A177+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0m \"+self.A220+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.ESC+\"0m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1m \"+self.ESC+\"31mR\"+self.ESC+\"0;31mED\"+self.ESC+\"37m \"+self.A219+self.ESC+\"30;47mo\"+self.ESC+\"37;40m\"+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A176+self.A177+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"0m \"+self.A223+self.A219+self.ESC+\"1;47m\"+self.A176+self.A219+self.A219+self.A219+self.ESC+\"40m\"+self.A220+self.A220+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m \"+self.ESC+\"1mD\"+self.ESC+\"0;31mRAGON 0.9.9\"+self.ESC+\"37m \"+self.A223+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A177+self.A177+self.A178+self.A219+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A223+self.ESC+\"0m \"+self.A219+self.ESC+\"1;47m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m concept\"+self.ESC+\"37m \"+self.A223+self.ESC+\"1;47m\"+self.A176+self.A177+self.A177+self.A178+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A223+self.ESC+\"0m \"+self.ESC+\"1m\"+self.A220+self.ESC+\"0m \"+self.ESC+\"1m\"+self.A220+self.ESC+\"0m \"+self.ESC+\"31m\"+self.A220+self.A220+self.ESC+\"1;41m\"+self.A176+self.A178+\" \"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m Seth Robinson \"+self.ESC+\"37m\"+self.A222+\" \"+self.A223+self.A223+self.ESC+\"1;47m\"+self.A178+self.ESC+\"40m\"+self.A219+self.A219+self.A219+self.A219+self.A223+self.A223+self.ESC+\"0m \"+self.A220+self.ESC+\"1;47m\"+self.A220+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A220+self.A220+self.ESC+\"1;47m\"+self.A220+self.ESC+\"40m\"+self.A223+self.A223+\" \"+self.ESC+\"0m\"+self.A223+self.A219+self.A220+self.ESC+\"1m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"31m by\"+self.ESC+\"0m 
\"+self.A219+\" \"+self.A220+self.ESC+\"1;47m\"+self.A220+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A220+self.ESC+\"1;47m\"+self.A220+self.A220+self.A219+self.ESC+\"40m\"+self.A223+self.ESC+\"0m \"+self.A223+self.ESC+\"1;47m\"+self.A176+self.A219+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A178+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A176+\" \"+self.A176+self.ESC+\"0;31m\"+self.A220+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"1;34m J\"+self.ESC+\"0;34m.\"+self.ESC+\"1mT\"+self.ESC+\"0;34m.\"+self.ESC+\"1mS\"+self.ESC+\"0;34mage\"+self.ESC+\"0m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A221+\" \"+self.A220+self.ESC+\"1;47m\"+self.A177+self.A176+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A220+self.ESC+\"1;47m\"+self.A220+self.A220+self.A219+self.ESC+\"40m\"+self.A223+self.ESC+\"0m \"+self.A223+self.ESC+\"1;47m\"+self.A177+self.A219+self.A219+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A176+self.A178+\" \"+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"1;41m\"+self.A176+self.A178+self.A176+self.A176+self.A177+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A219+self.A219+\" \"+self.ESC+\"1;47m\"+self.A176+self.A177+self.A219+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"0m\"+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"41m\"+self.A223+self.ESC+\"0;31m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A177+self.A176+\" \"+self.A220+self.A220+self.A220+self.A220+self.A223+self.A220+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A177+self.ESC+\"0;31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A178+\" \"+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A178+self.A178+self.A176+self.A177+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m \"+self.A219+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;31m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.A223+\" \"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.A223+\" \"+self.A176+self.A176+\" \"+self.ESC+\"1;41m\"+self.A176+\" \"+self.A178+self.A178+self.A219+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A178+self.A176+self.A177+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m \"+self.A219+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A177+self.A176+self.ESC+\"37m \"+self.ESC+\"31m\"+self.A178+self.A177+self.A177+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+\" \"+self.A177+self.A178+self.A219+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A219+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"1;41m\"+self.A178+\" \"+self.A177+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.A219+\" 
\"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A177+self.A176+\" \"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A178+self.A177+self.A219+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.A178+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.A220+self.A223+self.ESC+\"1;41m\"+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A219+self.A178+self.A178+self.A177+self.A176+self.A223+\" \"+self.A220+self.A220+\" \"+self.A223+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A177+self.A178+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A177+self.A178+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A178+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A221+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A223+self.A223+self.A223+\" \"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A178+self.A177+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+\" \"+self.A177+self.A219+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+self.A178+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A222+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"0;37;40m\"+self.A221+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+\" \"+self.A223+self.A223+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A178+self.A178+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+\" \"+self.A178+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A222+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;31m\"+self.A223+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+\" \"+self.A223+self.A178+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.A178+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+\" \"+self.A178+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A219+self.ESC+\"1;30;47m\"+self.A177+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A222+self.A219+self.ESC+\"1;41m\"+self.A176+self.A176+self.ESC+\"0;31m\"+self.A221+\" \"+self.ESC+\"1;5;32m\"+self.A220+self.A220+self.A223+\" \"+self.ESC+\"0;31;41m \"+self.ESC+\"40m\"+self.A178+self.ESC+\"41m 
\"+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A220+self.A220+self.A220+self.A220+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"1;41m\"+self.A176+self.A177+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"1;5;32m\"+self.A219+self.A219+self.A223+\" \"+self.ESC+\"0;31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.A219+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A178+self.A223+\" \"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.A219+self.A178+self.ESC+\"0;37;40m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A222+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.ESC+\"0;37;40m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A178+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"1;5;32m\"+self.A223+\" \"+self.ESC+\"0;31m\"+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.A219+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A223+\" \"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A177+self.A219+self.A178+self.ESC+\"37;40m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0;37;40m\"+self.A221+\" \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A176+self.ESC+\"0;31m\"+self.A221+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;31m\"+self.A223+self.A223+\" \"+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A176+self.ESC+\"0;31m\"+self.A220+self.A220+self.ESC+\"37m \"+self.ESC+\"1;31;41m\"+self.A176+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A177+self.A219+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.ESC+\"1;30;47m\"+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.A220+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A176+self.A222+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.ESC+\"37m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+\" \"+self.ESC+\"1;41m\"+self.A176+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A177+\" \"+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A219+self.A221+\" \"+self.ESC+\"31;41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A177+self.A177+self.A176+self.A178+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A222+self.ESC+\"41m 
\"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A178+self.A220+self.A220+\" \"+self.ESC+\"1;41m\"+self.A178+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1m\"+self.A220+\" \"+self.ESC+\"0m\"+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A219+self.A178+self.A178+self.A177+self.A176+self.A176+self.A177+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A178+self.A177+self.A177+self.A176+\" \"+self.ESC+\"40m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A223+self.A178+\" \"+self.ESC+\"1;41m\"+self.A177+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A176+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A223+self.ESC+\"1m\"+self.A219+self.A220+\" \"+self.ESC+\"0;31;41m \"+self.A177+self.A178+self.A176+self.A176+\" \"+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A219+self.A176+self.A178+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A219+self.A178+self.A177+self.A176+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A223+self.ESC+\"1;47m\"+self.A223+self.ESC+\"40m\"+self.A219+self.A223+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A178+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"1;37;47m\"+self.A222+self.ESC+\"40m\"+self.A221+self.A223+self.A220+\" \"+self.ESC+\"0;31m\"+self.A177+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A221+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;31m\"+self.A223+\" \"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+\" \"+self.A177+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.A223+self.ESC+\"31m\"+self.A222+self.ESC+\"1;41m\"+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"1;37;47m\"+self.A222+self.ESC+\"40m\"+self.A221+\" \"+self.ESC+\"47m\"+self.A222+self.ESC+\"40m\"+self.A221+\" \"+self.ESC+\"0;31m\"+self.A178+self.A177+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"33m\"+self.A220+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"40m\"+self.A220+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A176+\" \"+self.A178+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"31;41m \"+self.A176+self.A176+self.ESC+\"37;40m 
\"+self.A220+self.ESC+\"1m\"+self.A219+self.ESC+\"0m\"+self.A223+self.ESC+\"1m\"+self.A219+self.A221+\" \"+self.A223+\" \"+self.A223+self.A220+\" \"+self.ESC+\"0;31;41m \"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"33m\"+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A178+self.A177+self.A176+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A177+\" \"+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A220+self.ESC+\"1m\"+self.A219+\" \"+self.A219+self.A221+\" \"+self.A223+\" \"+self.A220+\" \"+self.A223+self.A220+\" \"+self.ESC+\"0;31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"1;33;43m\"+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A219+self.A220+self.A222+self.ESC+\"1;41m\"+self.A219+self.A177+self.A176+self.ESC+\"0;31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+\" \"+self.A178+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.A223+self.ESC+\"1m\"+self.A220+\" \"+self.A223+\" \"+self.A220+self.A223+self.A220+\" \"+self.A223+\" \"+self.A223+self.ESC+\"0;31m\"+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.A223+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+\" \"+self.ESC+\"1;33m\"+self.A223+self.ESC+\"43m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A223+self.ESC+\"0;33m\"+self.A220+self.A219+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A177+self.A176+self.ESC+\"0;31m\"+self.A223+\" \"+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A177+self.A178+\" \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1m\"+self.A220+\" \"+self.ESC+\"0;31m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A223+self.ESC+\"37m \"+self.ESC+\"1;33;43m\"+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A176+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.A176+self.A177+self.A178+\" \"+self.A219+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"30mÙ\"+self.ESC+\"31m\"+self.A222+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A177+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A223+self.A223+self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A223+self.ESC+\"43m\"+self.A219+self.ESC+\"0;33m\"+self.A223+self.A223+self.A223+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+self.A222+self.A223+\" \"+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A177+self.A176+\" \"+self.A219+\" \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"31m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"37m 
\"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"43m\"+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A220+self.ESC+\"1;41m\"+self.A176+self.A177+self.A178+self.A176+self.A176+\" \"+self.A219+self.ESC+\"0;37;40m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.ESC+\"1;33;43m\"+self.A219+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"1;41m\"+self.A176+self.A177+self.A177+\" \"+self.ESC+\"0;31m\"+self.A223+self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += \" \"+self.ESC+\"1;33;43m\"+self.A219+self.A178+self.ESC+\"40m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.A223+self.A220+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A177+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"1;41m\"+self.A177+self.A177+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"43m\"+self.A219+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A219+self.A178+self.A177+self.A177+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.ESC+\"43m\"+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.A220+self.A220+self.A220+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A178+self.A177+self.A176+self.ESC+\"0;31m\"+self.A223+self.A223+self.A223+self.A223+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"43m\"+self.A219+self.A219+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A219+self.A219+\" \"+self.ESC+\"31m\"+self.A223+self.A223+\" \"+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"40m\"+self.A220+self.A220+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33;43m\"+self.A219+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.A223+self.ESC+\"31m\"+self.A220+self.A220+self.ESC+\"41m \"+self.ESC+\"1m\"+self.A176+self.A178+self.A177+\" \"+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.ESC+\"43m\"+self.A178+self.ESC+\"0;33m\"+self.A220+self.A219+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A176+self.A178+self.A177+self.A177+self.A176+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.A223+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33;43m\"+self.A178+self.A177+self.A176+self.ESC+\"0;33m\"+self.A223+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A176+self.A178+self.A178+\" \"+self.A177+\" 
\"+self.A176+self.A176+\" \"+self.ESC+\"0;37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"37;40m \"+self.ESC+\"31m\"+self.A223+self.ESC+\"41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"1;33m\"+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.A223+\" \"+self.ESC+\"31;41m \"+self.ESC+\"1m\"+self.A176+self.A219+self.A178+self.A178+self.A177+self.A177+self.A176+\" \"+self.A176+\" \"+self.A176+self.A176+\" \"+self.ESC+\"0;31m\"+self.A220+\" \"+self.ESC+\"41m \"+self.ESC+\"37;40m \"+self.ESC+\"31;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"0;37m \"+self.ESC+\"1;33m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+\" \"+self.ESC+\"30;41m \"+self.ESC+\"1;31mShatterstar [W/X] \"+self.ESC+\"0;37;40m \"+self.ESC+\"30;41m \"+self.ESC+\"37;40m \"+self.ESC+\"30;41m \"+self.ESC+\"0m\\r\\n\"\n\t\tthismsg += self.ESC+\"37m \"+self.ESC+\"0m\\r\\n\"\n\t\treturn thismsg",
"def parse_header(self): # -> list[Unknown]:\n ...",
"def get_primary_header(input_lst):\n lst = [\n # 12345678 12345678901234567890123456789012345678901234567\n ('SIMPLE' , 'file does conform to FITS standard' ),\n ('BITPIX' , 'number of bits per data pixel' ),\n ('NAXIS' , 'number of data axes' ),\n ('NAXIS1' , 'length of data axis 1' ),\n ('NAXIS2' , 'length of data axis 2' ),\n ('BSCALE' , 'factor to linearly scale the data pixel values' ),\n ('BZERO' , 'offset to linearly scale the data pixel values' ),\n ('BUNIT' , 'physical unit of the data pixel values' ),\n ('BLANK' , 'value representing undefined physical values' ),\n ('DISPAXIS', 'main dispersion axis of the spectral data' ),\n ('DATATYPE', 'type of data (calibration/science)' ),\n ('OBJECT' , 'object observed' ),\n ('DATE-OBS', 'start date of observation run' ),\n ('MJD-OBS' , 'Modified Julian Date of observation run' ),\n ('TIMESYS' , 'time system' ),\n ('FRAMEID' , 'frame ID in observation run' ),\n ('RA' , 'right ascension of object' ),\n ('DEC' , 'declination of object' ),\n ('RADESYS' , 'name of reference frame' ),\n ('EQUINOX' , 'epoch of the mean equator and equinox in years' ),\n ('EXPTIME' , 'exposure time in seconds' ),\n ('PHO-OFF' , 'offset of photon middle time' ),\n ('UTC-STA' , 'UTC at start of exposure' ),\n ('UTC-MID' , 'UTC at middle of exposure' ),\n ('UTC-PHO' , 'UTC at photon middle of exposure' ),\n ('UTC-END' , 'UTC at end of exposure' ),\n ('LT-STA' , 'local time at start of exposure' ),\n ('LT-MID' , 'local time at middle of exposure' ),\n ('LT-PHO' , 'local time at photon middle of exposure' ),\n ('LT-END' , 'local time at end of exposure' ),\n ('LST-STA' , 'local sidereal time at start' ),\n ('LST-MID' , 'local sidereal time at middle' ),\n ('LST-PHO' , 'local sidereal time at photon middle' ),\n ('LST-END' , 'local sidereal time at end' ),\n ('MJD-STA' , 'Modified Julian Date of UTC-STA' ),\n ('MJD-MID' , 'Modified Julian Date of UTC-MID' ),\n ('MJD-PHO' , 'Modified Julian Date of UTC-PHO' ),\n ('MJD-END' , 'Modified Julian Date of UTC-END' ),\n ('AIRM-STA', 'airmass at start of exposure' ),\n ('AIRM-MID', 'airmass at middle of exposure' ),\n ('AIRM-PHO', 'airmass at photon middle of exposure' ),\n ('AIRM-END', 'airmass at end of exposure' ),\n ('AIRMASS' , 'effective airmass during exposure' ),\n ('ALT-STA' , 'telescope altitude at start' ),\n ('ALT-MID' , 'telescope altitude at middle' ),\n ('ALT-PHO' , 'telescope altitude at photon middle' ),\n ('ALT-END' , 'telescope altitude at end' ),\n ('AZ-STA' , 'telescope azimuth at start' ),\n ('AZ-MID' , 'telescope azimuth at middle' ),\n ('AZ-PHO' , 'telescope azimuth at photon middle' ),\n ('AZ-END' , 'telescope azimuth at end' ),\n ('MOON-AGE', 'days past new moon at middle of exposure' ),\n ('MOON-ALT', 'moon altitude at middle of exposure' ),\n ('MOON-AZ' , 'moon azimuth at middle of exposure' ),\n ('MOON-DIS', 'angular distance to moon (in degree)' ),\n ('TWI-END' , 'end time of astronomical twilight in UTC' ),\n ('TWI-STA' , 'start time of astronomical twilight in UTC' ),\n ('PROP-ID' , 'proposal ID' ),\n ('PROP-TIT', 'title of proposal' ),\n ('PROP-PI' , 'principal investigator of proposal' ),\n ('OBSERVER', 'people who acquire the data' ),\n ('OBSERVAT', 'observatory where the data is acquired' ),\n ('TELESCOP', 'telescope used to acquire the data' ),\n ('OBS-LONG', 'longitude of the telescope' ), \n ('OBS-LAT' , 'latitude of the telescope' ),\n ('OBS-ALT' , 'altitude of the telescope in meter' ),\n ('INSTRUME', 'instrument used to acquire the data' ),\n ('SETUP-ID', 'ID of the instrument setup' 
),\n ('SLT-WID' , 'slit width (in mm)' ),\n ('SLT-LEN' , 'slit length (in mm)' ),\n ('NCHANNEL', 'number of simultaneous channels' ),\n ('CHANNEL1', 'object of channel 1' ),\n ('CHANNEL2', 'object of channel 2' ),\n ('FILTER1' , 'filter in channel 1' ),\n ('FILTER2' , 'filter in channel 2' ),\n ('EXPMETER', 'usage of exposure meter' ),\n ('SHAK_STA', 'status of fiber shaker (on/off)' ),\n ('SHAK_FRE', 'frequency of fiber shaker (in Hz)' ),\n ('SHAK_AMP', 'amplitude of fiber shaker' ),\n ('DETECTOR', 'detector used to acquire the data' ),\n ('GAIN' , 'readout gain of detector (in electron/ADU)' ),\n ('RO-SPEED', 'read out speed of detector' ),\n ('RO-NOISE', 'read out noise of detector' ),\n ('BINAXIS1', 'binning factor of data axis 1' ),\n ('BINAXIS2', 'binning factor of data axis 2' ),\n ('TEMP-DET', 'temperature of detector (in degree)' ),\n ('TEMP-BOX', 'temperature inside instrument box (in degree)' ),\n ('TEMP-ROO', 'temperature inside instrument room (in degree)' ),\n ('PRES-BOX', 'pressure inside instrument box (in hPa)' ),\n ('DATE' , 'file creation date' ),\n ('ORI-NAME', 'original filename' ),\n ('ORIGIN' , 'organization responsible for the FITS file' ),\n ('HEADVER' , 'version of header' ),\n ]\n now = datetime.datetime.now()\n header_lst = []\n for key, comment in lst:\n if key in input_lst.keys():\n value = input_lst[key]\n else:\n value = None\n if type(value) == type('a'):\n value = \"'%-8s'\"%value\n value = value.ljust(20)\n elif type(value) == type(u'a'):\n value = value.encode('ascii','replace')\n value = \"'%-8s'\"%value\n value = value.ljust(20)\n elif type(value) == type(1):\n value = '%20d'%value\n elif type(value) == type(1.0):\n if key[0:4]=='MJD-':\n # for any keywords related to MJD, keep 6 decimal places.\n # for reference, 1 sec = 1.16e-5 days\n value = '%20.6f'%value\n else:\n value = str(value).rjust(20)\n value = value.replace('e','E')\n elif type(value) == type(now):\n # if value is a python datetime object\n value = \"'%04d-%02d-%02dT%02d:%02d:%02d.%03d'\"%(\n value.year, value.month, value.day,\n value.hour, value.minute, value.second,\n int(round(value.microsecond*1e-3))\n )\n elif value == True:\n value = 'T'.rjust(20)\n elif value == False:\n value = 'F'.rjust(20)\n elif value == None:\n value = \"''\".ljust(20)\n else:\n print('Unknown value: {}'.format(value))\n string = '%-8s= %s / %s'%(key,value,comment)\n if len(string)>=80:\n string = string[0:80]\n else:\n string = string.ljust(80)\n\n header_lst.append(string)\n\n return header_lst",
"def header_level(line):\n i = 0\n title = line + \"e\"\n while title[0] == \"#\":\n i += 1\n title = title[1:]\n return i",
"def header(self, format=None):\n return [\" ID \",\n \"East\",\n \"North\",\n \"TARGET ELEV\",\n \" LENGTH\",\n \" AZ\",\n \" DIP\",\n \"PLAN ELEV\"]",
"def rehydrate_setext_heading(self, next_token):\n self.block_stack.append(next_token)\n return next_token.extracted_whitespace",
"def header(self) -> List:\n return self.rows[0]",
"def headerDA(blocks,block_size,extended):\n if(extended):\n header =[\"Address\",\"Tag\",\"Real Address\",\"Index\",\"WordOffset\",\"ByteOffset\"]\n else:\n header =[\"Address\"]\n for i in range(0,blocks):\n for x in range(0,block_size):\n header.append(\"B%i W%i\"%(i,x))\n header.append(\"Result\")\n return header",
"def pp_file_header(self):\n self.separator()\n for item in self.file_header:\n print(item.ljust(27, ' ') + \": {}\".format(self.file_header[item]))\n \n self.separator()",
"def header(self, text, level, raw=None):\n return [[MdStyleInstructionCell('h{}'.format(level))] + text]",
"def _parse_dotlist_header(line):\n m = DOTLIST_HEADER_RE.match(line)\n\n # name, color, master\n return m.group(1), m.group(2), utils.slugify(m.group(3))",
"def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"",
"def colorize_headlines_visitor(c, p, item):\n if p.h.startswith(\"!= \"):\n f = item.font(0)\n f.setBold(True)\n item.setFont(0, f)\n raise leoPlugins.TryNext"
] | [
"0.6755511",
"0.63866097",
"0.6302058",
"0.62535906",
"0.62517387",
"0.62265146",
"0.62243164",
"0.61960685",
"0.61885786",
"0.61462426",
"0.613061",
"0.61277497",
"0.61277497",
"0.6099594",
"0.6066107",
"0.6057303",
"0.60342145",
"0.60192156",
"0.60112727",
"0.6006213",
"0.599501",
"0.5983414",
"0.59649247",
"0.5945996",
"0.59430754",
"0.5864512",
"0.5864329",
"0.58493423",
"0.58100677",
"0.5786463"
] | 0.64879733 | 1 |
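The block_header record above returns the first element of the parser's current block. For context: in the Multiple Alignment Format (MAF) an alignment block starts with an "a" line (usually carrying a score) followed by "s" sequence lines, so index 0 of a block split into lines is exactly that header line. The sketch below is illustrative only — the Parser class, its constructor, and the example coordinates are assumptions, not part of the dataset row.

# Minimal, runnable illustration of the block_header idea (all names and values are made up).
maf_block = [
    "a score=23262.0",
    "s hg38.chr1    10917 479 + 248956422 GCACAC...",
    "s panTro4.chr1 11029 479 + 228573443 GCACAC...",
]

class Parser(object):
    def __init__(self, block_lines):
        self._current_block = block_lines

    @property
    def block_header(self):
        # header line associated with the MAF block (first position of the list)
        return self._current_block[0]

print(Parser(maf_block).block_header)  # -> a score=23262.0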
A generator that splits a full chromosome MAF file into subfiles. Each time a file is completed the generator yields the name of the completed file. | def split_file(self):
    # process lines into blocks with the Parser until EOF triggers StopIteration
    while self.maf_lines:
        try:
            # reset counters and open a new file at the top of the loop AFTER
            # the most recent yield
            if self._stop:
                self._yield(new_file=True)
            # try to get the next block from the Parser and write it to the current file
            block_string = next(self.parser.get_block(self.maf_lines))
            self.current_file.write(block_string)
            # update the char count for the current file
            self.char_count += len(block_string)
            # if the char count crosses the limit, yield the current file name and start a new file
            if self._stop:
                yield self.current_filename
        except StopIteration:
            self._yield(new_file=False)
            yield self.current_filename
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mbatGen(file_obj, mbatsize):\n for _, lines in enumerate(itertools.islice(file_obj, mbatsize)): \n #TODO(Akshay): Add filtering condition here, if any.\n yield lines",
"def split_fasta(infile, seqs_per_file, outfile_prefix, working_dir=''):\r\n if seqs_per_file <= 0:\r\n raise ValueError(\"seqs_per_file must be > 0!\")\r\n\r\n seq_counter = 0\r\n out_files = []\r\n if working_dir and not working_dir.endswith('/'):\r\n working_dir += '/'\r\n create_dir(working_dir)\r\n\r\n for seq_id, seq in parse_fasta(infile):\r\n if seq_counter == 0:\r\n current_out_fp = '%s%s.%d.fasta' \\\r\n % (working_dir, outfile_prefix, len(out_files))\r\n current_out_file = open(current_out_fp, 'w')\r\n out_files.append(current_out_fp)\r\n current_out_file.write('>%s\\n%s\\n' % (seq_id, seq))\r\n seq_counter += 1\r\n\r\n if seq_counter == seqs_per_file:\r\n current_out_file.close()\r\n seq_counter = 0\r\n\r\n if not current_out_file.closed:\r\n current_out_file.close()\r\n\r\n return out_files",
"def fasta_read_generator(file_handler):\r\n seq = []\r\n name = ''\r\n for line in file_handler:\r\n if line[0] == '>':\r\n sequence = ''.join(seq)\r\n if name: # only yield when we already have all data for the first sequence\r\n yield name, sequence\r\n name = line.rstrip()[1:] # omitting the leading >\r\n seq = []\r\n else:\r\n seq += [line]#.rstrip()] # keep line breaks\r\n sequence = ''.join(seq)\r\n yield name, sequence # don't forget the last sequence\r",
"def FileGen(TargetDir, extension = \".gbk\",subset_tag = \"final\"):\n \n for F in next(os.walk(TargetDir))[2]:\n print(F)\n if F[-1*len(extension):] == extension:\n if F.split(\".\")[-2][:len(subset_tag)] == subset_tag:\n yield(TargetDir + '/' + F)",
"def split_decode_file():\n # split files by chromosome\n header = []\n current_chrom = 'chr1'\n # file_template = decode_folder + '/{}.deCODE_2019.GRCh38.txt'\n file_template = decode_folder + '/{}.deCODE_2019_hg19.txt'\n decode_file = decode_folder + '/aau1043_DataS3_hg19_liftOver.bed'\n w = open(file_template.format(current_chrom), 'a')\n print('NOTE: appending to map files, not overwriting. may cause duplicates')\n with open(decode_file, 'r') as f:\n for line in f:\n # save the header info\n if line.startswith('#'):\n header.append(line)\n # save the column labels\n elif line.startswith('Chr'):\n header.append('# ' + line)\n # write header to first file now\n w.write(''.join(header))\n # the remaining lines are data\n else:\n # get the chromosome for the current line\n ch = line.split()[0]\n # if the chromosome matches the open file, write to it\n if ch == current_chrom:\n w.write(line)\n # if a new chromosome arises, switch to a new writefile\n else:\n w.close()\n current_chrom = ch\n w = open(file_template.format(current_chrom), 'a')\n # write header to file\n w.write(''.join(header))\n w.write(line)\n\n # close the last open file\n w.close()",
"def _chunks(filename, start):\n with open(filename, 'r') as f:\n buffer = []\n for line in f:\n if line.startswith(start):\n if buffer:\n yield buffer\n buffer = []\n else:\n buffer.append(line.strip())",
"def splitFile(f, rootdir=\"/tmp\", splitCmd=\"/usr/bin/split\", chunkSize=\"100m\"):\n d = str(uuid.uuid4())\n path = os.path.join(rootdir, d)\n # I want it to fail hard here\n os.makedirs(path)\n prefix = os.path.join(path, \"chunk-\")\n subprocess.check_call([splitCmd, \"-b\", chunkSize, \"-d\", \"-a\", \"5\", f, prefix])\n chunks = glob.glob(os.path.join(path, \"chunk-*\"))\n chunks.sort()\n return chunks",
"def full_chromosomes(reader):\n for line in reader.header.get_lines(\"contig\"):\n if line.id in CHROMS:\n name = line.id\n length = line.length or 1_000_000_000\n yield \"{}:{}-{}\".format(name, 1, length)",
"def generate_chunk(self, filename):\n\n # open resource file in binary\n with open(filename, 'rb') as resource:\n\n # instantiate chunk start byte and trailing line string\n p = 0\n overlap = ''\n\n while p <= self.file_size:\n\n try:\n if self.file_size - p < self.chunk_size:\n buffer = overlap + resource.read(self.file_size - p).decode(\"UTF-8\")\n else:\n buffer = overlap + resource.read(self.chunk_size).decode(\"UTF-8\")\n except:\n p += self.chunk_size\n continue\n\n # remove and store trailing sentence\n buffer, overlap = buffer.rsplit('\\n', maxsplit=1)\n\n yield buffer\n\n p += self.chunk_size",
"def elements(filelist, start, end):\n process = False\n n = 1\n with open(filelist) as fh:\n for line in fh:\n fname = line.strip()\n if n == start:\n process = True\n if n > end:\n return\n if process:\n yield (n, fname)\n n += 1",
"def FASTA_iterator (fasta_filename):\n file=open(fasta_filename,\"r\")\n seq=''\n for line in file:\n if line[0]==\">\":\n if seq != \"\":\n yield (lastid,seq)\n seq=''\n lastid=line.rstrip()[1:]\n else:\n lastid=line.rstrip()[1:]\n else:\n seq += line.rstrip()\n if seq != \"\":\n yield (lastid,seq)",
"def splitter(fasta_file, output, limit, large_handling=False):\n file_ = open(fasta_file, 'r')\n file_count = 1\n outfile = open(output.rstrip(\"/\")+\"/%s_%05d.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n nt_count = 0\n for seq in SeqIO.parse(fasta_file, 'fasta'):\n if large_handling == True and len(str(seq.seq)) >= int(limit):\n file_count += 1\n largefile = open(output.rstrip(\"/\")+\"/%s_%05d_XL.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n largefile.write(\">\"+str(seq.description)+\"\\n\"+\"\\n\".join(\n str(seq.seq)[i:i+50]for i in range(0,len(seq.seq),50))+\"\\n\")\n largefile.close()\n else:\n nt_count += len(str(seq.seq))\n outfile.write(\">\"+str(seq.description)+\"\\n\"+\"\\n\".join(\n str(seq.seq)[i:i+50]for i in range(0,len(seq.seq),50))+\"\\n\") \n if nt_count >= int(limit):\n outfile.close()\n file_count += 1\n nt_count = 0\n outfile = open(output.rstrip(\"/\")+\"/%s_%05d.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n outfile.close()",
"def generate_filename(self):\n file_pattern = os.path.join(self.path, \"TCGA-*\")\n for f in glob(file_pattern):\n organ = get_organ(f)\n for raw_f in glob(os.path.join(f, \"*.tif\")):\n gt_f = raw_f.replace(\".tif\", \".xml\")\n yield raw_f, gt_f, organ",
"def read_files(filenames, gram_size=1):\n assert isinstance(filenames, list), \"filenames argument must be a list\"\n parser = MorParser()\n for fn in filenames:\n for uid, speaker, ngram in generate_chunks(parser.parse(fn), gram_size):\n yield fn, uid, speaker, ngram",
"def divide_chunks(audio_file_, chunksize):\n\n for j in range(0, len(audio_file_), self.chunksize):\n yield audio_file[j:j + chunksize]",
"def combine(self):\n\n import re\n \n print 'Creating file', self.__filename\n \n bname = (os.path.split(self.__filename))[1]\n bname2 = bname\n \n # bugfix: if file contains characters like +,.,[]\n # properly escape them, otherwise re will fail to match.\n for a, b in zip(['+', '.', '[', ']','$', '(', ')'],\n ['\\+','\\.','\\[','\\]','\\$', '\\(', '\\)']):\n bname2 = bname2.replace(a, b)\n \n chunkre = re.compile(bname2 + '-' + '[0-9]+')\n \n chunkfiles = []\n for f in os.listdir(\".\"):\n print f\n if chunkre.match(f):\n chunkfiles.append(f)\n\n\n print 'Number of chunks', len(chunkfiles), '\\n'\n chunkfiles.sort(self.sort_index)\n\n data=''\n for f in chunkfiles:\n\n try:\n print 'Appending chunk', os.path.join(\".\", f)\n data += open(f, 'rb').read()\n except (OSError, IOError, EOFError), e:\n print e\n continue\n\n try:\n f = open(bname, 'wb')\n f.write(data)\n f.close()\n except (OSError, IOError, EOFError), e:\n raise FileSplitterException, str(e)\n\n print 'Wrote file', bname",
"def split_by_contigs(self, output_dir: Path = None) -> None:\n if output_dir is None:\n output_dir = (\n Path(self._input_file.parent) / \"split_\" + self._input_file.name\n )\n else:\n output_dir = Path(output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n contigs = pyfastx.Fasta(\n self.file_path.as_posix(), build_index=False, full_name=True\n )\n for contig_name, seq in contigs:\n output_file = (\n output_dir / f\"{contig_name.split(' ')[0]}{self._input_file.suffix}\"\n )\n with open(output_file, \"w+\", encoding=\"UTF-8\") as outfile:\n outfile.write(f\">{contig_name}\\n\")\n outfile.write(seq + \"\\n\")",
"def FASTA_iterator(filename):\n fasta_file=open(filename, \"r\")\n id_fasta=\"\"\n seq_fasta=\"\"\n\n for line in fasta_file:\n if line.startswith(\">\"):\n if id_fasta == \"\":\n id_fasta=line.strip()\n continue\n fasta = id_fasta , seq_fasta\n yield fasta\n seq_fasta=\"\"\n id_fasta=line.strip()\n\n else:\n seq_fasta += line.strip()\n\n if seq_fasta != \"\":\n yield id_fasta, seq_fasta",
"def FileIter(func_name):\n \n if func_name == 'convert_pmids_to_pmcs':\n sdir = partial(os.path.join,'Data', 'SearchResults')\n pmc_file = os.path.join('Data', 'PMC-ids.csv')\n files = [x for x in os.listdir(sdir('')) if x.endswith('.res')]\n for f in files:\n yield (sdir(f), pmc_file), sdir(f+'.conv')\n\n elif func_name == 'search_pubmed':\n sdir = partial(os.path.join,'Data', 'SearchResults')\n queryfile = os.path.join('Data', 'QueryList.txt')\n with open(queryfile) as handle:\n for row in csv.DictReader(handle):\n fname = '%s--%s.res' % (GeneralUtils.slugify(row['org']), \n GeneralUtils.slugify(row['search']))\n ofile = sdir(fname)\n yield queryfile, ofile, row['search']\n\n elif func_name == 'download_pmids':\n \n sdir = partial(os.path.join,'Data', 'SearchResults')\n odir = os.path.join('Data', 'RawXML')\n files = [x for x in os.listdir(sdir('')) if x.endswith('.conv')]\n \n for f in files:\n yield sdir(f), sdir(f+'.dl'), odir\n\n elif func_name == 'extract_text':\n \n sdir = partial(os.path.join, 'Data', 'RawXML')\n odir = partial(os.path.join, 'Data', 'SentenceFiles')\n\n files = sorted([x for x in os.listdir(sdir('')) if x.endswith('.xml')])\n for f in files:\n name = f.split('.')[0]\n if f.startswith('PMC'):\n typ = 'pmc'\n else:\n typ = 'pubmed'\n\n yield sdir(f), odir(name+'.sent'), typ\n\n elif func_name == 'get_mutations':\n \n sdir = partial(os.path.join, 'Data', 'SentenceFiles')\n odir = partial(os.path.join, 'Data', 'MutFiles')\n finder = None#mutfinder_gen('regex.txt')\n\n files = sorted([x for x in os.listdir(sdir('')) if x.endswith('.sent')])\n\n for f in files:\n name = f.split('.')[0]\n yield sdir(f), odir(name + '.mut')\n \n elif func_name == 'process_mut_file':\n \n sdir = partial(os.path.join, 'Data', 'MutFiles')\n odir = partial(os.path.join, 'Data', 'ProteinFiles')\n\n files = sorted([x for x in os.listdir(sdir('')) if x.endswith('.mut')])\n\n for f in files:\n name = f.split('.')[0]\n yield sdir(f), (odir(name + '.prot'), odir(name + '.sen'))\n elif func_name == 'mapping_files':\n path = 'Data/Mapping/'\n items = (('ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/knowledgebase/idmapping/idmapping.dat.gz', 'idmapping.dat.sort'),\n ('ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene_info.gz', 'gene_info'),\n ('ftp://ftp.ncbi.nlm.nih.gov/pub/pmc/PMC-ids.csv.gz', 'PMC-ids.csv'),\n ('ftp://nlmpubs.nlm.nih.gov/online/mesh/.asciimesh/d2011.bin', 'd2011.bin'))\n for url, ofile in items:\n yield None, os.path.join(path, ofile), url, path",
"def _collect_bams(self, wildcards, library_name):\n folder_name = get_ngs_library_folder_name(self.parent.sheets, wildcards.library_name)\n for _, path_infix, filename in self.path_gen.run(folder_name, (\"bam\",)):\n yield os.path.join(self.base_path_in, path_infix, filename).format(**wildcards)",
"def go(self):\n\n self._write_master()\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n njobs=0\n fobj=None\n\n icondor=0\n for isplit,fof_split in enumerate(fof_splits):\n if njobs % self['jobs_per_sub']==0:\n if fobj is not None:\n fobj.close()\n fobj = self._open_condor_script(icondor)\n icondor += 1\n\n self._write_split(fobj, isplit, fof_split)\n\n njobs += 1",
"def test_split_fasta_equal_num_seqs_per_file(self):\r\n fd, filename_prefix = mkstemp(dir=get_qiime_temp_dir(),\r\n prefix='split_fasta_tests',\r\n suffix='')\r\n close(fd)\r\n infile = ['>seq1', 'AACCTTAA', '>seq2', 'TTAACC', 'AATTAA',\r\n '>seq3', 'CCTT--AA']\r\n\r\n actual = split_fasta(infile, 1, filename_prefix)\r\n actual_seqs = []\r\n for fp in actual:\r\n actual_seqs += list(open(fp))\r\n remove_files(actual)\r\n\r\n expected = ['%s.%d.fasta' % (filename_prefix, i) for i in range(3)]\r\n\r\n self.assertEqual(actual, expected)\r\n self.assertEqual(\r\n SequenceCollection.from_fasta_records(parse_fasta(infile), DNA),\r\n SequenceCollection.from_fasta_records(parse_fasta(actual_seqs), DNA))",
"def splitter(file_name: str, MAX_SIZE: int = 7):\n\n # convertion to MB\n MAX_SIZE = MAX_SIZE * 1024 * 1024\n\n # index go throught the bit stream\n start_index: int = 0\n\n # harvested data\n data: bytes = None\n\n created_files: int = 0\n\n with open(file_name, \"rb\") as input_stream:\n # while we didn't go out the file\n while data != b'':\n # we place the cursor at start index\n input_stream.seek(start_index)\n # read a chunk of size MAX_SIZE bytes\n data = input_stream.read(MAX_SIZE)\n\n if data == b'':\n break\n # then we open an output file\n with open(str(start_index) + \"_\" + file_name, \"wb\") as ouput_stream:\n # A write the related chunk in it\n ouput_stream.write(data)\n\n created_files += 1\n\n # we translate the cursor\n start_index += MAX_SIZE\n\n print(\"Done! \", created_files, \" files created\")",
"def parse_proteome(fasta_file,kmer_size=12,out_base=\"kmers\",seq_per_file=50000,num_to_write=1000000):\n\n all_kmers = {}\n seq_name = None\n current_sequence = []\n\n # Parse fasta file, splitting into kmers as we go\n with open(fasta_file) as infile:\n for l in infile:\n\n if l.startswith(\">\"):\n if seq_name is not None:\n\n sequence = \"\".join(current_sequence)\n kmer_list = create_kmers(sequence,kmer_size)\n\n for k in kmer_list:\n try:\n all_kmers[k].append(seq_name)\n except KeyError:\n all_kmers[k] = [seq_name]\n\n current_sequence = []\n seq_name = l[1:].strip()\n else:\n if seq_name is None or l.strip() == \"\":\n continue\n current_sequence.append(l.strip())\n\n if seq_name is not None:\n\n sequence = \"\".join(current_sequence)\n kmer_list = create_kmers(sequence,kmer_size)\n\n for k in kmer_list:\n try:\n all_kmers[k].append(seq_name)\n except KeyError:\n all_kmers[k] = [seq_name]\n\n # Sort kmers\n to_sort = [(len(all_kmers[k]),k) for k in all_kmers.keys()]\n to_sort.sort(reverse=True)\n\n # kmers \n kmers = [k[1] for k in to_sort]\n\n if len(kmers) > num_to_write:\n kmers = kmers[:num_to_write]\n else:\n\n # If there are more single kmers than the total we want to get, grab a\n # random selection of them.\n single_kmers = [k[1] for k in to_sort if k[0] == 1]\n if num_to_write - len(kmers) > 0:\n to_grab = num_to_write - len(kmers)\n random.shuffle(single_kmers)\n kmers.extend(single_kmers[:to_grab])\n\n out = []\n counter = 0\n for k in kmers:\n\n # make sure kmer has only amino acids in it\n score = sum([1 for l in k if l not in \"ACDEFGHIKLMNPQRSTVWY\"])\n if score > 0:\n continue\n\n ids = \",\".join(all_kmers[k])\n out.append(\"{} {:5d} {}\\n\".format(k,len(all_kmers[k]),ids))\n\n if counter != 0 and counter % seq_per_file == 0:\n\n out_file = \"{}_{}.kmers\".format(out_base,counter)\n print(counter,len(kmers))\n sys.stdout.flush()\n\n f = open(out_file,'w')\n f.write(\"\".join(out))\n f.close()\n\n out = []\n\n counter += 1\n\n\n out_file = \"{}_{}.kmers\".format(out_base,counter)\n\n f = open(out_file,'w')\n f.write(\"\".join(out))\n f.close()",
"def fasta_iter(fasta_name):\n fh = open(fasta_name)\n # ditch the boolean (x[0]) and just keep the header or sequence since\n # we know they alternate.\n faiter = (x[1] for x in groupby(fh, lambda line: line[0] == \">\"))\n for header in faiter:\n # drop the \">\"\n header = header.next()[1:].strip()\n # join all sequence lines to one.\n seq = \"\".join(s.strip() for s in faiter.next())\n yield header, seq",
"def divide_fasta_like_file(input_file, output_dir, ext=''):\n with open(input_file, 'r') as file:\n body = ''\n p_id = ''\n for line in file:\n if line[0] == '>':\n if len(p_id) > 0:\n with open(output_dir + p_id.replace(':', '_') + '.' + ext, \"w\") as out_file:\n out_file.write('>' + p_id.replace(':', '_') + '\\n' + body + '\\n')\n body = ''\n p_id = line.strip()[1:]\n else:\n body += line.strip()\n with open(output_dir + p_id.replace(':', '_') + '.' + ext, \"w\") as out_file:\n out_file.write('>' + p_id.replace(':', '_') + '\\n' + body + '\\n')",
"def process_all_leading_genes(f_path):\n with open(f_path, 'r') as f:\n contents = f.read()\n parts = contents.strip().split('\\t')\n genes = parts[2:]\n return genes",
"def _out_fn(self, split_index):\n if split_index > 999:\n raise ValueError(\"Too many splitted files to generate: number \" +\n \"of splitted files exceed 1000.\")\n name = self.out_format.format(split_index)\n return op.join(self.out_dir, name)",
"def parse_multifasta_file(file, number_of_fastas):\n\n with open(file) as file:\n for i in range(number_of_fastas):\n fasts_seq = ''\n fasta_name = file.readline().strip()[1:]\n end_of_file = False\n end_of_seq = False\n while not end_of_seq and not end_of_file:\n x = file.tell()\n seq = file.readline()\n if not seq:\n end_of_file = True\n elif '>' not in seq:\n fasts_seq = fasts_seq + seq\n else:\n file.seek(x)\n end_of_seq = True\n fasts_seq = re.sub(r'\\n', '', fasts_seq)\n yield fasta_name, fasts_seq",
"def geneProcess(self, name):\n self.fileHandle = open(self.fileName, 'r+b')\n self.mm = mmap.mmap(self.fileHandle.fileno(), 0)\n positions = self.geneFeatures[name]\n exons = []\n for position in positions:\n self.mm.seek(position)\n row = self.mm.readline().decode('utf-8').rstrip().split(\"\\t\")\n attributes = row[-1].split(\"; \")\n for attribute in attributes:\n if attribute.startswith(\"gene_type\"):\n _gt = attribute.split(\" \")[-1][1:-1]\n elif attribute.startswith(\"gene_id\"):\n _gid = attribute.split(\" \")[-1][1:-1]\n elif attribute.startswith(\"gene_name\"):\n _gn = attribute.split(\" \")[-1][1:-1]\n exons.append((row[0], int(row[3]), int(row[4]), row[6], _gt, _gid, _gn))\n self.fileHandle.close()\n exons_df = pd.DataFrame(exons, columns=['scaffold', 'start', 'end',\n 'strand', 'gene_type', 'gene_id', 'gene_name'])\n\n for record in self.geneExonicRegions(exons_df):\n yield record"
] | [
"0.60271287",
"0.58416164",
"0.5833081",
"0.5790323",
"0.5722602",
"0.569153",
"0.56704223",
"0.56582576",
"0.565721",
"0.5654158",
"0.562368",
"0.5616169",
"0.5610848",
"0.56098634",
"0.55184925",
"0.55172205",
"0.5510026",
"0.5495891",
"0.54387915",
"0.54282844",
"0.5399085",
"0.5393757",
"0.5389265",
"0.5384354",
"0.5379378",
"0.53756946",
"0.53675",
"0.53598773",
"0.53547424",
"0.5349408"
] | 0.647833 | 0 |
return True if the Splitter should stop and create a new file; always False if no char_limit | def _stop(self):
if self.char_limit is not None:
return self.char_count > self.char_limit
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_truncation(self):\n\n temp_pos = self._handle.tell()\n self._handle.seek(-28, 2)\n eof = self._handle.read()\n self._handle.seek(temp_pos)\n if eof == _bgzf_eof:\n return False\n else:\n warnings.BytesWarning('No EOF character found. File may be truncated')\n return True",
"def can_write_eof(self):\n return True",
"def eof_check(self) -> bool:\n eof = False\n curr_pos = self.fileobject.tell()\n # print(curr_pos, self.st_size)\n chunk = self.fileobject.read(25)\n if chunk == '':\n # Is there something on the back burner??\n if len(self._backburner) > 0:\n self.fileobject = self._backburner.pop()\n # TODO: what if it is the end of the back burner file? Is that handled?\n else:\n eof = True\n else:\n self.fileobject.seek(curr_pos)\n return eof",
"def is_new_file(self):\n return self.filename is None",
"def is_too_long(file_name: str) -> bool:\n return len(file_name + REPORT_FILE_EXT) > 255",
"def canSplit(self):\n return False",
"def _is_terminated(self) -> bool:\n raise NotImplementedError",
"def is_eof(self) -> bool:\n ...",
"def starting_new_file(self):",
"def split(self):\n if(self.back == 'y'):\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1\n backNames = [self.file_path + str(num) + 'b' for num in range(len(files))]\n for num,file in enumerate(files):\n open(backNames[num],'w').write(file)\n else:\n files = open(self.file_path,'r').read().split('Splitting Text')\n names = [self.file_path + str(num) for num in range(len(files))]\n for num,file in enumerate(files):\n open(names[num],'w').write(file)\n self.file_count += 1",
"def split_file(self):\n # process lines into blocks with Parser until EOF triggers StopIteration\n while self.maf_lines:\n try:\n # rest counters and open new file at the top of the loop AFTER\n # the most recent yield\n if self._stop:\n self._yield(new_file=True)\n # try to get next block from Parser and write to current file\n block_string = self.parser.get_block(self.maf_lines).next()\n self.current_file.write(block_string)\n # update char count for the current file\n self.char_count += len(block_string)\n # if char count crosses limit, yield current file name start new file\n if self._stop:\n yield self.current_filename\n\n except StopIteration:\n self._yield(new_file=False)\n yield self.current_filename",
"def is_generate_per_split(self):\n return True",
"def splitter(fasta_file, output, limit, large_handling=False):\n file_ = open(fasta_file, 'r')\n file_count = 1\n outfile = open(output.rstrip(\"/\")+\"/%s_%05d.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n nt_count = 0\n for seq in SeqIO.parse(fasta_file, 'fasta'):\n if large_handling == True and len(str(seq.seq)) >= int(limit):\n file_count += 1\n largefile = open(output.rstrip(\"/\")+\"/%s_%05d_XL.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n largefile.write(\">\"+str(seq.description)+\"\\n\"+\"\\n\".join(\n str(seq.seq)[i:i+50]for i in range(0,len(seq.seq),50))+\"\\n\")\n largefile.close()\n else:\n nt_count += len(str(seq.seq))\n outfile.write(\">\"+str(seq.description)+\"\\n\"+\"\\n\".join(\n str(seq.seq)[i:i+50]for i in range(0,len(seq.seq),50))+\"\\n\") \n if nt_count >= int(limit):\n outfile.close()\n file_count += 1\n nt_count = 0\n outfile = open(output.rstrip(\"/\")+\"/%s_%05d.fa\"%(\n fasta_file.split('/')[-1].split('.')[0],file_count),'w')\n outfile.close()",
"def test_large_file(self):\n\t\tfixedgenerator.GenerateFixedWidthFile().generate()\n\t\tmain.Main(['input/large.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/large.csv'))\n\t\tos.remove('input/large.txt')\n\t\tos.remove('output/large.csv')",
"def newfile(self) :\n\n\t\tfrom tempfile import mkstemp\n\t\timport os\n\t\tglobal configurer\n\n\t\tfd,name = mkstemp(suffix='.blend')\n\t\tos.close(fd)\n\t\tself.name = name\n\t\tfd = open(name,'wb', configurer.get('ServerBufferSize'))\n\t\tself.fd = fd\n\t\tprint name\n\t\treturn 1",
"def test_no_eof(self):",
"def ischunked() :",
"def ischunked() :",
"def has_more_tokens(self) -> bool:\n return len(self.jack_file_tokens) > self._token_idx",
"def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()",
"def at_eof(self) -> bool:\n ...",
"def at_eof(self) -> bool:\n ...",
"def at_eof(self) -> bool:\n ...",
"def at_eof(self) -> bool:\n ...",
"def write_part(filename, csvreader, header, num=0, size=(DEFAULT_MAX_SIZE * MEGABYTE)):\n fname_part, ext = os.path.splitext(filename)\n size_estimate = 0\n split_filename = f\"{fname_part}_{num}{ext}\"\n try:\n with open(split_filename, FILE_FLAG) as split_part:\n LOG.info(f\"Writing new file: {split_filename}\")\n csvwriter = csv.writer(split_part)\n csvwriter.writerow(header)\n for row in csvreader:\n csvwriter.writerow(row)\n\n row_len = len(\",\".join(row))\n size_estimate += row_len + (row_len * VARIANCE)\n\n LOG.debug(f\"file size (est): {size_estimate}\")\n if size_estimate >= size:\n return (split_filename, False)\n except (IOError, FileExistsError) as exc:\n LOG.critical(f\"Fatal error: {exc}\")\n sys.exit(2)\n return (split_filename, True)",
"def has_digits(self):\n #Reached digit limit\n if self.digits_read == self.limit:\n return False\n #File closed\n if self.file is None:\n return False\n #Otherwise should be ok\n return True",
"def _is_truncated(self) -> bool:\n raise NotImplementedError",
"def hasMoreCommands(self):\n return self.currentIndex < len(self.fileLines)",
"def is_done(self):\n return self.is_terminated or self.is_truncated",
"def eol(self):\n return self.pos == len(self.tokens)"
] | [
"0.60422593",
"0.58545196",
"0.55908334",
"0.5479781",
"0.54545206",
"0.5429927",
"0.542197",
"0.53861135",
"0.5348217",
"0.53061837",
"0.5257799",
"0.5238273",
"0.52270555",
"0.51750046",
"0.51486933",
"0.5139923",
"0.51225036",
"0.51225036",
"0.5110876",
"0.5105658",
"0.509727",
"0.509727",
"0.509727",
"0.509727",
"0.50725675",
"0.50377697",
"0.5024119",
"0.49883085",
"0.498383",
"0.49606434"
] | 0.6584869 | 0 |
Iterate through 'call_limit' MAF files with the splitter and analyze with phastcons | def send_jobs(self, call_limit=None):
for f in self.splitter.split_file():
# if using a call limit, break once call limit is reached
if (call_limit is not None) and (self.fnum > call_limit):
break
self.maf = f
self._jobfile()
# print self.qsub_cmmd
call(self.qsub_cmmd, shell=True)
self.fnum += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_arem(in_files, out_peaks, max_fdr):\n in_treat, in_control = in_files[0]\n matches = re.search(r'(.*\\.treat)(.*)\\.mapped_reads', in_treat).groups()\n name = matches[0] + matches[1] + '.arem.peaks'\n cmd = 'arem -t %s -c %s --name=%s %s' % (in_treat, in_control, name,\n cfg.get('peaks', 'arem_params'))\n sys_call(cmd)\n # convert to proper bedfile- ints for score and + for strand\n peaks_to_keep = set()\n with open(out_peaks, 'w') as outfile:\n with open(name + '_peaks.xls') as infile:\n for index, line in enumerate(itertools.ifilter(bedCommentFilter,\n infile)):\n fields = line.split('\\t')\n if fields[0] == 'chr':\n continue # skip header\n start = str(max(0, int(fields[1])))\n score = str(max(0, min(1000, int(float(fields[6])))))\n fdr = float(fields[8])\n if fdr <= max_fdr:\n outfile.write('\\t'.join([fields[0], start, fields[2],\n 'AREM_peak_%s' % (index + 1), score])\n + '\\t+\\n')\n peaks_to_keep.add(index)\n # take region surrounding the peak summit\n summit_size = cfg.getint('peaks', 'peak_summit_size')\n with open(out_peaks + '_summits.%s_around' % \\\n cfg.get('peaks', 'peak_summit_size'), 'w') as outfile:\n with open(name + '_summits.bed') as infile:\n for index, line in enumerate(itertools.ifilter(bedCommentFilter,\n infile)):\n fields = line.strip().split('\\t')\n if fields[0] == 'chr':\n continue # skip header\n score = str(max(0, min(1000, int(float(fields[-1])))))\n start = str(max(0, int(fields[1]) - summit_size / 2))\n stop = str(int(fields[2]) + summit_size / 2)\n if index in peaks_to_keep:\n outfile.write('\\t'.join([fields[0], start, stop,\n 'AREM_peak_%s' % (index + 1), score])\n + '\\t+\\n')",
"def run_macs14(in_files, out_peaks, max_fdr):\n in_treat, in_control = in_files[0]\n matches = re.search(r'(.*\\.treat)(.*)\\.mapped_reads', in_treat).groups()\n name = matches[0] + matches[1] + '.macs14.peaks'\n cmd = 'macs14 -t %s -c %s --name=%s %s --diag' % (in_treat, in_control, name,\n cfg.get('peaks', 'macs14_params'))\n sys_call(cmd)\n peaks_to_keep = set()\n # convert to proper bedfile- ints for score and + for strand\n with open(out_peaks, 'w') as outfile:\n with open(name + '_peaks.xls') as infile:\n for index, line in enumerate(itertools.ifilter(bedCommentFilter,\n infile)):\n fields = line.split('\\t')\n if fields[0] == 'chr':\n continue # skip header\n start = str(max(0, int(fields[1])))\n score = str(max(0, min(1000, int(float(fields[6])))))\n fdr = float(fields[8])\n if fdr <= max_fdr:\n outfile.write('\\t'.join([fields[0], start, fields[2],\n 'MACS14_peak_%s' % (index + 1), score])\n + '\\t+\\n')\n peaks_to_keep.add(index)\n # take region surrounding the peak summit\n summit_size = cfg.getint('peaks', 'peak_summit_size')\n with open(out_peaks + '_summits.%s_around' % \\\n cfg.get('peaks', 'peak_summit_size'), 'w') as outfile:\n with open(name + '_summits.bed') as infile:\n for index, line in enumerate(itertools.ifilter(bedCommentFilter,\n infile)):\n fields = line.strip().split('\\t')\n if fields[0] == 'chr':\n continue # skip header\n # score is number of reads at summit\n score = str(max(0, min(1000, int(float(fields[-1])))))\n start = str(max(0, int(fields[1]) - summit_size / 2))\n stop = str(int(fields[2]) + summit_size / 2)\n if index in peaks_to_keep:\n outfile.write('\\t'.join([fields[0], start, stop,\n 'MACS_peak_%s' % (index + 1), score])\n + '\\t+\\n')",
"def findpeaks(project_name, treatment_id, control_id, index_file_parameters, tool_parameters_dict, temp_dir, macs_cnv_region_identifiers, output_dir):\n treatment_bamfile=getcodetofilename(index_file_parameters,treatment_id)\n control_bamfile=getcodetofilename(index_file_parameters,control_id)\n \n cmd_dict=genPeakToolRunCommands(project_name,treatment_id,treatment_bamfile,control_bamfile, tool_parameters_dict, temp_dir )\n MACSpeakfile='%s/MACS/%s_peaks.bed'%(temp_dir,treatment_id)\n HMCanpeakfile='%s/HMCan/%s_regions.bed'%(temp_dir,treatment_id)\n \n if not os.path.exists(MACSpeakfile): \n flog.write('%s: Running %s\\n'%(time.asctime(),cmd_dict['MACS']))\n os.system(cmd_dict['MACS'])\n else:\n flog.write('%s: No need to run %s\\nMACS peaks already there\\n'%(time.asctime(),cmd_dict['MACS']))\n \n if not os.path.exists(HMCanpeakfile): \n flog.write('%s: Running %s\\n'%(time.asctime(),cmd_dict['HMCan'])) \n os.system(cmd_dict['HMCan'])\n else:\n flog.write('%s: No need to run %s\\nHMCan peaks already there'%(time.asctime(),cmd_dict['HMCan'])) \n \n min_size,min_coverage_gain_over_average,window_size=macs_cnv_region_identifiers\n \n MACSpeaklist=[]\n for lntxt in open(MACSpeakfile):\n ln=lntxt.rstrip('\\n').split('\\t')\n MACSpeaklist.append([ln[0],int(ln[1]),int(ln[2])]) \n flog.write('%s: Info: number of MACS peaks %d\\n'%(time.asctime(),len(MACSpeaklist)))\n missedoutregionslist=getmissedoutregions(MACSpeakfile,treatment_bamfile, min_size, min_coverage_gain_over_average,window_size)\n \n \n HMCanpeaklist=[]\n for lntxt in open(HMCanpeakfile):\n ln=lntxt.rstrip('\\n').split('\\t')\n HMCanpeaklist.append([ln[0],int(ln[1]),int(ln[2])])\n flog.write('%s: Info: number of HMCan peaks %d\\n'%(time.asctime(),len(HMCanpeaklist)))\n \n HMCanadditions=common.interval_join(HMCanpeaklist, missedoutregionslist,3)\n flog.write('%s: Info: number of HMCan added peaks %d\\n'%(time.asctime(),len(HMCanadditions)))\n \n all_peaklist=[]\n for peak in MACSpeaklist:\n all_peaklist.append(peak+['MACS'])\n for peak in HMCanadditions:\n all_peaklist.append(peak+['HMCan']) \n all_peaklist.sort()\n \n outcsv='%s/peaks/%s__%s__peaks.bed'%(output_dir,project_name,treatment_id)\n outjson='%s/peaks/%s__%s__peaks.json'%(output_dir,project_name,treatment_id)\n \n fout=open(outcsv,'w')\n jsondict={}\n \n for peak in all_peaklist:\n fout.write('%s\\t%d\\t%d\\t%s\\n'%tuple(peak))\n jsondict['%s:%d-%d'%tuple(peak[0:3])]={}\n jsondict['%s:%d-%d'%tuple(peak[0:3])]['called_by']=peak[3]\n \n fout.close()\n json.dump(jsondict, open(outjson,'w'),indent=4,sort_keys=True)",
"def process(self, args):\n for benchmark_file in args.benchmark_files:\n self.process_individual_file(benchmark_file)\n self.total_files += 1",
"def process_raw_filelist():\n\n scan_list = []\n curr_scan = \"\"\n\n tools = generate_tools_list()\n\n print(\"Beginning scan of %s\" % os.path.join(sites_dir, \"ua-mac/raw_data/stereoTop\"))\n dates = sorted(os.listdir(os.path.join(sites_dir, \"ua-mac/raw_data/stereoTop\")))\n for date in dates:\n if date not in limit_dates:\n continue\n date_dir = os.path.join(os.path.join(sites_dir, \"ua-mac/raw_data/stereoTop\"), date)\n print(\"Scanning %s\" % date_dir)\n\n timestamps = sorted(os.listdir(date_dir))\n for ts in timestamps:\n ts_dir = os.path.join(date_dir, ts)\n\n meta, lbin, rbin = None, None, None\n\n files = os.listdir(ts_dir)\n for fname in files:\n fpath = os.path.join(ts_dir, fname)\n if fname.endswith(\"metadata.json\"):\n meta = fpath\n if fname.endswith(\"left.bin\"):\n lbin = fpath\n if fname.endswith(\"right.bin\"):\n rbin = fpath\n\n # TODO: More logging\n if meta and lbin and rbin:\n scan = get_scan_from_metadata(meta)\n\n if scan and scan != curr_scan:\n if len(scan_list) > 0:\n print(\"%s - [%s] %s datasets\" % (date, curr_scan, len(scan_list)))\n create_scan_dax(date, curr_scan, scan_list, tools)\n # TODO: Temporary\n return\n\n scan_list = []\n curr_scan = scan\n\n elif len(scan_list) > scan_size_limit and scan_size_limit > 0:\n print(\"%s - [%s] %s datasets\" % (date, curr_scan, len(scan_list)))\n create_scan_dax(date, curr_scan, scan_list, tools)\n return\n\n # TODO: What do we do if there is no scan in the metadata? \"unknown_scan_{date}\"?\n scan_list.append({\"left\": lbin, \"right\": rbin, \"metadata\": meta})\n\n if len(scan_list) > 0:\n print(\"%s - [%s] %s datasets\" % (date, curr_scan, len(scan_list)))\n create_scan_dax(date, curr_scan, scan_list, tools)",
"def run_macs(in_files, out_peaks, max_fdr):\n in_treat, in_control = in_files[0]\n matches = re.search(r'(.*\\.treat)(.*)\\.mapped_reads', in_treat).groups()\n name = matches[0] + matches[1] + '.macs.peaks'\n max_fdr = cfg.getfloat('peaks', 'max_FDR')\n cmd = 'macs -t %s -c %s --name=%s %s' % (in_treat, in_control, name,\n cfg.get('peaks', 'macs_params'))\n sys_call(cmd)\n \n # convert to proper bedfile- ints for score and + for strand\n with open(out_peaks, 'w') as outfile:\n with open(name + '_peaks.xls') as infile:\n for index, line in enumerate(itertools.ifilter(\n bedCommentFilter, infile)):\n fields = line.split('\\t')\n if fields[0] == 'chr':\n continue # skip header\n start = str(max(0, int(fields[1])))\n score = str(max(0, min(1000, int(float(fields[6])))))\n fdr = float(fields[8])\n if fdr <= max_fdr:\n outfile.write('\\t'.join([fields[0], start, fields[2],\n 'MACS_peak_%s' % (index + 1), score]) +\n '\\t+\\n')\n # take region surrounding the peak center as the summit\n summit_size = cfg.getint('peaks', 'peak_summit_size')\n with open(out_peaks + '_summits.%s_around' % \\\n cfg.get('peaks', 'peak_summit_size'), 'w') as outfile:\n with open(name + '_peaks.xls') as infile:\n for index, line in enumerate(itertools.ifilter(bedCommentFilter,\n infile)):\n fields = line.strip().split('\\t')\n if fields[0] == 'chr':\n continue # skip header\n score = str(max(0, min(1000, int(float(fields[6])))))\n p_start, p_stop = max(0, int(fields[1])), int(fields[2])\n p_center = p_start + (p_stop - p_start) / 2\n s_start = p_center - summit_size / 2\n s_stop = p_center + summit_size / 2\n fdr = float(fields[8])\n if fdr <= max_fdr:\n outfile.write('\\t'.join([fields[0], str(s_start),\n str(s_stop),\n 'MACS_peak_%s' % (index + 1), score])\n + '\\t+\\n')",
"def splitDetectorPeakInfo(self):\r\n\t\tsplit_raw_min = np.amin(self.splitData)\r\n\t\tsplit_min = split_raw_min - self.splitBaseline\r\n\t\t\t\t\r\n\t\tsplit_raw_max = np.amax(self.splitData)\r\n\t\tsplit_max = split_raw_max - self.splitBaseline\r\n\t\r\n\t\tself.splitMax = split_max\r\n\t\tself.splitMin = split_min",
"def analyze_wfs(self, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001, compact=True):\n\n print(\"---------------------------------\")\n print(\"Analyzing waveforms to get maxima\")\n print(\"---------------------------------\")\n\n # Creo una progress bar per rendere piu' fruibile visivamente il programma\n bar = progressbar.ProgressBar(maxval=self.number_of_events,\n widgets=[progressbar.Bar(\"=\", \"[\", \"]\"), \" \", progressbar.Percentage()])\n bar.start()\n counter = 0\n # Ora faccio un loop sugli eventi..\n if compact:\n for event in range(0, len(self.table_sipm_time['ev']), 9):\n # ..e chiamo la funzione analyze_ev_wf per ogni evento\n peaks_dataframe = self.analyze_ev_wf_compact(\n event, n_bsl, pic_name, peak_height, peak_prominences)\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat(\n [self.wf_peaks, peaks_dataframe], ignore_index=True)\n bar.update(counter+1)\n counter += 9\n else:\n for event in self.table_sipm_time['ev']:\n # ..e chiamo la funzione analyze_ev_wf per ogni evento\n peaks_time, peaks_ampl = self.analyze_ev_wf(\n event, n_bsl, pic_name, peak_height, peak_prominences)\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat([self.wf_peaks, pd.DataFrame(\n {'t': peaks_time, 'A': peaks_ampl})], ignore_index=True)\n bar.update(counter+1)\n counter += 1\n\n bar.finish()\n print(\"Events: \"+str(len(self.table_sipm_time['ev'])))\n print(\"---------------------------------\")\n print(\"Waveform analysis completed!\")\n # Devo ora ricavare di nuovo i Dt dai tempi assoluti, utilizzando la funzione diff()..\n self.wf_peaks['dt'] = self.wf_peaks['t'].diff()\n # ..e scartando il primo valore (che non ha un Dt)\n self.wf_peaks = self.wf_peaks.iloc[1:]\n print('Found {:d} peaks in waveforms\\n'.format(len(self.wf_peaks)))",
"def test_processing():\n # It's always harder with a small mailbox:\n strax.Mailbox.DEFAULT_MAX_MESSAGES = 2\n for request_peaks in (True, False):\n for peaks_parallel in (True, False):\n for max_workers in (1, 2):\n Peaks.parallel = peaks_parallel\n print(f\"\\nTesting with request_peaks {request_peaks}, \"\n f\"peaks_parallel {peaks_parallel}, \"\n f\"max_workers {max_workers}\")\n\n mystrax = strax.Context(storage=[],\n register=[Records, Peaks])\n bla = mystrax.get_array(\n run_id=run_id,\n targets='peaks' if request_peaks else 'records',\n max_workers=max_workers)\n assert len(bla) == recs_per_chunk * n_chunks\n assert bla.dtype == (\n strax.peak_dtype() if request_peaks else strax.record_dtype())",
"def run( args ):\n # Parse options...\n options = opt_validate_callpeak( args )\n # end of parsing commandline options\n info = options.info\n warn = options.warn\n debug = options.debug\n error = options.error\n \n #0 output arguments\n info(\"\\n\"+options.argtxt)\n options.PE_MODE = options.format in ('BAMPE','BEDPE')\n if options.PE_MODE:\n tag = 'fragment' # call things fragments not tags\n else:\n tag = 'tag'\n\n tempfile.tempdir = options.tempdir\n\n #1 Read tag files\n info(\"#1 read %s files...\", tag)\n if options.PE_MODE:\n (treat, control) = load_frag_files_options (options)\n else:\n (treat, control) = load_tag_files_options (options)\n if control is not None:\n # check if chromosome names are consistent. quit if not.\n check_names(treat, control, error)\n\n info(\"#1 %s size = %.1f\", tag, options.tsize)\n tagsinfo = \"# %s size is determined as %d bps\\n\" % (tag, options.tsize)\n\n t0 = treat.total\n tagsinfo += \"# total %ss in treatment: %d\\n\" % (tag, t0)\n info(\"#1 total %ss in treatment: %d\", tag, t0)\n\n # handle duplicates\n if options.keepduplicates != \"all\":\n if options.keepduplicates == \"auto\":\n info(\"#1 calculate max duplicate %ss in single position based on binomial distribution...\", tag)\n treatment_max_dup_tags = cal_max_dup_tags(options.gsize,t0)\n info(\"#1 max_dup_tags based on binomial = %d\" % (treatment_max_dup_tags))\n else:\n info(\"#1 user defined the maximum %ss...\", tag)\n treatment_max_dup_tags = int(options.keepduplicates)\n if options.PE_MODE:\n info(\"#1 filter out redundant fragments by allowing at most %d identical fragment(s)\", treatment_max_dup_tags)\n else:\n info(\"#1 filter out redundant tags at the same location and the same strand by allowing at most %d tag(s)\", treatment_max_dup_tags)\n\n treat.filter_dup(treatment_max_dup_tags)\n t1 = treat.total\n info(\"#1 %ss after filtering in treatment: %d\", tag, t1)\n tagsinfo += \"# %ss after filtering in treatment: %d\\n\" % (tag, t1)\n if options.PE_MODE:\n tagsinfo += \"# maximum duplicate fragments in treatment = %d\\n\" % (treatment_max_dup_tags)\n else:\n tagsinfo += \"# maximum duplicate tags at the same position in treatment = %d\\n\" % (treatment_max_dup_tags)\n info(\"#1 Redundant rate of treatment: %.2f\", float(t0 - t1) / t0)\n tagsinfo += \"# Redundant rate in treatment: %.2f\\n\" % (float(t0-t1)/t0)\n else:\n t1 = t0\n\n if control is not None:\n c0 = control.total\n tagsinfo += \"# total %ss in control: %d\\n\" % (tag, c0)\n info(\"#1 total %ss in control: %d\", tag, c0)\n\n if options.keepduplicates != \"all\":\n if options.keepduplicates == \"auto\":\n info(\"#1 for control, calculate max duplicate %ss in single position based on binomial distribution...\", tag)\n control_max_dup_tags = cal_max_dup_tags(options.gsize,c0)\n info(\"#1 max_dup_tags based on binomial = %d\" % (control_max_dup_tags))\n else:\n info(\"#1 user defined the maximum %ss...\", tag)\n control_max_dup_tags = int(options.keepduplicates)\n if options.PE_MODE:\n info(\"#1 filter out redundant fragments by allowing at most %d identical fragment(s)\", treatment_max_dup_tags)\n else:\n info(\"#1 filter out redundant tags at the same location and the same strand by allowing at most %d tag(s)\", treatment_max_dup_tags)\n control.filter_dup(treatment_max_dup_tags)\n #control.separate_dups(treatment_max_dup_tags) # changed 5-29; changed back since we don't need to call addbackdup+refinepeak anymore\n c1 = control.total\n\n info(\"#1 %ss after filtering in control: %d\", tag, c1)\n tagsinfo += \"# %ss 
after filtering in control: %d\\n\" % (tag, c1)\n if options.PE_MODE:\n tagsinfo += \"# maximum duplicate fragments in control = %d\\n\" % (treatment_max_dup_tags)\n else:\n tagsinfo += \"# maximum duplicate tags at the same position in control = %d\\n\" % (treatment_max_dup_tags)\n\n info(\"#1 Redundant rate of control: %.2f\" % (float(c0-c1)/c0))\n tagsinfo += \"# Redundant rate in control: %.2f\\n\" % (float(c0-c1)/c0)\n else:\n c1 = c0\n info(\"#1 finished!\")\n\n #2 Build Model\n info(\"#2 Build Peak Model...\")\n\n if options.nomodel:\n info(\"#2 Skipped...\")\n if options.PE_MODE:\n options.d = options.tsize\n else:\n options.d=options.extsize\n info(\"#2 Use %d as fragment length\" % (options.d))\n if options.shift > 0:\n info(\"#2 Sequencing ends will be shifted towards 3' by %d bp(s)\" % (options.shift))\n elif options.shift < 0:\n info(\"#2 Sequencing ends will be shifted towards 5' by %d bp(s)\" % (options.shift * -1))\n options.scanwindow=2*options.d # remove the effect of --bw\n else:\n peakmodel = PeakModel(treatment = treat,\n max_pairnum = MAX_PAIRNUM,\n opt = options\n )\n try:\n peakmodel.build()\n info(\"#2 finished!\")\n debug(\"#2 Summary Model:\")\n debug(\"#2 min_tags: %d\" % (peakmodel.min_tags))\n debug(\"#2 d: %d\" % (peakmodel.d))\n debug(\"#2 scan_window: %d\" % (peakmodel.scan_window))\n info(\"#2 predicted fragment length is %d bps\" % peakmodel.d)\n info(\"#2 alternative fragment length(s) may be %s bps\" % ','.join(map(str,peakmodel.alternative_d)))\n info(\"#2.2 Generate R script for model : %s\" % (options.modelR))\n model2r_script(peakmodel,options.modelR,options.name)\n options.d = peakmodel.d\n options.scanwindow= 2*options.d\n if options.d <= 2*options.tsize:\n warn(\"#2 Since the d (%.0f) calculated from paired-peaks are smaller than 2*tag length, it may be influenced by unknown sequencing problem!\" % (options.d))\n if options.onauto:\n options.d=options.extsize\n options.scanwindow=2*options.d\n warn(\"#2 MACS will use %d as EXTSIZE/fragment length d. NOTE: if the d calculated is still acceptable, please do not use --fix-bimodal option!\" % (options.d))\n else:\n warn(\"#2 You may need to consider one of the other alternative d(s): %s\" % ','.join(map(str,peakmodel.alternative_d)))\n warn(\"#2 You can restart the process with --nomodel --extsize XXX with your choice or an arbitrary number. 
Nontheless, MACS will continute computing.\")\n\n except NotEnoughPairsException:\n if not options.onauto:\n sys.exit(1)\n warn(\"#2 Skipped...\")\n options.d=options.extsize\n options.scanwindow=2*options.d\n warn(\"#2 Since --fix-bimodal is set, MACS will use %d as fragment length\" % (options.d))\n\n #3 Call Peaks\n info(\"#3 Call peaks...\")\n if options.nolambda:\n info(\"# local lambda is disabled!\")\n\n if control and options.PE_MODE:\n c1 = c1 * 2 # in PE_MODE, PE data has to be doubled since both ends will be counted for calculating background noise.\n\n # decide the scaling to balance the depth between treatment and control\n if control:\n if options.downsample:\n # use random sampling to balance treatment and control\n info(\"#3 User prefers to use random sampling instead of linear scaling.\")\n if t1 > c1:\n info(\"#3 MACS is random sampling treatment %ss...\", tag)\n if options.seed < 0:\n warn(\"#3 Your results may not be reproducible due to the random sampling!\")\n else:\n info(\"#3 Random seed (%d) is used.\" % options.seed)\n treat.sample_num(c1, options.seed)\n info(\"#3 %d Tags from treatment are kept\", treat.total)\n elif c1 > t1:\n info(\"#3 MACS is random sampling control %ss...\", tag)\n if options.seed < 0:\n warn(\"#3 Your results may not be reproducible due to the random sampling!\")\n else:\n info(\"#3 Random seed (%d) is used.\" % options.seed)\n control.sample_num(t1, options.seed)\n info(\"#3 %d %ss from control are kept\", control.total, tag)\n # set options.tocontrol although it would;t matter now\n options.tocontrol = False\n else:\n if options.scaleto == \"large\":\n if t1 > c1:\n # treatment has more tags than control, since tolarge is\n # true, we will scale control to treatment.\n options.tocontrol = False\n else:\n # treatment has less tags than control, since tolarge is\n # true, we will scale treatment to control.\n options.tocontrol = True\n else:\n if t1 > c1:\n # treatment has more tags than control, since tolarge is\n # false, we will scale treatment to control.\n options.tocontrol = True\n else:\n # treatment has less tags than control, since tolarge is\n # false, we will scale control to treatment.\n options.tocontrol = False\n\n peakdetect = PeakDetect(treat = treat,\n control = control,\n opt = options\n )\n peakdetect.call_peaks()\n\n # filter out low FE peaks\n peakdetect.peaks.filter_fc( fc_low = options.fecutoff )\n\n #4 output\n #4.1 peaks in XLS\n info(\"#4 Write output xls file... %s\" % (options.peakxls))\n ofhd_xls = open( options.peakxls, \"w\" )\n ofhd_xls.write(\"# This file is generated by MACS version %s\\n\" % (MACS_VERSION))\n ofhd_xls.write(options.argtxt+\"\\n\")\n ofhd_xls.write(tagsinfo)\n if options.shift > 0:\n ofhd_xls.write(\"# Sequencing ends will be shifted towards 3' by %d bp(s)\\n\" % (options.shift))\n elif options.shift < 0:\n ofhd_xls.write(\"# Sequencing ends will be shifted towards 5' by %d bp(s)\\n\" % (options.shift * -1))\n\n ofhd_xls.write(\"# d = %d\\n\" % (options.d))\n try:\n ofhd_xls.write(\"# alternative fragment length(s) may be %s bps\\n\" % ','.join(map(str,peakmodel.alternative_d)))\n except:\n # when --nomodel is used, there is no peakmodel object. 
Simply skip this line.\n pass\n if options.nolambda:\n ofhd_xls.write(\"# local lambda is disabled!\\n\")\n # pass write method so we can print too, and include name\n peakdetect.peaks.write_to_xls(ofhd_xls, name = options.name.encode())\n ofhd_xls.close()\n\n #4.2 peaks in BED\n if options.log_pvalue != None:\n score_column = \"pscore\"\n elif options.log_qvalue != None:\n score_column = \"qscore\"\n #4.2 peaks in narrowPeak\n if not options.broad:\n info(\"#4 Write peak in narrowPeak format file... %s\" % (options.peakNarrowPeak))\n ofhd_bed = open( options.peakNarrowPeak, \"w\" )\n peakdetect.peaks.write_to_narrowPeak (ofhd_bed, name_prefix=b\"%s_peak_\", name=options.name.encode(), score_column=score_column, trackline=options.trackline )\n ofhd_bed.close()\n #4.2-2 summits in BED\n info(\"#4 Write summits bed file... %s\" % (options.summitbed))\n ofhd_summits = open( options.summitbed, \"w\" )\n peakdetect.peaks.write_to_summit_bed (ofhd_summits, name_prefix=\"%s_peak_\".encode(), name=options.name.encode(),\n description=(\"Summits for %s (Made with MACS v2, \" + strftime(\"%x\") + \")\").encode(),\n score_column=score_column, trackline=options.trackline )\n ofhd_summits.close()\n #4.2 broad peaks in bed12 or gappedPeak\n else:\n info(\"#4 Write broad peak in broadPeak format file... %s\" % (options.peakBroadPeak))\n ofhd_bed = open( options.peakBroadPeak, \"w\" )\n peakdetect.peaks.write_to_broadPeak (ofhd_bed, name_prefix=b\"%s_peak_\", name=options.name.encode(), description=options.name.encode(), score_column=score_column, trackline=options.trackline)\n ofhd_bed.close()\n info(\"#4 Write broad peak in bed12/gappedPeak format file... %s\" % (options.peakGappedPeak))\n ofhd_bed = open( options.peakGappedPeak, \"w\" )\n peakdetect.peaks.write_to_gappedPeak (ofhd_bed, name_prefix=b\"%s_peak_\", name=options.name.encode(), description=options.name.encode(), score_column=score_column, trackline=options.trackline)\n ofhd_bed.close()\n\n info(\"Done!\")",
"def run_main():\n # Matching lines against a matcher function.\n matched_lines = match_file(file_names, matcher)\n\n # Will contain data sorted by file.\n binned_data = {}\n\n # Looking through the lines that were inserted into the metrics file via the metrics component.\n for key in matched_lines:\n\n # Grabbing matched lines by the file or orgination.\n buffer = matched_lines[key]\n\n # This will contain dictionaries converted from JSON.\n data = []\n\n # Loop through the collection, appending data converted from JSON entries.\n for line in buffer:\n data.append(extract_data(line))\n\n # Sort the data by file.\n binned_data[key] = sort_data(data)\n\n # Output the final results.\n generate_statistics(binned_data)\n return 0",
"def process(config, bufr_list, advection_diagnostic=True):\r\n if config['verbose']:\r\n print('bufr.process: processing array for BUFR data...')\r\n for i in range(len(bufr_list)):\r\n bufr = bufr_list[i]\r\n # PROF part of the BUFR data\r\n items = list(bufr.items())\r\n for item in items:\r\n if item[0] == b'PROF' or item[0] == 'PROF':\r\n bufr_prof = item[1]\r\n bufr_prof = get_array(bufr_prof)\r\n bufr_dims = list(range(len(bufr_prof.shape)))\r\n bufr_dims[0] = 1\r\n bufr_dims[1] = 0\r\n bufr_prof = bufr_prof.transpose(bufr_dims)\r\n bufr_shape = bufr_prof.shape\r\n bufr_reshape = [bufr_shape[0]] + [np.cumprod(bufr_shape[1:])[-1]]\r\n bufr_prof = bufr_prof.reshape(tuple(bufr_reshape))\r\n # SFC part of the BUFR data\r\n for item in items:\r\n if item[0] == b'SFC' or item[0] == 'SFC':\r\n bufr_sfc = item[1]\r\n bufr_sfc = get_array(bufr_sfc)\r\n bufr_dims = list(range(len(bufr_sfc.shape)))\r\n bufr_dims[0] = 1\r\n bufr_dims[1] = 0\r\n bufr_sfc = bufr_sfc.transpose(bufr_dims)\r\n bufr_shape = bufr_sfc.shape\r\n bufr_reshape = [bufr_shape[0]] + [np.cumprod(bufr_shape[1:])[-1]]\r\n bufr_sfc = bufr_sfc.reshape(tuple(bufr_reshape))\r\n # DAY part of the BUFR data\r\n for item in items:\r\n if item[0] == b'DAY' or item[0] == 'DAY':\r\n bufr_day = item[1]\r\n bufr_day = get_array(bufr_day)\r\n bufr_dims = list(range(len(bufr_day.shape)))\r\n bufr_dims[0] = 1\r\n bufr_dims[1] = 0\r\n bufr_day = bufr_day.transpose(bufr_dims)\r\n bufr_shape = bufr_day.shape\r\n bufr_reshape = [bufr_shape[0]] + [np.cumprod(bufr_shape[1:])[-1]]\r\n bufr_day = bufr_day.reshape(tuple(bufr_reshape))\r\n bufr_one_out = np.concatenate((bufr_prof, bufr_sfc, bufr_day), axis=1)\r\n # Fix missing values\r\n bufr_one_out[bufr_one_out < -1000.] = np.nan\r\n if advection_diagnostic:\r\n advection_array = temp_advection(bufr)\r\n bufr_one_out = np.concatenate((bufr_one_out, advection_array), axis=1)\r\n if i == 0: #first station\r\n bufr_out = bufr_one_out\r\n else:\r\n bufr_out = np.concatenate((bufr_out,bufr_one_out),axis=1)\r\n return bufr_out",
"def init_moscatel(filedir, filters_in_config, output_dir, skip_every=None):\n file_list = glob(os.path.join(filedir,'*.fits'))\n file_list.sort()\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if os.listdir(filedir) != []:\n #if len(file_list)>0:\n print('total no. of raw data frames: {0}\\n'.format(len(file_list)))\n\n if skip_every is not None:\n print('Skipping every {0}-th frames per band\\n'.format(skip_every))\n\n else: #elif skip_every == None:\n '''\n bug: does not print even if skip_every is not entered in terminal\n '''\n print('Analyzing all raw frames per band')\n skip_every=1\n\n bands = {}\n for j in filters_in_config:\n j=j.strip(' ')\n bands[j]=[]\n filters_in_hdr=[]\n\n #get list of frames by filter based on header\n for i in tqdm(file_list[::skip_every]):\n hdr = pf.open(i)[0].header\n filters_in_hdr.append(hdr['FILTER'])\n for j in filters_in_config:\n if hdr['FILTER'] == j:\n j=j.strip(' ')\n bands[j].append(i)\n\n filters_in_hdr_set=list(set(filters_in_hdr)).sort()\n\n for k in bands.keys():\n print('{0}-band={1} frames'.format(k, len(bands[k])))\n #save into txtfile\n name = os.path.join(output_dir,k+'-band.txt')\n with open(name, 'w') as z: #overwrite\n #write line by line\n for line in bands[k]:\n z.write('{}\\n'.format(line))\n print('\\nfilenames sorted by band saved in {}'.format(output_dir))\n\n else:\n print('ERROR: check your data directory')\n #return empty dict\n bands={}\n\n return filters_in_hdr_set, bands",
"def test150EventMultipleFileSplit(self):\n splitter = SplitterFactory()\n jobFactory = splitter(self.multipleFileSubscription)\n\n jobGroups = jobFactory(events_per_job=150,\n performance=self.performanceParams)\n\n self.assertEqual(len(jobGroups), 1)\n\n self.assertEqual(len(jobGroups[0].jobs), 10)\n\n for job in jobGroups[0].jobs:\n self.assertEqual(len(job.getFiles(type=\"lfn\")), 1)\n self.assertEqual(job[\"mask\"].getMaxEvents(), self.eventsPerJob)\n self.assertEqual(job[\"mask\"][\"FirstEvent\"], 0)\n self.assertEqual(job[\"mask\"][\"LastEvent\"], 99)",
"def automatic_checking(files):\n for i in range(10):\n fft_checking(files[i])",
"def main_SS(maf_file, segment_file, vaf_threshold = 1.05, filterSegments = False):\n all_mutations = pd.read_csv(maf_file, low_memory=False, delimiter='\\t')\n all_segments = pd.read_csv(segment_file, low_memory=False, delimiter='\\t')\n\n if not os.path.exists(\"./sample_mutations_withCN\"):\n os.makedirs(\"./sample_mutations_withCN\")\n if not os.path.exists(\"./pyclone_input\"):\n os.makedirs(\"./pyclone_input\")\n\n for i, sample in enumerate(all_mutations.Tumor_Sample_Barcode.unique()):\n print(\"Processing sample {}: {}\".format(i+1, sample))\n\n # Subset the mutations and segments to those belonging to the patient\n sample_mutations = all_mutations[all_mutations['Tumor_Sample_Barcode'] == sample]\n sample_segments = all_segments[all_segments['Tumor_Sample_Barcode'] == sample]\n\n patient_VAF = sample_mutations.loc[:, 'VAF']\n filter_VAF_index = (patient_VAF > vaf_threshold)\n\n # Remove the mutations where the condition is true for ALL segments, i.e. it has to be below\n # 0.05 for all sectors. If it's above 0.05 in any sector, keep the mutations. This will keep most\n # of the private mutations.\n num_filtered = filter_VAF_index.loc[filter_VAF_index == False, ]\n print(\"Patient {} has {} mutations with average VAF < {} removed\".format(sample, num_filtered.shape[0], vaf_threshold))\n # Filter out the variants\n sample_mutations = sample_mutations.loc[filter_VAF_index, ]\n # Get the segments dictionary for the patient.\n seg_dict = segments_to_dict(sample_segments)\n\n overlap_seg = pd.DataFrame()\n filtered_seg = pd.DataFrame()\n for _, mut_row in sample_mutations.iterrows():\n # Skip X and Y chromosome\n if (mut_row['Chromosome'] == \"X\" or mut_row['Chromosome'] == \"Y\"):\n continue\n\n # Search for the segment\n buf = search_overlap_singleSample(mut_row, seg_dict)\n # Skip if no overlapping segments\n if (buf.empty):\n continue\n elif filterSegments:\n print(\"--filterSegments specified. 
Will filter segments of low quality.\")\n if (buf.iloc[0]['numMarker'] < 100) or (buf.iloc[0]['end.pos'] - buf.iloc[0]['start.pos'] < 5000000) or (buf.iloc[0]['CNt'] >= 8):\n if (filtered_seg.empty):\n filtered_seg = buf.iloc[0].to_frame()\n else:\n filtered_seg = pd.concat([filtered_seg, buf.iloc[0]], axis=1)\n else:\n # Get copy number for mutations\n assigned_row = mut_row.copy(deep=True)\n assigned_row['CNt'] = buf.iloc[0]['CNt']\n assigned_row['Major_CN'] = buf.iloc[0]['A']\n assigned_row['Minor_CN'] = buf.iloc[0]['B']\n assigned_row['adjustedCN'] = buf.iloc[0]['adjustedCN']\n # Initialize dataframe for merging.\n if (overlap_seg.empty):\n overlap_seg = assigned_row.to_frame()\n else:\n overlap_seg = pd.concat([overlap_seg, assigned_row], axis=1)\n\n overlap_seg = overlap_seg.transpose()\n overlap_seg.to_csv(\"./sample_mutations_withCN/{}_SNV_withCN.maf\".format(sample),sep=\"\\t\", index=False)\n\n filtered_seg = filtered_seg.transpose()\n print(\"Sample {} has {} segments with marker<100 or smaller than 5 Mb or >= 8 copy number (Canopy guideline)\".format(sample, filtered_seg.shape[0]))\n filtered_seg.to_csv(\"./sample_mutations_withCN/{}_filtered_seg.maf\".format(sample),sep=\"\\t\", index=False)\n\n pyclone_input = overlap_seg.loc[:, ['Hugo_Symbol', 'Chromosome',\n 'Start_position', 'ref_count', 'alt_count', 'VAF', 'Major_CN',\n 'Minor_CN']]\n pyclone_input['mutation_id'] = pyclone_input['Hugo_Symbol'].map(str) + \"_\" + pyclone_input['Chromosome'].map(str) + \":\" + pyclone_input['Start_position'].map(str)\n pyclone_input['normal_cn'] = 2\n towrite = pyclone_input.loc[:, ['mutation_id', 'ref_count', 'alt_count', 'normal_cn', 'Minor_CN', 'Major_CN']]\n towrite.columns = ['mutation_id', 'ref_counts', 'var_counts', 'normal_cn', 'minor_cn', 'major_cn']\n towrite['ref_counts'] = towrite['ref_counts'].map(int)\n towrite['var_counts'] = towrite['var_counts'].map(int)\n towrite.to_csv(\"./pyclone_input/{}_mutations.tsv\".format(sample), sep='\\t', index=False)",
"def test_peak_detection(self):\n from sms.models import utilFunctions # pylint: disable=C0415\n\n for i, (mx, _) in enumerate(self.sm.dft_frames(self.x)):\n ploc = sample_dsp.peak_detect(mx, self.sm.t)\n ploc_sms = utilFunctions.peakDetection(mx, self.sm.t)\n for j, (p, p_s) in enumerate(itertools.zip_longest(ploc, ploc_sms)):\n with self.subTest(frame=i, peak_n=j):\n self.assertEqual(p, p_s)",
"def analyze_wfs_no_png(self, n_bsl, pic_name, peak_height=0.001, peak_prominences=0.0001):\n\n print(\"---------------------------------\")\n print(\"Analyzing waveforms to get maxima\")\n print(\"---------------------------------\")\n\n # Creo una progress bar per rendere piu' fruibile visivamente il programma\n bar = progressbar.ProgressBar(maxval=self.number_of_events,\n widgets=[progressbar.Bar(\"=\", \"[\", \"]\"), \" \", progressbar.Percentage()])\n bar.start()\n counter = 0\n peaks_temp = pd.DataFrame()\n num_fig = 0\n print(\"Events: \"+str(len(self.table_sipm_time['ev'])))\n # Ora faccio un loop sugli eventi..\n for event in self.table_sipm_time['ev']:\n\n # Creo un np.array con gli indici della singola waveform..\n wf_idx = [event*self.points_per_wf, event *\n self.points_per_wf+self.points_per_wf]\n # ..i tempi di ciascun punto..\n wf_time = self.table_sipm_time['t'].iloc[event] + \\\n self.table_sipm_wf['TIME'][int(wf_idx[0]):int(wf_idx[1])]\n # ..e i valori del segnale di ciascun ppunto\n wf_ch = - \\\n self.table_sipm_wf['CH1'][int(wf_idx[0]):int(wf_idx[1])]\n\n # Per trovare la baseline, faccio un fit polinomiale di grado 0..\n # ..su un numero finito di punti iniziali, specificato dall'utente..\n # ..poi la salvo internamente alla classe\n self.baseline = np.polyfit(\n wf_time[0:n_bsl], wf_ch[0:n_bsl], 0)[0]\n # Voglio anche disegnarla sui plot, quindi mi creo una lista di x e di y..\n # ..nello spazio della waveform\n bsl_time = wf_time[0:n_bsl]\n bsl_ch = [self.baseline] * n_bsl\n\n # Per trovre i picchi, uso la funzione find_peaks di scipy.signal\n # I valori di height e prominence sono specificati dall'utente..\n # ..e scalti per selezionare tutti i picchi senza prendere rumore\n peaks, _ = sp.find_peaks(\n wf_ch, height=peak_height, prominence=peak_prominences)\n\n peaks_temp = pd.concat([peaks_temp, pd.DataFrame(\n {'t': wf_time.iloc[peaks], 'A': wf_ch.iloc[peaks]-self.baseline})], ignore_index=True)\n bar.update(counter+1)\n counter += 1\n\n # I parametri dei picchi sono quindi salvati nella tabella finale dei risultati\n self.wf_peaks = pd.concat(\n [self.wf_peaks, peaks_temp], ignore_index=True)\n\n bar.finish()\n print(\"---------------------------------\")\n print(\"Waveform analysis completed!\")\n # Devo ora ricavare di nuovo i Dt dai tempi assoluti, utilizzando la funzione diff()..\n self.wf_peaks['dt'] = self.wf_peaks['t'].diff()\n # ..e scartando il primo valore (che non ha un Dt)\n self.wf_peaks = self.wf_peaks.iloc[1:]\n print('Found {:d} peaks in waveforms\\n'.format(len(self.wf_peaks)))",
"def build_all_traces_from_files(trace_files, min_length, agg_window):\n results = mp.Manager().list()\n procs = []\n cores, traces_per_core = get_cores_and_traces_per_core(len(trace_files))\n for core_num in range(cores):\n core_trace_files = get_traces_for_core(\n trace_files, traces_per_core, core_num)\n procs.append(\n mp.Process(target=build_traces_from_files,\n args=(core_trace_files, results,\n min_length, agg_window)))\n initialize_and_join_processes(procs)\n return list(results)",
"def test_scan_file(self):\n self.run_scan(self.filename, 1)",
"def main():\n\n\t# eesAmplitudes = range(200,321,10)\n\teesAmplitudes = [\"%\"+\"%.2f_0_0\"%(i) for i in np.arange(0,1.01,.05)]\n\t# eesFrequencies = range(10,1001,20)\n\teesFrequencies = np.logspace(1,3,50)\n\t# nrnStructureFile = \"fsSFrFfMnArtMod.txt\"\n\t# nrnStructureFile = \"fsSFrFfMnArtModHuman.txt\"\n\tnrnStructureFile = \"fsMnArtModHuman.txt\"\n\t# name = \"FreqAmpModHuman_0367S\"\n\tname = \"FreqAmpModHuman_ArtmodHuman_10msBurst\"\n\n\tnSim = len(eesFrequencies)*len(eesAmplitudes)\n\tcount=0.\n\tpercLastPrint=0.\n\tprintPeriod = 0.05\n\t# simTime = 250\n\tsimTime = 15\n\tspecies = \"human\"\n\n\tfor eesAmplitude in eesAmplitudes:\n\t\tfor eesFrequency in eesFrequencies:\n\t\t\tfilName = name+\"_amp_\"+str(eesAmplitude)+\"_freq_\"+str(eesFrequency)\n\t\t\tresultFile = gt.find(\"*\"+filName+\".p\",pathToResults)\n\t\t\tif not resultFile:\n\t\t\t\treturnCode = None\n\t\t\t\twhile not returnCode==0:\n\t\t\t\t\tprogram = ['python','scripts/computeAfferentsEfferentsModulation.py',\n\t\t\t\t\t\tstr(eesFrequency),str(eesAmplitude),species,nrnStructureFile,name,\"--simTime\",str(simTime)]\n\t\t\t\t\tprint \" \".join(program)\n\t\t\t\t\tforwardSimulation = subprocess.Popen(program, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\t\t\t\treturnCode = None\n\t\t\t\t\twhile returnCode is None:\n\t\t\t\t\t\tmessage = forwardSimulation.stdout.readline().rstrip(\"\\n\").split()\n\t\t\t\t\t\tif message != None:print \"\\t\\t\"+\" \".join(message)+\"\\t\\t\"\n\t\t\t\t\t\treturnCode = forwardSimulation.poll()\n\t\t\t\t\tif returnCode != 0: print \"\\t\\t\\t\\t Error n: \",forwardSimulation.poll(),\" resetting simulation...\"\n\t\t\tcount+=1\n\t\t\tif count/nSim-percLastPrint>=printPeriod:\n\t\t\t\tpercLastPrint=count/nSim\n\t\t\t\tprint str(round(count/nSim*100))+\"% of simulations performed...\"\n\tplot_stats(eesAmplitudes,eesFrequencies,simTime,name)",
"def main(file_list, outname, fit_func, starting_guess, chunk, hill):\n \n class StopWhile(Exception): pass\n\n # See if we want to analyze chunks\n if chunk: pHStatFile.my_re = pHStatFile.chunkre\n\n xdata = np.zeros(len(file_list))\n ydata = np.zeros(len(file_list))\n # Convert the file_list into a list of pHStatFile objects if it's not yet\n if type(file_list[0]).__name__ == 'str':\n tmp = [pHStatFile(open(fname, 'r')) for fname in file_list]\n file_list = tmp\n del tmp\n # Build the list of output files\n output_files = {}\n for resid in file_list[0].list_of_residues:\n output_files[resid] = open('%s_%s.dat' % (outname, resid), 'w', 0)\n \n # Generate the x-data (the pHs). This never changes\n for i, frec in enumerate(file_list): xdata[i] = frec.pH\n\n # Now loop through all of our data\n numres = 0 # Number of residues we've looped through so far\n numframes = 0 # Number of frames we've looped through so far\n\n # This is the easiest way to bust out of an infinite loop -- Engulf the whole\n # thing in a big try-except, and catch a specialized exception.\n try:\n while True:\n numres += 1\n # If we've looped through all of our residues, then we know we've hit\n # the next frame, so update our counters accordingly\n if numres % len(output_files) == 0:\n numframes += 1\n numres = 1\n # Zero out the y-data, because we're about to fill it up\n ydata = np.zeros(len(file_list)) # fraction protonated\n offset = np.zeros(len(file_list)) # Offset for pKa\n pred = np.zeros(len(file_list)) # Predicted pKas\n trans = [0 for i in range(len(file_list))] # num of transitions\n # Loop through all of the files and get our next residue -- they should\n # be synchronized, so this should pull the same residue from each file\n for i, frec in enumerate(file_list):\n stuff = frec.get_next_residue()\n # If we got nothing bust out of the loop\n if not stuff:\n raise StopWhile\n resname,resnum,offset[i],pred[i],ydata[i],trans[i] = stuff\n ydata[i] = 1-ydata[i] # Get fraction DEprotonated\n # Make the y-data into a hill-plottable form\n if fit_func:\n # If we're doing a hill plot, adjust our starting guess to be\n # relatively close -- hill will start as 1, and pKa will start\n # as the average of pKa values (not including infinity)\n if hill:\n starting_guess = (get_avg_pka(pred), 1)\n try:\n params, cov = optimize.curve_fit(fit_func, xdata,\n ydata, starting_guess)\n except (RuntimeError, ValueError):\n # If we can't fit the data (expected at the beginning) just go on\n continue\n line = '%d ' % numframes\n try:\n for i, param in enumerate(params):\n try:\n# line += '%.4f %.4f ' % (param, math.sqrt(cov[i][i]))\n line += '%.4f ' % param\n except TypeError:\n# line += '%.4f %.4f ' % (param, cov)\n line += '%.4f ' % param\n except ValueError:\n continue\n else:\n # Average all of the predicted pKas, ignoring values whose offset is\n # >= 3 pH units\n runsum = runsum2 = numpts = 0\n for i in range(len(file_list)):\n if abs(offset[i]) < 3:\n runsum += pred[i]\n runsum2 += pred[i] * pred[i]\n numpts += 1\n\n if numpts == 0: continue\n avg = runsum / numpts\n stdev = math.sqrt(abs(runsum2/numpts - avg*avg))\n line = '%d %.4f %.4f' % (numframes, avg, stdev)\n \n # Now write out the data as: Frame # pKa1 std.dev. [hill.coef. std.dev.]\n # but only write out if we actually got a pKa this time around\n ofile = output_files['%s_%d' % (resname, resnum)]\n ofile.write(line + os.linesep)\n\n except StopWhile: pass",
"def test_filtered_scan(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount, ext=\".txt\")",
"def __init__(\n self,\n manifest_filepath: Union[str, Path, List[str], List[Path]],\n sample_rate: int,\n n_segments: Optional[int] = None,\n max_duration: Optional[float] = None,\n min_duration: Optional[float] = None,\n ignore_file: Optional[Union[str, Path]] = None,\n trim: Optional[bool] = False,\n load_precomputed_mel: bool = False,\n hop_length: Optional[int] = None,\n ):\n super().__init__()\n\n if load_precomputed_mel:\n if hop_length is None:\n raise ValueError(\"hop_length must be specified when load_precomputed_mel is True\")\n\n if n_segments is None:\n raise ValueError(\"n_segments must be specified when load_precomputed_mel is True\")\n\n # Initialize and read manifest file(s), filter out data by duration and ignore_file\n if isinstance(manifest_filepath, str):\n manifest_filepath = [manifest_filepath]\n self.manifest_filepath = manifest_filepath\n\n data = []\n total_duration = 0\n for manifest_file in self.manifest_filepath:\n with open(Path(manifest_file).expanduser(), 'r') as f:\n logging.info(f\"Loading dataset from {manifest_file}.\")\n for line in tqdm(f):\n item = json.loads(line)\n\n if \"mel_filepath\" not in item and load_precomputed_mel:\n raise ValueError(f\"mel_filepath is missing in {manifest_file}\")\n\n file_info = {\n \"audio_filepath\": item[\"audio_filepath\"],\n \"mel_filepath\": item[\"mel_filepath\"] if \"mel_filepath\" in item else None,\n \"duration\": item[\"duration\"] if \"duration\" in item else None,\n }\n\n data.append(file_info)\n\n if file_info[\"duration\"] is None:\n logging.info(\n \"Not all audio files have duration information. Duration logging will be disabled.\"\n )\n total_duration = None\n\n if total_duration is not None:\n total_duration += item[\"duration\"]\n\n logging.info(f\"Loaded dataset with {len(data)} files.\")\n if total_duration is not None:\n logging.info(f\"Dataset contains {total_duration / 3600:.2f} hours.\")\n\n self.data = TTSDataset.filter_files(data, ignore_file, min_duration, max_duration, total_duration)\n self.base_data_dir = get_base_dir([item[\"audio_filepath\"] for item in self.data])\n\n # Initialize audio and mel related parameters\n self.load_precomputed_mel = load_precomputed_mel\n self.featurizer = WaveformFeaturizer(sample_rate=sample_rate)\n self.sample_rate = sample_rate\n self.n_segments = n_segments\n self.hop_length = hop_length\n self.trim = trim",
"def main(filein, min_reads=150, n_group=2000):\n from random import sample\n assert os.path.exists(filein)\n hq = filter_reads(filein)\n logging.info('remove matching reads')\n no_pol = remove_matching_reads(hq, cont_file)\n # no_pol = 'clean_reads.fasta'\n no_pol_reads = list(SeqIO.parse(no_pol, 'fasta'))\n no_pol_reads = sample(no_pol_reads, k=len(no_pol_reads))\n covering_reads = set([])\n logging.info('blast reads in batches until enough are found')\n total_blasted = 0\n for i, group in enumerate(grouper(n_group, no_pol_reads)):\n if i > 2 and len(covering_reads) < 20:\n sys.exit('not enough reads covering V3 were found')\n logging.info('blast call %d', i + 1)\n _ = blast_reads(group)\n covering_reads.update(_)\n total_blasted += n_group\n logging.info('this blast: %d covering out of %d total - %3.2f %%', len(_), n_group,\n 100 * float(len(_)) / n_group)\n logging.info('cumulative: %d covering out of %d total - %3.2f %%', len(covering_reads), total_blasted,\n 100 * float(len(covering_reads)) / total_blasted)\n if len(covering_reads) >= min_reads:\n break\n\n logging.info('covering_reads used in MSA: %d out of %d blasted (%3.2f %%)', len(covering_reads), total_blasted,\n 100 * float(len(covering_reads)) / total_blasted)\n cov_reads, n_fwd, n_rev = extract_reads(covering_reads, no_pol)\n\n SeqIO.write(cov_reads, 'v3reads.fasta', 'fasta')\n logging.info('%d covering reads in forward orientation', n_fwd)\n logging.info('%d covering reads in reverse orientation', n_rev)\n if n_fwd + n_rev < min_reads:\n logging.error('Not enough reads: %d', n_fwd + n_rev)\n sys.exit('Not enough reads: %d' % (n_fwd + n_rev))\n\n no_singleton_reads = [s for s in SeqIO.parse('v3reads.fasta', 'fasta') if int(s.id.split('_')[-1]) > 1]\n SeqIO.write(no_singleton_reads, 'v3reads_no_singleton.fasta', 'fasta')\n\n cml = shlex.split('muscle -in v3reads_no_singleton.fasta -out msa.fasta -quiet')\n subprocess.call(cml)\n\n df, haplotypes, support = msa_2_df('msa.fasta')\n logging.info('Haplotypes supported by %d reads out of %d: %3.1f%%',\n support, n_fwd + n_rev, 100.0 * support / (n_fwd + n_rev))\n cons_seq = df_2_ambiguous_sequence(df)\n SeqIO.write([SeqRecord(Seq(cons_seq), id='v3_consensus', description='')], 'v3cons.fasta', 'fasta')\n\n haps = []\n hi = 1 # counter for haplotypes, used in fasta file\n accounted_f = 0.0 # keep track of the cumulative accounted frequency\n tot_reads = sum(haplotypes.values())\n for h, support in haplotypes.most_common():\n f = round(float(support) / tot_reads, 2)\n accounted_f += f\n sr = SeqRecord(Seq(h), id='v3_haplotype_%d-support_%3.2f' % (hi, f), description='')\n haps.append(sr)\n hi += 1\n\n SeqIO.write(haps, 'v3haplotypes.fasta', 'fasta')\n for f in ['high_quality.fastq', 'clean_reads.fasta']:\n os.remove(f)\n logging.info('Haplotypes written to haplotypes.fasta')",
"def main(_):\n print('argument to expand', ARGS.video_in)\n print('argument expanded', glob.glob(ARGS.video_in))\n video_count = 0\n for video_filename in glob.glob(ARGS.video_in):\n print('start parsing', video_filename)\n data = skvideo.io.ffprobe(video_filename)['video']\n rate_str = six.ensure_str(data['@r_frame_rate']).split('/')\n rate = float(rate_str[0]) / float(rate_str[1])\n print('detected frame rate:', rate)\n\n print('load frames:')\n video = skvideo.io.vreader(video_filename)\n frame_count = 0\n file_count = 0\n for frame in video:\n if (frame_count > ARGS.offset) and \\\n ((frame_count-ARGS.offset)%ARGS.skip == 0) and \\\n (frame_count/rate >= ARGS.from_s) and \\\n (frame_count/rate <= ARGS.to_s or ARGS.to_s == -1):\n print(frame_count,)\n img = Image.fromarray(frame)\n if ARGS.crop:\n img = crop(img, ARGS.size)\n # save file\n file_number = file_count + video_count * ARGS.multiple + ARGS.start\n if ARGS.format_ext.lower() == 'jpg':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.jpg'.format(file_number))\n img.save(file_out, 'JPEG')\n elif ARGS.format_ext.lower() == 'png':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.png'.format(file_number))\n img.save(file_out, 'PNG')\n else:\n print('unrecognize format', ARGS.format_ext)\n sys.exit()\n file_count += 1\n frame_count += 1\n video_count += 1",
"def main():\n stats.set_time_start()\n\n if config.options.show_progress:\n stats.start_monitor()\n\n recorders = Recorder.launch(config.options.recorders)\n\n try:\n for filename in config.filenames:\n parser.parse(filename)\n\n Recorder.wait_empty()\n except KeyboardInterrupt:\n pass\n\n stats.set_time_stop()\n\n if config.options.show_progress:\n stats.stop_monitor()\n\n stats.print_summary()",
"def main():\n dir_path='.'\n meas_file='magic_measurements.txt'\n samp_file=\"er_samples.txt\"\n out_file='magic_measurements.txt'\n if '-h' in sys.argv:\n print(main.__doc__)\n sys.exit()\n if '-WD' in sys.argv:\n ind = sys.argv.index('-WD')\n dir_path=sys.argv[ind+1]\n if '-f' in sys.argv:\n ind = sys.argv.index('-f')\n meas_file=sys.argv[ind+1]\n if '-fsa' in sys.argv:\n ind = sys.argv.index('-fsa')\n samp_file=sys.argv[ind+1]\n if '-F' in sys.argv:\n ind = sys.argv.index('-F')\n out_file=sys.argv[ind+1]\n # read in measurements file\n meas_file=dir_path+'/'+meas_file\n out_file=dir_path+'/'+out_file\n samp_file=dir_path+'/'+samp_file\n data,file_type=pmag.magic_read(meas_file)\n samps,file_type=pmag.magic_read(samp_file)\n MeasRecs=[]\n sampnames,sflag=[],0\n for rec in data:\n for samp in samps:\n if samp['er_sample_name'].lower()==rec['er_sample_name'].lower():\n if samp['er_sample_name'] not in sampnames:sampnames.append(samp['er_sample_name'].lower())\n rec['er_site_name']=samp['er_site_name']\n rec['er_location_name']=samp['er_location_name']\n MeasRecs.append(rec)\n break\n if rec['er_sample_name'].lower() not in sampnames:\n sampnames.append(rec['er_sample_name'].lower())\n sflag=1\n SampRec={}\n for key in list(samps[0].keys()):SampRec[key]=\"\"\n SampRec['er_sample_name']=rec['er_sample_name']\n SampRec['er_citation_names']=\"This study\"\n SampRec['er_site_name']='MISSING'\n SampRec['er_location_name']='MISSING'\n SampRec['sample_desription']='recorded added by update_measurements - edit as needed'\n samps.append(SampRec)\n print(rec['er_sample_name'],' missing from er_samples.txt file - edit orient.txt file and re-import')\n rec['er_site_name']='MISSING'\n rec['er_location_name']='MISSING'\n MeasRecs.append(rec)\n pmag.magic_write(out_file,MeasRecs,'magic_measurements')\n print(\"updated measurements file stored in \", out_file)\n if sflag==1:\n pmag.magic_write(samp_file,samps,'er_samples')\n print(\"updated sample file stored in \", samp_file)",
"def scan():\n print \"Filtering started\"\n #filter new CC & merche\n filterNewOperators()\n\n #add the sample-info to 4_Analysed.csv, with hash, ip, port\n readd_to_toscan()\n\n print \"Scann started\"\n timestampFile = datetime.now()\n\n addHeaderToCSVIfNecessery(trashLog)\n # addHeaderToCSVIfNecessery(activityLog)\n if os.path.isfile(liveAnalysisFile):\n with open(liveAnalysisFile, 'r') as csvFile:\n targetList = csv.DictReader(csvFile)\n for target in targetList:\n process = subprocess.Popen(\"sudo nmap -p \" + target['PORT'] + \" -n --data-string \\\"\" + messageScan + \"\\\" --script \" + darkCometScript + \" --append-output -oN \" + resultLog + \" \" + target['HOST'], stdout=subprocess.PIPE, shell=True)\n (output, err) = process.communicate()\n print output\n if err is not None:\n print err\n if \"|_script: DarkComet\" in output:\n # Means the operator is active\n print \"--> Operator is active: \"+target[\"FILE HASH\"]\n row = [timestampFile, target['HOST'], target['PORT'], target['FILE HASH']]\n with open(activityLog, 'a') as f:\n banner = getBanner(output)\n row.append(banner)\n wr = csv.writer(f)\n wr.writerow(row)\n counter = 0\n with open(targetFile, 'r') as csvFile:\n targetList = csv.DictReader(csvFile)\n with open(tempFile, 'w') as f:\n wrTemp = csv.writer(f)\n wrTemp.writerow(['HOST', 'PORT', 'FILE HASH'])\n for target in targetList:\n # TODO: Solve Python problem which doesn't recognise format [command,arg1,arg2]\n process = subprocess.Popen(\"sudo nmap -p \" + target[\n 'PORT'] + \" -n --data-string \\\"\" + messageScan + \"\\\" --script \" + darkCometScript + \" --append-output -oN \" + resultLog + \" \" +\n target['HOST'], stdout=subprocess.PIPE, shell=True)\n (output, err) = process.communicate()\n print output\n\n if \"0 IP addresses\" in output:\n # Means the domain name could not be resolved\n print \"--> Goes to trash\"\n addHeaderToCSVIfNecessery(trashFile)\n row = [timestampFile, target['HOST'], target['PORT'], target['FILE HASH']]\n with open(trashFile, 'a') as f:\n wr = csv.writer(f)\n wr.writerow(row)\n elif \"|_script: DarkComet\" in output:\n # Means the operator is active\n print \"--> Operator is active\"\n\n addHeaderToCSVIfNecessery(liveAnalysisFile)\n row = [timestampFile, target['HOST'], target['PORT'], target['FILE HASH']]\n with open(activityLog, 'a') as f:\n wr = csv.writer(f)\n banner = getBanner(output)\n row.append(banner)\n wr.writerow(row)\n if counter < 6:\n with open(liveAnalysisFile, 'a') as f:\n wr = csv.writer(f)\n wr.writerow(row)\n with open(onlineFile, 'a') as f:\n wr = csv.writer(f)\n wr.writerow([target['FILE HASH']])\n counter += 1\n else:\n print \"--> to many to analyse, not added!\"\n wrTemp.writerow([target['HOST'], target['PORT'], target['FILE HASH']])\n else:\n # Means the operator is now not active but could it be later\n wrTemp.writerow([target['HOST'], target['PORT'], target['FILE HASH']])\n os.remove(targetFile)\n os.rename(tempFile, targetFile)\n if os.path.isfile(trashFile):\n print \"There are hosts in the trash\"\n try:\n host = socket.gethostbyname(\"www.google.com\")\n socket.create_connection((host, 80), 2)\n print \"Connected to internet -- hosts in trash are removed\"\n with open(trashFile, 'r') as csvFile:\n trashList = csv.DictReader(csvFile)\n with open(trashLog, 'a') as f:\n wr = csv.writer(f)\n for trash in trashList:\n wr.writerow([timestampFile, trash['HOST'], trash['PORT'], trash['FILE HASH']])\n os.remove(trashFile)\n except:\n print \"No internet - the hosts will be replaced in target\"\n with 
open(trashFile, 'r') as csvFile:\n trashList = csv.DictReader(csvFile)\n with open(targetFile, 'a') as f:\n wr = csv.writer(f)\n for trash in trashList:\n wr.writerow([trash['HOST'], trash['PORT'], trash['FILE HASH']])\n os.remove(trashFile)\n online()",
"def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')"
] | [
"0.59097695",
"0.57130927",
"0.56844276",
"0.56577593",
"0.5633177",
"0.5608301",
"0.5600647",
"0.5502245",
"0.54992604",
"0.54839313",
"0.5466021",
"0.5442815",
"0.54148203",
"0.53719056",
"0.5355851",
"0.5337445",
"0.53222096",
"0.53084624",
"0.5303184",
"0.52833384",
"0.52820265",
"0.528095",
"0.5269902",
"0.5267302",
"0.52585477",
"0.52529126",
"0.52404463",
"0.5239624",
"0.5234781",
"0.52259606"
] | 0.6520813 | 0 |
returns rm command to delete MAF file after it has been used by phastCons | def cleanup_cmmd(self):
return 'rm {}\n'.format(self.maf) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cleanUp(self, f):\n os.system('rm ' + f)",
"def fake_sudo_rm(self, cmd):\n for filename in cmd[2:]:\n if os.path.exists(filename):\n os.remove(filename)",
"def rm(cli):\n __check_in_autonotes_dir()\n\n # File args\n files = cli.config.rm.file\n\n # Remove the files\n __rm(files)",
"def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))",
"def removeConfigFile(alg):\r\n configPath = alg.getParameterValue('config')\r\n if isWindows():\r\n command = \"DEL {}\".format(os.path.join(rliPath(), configPath))\r\n else:\r\n command = \"rm {}\".format(os.path.join(rliPath(), configPath))\r\n alg.commands.append(command)",
"def delete_file(name):\n subprocess.check_output(cmd_preamble + [\"rm\", name])",
"def clean():\n local('rm -fr %s' % os.path.abspath(env.config['destination']))",
"def clean():\n try:\n os.unlink(options.coords + 'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n pass",
"def rm(args):\n args.delete = True\n return remove(args)",
"def remove(self, fileName):\n self.communicate(CMD_RM + ' ' + fileName)",
"def remove():\n run('pew rm {0}'.format(package_name()))",
"def rm_cmd(server, client, line):\n try:\n target = line.split(' ')[1].strip()\n except:\n client.send(client.container.exec_run(\"/bin/sh -c rm\")\n .decode(\"utf-8\"))\n return\n response = client.container.exec_run(\n \"/bin/sh -c cd {} && test -f {} && echo 0\"\n .format(client.pwd, target)).decode(\"utf-8\").strip()\n if response != \"0\":\n response = client.container.exec_run(\n \"/bin/sh -c cd {} && rm {}\".format(client.pwd, target))\n client.send(response)\n else:\n client.container.exec_run(\"/bin/sh -c cd {} && cp {} /tmp/\"\n .format(client.pwd, target))\n client.container.exec_run(\"/bin/sh -c cd {} && rm {}\"\n .format(client.pwd, target))",
"def remove_stuff_post_error(self):\n os.system('rm %s' % self.destination)",
"def delete(configsetname):\n cnfset = configsetPath(configsetname)\n files = os.listdir(cnfset)\n for f in files: os.remove(os.path.join(cnfset, f))\n os.rmdir(cnfset)\n return None",
"def clean_up(user, fname, tango_output):\n time.sleep(1)\n run(['rm', fname])\n time.sleep(1)\n path = tango_output + user + '.out'\n run(['rm', path])",
"def deleteSingleFile(filename):\n os.popen('rm {}'.format(filename))",
"def delete(self):\n os.system(\"rm \"+self._name)",
"def test_cleanup():\n os.remove(test_file[:-4] + \"_no_grave.h5m\")",
"def delete_file(mapper, connection, target):\n if target.filename and app.config['CLEANUP_FILES']:\n try:\n os.remove(join(app.config['FILE_PATH'], str(target.talk.id),\n str(target.version), target.filename))\n except OSError:\n # We don't care if wasn't deleted because it does not exist\n pass",
"def test_remove_conanfile(self, setup):\n client, pref = setup\n server = client.servers[\"default\"]\n path = server.test_server.server_store.export(pref.ref)\n conanfile = os.path.join(path, \"conanfile.py\")\n os.unlink(conanfile)\n client.run(\"install --requires=hello/0.1\", assert_error=True)\n assert \"Corrupted hello/0.1 in 'default' remote: no conanfile.py\" in client.out",
"def delete_ffmlp_data():\n import shutil\n ffmlp_dir = \"%s/data/fnc-1/mlp_models/temp_models\" % (\n path.dirname(path.dirname(path.abspath(__file__))))\n if (os.path.exists(ffmlp_dir)):\n for the_file in os.listdir(ffmlp_dir):\n file_path = os.path.join(ffmlp_dir, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(e)",
"def remove(self): \n self.doRoot(self.removeDir)\n settings.getChanged('mosh.resourceReplacer.applied').remove(self.file)",
"def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)",
"def main_remove(args):\n return remove_command(args.directory, args.name)",
"def destroy(self):\n res = subprocess.run(\"{} rm {}\".format(self.binary,\n self.args['name']))\n if res.returncode != 0:\n sys.exit(2)\n return res",
"def delete_file(self):\n os.remove(self.id+\"-input.txt\")\n if(self.lang == \"PYTHON\"):\n os.remove(self.id+\".py\")\n elif(self.lang == \"C\"):\n os.remove(self.id+\".c\")\n if(self.status == 1):\n os.remove(self.id+\"_c\")\n elif(self.lang == 'CPP'):\n os.remove(self.id+\".cpp\")\n if(self.status == 1):\n os.remove(self.id+\"_cpp\")\n elif(self.lang == 'JAVA'):\n os.remove(self.id+\".java\")\n if(self.status == 1):\n os.remove(self.id+\"_java\") \n elif(self.lang == \"JS\"):\n os.remove(self.id+\".js\")\n # if(self.status == 1):\n # os.remove(self.id+\"_js\")s",
"def cleanup(fname):\n if os.path.isfile(fname):\n try:\n os.remove(fname)\n print \"Cleaned up\", fname\n except OSError:\n print \"Failed to clean up\", fname",
"def clean_gem5(c):\n _delete_file(f'{ROOT_DIR}/gem5/build/')",
"def rm(file_name):\n if os.path.isfile(file_name):\n flag = os.remove(file_name)\n else:\n return \"error\"",
"def delete(self, filename):\n pass"
] | [
"0.71679854",
"0.6729233",
"0.67085826",
"0.6530478",
"0.64783084",
"0.64774156",
"0.64272",
"0.64220965",
"0.6401939",
"0.63940406",
"0.6365888",
"0.6314277",
"0.61995727",
"0.6196345",
"0.6186721",
"0.6138527",
"0.6134482",
"0.61244386",
"0.6114564",
"0.6044866",
"0.6037266",
"0.60312045",
"0.6020776",
"0.59880334",
"0.59228015",
"0.592192",
"0.59175175",
"0.59174645",
"0.59149456",
"0.59107196"
] | 0.72506166 | 0 |
Computes the probability of finding an active transcription factor at a given range of ligand concentrations. | def p_act(c_range, k_a, k_i, ep_ai=5, n_sites=int(2)):
if type(n_sites) is not int:
raise TypeError('n_sites must be an integer.')
numer = (1 + c_range / k_a)**n_sites
denom = numer + np.exp(-ep_ai) * (1 + c_range / k_i)**n_sites
return numer / denom | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n probabilities[possible_target_location] = integrate.quad(\n integral_function,\n -np.inf, np.inf,\n args=(possible_target_location,Dprime_map[fixation]),\n epsabs=0,\n limit=100,\n full_output=1\n )[0] #MOD Dprime_map deleted\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted",
"def get_pc_per_range(model, class_name):\n class_total = model.class_counts[class_name]\n if model.num_runs is not None:\n class_total = model.num_runs * class_total * .33\n\n true_positives, totals = model.range_metrics_10[class_name]\n purities = [] # Accuracy per range (true positive/total)\n comps = []\n TP_count = 0\n total_count = 0\n\n for index in reversed(range(len(true_positives))):\n cur_p = 0 # Current purity\n cur_c = 0 # Current completeness\n TP_count += true_positives[index]\n total_count += totals[index]\n if total_count != 0:\n # positive class samples / totals # with prob in range\n cur_p = TP_count / total_count\n if class_total != 0:\n cur_c = TP_count / class_total\n\n purities.append(cur_p)\n comps.append(cur_c)\n purities.reverse()\n comps.reverse()\n return purities, comps",
"def _compute_register_bounds(cls, num_values, probability):\n bits = np.arange(1, num_values + 1)\n probs = scipy.stats.geom.cdf(bits, probability)\n return probs / probs[-1]",
"def probability_from_internal(internal_values, constr):\n return internal_values / internal_values.sum()",
"def boost_probability_for(fixation):\n probabilities = np.zeros(Number_of_locs) #MOD Number_of_locs deleted\n for possible_target_location in xrange(Number_of_locs): #MOD Number_of_locs deleted\n Lib_c.set_target(possible_target_location)\n probabilities[possible_target_location] = integrate.quad(\n Lib_c.function,\n -np.inf, np.inf,\n epsabs=0,\n limit=50,\n full_output=1\n )[0]\n return np.sum(Post_probs * probabilities) #MOD Post_probs deleted",
"def gomeroccupancyscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n gomer_occupancy = 1\n area_pwm_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(pwm_length - 1, 1, -1):\n prod_gomer = 1\n prod_gomer_rc = 1\n for j in range(pwm_length):\n if j <= i:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n elif (j + i) > len(seq) - 1:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n else:\n # print \"got to else\"\n s = seq[j + i]\n prod_gomer *= pwm_dictionary[s][j]\n prod_gomer_rc *= area_pwm_rc[s][j]\n gomer_occupancy *= (1 - prod_gomer) * (1 - prod_gomer_rc)\n for i in range(len(seq) - 1):\n prod_gomer = 1\n prod_gomer_rc = 1\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq) - 1:\n prod_gomer *= 0.25\n prod_gomer_rc *= 0.25\n else:\n prod_gomer *= pwm_dictionary[seq[j + i]][j]\n prod_gomer_rc *= area_pwm_rc[seq[j + i]][j]\n gomer_occupancy *= (1 - prod_gomer) * (1 - prod_gomer_rc)\n gomer_occupancy = 1 - gomer_occupancy\n\n return gomer_occupancy",
"def probability_to_internal(external_values, constr):\n return external_values / external_values[-1]",
"def calc_prob_prior(iterations, lam):\n return list(map(lambda x: math.exp(-lam * x), range(iterations)))",
"def get_confidence_interval(self,a,b):\n\t\tk_vals,prob_vals = self.tuple_of_probabilities\n\t\tworking_indices = [i for i,v in enumerate(k_vals) if (v >= a and v<= b)]\n\t\tworking_prob_vals = [prob_vals[i] for i in working_indices]\n\t\treturn sum(working_prob_vals)",
"def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob",
"def _perceive(self, p_range: Union[int, np.ndarray]) -> np.ndarray:\n dist = self.distance\n p_filter = np.where(((dist<p_range) & (dist>0)), 1, 0)\n inv_dist = 1 / (dist+EPSILON)\n inv_dist = np.multiply(inv_dist, p_filter)\n mut_influence = inv_dist / (np.sum(inv_dist, axis=-1, \n keepdims=True) + EPSILON)\n diags = 1 - np.sum(p_filter, axis=-1)\n np.fill_diagonal(mut_influence, diags)\n return mut_influence",
"def _term_probability(self, frequency, total_frequency, doc_length, total_doc_length):\n omega = self.alpha / (doc_length + self.alpha)\n if doc_length == 0:\n p1 = 0\n else:\n p1 = frequency / doc_length\n if total_doc_length == 0:\n p2 = 0\n else:\n p2 = total_frequency / total_doc_length\n return (1-omega) * p1 + omega * p2",
"def amaoccupancyscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n occupancy_list = []\n pwm_dictionary_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(len(seq) - 1):\n occupancy = 1\n occupancy_rc = 1\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq):\n occupancy *= 0.25\n occupancy_rc *= 0.25\n else:\n occupancy *= pwm_dictionary[seq[j + i]][j]\n occupancy_rc *= pwm_dictionary_rc[seq[j + i]][j]\n occupancy_list.append(occupancy + occupancy_rc)\n ama_occupancy = sum(occupancy_list) / len(occupancy_list)\n return ama_occupancy",
"def make_light_prob(distance):\n if distance <= 1250 / 9:\n return 1\n return .99 * make_light_prob(distance - 250 / 9)",
"def cal_pn(grams_set, grams, candidate, reference):\n count = 0\n for gram in grams_set:\n # print(gram)\n count += count_clip(gram, grams, reference)\n # calculate log() for p, so '+10**-8' avoid 'p==0'\n p = count / len(grams) + 10**-8 \n return p",
"def range_probability_cdf(mean, devi, range_low, range_high):\r\n # 1 / (2 * pi * deviation**2) = x\r\n # e ** -((range_num - mean)**2 / 2*deviation**2 = y\r\n # area = y/x\r\n\r\n large = norm.cdf(range_high, mean, devi)\r\n print(\"scipy large area = \", large)\r\n small = norm.cdf(range_low, mean, devi)\r\n print(\"scipy small area = \", small)\r\n range_area = large - small\r\n message = f\"The area in range {range_low} - {range_high} is {range_area}\"\r\n return range_area",
"def base_norm_pro(pro,m0,std0): \n X = stats.norm(loc=m0, scale=std0) \n t = np.arange((m0-30), (m0+30), 0.01) \n t = (t[:-1] + t[1:])/2\n \n for i in t:\n if X.cdf(i)>=pro:\n return int(i)\n break",
"def learning_rate_range():\n # Lower and upper bounds\n #######\n lower_bound = 0.1 \n upper_bound = 1e-6\n #######\n return lower_bound, upper_bound",
"def compute_log_probability_of_text(text, char_to_ix, frequency_statistics, transition_matrix):\n t = text\n cix = char_to_ix\n fr = frequency_statistics\n tm = transition_matrix\n \n i0 = cix[t[0]]\n p = np.log(fr[i0])\n i = 0\n while i < len(t)-1:\n i1 = cix[t[i+1]]\n p += np.log(tm[i0, i1])\n i0 = i1\n i += 1\n \n return p",
"def CalculateBoundProbability(self, psi):\n\n\t\t_, _, _, boundTotal = self.CalculateBoundDistribution(psi)\n\n\t\treturn boundTotal",
"def sumoccupancyscore(pwm_dictionary, seq):\n if \"N\" in seq:\n return 0\n else:\n # pwm_length = len(pwm_dictionary)\n pwm_length = len(pwm_dictionary[\"A\"])\n sum_occupancy = 0\n pwm_dictionary_rc = rc_pwm(pwm_dictionary, pwm_length)\n for i in range(len(seq) - 1):\n occupancy = 1\n occupancy_rc = 1\n for j in range(pwm_length - 1):\n if (j + i) >= len(seq):\n occupancy *= 0.25\n occupancy_rc *= 0.25\n elif seq[j + i] not in [\"A\", \"C\", \"G\", \"T\"]:\n occupancy *= 0.25\n occupancy_rc *= 0.25\n else:\n occupancy *= pwm_dictionary[seq[j + i]][j]\n occupancy_rc *= pwm_dictionary_rc[seq[j + i]][j]\n sum_occupancy += occupancy + occupancy_rc\n return sum_occupancy / 2",
"def probability(s, a, b):\r\n return s.cdf(b) - s.cdf(a)",
"def categorical_likelihood_range(node, data, dtype=np.float64, **kwargs):\r\n\r\n # Assert that the given node is only build on one instance\r\n assert len(node.scope) == 1, node.scope\r\n\r\n # Initialize the return variable log_probs with zeros\r\n probs = np.ones((data.shape[0], 1), dtype=dtype)\r\n\r\n # Only select the ranges for the specific feature\r\n ranges = data[:, node.scope[0]]\r\n\r\n # For each instance\r\n for i, rang in enumerate(ranges):\r\n\r\n # Skip if no range is specified aka use a log-probability of 0 for that instance\r\n if rang is None:\r\n continue\r\n\r\n if rang.is_not_null_condition:\r\n probs[i] = 1 - node.p[rang.null_value]\r\n continue\r\n\r\n # Skip if no values for the range are provided\r\n if len(rang.possible_values) == 0:\r\n probs[i] = 0\r\n\r\n # Compute the sum of the probability of all possible values\r\n probs[i] = sum([node.p[possible_val] for possible_val in rang.possible_values])\r\n\r\n return probs",
"def _term_probability(self, frequency, total_frequency, doc_length, total_doc_length):\n if doc_length == 0:\n p1 = 0\n else:\n p1 = frequency / doc_length\n if total_doc_length == 0:\n p2 = 0\n else:\n p2 = total_frequency / total_doc_length\n return (1-self.omega) * p1 + self.omega * p2",
"def get_prob_for_distributions(p):\n w1 = p[0]\n mu1 = p[1]\n sigma1 = p[2]\n w2 = p[3]\n mu2 = p[4]\n sigma2 = p[5]\n w3 = p[6]\n mu3 = p[7]\n sigma3 = p[8]\n dist_range = (0, 4.330310991999920844e+01)\n x = np.linspace(dist_range[0], dist_range[1], 1000)\n A1 = np.array(w1 * mlab.normpdf(x, mu1, sigma1)).sum()\n A2 = np.array(w2 * mlab.normpdf(x, mu2, sigma2)).sum()\n A3 = np.array(w3 * mlab.normpdf(x, mu3, sigma3)).sum()\n p1 = A1 / (A1 + A2 + A3)\n p2 = A2 / (A1 + A2 + A3)\n p3 = A3 / (A1 + A2 + A3)\n return p1, p2, p3",
"def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))",
"def raw_trigram_probability(self,trigram):\n\n result = 0.0\n try:\n bigram = (trigram[0],trigram[1],)\n result = self.trigramcounts[trigram]/self.bigramcounts[bigram]\n except Exception as e:\n pass\n else:\n pass\n return result",
"def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))",
"def range_weights(rangemap, rr, pulselength, db=False):\n\n dr = rangemap - rr\n fr = 1. - np.abs(dr) / pulselength\n\n ind0 = fr < 0\n fr[ind0] = 0.\n fr[~ind0] = fr[~ind0] * (rr / rangemap[~ind0]) ** 4\n\n if db:\n fr = 10. * np.log10(fr)\n\n return fr",
"def _computeCondProb(self, testData, classValue):\n classAttrObj = self._classAttrs[classValue]\n frequencyDict = classAttrObj.frequencyDict\n totalDocsInClass = classAttrObj.totalDocsInClass\n\n result = (totalDocsInClass/self._totalTrainDocs) # P(c)\n # Compute P(t|c) for each t in d\n for word in testData:\n result *= ((frequencyDict.get(word, 0) + 1) / (sum(frequencyDict.values()) + self._sizeOfVocabulary))\n return result"
] | [
"0.578839",
"0.5781874",
"0.5769571",
"0.5661054",
"0.56165594",
"0.55934966",
"0.55533504",
"0.5542344",
"0.55398613",
"0.55281544",
"0.5497608",
"0.54954404",
"0.54951864",
"0.54611725",
"0.5446603",
"0.5415126",
"0.5392379",
"0.5385614",
"0.53782094",
"0.53614104",
"0.5341954",
"0.5311067",
"0.53035975",
"0.53027993",
"0.5300596",
"0.5297711",
"0.52862453",
"0.52839524",
"0.527132",
"0.52692026"
] | 0.58372784 | 0 |
Computes the foldchange for a simple repression motif over a range of ligand concentrations. | def fc_repression(c_range, num_rep, k_a, k_i, ep_r, ep_ai=5,
n_sites=int(2), n_ns=4.6E6):
# Compute the MWC probability.
mwc_term = p_act(c_range, k_a, k_i, n_sites=n_sites, ep_ai=ep_ai)
# Compute and return the foldchange.
repression = 1 + mwc_term * (num_rep / n_ns) * np.exp(-ep_r)
return 1 / repression | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _large_range_correction(self, old_estimate):\n return -(1 << 32) * math.log(1 - (old_estimate / (1 << 32)))",
"def _log_fold_change_pairs(self, idx0, idx1, base):\n logfc = np.zeros(shape=(len(idx0), len(idx1), self._theta_mle.shape[1]))\n for i, xi in enumerate(idx0):\n for j, xj in enumerate(idx1):\n logfc[i, j, :] = self._theta_mle[xi, :] - self._theta_mle[xj, :]\n\n if base == np.e:\n return logfc\n else:\n return logfc / np.log(base)",
"def reduce(self, threshold):\n def percentage_change(old, new):\n return (old - new) / old\n real_reduction_iterations = 0\n padic_reduction_iterations = 0\n cont_reduction_iterations = 0\n factor = len(self.constants.primes) + 1\n \n print('initial bound',max(self.coefficients['n1_bound'],max(self.coefficients['Z_bounds'])))\n\n # First, go through the real reduction loop.\n current_n1_bound = self.coefficients['n1_bound']\n current_diff_bound = None\n while True:\n real_reduction_iterations += 1\n logging.info(\"Real Reduction - Iteration %d\" % real_reduction_iterations)\n\n large_constant = self.calculate_large_constant(current_n1_bound, factor)\n logging.info(\"Large constant contains %d digits \" % large_constant.ndigits())\n\n # Find a new bound on n_1 - n_k\n new_diff_bound = self.real_reduce(current_n1_bound, large_constant)\n logging.info(\"Current bound on n1: \" + str(current_n1_bound))\n self.update_real_constants(new_diff_bound)\n logging.info(\"new diff bound: \" + str(new_diff_bound))\n logging.info(\"New bound on n1: \" + str(self.coefficients[\"n1_bound\"]))\n logging.info(\"New bound on zi: \" + str(self.coefficients['Z_bounds']))\n \n if percentage_change(current_n1_bound, self.coefficients[\"n1_bound\"]) < self.threshold:\n logging.info(\"New bound did not improve in the real step; real reduction process is done.\")\n factor = factor + 5\n break\n\n current_n1_bound = self.coefficients['n1_bound']\n current_diff_bound = new_diff_bound\n\n # Second, go through the p-adic reduction loop.\n current_Z_bounds = self.coefficients['Z_bounds']\n while True:\n padic_reduction_iterations += 1\n logging.info(\"p-adic Reduction - Iteration %d\" % padic_reduction_iterations)\n\n new_Z_bounds = self.padic_reduce(math.ceil(current_diff_bound))\n logging.info(\"New bound on zi: \" + str(new_Z_bounds))\n logging.info(\"Current bound on n1: \" + str(current_n1_bound))\n new_n1_bound = self.update_padic_constants(new_Z_bounds)\n logging.info(\"New bound on n1: \" + str(new_n1_bound))\n if percentage_change(current_n1_bound, new_n1_bound) < self.threshold:\n logging.info(\"New bound did not improve in the p-adic step; p-adic reduction process is done.\")\n break\n\n current_n1_bound = new_n1_bound\n\n print(current_n1_bound)\n\n return self.constants",
"def _log_fold_change_pairs(self, idx0, idx1, base):\n assert np.all([x < self._pval.shape[1] for x in idx0])\n assert np.all([x < self._pval.shape[1] for x in idx1])\n if base == np.e:\n return self._logfc[idx0, :, :][:, idx1, :]\n else:\n return self._logfc[idx0, :, :][:, idx1, :] / np.log(base)",
"def _log_fold_change_pairs(self, idx0, idx1, base):\n logfc = np.tile(np.NaN, [len(idx0), len(idx1), self.model_estim.x.shape[1]])\n for i, xi in enumerate(idx0):\n for j, xj in enumerate(idx1):\n logfc[i, j, :] = self._theta_mle[xj, :] - self._theta_mle[xi, :]\n logfc[j, i, :] = -logfc[i, j, :]\n\n if base == np.e:\n return logfc\n else:\n return logfc / np.log(base)",
"def changes(i, step, bins, C, n_u, ns, sums):\n offset= int((step-1)/2)\n sum_i= sums[i] + (-1)*step*row_col_sums(bins[i]+offset, i, bins, C, n_u)\n sum_im1= sums[i-1] + step*row_col_sums(bins[i]+offset, i-1, bins, C, n_u)\n ns_i = (ns[i] - step*n_u[bins[i]+offset])\n ns_im1 = (ns[i-1] + step*n_u[bins[i]+offset])\n \n change= (sum_i)/ns_i - sums[i]/ns[i] + (sum_im1)/ns_im1 - sums[i-1]/ns[i-1]\n \n return change, sum_i, sum_im1, ns_i, ns_im1",
"def _log_fold_change_pairs(self, idx0, idx1, base):\n pass",
"def check_cflcushion(delt=0.1, cfl_cushion_upper=0.5, cfl_cushion_lower=0.1, code_dt_max=0.1, nstep=100):\n \n # Define some characteristic delta t's as log10()\n vec_cfl_dt_discrete = [-1., -2., -3., -3., -3., -3., -2., -3., -1., -1] \n vec_code_dt = [delt]; changes_in_delt = []\n print(0.1/0.22)\n print(0.1, 0.1/0.22*0.5)\n \n # Construct a continues vector of time steps\n vec_cfl_dt = []\n for i in range(len(vec_cfl_dt_discrete)-1):\n vec_cfl_dt += list(vec_cfl_dt_discrete[i] + np.array(range(nstep))/nstep*(vec_cfl_dt_discrete[i+1]-vec_cfl_dt_discrete[i]))\n vec_cfl_dt = 10**np.array(vec_cfl_dt) \n vec_step = range(len(vec_cfl_dt))\n \n # Mimic the CFL decrease condition\n for i, cfl_dt in enumerate(vec_cfl_dt):\n if (vec_code_dt[-1] > cfl_dt*cfl_cushion_upper):\n print(10**((np.log10(cfl_cushion_upper)+np.log10(cfl_cushion_lower))/2))\n vec_code_dt.append(cfl_dt*10**((np.log10(cfl_cushion_upper)+np.log10(cfl_cushion_lower))/2))\n changes_in_delt.append(i)\n print()\n print(f\"DECREASE! Because {vec_code_dt[-2]:6.2e} > {cfl_dt*cfl_cushion_upper:6.2e}\")\n print(f\" {cfl_dt*cfl_cushion_upper:6.2e} = cfl_dt*cfl_cushion_upper\")\n print(f\" {cfl_dt:6.2e} = cfl_dt\")\n print(f\" {vec_code_dt[-2]:6.2e} = code_dt\") \n print(f\" ==> code_dt = {vec_code_dt[-1]}\")\n elif (vec_code_dt[-1] < np.min([cfl_dt*cfl_cushion_lower, code_dt_max])):\n vec_code_dt.append(np.min([cfl_dt*10**((np.log10(cfl_cushion_upper)+np.log10(cfl_cushion_lower))/2), code_dt_max]))\n changes_in_delt.append(i)\n print()\n print(f\"INCREASE! Because {vec_code_dt[-2]:6.2e} < {np.min([cfl_dt*cfl_cushion_lower, code_dt_max]):6.2e}\")\n print(f\" {cfl_dt*cfl_cushion_lower:6.2e} = cfl_dt*cfl_cushion/delt_adjust\")\n print(f\" {cfl_dt:6.2e} = cfl_dt\")\n print(f\" {vec_code_dt[-2]:6.2e} = code_dt\") \n print(f\" ==> code_dt = {vec_code_dt[-1]}\")\n else:\n vec_code_dt.append(vec_code_dt[-1])\n \n # Create a figure\n fig = plt.figure(figsize=(18, 9)); fig.set_tight_layout(False)\n grid_specifications = gridspec.GridSpec(1,1)\n grid_specifications.update(top=0.98, left=0.05, right=0.95, bottom=0.06, wspace=0.35, hspace=0.45)\n ax = plt.subplot(grid_specifications[0])\n \n # Plot dt(istep)\n ax.plot(vec_step, vec_cfl_dt, color='black', label='CFL dt')\n ax.plot(vec_step, vec_cfl_dt*cfl_cushion_upper, color='black', alpha=0.5, label='CFL dt*CFL cushion upper')\n ax.plot(vec_step, vec_cfl_dt*cfl_cushion_lower, color='black', alpha=0.2, label='CFL dt*CFL cushion lower')\n ax.plot(vec_step, vec_code_dt[1:], color='maroon', label='code dt')\n \n # Highlight the changes \n if False:\n for change in changes_in_delt:\n ax.axvline(x=change, color='maroon', alpha=0.5, zorder=1)\n \n # Show figure\n ax.set_yscale('log')\n ax.autoscale()\n ax.legend(labelspacing=0.0, handlelength=1, shadow=True)\n plt.show()\n return",
"def ef_pu_prf_chg(\r\n self,\r\n bckl_lim,\r\n pce_infl_cof,\r\n strn_rlf_cof):\r\n return (bckl_lim *\r\n (1 - pce_infl_cof *\r\n (1 - (1 - self.prf_recv_cof()) * strn_rlf_cof)\r\n ) / pce_infl_cof)",
"def cloud_cover_to_ghi_linear(cloud_cover, ghi_clear, offset=35):\n\n offset = offset / 100.\n cloud_cover = cloud_cover / 100.\n ghi = (offset + (1 - offset) * (1 - cloud_cover)) * ghi_clear\n return ghi",
"def Lc(i:int, imgs:list, masks:list, features:list, phi:models) -> torch.tensor:\n Ion = phi(masks[i] * imgs[i])\n fi, fts = split_i(features, 1)\n cumsum = 0\n for IGm, I_Gm in fts:\n P = torch.sqrt(((Ion - IGm)**2)+1e-016)\n N = torch.sqrt(((Ion - I_Gm)**2)+1e-016)\n cumsum += softplus(P-N).sum()\n cumsum /= len(fts)\n return cumsum",
"def compute_fold_regulation(fc_data_list):\n # if isinstance(fc_data_list, list):\n return [{TEST_SAMPLE: fc_elem[TEST_SAMPLE], CONTROL_SAMPLE: fc_elem[CONTROL_SAMPLE],\n FOLD_REGULATION: fc_elem[FOLD_CHANGE].applymap(lambda x: -1.0/x if x < 1.0 else x)}\n for fc_elem in fc_data_list]\n # return fc_data_list.applymap(lambda x: -1.0/x if x < 1.0 else x)",
"def localize_red_clump(star_catalog,close_cat_idx,log):\n\n def select_within_range(mags, colours, mag_min, mag_max, col_min, col_max):\n \"\"\"Function to identify the set of array indices with values\n between the range indicated\"\"\"\n\n idx1 = np.where(colours >= col_min)[0]\n idx2 = np.where(colours <= col_max)[0]\n idx3 = np.where(mags >= mag_min)[0]\n idx4 = np.where(mags <= mag_max)[0]\n idx = set(idx1).intersection(set(idx2))\n idx = idx.intersection(set(idx3))\n idx = list(idx.intersection(set(idx4)))\n\n return idx\n\n RC = photometry_classes.Star()\n\n inst_i = star_catalog['cal_ref_mag_ip'][close_cat_idx]\n inst_r = star_catalog['cal_ref_mag_rp'][close_cat_idx]\n inst_g = star_catalog['cal_ref_mag_gp'][close_cat_idx]\n cal_i = star_catalog['imag'][close_cat_idx]\n cal_r = star_catalog['rmag'][close_cat_idx]\n cal_g = star_catalog['gmag'][close_cat_idx]\n inst_ri = inst_r - inst_i # Catalogue column order is red -> blue\n inst_gi = inst_g - inst_i\n inst_gr = inst_g - inst_r\n cal_ri = cal_r - cal_i\n cal_gi = cal_g - cal_i\n cal_gr = cal_g - cal_r\n\n log.info('\\n')\n log.info('Localizing the Red Clump')\n log.info('Median (r-i), i: '+str(np.median(inst_ri))+', '+str(np.median(inst_i)))\n log.info('Median (g-i), i: '+str(np.median(inst_gi))+', '+str(np.median(inst_i)))\n log.info('Median (g-r), g: '+str(np.median(inst_gr))+', '+str(np.median(inst_g)))\n\n ri_min = 0.8\n ri_max = 1.2\n i_min = 15.5\n i_max = 16.5\n\n r_min = 16.2\n r_max = 17.5\n\n gi_min = 2.5\n gi_max = 3.5\n\n gr_min = 1.5\n gr_max = 2.2\n g_min = 17.8\n g_max = 19.5\n\n log.info('Selected Red Clump giants between:')\n log.info('i = '+str(i_min)+' to '+str(i_max))\n log.info('r = '+str(r_min)+' to '+str(r_max))\n log.info('(r-i) = '+str(ri_min)+' to '+str(ri_max))\n log.info('g = '+str(g_min)+' to '+str(g_max))\n log.info('(g-r) = '+str(gr_min)+' to '+str(gr_max))\n log.info('(g-i) = '+str(gi_min)+' to '+str(gi_max))\n\n idx = select_within_range(inst_i, inst_ri, i_min, i_max, ri_min, ri_max)\n\n (RC.ri, RC.sig_ri, RC.i, RC.sig_i) = calc_distribution_centroid_and_spread_2d(inst_ri[idx], inst_i[idx], use_iqr=True)\n\n idx = select_within_range(inst_r, inst_ri, r_min, r_max, ri_min, ri_max)\n\n (RC.r, RC.sig_r) = calc_distribution_centre_and_spread(inst_r[idx], use_iqr=True)\n\n idx = select_within_range(inst_g, inst_gr, g_min, g_max, gr_min, gr_max)\n\n (RC.gr, RC.sig_gr, RC.g, RC.sig_g) = calc_distribution_centroid_and_spread_2d(inst_gr[idx], inst_g[idx], use_iqr=True)\n\n idx = select_within_range(inst_g, inst_gi, g_min, g_max, gi_min, gi_max)\n\n (RC.gi, RC.sig_gi, RC.g, RC.sig_g) = calc_distribution_centroid_and_spread_2d(inst_gi[idx], inst_g[idx], use_iqr=True)\n\n log.info('\\n')\n log.info('Centroid of Red Clump Stars at:')\n log.info(RC.summary(show_mags=True))\n log.info(RC.summary(show_mags=False,show_colours=True))\n\n RC.transform_to_JohnsonCousins()\n\n log.info(RC.summary(show_mags=False,johnsons=True))\n\n return RC",
"def fold_change_bohr(bohr_parameter):\n\n fold_change = (1 + np.e**(-bohr_parameter))**-1\n\n return fold_change",
"def _range_correction(unw1: NDArray,\n unw2: NDArray) -> np.float32:\n\n # Wrap unwrapped Phase in Frame-1 and Frame-2\n unw1_wrapped = np.mod(unw1, (2*np.pi)) - np.pi\n unw2_wrapped = np.mod(unw2, (2*np.pi)) - np.pi\n\n # Get the difference between wrapped images\n arr = unw1_wrapped - unw2_wrapped\n arr -= np.round(arr/(2*np.pi))*2*np.pi\n range_corr = np.angle(np.nanmean(np.exp(1j*arr)))\n\n return range_corr",
"def fold_changes(self, fold_changes: str):\n\n self._fold_changes = fold_changes",
"def compute_county_cirle(county_population):\n return SCATTER_SCALE * county_population",
"def log_fold_change(self, base=np.e, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")",
"def normed_integrated_concentration(self, start: float, stop: float) -> _VectorisedFloat:\n if stop <= self._first_presence_time():\n return (stop - start)*self.min_background_concentration()/self.normalization_factor()\n state_change_times = self.state_change_times()\n req_start, req_stop = start, stop\n total_normed_concentration = 0.\n for interval_start, interval_stop in zip(state_change_times[:-1], state_change_times[1:]):\n if req_start > interval_stop or req_stop < interval_start:\n continue\n # Clip the current interval to the requested range.\n start = max([interval_start, req_start])\n stop = min([interval_stop, req_stop])\n\n conc_start = self._normed_concentration_cached(start)\n\n next_conc_state = self._next_state_change(stop)\n conc_limit = self._normed_concentration_limit(next_conc_state)\n RR = self.removal_rate(next_conc_state)\n delta_time = stop - start\n total_normed_concentration += (\n conc_limit * delta_time +\n (conc_limit - conc_start) * (np.exp(-RR*delta_time)-1) / RR\n )\n return total_normed_concentration",
"def FoldChangeFilterToControl(X, data_headers, FCto, cutoff=0.4):\n XX = LinearFoldChange(X.copy(), data_headers, FCto)\n Xidx = np.any(XX[data_headers].values <= 1 - cutoff, axis=1) | np.any(XX[data_headers].values >= 1 + cutoff, axis=1)\n return X.iloc[Xidx, :]",
"def compute_gain(loudness, renormalize_loudness):\n gain = []\n for i in range(len(loudness)):\n delta_loudness = renormalize_loudness[i] - loudness[i]\n gain.append(np.power(10.0, delta_loudness / 20.0))\n return gain",
"def iterate_grey_level(prev_mask, new_g_disc, converter, \n num_grey_levels=256, upward=True):\n gl_delta = 1./num_grey_levels\n grey_level = new_g_disc/(num_grey_levels - 1)\n \n # Create desired spectrum.\n desired = desired_PSD_nd(\n new_g_disc*gl_delta, prev_mask.shape[0], prev_mask.ndim)\n desired_radial = converter.radially_average(desired)\n \n # Find error:\n corrected_sig = correct_signal(prev_mask, desired_radial, converter)\n error = np.abs(corrected_sig - prev_mask)\n \n # Make corrections:\n num_replacements = int(np.multiply.reduce(prev_mask.shape)*gl_delta)\n \n ## Identify worst zeros. This is different than BIPPSMA, because we \n ## have to check each replacement's neighbourhood to avoid clusters.\n replace_value = 0 if upward else 1\n replace_to = 1 - replace_value\n \n void = prev_mask == replace_value\n void_error = np.where(void, error, 0)\n void_error_order = np.argsort(-void_error, None)# descending.\n \n ## Replace:\n new_sig = prev_mask.copy()\n error_coords = np.unravel_index(void_error_order[:void.sum()], prev_mask.shape)\n \n # We need to make sure replacements don't cluster, by observing the local\n # means. We do that for the entire array - in NumPy. It's cheaper than\n # doing it individually per point in pure Python.\n half_window = 4\n window_size = (2*half_window + 1)\n window = np.full((window_size,)*prev_mask.ndim, 1/window_size**prev_mask.ndim)\n local_mean = ndi.convolve(prev_mask, window, mode='wrap')\n \n for coords in zip(*error_coords):\n if upward:\n crowded = local_mean[coords] > grey_level\n else:\n crowded = local_mean[coords] < grey_level\n \n if crowded:\n continue\n \n assert(new_sig[coords] == replace_value)\n new_sig[coords] = replace_to\n num_replacements -= 1\n if num_replacements == 0:\n break\n \n # Profit:\n return new_sig",
"def calc_change (change_amnts, rate_of_transition, from_cohort, present):\n row, col = cuda.grid(2)\n\n if row < from_cohort.shape[0] and col < from_cohort.shape[1]:\n change_amnts[row,col] = \\\n rate_of_transition[row,col] * from_cohort[row,col] \n if present[row, col] and change_amnts[row, col] > from_cohort[row, col]:\n change_amnts[row, col] = from_cohort[row,col]",
"def chgbound(self,accmode_,i_,lower_,finite_,value_): # 3\n if not isinstance(accmode_,accmode): raise TypeError(\"Argument accmode has wrong type\")\n res = self.__obj.chgbound(accmode_,i_,lower_,finite_,value_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def calculate_force_change(data, axis=\"x\", forceChannel=\"force\", distanceChannel=\"surfaceSep\", window=15):\n axis = axis.upper()\n\n #check if the regions have been assigned\n if \"unfolding\" not in data.columns.values.tolist():\n raise ValueError(\"The unfolding events have not yet been identified. See function identify_unfolding_events\")\n\n #Label the different isolated events using scipy.ndimage\n data[\"eventID\"], eventsNumber = ndimage.label(data[\"unfolding\"])\n\n #Start the counting in 0\n data[\"eventID\"] -= 1\n #Show how many events were identified\n print(eventsNumber, \"events identified\")\n\n def averaged_values(column, startT, endT, window=5):\n start = column.index.get_loc(startT)\n end = column.index.get_loc(endT)\n averagedBefore = column.iloc[start-window: start - 3].mean()\n averagedAfter = column.iloc[end + 3: end + window].mean()\n diffAverage = averagedAfter - averagedBefore\n return averagedBefore, averagedAfter, diffAverage\n\n startForce = []\n forceChange = []\n\n pullingCycle = []\n #Take the first and last times point of each unfolding event, discarding the first point because it is the\n # unclassified regions\n times = {\"startTimes\": data.groupby(\"eventID\").time.first()[1:], \"endTimes\": data.groupby(\"eventID\").time.last()[1:]}\n newWindow = deepcopy(window)\n for startTime, endTime in zip(times[\"startTimes\"], times[\"endTimes\"]):\n if data.index.get_loc(startTime) < newWindow:\n window = data.index.get_loc(startTime) - 1\n else:\n window = newWindow\n forceBefore, forceAfter, forceDifference = averaged_values(data[forceChannel+axis], startTime, endTime, window)\n startForce.append(forceBefore)\n forceChange.append(forceDifference)\n pullingCycle.append(data.loc[startTime, \"pullingCycle\"])\n\n unfoldingData = pd.DataFrame({\"startTime\": times[\"startTimes\"], \"endTime\": times[\"endTimes\"],\n \"force\": startForce, \"forceChange\": forceChange, \"pullingCycle\": pullingCycle})\n\n return unfoldingData",
"def fold(vyper_module: vy_ast.Module) -> None:\n replace_builtin_constants(vyper_module)\n\n changed_nodes = 1\n while changed_nodes:\n changed_nodes = 0\n changed_nodes += replace_user_defined_constants(vyper_module)\n changed_nodes += replace_literal_ops(vyper_module)\n changed_nodes += replace_subscripts(vyper_module)\n changed_nodes += replace_builtin_functions(vyper_module)",
"def reduce_eq(self, threshold):\n def percentage_change(old, new):\n return (old - new) / old\n real_reduction_iterations = 0\n padic_reduction_iterations = 0\n cont_reduction_iterations = 0\n factor = len(self.constants.primes) + 1\n\n print('initial bound',max(self.coefficients['eq_n1_bound'],max(self.coefficients['eq_Z_bounds'])))\n\n # First, go through the real reduction loop.\n current_n1_bound = self.coefficients['eq_n1_bound']\n current_diff_bound = 0\n\n # Second, go through the p-adic reduction loop.\n current_Z_bounds = self.coefficients['eq_Z_bounds']\n while True:\n padic_reduction_iterations += 1\n logging.info(\"p-adic Reduction - Iteration %d\" % padic_reduction_iterations)\n \n new_Z_bounds = self.eq_padic_reduce(math.ceil(current_diff_bound))\n logging.info(\"diff bound: \" + str(current_diff_bound))\n logging.info(\"New bound on zi: \" + str(new_Z_bounds))\n logging.info(\"Current bound on n1: \" + str(current_n1_bound))\n new_n1_bound = self.update_padic_constants(new_Z_bounds)\n logging.info(\"New bound on n1: \" + str(new_n1_bound))\n if percentage_change(current_n1_bound, new_n1_bound) < self.threshold:\n logging.info(\"New bound did not improve in the p-adic step; p-adic reduction process is done.\")\n break\n\n current_n1_bound = new_n1_bound\n\n print('n1 bound: '+ str(current_n1_bound))\n\n return self.constants",
"def chgconbound(self,i_,lower_,finite_,value_): # 3\n res = self.__obj.chgconbound(i_,lower_,finite_,value_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def chgconbound(self,i_,lower_,finite_,value_):\n res = __library__.MSK_XX_chgconbound(self.__nativep,i_,lower_,finite_,value_)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def crf_refine(label,\n img,\n crf_theta_slider_value,\n crf_mu_slider_value,\n crf_downsample_factor,\n gt_prob):\n\n Horig = label.shape[0]\n Worig = label.shape[1]\n\n l_unique = np.unique(label.flatten())#.tolist()\n scale = 1+(5 * (np.array(img.shape).max() / 3000))\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF scale: %f' % (scale))\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF downsample factor: %f' % (crf_downsample_factor))\n logging.info('CRF theta parameter: %f' % (crf_theta_slider_value))\n logging.info('CRF mu parameter: %f' % (crf_mu_slider_value))\n logging.info('CRF prior probability of labels: %f' % (gt_prob))\n\n # decimate by factor by taking only every other row and column\n img = img[::crf_downsample_factor,::crf_downsample_factor, :]\n # do the same for the label image\n label = label[::crf_downsample_factor,::crf_downsample_factor]\n # yes, I know this aliases, but considering the task, it is ok; the objective is to\n # make fast inference and resize the output\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Images downsampled by a factor os %f' % (crf_downsample_factor))\n\n Hnew = label.shape[0]\n Wnew = label.shape[1]\n\n orig_mn = np.min(np.array(label).flatten())\n orig_mx = np.max(np.array(label).flatten())\n\n if l_unique[0]==0:\n n = (orig_mx-orig_mn)#+1\n else:\n\n n = (orig_mx-orig_mn)+1\n label = (label - orig_mn)+1\n mn = np.min(np.array(label).flatten())\n mx = np.max(np.array(label).flatten())\n\n n = (mx-mn)+1\n\n H = label.shape[0]\n W = label.shape[1]\n U = unary_from_labels(label.astype('int'), n, gt_prob=gt_prob)\n d = dcrf.DenseCRF2D(H, W, n)\n d.setUnaryEnergy(U)\n\n # to add the color-independent term, where features are the locations only:\n d.addPairwiseGaussian(sxy=(3, 3),\n compat=3,\n kernel=dcrf.DIAG_KERNEL,\n normalization=dcrf.NORMALIZE_SYMMETRIC)\n feats = create_pairwise_bilateral(\n sdims=(crf_theta_slider_value, crf_theta_slider_value),\n schan=(scale,scale,scale),\n img=img,\n chdim=2)\n\n d.addPairwiseEnergy(feats, compat=crf_mu_slider_value, kernel=dcrf.DIAG_KERNEL,normalization=dcrf.NORMALIZE_SYMMETRIC) #260\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF feature extraction complete ... inference starting')\n\n Q = d.inference(10)\n result = np.argmax(Q, axis=0).reshape((H, W)).astype(np.uint8) +1\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF inference made')\n\n uniq = np.unique(result.flatten())\n\n result = resize(result, (Horig, Worig), order=0, anti_aliasing=False) #True)\n\n result = rescale(result, orig_mn, orig_mx).astype(np.uint8)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('label resized and rescaled ... CRF post-processing complete')\n\n return result, n"
] | [
"0.5857831",
"0.56895924",
"0.56431454",
"0.56082547",
"0.5581456",
"0.54859054",
"0.53650284",
"0.53091687",
"0.5304077",
"0.527651",
"0.523607",
"0.5227839",
"0.51792836",
"0.5168524",
"0.51494545",
"0.5106263",
"0.5094823",
"0.50880575",
"0.50576776",
"0.5037168",
"0.498652",
"0.49696037",
"0.49205074",
"0.49026138",
"0.4894123",
"0.48912343",
"0.4875316",
"0.48743144",
"0.48461676",
"0.4836845"
] | 0.6041696 | 0 |
>>> s = Solution() >>> s.goodDaysToRobBank([5,3,3,3,5,6,2], 2) [2, 3] >>> s.goodDaysToRobBank([1,1,1,1,1], 0) [0, 1, 2, 3, 4] >>> s.goodDaysToRobBank([1,2,3,4,5,6], 2) [] | def goodDaysToRobBank(self, security: list[int], time: int) -> list[int]:
decreasing_count_before_i = []
for i, n in enumerate(security):
if i == 0:
decreasing_count_before_i.append(0)
else:
if n <= security[i - 1]:
decreasing_count_before_i.append(decreasing_count_before_i[-1] + 1)
else:
decreasing_count_before_i.append(0)
increasing_count_after_i = [0]
for i in range(len(security) - 2, -1, -1):
if security[i] <= security[i + 1]:
increasing_count_after_i.append(increasing_count_after_i[-1] + 1)
else:
increasing_count_after_i.append(0)
increasing_count_after_i.reverse()
# print(decreasing_count_before_i)
# print(increasing_count_after_i)
ans = []
for i in range(len(security)):
if (
decreasing_count_before_i[i] >= time
and increasing_count_after_i[i] >= time
):
ans.append(i)
return ans | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findRanks(toBeRanked, values):\n\treturn list(map(lambda e: findRank(e, values), toBeRanked))",
"def bst_right_balance():\n from bbst import Bst\n return Bst([5, 8, 6, 9, 2, 7])",
"def day2_data():\n test_data = []\n test_data.append([5, 1, 9, 5])\n test_data.append([7, 5, 3])\n test_data.append([2, 4, 6, 8])\n return test_data",
"def test_fibonacci_list():\n computed_fibonacci_value = fibonacci.fibonacci_list(8)\n assert computed_fibonacci_value == [1, 1, 2, 3, 5, 8, 13, 21]",
"def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made",
"def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made",
"def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made",
"def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made",
"def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made",
"def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made",
"def madebanks(self):\n made = []\n for bank in self.__banks:\n if bank.made():\n made.append(bank)\n return made",
"def test_biweekly_bussiness_days_only(self):\n print()\n print(\"Test Bussiness Days Only\")\n start_date = timezone.now()\n start_date = start_date.replace(day=1, month = 9, year = 2020)\n end_date = start_date.replace(day=30)\n expense = BudgetExpense.objects.get(id = 700)\n\n expected_dates = []\n expected_date = expense.start_date\n expected_date = expected_date.replace(day = 4, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 21, month = 9, year = 2020)\n expected_dates.append(expected_date)\n\n\n print(\"EXPECTED\")\n print(\"==========\")\n for d in expected_dates:\n print(d)\n\n result = get_anticipated_transaction_occurences(expense, start_date, end_date)\n print()\n print(\"Actual Result\")\n print(\"============\")\n for r in result.get(expense):\n print(r)\n print()\n self.assertEquals(expected_dates, result.get(expense))",
"def bookworm(people):\n for _ in xrange(people):\n tme, bok, r_t, r_b = input(), [input() for _ in xrange(input())], 0, 0\n bok.sort()\n for i in xrange(len(bok)):\n if (r_t + bok[i]) <= tme:\n r_t, r_b = r_t + bok[i], r_b + 1\n print r_b",
"def find_odds(numbers):\n\n pass # remove this line when starting your function",
"def getEatenBabies(self, gameState):\n eaten = []\n newFood = self.getFoodYouAreDefending(gameState)\n for pos in self.babies:\n if not newFood[pos[0]][pos[1]]:\n eaten.append(pos)\n return eaten",
"def testLastBillable(self):\n months = range(1, 13)\n first_days = [utils.add_timezone(datetime.datetime(2011, month, 1))\n for month in months]\n last_billable = [utils.get_last_billable_day(day).day \\\n for day in first_days]\n #should equal the last saturday of every month in 2011\n self.assertEqual(last_billable,\n [30, 27, 27, 24, 29, 26, 31, 28, 25, 30, 27, 25])",
"def get_us_bank_holidays(content=content):\n\n sorted_holidays = []\n soup = BeautifulSoup(content, 'html.parser')\n holiday_table: bs4.element.Tag = soup.find('table','list-table')\n holiday_rows: ResultSet = holiday_table.find_all('tr', {'class': 'holiday'})\n regional_rows: ResultSet = holiday_table.find_all('tr', {'class': 'regional'})\n public_rows: ResultSet = holiday_table.find_all('tr', {'class': 'publicholiday'})\n all_rows: ResultSet = holiday_rows + regional_rows + public_rows\n holiday_row: bs4.element.Tag\n for holiday_row in all_rows:\n sorted_holidays.append(process_row(holiday_row))\n\n sorted_holidays.sort()\n\n for holiday in sorted_holidays:\n (yyyy, mm, dd) = holiday[0].split('-')\n holiday_name = holiday[1]\n holidays[mm].append(holiday_name)\n\n return holidays",
"def gen_birthdays(n):\n list = []\n for date in range(n):\n list.append(random.randint(1, 365))\n return list",
"def get_ranks(d): \n raise NotImplementedError(\"Problem 3 Incomplete\")",
"def test_bathroom(self):\n array = []\n array = room_calculate(10.5, 22.8, 19.76, array)\n self.assertEqual(array[0], 4730.544)",
"def grAList() -> list:\n return [2, 5, 6, 9, 10, 11, 13, 17, 18, 30]",
"def day2part2_data():\n test_data = []\n test_data.append([5, 9, 2, 8])\n test_data.append([9, 4, 7, 3])\n test_data.append([3, 8, 6, 5])\n return test_data",
"def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe",
"def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe",
"def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe",
"def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe",
"def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe",
"def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe",
"def tobemade(self):\n tobe = []\n for bank in self.__banks:\n if bank.returned() and not bank.made():\n tobe.append(bank)\n return tobe",
"def test_get_index_of_day_one_day_list(self):\n days = [\"15.07.2013\"]\n self._test_find_day(days)\n self._test_giod(days, \"16.07.2013\", 0,\n -1, \"Find not existing day in an One-Day-List\")\n self._test_giod(days, \"16.07.2013\", 1,\n 0, \"Find not existing day in an One-Day-List with next=1.\")\n self._test_giod(days, \"16.07.2013\", -1,\n 0, \"Find not existing day in an One-Day-List with next=-1.\")\n self._test_giod(days, \"10.07.2013\", 0,\n -1, \"Find not existing day in an One-Day-List\")\n self._test_giod(days, \"10.07.2013\", 1,\n 0, \"Find not existing day in an One-Day-List with next=1.\")\n self._test_giod(days, \"10.07.2013\", -1,\n 0, \"Find not existing day in an One-Day-List with next=-1.\")"
] | [
"0.55433816",
"0.5128601",
"0.5094142",
"0.50570726",
"0.5002229",
"0.5002229",
"0.5002229",
"0.5002229",
"0.5002229",
"0.5002229",
"0.5002229",
"0.4991885",
"0.4982164",
"0.4953555",
"0.48787373",
"0.4858374",
"0.48419097",
"0.4826743",
"0.48230216",
"0.48056105",
"0.48013327",
"0.4797592",
"0.47971714",
"0.47971714",
"0.47971714",
"0.47971714",
"0.47971714",
"0.47971714",
"0.47971714",
"0.4788559"
] | 0.67538345 | 0 |
Gets the image tensor from the path | def get_image_tensor(img_path):
img_tensor = path_to_tensor(img_path) / 255.0
return img_tensor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_image_as_tensor(path: Path) -> tf.Tensor:\n img_data = tf.io.read_file(str(path))\n img_tensor = tf.io.decode_jpeg(img_data, channels=3)\n\n return img_tensor",
"def path_to_tensor(img_path, dim):\n\n # Load RGB image\n img = image.load_img(img_path, target_size=(dim,dim))\n # Convert to (dim, dim,3) tensor\n x = image.img_to_array(img)\n return x",
"def path_to_tensor(img_path):\n img = image.load_img(img_path, target_size=(224, 224))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n return preprocess_input(x)",
"def _load_image(self, index: int) -> Tensor:\n path = self.files[index][\"image\"]\n with rasterio.open(path) as f:\n array = f.read()\n tensor = torch.from_numpy(array).float()\n return tensor",
"def load_tensor(path: str, params: Optional[Params]) -> th.Tensor:\n path = get_path_with_hash(path, params)\n files = glob(f'{path}/*')\n return th.stack([th.tensor(th.load(f)) for f in files])",
"def get_input(path):\n img = imread(path)\n return img",
"def _load_image(self, id_: str) -> Tensor:\n filename = os.path.join(self.root, \"output\", id_ + \".jpg\")\n with Image.open(filename) as img:\n array = np.array(img)\n tensor: Tensor = torch.from_numpy(array) # type: ignore[attr-defined]\n # Convert from HxWxC to CxHxW\n tensor = tensor.permute((2, 0, 1))\n return tensor",
"def readImage(self, path, tt=1):\n return cv2.imread( path, tt)",
"def imgLoad(path, gray=False):\n\tif gray:\n\t\treturn to_tensor(Image.open(path).convert('L'))[None,...]\n\treturn to_tensor(Image.open(path))[None,...]",
"def imgLoad(path, gray=False):\n\tif gray:\n\t\treturn to_tensor(Image.open(path).convert('L'))[None,...]\n\treturn to_tensor(Image.open(path))[None,...]",
"def _get_img_tensor(self, fname, internal_transform):\n transforms = list(self.base_transforms)\n if internal_transform:\n transforms.insert(1, internal_transform)\n\n return T.Compose(transforms)(Image.open(self.imgs_root / fname))",
"def get_itk_image(path):\n\n reader = itk.ImageFileReader()\n reader.SetFileName(path)\n\n image = reader.Execute()\n\n return image",
"def _load_image(self, index: int) -> Tensor:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n arr = f[self.split][\"images_log\"][index, :, :, :]\n\n # put channel first\n tensor = torch.from_numpy(arr).permute(2, 0, 1).to(torch.float32)\n return tensor",
"def __getitem__(self, idx):\n label, path = self.pathList[idx]\n file_list = os.listdir(path)\n # parse the images in the folder\n for pt_file in file_list:\n file_path = path + pt_file\n extension = os.path.splitext(file_path)[1]\n\n # choose only the file that has the tensor weights\n if extension == '.pt':\n tensor = torch.load(file_path) # size Nx20x2 because the shape is made of 20 (x,y) tuples\n \n # Change type Int to type Float\n tensor = tensor.type(torch.FloatTensor) # On cpu for now\n\n # normalization of the coordinates\n #tensor[:,:,0] /= 300 # x\n #tensor[:,:,1] /= 150 # y\n\n tensor = tensor.view(tensor.size()[0], 40) # resize to Nx40\n\n return tensor, label",
"def _load_target(self, id_: str) -> Tensor:\n filename = os.path.join(self.root, \"output\", id_ + \"_m.png\")\n with Image.open(filename) as img:\n array = np.array(img.convert(\"L\"))\n tensor: Tensor = torch.from_numpy(array) # type: ignore[attr-defined]\n return tensor",
"def tensor_or_path_to_ndarray(tensor_or_path, rgb=True):\n if isinstance(tensor_or_path, str):\n from skimage import io\n return cv2.imread(tensor_or_path) if not rgb else io.imread(tensor_or_path)\n elif torch.is_tensor(tensor_or_path):\n # Call cpu in case its coming from cuda\n return tensor_or_path.cpu().numpy()[..., ::-1].copy() if not rgb else tensor_or_path.cpu().numpy()\n elif isinstance(tensor_or_path, np.ndarray):\n return tensor_or_path[..., ::-1].copy() if not rgb else tensor_or_path\n else:\n raise TypeError",
"def get_tensor(name):\n if name.rfind(':') == -1:\n name += ':0'\n return tf.get_default_graph().get_tensor_by_name(name)",
"def get_feature_from_image(img_path):\n global sess, softmax_tensor\n\n if not tf.gfile.Exists(img_path):\n tf.logging.fatal('File does not exist %s', img_path)\n image_data = tf.gfile.FastGFile(img_path, 'rb').read()\n\n predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n return predictions",
"def _load_target(self, index: int) -> Tensor:\n path = self.files[index][\"mask\"]\n with Image.open(path) as img:\n array: \"np.typing.NDArray[np.uint8]\" = np.array(img.convert(\"RGB\"))\n array = rgb_to_mask(array, self.colormap)\n tensor = torch.from_numpy(array)\n # Convert from HxWxC to CxHxW\n tensor = tensor.to(torch.long)\n return tensor",
"def read_image(path):\n img = misc.imread(path)\n return img",
"def load_image(filename):\n return tf.gfile.FastGFile(filename, 'rb').read()",
"def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:\n return input_data['img']",
"def get_feature_from_image(self, img_path):\n\n if not tf.gfile.Exists(img_path):\n tf.logging.fatal('File does not exist %s', img_path)\n image_data = tf.gfile.FastGFile(img_path, 'rb').read()\n\n prediction = self.sess.run(self.softmax_tensor, {'DecodeJpeg/contents:0': image_data})\n prediction = np.squeeze(prediction)\n return prediction",
"def load_and_process_image(self, im_path):\n image = Image.open(im_path).convert('RGB')\n image = transforms.ToTensor()(image)\n image = 2 * image - 1\n return image",
"def img(name):\n\turl = \"https://raw.githubusercontent.com/bomelino/stupid/master/images/\"+name\n\timg = tf.image.decode_image(requests.get(url).content, channels=3) #, name=\"jpeg_reader\")\n\timg = tf.image.convert_image_dtype(img, tf.float32)\n\treturn img",
"def _read_image(path):\n data = tf.read_file(path)\n image = tf.image.decode_image(data, channels=3)\n image.set_shape((None, None, 3))\n float_shape = tf.cast(tf.shape(image), tf.float32)\n rows, cols = float_shape[0], float_shape[1]\n max_size = float(IMAGE_SIZE + IMAGE_AUGMENTATION_BORDER)\n new_shape = tf.cond(rows < cols,\n true_fn=lambda: (max_size, cols/rows * max_size),\n false_fn=lambda: (rows/cols * max_size, max_size))\n new_shape = tf.cast(tf.ceil(tf.stack(new_shape)), tf.int32)\n image = tf.image.resize_images(image, new_shape)\n image = tf.random_crop(image, [IMAGE_SIZE, IMAGE_SIZE, 3])\n image = tf.image.random_flip_left_right(image)\n image = tf.image.random_hue(image, 0.1)\n image = tf.image.random_brightness(image, 0.1)\n return tf.cast(image, tf.float32) / 0xff",
"def load_and_preprocess_image(path):\n image = tf.io.read_file(path)\n return preprocess_image(image)",
"def read_image(self, path):\n patch_img = Image.open(path).convert('RGB')\n tf = transforms.Resize((self.config.patch_size, self.config.patch_size))\n patch_img = tf(patch_img)\n tf = transforms.ToTensor()\n \n adv_patch_cpu = tf(patch_img)\n return adv_patch_cpu",
"def map_fn(self, path, label):\n image = tf.image.decode_png(tf.io.read_file(path))\n image = tf.image.convert_image_dtype(image, tf.float32)\n if self.img_size is not None:\n image = tf.image.resize(image, self.img_size)\n return image, label",
"def image(fname):\n return cv2.imread(fname)"
] | [
"0.7548212",
"0.74834365",
"0.73381865",
"0.7061357",
"0.7038939",
"0.701489",
"0.69295824",
"0.67972696",
"0.67446417",
"0.67446417",
"0.66678107",
"0.6606885",
"0.65754026",
"0.6573447",
"0.65305334",
"0.643628",
"0.64324105",
"0.64258087",
"0.6414476",
"0.6360211",
"0.63550717",
"0.6335073",
"0.6329234",
"0.63276726",
"0.6317372",
"0.62931347",
"0.6237224",
"0.6223171",
"0.6221042",
"0.6198138"
] | 0.8271076 | 0 |
Issues a POST request to the UC annual website to retrieve all possible employee financial data. | def acquire_data(year: int) -> dict:
base_url: str = "https://ucannualwage.ucop.edu"
search_url: str = base_url + "/wage/search.action"
# Request headers copied of out Chrome's devtools.
request_headers = {
"Host": re.sub('https://', '', base_url),
"Content-Length": '255',
"Origin": base_url,
"User-Agent":
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json, text/javascript, */*; q=0.01",
"X-Requested-With": "XMLHttpRequest",
"DNT": "1",
"Referer": base_url + "/wage/",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "en-US, en; q=0.8;q=0.6",
"Cookie": "JSESSIONID=0000Uq2FDN8doIsM5DBz4pU0xzd:169l0fmr2"
}
# Dummy request payload. Searches over all locations to search for any employee receiving between 1 and
# 1 billion dollars in salary (aka, everyone).
payload = "_search=false&nd=1497757924608&rows=" + "10000000" + "&page=1&sidx=EAW_LST_NAM&sord=asc&year=" + str(
year
) + "&location=ALL&firstname=&lastname=&title=&startSal=1&endSal=1000000000"
session = requests.Session()
response = session.post(search_url, headers=request_headers, data=payload)
try:
response.raise_for_status()
except requests.HTTPError as e:
print("ERROR: ", e)
exit(1)
# Despite the response type being "text/json", calling `response.json()` fails immediately with the following error message:
# json.errors.JSONDecodeError: Expecting property name enclosed in double quotes: line 2 column 1 (char 2)
# Thus, we convert the response.text object to have double quotes instead of single quotes.
# Additionally, there is an errant control character somehow embedded in response.text, which gives the error:
# json.decoder.JSONDecodeError: Invalid control character at: line 185849 column 69 (char 22761096)
# To override this, we must set the 'strict' property to false.
# See: https://docs.python.org/3/library/json.html#json.JSONDecoder
# 'If strict is false...'
return json.loads(response.text.replace("\'", "\"").encode('utf-8'),
strict=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dataset(request):\n from trytond.transaction import Transaction\n from trytond.tests.test_tryton import USER, CONTEXT, DB_NAME, POOL\n\n Party = POOL.get('party.party')\n Company = POOL.get('company.company')\n Country = POOL.get('country.country')\n Subdivision = POOL.get('country.subdivision')\n Employee = POOL.get('company.employee')\n Currency = POOL.get('currency.currency')\n User = POOL.get('res.user')\n FiscalYear = POOL.get('account.fiscalyear')\n Sequence = POOL.get('ir.sequence')\n AccountTemplate = POOL.get('account.account.template')\n Account = POOL.get('account.account')\n Journal = POOL.get('account.journal')\n PaymentGateway = POOL.get('payment_gateway.gateway')\n AccountCreateChart = POOL.get('account.create_chart', type=\"wizard\")\n\n with Transaction().start(DB_NAME, USER, context=CONTEXT) as transaction:\n # Create company, employee and set it user's current company\n usd, = Currency.create([{\n 'name': 'US Dollar',\n 'code': 'USD',\n 'symbol': '$',\n }])\n\n country_us, = Country.create([{\n 'name': 'United States',\n 'code': 'US',\n }])\n subdivision_florida, = Subdivision.create([{\n 'name': 'Florida',\n 'code': 'US-FL',\n 'country': country_us.id,\n 'type': 'state'\n }])\n subdivision_california, = Subdivision.create([{\n 'name': 'California',\n 'code': 'US-CA',\n 'country': country_us.id,\n 'type': 'state'\n }])\n\n company_party, = Party.create([{\n 'name': 'ABC Corp.',\n 'addresses': [('create', [{\n 'name': 'ABC Corp.',\n 'street': '247 High Street',\n 'zip': '94301-1041',\n 'city': 'Palo Alto',\n 'country': country_us.id,\n 'subdivision': subdivision_california.id,\n }])],\n 'contact_mechanisms': [('create', [{\n 'type': 'phone',\n 'value': '123456789'\n }])]\n }])\n\n employee_party, = Party.create([{\n 'name': 'Prakash Pandey',\n }])\n company, = Company.create([{\n 'party': company_party.id,\n 'currency': usd.id,\n }])\n employee, = Employee.create([{\n 'party': employee_party.id,\n 'company': company.id,\n }])\n User.write(\n [User(USER)], {\n 'main_company': company.id,\n 'company': company.id,\n }\n )\n CONTEXT.update(User.get_preferences(context_only=True))\n\n # Create fiscal year\n date = datetime.date.today()\n\n post_move_sequence, = Sequence.create([{\n 'name': '%s' % date.year,\n 'code': 'account.move',\n 'company': company.id,\n }])\n\n fiscal_year, = FiscalYear.create([{\n 'name': '%s' % date.year,\n 'start_date': date + relativedelta(month=1, day=1),\n 'end_date': date + relativedelta(month=12, day=31),\n 'company': company.id,\n 'post_move_sequence': post_move_sequence.id,\n }])\n FiscalYear.create_period([fiscal_year])\n\n # Create minimal chart of account\n account_template, = AccountTemplate.search([\n ('parent', '=', None),\n ('name', '=', 'Minimal Account Chart')\n ])\n\n session_id, _, _ = AccountCreateChart.create()\n create_chart = AccountCreateChart(session_id)\n create_chart.account.account_template = account_template\n create_chart.account.company = company\n create_chart.transition_create_account()\n\n receivable, = Account.search([\n ('kind', '=', 'receivable'),\n ('company', '=', company.id),\n ])\n payable, = Account.search([\n ('kind', '=', 'payable'),\n ('company', '=', company.id),\n ])\n create_chart.properties.company = company\n create_chart.properties.account_receivable = receivable\n create_chart.properties.account_payable = payable\n create_chart.transition_create_properties()\n\n account_revenue, = Account.search([\n ('kind', '=', 'revenue')\n ])\n account_expense, = Account.search([\n ('kind', '=', 'expense')\n 
])\n\n # Create customer\n customer, = Party.create([{\n 'name': 'John Doe',\n 'addresses': [('create', [{\n 'name': 'John Doe',\n 'street': '250 NE 25th St',\n 'zip': '33137',\n 'city': 'Miami, Miami-Dade',\n 'country': country_us.id,\n 'subdivision': subdivision_florida.id,\n }])],\n 'contact_mechanisms': [('create', [{\n 'type': 'phone',\n 'value': '123456789'\n }])]\n }])\n\n cash_journal, = Journal.search(\n [('type', '=', 'cash')], limit=1\n )\n Journal.write([cash_journal], {\n 'debit_account': account_expense.id\n })\n\n stripe_gateway = PaymentGateway(\n name='Credit Card - Stripe',\n journal=cash_journal,\n provider='stripe',\n method='credit_card',\n stripe_api_key=\"sk_test_Xw6QdFU31e8mcmcdeMt7DoiE\",\n test=True\n )\n stripe_gateway.save()\n\n result = {\n 'customer': customer,\n 'company': company,\n 'stripe_gateway': stripe_gateway,\n }\n\n transaction.commit()\n\n def get():\n from trytond.model import Model\n\n for key, value in result.iteritems():\n if isinstance(value, Model):\n result[key] = value.__class__(value.id)\n return namedtuple('Dataset', result.keys())(**result)\n\n return get",
"def test_yearly_report(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 200, 'date_of_expense': '10-01-2021'})\n consolidated_total = 212.23\n res = self.client().get('/yearly_report?year=2021', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['consolidated_total'], consolidated_total)",
"async def fetch_annual_data(self):\n self._logger.info(\"Fetching annual data\")\n await self._client.select_customer(self.account_id, self.customer_id)\n headers = {\"Content-Type\": \"application/json\"}\n res = await self._client.http_request(ANNUAL_DATA_URL, \"get\", headers=headers)\n # We can not use res.json() because the response header are not application/json\n json_res = json.loads(await res.text())\n if not json_res.get('results'):\n return\n json_res = json_res['results'][0]\n\n for key, raw_key in ANNUAL_MAP:\n self._current_annual_data[key] = json_res['courant'][raw_key]\n self._compare_annual_data[key] = json_res['compare'][raw_key]",
"def post(self, data):\n return self._post_request(data, Expenses.POST_EXPENSE)",
"def make_consultation_request(self, get, post, put):\n data = {\n \"preferredTime\": \"anytime\",\n \"mortgageProfileKind\": \"purchase\",\n \"mortgageTiming\": \"immediate\",\n \"firstName\": \"a\",\n \"lastName\": \"a\",\n \"phone\": \"(111) 111-1111\",\n \"email\": \"[email protected]\"\n }\n post('/api/v1/contact-requests/consultation/', json.dumps(data))",
"async def get_all_data_from_investigators(request):\n client_key = general.get_request_key_header(request)\n data_list = await security_messaging.get_data_from_investigators(request.app.config.VAL_CONN, client_key)\n\n data_list_json = []\n for address, data in data_list.items():\n data_list_json.append({\n 'id': data.id,\n 'height': data.height,\n 'weight': data.weight,\n 'A1C': data.A1C,\n 'FPG': data.FPG,\n 'OGTT': data.OGTT,\n 'RPGT': data.RPGT,\n 'event_time': data.event_time,\n 'eligible': data.eligible\n })\n return response.json(body={'data': data_list_json},\n headers=general.get_response_headers())",
"def get_regular_futures_eod_r(\n self,\n\n headers: t.Dict[str, str] = None,\n body: JSONEncodable = None,\n fields_data: t.Dict[str, str] = None,\n **kwargs\n ):\n r = self._do_call(\n method='GET',\n url=f'{self.API_BASE_URL}/prices/eod',\n headers=headers,\n body=body,\n fields=fields_data,\n **kwargs\n )\n return r",
"def download_earning_reports(self, form='10-Q', year_range=3, force_update=False):\n if self.components.empty:\n self.get_compo_list()\n\n cik_series = self.components['CIK'].astype(str)\n cik_to_ticker = pd.Series(cik_series.index.values, index=cik_series).to_dict()\n\n sec_archive_base = 'https://www.sec.gov/Archives'\n xbrl_idx_base = sec_archive_base + '/edgar/full-index'\n xbrl_pattern = re.compile(r'([0-9]+)\\|(.*)\\|%s\\|(.*)\\|(.*)'%form)\n link_pattern = re.compile(r'[-\\.txt]')\n #instance_pattern = re.compile(r'instance=[\\'\\\"]*([\\w\\-]+\\.xml)[\\'\\\"]*') # e.g. <Report instance=\"amtd-20170630.xml\">\n instance_pattern = re.compile(r'>([\\w]+-[0-9]+\\.xml)<') # e.g. <File>bebe-20140104.xml</File>\n year_end = dt.datetime.today().year\n year_start = year_end - year_range\n for year in range(year_start, year_end+1):\n for quarter in ['QTR1', 'QTR2', 'QTR3', 'QTR4']:\n xbrl_idx = '%s/%s/%s/xbrl.idx' %(xbrl_idx_base, year, quarter)\n try:\n r = requests.get(xbrl_idx)\n except requests.exceptions.RequestException as e:\n print('Error: xbrl.idx request exception, link %s' %xbrl_idx)\n print(e)\n continue\n if r.status_code != requests.codes.ok:\n print('Error: requests get failure, url %s, status_code %d' %(xbrl_idx, r.status_code))\n continue\n # Parse each line and extract lines with specified form(e.g.10-Q).\n #\n # Example:\n # CIK|Company Name|Form Type|Date Filed|Filename\n # 1173313|American BriVision (Holding) Corp|10-K/A|2017-09-22|edgar/data/1173313/0001213900-17-009907.txt\n # 1173313|American BriVision (Holding) Corp|10-Q|2017-08-21|edgar/data/1173313/0001213900-17-009012.txt\n # 1173313|American BriVision (Holding) Corp|S-1/A|2017-07-17|edgar/data/1173313/0001213900-17-007661.txt\n # 1173313|American BriVision (Holding) Corp|S-1/A|2017-09-22|edgar/data/1173313/0001213900-17-009909.txt\n # 1173431|TD AMERITRADE HOLDING CORP|10-Q|2017-07-24|edgar/data/1173431/0001173431-17-000108.txt\n # 1173431|TD AMERITRADE HOLDING CORP|8-K|2017-07-18|edgar/data/1173431/0001173431-17-000104.txt\n all_edgar_links = dict() # CIK-to-link dict\n for line in r.text.splitlines():\n m = xbrl_pattern.findall(line)\n if len(m) > 0:\n all_edgar_links[m[0][0]] = m[0][-1]\n # Download links\n for cik in all_edgar_links.keys():\n if cik not in cik_to_ticker.keys():\n #print('Skip CIK ' + cik) # FIXME: TEST ONLY\n continue\n link = all_edgar_links[cik] # e.g. 'edgar/data/1173431/0001173431-17-000108.txt'\n link=link.split('/') # e.g. ['edgar', 'data', '1173431', '0001173431-17-000108.txt']\n link[-1] = link_pattern.sub('', link[-1]) # e.g. '000117343117000108'\n link = '/'.join(link) # e.g. 'edgar/data/1173431/000117343117000108'\n url = sec_archive_base+'/'+link+'/FilingSummary.xml'\n try:\n r = requests.get(url)\n except requests.exceptions.RequestException as e:\n print('%s: FilingSummary request failure, link %s' %(cik_to_ticker[cik], url))\n print(e)\n continue\n m = instance_pattern.search(r.text)\n if m and len(m.groups()) > 0:\n xbrl_file = m.groups()[0]\n print('%s => %s => %s' %(cik_to_ticker[cik], cik, xbrl_file)) # FIXME: TEST ONLY\n # download file url = sec_archive_base+'/'+link+'/'+xbrl_file\n ticker = Symbol(cik_to_ticker[cik])\n ticker.download_earning(sec_archive_base+'/'+link, xbrl_file, form, force_update=force_update)\n else:\n print('Error: failed to find XBRL file for %s, url %s, status_code %d' %(cik_to_ticker[cik], url, r.status_code))\n continue",
"def on_submit(self):\n\n\t\tfor accounting_entry in self.get('accounting_entries'):\n\t\t\tledger_entry_doc = frappe.get_doc({\n\t\t\t\t'doctype': 'Ledger Entry',\n\t\t\t\t'posting_date': self.posting_date,\n\t\t\t\t'account': accounting_entry.account,\n\t\t\t\t'debit': accounting_entry.debit,\n\t\t\t\t'credit': accounting_entry.credit,\n\t\t\t\t'voucher_type': 'Journal Entry',\n\t\t\t\t'voucher_number': self.name,\n\t\t\t\t'company': self.company\n\t\t\t})\n\t\t\tledger_entry_doc.insert()",
"async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n response = await self.privatePostAuthRSummary(params)\n #\n # Response Spec:\n # [\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # [\n # [\n # MAKER_FEE,\n # MAKER_FEE,\n # MAKER_FEE,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # DERIV_REBATE\n # ],\n # [\n # TAKER_FEE_TO_CRYPTO,\n # TAKER_FEE_TO_STABLE,\n # TAKER_FEE_TO_FIAT,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # DERIV_TAKER_FEE\n # ]\n # ],\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # PLACEHOLDER,\n # {\n # LEO_LEV,\n # LEO_AMOUNT_AVG\n # }\n # ]\n #\n # Example response:\n #\n # [\n # null,\n # null,\n # null,\n # null,\n # [\n # [0.001, 0.001, 0.001, null, null, 0.0002],\n # [0.002, 0.002, 0.002, null, null, 0.00065]\n # ],\n # [\n # [\n # {\n # curr: 'Total(USD)',\n # vol: '0',\n # vol_safe: '0',\n # vol_maker: '0',\n # vol_BFX: '0',\n # vol_BFX_safe: '0',\n # vol_BFX_maker: '0'\n # }\n # ],\n # {},\n # 0\n # ],\n # [null, {}, 0],\n # null,\n # null,\n # {leo_lev: '0', leo_amount_avg: '0'}\n # ]\n #\n result = {}\n fiat = self.safe_value(self.options, 'fiat', {})\n feeData = self.safe_value(response, 4, [])\n makerData = self.safe_value(feeData, 0, [])\n takerData = self.safe_value(feeData, 1, [])\n makerFee = self.safe_number(makerData, 0)\n makerFeeFiat = self.safe_number(makerData, 2)\n makerFeeDeriv = self.safe_number(makerData, 5)\n takerFee = self.safe_number(takerData, 0)\n takerFeeFiat = self.safe_number(takerData, 2)\n takerFeeDeriv = self.safe_number(takerData, 5)\n for i in range(0, len(self.symbols)):\n symbol = self.symbols[i]\n market = self.market(symbol)\n fee = {\n 'info': response,\n 'symbol': symbol,\n 'percentage': True,\n 'tierBased': True,\n }\n if market['quote'] in fiat:\n fee['maker'] = makerFeeFiat\n fee['taker'] = takerFeeFiat\n elif market['contract']:\n fee['maker'] = makerFeeDeriv\n fee['taker'] = takerFeeDeriv\n else: # TODO check if stable coin\n fee['maker'] = makerFee\n fee['taker'] = takerFee\n result[symbol] = fee\n return result",
"def post(self, request):\n json_request = request.data\n group = json_request.get('group', None)\n filters = json_request.get('filters', None)\n\n if group is None:\n raise InvalidParameterException('Missing one or more required request parameters: group')\n if filters is None:\n raise InvalidParameterException('Missing one or more required request parameters: filters')\n potential_groups = ['quarter', 'fiscal_year', 'month', 'fy', 'q', 'm']\n if group not in potential_groups:\n raise InvalidParameterException('group does not have a valid value')\n\n queryset = spending_over_time(filters)\n filter_types = filters['award_type_codes'] if 'award_type_codes' in filters else award_type_mapping\n\n # define what values are needed in the sql query\n queryset = queryset.values('action_date', 'federal_action_obligation', 'original_loan_subsidy_cost')\n\n # build response\n response = {'group': group, 'results': []}\n nested_order = ''\n\n group_results = OrderedDict() # list of time_period objects ie {\"fy\": \"2017\", \"quarter\": \"3\"} : 1000\n\n if group == 'fy' or group == 'fiscal_year':\n\n fy_set = sum_transaction_amount(queryset.values('fiscal_year'), filter_types=filter_types)\n\n for trans in fy_set:\n key = {'fiscal_year': str(trans['fiscal_year'])}\n key = str(key)\n group_results[key] = trans['transaction_amount']\n\n elif group == 'm' or group == 'month':\n\n month_set = queryset.annotate(month=ExtractMonth('action_date')) \\\n .values('fiscal_year', 'month')\n month_set = sum_transaction_amount(month_set, filter_types=filter_types)\n\n for trans in month_set:\n # Convert month to fiscal month\n fiscal_month = generate_fiscal_month(date(year=2017, day=1, month=trans['month']))\n\n key = {'fiscal_year': str(trans['fiscal_year']), 'month': str(fiscal_month)}\n key = str(key)\n group_results[key] = trans['transaction_amount']\n nested_order = 'month'\n else: # quarterly, take months and add them up\n\n month_set = queryset.annotate(month=ExtractMonth('action_date')) \\\n .values('fiscal_year', 'month')\n month_set = sum_transaction_amount(month_set, filter_types=filter_types)\n\n for trans in month_set:\n # Convert month to quarter\n quarter = FiscalDate(2017, trans['month'], 1).quarter\n\n key = {'fiscal_year': str(trans['fiscal_year']), 'quarter': str(quarter)}\n key = str(key)\n\n # If key exists {fy : quarter}, aggregate\n if group_results.get(key) is None:\n group_results[key] = trans['transaction_amount']\n else:\n if trans['transaction_amount']:\n group_results[key] = group_results.get(key) + trans['transaction_amount']\n else:\n group_results[key] = group_results.get(key)\n nested_order = 'quarter'\n\n # convert result into expected format, sort by key to meet front-end specs\n results = []\n # Expected results structure\n # [{\n # 'time_period': {'fy': '2017', 'quarter': '3'},\n # \t'aggregated_amount': '200000000'\n # }]\n sorted_group_results = sorted(\n group_results.items(),\n key=lambda k: (\n ast.literal_eval(k[0])['fiscal_year'],\n int(ast.literal_eval(k[0])[nested_order])) if nested_order else (ast.literal_eval(k[0])['fiscal_year']))\n\n for key, value in sorted_group_results:\n key_dict = ast.literal_eval(key)\n result = {'time_period': key_dict, 'aggregated_amount': float(value) if value else float(0)}\n results.append(result)\n response['results'] = results\n\n return Response(response)",
"def fetch_fuel_data():\n\treturn requests.get('http://www.fueleconomy.gov/ws/rest/fuelprices').text",
"def get_data_from_individual_company_pages(soup):\n individual_company_data = []\n usd_roe = get_usd_roe()\n company_code = (\n soup.find(\"meta\", {\"name\": \"description\"}).get(\"content\").split(\":\")[0]\n )\n current_price_usd = float(\n soup.find(\"span\", {\"class\": \"price-section__current-value\"}).text.replace(\n \",\", \"\"\n )\n )\n current_price = round(current_price_usd * usd_roe)\n try:\n p_e_ratio = float(\n soup.find(\n \"div\", {\"class\": \"snapshot__header\"}, string=\"P/E Ratio\"\n ).previous_sibling.replace(\",\", \"\")\n )\n except AttributeError:\n p_e_ratio = 0\n\n try:\n week_52_low = float(\n soup.find(\"div\", {\"class\": \"snapshot__header\"}, string=\"52 Week Low\")\n .previous_sibling.strip()\n .replace(\",\", \"\")\n )\n except AttributeError:\n week_52_low = 1\n\n try:\n week_52_high = float(\n soup.find(\"div\", {\"class\": \"snapshot__header\"}, string=\"52 Week High\")\n .previous_sibling.strip()\n .replace(\",\", \"\")\n )\n except AttributeError:\n week_52_high = 0\n\n unreal_profit_per_year_percent = round((week_52_high / week_52_low - 1) * 100, 2)\n\n individual_company_data.append(\n [company_code, current_price, p_e_ratio, unreal_profit_per_year_percent]\n )\n\n company_df = pd.DataFrame(\n columns=[\"company_code\", \"current_price\", \"P_E\", \"potential_profit_percent\"]\n )\n company_df = company_df.append(\n {\n \"company_code\": company_code,\n \"current_price\": current_price,\n \"P_E\": p_e_ratio,\n \"potential_profit_percent\": unreal_profit_per_year_percent,\n },\n ignore_index=True,\n )\n\n return company_df",
"def test_get_my_periods2(self):\n url = reverse_lazy('api:me-get-payroll-period')\n self.client.force_login(self.test_user_employer2)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(len(response_json), 0, response_json)",
"def get_all(self):\n total_expense_reports = []\n get_count = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n expense_reports = self.format_and_send_request(data)['data']['EEXPENSES']\n total_expense_reports = total_expense_reports + expense_reports\n offset = offset + pagesize\n return total_expense_reports",
"def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)",
"def test_api_can_get_all_expenses(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['items'][0]['name'], self.expense['name'])",
"def fetchEquityData(from_date,to_date, useCache=False):\n PARAMS = INSIDER_TRADING_PARAM.copy()\n PARAMS[\"from_date\"] = from_date\n PARAMS[\"to_date\"] = to_date\n\n file_path = f\"data/{from_date}__{to_date}.json\"\n\n print(f\"\\nGetting Data from {from_date} to {to_date}\")\n\n data = None\n if os.path.isfile(file_path) and useCache:\n print(\"Using cached Data\")\n with open(file_path, \"r\") as f:\n data = json.load(f)\n else:\n sess = requests.Session()\n print(\"Intializing\")\n r = sess.get(\"https://www.nseindia.com/companies-listing/corporate-filings-insider-trading\", headers=HEADERS)\n print(\"Init Done\")\n\n print(\"Fetching Data From API\")\n res = sess.get(INSIDER_TRADING_BASE_URL, params=PARAMS, headers=HEADERS)\n print(\"Fetch Done\")\n print(res.url)\n if res.status_code == 200:\n data = res.json()[\"data\"]\n if not data:\n return None\n \n if useCache:\n print(\"Saving data for future use\")\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n with open(file_path, \"w\") as f:\n json.dump(data,f)\n else:\n return None\n\n df = pd.DataFrame.from_dict(data)\n df = df[df[\"secType\"] == \"Equity Shares\"]\n \n numberColumns = ['secAcq','befAcqSharesNo', 'befAcqSharesPer', 'secVal','afterAcqSharesNo', 'afterAcqSharesPer']\n df[numberColumns] = df[numberColumns].apply(pd.to_numeric, errors=\"coerce\")\n\n dataTimeColumns = ['acqfromDt','acqtoDt','intimDt']\n df[dataTimeColumns] = df[dataTimeColumns].apply(pd.to_datetime, errors=\"coerce\")\n\n df = df[['symbol', 'company', 'acqName', 'secType', 'secAcq', 'tdpTransactionType',\n 'personCategory', 'befAcqSharesNo', 'befAcqSharesPer', 'secVal',\n 'afterAcqSharesNo', 'afterAcqSharesPer', 'acqfromDt', 'acqtoDt',\n 'intimDt', 'acqMode']]\n\n return df",
"def get(self, request):\n employee = EmployeeDetail.objects.all()\n response = {\n 'payment_methods': EmployeeSerializer(\n employee,\n many=True\n ).data\n }\n return Response(response)",
"def handler(request):\n\n request_json = request.get_json()\n\n client = mfp.Client(\n username=request_json['secrets']['MYFITNESSPAL_USERNAME'],\n password=request_json['secrets']['MYFITNESSPAL_PASSWORD'])\n\n # initialize state for the case when fivetran is starting from scratch.\n # put initial values for the cursor and tokens in the 'secrets' node.\n # fivetran should automatically keep track of subsequent updates in the\n # 'state' node.\n if 'cursor' not in request_json['state']:\n request_json['state']['cursor'] = request_json['secrets']['cursor']\n\n cursor = request_json['state']['cursor']\n cursor_date = parser.parse(cursor).date()\n\n if cursor_date > dt.date.today():\n raise ValueError(\n f\"cursor value {cursor_date.isoformat()} is later than \"\n f\"today's date {dt.date.today().isoformat()}\")\n\n # if the cursor is at the current date return immediately without\n # incrementing the cursor. This is to ensure we don't pull data for a day\n # until that day is over.\n if cursor_date == dt.date.today():\n return {\n 'state': {\n 'cursor': cursor,\n },\n 'hasMore': False\n }\n\n # otherwise the cursor must be in the past so go ahead and pull data.\n\n # pull data from the two most recent days to catch cases when I forget to\n # enter data for dinner until the next morning\n prev_date = cursor_date - dt.timedelta(days=1)\n\n total_records = [\n {'date': date.isoformat(), 'name': meal.name, **meal.totals}\n for date in [prev_date, cursor_date]\n for meal in client.get_date(date.year, date.month, date.day).meals\n ]\n\n return {\n 'state': {\n 'cursor': (cursor_date + dt.timedelta(days=1)).isoformat()\n },\n 'insert': {\n 'totals': total_records\n },\n 'delete': {\n },\n 'schema': {\n 'totals': {\n 'primary_key': ['date', 'name']\n }\n },\n 'hasMore': True\n }",
"def __post_agreement(self, web_token: str) -> List[str]:\n payload = {\n 'prohibition_agreement': 1,\n 'csrfmiddlewaretoken': web_token\n }\n\n self.session.headers.update(HTTP_HEADERS)\n self.__ensure_fetching_rate_limit()\n response = self.session.post(EFD_ENDPOINT_ACCESS, data=payload)\n form_names = self.__parse_search_form(response.text)\n return form_names",
"def get_data(end_date, n, local, foreign):\n URL = \"https://api.exchangeratesapi.io/history\"\n PARAMS = {'start_at': str(get_weekday_n_days_ago(end_date, n)),\n 'end_at': str(end_date),\n 'symbols': foreign,\n 'base': local}\n r = requests.get(url=URL, params=PARAMS)\n data = r.json()\n input_data = []\n for day in data['rates']:\n input_data.append([datetime.strptime(day, '%Y-%m-%d').date(),\n float(\"{:.8f}\".format(data['rates'][day][foreign]))])\n input_data.sort(key=lambda x: x[0])\n return input_data[-n:]",
"def fxempire1(site):\n url = \"https://www.fxempire.com/api/v1/en/markets/list\"\n headers = {\n \"authority\": \"www.fxempire.com\",\n \"method\": \"GET\",\n \"path\": \"/api/v1/en/markets/list\",\n \"scheme\": \"https\",\n \"accept\": (\n \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,\"\n + \"image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\"\n ),\n \"accept-encoding\": \"gzip, deflate, br\",\n \"accept-language\": \"en-US,en;q=0.9\",\n \"cache-control\": \"max-age=0\",\n \"dnt\": \"1\",\n \"sec-fetch-mode\": \"navigate\",\n \"sec-fetch-site\": \"none\",\n \"sec-fetch-user\": \"?1\",\n \"upgrade-insecure-requests\": \"1\",\n \"user-agent\": (\n \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\"\n + \" (KHTML, like Gecko) Chrome/79.0.3945.79 Safari/537.36 OPR/66.0.3515.27\"\n ),\n }\n try:\n session = requests.Session()\n session.headers = headers\n cfscrape_requests = cfscrape.create_scraper(sess=session)\n ret = cfscrape_requests.get(url, timeout=(15, 15)).json()\n data = {}\n for item in ret[\"forex\"]:\n if item:\n try:\n pair = item[\"name\"].replace(\"/\", \":\")\n price = item[\"value\"]\n data[pair] = float(price)\n except:\n pass\n for item in ret[\"commodities\"]:\n try:\n if item[\"symbol\"] in [\"XAUUSD\", \"XAGUSD\"]:\n pair = \"USD:\" + item[\"symbol\"].replace(\"USD\", \"\")\n price = 1 / float(item[\"value\"])\n data[pair] = price\n except:\n pass\n data = {k: v for k, v in data.items() if \"RUB\" not in k} # RUBLE is stale\n data = refine_data(data)\n print(site, data)\n race_write(f\"{site}_forex.txt\", json_dumps(data))\n except:\n print(f\"{site} failed to load\")",
"def fetchFinancialInfo(self, company_symbol, company_name, type):\n result = []\n utility = RevUtility()\n\n url = self.__REVENUE_BASE_URL + \"/\" + company_symbol + \"/\" + company_name + \"/\" + type\n pageContent = requests.get(url = url)\n tree = html.fromstring(pageContent.content)\n table = tree.xpath('//*[@id=\"style-1\"]/div[2]/table/tbody/tr')\n\n for item in table:\n data = item.xpath('td/text()')\n\n date = utility.convertToDate(data[0])\n try:\n revenue = float(data[1].replace(\",\", \"\").replace(\"$\",\"\"))\n except:\n ## handle revenue or income is negative\n revenue = 0\n result.append((date,revenue))\n\n result.reverse()\n return result",
"def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201",
"def post(self):\n return self.get_request_handler(request.headers).create_new_employment_status(request)",
"async def get_all_investigators(request):\n client_key = general.get_request_key_header(request)\n investigator_list = await security_messaging.get_investigators(request.app.config.VAL_CONN, client_key)\n\n investigator_list_json = []\n for address, dp in investigator_list.items():\n investigator_list_json.append({\n 'public_key': dp.public_key,\n 'name': dp.name\n })\n return response.json(body={'data': investigator_list_json},\n headers=general.get_response_headers())",
"def financial(ticker_symbol):\n ticker = yf.Ticker(ticker_symbol, session=session)\n information = ticker.info\n\n # To check if input is a valid ticker\n if \"symbol\" in information:\n with open(r\"database/financials.json\", \"r+\") as r:\n data = json.load(r)\n check_financial_data(ticker_symbol, ticker, data, r)\n\n # url_ratings = \"https://finance.yahoo.com/calendar/earnings?symbol={}\".format(ticker_symbol)\n # text_soup_ratings = BeautifulSoup(get_earnings_html(url_ratings), \"lxml\")\n #\n # earnings_list, financial_quarter_list = [], []\n # # [[1, 0.56, 0.64], [2, 0.51, 0.65], [3, 0.7, 0.73], [4, 1.41, 1.68], [5, 0.98]]\n # count = 5\n # for earning in text_soup_ratings.findAll(\"tr\"):\n # tds = earning.findAll(\"td\")\n # if len(tds) > 0:\n # earning_date = tds[2].text.rsplit(\",\", 1)[0]\n # eps_est = tds[3].text\n # eps_act = tds[4].text\n # print(earning_date, eps_est, eps_act, ticker_symbol)\n # if eps_est != \"-\" and eps_act != \"-\":\n # if eps_act != \"-\":\n # earnings_list.append([count, earning_date, eps_est, eps_act])\n # else:\n # earnings_list.append([count, earning_date, eps_est])\n # else:\n # break\n # print(earnings_list)\n\n # if len(earnings_list) != 100:\n # tds = earning.findAll(\"td\")\n # if len(tds) > 0:\n # earning_date = tds[2].text.rsplit(\",\", 1)[0]\n # eps_est = tds[3].text\n # eps_act = tds[4].text\n # print(earning_date, eps_est, eps_act, ticker_symbol)\n #\n # if eps_act != \"-\":\n # earnings_list.append([count, eps_est, eps_act])\n # else:\n # earnings_list.append([count, eps_est])\n #\n # # Deduce financial quarter based on date of report\n # year_num = earning_date.split()[-1]\n # month_num = earning_date.split()[0]\n # if month_num in [\"Jan\", \"Feb\", \"Mar\"]:\n # year_num = int(year_num) - 1\n # quarter = \"Q4\"\n # elif month_num in [\"Apr\", \"May\", \"Jun\"]:\n # quarter = \"Q1\"\n # elif month_num in [\"Jul\", \"Aug\", \"Sep\"]:\n # quarter = \"Q2\"\n # else:\n # quarter = \"Q3\"\n # financial_quarter_list.append(\"{} {}\".format(year_num, quarter))\n # count -= 1\n # else:\n # break",
"def test_monthly_report(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 200, 'date_of_expense': '10-01-2021'})\n self.assertEqual(rv.status_code, 201)\n fetch = self.client().get('/expenses?name=soda', headers=dict(Authorization=\"Bearer \" + access_token))\n result = json.loads(fetch.data)\n\n consolidated_total = 212.23\n res = self.client().get('/monthly_report?month=01-2021', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['consolidated_total'], consolidated_total)",
"def companies():\n res = requests.get('http://0.0.0.0:5002/companies')\n return jsonify(res.json())"
] | [
"0.57792485",
"0.54393226",
"0.5438739",
"0.5402887",
"0.534966",
"0.53278506",
"0.5327787",
"0.51945466",
"0.5179972",
"0.5149336",
"0.51491547",
"0.51397854",
"0.51358885",
"0.5127249",
"0.5085375",
"0.50775725",
"0.50627774",
"0.50594234",
"0.5047741",
"0.50285554",
"0.5019624",
"0.5008184",
"0.5007965",
"0.50055635",
"0.49801707",
"0.49799603",
"0.49693006",
"0.4955312",
"0.49226207",
"0.4908269"
] | 0.6005374 | 0 |
Takes a Python object that holds every employee's salary data; extracts that into a CSV file. | def parse_salary_data_to_csv(data: dict):
# These come from website's form.
column_names = [
"id", "year", "location", "first name", "last name", "title",
"gross pay", "regular pay", "overtime pay", "other pay"
]
number_of_requests_to_search_over = data['records']
data_records: list = data["rows"]
with open("UCOP_Data.csv", "w") as csv_file_object:
csv_writer = csv.writer(csv_file_object, delimiter=",")
# Write column names to CSV
csv_writer.writerow(column_names)
for employee_record in data_records:
employee_data: list = employee_record["cell"]
assert (len(employee_data) == len(column_names))
csv_writer.writerow(employee_data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_employees_salary(self, employees_info: List[List[str]]) -> None:\n pass",
"def generate_csv_output(payslip_data):\n payslip_output = StringIO(newline=None)\n csvFileWriter = csv.writer(payslip_output, delimiter=',')\n\n data = [['Full Name', 'Payment Period', 'Gross Income',\n 'Income Tax', 'Net Income', 'Super']]\n\n for employee in payslip_data:\n data.append([\n employee['full_name'],\n employee['payment_period'],\n str(employee['gross_income']),\n str(employee['income_tax']),\n str(employee['net_income']),\n str(employee['super_amount'])\n ])\n\n csvFileWriter.writerows(data)\n\n return payslip_output",
"def load_employees(self):\n empcsv = open('employees.csv','r')\n emp_temp = []\n empcsv = empcsv.readlines()[1:]\n for line in empcsv:\n for i in line.split(','):\n if line == 0:\n pass\n else:\n emp_temp.append(i)\n employee = emp_temp[0::13]\n data_1 = []\n data = []\n for i in emp_temp:\n if i in employee:\n pass\n else:\n data_1.append(i)\n for i in range(26):\n data_temp = data_1[(i * 12):((i + 1) * 12)]\n data.append(data_temp)\n for i in range(len(employee)):\n self.emp_dict[employee[i]] = data[i]\n #print(self.emp_dict)\n for i in self.emp_dict:\n self.emp_dict[i] = [x.replace('\\n', '') for x in self.emp_dict[i]]\n return self.emp_dict",
"def generate_salary_list(Hall):\n\n pdf = FPDF('P', 'mm', 'A4')\n pdf.add_page('P')\n pdf.set_font('Times', 'B', 14)\n\n pdf.multi_cell(0, 5, ('Hall Salary List: Hall %s' % Hall.hall_ID))\n pdf.ln()\n\n worker_list = dbr.rebuild(\"worker\")\n title = \"Role\"\n wage = 0\n for key in worker_list:\n if worker_list[key].hall_ID == Hall.hall_ID:\n if isinstance(worker_list[key], mess_manager.MessManager):\n title = \"Mess Manager\"\n wage = worker_list[key].monthly_salary\n elif isinstance(worker_list[key], clerk.Clerk):\n title = \"Clerk\"\n wage = worker_list[key].monthly_salary\n elif isinstance(worker_list[key], attendant.Attendant):\n title = \"Attendant\"\n wage = worker_list[key].daily_wage\n\n pdf.multi_cell(0, 5, ('%s: %s (%s) - Rs. %s' % (worker_list[key].worker_ID,\n worker_list[key].name, title, wage)))\n pdf.ln()\n\n # Write generated output file to PDF\n pdf.output(('hall_salary_%s.pdf' % Hall.hall_ID), 'F')",
"def get_employee_info_csv(url, user_id):\n user = get(url + 'users/' + user_id).json()\n user_name = user.get('username')\n todos = get(url + 'todos?userId={}'.format(user_id)).json()\n\n try:\n with open('{}.csv'.format(user_id), mode='w') as f:\n write = csv.writer(f, quoting=csv.QUOTE_ALL)\n for task in todos:\n write.writerow([\n user_id,\n user_name,\n task.get('completed'),\n task.get('title'),\n ])\n except IOError:\n print(\"I/O error\")",
"def etl_csv_file(input_file_location):\n\n all_employee_dict = {}\n supervisor_employee_dict = {}\n header_row = 'employee_id,first_name,last_name,hire_date,supervisor_id'\n\n with open(input_file_location, mode='r') as employee_csv_file:\n\n # verify the header exists. If the header is not correct error out and return\n first_row = next(employee_csv_file, None)\n if first_row.rstrip() != header_row:\n return False, \"The header row in the %s CSV file must be %s\" % (input_file_location, header_row)\n\n employee_csv_reader = csv.reader(employee_csv_file)\n for count, row in enumerate(employee_csv_reader):\n\n # validate each date in the input file can be casted to datetime object\n try:\n hire_date = datetime.strptime(row[3], '%Y-%m-%d')\n except ValueError as e:\n print (e)\n message = \"There has been an error parsing a date in the input file. Please correct '{0}' at \" \\\n \"line '{1}' so that it follows follows the '2011-03-24' date format.\".format(row[3], count)\n return False, message\n\n employee_id = row[0]\n employee = {\n 'employee_id': employee_id,\n 'first_name': row[1],\n 'last_name': row[2],\n 'hire_date': hire_date,\n }\n\n supervisor_id = row[4]\n\n # This is used later to print out ALL employees according to requirements\n all_employee_dict[employee_id] = 'Sorry, this person is not a supervisor'\n\n # Append to list if key already exists\n group = supervisor_employee_dict.setdefault(supervisor_id, [])\n group.append(employee)\n\n return all_employee_dict, supervisor_employee_dict",
"def get_sal_slip_list(self, as_dict=False):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.name\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=as_dict)\n\n\t\treturn emp_list",
"def generate_payslip_data(employee_data):\n payslip_data = []\n\n for employee in employee_data:\n gross_income = monthly_gross_income(employee['annual_salary'])\n income_tax = monthly_income_tax(\n employee['annual_salary'], tax_brackets)\n net_income = monthly_net_income(\n gross_income, income_tax)\n super_amount = monthly_super_amount(\n gross_income, employee['super_rate'])\n\n payslip_data.append({\n 'full_name': employee['first_name'] + ' ' + employee['last_name'],\n 'payment_period': employee['payment_period'],\n 'gross_income': gross_income,\n 'income_tax': income_tax,\n 'net_income': net_income,\n 'super_amount': super_amount\n })\n\n return payslip_data",
"def parse(self, employees, records, stream):\n # make a CSV reader from {stream}\n reader = csv.reader(stream)\n # skip the first three lines\n next(reader)\n next(reader)\n next(reader)\n # the next line is the first employee record\n line = next(reader)\n # start parsing\n while line:\n # get the zeroth field\n header = line[0]\n # check that it is an employee section\n assert header.startswith('Employee: ')\n # extract the useful info\n name = header[9:].strip()\n # pull the employee record\n line = self.getEmployeeRecord(\n employees=employees, records=records, name=name, reader=reader)\n\n # all done\n return",
"def get_csv_line(self):\n return \"{},{},{},{},{},{},{}\\n\".format(self.employee_id, self.gender,\n self.sales, self.bmi,\n self.salary,\n self.get_birthday_string(),\n self.age)",
"def save_data_to_file(file_name, list_of_product_objects):\r\n objfile = open(file_name, 'w')\r\n for row in list_of_product_objects:\r\n objfile.write(row.product_name + \",\" + str(row.product_price) + \"\\n\")\r\n objfile.close()",
"def employee_data(self):\n self.paymethod()\n self.classification()\n for i in self.emp_id:\n if self.clsf[i] == \"Salaried\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][7]]\n elif self.clsf[i] == \"Hourly\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][8],self.emp_dict[i][10],\n self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3], self.emp_dict[i][4],self.emp_dict[i][8]]\n elif self.clsf[i] == \"Commissioned\":\n if self.pymthd[i] == \"Direct Deposit\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][7],self.emp_dict[i][9],\n self.emp_dict[i][10],self.emp_dict[i][11]]\n elif self.pymthd[i] == \"Mailed Check\":\n self.emp_data[i] = [self.clsf[i],self.pymthd[i],self.emp_dict[i][1],self.emp_dict[i][2],\n self.emp_dict[i][3],self.emp_dict[i][4],self.emp_dict[i][7],self.emp_dict[i][9]]\n else:\n print(\"Error\")\n print(self.emp_data)\n return self.emp_data",
"def employees_earning(table):\n\n product_index = 1\n employee_id_index = 2\n amount_sold_index = 4\n\n person_id_index = 0\n person_name_index = 1\n\n game_index = 0\n price_index = 3\n\n store_table = store.get_table()\n store.check_table(store_table)\n hr_table = hr.get_table('model/hr/persons.csv')\n money_earned = {}\n for person in hr_table:\n person_id = person[person_id_index]\n person_name = person[person_name_index]\n money_earned[person_name] = 0\n for record in table:\n product_id = record[product_index]\n employee_id = record[employee_id_index]\n amount_sold = int(record[amount_sold_index])\n if person_id == employee_id:\n for game in store_table:\n game_id = game[game_index]\n if game_id == product_id:\n game_price = int(game[price_index])\n money_earned[person_name] += int(amount_sold * game_price)\n return money_earned",
"def save_to_file_csv(cls, list_objs):\n l = []\n if list_objs is not None:\n for item in list_objs:\n l.append(item.to_dictionary())\n with open(\"%s.csv\" % cls.__name__, mode='w') as f:\n f.write(Base.to_json_string(l))",
"def _get_employee_info() -> List[List[str]]:\n return [\n ['100', 'Dave', 'Team Leader'],\n ['101', 'Ram', 'Developer'],\n ['102', 'Raj', 'Developer'],\n ['103', 'Rahul', 'Tester'],\n ]",
"def write_csv(fhandle, outages, fields):\n writer = csv.DictWriter(fhandle, fields)\n writer.writeheader()\n writer.writerows([o.for_json() for o in outages])",
"def export_csv(user, tasks):\n employee_name = user[0]['name']\n employee_id = user[0]['id']\n csvfile = '{}.csv'.format(employee_id)\n with open(csvfile, mode='w') as file:\n towrite = csv.writer(file, delimiter=',', quoting=csv.QUOTE_ALL)\n for task in tasks:\n towrite.writerow([employee_id, employee_name,\n task['completed'], task['title']])",
"def parse_csv_from_file(file):\n csvFileReader = None\n employee_data = []\n\n # if FileStorage object (which has a save() method)\n if hasattr(file, 'save'):\n csvFileReader = csv.reader(codecs.iterdecode(file, 'utf-8'))\n # else if File object (which does not have a save() method)\n else:\n csvFileReader = csv.reader(file)\n\n for row in csvFileReader:\n employee_data.append({\n 'first_name': row[0],\n 'last_name': row[1],\n 'annual_salary': int(row[2]),\n 'super_rate': convert_to_float(row[3]),\n 'payment_period': row[4]\n })\n\n return employee_data",
"def export_rep(name):\r\n attendance_list = read_rep()\r\n try:\r\n with open(name + '.csv', 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n # makes table in Excel by employee and attendance dates\r\n writer.writerow([\"Employee\", \"Attendance\"])\r\n for worker in attendance_list:\r\n count = 0\r\n for date in worker[1]:\r\n if not count:\r\n # first date needs to add name of worker\r\n writer.writerow([worker[0], date])\r\n count += 1\r\n # write only date\r\n else:\r\n writer.writerow(['', date])\r\n print(\"csv file made\")\r\n return attendance_list\r\n except PermissionError:\r\n print(\"file is opened, please close and try again\")\r\n return attendance_list",
"def save_tasks_to_csv(empID):\n username = ''\n allTasks = []\n\n userRes = requests.get(\n 'https://jsonplaceholder.typicode.com/users/{}'.format(empID))\n todosRes = requests.get(\n 'https://jsonplaceholder.typicode.com/users/{}/todos'.format(empID))\n\n username = userRes.json().get('username')\n todosJson = todosRes.json()\n\n for task in todosJson:\n taskRow = []\n taskRow.append(empID)\n taskRow.append(username)\n taskRow.append(task.get(\"completed\"))\n taskRow.append(task.get(\"title\"))\n\n allTasks.append(taskRow)\n\n # print(\"alltasks: {}\".format(allTasks))\n\n with open(\"{}.csv\".format(empID), 'w') as csvFile:\n csvwriter = csv.writer(csvFile, quoting=csv.QUOTE_ALL)\n csvwriter.writerows(allTasks)\n\n return 0",
"def save_to_file_csv(cls, list_objs):\n list_rectangle = [\"id\", \"width\", \"height\", \"x\", \"y\"]\n list_square = [\"id\", \"size\", \"x\", \"y\"]\n filename = cls.__name__ + \".csv\"\n result = []\n\n if list_objs:\n for objs in list_objs:\n # First recollect the info of the object with a dict\n dictionary = objs.to_dictionary()\n middle_result = []\n # Second obtein the values in a ordered class list\n if cls.__name__ == \"Rectangle\":\n for item in list_rectangle:\n middle_result.append(dictionary[item])\n if cls.__name__ == \"Square\":\n for item in list_square:\n middle_result.append(dictionary[item])\n # append the list to result list\n result.append(middle_result)\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n writer = csv.writer(file)\n writer.writerows(result)",
"def save_to_file_csv(cls, list_objs):\n list_dictionaries = []\n if list_objs is None or list_objs == []:\n string_dictionary = \"[]\"\n else:\n for _obj_dict in list_objs:\n list_dictionaries.append(_obj_dict.to_dictionary())\n string_dictionary = Base.to_json_string(list_dictionaries)\n with open(cls.__name__ + \".csv\", \"w\") as _file:\n _file.write(string_dictionary)\n _file.close()",
"def output_into_file(self, path: str):\n # Creating path if not exist\n Path(path).mkdir(parents=True, exist_ok=True)\n # Writing every day as a csv file\n for day in self:\n with open(f\"{path}/{day.name}.csv\", \"w\") as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # First line / Title\n writer.writerow([\" \", day.name])\n for shift in day:\n employees = \", \".join([e.name for e in shift.employees])\n writer.writerow([f\"{shift.start}-{shift.end}\", employees])",
"def _get_staff_report_tab_delimited(self):\n stringio = io.StringIO()\n writer = csv.writer(stringio, delimiter='\\t',\n quoting=csv.QUOTE_MINIMAL)\n for record in self._get_staff_report_data():\n writer.writerow(record)\n return stringio.getvalue()",
"def make_csv(user_id, fobj):\n data = show_history(user_id)\n report = csv.writer(fobj)\n report.writerow([\n 'Status',\n 'Date',\n 'Amount',\n 'From Curr',\n 'To Curr',\n 'To Address',\n ])\n for row in data:\n report.writerow([\n row.exchange_status.capitalize(),\n row.created_at.strftime('%Y-%m-%d %H:%I:%M'),\n row.amount,\n row.from_curr,\n row.to_curr,\n row.address_out\n ])",
"def csv_parser(s):\n\n # Data is our output. It will be a list of lists.\n\n # Split csv into lines and store them in a list called 'lines'.\n \n # Remove the first element from lines, so that you have only the data lines left.\n \n # At this stage, we loop through the list called lines.\n # As you loop\n # i. split each line on the commas;\n # ii. convert the Subject variable to int.\n # iii. convert the Height variable to float.\n # iv. add to data a list consisting of this line's Subject, Height, and Occupation values ",
"def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))",
"def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees",
"def export_player_sales():\n # get players belonging to a team\n players = Player.objects.filter(team__isnull=False)\n\n with open('/home/dan/Documents/ffooty_data/csv_exports/player_sales_auction.csv', 'wb') as f:\n writer = csv.writer(f)\n\n for p in players:\n writer.writerow([p.code, p.team.id, p.sale])",
"def _CsvFunc(self, obj=None, verbose=False, use_pager=None, to_file=None):\n if obj is not None:\n self._printed_variables.append(obj)\n lines = describe.GenerateLines(obj, verbose=verbose, recursive=False,\n format_name='csv')\n _WriteToStream(lines, use_pager=use_pager, to_file=to_file)"
] | [
"0.6110135",
"0.5721901",
"0.56247103",
"0.5470739",
"0.54523957",
"0.5402318",
"0.5387349",
"0.5354204",
"0.5340569",
"0.53081775",
"0.5222946",
"0.52161574",
"0.5165061",
"0.51539856",
"0.51404184",
"0.5131543",
"0.5097248",
"0.5090373",
"0.5044837",
"0.5016266",
"0.5010205",
"0.5008361",
"0.50036544",
"0.49755916",
"0.49755087",
"0.49679118",
"0.49511176",
"0.4937954",
"0.49303588",
"0.48903894"
] | 0.69983995 | 0 |
Exports the selected rows to grid_data_out['rows']. Works when rowSelection or rangeSelection is enabled. | def get_selected_rows(self):
self._export_mode = 'rows'
self._counter_update_data += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_selected_rows(self):\n self._export_mode = 'delete'\n self._counter_update_data += 1",
"def export(self,**kwargs):\n \n # import pdb;pdb.set_trace()\n \n # provide for case where recs are set extenally\n if not self.recs:\n self.select_recs(**kwargs)\n if self.recs:\n if self.export_file_name:\n filename = self.export_file_name\n else:\n filename = \"{table_name}_report_{datetime}.csv\".format(\n table_name = self.table.display_name,\n datetime = date_to_string(local_datetime_now(),'iso_datetime'),\n ).replace(' ','_').lower()\n \n if not self.export_fields:\n # include all fields by default\n self.export_fields = self._set_default_list_fields(include_all=True).copy()\n\n self.set_list_fields(self.export_fields)\n \n \n if self.export_template:\n result = render_template(self.export_template, data=self)\n else:\n # add a descriptive title row\n if self.export_title:\n result = self.export_title.strip() + '\\n'\n else:\n result = \"Export of table {} as of {}\\n\".format(self.table.table_name,excel_date_and_time_string(local_datetime_now()))\n \n result += ','.join([x['label'] for x in self.export_fields]) + '\\n'\n for rec in self.recs:\n rec_row = []\n for field in self.export_fields:\n data = rec.__getattribute__(field['name'])\n if field['type'].upper() == \"DATE\":\n data = local_date_string(data)\n elif field['type'].upper() == \"DATETIME\":\n data = excel_date_and_time_string(data)\n else:\n # just text\n data = str(data).strip()\n \n # replace double quotes with double-double quotes\n data = data.replace('\"','\"\"') #double up on double quotes\n \n if \",\" in data:\n # if any commas, wrap in quotes\n data = '\"' + data + '\"'\n \n #replace returns\n data = data.replace('\\r\\n',' -crnl- ')\n data = data.replace('\\n',' -nl- ')\n data = data.replace('\\r',' -rtn- ')\n\n rec_row.append(data)\n \n result += ','.join([str(x) for x in rec_row]) + '\\n'\n \n return DataStreamer(result,filename,'text/csv').send()\n \n self.result_text = \"No records selected\"\n self.success = False\n \n flash(self.result_text)\n return self.list(**kwargs)",
"def export_to_csv(da_locals, selection_widget, out):\n df_name = selection_widget.value\n da_locals[df_name].to_csv(\"output/{}.csv\".format(df_name), index=False)\n out.clear_output()\n out.append_display_data(FileLinks(\"output\"))",
"def _write_rows(self):\n try:\n xf = self._writer.xf.send(True)\n except StopIteration:\n self._already_saved()\n\n with xf.element(\"sheetData\"):\n row_idx = 1\n try:\n while True:\n row = (yield)\n row = self._values_to_row(row, row_idx)\n self._writer.write_row(xf, row, row_idx)\n row_idx += 1\n except GeneratorExit:\n pass\n\n self._writer.xf.send(None)",
"def get_export_queryset(self, request, context):\n # scope = self.request.POST.get('_select_across', False) == '1'\n scope = request.GET.get('scope')\n select_across = request.GET.get('_select_across', False) == '1'\n selected = request.GET.get('_selected_actions', '')\n if scope == 'all':\n queryset = self.admin_view.queryset()\n elif scope == 'header_only':\n queryset = []\n elif scope == 'selected':\n if not select_across:\n selected_pk = selected.split(',')\n queryset = self.admin_view.queryset().filter(pk__in=selected_pk)\n else:\n queryset = self.admin_view.queryset()\n else:\n queryset = [r['object'] for r in context['results']]\n return queryset",
"def dataGridView_SelectionChanged(self, sender, eventArgs):\r\n # Clear previous selection only if new rows have been selected.\r\n if self.wf.dataGridView.SelectedRows.Count > 0:\r\n Application.SelectObj(\"\", \"\", True)\r\n selectedNames = \"\"\r\n for row in self.wf.dataGridView.SelectedRows:\r\n name = row.Cells[0].Value\r\n selectedNames += ( name + \",\" )\r\n if selectedNames:\r\n Application.SelectObj(selectedNames, \"\", True)",
"def click_re_analysis_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.re_analysis_grid_div_id)",
"def outputrow(self):\n return self.__output__",
"def selected_rows(self, row_numbers: List[int]) -> Tuple[str, str]:\n\t\tif not self._closed:\n\t\t\trow_dir = \"%s.rows\" % (self.file_path)\n\t\t\tlogging.debug(\"Expanding selected row numbers, if not previously expanded: (stored in %s.rows subfolder)\" % self.filename)\n\t\t\tlogging.debug(\",\".join(str(row_nr) for row_nr in row_numbers))\n\n\t\t\trow_mod_filename = \"%s.rows.lastmod.gzip\" % (self.file_path)\n\t\t\trow_mod = load_gzipped_json_string(row_mod_filename)\n\t\t\tlast_mod = self.ds.layers.last_modified()\n\n\t\t\t# If cache is stale, remove previously expanded rows\n\t\t\tif os.path.isdir(row_dir) and row_mod != last_mod:\n\t\t\t\tself.clear_rows()\n\n\t\t\tsave_gzipped_json_string(row_mod_filename, last_mod)\n\n\t\t\ttry:\n\t\t\t\tos.makedirs(row_dir, exist_ok=True)\n\t\t\texcept OSError as exception:\n\t\t\t\tif exception.errno is not errno.EEXIST:\n\t\t\t\t\traise exception\n\n\t\t\t# make sure all rows are included only once\n\t\t\trow_numbers = list(set(row_numbers))\n\n\t\t\tif len(row_numbers) is 0:\n\t\t\t\treturn (\"[]\", last_mod)\n\n\t\t\trow_numbers.sort()\n\n\t\t\tds = self.ds\n\t\t\tretRows = [\"[\"]\n\t\t\tcomma = \",\"\n\n\t\t\trowMax = ds.shape[0]\n\t\t\tnewly_expanded = []\n\t\t\tpreviously_expanded = []\n\t\t\tfor i in row_numbers:\n\t\t\t\t# ignore out of bounds values\n\t\t\t\tif isinstance(i, int) and i >= 0 and i < rowMax:\n\t\t\t\t\trow_file_name = \"%s/%06d.json.gzip\" % (row_dir, i)\n\t\t\t\t\tif os.path.exists(row_file_name):\n\t\t\t\t\t\tretRows.append(load_gzipped_json_string(row_file_name))\n\t\t\t\t\t\tpreviously_expanded.append(i)\n\t\t\t\t\telse:\n\t\t\t\t\t\trow = json.dumps({\"idx\": i, \"data\": metadata_array(ds[i, :])})\n\t\t\t\t\t\tretRows.append(row)\n\t\t\t\t\t\tsave_gzipped_json_string(row_file_name, row)\n\t\t\t\t\t\tnewly_expanded.append(i)\n\t\t\t\t\tretRows.append(comma)\n\n\t\t\tlogging.debug(\"loaded rows: %s\", previously_expanded)\n\t\t\tlogging.debug(\"newly expanded rows: %s\", newly_expanded)\n\n\t\t\tif len(retRows) is 1:\n\t\t\t\treturn (\"[]\", last_mod)\n\n\t\t\t# convert last \",\" to \"]\" to make it a valid JSON array\n\t\t\tretRows[len(retRows) - 1] = \"]\"\n\t\t\treturn (\"\".join(retRows), last_mod)\n\t\treturn None",
"def select_grid(self, grid, count_threshold, **kwargs):\n\n query = self.query_builder.grid_query(self.table_name, grid, count_threshold, **kwargs)\n\n def prepare_results(cursor, _):\n raster_data = data.GridData(grid)\n\n for result in cursor:\n raster_data.set(result['rx'], result['ry'],\n geom.GridElement(result['count'], result['timestamp']))\n\n return raster_data\n\n return self.execute_base(str(query), query.get_parameters(), prepare_results)",
"def _menu_select_all(self, uiinfo, selection):\n print selection, uiinfo\n self.model.selected_rows = self.model.data_list[:]\n print \"selection: {}\".format(len(self.model.selected_rows))",
"def deleteSelectedRows(self):\n # Get unique row number (user can select multiple cells in one row)\n uniqRows = set([idx.row() for idx in self.view.selectedIndexes()])\n # It's necessary to remove rows from the end, otherwise indexes become\n # outdated and useless.\n revRovs = sorted(list(uniqRows), reverse=True)\n for row in revRovs:\n self.model.removeRow(row)",
"def _select_rows_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"SelectRowsNode\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.SelectRowsNode\"\n )\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n temp_v_columns = []\n # pre-scan expressions\n er = ExpressionRequirementsCollector()\n for opk in op.ops.values():\n opk.act_on(None, expr_walker=er)\n er.add_in_temp_columns(temp_v_columns)\n value_to_send_to_act = None\n if er.collect_required:\n if isinstance(res, pl.LazyFrame):\n res = res.collect()\n value_to_send_to_act = res\n # work on expression\n if len(temp_v_columns) > 0:\n res = res.with_columns(temp_v_columns)\n selection = op.expr.act_on(\n value_to_send_to_act, \n expr_walker=PolarsExpressionActor(polars_model=self, extend_context=True)) # PolarsTerm\n assert isinstance(selection, PolarsTerm)\n res = res.filter(selection.polars_term)\n if len(temp_v_columns) > 0:\n res = res.select(op.columns_produced())\n if self.use_lazy_eval and isinstance(res, pl.DataFrame):\n res = res.lazy()\n return res",
"def click_vendor_price_list_detail_dial_digits_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.vendor_price_list_detail_dial_digits_grid_div_id)",
"def export_csv(self, outpath):\n\n\t\tself.df.to_csv(outpath)",
"def ExportCSV(self, selected=None, long_output=False):\n\n\t\twith self.lock:\n\t\t\tif selected is None:\n\t\t\t\tuids = self.users.keys()\n\t\t\telse:\n\t\t\t\tuids = selected\n\n\t\t\tuids.sort()\n\n\t\tassert ltrace(TRACE_USERS, '| ExportCSV(%s)' % uids)\n\n\t\t# TODO: get a user locally from self[uid] and avoid all these lookups.\n\n\t\tdef build_csv_output_licorn(uid):\n\t\t\treturn ';'.join(\n\t\t\t\t[\n\t\t\t\t\tself[uid].gecos,\n\t\t\t\t\tself[uid].login,\n\t\t\t\t\tstr(self[uid].gidNumber),\n\t\t\t\t\t','.join([ g.name for g in self[uid].groups]),\n\t\t\t\t\tself[uid].backend.name,\n\t\t\t\t\tself[uid].profile.name if self[uid].profile is not \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tNone else str(None)\n\t\t\t\t]\n\t\t\t\t)\n\n\t\tdata = '\\n'.join(map(build_csv_output_licorn, uids)) +'\\n'\n\n\t\treturn data",
"def _export_disp_rowset(self, name: str, membr: str, f: IO[str], ind: str, size: int) -> None:\n assert self._disp_verts is not None\n f.write(f'{ind}\\t\\t{name}\\n{ind}\\t\\t{{\\n')\n rows = [\n str(getattr(vert, membr))\n for vert in self._disp_verts\n ]\n for y in range(size):\n f.write(f'{ind}\\t\\t\"row{y}\" \"{\" \".join(rows[size * y:size * (y+1)])}\"\\n')\n f.write(f'{ind}\\t\\t}}\\n')",
"def get_selected_columns(self):\n self._export_mode = 'columns'\n self._counter_update_data += 1",
"def deleteSelectedRows(self):\n\n model = self.proxyModel.sourceModel()\n\n proxyIndexList = []\n for i in self.selectionModel().selectedRows():\n index = QPersistentModelIndex(i)\n proxyIndexList.append(index)\n\n for index in proxyIndexList:\n modelIndex = self.proxyModel.mapToSource(index)\n row = modelIndex.row()\n rowid = model.dataset.data[row][JobHistoryKey.ID].obj\n rowid0 = model.dataset[row, JobHistoryKey.ID]\n print(f\"From History View - model call row {row} data row ID {rowid} ID {rowid0}\")\n model.removeRows(row, 1)",
"def click_country_groups_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.country_groups_grid_div_id)",
"def onGrid(self, event):\n dlg = wx.FileDialog(self, wildcard=\"*.csv\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.model.exportToGrid(path)\n dlg.Destroy()",
"def handle_output(all_rows, opts):\n for dev, rows in all_rows.iteritems():\n\n if opts.csv:\n writer = csv.writer(sys.stdout)\n for row in rows:\n writer.writerow([dev] + row)\n elif opts.dotty:\n continue\n elif opts.sqldb:\n write_sqldb(opts.sqldb, dev, rows)\n else:\n print 'DEVICE: {}'.format(dev)\n print_table(rows)",
"def get_CSV_data(self, selected=None, long_output=False):\n\n\t\twith self.lock:\n\t\t\tcsv_data = []\n\t\t\tfor user in [ u for u in self if u.uid in selected]:\n\t\t\t\tgroups = []\n\t\t\t\tfor g in user.groups:\n\t\t\t\t\tif g.is_responsible:\n\t\t\t\t\t\tgroups.append(LMC.configuration.groups.resp_prefix + g.name)\n\t\t\t\t\telif g.is_guest:\n\t\t\t\t\t\tgroups.append(LMC.configuration.groups.guest_prefix + g.name)\n\t\t\t\t\telse:\n\t\t\t\t\t\tgroups.append(g.name)\n\n\n\t\t\t\tcsv_data.append([\n\t\t\t\t\tuser.login,\n\t\t\t\t\tuser.uid,\n\t\t\t\t\tuser.primary_group,\n\t\t\t\t\tuser.gecos,\n\t\t\t\t\t','.join(groups),\n\t\t\t\t\tuser.backend.name\n\t\t\t\t])\n\n\t\t\treturn csv_data",
"def _CMD_EXPORT_SELECTED(self, file_name):\n self.__switch_command_export(file_name, selection_only=True)",
"def click_vendor_price_list_detail_reference_rates_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.vendor_price_list_detail_reference_rates_grid_div_id)",
"def getRows(self, context, obj, data, aggData):\n rowNum = obj.get('rowNum', 1)\n obj['rowNum'] = rowNum + 1\n return [[rowNum] + data + [aggData]]",
"def export_data(self):\n return self.export_all_data()",
"def click_vendor_price_list_detail_rates_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.vendor_price_list_detail_rates_grid_div_id)",
"def export_data(fp, app_name):\n from otree.views.admin import get_display_table_rows\n colnames, rows = get_display_table_rows(\n app_name, for_export=True, subsession_pk=None)\n colnames = ['{}.{}'.format(k, v) for k, v in colnames]\n writer = csv.writer(fp)\n writer.writerows([colnames])\n writer.writerows(rows)",
"def interactiveExport(self):\n exportMethods = {'htmlSingle': self.exportHtmlSingle,\n 'htmlNavSingle': self.exportHtmlNavSingle,\n 'htmlPages': self.exportHtmlPages,\n 'htmlTables': self.exportHtmlTables,\n 'textTitles': self.exportTextTitles,\n 'textPlain': self.exportTextPlain,\n 'textTables': self.exportTextTables,\n 'xmlGeneric': self.exportXmlGeneric,\n 'xmlSubtree': self.exportXmlSubtree,\n 'odfText': self.exportOdfText,\n 'bookmarksHtml': self.exportBookmarksHtml,\n 'bookmarksXbel': self.exportBookmarksXbel}\n exportDialog = ExportDialog(len(self.selectedNodes),\n QtGui.QApplication.activeWindow())\n if exportDialog.exec_() == QtGui.QDialog.Accepted:\n result = exportMethods[ExportDialog.currentSubtype]()\n QtGui.QApplication.restoreOverrideCursor()\n return result\n return False"
] | [
"0.5814463",
"0.5305055",
"0.5239921",
"0.5221338",
"0.5161866",
"0.5152546",
"0.51030695",
"0.5083933",
"0.5071529",
"0.50212586",
"0.5013975",
"0.49973035",
"0.49512377",
"0.4944411",
"0.4929218",
"0.49113068",
"0.4886944",
"0.48782787",
"0.4876333",
"0.48565313",
"0.48403448",
"0.4763876",
"0.47227946",
"0.47172135",
"0.47146997",
"0.4707258",
"0.47018293",
"0.46953997",
"0.468755",
"0.46805394"
] | 0.70378476 | 0 |
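
The get_selected_rows snippet in the row above only flags an export request; the selection itself shows up later in grid_data_out['rows'], written back by the grid's front end. A minimal, self-contained sketch of that deferred pattern — plain Python, with FrontEndStub and GridExportSketch as hypothetical stand-ins; only _export_mode, _counter_update_data, grid_data_out and get_selected_rows are taken from the snippet:

class FrontEndStub:
    """Hypothetical stand-in for the JavaScript grid that answers export requests."""
    def __init__(self, selected_rows):
        self.selected_rows = selected_rows

    def on_counter_change(self, widget):
        # The real front end reacts to the counter changing and writes the
        # requested data back into the widget's grid_data_out dict.
        if widget._export_mode == 'rows':
            widget.grid_data_out['rows'] = list(self.selected_rows)


class GridExportSketch:
    def __init__(self, front_end):
        self._export_mode = ''
        self._counter_update_data = 0
        self.grid_data_out = {}
        self._front_end = front_end

    def get_selected_rows(self):
        self._export_mode = 'rows'
        self._counter_update_data += 1            # in the real widget this change is synced to the front end
        self._front_end.on_counter_change(self)   # simulated round-trip


grid = GridExportSketch(FrontEndStub(selected_rows=[{'a': 1}, {'a': 2}]))
grid.get_selected_rows()
print(grid.grid_data_out['rows'])   # -> [{'a': 1}, {'a': 2}]

The counter change is what actually triggers the front-end round-trip, which is why the method itself returns nothing and the result is read from grid_data_out instead.
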
Exports the selected columns to grid_data_out['columns']. Only works when rangeSelection is enabled. | def get_selected_columns(self):
self._export_mode = 'columns'
self._counter_update_data += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_to_csv(da_locals, selection_widget, out):\n df_name = selection_widget.value\n da_locals[df_name].to_csv(\"output/{}.csv\".format(df_name), index=False)\n out.clear_output()\n out.append_display_data(FileLinks(\"output\"))",
"def output_columns(self) -> List[str]:",
"def get_export_csv_columns(self, user):\n return self.EXPORT_CSV_COLUMNS if self.EXPORT_CSV_COLUMNS is not None else self.LIST_DISPLAY",
"def get_export_columns(kind: str) -> dict:\r\n c = {\r\n 'u': {\r\n 'vendor_name': 'Vendor Name',\r\n 'number': 'Number',\r\n 'name': 'Name',\r\n 'assoc': 'Assocciated'\r\n },\r\n 'm': {\r\n 'email_address': 'Email Address',\r\n 'first_name': 'First Name',\r\n 'last_name': 'Last Name'\r\n }\r\n }\r\n columns = c['u'] # Because the matched DataFrame has all the same columns\r\n if kind == 'm': columns.update(c['m']) # as unmatched DataFrame, we use the dict.update() method\r\n return columns # to extend the columns of the unmatched DataFrame.\r",
"def select_columns(data, columns):\n return data.loc[:, columns]",
"def output_columns(self) -> pulumi.Output[Sequence['outputs.DataSetOutputColumn']]:\n return pulumi.get(self, \"output_columns\")",
"def export_csv(self, outpath):\n\n\t\tself.df.to_csv(outpath)",
"def dest_columns(self):\n return self.intersection + self.dest_renames",
"def plot_range_to_csv(df, output_csv, start_col=15, end_col=27):\n\tnon_empty_rows = df.ix[:, 0].notna()\n\tdata = df[non_empty_rows].ix[:, start_col:end_col]\n\tdata.to_csv(output_csv, index=False)",
"def _select_columns_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"SelectColumnsNode\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.SelectColumnsNode\"\n )\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n res = res.select(op.columns_produced())\n return res",
"def get_all_columns(self):\n df = self.get_prep_data()\n col = [c for c in df.columns if c not in ['target', 'idd', 'ft_data_dt']]\n return col",
"def columns(self):\n return self._coldefs",
"def get_selected_columns_as_list(member_data_file):\n cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n return member_data_file.get_selected_columns_as_list",
"def get_cols_drop():",
"def select_columns(data):\n\n #Channels to be excluded\n features_delete = np.arange(46, 50)\n features_delete = np.concatenate([features_delete, np.arange(59, 63)])\n features_delete = np.concatenate([features_delete, np.arange(72, 76)])\n features_delete = np.concatenate([features_delete, np.arange(85, 89)])\n features_delete = np.concatenate([features_delete, np.arange(98, 102)])\n features_delete = np.concatenate([features_delete, np.arange(134, 243)])\n features_delete = np.concatenate([features_delete, np.arange(244, 249)])\n return np.delete(data, features_delete, 1)",
"def tabular_range_to_csv(df, output_csv, start_row=10, end_row=14, start_col=71, end_col=77):\n data = df.ix[start_row:end_row, start_col:end_col]\n data.to_csv(output_csv, index=False, header=None)",
"def get_export_queryset(self, request, context):\n # scope = self.request.POST.get('_select_across', False) == '1'\n scope = request.GET.get('scope')\n select_across = request.GET.get('_select_across', False) == '1'\n selected = request.GET.get('_selected_actions', '')\n if scope == 'all':\n queryset = self.admin_view.queryset()\n elif scope == 'header_only':\n queryset = []\n elif scope == 'selected':\n if not select_across:\n selected_pk = selected.split(',')\n queryset = self.admin_view.queryset().filter(pk__in=selected_pk)\n else:\n queryset = self.admin_view.queryset()\n else:\n queryset = [r['object'] for r in context['results']]\n return queryset",
"def selected_columns(self, column_numbers: List[int]) -> Tuple[str, str]:\n\t\tif not self._closed:\n\t\t\tcol_dir = \"%s.cols\" % (self.file_path)\n\t\t\tlogging.debug(\"Expanding selected column numbers, if not previously expanded: (stored in %s.cols subfolder)\" % self.filename)\n\t\t\tlogging.debug(\",\".join(str(column_nr) for column_nr in column_numbers))\n\n\t\t\tcol_mod_filename = \"%s.cols.lastmod.gzip\" % (self.file_path)\n\t\t\tcol_mod = load_gzipped_json_string(col_mod_filename)\n\t\t\tlast_mod = self.ds.layers.last_modified()\n\n\t\t\t# If cache is stale, remove previously expanded columns\n\t\t\tif os.path.isdir(col_dir) and col_mod != last_mod:\n\t\t\t\tself.clear_columns()\n\n\t\t\tsave_gzipped_json_string(col_mod_filename, last_mod)\n\n\t\t\ttry:\n\t\t\t\tos.makedirs(col_dir, exist_ok=True)\n\t\t\texcept OSError as exception:\n\t\t\t\tif exception.errno is not errno.EEXIST:\n\t\t\t\t\traise\n\n\t\t\t# make sure all columns are included only once\n\t\t\tcolumn_numbers = list(set(column_numbers))\n\n\t\t\tif len(column_numbers) is 0:\n\t\t\t\treturn (\"[]\", last_mod)\n\n\t\t\tcolumn_numbers.sort()\n\n\t\t\tds = self.ds\n\t\t\tretCols = [\"[\"]\n\t\t\tcomma = \",\"\n\n\t\t\tcolMax = ds.shape[1]\n\t\t\tnewly_expanded = []\n\t\t\tpreviously_expanded = []\n\t\t\tfor i in column_numbers:\n\t\t\t\t# ignore out of bounds values\n\t\t\t\tif isinstance(i, int) and i >= 0 and i < colMax:\n\t\t\t\t\tcol_file_name = \"%s/%06d.json.gzip\" % (col_dir, i)\n\t\t\t\t\tif os.path.exists(col_file_name):\n\t\t\t\t\t\tretCols.append(load_gzipped_json_string(col_file_name))\n\t\t\t\t\t\tpreviously_expanded.append(i)\n\t\t\t\t\telse:\n\t\t\t\t\t\tdata = metadata_array(ds[:, i].transpose())\n\t\t\t\t\t\tcolumn = json.dumps({\"idx\": i, \"data\": data})\n\t\t\t\t\t\tretCols.append(column)\n\t\t\t\t\t\tsave_gzipped_json_string(col_file_name, column)\n\t\t\t\t\t\tnewly_expanded.append(i)\n\t\t\t\t\tretCols.append(comma)\n\n\t\t\tlogging.debug(\"loaded columns: %s\", previously_expanded)\n\t\t\tlogging.debug(\"newly expanded columns: %s\", newly_expanded)\n\n\t\t\tif len(retCols) is 1:\n\t\t\t\treturn (\"[]\", last_mod)\n\n\t\t\t# convert last \",\" to \"]\" to make it a valid JSON array\n\t\t\tretCols[len(retCols) - 1] = \"]\"\n\t\t\treturn (\"\".join(retCols), last_mod)\n\t\treturn None",
"def temporal_range_to_csv(df, output_csv, start_row=0, end_row=24, start_col=38, end_col=43):\n data = df.ix[start_row:end_row, start_col:end_col]\n data.to_csv(output_csv, index=False)",
"def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False):\n # Columns for output\n columns = self.spectrograph.pypeit_file_keys() + ['calib']\n\n extras = []\n\n # comb, bkg columns\n if write_bkg_pairs:\n extras += ['comb_id', 'bkg_id']\n # manual\n if write_manual:\n extras += ['manual']\n for key in extras:\n if key not in columns:\n columns += [key]\n\n # Take only those present\n output_cols = np.array(columns)\n return output_cols[np.isin(output_cols, self.keys())].tolist()",
"def get_specific_col_data( self, columns):\n headers = []\n for i in range(len(columns)):\n headers.append(self.header2col[columns[i]])\n return self.data[:,headers]",
"def get_columns():\n col_no = request.args.get('col_no', 0, type=str)\n print(col_no)\n result = name_column.get_json_columns(col_no)\n\n return jsonify(result=result)",
"def print_columns(outfile):\r\n values = []\r\n for key in Output.R_COLUMNS:\r\n values.append(str(key))\r\n row = '\\t'.join(values)\r\n row = row + '\\n'\r\n outfile.write(row.encode(\"utf8\"))",
"def _column_selection_change(self):\n selection_index = self._lb_tables.GetSelection()\n if selection_index != -1:\n table_id = self._tables[selection_index][0]\n selected_columns_indexes = [self._columns[i][0] for i in list(self._ckl_columns.GetCheckedItems())]\n database_columns_indexes = [tup[0] for tup in self._datafile.query(sciplot.database.Query(\"SELECT VariableID FROM TableColumn WHERE TableID = (?);\", [table_id], 1))[0]]\n\n to_add = []\n to_remove = []\n\n for i in selected_columns_indexes:\n if i not in database_columns_indexes:\n to_add.append(i)\n \n for i in database_columns_indexes:\n if i not in selected_columns_indexes:\n to_remove.append(i)\n \n queries = []\n for variable_id in to_add:\n queries.append(sciplot.database.Query(\"INSERT INTO TableColumn (TableID, VariableID, FormatPattern) VALUES ((?), (?), (?));\", [table_id, variable_id, \"*.*\"], 0)) #add new column to table with a generic format string\n \n for variable_id in to_remove:\n queries.append(sciplot.database.Query(\"DELETE FROM TableColumn WHERE VariableID = (?);\", [variable_id], 0)) #remove unselected column from the database\n \n self._datafile.query(queries)\n\n self.refresh_table() #update table to reflect the changed columns",
"def column_selection_change():\n d = curdoc()\n _remove_fig(d)\n model_id, message_name, _ = run_handlers.get_modelid_messagename_type(d)\n sind = run_handlers.get_source_index(d.session_context.id, model_id, message_name)\n source = d.get_model_by_name(sind)\n _install_callback_and_cds(sind, model_id, message_name, stream_limit=1)\n sel_cols = d.get_model_by_name(COLUMN_MULTISELECT).value\n columns = [ TableColumn(field=c, title=c) for c in sel_cols ]\n data_table = DataTable(source=source, columns=columns, width=500, height=500)\n table_widget = widgetbox(data_table, name=FIGURE_MODEL)\n d.add_root(table_widget)",
"def _selected_columns(self):\n selected_columns = set()\n for feature in self.features:\n columns = feature[0]\n if isinstance(columns, list):\n selected_columns = selected_columns.union(set(columns))\n else:\n selected_columns.add(columns)\n return selected_columns",
"def write_columns(infile, outfile, fields):\n with open(outfile, 'w') as outfile:\n df = pd.read_csv(infile, delimiter='\\t')\n output = df[fields]\n output.to_csv(outfile, sep='\\t')\n outfile.close()",
"def write_csv(self):\n self.tableView.df.to_csv('Data export.csv', index=False)\n print('CSV file exported')",
"def prepare_output_df(df: DataFrame, kind: str) -> DataFrame:\r\n columns = get_export_columns(kind)\r\n to_drop = list(filter(lambda x: x not in columns.keys(), df.columns.to_list())) # For any columns not in the get_export_columns()\r\n df = df.drop(columns=to_drop) # mapping, drop them from the DataFrame.\r\n df = df.rename(columns=columns)\r\n return df",
"def output_fields(request, project_id):\n project = Project.objects.get(id=project_id)\n dal = dal_mongo.DALMongo(project_id)\n ret = {}\n if project.segmentation_skipped:\n ret['col_or_outputfield'] = \"column\"\n ret['values'] = dal.get_matched_cols()\n else:\n ret['col_or_outputfield'] = \"output field\"\n ret['values'] = dal.get_output_fields_matched_cols()\n\n return JsonResponse(ret, safe=False)"
] | [
"0.6418224",
"0.63222647",
"0.60406005",
"0.5980222",
"0.58804923",
"0.58022165",
"0.574687",
"0.5723135",
"0.564667",
"0.55667466",
"0.5561893",
"0.5499155",
"0.549359",
"0.5469247",
"0.54481393",
"0.54277176",
"0.5393627",
"0.5368741",
"0.5366894",
"0.53613985",
"0.5361262",
"0.5361083",
"0.5356567",
"0.53385204",
"0.5336367",
"0.5334994",
"0.52983135",
"0.5294319",
"0.52484226",
"0.5243839"
] | 0.7363901 | 0 |
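
Taken together, the row- and column-export snippets suggest a notebook workflow along these lines, assuming they belong to an ipyaggrid-style grid widget — the import, the Grid constructor and the ag-Grid option names below are assumptions, not confirmed by the rows above; only get_selected_rows, get_selected_columns and grid_data_out come from the snippets. As the docstring notes, column export only works when range selection is enabled:

import pandas as pd
from ipyaggrid import Grid  # assumed package; the snippets do not name their library

df = pd.DataFrame({'x': [1, 2, 3], 'y': [4.0, 5.0, 6.0]})
grid = Grid(
    grid_data=df,
    grid_options={
        'columnDefs': [{'field': c} for c in df.columns],
        'rowSelection': 'multiple',     # row export works with row selection enabled
        'enableRangeSelection': True,   # column export needs range selection (per the docstring above)
    },
)
grid  # cell 1: display the widget, then select rows or drag a cell range in the UI

# cell 2: request the exports; the front end answers asynchronously
grid.get_selected_rows()
grid.get_selected_columns()

# cell 3: read the results once the front end has written them back
rows = grid.grid_data_out.get('rows')
cols = grid.grid_data_out.get('columns')

Because the reply arrives over the widget comm channel, reading grid_data_out in the same cell as the request would usually still see the old value — hence the separate cells.
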
Exports the whole grid to grid_data_out['grid']. | def get_grid(self):
self._export_mode = 'grid'
self._counter_update_data += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def export_grid(self, vtk_fname='GRID', toVTK=True, toNumpy=True):\r\n print('Exporting grids')\r\n tID = 0\r\n # Start by exporting input properties (from read_prop() or read_ext_prop())\r\n # In VTK files, these props will only be visible at only the first timestep\r\n dp = []\r\n propIds = []\r\n for prop in self.out_props:\r\n if type(self.out_props[prop]) is not dict:\r\n data = np.array(self.out_props[prop])\r\n # Save to Numpy\r\n if toNumpy:\r\n self.export_prop(data, prop, tID)\r\n # Add property data to vts structured grid\r\n if toVTK:\r\n propIds = self._prep_vtk(data, prop, propIds)\r\n self._check_out('vtk')\r\n else:\r\n dp.append(prop)\r\n\r\n # Export time-series output properties (from read_out_props())\r\n for t in self.times:\r\n for prop in self.out_props:\r\n if prop in dp:\r\n data = np.array(self.out_props[prop][t], order='F')\r\n # Save to Numpy\r\n if toNumpy:\r\n # self.export_prop(data, prop, tID)\r\n self.export_prop(data, prop, t)\r\n # Add property data to vts structured grid\r\n if toVTK:\r\n propIds = self._prep_vtk(data, prop, propIds)\r\n # Save to VTK\r\n if toVTK:\r\n if tID == 0:\r\n self._check_out('vtk')\r\n # self.exportVTK(os.path.join(self.out_dir, 'vtk', vtk_fname + str(tID)))\r\n self.exportVTK(os.path.join(self.out_dir, 'vtk', vtk_fname + str(t)))\r\n for id in propIds:\r\n self.Grid.GetCellData().RemoveArray(id)\r\n tID += 1\r\n propIds = []",
"def to_cdo_grid(self, outfile):",
"def save_GRID( self , filename ):\n self._fwrite_GRID( filename )",
"def export(self, outdir=os.getcwd(), filename='biogridpy_response'):\r\n\r\n suffix = self.output_format\r\n \r\n #json out includes headers in response\r\n if (self.output_format == 'json' or\r\n self.output_format == 'jsonExtended'):\r\n filepath = os.path.join(outdir, filename + \".\" + suffix)\r\n try:\r\n with open(filepath, 'w') as outfile:\r\n json.dump(self._byteify2(self.result), outfile)\r\n except AttributeError:\r\n with open(filepath, 'w') as outfile:\r\n json.dump(self._byteify3(self.result), outfile)\r\n #tab out need to add headers\r\n elif (self.output_format == 'tab2' or\r\n self.output_format == 'extendedTab2' or\r\n self.output_format == 'tab1'):\r\n filepath = os.path.join(outdir, filename + \".\" + suffix + \".txt\")\r\n with open(filepath, 'w') as outfile:\r\n outfile.write('#' + '\\t'.join(self.headers))\r\n outfile.write(self.result)",
"def onGrid(self, event):\n dlg = wx.FileDialog(self, wildcard=\"*.csv\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.model.exportToGrid(path)\n dlg.Destroy()",
"def export_data(self):\n return self.export_all_data()",
"def export(**kwargs):\n\n import os\n\n interface = None # Holds the actual FileInterface for the specified data format\n vertex_index_to_file_key_map = None\n element_index_to_file_key_map = None\n\n if 'file_name' in kwargs:\n fname = kwargs['file_name']\n else:\n raise ValueError(\"file_name must be specified.\")\n \n extension = os.path.splitext(fname)[1].lower()\n\n if extension=='.msh':\n from bempp.api.file_interfaces import gmsh\n interface = gmsh.GmshInterface()\n \n if int('grid' in kwargs) + int('grid_function' in kwargs) != 1:\n raise ValueError(\"Exactly one of 'grid' or 'grid_function' must be specified\")\n\n if 'grid' in kwargs:\n grid = kwargs['grid']\n elif 'grid_function' in kwargs:\n grid = kwargs['grid_function'].grid\n\n number_of_vertices = grid.leaf_view.entity_count(2)\n number_of_elements = grid.leaf_view.entity_count(0)\n\n offset = interface.index_offset\n\n if 'vertex_index_to_file_key_map' in kwargs:\n vertex_index_to_file_key_map = kwargs['vertex_index_to_file_key_map']\n else:\n vertex_index_to_file_key_map = range(offset,number_of_vertices+offset)\n if 'element_index_to_file_key_map' in kwargs:\n element_index_to_file_key_map = kwargs['element_index_to_file_key_map']\n else:\n element_index_to_file_key_map = range(offset,number_of_elements+offset)\n\n # Create the vertex and element structure\n\n from collections import OrderedDict\n\n vertex_iterator = grid.leaf_view.entity_iterator(2)\n element_iterator = grid.leaf_view.entity_iterator(0)\n index_set = grid.leaf_view.index_set()\n\n vertices = OrderedDict([(vertex_index_to_file_key_map[index_set.entity_index(vertex)],vertex.geometry.corners[:,0])\n for vertex in vertex_iterator])\n elements = OrderedDict([(element_index_to_file_key_map[index_set.entity_index(element)],\n {'data':[vertex_index_to_file_key_map[index_set.sub_entity_index(element,n,2)] for n in range(3)],\n 'domain_index':element.domain}) for element in element_iterator])\n\n interface.add_grid_data(vertices,elements)\n\n # Evaluate data\n\n if 'grid_function' in kwargs:\n fun = kwargs['grid_function']\n data_type = kwargs.get('data_type',interface.default_data_type)\n\n if 'transformation' in kwargs:\n transformation = kwargs['transformation']\n else:\n transformation = lambda x: x\n\n index_set = grid.leaf_view.index_set()\n\n if data_type == 'element_node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates))\n interface.add_element_node_data(data,kwargs.get('label','element_node_data'))\n elif data_type == 'node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(vertex_index_to_file_key_map)\n for element in grid.leaf_view.entity_iterator(0):\n local_data = transformation(fun.evaluate(element,local_coordinates))\n for i in range(3):\n data[vertex_index_to_file_key_map[index_set.sub_entity_index(element,i,2)]] = local_data[:,i]\n interface.add_node_data(data,kwargs.get('label','node_data'))\n elif data_type == 'element':\n local_coordinates = _np.array([[1./3],[1./3]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates).ravel())\n 
interface.add_element_data(data,kwargs.get('label','element_data'))\n else:\n raise ValueError(\"data_type must be one of 'node', 'element', or 'element_node'\")\n\n interface.write(kwargs['file_name'])",
"def output_results(in_file, csv_path, grid, months, left_side):\n file_name = os.path.basename(in_file)\n\n base_name, _ = os.path.splitext(file_name)\n img_path = os.path.join('output', base_name + '_out.png')\n\n with open(csv_path, 'a', newline='') as csv_file:\n writer = csv.writer(csv_file)\n\n fig, ax = plt.subplots(figsize=(10, 15.45), frameon=False)\n ax.imshow(grid.image, cmap=plt.cm.gray)\n ax.axis('off')\n\n color_row_labels(left_side, ax)\n\n for month_idx, month in enumerate(months):\n color_col_labels(month, ax)\n color_grid_cells(month, month_idx, ax, base_name, writer)\n\n fig.savefig(img_path, dpi=300, bbox_inches='tight')",
"def click_re_analysis_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.re_analysis_grid_div_id)",
"def write_grid(self):\n \n self.fout = self.create_savename()\n ncout = Dataset(self.fout, 'w')\n print('Writing: %s' % self.fout)\n \n # Create dimensions\n lon = ncout.createDimension(self.xvar, self.nx)\n lat = ncout.createDimension(self.yvar, self.ny)\n depth = ncout.createDimension(self.zvar, self.nz)\n tdim = ncout.createDimension('time', None)\n bndsDim = ncout.createDimension('bnds', 2)\n\n # Create variables\n varx = ncout.createVariable(self.xvar, 'float64', (self.xvar,))\n vary = ncout.createVariable(self.yvar, 'float64', (self.yvar,))\n varz = ncout.createVariable(self.zvar, 'float64', (self.zvar,))\n\n varx.standard_name = 'longitude'\n varx.units = 'degrees'\n ncout.variables['LONGITUDE'].bounds = 'lon_bnds'\n lonBndsVar = ncout.createVariable('lon_bnds', 'float64', (self.xvar, 'bnds'))\n xboundaries = np.concatenate([self.xminbounds, np.reshape(self.xmaxbounds[-1],(1,1))[0]])\n lonBndsVar[:,:] = np.array([xboundaries[:-1], xboundaries[1:]]).T\n\n vary.standard_name = 'latitude'\n vary.units = 'degrees'\n ncout.variables['LATITUDE'].bounds = 'lat_bnds'\n latBndsVar = ncout.createVariable('lat_bnds', 'float64', (self.yvar, 'bnds'))\n yboundaries = np.concatenate([self.yminbounds, np.reshape(self.ymaxbounds[-1],(1,1))[0]])\n latBndsVar[:,:] = np.array([yboundaries[:-1], yboundaries[1:]]).T\n \n varz.standard_name = 'depth'\n varz.units = 'metres'\n ncout.variables['DEPH_CORRECTED'].bounds = 'depth_bnds'\n depthBndsVar = ncout.createVariable('depth_bnds', 'float64', (self.zvar, 'bnds'))\n zboundaries = np.concatenate([self.zminbounds, np.reshape(self.zmaxbounds[-1],(1,1))[0]])\n depthBndsVar[:,:] = np.array([zboundaries[:-1], zboundaries[1:]]).T\n\n vartmean = ncout.createVariable('tmean', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varmtmean = ncout.createVariable(self.datavar, 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varsum = ncout.createVariable('sum', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varmsum = ncout.createVariable('meansum', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n varcount = ncout.createVariable('count', 'float32', ('time',self.zvar,self.yvar,self.xvar))\n# varmax = ncout.createVariable('gmax', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n# varmin = ncout.createVariable('gmin', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n# varmed = ncout.createVariable('median', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n varpcount = ncout.createVariable('pcount', 'float32', ('time', self.zvar, self.yvar, self.xvar))\n vartime = ncout.createVariable('time', 'float64', ('time',))\n vartime.units = 'hours since 0001-01-01 00:00:00'\n vartime.calendar = 'gregorian'\n\n # Write to variables\n varx[:] = self.xgrid\n vary[:] = self.ygrid\n varz[:] = self.zgrid\n vartmean[:] = self.grid_tmean[np.newaxis]\n varmtmean[:] = self.grid_meantmean[np.newaxis]\n varsum[:] = self.grid_sum[np.newaxis]\n varmsum[:] = self.grid_meansum[np.newaxis]\n varcount[:] = self.grid_count[np.newaxis]\n varpcount[:] = self.grid_pcount[np.newaxis]\n# varmax[:] = self.grid_max[np.newaxis]\n# varmin[:] = self.grid_min[np.newaxis]\n# varmed[:] = self.grid_med[np.newaxis]\n vartime[:] = date2num(self.dt, units=vartime.units, calendar=vartime.calendar)\n \n # Add global attributes\n ncout.history = 'Created ' + time.ctime(time.time())\n \n # Save\n ncout.close()",
"def save_data(self) -> None:\n # Construct a grid in physical space\n rvals = np.logspace(start=-3,\n stop=2.5,\n num=21,\n endpoint=True)\n # Compute C, D, K1 and F on that grid\n Cvals = np.array([self.compute_C(r, Suppression.RAW) for r in rvals])\n Dvals = np.array([self.compute_D(r, Suppression.RAW) for r in rvals])\n K1vals = np.array([self.compute_K1(r, Suppression.RAW) for r in rvals])\n Fvals = np.array([self.compute_F(r, Suppression.RAW) for r in rvals])\n # Save them to file\n df = pd.DataFrame([rvals, Cvals[:, 0], Dvals[:, 0], K1vals[:, 0], Fvals[:, 0],\n Cvals[:, 1], Dvals[:, 1], K1vals[:, 1], Fvals[:, 1]]).transpose()\n df.columns = ['r', 'C(r)', 'D(r)', 'K1(r)', 'F(r)', 'dC(r)', 'dD(r)', 'dK1(r)', 'dF(r)']\n df.to_csv(self.file_path(self.filename + '.csv'), index=False)",
"def dump(self, path, mode='standalone'):\n if mode == 'standalone':\n with open(path+\"/export_grid_standalone\"+str(self._id)+\".html\", 'w+') as f:\n f.write(self.export_html(build=True))\n elif mode == 'all':\n widget_export = self.export_html(build=False)\n with open(path+\"/export_scripts.html\", \"w+\") as f:\n f.write(widget_export['script_tags'])\n with open(path+\"/export_html_state.html\", \"w+\") as f:\n f.write(widget_export['html_state'])\n with open(path+\"/export_state_\"+str(self._id)+\".json\", \"w+\") as f:\n f.write(json.dumps(widget_export['manager_state']))\n with open(path+\"/export_grid_\"+str(self._id)+\".html\", \"w+\") as f:\n f.write(widget_export['grid_div'])",
"def exportTOUGH2(self, fname):\r\n STR = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) # - 1 #\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename, 'w', newline='\\r\\n') as f:\r\n f.write(\"ELEME\")\r\n # debug\r\n f.write(\r\n \"\"\"\r\n 1 10 20 30 40 50 60 70 80\r\n |--------|---------|---------|---------|---------|---------|---------|---------|\r\n 12345678901234567890123456789012345678901234567890123456789012345678901234567890\r\n \"\"\")\r\n\r\n ii = 0\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n # f.write(str(iy)+str(ix)+\"\\n\")\r\n # first base\r\n b2 = ii // (len(STR) * len(STR))\r\n b1 = (ii - len(STR) * b2) // len(STR)\r\n b0 = ii % len(STR)\r\n\r\n f.write(STR[b2] + STR[b1] + STR[b0] + \"\\t\" + str(ii) + \"\\n\")\r\n ii += 1",
"def write_file(self):\n if self.it_num % 5 == 0:\n #plt.imshow(self.grid)\n #plt.savefig(\"output%.4d.png\" % self.it_num, bbox_inches='tight')\n io.savemat(\"MLOutput%.4d\" % self.it_num, { \"Grid\":self.grid})",
"def test_export(filename, folder, space_type):\n grid = bempp.api.shapes.cube(h=0.5)\n space = bempp.api.function_space(grid, *space_type)\n function = bempp.api.GridFunction(\n space, coefficients=np.random.rand(space.global_dof_count)\n )\n bempp.api.export(os.path.join(folder, filename), grid_function=function)",
"def exportECL(self, fname):\r\n\r\n # TODO add consistency of dimensions across the inputs\r\n self.ne, self.nn, self.nz = np.array(self.Grid.GetDimensions()) - 1 # ECLIPSE\r\n filename, ext = os.path.splitext(fname)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename + \".GRDECL\", 'w', newline='\\r\\n') as f:\r\n f.write('-- Generated [\\n')\r\n f.write('-- Format : ECLIPSE keywords (grid geometry and properties) (ASCII)\\n')\r\n # f.write('-- Exported by : Petrel 2013.7 (64-bit) Schlumberger\\n'\r\n f.write('-- Exported by : ReGrid v.' + version + \"\\n\")\r\n f.write('-- User name : ' + getpass.getuser() + \"\\n\")\r\n f.write('-- Date : ' + datetime.now().strftime(\"%A, %B %d %Y %H:%M:%S\") + \"\\n\")\r\n f.write('-- Project : ' + \"ReGrid project\\n\")\r\n f.write('-- Grid : ' + \"Description\\n\")\r\n f.write('-- Generated ]\\n\\n')\r\n\r\n f.write('SPECGRID -- Generated : ReGrid\\n')\r\n f.write(' %i %i %i 1 F /\\n\\n' % (self.ne, self.nn, self.nz))\r\n f.write('COORDSYS -- Generated : ReGrid\\n')\r\n f.write(' 1 4 /\\n\\n') # what is this line?\r\n\r\n f.write('COORD -- Generated : ReGrid\\n')\r\n nz = self.nz\r\n fstr = str(\" \")\r\n\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(0)\r\n fstr = self.printCOORDS(f, p0, fstr)\r\n p1 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(4)\r\n fstr = self.printCOORDS(f, p1, fstr)\r\n # outside edge on far x\r\n p2 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, p2, fstr)\r\n p3 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, p3, fstr)\r\n # outside edge on far y\r\n for ix in range(self.ne):\r\n p8 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(3)\r\n fstr = self.printCOORDS(f, p8, fstr)\r\n p9 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(7)\r\n fstr = self.printCOORDS(f, p9, fstr)\r\n # outside edge on far northeast\r\n p14 = self.Grid.GetCell(ix, iy, 0).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, p14, fstr)\r\n p15 = self.Grid.GetCell(ix, iy, nz - 1).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, p15, fstr)\r\n f.write(fstr)\r\n fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n\r\n f.write('ZCORN -- Generated : ReGrid\\n')\r\n for iz in range(self.nz):\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(0)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(1)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(3)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(2)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # bottom layer\r\n for iy in range(self.nn):\r\n # front face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(4)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(5)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n # back face\r\n for ix in range(self.ne):\r\n p0 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(7)\r\n p1 = self.Grid.GetCell(ix, iy, iz).GetPoints().GetPoint(6)\r\n fstr = self.printCOORDS(f, [p0[2]], fstr)\r\n fstr = self.printCOORDS(f, [p1[2]], fstr)\r\n f.write(fstr)\r\n 
fstr = \" \"\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n f.write('ACTNUM -- Generated : ReGrid\\n')\r\n\r\n c = -999\r\n N = 0\r\n for iac in self.ActiveCells.flatten(order='F'):\r\n if iac == c:\r\n N += 1\r\n else:\r\n if c != -999:\r\n fstr = self.printAC(f, c, N, fstr)\r\n c = iac\r\n N = 1\r\n fstr = self.printAC(f, c, N, fstr)\r\n f.write(fstr)\r\n f.write(\" /\")\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n else:\r\n print(\"Only structured grids can be converted to ECLIPSE files\")",
"def write_grid(self, file_path, fmt='%0.16g'):\n with open(file_path, 'w') as outfile:\n if self.grid.size == 3:\n outfile.write('{}\\t{}\\t{}\\n'.format(self.grid[0].size - 1,\n self.grid[1].size - 1,\n self.grid[2].size - 1))\n else:\n outfile.write('{}\\t{}\\n'.format(self.grid[0].size - 1,\n self.grid[1].size - 1))\n with open(file_path, 'ab') as outfile:\n numpy.savetxt(outfile, numpy.c_[self.grid[0]], fmt=fmt)\n numpy.savetxt(outfile, numpy.c_[self.grid[1]], fmt=fmt)\n if self.grid.size == 3:\n numpy.savetxt(outfile, numpy.c_[self.grid[2]], fmt=fmt)",
"def click_country_groups_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.country_groups_grid_div_id)",
"def save_EGRID( self , filename , output_unit = EclUnitTypeEnum.ERT_ECL_METRIC_UNITS):\n self._fwrite_EGRID2( filename, output_unit )",
"def add_full_grid_output(self,output_filename,output_type, start, step):\n self.ricom.nopt = output_type \n self.ricom.noptstart = start\n self.ricom.nskip = step\n self.ricom.outputFileFull = output_filename",
"def export(exp_data: ExportData) -> None:\n pass",
"def _griddata(self):\n res = self.cfg.resolution\n\n # Get area of data\n xmin, xmax = np.nanmin(self.x), np.nanmax(self.x)\n ymin, ymax = np.nanmin(self.y), np.nanmax(self.y)\n\n # Add padding\n width = xmax-xmin\n height = ymax-ymin\n pad = np.amax([self.cfg.grid_pad_fraction*width, self.cfg.grid_pad_fraction*height])\n xmin = np.floor(xmin - pad)\n xmax = np.ceil(xmax + pad)\n ymin = np.floor(ymin - pad)\n ymax = np.ceil(ymax + pad)\n\n # Create Grid and no data mask\n self.lrx = np.arange(xmin, xmax+res, res)\n self.lry = np.arange(ymin, ymax+res, res)\n self.dem_x, self.dem_y = np.meshgrid(self.lrx, self.lry)\n self.nonan = np.where(np.logical_or(np.isfinite(self.x), np.isfinite(self.y)))\n\n # Create regular grid\n gridding_algorithm = self.cfg.griddata[\"algorithm\"]\n if gridding_algorithm == \"scipy.griddata\":\n self.dem_z = griddata((self.x[self.nonan].flatten(), self.y[self.nonan].flatten()),\n self.als.elevation[self.nonan].flatten(),\n (self.dem_x, self.dem_y),\n **self.cfg.griddata[\"keyw\"])\n else:\n raise NotImplementedError(\"Gridding algorithm: %s\" % gridding_algorithm)\n\n self.dem_z = np.ma.array(self.dem_z)\n self.dem_mask = np.zeros(self.dem_z.shape, dtype=np.bool)",
"def write_grid2d(grid_file, grid2d):\n with grid_file.open('w') as f:\n for row in grid2d['label']:\n f.write('\\t'.join(row) + '\\n')",
"def grid_image(output):\n grid = []\n for data in output:\n grid += [make_grid(data, nrow=5, normalize=True)]\n return grid",
"def export_to_vtk(xgrid, ygrid, data, data_name):\n\tfrom evtk.vtk import VtkFile, VtkStructuredGrid\n\t\n\t\n\t#stupid reshape data\n\toldshape = data.shape\n\tnewshape = oldshape + (1,)\n\tdata = data.reshape(newshape)\n\txgrid = xgrid.reshape(newshape)\n\tygrid = ygrid.reshape(newshape)\n\t\n\t\n\tpath = './{}'.format(data_name)\n\tw = VtkFile(path, VtkStructuredGrid)\n\t\n\t#Header stuff?\n\tnx, ny = oldshape[0] - 1, oldshape[1] - 1\n\tw.openGrid(start = (0, 0, 0), end = (nx, ny, 0))\n\tw.openPiece(start = (0, 0, 0), end = (nx, ny, 0))\n\t\n\tw.openElement(\"Points\")\n\tw.addData(\"points\", (xgrid, ygrid, data))\n\tw.closeElement(\"Points\")\n\t\n\tw.openData(\"Point\", scalars = data_name)\n\tw.addData(data_name, data)\n\tw.closeData(\"Point\")\n\t\n\tw.closePiece()\n\tw.closeGrid()\n\t\n\t#Now add the actual data?\n\tw.appendData((xgrid, ygrid, data))\n\tw.appendData(data)\n\t\n\t#finished\n\tw.save()",
"def export_overview(self, outpath=None):\n orderby = self.orderby.get()\n currentregion = self.region.get()\n if not outpath:\n outpath = tkinter.filedialog.askdirectory()\n if outpath:\n export.export_overview(\n self.tabs.window.aistracker,\n self.tabs.window.nmeatracker,\n self.tabs.window.messagelog,\n outpath, orderby=orderby, region=currentregion)\n else:\n raise ExportAborted('Export cancelled by user.')",
"def export_image(self, bbox, zoomlevel, imagepath):\n assert has_pil, _(\"Cannot export image without python PIL\")\n grid = self.grid_tiles(bbox, zoomlevel)\n width = len(grid[0])\n height = len(grid)\n widthpix = width * self.tile_size\n heightpix = height * self.tile_size\n\n result = Image.new(\"RGBA\", (widthpix, heightpix))\n offset = (0, 0)\n for i, row in enumerate(grid):\n for j, (x, y) in enumerate(row):\n offset = (j * self.tile_size, i * self.tile_size)\n img = self._tile_image(self.tile((zoomlevel, x, y)))\n result.paste(img, offset)\n logger.info(_(\"Save resulting image to '%s'\") % imagepath)\n result.save(imagepath)",
"def grid_results(infile, resolution = 0.01, clip_shp = None, \n overwrite=True, contour=False):\n outfile = infile.rstrip('().csv') + '_gridded.tif'\n # if not overwrite:\n if os.path.isfile(outfile):\n if not overwrite:\n print('Not creating file %s as already exists' % outfile)\n print('To re-create file (e.g if inputs changed) set overwrite=True)')\n return\n else:\n try:\n os.remove(outfile)\n os.remove((outfile.rstrip('.tif') + '_clip.tif'))\n except:\n pass\n data = np.genfromtxt(infile, delimiter=',')\n max_lon = max(data[:,0])\n min_lon = min(data[:,0])\n max_lat = max(data[:,1])\n min_lat = min(data[:,1])\n #print max_lon, min_lon, max_lat, min_lat\n xi = np.arange(min_lon, max_lon, resolution)\n yi = np.arange(min_lat, max_lat, resolution)\n XI,YI = np.meshgrid(xi,yi)\n xsize = len(xi)\n ysize = len(yi)\n\n print('Interpolating results')\n gridded_results = griddata((data[:,0],data[:,1]),data[:,2],(XI,YI),method='linear')\n #print gridded_results\n #outfile = infile.rstrip('().csv') + '_gridded.tif'\n print('Writing gridded data to %s' % outfile)\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create(outfile, xsize, ysize, 1, gdal.GDT_Float32)\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n ds.SetProjection(srs.ExportToWkt())\n gt = [(min_lon - (resolution/2)), resolution, 0, \n (min_lat - (resolution/2)), 0, resolution]\n ds.SetGeoTransform(gt)\n outband=ds.GetRasterBand(1)\n outband.SetStatistics(np.min(gridded_results), np.max(gridded_results), np.average(gridded_results), np.std(gridded_results))\n outband.WriteArray(gridded_results)\n # Need to close output dataset before we can do clipping\n ds = None\n # now clip by shapefile\n if clip_shp is not None:\n clipfile = outfile.rstrip('.tif') + '_clip.tif'\n cmd = ['gdalwarp',\n '-cutline',\n clip_shp,\n '-crop_to_cutline',\n '-dstalpha',\n outfile,\n clipfile]\n print(cmd)\n call(cmd, shell=False)\n if contour is True:\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (outfile, outfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)\n cmd = 'gdal_contour -i 1 -off 0.5 %s %s.shp' % (clipfile, clipfile.rstrip('.tif'))\n print(cmd)\n call(cmd, shell=True)",
"def export_data(self, pth):\n self.cleanup_allowed = False\n self.train_df.to_csv(os.path.join(pth, \"train.csv\"))\n self.valid_df.to_csv(os.path.join(pth, \"valid.csv\"))\n self.test_df.to_csv(os.path.join(pth, \"test.csv\"))",
"def save_grdfile(grddata,depdata,outname,is31=True):\n \n if outname==None:\n print('save_grdfile requires a filename to save.')\n return\n try:\n fp=open(outname,'w')\n except IOError:\n print('save_grdfile: invalid filename.')\n return data\n if is31:\n fp.write('Node Number = %d\\n' % len(depdata['node_num']) )\n fp.write('Cell Number = %d\\n' % len(grddata['nv']) )\n for i in range(0,len(grddata['nv'])):\n fp.write('%d %d %d %d %d\\n'% (grddata['ele_num'][i],grddata['nv'][i,0],grddata['nv'][i,1],grddata['nv'][i,2],0))\n\n for i in range(0,len(depdata['node_num'])):\n fp.write('%d %f %f %f\\n'% (depdata['node_num'][i],depdata['x'][i],depdata['y'][i],depdata['h'][i]))\n fp.close()\n \n return"
] | [
"0.67669964",
"0.643687",
"0.6178726",
"0.616679",
"0.61331165",
"0.6103156",
"0.6066939",
"0.6062741",
"0.5989929",
"0.5965376",
"0.58768976",
"0.580032",
"0.57934624",
"0.577638",
"0.5772764",
"0.5735569",
"0.5720665",
"0.568421",
"0.56778306",
"0.5647633",
"0.56244725",
"0.5603827",
"0.5602781",
"0.5590588",
"0.5573721",
"0.55283713",
"0.5514114",
"0.5499524",
"0.54990315",
"0.5492064"
] | 0.6646964 | 1 |
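
The whole-grid export follows the same deferred pattern: get_grid only sets the mode and bumps the counter, and the full grid later appears in grid_data_out['grid']. A self-contained sketch with a mocked front-end reply (WholeGridSketch and the CSV snapshot are hypothetical; only the attribute and method names mirror the snippet above):

import pandas as pd

class WholeGridSketch:
    """Hypothetical widget stand-in; only _export_mode, _counter_update_data,
    grid_data_out and get_grid are taken from the snippet above."""
    def __init__(self, data):
        self._data = data                 # what the (mocked) front end is displaying
        self._export_mode = ''
        self._counter_update_data = 0
        self.grid_data_out = {}

    def get_grid(self):
        self._export_mode = 'grid'
        self._counter_update_data += 1    # real widget: this change is synced to the front end
        # mocked front-end reply: ship the whole grid back
        if self._export_mode == 'grid':
            self.grid_data_out['grid'] = self._data.copy()


w = WholeGridSketch(pd.DataFrame({'x': [1, 2], 'y': [3.0, 4.0]}))
w.get_grid()
w.grid_data_out['grid'].to_csv('grid_snapshot.csv', index=False)  # e.g. persist what the user currently sees
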
If build==True, returns a str containing the HTML code for embedding the grid as a standalone widget. If build==False, returns a dict containing the 4 parts needed to embed several widgets on the same page. | def export_html(self, build=False):
if build:
html = export_html_code(self)
return (html['script_tags'] +
(html['html_state']).format(manager_state=json.dumps(html['manager_state'])) +
html['grid_div'])
return export_html_code(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def render(self):\n content = self._render_pre_content('div')\n for widget in self._child_widgets:\n content += \"\\n\" + widget.render()\n content += self._render_post_content('div')\n content += \"\"\"<script>\n $(function(){\n $(\"#%s\").accordion({\n collapsible: %s,\n icons: %s,\n heightStyle: \"%s\"\n });\n });\n </script>\n \"\"\" % (self._name, \"true\" if self._collapsible else \"false\",\n self._icons, \"fill\" if self._fill_space else \"\")\n self._widget_content = content\n return self._widget_content",
"def disqus_combination_widget(context, shortname='', num_items=5, \n hide_mods=False, color=\"blue\", default_tab=\"people\", excerpt_length=200):\n shortname = getattr(settings, 'DISQUS_WEBSITE_SHORTNAME', shortname)\n if color not in VALID_COLORS:\n color = VALID_COLORS[0]\n if default_tab not in VALID_TABS:\n default_tab = VALID_TABS[0]\n return {\n 'shortname': shortname,\n 'num_items': int(num_items),\n 'hide_mods': int(hide_mods in VALID_TRUE),\n 'color': color,\n 'default_tab': default_tab,\n 'excerpt_length': int(excerpt_length),\n }",
"def inline_map(map):\n map._build_map()\n return HTML('<iframe srcdoc=\"{srcdoc}\" style=\"width: 100%; height: 510px; border: none\"></iframe>'.format(srcdoc=map.HTML.replace('\"', '"')))",
"def format_webembed(project_id, url=None):\n if not url:\n return \"Please provide a valid demo link.\"\n urltest = url.lower().strip()\n if urltest.startswith('<iframe '):\n # Allow IFRAMEs\n # TODO: add a setting\n return url\n elif urltest.endswith('.pdf'):\n # Embedded document\n url = url_for('project.render', project_id=project_id)\n # url = '/project/%d/render' % project_id\n elif urltest.startswith('https://query.wikidata.org/'):\n # Fix WikiData queries\n url = url.replace('https://query.wikidata.org/',\n 'https://query.wikidata.org/embed.html')\n elif urltest.startswith('https://youtu.be/'):\n # Fix YouTube mobile link\n url = url.replace('https://youtu.be/',\n 'https://www.youtube.com/embed/')\n url = url.replace('?t=', '?start=')\n elif urltest.startswith('https://www.youtube.com/watch?'):\n # Fix YouTube web link\n url = url.replace('https://www.youtube.com/watch?v=',\n 'https://www.youtube.com/embed/')\n url = url.replace('?t=', '?start=')\n # TODO: add more embeddables here\n return '<iframe src=\"%s\"></iframe>' % url",
"def get_embed_dict(self):\n if not self.get_url() or not self.get_embed_url():\n return None\n \n output = {\n \"url\": self.get_url(),\n \"embed_url\": self.get_embed_url(),\n \"provider_url\": self.get_provider_url(),\n \"provider_name\": self.get_provider_name(),\n \"thumbnail_url\": self.get_thumbnail_url(),\n \"type\": \"video\"\n }\n if self.get_height():\n output['iframe_height'] = self.get_height()\n if self.get_width():\n output['iframe_width'] = self.get_width()\n\n return output",
"def build_dollar_embed(dictionary):\n embed= discord.Embed(\n title= dictionary['title'],\n description= dictionary['description'],\n colour= dictionary['color']\n )\n embed.set_thumbnail(url= dictionary['img'])\n\n # scrap dollar data from website\n tree = get_tree_from_HTML(dictionary['url'])\n data = get_data_from_tree(tree, dictionary['path'])\n try:\n if dictionary['field1']:\n embed.add_field(name= dictionary['field1'], value= data[0], inline=False)\n except KeyError:\n pass\n try:\n if dictionary['field2']:\n embed.add_field(name= dictionary['field2'], value= data[1], inline=True)\n except KeyError:\n pass\n\n return embed",
"def render(self):\n content = self._render_pre_content('div')\n for widget in self._child_widgets:\n content += widget.render()\n content += self._render_post_content('div')\n self._widget_content = content\n return self._widget_content",
"def get_html(self):\r\n\r\n # these 3 will be used in class methods\r\n self.html_id = self.location.html_id()\r\n self.html_class = self.location.category\r\n\r\n self.configuration_json = self.build_configuration_json()\r\n params = {\r\n 'gst_html': self.substitute_controls(self.render),\r\n 'element_id': self.html_id,\r\n 'element_class': self.html_class,\r\n 'configuration_json': self.configuration_json\r\n }\r\n content = self.system.render_template(\r\n 'graphical_slider_tool.html', params\r\n )\r\n return content",
"def render(self):\n start_tag = format_html('<div {}>', mark_safe(' '.join(self.field_attrs)))\n output = [start_tag]\n for widget in self:\n output.append(force_text(widget))\n output.append('</div>')\n return mark_safe('\\n'.join(output))",
"def make_dict(self):\n return self.generate_widgets()",
"def render(\n self,\n chart: Union[dict, alt.TopLevelMixin],\n inline: bool = False,\n embed_opt: Optional[dict] = None,\n open_browser: Optional[bool] = None,\n ) -> Dict[str, str]:\n if inline:\n self._initialize()\n return {\"text/html\": self._inline_html(chart, embed_opt)}\n else:\n out = self.display(\n chart, embed_opt=embed_opt, open_browser=open_browser, inline=inline\n )\n return out._repr_mimebundle_() if out is not None else {}",
"def pkgbuildContent( self, pars, directory ):\n\n ret = self.pkgbuildContentHeader( pars, directory );\n\n variables = self.pkgbuildContentVars( pars, directory );\n if len(variables) > 0 :\n ret += \"\\n\".join( map ( lambda x : f\"{ x }={ variables[x] }\", variables )) + \"\\n\"\n\n\n prepareFunction = self.pkgbuildContentPrepare( pars, directory )\n buildFunction = self.pkgbuildContentBuild( pars, directory )\n packageFunction = self.pkgbuildContentPackage( pars, directory )\n contentOther = self.pkgbuildContentOther( pars, directory )\n\n if prepareFunction:\n prepareFunction = self.indent( prepareFunction )\n ret += f\"\"\"\nprepare() {{\n{prepareFunction}\n}}\\\n\"\"\"\n\n if buildFunction:\n buildFunction = self.indent( buildFunction )\n ret += f\"\"\"\nbuild() {{\n{buildFunction}\n}}\\\n\"\"\"\n\n if packageFunction:\n packageFunction = self.indent( packageFunction )\n ret += f\"\"\"\npackage() {{\n{packageFunction}\n}}\\\n\"\"\"\n\n if contentOther:\n ret += contentOther\n\n return ret",
"def render(self):\n content = self._render_pre_content('ul')\n for widget in self._child_widgets:\n content += widget.render()\n content += self._render_post_content('ul')\n self._widget_content = content + \"\\n\" + self._attach_script() + \"\\n\" + self._attach_css()\n return self._widget_content",
"def _repr_html_(self):\n if self.container_id():\n return \"<i>This widget is already shown in this notebook</i>\"\n \n container_id = self.id + '_container'\n def set_cointainer_id():\n self.container_id._set(container_id)\n # Set container id, this gets applied in the next event loop\n # iteration, so by the time it gets called in JS, the div that\n # we define below will have been created.\n from ..app import call_later\n call_later(0.1, set_cointainer_id) # todo: always do calls in next iter\n return \"<div class='flx-container' id=%s />\" % container_id",
"def get_html(self):\n\n # these 3 will be used in class methods\n self.html_id = self.location.html_id()\n self.html_class = self.location.category\n self.configuration_json = self.build_configuration_json()\n params = {\n 'gst_html': self.substitute_controls(self.render),\n 'element_id': self.html_id,\n 'element_class': self.html_class,\n 'configuration_json': self.configuration_json\n }\n content = self.system.render_template(\n 'graphical_slider_tool.html', params)\n return content",
"def gen_html(\n conversations,\n height,\n width,\n title,\n other_speaker,\n human_speaker,\n user_icon,\n alt_icon,\n):\n html_str = f\"\"\"<html>\n<head>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\">\n <title> {title} </title>\n <style type=\"text/css\">\n @media print{{\n @page{{ margin: 0; size: {str(width)}in {str(height)}in; }}\n }}\n ul{{\n list-style: none;\n }}\n .{other_speaker}_img_div{{\n display: inline-block;\n float: left;\n margin: 18px 5px 0px -25px;\n }}\n .{human_speaker}_img_div{{\n display: inline-block;\n float: right;\n margin: 18px 15px 5px 5px;\n }}\n .{other_speaker}_img{{\n content:url({alt_icon});\n }}\n .{human_speaker}_img{{\n content:url({user_icon});\n }}\n .{other_speaker}_p_div{{\n float: left;\n }}\n .{human_speaker}_p_div{{\n float:right;\n }}\n p{{\n display:inline-block;\n overflow-wrap: break-word;\n border-radius: 30px;\n padding: 10px 10px 10px 10px;\n font-family: Helvetica, Arial, sans-serif;\n }}\n .clear{{\n float: none;\n clear: both;\n }}\n .{other_speaker}{{\n background: #eee;\n float: left;\n }}\n .{human_speaker}{{\n background: #0084ff;\n color: #fff;\n float: right;\n }}\n .breaker{{\n color: #bec3c9;\n display: block;\n height: 20px;\n margin: 20px 20px 20px 20px;\n text-align: center;\n text-transform: uppercase;\n }}\n img{{\n border-radius: 50px;\n width: 50px;\n height: 50px;\n }}\n </style>\n</head>\n<body>\n{gen_convo_ul(conversations)}\n</body>\n</html>\n \"\"\"\n return html_str",
"def create_response(content, debug, debug_cmd, cmd_buttons=cmd_buttons):\n return \"\"\"\\\n<html>\n<form action=\"/\" method=\"post\">\n<textarea name=\"input\" style=\"width:100%%;height:25%%;\" placeholder=\"%(workingfile)s\">%(content)s</textarea>\n<input type=\"submit\" value=\"Submit\">\n</form>\n<hr />\n%(cmd_buttons)s\n<hr />\n<h3>Debug (%(debug_cmd)s):</h3>\n<pre>%(debug)s</pre>\n</html>\"\"\" % {\"content\": content,\n \"debug\": debug,\n \"debug_cmd\": debug_cmd,\n \"cmd_buttons\": cmd_buttons,\n \"workingfile\": workingfile}",
"def pkgbuildContentBuild( self, pars, directory ):\n\n return \"\"\"\\\n # If your package requires compilation, insert your build code here\n cd \"${srcdir}/${pkgname}-${pkgver}\"\n echo Building ...\\\n\"\"\"",
"def build_maps():\n return render_template(\"maps.html\")",
"def render(self):\n content = \"<fieldset>\\n<legend>\" + self._title + \"</legend>\\n\"\n for item in self._items:\n val = self._items.get(item)\n title = val[0]\n is_sel = val[1]\n name = self._name + \"_chk_\" + item\n lbl_name = self._name + \"_lbl_\" + item\n label = \"<label for='\" + name + \"' id='\" + lbl_name + \"' >\"\\\n + (title if title is not None else item) + \"</label>\"\n checkbox = \"<input id='\" + name + \"' type='checkbox'\"\\\n + (\" checked\" if is_sel else \"\")\\\n + \" name='\" + self._name + \"_chk'\"\\\n + \" onclick='\" + self._attach_onclick(item) + \"' />\"\n content += \"\\n\" + label + \"\\n\" + checkbox\n self._widget_content = content + \"\\n</fieldset>\"\\\n + self._attach_script() + \"\\n\"\\\n + self._attach_polling()\n return self._widget_content",
"def standalone_html(self) -> str:\n SIMPLE_HTML = jinja2.Template(\"\"\"<!DOCTYPE html>\n <html>\n <head>\n <meta charset=\"UTF-8\">\n <style>\n {{ css }}\n </style>\n </head>\n <body>\n <link href=\"{{bootstrap4_css_url}}\" rel=\"stylesheet\"/>\n <link href=\"{{font_awesome_url}}\" rel=\"stylesheet\"/>\n <link href=\"{{bootstrap_table_css_url}}\" rel=\"stylesheet\"/>\n <script type=\"text/javascript\" src=\"{{ events_url }}\"></script>\n <div id=\"table\" class=\"data-table-container\"></div>\n <script type=\"text/javascript\" src=\"{{ jquery_url }}\"></script>\n <script type=\"text/javascript\" src=\"{{ popper_url }}\"></script> \n <script type=\"text/javascript\" src=\"{{ bootstrap4_js_url }}\"></script> \n <script type=\"text/javascript\" src=\"{{ boostrap_table_js_url }}\"></script> \n <script type=\"text/javascript\">\n $(document).ready(function() {\n var data = JSON.parse('{{data|safe}}');\n $(\"#table\").bootstrapTable(data);\n });\n </script>\n </body>\n </html>\"\"\")\n data = json.dumps(self.table_data, cls=presalytics.story.outline.OutlineEncoder) # dont use hyphens in data keys\n extra_css = base64.b64decode(self.css64).decode('utf-8') if self.css64 else DataTableWidget.DEFAULT_CSS #type: ignore \n context = {\n \"bootstrap4_css_url\": presalytics.lib.plugins.external.ApprovedExternalLinks().attr_dict.flatten().get('bootstrap4'),\n \"font_awesome_url\": presalytics.lib.plugins.external.ApprovedExternalLinks().attr_dict.flatten().get('font-awesome'),\n \"bootstrap_table_css_url\": presalytics.lib.plugins.external.ApprovedExternalLinks().attr_dict.flatten().get('bootstrap-table'),\n \"jquery_url\": presalytics.lib.plugins.external.ApprovedExternalScripts().attr_dict.flatten().get('jquery'),\n \"popper_url\": presalytics.lib.plugins.external.ApprovedExternalScripts().attr_dict.flatten().get('popper'),\n \"boostrap_table_js_url\": presalytics.lib.plugins.external.ApprovedExternalScripts().attr_dict.flatten().get('bootstrap-table'),\n \"d3_url\": presalytics.lib.plugins.external.ApprovedExternalScripts().attr_dict.flatten().get('d3'),\n \"events_url\": presalytics.lib.plugins.external.ApprovedExternalScripts().attr_dict.flatten().get('events'),\n \"data\": data,\n \"css\": extra_css\n }\n return SIMPLE_HTML.render(**context)",
"def embed():",
"def wrap(body):\n return \"\"\"<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\"> \n <title>OpenWhisk Crud Demo</title>\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\">\n <link rel=\"stylesheet\" href=\"%s\">\n <script src=\"%s\"></script>\n <script src=\"%s\"></script>\n </head>\n <body>\n <div class=\"container\">%s</div>\n </body>\n</html>\n\"\"\" % (BOOTSTRAP_CSS, JQUERY, BOOTSTRAP_JS, body)",
"def build_book(self, embed_images = True, embed_styles = True, remove_scripts = True, add_navbar_js = True):\n \n chapter_list = self.get_chapter_list()\n \n for li in chapter_list:\n page = self.get_page(li)\n self.add_page_to(page.page_content, self.book_content)\n \n self.update_links()\n \n if embed_styles:\n self.embed_styles()\n \n if remove_scripts:\n self.remove_scripts()\n \n if embed_images:\n self.embed_images()\n \n if add_navbar_js:\n self.add_navbar_js()\n \n self.remove_html_widgets()\n self.remove_next_page_button()",
"def show(self):\n\t\tself.html += '<head>\\n' + self.head + '</head>\\n<body>\\n' + self.body + '</body>\\n</html>'\n\n\t\treturn self.html",
"def build_shared_embeddings(\n dicts: Dict[str, Dictionary],\n langs: List[str],\n embed_dim: int,\n build_embedding: callable,\n pretrained_embed_path: Optional[str] = None,\n ):\n shared_dict = dicts[langs[0]]\n if any(dicts[lang] != shared_dict for lang in langs):\n raise ValueError(\n \"--share-*-embeddings requires a joined dictionary: \"\n \"--share-encoder-embeddings requires a joined source \"\n \"dictionary, --share-decoder-embeddings requires a joined \"\n \"target dictionary, and --share-all-embeddings requires a \"\n \"joint source + target dictionary.\"\n )\n return build_embedding(shared_dict, embed_dim, pretrained_embed_path)",
"def builder_inited(app):\n if (app.config.wavedrom_html_jsinline and app.builder.name not in ('html', 'dirhtml', 'singlehtml')):\n app.config.wavedrom_html_jsinline = False\n\n # Skip for non-html or if javascript is not inlined\n if not app.env.config.wavedrom_html_jsinline:\n return\n\n if app.config.offline_skin_js_path is not None:\n app.add_js_file(path.basename(app.config.offline_skin_js_path))\n else:\n app.add_js_file(ONLINE_SKIN_JS.format(url=app.config.online_wavedrom_js_url))\n if app.config.offline_wavedrom_js_path is not None:\n app.add_js_file(path.basename(app.config.offline_wavedrom_js_path))\n else:\n app.add_js_file(ONLINE_WAVEDROM_JS.format(url=app.config.online_wavedrom_js_url))",
"def build_html(data, html_out, cats=None, uses=['Y', 'S', 'N'], sets=None,\n title=\"Bibliography\", debug=False):\n cats = {cat for cat in data['Journal Category']} if not cats else cats\n html_string = html_open(title)\n for cat in cats:\n for use in uses:\n html_string = html_string + html_bib_block(\n data, use, cat, sets, debug\n )\n html_string = html_string + html_close()\n with open(html_out, 'w') as h:\n h.write(html_string)",
"def render(self):\n content = self._attach_css() + \"\\n\"\n content += self._render_pre_content('div')\n content += \"<div id='\" + self._name + \"_handle' class='ui-slider-handle'></div>\"\n content += self._render_post_content('div')\n self._widget_content = content + \"\\n\" + self._attach_script() + \"\\n\" + self._attach_polling()\n return self._widget_content",
"def output(self):\n entry = []\n entry.append('''<entry>\n <title mode=\"escaped\" type=\"text/html\">%(title)s</title>\n <link rel=\"alternate\" type=\"text/html\" href=\"%(url)s\" />\n <issued>%(issued)s</issued>\n <modified>%(modified)s</modified>\n ''' % self.__dict__)\n \n if self.feed:\n entry.append('''<link rel=\"service.feed\" type=\"application/atom+xml\" href=\"%s\" title=\"%s\" />''' % (self.feed, self.feed_title))\n if self.comments:\n entry.append('''<link rel=\"comments\" type=\"application/atom+xml\" href=\"%s\" />''' % self.comments)\n if self.author:\n entry.append('''<author>%s</author>''' % self.author.output())\n for person in self.contributors:\n entry.append('''<contributor>%s</contributor>''' % person.output())\n if self.id:\n entry.append('''<id>%s</id>''' % self.id)\n if self.created:\n entry.append('''<created>%s</created>''' % self.created)\n if self.summary:\n entry.append('''<summary type=\"application/xhtml+xml\" xml:base=\"%s\" xml:space=\"preserve\">\n <div xmlns=\"http://www.w3.org/1999/xhtml\">%s</div></summary>''' % (self.base_url, self.summary))\n if self.content:\n #entry.append('''<content type=\"application/xhtml+xml\" xml:base=\"%s\" xml:space=\"preserve\">\n # <div xmlns=\"http://www.w3.org/1999/xhtml\">%s</div></content>''' % (self.base_url, self.content))\n entry.append('''<content type=\"text/html\" mode=\"escaped\" xml:base=\"%s\" xml:space=\"preserve\">%s</content>''' % (self.base_url, self.content))\n \n entry.append('''</entry>''')\n return '\\n'.join(entry)"
] | [
"0.5747042",
"0.5426093",
"0.53925323",
"0.53382397",
"0.5317704",
"0.5224264",
"0.5208884",
"0.5170875",
"0.5137135",
"0.51103103",
"0.5095983",
"0.50568163",
"0.5054981",
"0.5054563",
"0.50098556",
"0.4979679",
"0.4960313",
"0.4940549",
"0.49374953",
"0.49297464",
"0.49275258",
"0.4885925",
"0.48532942",
"0.4835149",
"0.48297095",
"0.47876033",
"0.47699597",
"0.47547725",
"0.47476545",
"0.47467166"
] | 0.57427734 | 1 |
Translates the center point, and keeps it in bounds. | def translate_center(self, dx, dy, dz):
center = self.center
center[0] -= dx
center[1] -= dy
center[2] -= dz
center[0] = min(max(center[0], self.bounds[0]), self.bounds[1])
center[1] = min(max(center[1], self.bounds[0]), self.bounds[1])
center[2] = min(max(center[2], self.bounds[0]), self.bounds[1])
self.program["center"] = self.center = center | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def center(self):\n if self.pos != 0.0:\n self.pos = 0.0",
"def translate(self, displacement):\n\n self.center = (self.center[0] + displacement[0],\n self.center[1] + displacement[1])",
"def update_center(self): \r\n \r\n self.grfx[0].center = self.center\r\n\r\n self.update_bbox()",
"def __moveCenterTo(self, x, y):\n x0, y0, w, h = self.currentBox\n x2, y2 = x - (w/2), y - (h/2)\n self.__moveTo(x2, y2)",
"def position_center(self, x, y):\n self.x = x\n self.y = y\n self.pos[0] = x - self.pos[2]/2\n self.pos[1] = y - self.pos[3]/2",
"def center(self):\n return self.pos + self.axis / 2.0",
"def setCenter(self, center):\n p = center - self.center\n for i in range(len(self.points)):\n self.points[i] += p",
"def rotation_pivot_to_center(self):\n pass",
"def translate(self, displacement):\n self._center = self._center + np.array(displacement)\n self._position = self._position + np.array(displacement)",
"def rot_center(self):\n loc = self.rect.center\n self.image = pygame.transform.rotate(self.current_sprite_alpha, self.rot)\n self.rect = self.image.get_rect()\n self.rect.center = loc",
"def center(self, destination):\n self.move(destination=destination, origin=self.center)",
"def move(self):\n self.center_x += self._vx\n self.center_y += self._vy",
"def centerOn(self, point):\n rect = self.rect()\n x = point.x() - rect.width() / 2.0\n y = point.y() - rect.height() / 2.0\n \n self.setPos(x, y)",
"def center_ship(self):\r\n self.center = self.screen_rect.centerx",
"def center_ship(self):\r\n self.center = self.screen_rect.centerx",
"def center_ava(self):\n\t\tself.rect.midbottom = self.screen_rect.midbottom\n\t\tself.x = float(self.rect.x)",
"def center_ship(self):\n self.center = self.screen_rect.centerx",
"def center_ship(self):\n self.center = self.screen_rect.centerx",
"def center_ship(self):\n self.center = self.screen_rect.centerx",
"def center(self):\n return self._lower + 0.5 * (self._upper - self._lower)",
"def center(self):\n return self.centralizer(self)",
"def translate(self, x_by=0.0, y_by=0.0, phi=0.0):\n self.center[0] += x_by + self.velocity[0] # center tracks exact floating point positions\n self.center[1] += y_by + self.velocity[1]\n self.rect.center = tuple(self.center) # this assignment updates the pygame sprite placement coordinates\n self.rotate(phi + self.omega) # exact degrees",
"def center(self):\r\n self.centerx = self.screen_rect.centerx \r\n self.centery = self.screen_rect.centery",
"def recenter(self, point=(0, 0)):\n self.center = Point(*point)",
"def transformPos(self, point):\n return point / self.scale - self.offsetToCenter()",
"def setCenter(self, np):\n p = self.getCenter()\n v = Vector.createFromTwoPoints(p, np)\n for i in range(len(self.points)):\n self.points[i] = v(self.points[i])",
"def center(self):\n cp = self.dat.flowsheet.getCenter()\n self.centerOn(cp[0], cp[1])",
"def center_camera_absolute_move(camera_obj: CameraThreaded, x, y, w, h):\n\n # I had planned on needing some sort of factor between the x,y of the face center on the screen and the position\n # number that needs to be sent to the camera, but it appears someone has already thought of that. The scale\n # appears to be perfect. One pixel on the image corresponds to one unit of movement for the camera.\n\n pan_scale_factor = 1\n tilt_scale_factor = 1\n\n center_point = {\n \"x\": x + (w / 2),\n \"y\": y + (h / 2)\n }\n\n # How many pixels off is the camera from where we want it?\n x_delta = int((640 / 2) - center_point[\"x\"] * pan_scale_factor)\n y_delta = int((480 / 2) - center_point[\"y\"] * tilt_scale_factor)\n\n updated_pan = camera_obj.current_pan - x_delta\n\n if updated_pan < 0 or updated_pan > 65535:\n # If it's less than zero or greater than 65535, wrap around.\n updated_pan = (65535 - camera_obj.current_pan) - x_delta\n\n camera_obj.current_pan = updated_pan\n\n # 65535 is the horizon, anything lower is \"down\" when camera is in mounted position\n camera_obj.current_tilt = camera_obj.current_tilt - y_delta\n\n # print(f\"Updated_Pan: {updated_pan}\")\n camera_obj.send_position_update()\n return",
"def center_mario(self):\n self.rect.midbottom = self.screen_rect.midbottom\n self.x, self.y = float(self.rect.x), float(self.rect.y)",
"def center(self):\n return (self.upper_right + self.lower_left) * 0.5"
] | [
"0.71007794",
"0.7073161",
"0.70145607",
"0.6979378",
"0.6753653",
"0.6747097",
"0.66957575",
"0.6648545",
"0.66472554",
"0.6637636",
"0.660891",
"0.6562517",
"0.64696074",
"0.6463932",
"0.6463932",
"0.6411822",
"0.6394372",
"0.6394372",
"0.6394372",
"0.63817835",
"0.6370019",
"0.6357461",
"0.63481444",
"0.6333341",
"0.6318116",
"0.63057846",
"0.6300055",
"0.62990934",
"0.6285713",
"0.6281446"
] | 0.76299447 | 0 |
Use the mouse wheel to zoom. | def on_mouse_wheel(self, event):
delta = event.delta[1]
if delta > 0: # Zoom in
factor = 0.9
elif delta < 0: # Zoom out
factor = 1 / 0.9
for _ in range(int(abs(delta))):
self.zoom(factor, event.pos) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_zooming_wheel(self):\n # Zooming: wheel\n self.set('Wheel', 'Zoom',\n param_getter=lambda p: (\n p[\"wheel\"]*.002, \n p[\"mouse_position\"][0],\n p[\"wheel\"]*.002, \n p[\"mouse_position\"][1]))",
"def wheelEvent(self, event: QWheelEvent):\n # zoom only when CTRL key pressed\n if (event.modifiers() & Qt.ControlModifier) == Qt.ControlModifier:\n steps = event.angleDelta().y() / 15 / 8\n\n if steps == 0:\n event.ignore()\n return\n\n # scale factor 1.25\n sc = pow(1.25, steps)\n self.scale(sc, sc)\n self.centerOn(self.mapToScene(event.pos()))\n event.accept()\n # act normally on scrollbar\n else:\n # transmit event to parent class wheelevent\n super(QGraphicsView, self).wheelEvent(event)",
"def callback_mouse_zoom(self, event):\n\n if self.variables.zoom_on_wheel:\n delta = event.delta\n single_delta = 120\n\n # handle case where platform is linux:\n if platform.system() == \"Linux\":\n delta = single_delta\n if event.num == 5:\n delta = delta*-1\n\n zoom_in_box_half_width = int(self.variables.canvas_width / self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_out_box_half_width = int(self.variables.canvas_width * self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_in_box_half_height = int(self.variables.canvas_height / self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_out_box_half_height = int(self.variables.canvas_height * self.variables.mouse_wheel_zoom_percent_per_event / 2)\n\n x = event.x\n y = event.y\n\n after_zoom_x_offset = (self.variables.canvas_width/2 - x)/self.variables.mouse_wheel_zoom_percent_per_event\n after_zoom_y_offset = (self.variables.canvas_height/2 - y)/self.variables.mouse_wheel_zoom_percent_per_event\n\n x_offset_point = x + after_zoom_x_offset\n y_offset_point = y + after_zoom_y_offset\n\n zoom_in_box = (x_offset_point - zoom_in_box_half_width,\n y_offset_point - zoom_in_box_half_height,\n x_offset_point + zoom_in_box_half_width,\n y_offset_point + zoom_in_box_half_height)\n\n zoom_out_box = (x_offset_point - zoom_out_box_half_width,\n y_offset_point - zoom_out_box_half_height,\n x_offset_point + zoom_out_box_half_width,\n y_offset_point + zoom_out_box_half_height)\n\n if self.variables.the_canvas_is_currently_zooming:\n pass\n else:\n if delta > 0:\n self.zoom_to_selection(zoom_in_box, self.variables.animate_zoom)\n else:\n self.zoom_to_selection(zoom_out_box, self.variables.animate_zoom)\n else:\n pass",
"def wheelEvent(self, ev):\n\n # Check if we're in auto Zoom mode\n if self.__zooming:\n # we're zooming\n if (ev.angleDelta().y() > 0):\n self.zoom(ev.pos(), 1)\n else:\n self.zoom(ev.pos(), -1)\n\n else:\n # not zooming - pass wheel event on\n self.mouseWheel.emit(self, ev)",
"def set_zooming_mouse(self):\n # Zooming: right button mouse\n self.set('RightClickMove', 'Zoom',\n param_getter=lambda p: (p[\"mouse_position_diff\"][0]*2.5,\n p[\"mouse_press_position\"][0],\n p[\"mouse_position_diff\"][1]*2.5,\n p[\"mouse_press_position\"][1]))",
"def wheelEvent(self, event: QtGui.QWheelEvent) -> None:\n scaleFactor = 1 + (event.angleDelta().y() / 600)\n # Limit zoom to a reasonable range.\n if scaleFactor > 1 and self._scaleFactor > 10:\n return\n elif scaleFactor < 1 and self._scaleFactor < .8:\n return\n self.scale(scaleFactor, scaleFactor)\n self._scaleFactor = self._scaleFactor * scaleFactor # Keep track of current scaling factor",
"def onWheel(self, event):\r\n ax = event.inaxes\r\n step = event.step\r\n\r\n\r\n if ax != None:\r\n # Event occurred inside a plotting area\r\n lo,hi = ax.get_xlim()\r\n lo,hi = _rescale(lo,hi,step,pt=event.xdata)\r\n ax.set_xlim((lo,hi))\r\n\r\n lo,hi = ax.get_ylim()\r\n lo,hi = _rescale(lo,hi,step,pt=event.ydata)\r\n ax.set_ylim((lo,hi))\r\n else:\r\n # Check if zoom happens in the axes\r\n xdata,ydata = None,None\r\n x,y = event.x,event.y\r\n for ax in self.axes:\r\n insidex,_ = ax.xaxis.contains(event)\r\n if insidex:\r\n xdata,_ = ax.transAxes.inverse_xy_tup((x,y))\r\n #print \"xaxis\",x,\"->\",xdata\r\n insidey,_ = ax.yaxis.contains(event)\r\n if insidey:\r\n _,ydata = ax.transAxes.inverse_xy_tup((x,y))\r\n #print \"yaxis\",y,\"->\",ydata\r\n if xdata is not None:\r\n lo,hi = ax.get_xlim()\r\n lo,hi = _rescale(lo,hi,step,bal=xdata)\r\n ax.set_xlim((lo,hi))\r\n if ydata is not None:\r\n lo,hi = ax.get_ylim()\r\n lo,hi = _rescale(lo,hi,step,bal=ydata)\r\n ax.set_ylim((lo,hi))\r\n \r\n self.canvas.draw_idle()",
"def wheel(ticks):\n m = PyMouse()\n m.scroll(ticks)",
"def wheelEvent(self,event):\n factor = 1.41 ** (-event.delta()/240.0)\n self.scale(factor,factor)",
"def windows_zoomer(self, event):\n if event.delta > 0:\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n elif event.delta < 0:\n self.canvas.scale(\"all\", event.x, event.y, 0.9, 0.9)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))",
"def linux_zoomer_plus(self, event):\n self.canvas.scale(\"all\", event.x, event.y, 1.1, 1.1)\n self.canvas.configure(scrollregion=self.canvas.bbox(\"all\"))",
"def on_mouse_press(self, event):\n self.on_mouse_wheel(event)",
"def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")",
"def ev_mousewheel(self, event: MouseWheel) -> None:",
"def mouseMoveEvent(self, ev):\n shift = ev.modifiers() & QtCore.Qt.ShiftModifier\n ctrl = ev.modifiers() & QtCore.Qt.ControlModifier\n if shift:\n y = ev.pos().y()\n if not hasattr(self, '_prev_zoom_pos') or not self._prev_zoom_pos:\n self._prev_zoom_pos = y\n return\n dy = y - self._prev_zoom_pos\n def delta():\n return -dy * 5\n ev.delta = delta\n self._prev_zoom_pos = y\n self.wheelEvent(ev)\n elif ctrl:\n pos = ev.pos().x(), ev.pos().y()\n if not hasattr(self, '_prev_pan_pos') or not self._prev_pan_pos:\n self._prev_pan_pos = pos\n return\n dx = pos[0] - self._prev_pan_pos[0]\n dy = pos[1] - self._prev_pan_pos[1]\n self.pan(dx, dy, 0, relative=True)\n self._prev_pan_pos = pos\n else:\n super(PlotObject, self).mouseMoveEvent(ev)",
"def set_zoombox_mouse(self):\n # Zooming: zoombox (drag and drop)\n self.set('MiddleClickMove', 'ZoomBox',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))",
"def do_scroll_event(self, event):\n\t\tif event.state & gtk.gdk.CONTROL_MASK:\n\t\t\tif event.direction == gtk.gdk.SCROLL_UP:\n\t\t\t\tself.zoom *= 1.1\n\t\t\telif event.direction == gtk.gdk.SCROLL_DOWN:\n\t\t\t\tself.zoom /= 1.1",
"def on_mouse_wheel(self, e): # pragma: no cover\n super(TraceView, self).on_mouse_wheel(e)\n if e.modifiers == ('Alt',):\n start, end = self._interval\n delay = e.delta * (end - start) * .1\n self.shift(-delay)",
"def zoom(self, factor, mouse_coords=None):\n if mouse_coords is not None: # Record the position of the mouse\n x, y = float(mouse_coords[0]), float(mouse_coords[1])\n x0, y0, z0 = self.pixel_to_coords(x, y)\n\n self.scale *= factor\n self.scale = max(min(self.scale, self.max_scale), self.min_scale)\n self.program[\"scale\"] = self.scale\n\n # Translate so the mouse point is stationary\n if mouse_coords is not None:\n x1, y1, z1 = self.pixel_to_coords(x, y)\n self.translate_center(x1 - x0, y1 - y0, z1 - z0)",
"def _on_scroll(self, event):\n self._zoom(event.step, draw=True)",
"def __wheel(self, event):\n x = self.canvas.canvasx(event.x) # get coordinates of the event on the canvas\n y = self.canvas.canvasy(event.y)\n if self.outside(x, y): return # zoom only inside image area\n scale = 1.0\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down, smaller\n if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.__delta\n scale /= self.__delta\n if event.num == 4 or event.delta == 120: # scroll up, bigger\n i = min(self.canvas.winfo_width(), self.canvas.winfo_height()) >> 1\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= self.__delta\n scale *= self.__delta\n # Take appropriate image from the pyramid\n k = self.imscale * self.__ratio # temporary coefficient\n self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)\n self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))\n #\n self.canvas.scale('all', x, y, scale, scale) # rescale all objects\n # Redraw some figures before showing image on the screen\n self.redraw_figures() # method for child classes\n self.__show_image()",
"def enableZoomIn(self):\n self.zoomInID = self.canvas.mpl_connect('button_press_event', self.onZoomIn)\n self.master.config(cursor = \"cross\")",
"def __wheel(self, event):\n x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas\n y = self.canvas_image.canvasy(event.y)\n if self.outside(x, y): return # zoom only inside image area\n scale = 1.0\n # Respond to Linux (event.num) or Windows (event.delta) wheel event\n if event.num == 5 or event.delta == -120: # scroll down, smaller\n if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels\n self.imscale /= self.__delta\n scale /= self.__delta\n if event.num == 4 or event.delta == 120: # scroll up, bigger\n i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1\n if i < self.imscale: return # 1 pixel is bigger than the visible area\n self.imscale *= self.__delta\n scale *= self.__delta\n # Take appropriate image from the pyramid\n k = self.imscale * self.__ratio # temporary coefficient\n self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1)\n self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img))\n #\n self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects\n # Redraw some figures before showing image on the screen\n self.redraw_figures() # method for child classes\n self.__show_image()",
"def setZoomOnWheelEnabled(self, enabled: bool):\n if enabled != self.__zoomOnWheel:\n self.__zoomOnWheel = enabled\n self.sigChanged.emit()",
"def enableZoomOut(self):\n self.zoomOutID = self.canvas.mpl_connect('button_press_event', self.onZoomOut)\n self.master.config(cursor = \"cross\")",
"def set_zoombox_keyboard(self):\n # Idem but with CTRL + left button mouse \n self.set('LeftClickMove', 'ZoomBox',\n key_modifier='Control',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))",
"def on_scroll(self, win, _deltax, deltay):\n self.zoom(deltay, glfw.get_window_size(win)[1])",
"def on_scroll(self, win, _deltax, deltay):\n self.zoom(deltay, glfw.get_window_size(win)[1])",
"def wheelEvent(self, event):\n degrees = event.angleDelta().y() / 8\n steps = degrees / 15\n self.view_state.scale *= 1.5 ** steps",
"def increment_zoom(self):\n if self._diving:\n self.mpl_mandelbrot.increment_zoom_anchored(self._zoom_frac_per_frame)"
] | [
"0.84443176",
"0.7937277",
"0.78334904",
"0.77894384",
"0.74605197",
"0.7382998",
"0.7369537",
"0.73458064",
"0.7263063",
"0.7241332",
"0.721578",
"0.71982265",
"0.7182637",
"0.7154681",
"0.7117357",
"0.7109941",
"0.71053416",
"0.70348686",
"0.7025429",
"0.701461",
"0.6977925",
"0.691554",
"0.6893093",
"0.68185425",
"0.6806755",
"0.67915535",
"0.6767464",
"0.6767464",
"0.6747572",
"0.6728997"
] | 0.8668343 | 0 |
Factors less than zero zoom in, and greater than zero zoom out. If mouse_coords is given, the point under the mouse stays stationary while zooming. mouse_coords should come from MouseEvent.pos. | def zoom(self, factor, mouse_coords=None):
if mouse_coords is not None: # Record the position of the mouse
x, y = float(mouse_coords[0]), float(mouse_coords[1])
x0, y0, z0 = self.pixel_to_coords(x, y)
self.scale *= factor
self.scale = max(min(self.scale, self.max_scale), self.min_scale)
self.program["scale"] = self.scale
# Translate so the mouse point is stationary
if mouse_coords is not None:
x1, y1, z1 = self.pixel_to_coords(x, y)
self.translate_center(x1 - x0, y1 - y0, z1 - z0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def callback_mouse_zoom(self, event):\n\n if self.variables.zoom_on_wheel:\n delta = event.delta\n single_delta = 120\n\n # handle case where platform is linux:\n if platform.system() == \"Linux\":\n delta = single_delta\n if event.num == 5:\n delta = delta*-1\n\n zoom_in_box_half_width = int(self.variables.canvas_width / self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_out_box_half_width = int(self.variables.canvas_width * self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_in_box_half_height = int(self.variables.canvas_height / self.variables.mouse_wheel_zoom_percent_per_event / 2)\n zoom_out_box_half_height = int(self.variables.canvas_height * self.variables.mouse_wheel_zoom_percent_per_event / 2)\n\n x = event.x\n y = event.y\n\n after_zoom_x_offset = (self.variables.canvas_width/2 - x)/self.variables.mouse_wheel_zoom_percent_per_event\n after_zoom_y_offset = (self.variables.canvas_height/2 - y)/self.variables.mouse_wheel_zoom_percent_per_event\n\n x_offset_point = x + after_zoom_x_offset\n y_offset_point = y + after_zoom_y_offset\n\n zoom_in_box = (x_offset_point - zoom_in_box_half_width,\n y_offset_point - zoom_in_box_half_height,\n x_offset_point + zoom_in_box_half_width,\n y_offset_point + zoom_in_box_half_height)\n\n zoom_out_box = (x_offset_point - zoom_out_box_half_width,\n y_offset_point - zoom_out_box_half_height,\n x_offset_point + zoom_out_box_half_width,\n y_offset_point + zoom_out_box_half_height)\n\n if self.variables.the_canvas_is_currently_zooming:\n pass\n else:\n if delta > 0:\n self.zoom_to_selection(zoom_in_box, self.variables.animate_zoom)\n else:\n self.zoom_to_selection(zoom_out_box, self.variables.animate_zoom)\n else:\n pass",
"def on_mouse_wheel(self, event):\n delta = event.delta[1]\n if delta > 0: # Zoom in\n factor = 0.9\n elif delta < 0: # Zoom out\n factor = 1 / 0.9\n for _ in range(int(abs(delta))):\n self.zoom(factor, event.pos)",
"def set_zooming_mouse(self):\n # Zooming: right button mouse\n self.set('RightClickMove', 'Zoom',\n param_getter=lambda p: (p[\"mouse_position_diff\"][0]*2.5,\n p[\"mouse_press_position\"][0],\n p[\"mouse_position_diff\"][1]*2.5,\n p[\"mouse_press_position\"][1]))",
"def _handleClick(self, event):\n\n\t\t(x_min, x_max, y_min, y_max) = [i for i in self.extent]\n\t\tif event.xdata != None and event.ydata != None:\n\t\t\t(click_x, click_y) = (event.xdata, event.ydata)\n\t\t\tnewWidth = (x_max-x_min)/self.zoom\n\t\t\tnewHeight = (y_max-y_min)/self.zoom\n\n\t\t\t# update self.extent to the new zoomed in extent\n\t\t\tself.extent = [click_x-newWidth/2, click_x+newWidth/2, click_y-newHeight/2, click_y+newHeight/2]\n\t\t\tself.plot()",
"def evt_zoom_released(self):\n # record home XY limit if it is never zoomed\n if self._isZoomed is False:\n self._homeXYLimit = list(self.getXLimit())\n self._homeXYLimit.extend(list(self.getYLimit()))\n # END-IF\n\n # set the state of being zoomed\n self._isZoomed = True\n\n return",
"def set_zoombox_mouse(self):\n # Zooming: zoombox (drag and drop)\n self.set('MiddleClickMove', 'ZoomBox',\n param_getter=lambda p: (p[\"mouse_press_position\"][0],\n p[\"mouse_press_position\"][1],\n p[\"mouse_position\"][0],\n p[\"mouse_position\"][1]))",
"def getMouseClicks(plotcoords = 0):\n nmax = 1000\n xlist, ylist = [-92171]*nmax,[-92171]*nmax\n nclicks = dislin.csrpts(xlist, ylist, nmax)\n xlist, ylist = xlist[:nclicks], ylist[:nclicks]\n if plotcoords:\n return xlist, ylist\n else:\n x = [dislin.xinvrs(i) for i in xlist]\n y = [dislin.yinvrs(i) for i in ylist]\n return x,y",
"def _zoom(self, sign=1, draw=False):\n delta = _ZOOM_STEP_SIZE * sign\n for axis, fig in enumerate(self._figs):\n xmid = self._images['cursor_v'][axis].get_xdata()[0]\n ymid = self._images['cursor_h'][axis].get_ydata()[0]\n xmin, xmax = fig.axes[0].get_xlim()\n ymin, ymax = fig.axes[0].get_ylim()\n xwidth = (xmax - xmin) / 2 - delta\n ywidth = (ymax - ymin) / 2 - delta\n if xwidth <= 0 or ywidth <= 0:\n return\n fig.axes[0].set_xlim(xmid - xwidth, xmid + xwidth)\n fig.axes[0].set_ylim(ymid - ywidth, ymid + ywidth)\n if draw:\n self._figs[axis].canvas.draw()",
"def onWheel(self, event):\r\n ax = event.inaxes\r\n step = event.step\r\n\r\n\r\n if ax != None:\r\n # Event occurred inside a plotting area\r\n lo,hi = ax.get_xlim()\r\n lo,hi = _rescale(lo,hi,step,pt=event.xdata)\r\n ax.set_xlim((lo,hi))\r\n\r\n lo,hi = ax.get_ylim()\r\n lo,hi = _rescale(lo,hi,step,pt=event.ydata)\r\n ax.set_ylim((lo,hi))\r\n else:\r\n # Check if zoom happens in the axes\r\n xdata,ydata = None,None\r\n x,y = event.x,event.y\r\n for ax in self.axes:\r\n insidex,_ = ax.xaxis.contains(event)\r\n if insidex:\r\n xdata,_ = ax.transAxes.inverse_xy_tup((x,y))\r\n #print \"xaxis\",x,\"->\",xdata\r\n insidey,_ = ax.yaxis.contains(event)\r\n if insidey:\r\n _,ydata = ax.transAxes.inverse_xy_tup((x,y))\r\n #print \"yaxis\",y,\"->\",ydata\r\n if xdata is not None:\r\n lo,hi = ax.get_xlim()\r\n lo,hi = _rescale(lo,hi,step,bal=xdata)\r\n ax.set_xlim((lo,hi))\r\n if ydata is not None:\r\n lo,hi = ax.get_ylim()\r\n lo,hi = _rescale(lo,hi,step,bal=ydata)\r\n ax.set_ylim((lo,hi))\r\n \r\n self.canvas.draw_idle()",
"def callback_handle_left_mouse_release(self, event):\n\n if self.variables.active_tool == TOOLS.PAN_TOOL:\n self._pan(event)\n if self.variables.active_tool == TOOLS.ZOOM_IN_TOOL:\n rect_coords = self.coords(self.variables.zoom_rect_id)\n self.zoom_to_selection(rect_coords, self.variables.animate_zoom)\n self.hide_shape(self.variables.zoom_rect_id)\n if self.variables.active_tool == TOOLS.ZOOM_OUT_TOOL:\n rect_coords = self.coords(self.variables.zoom_rect_id)\n x1 = -rect_coords[0]\n x2 = self.variables.canvas_width + rect_coords[2]\n y1 = -rect_coords[1]\n y2 = self.variables.canvas_height + rect_coords[3]\n zoom_rect = (x1, y1, x2, y2)\n self.zoom_to_selection(zoom_rect, self.variables.animate_zoom)\n self.hide_shape(self.variables.zoom_rect_id)",
"def onZoomIn(self, event):\n try:\n print('%s click: button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %\n ('double' if event.dblclick else 'single', event.button,\n event.x, event.y, event.xdata, event.ydata))\n except:\n return\n\n\n self.plotter.zoomIn(event)",
"def getMouseClick(plotcoords = 0):\n coords = dislin.csrpt1()\n if plotcoords:\n return coords\n else:\n return dislin.xinvrs(coords[0]), dislin.yinvrs(coords[1])",
"def can_zoom(self):\n return False",
"def _defaultZoom(self):\n return (-1.0, 1.0, -1.0, 1.0)",
"def _zoom(self, x0, y0, x1, y1):\n # Store current zoom state in stack\n self.plot.getLimitsHistory().push()\n\n extents = self._getAxesExtent(x0, y0, x1, y1)\n self.plot.setLimits(\n extents.xmin,\n extents.xmax,\n extents.ymin,\n extents.ymax,\n extents.y2min,\n extents.y2max,\n )",
"def onLeftDClick(self, event):\n\n # ignore next Left UP event\n self.ignore_next_up = True\n\n # should ignore double-click off the map, but within view\n # a possible workaround is to limit minimum view level\n\n # get view coords of mouse double click, want same centre afterwards\n (x, y) = event.GetPositionTuple()\n\n if event.ShiftDown():\n # zoom out if shift key also down\n if self.use_level(self.level-1):\n self.zoomOut(x, y)\n else:\n # zoom in\n if self.use_level(self.level+1):\n self.zoomIn(x, y)\n\n self.handleMousePositionCallback((x, y))",
"def handle_mouse(self, x, y):\n self.last_x = x\n self.last_y = y\n if self.min_x is not None:\n self.last_x = max(self.last_x, self.min_x)\n if self.max_x is not None:\n self.last_x = min(self.last_x, self.max_x)\n # we are in region mode\n if self.region_id is not None:\n start = self.last_x\n end = self.region_edge\n self.region_model.adjust_region(self.region_id, start, end)\n return False",
"def zoom(self, xmin, xmax, xlen, ymin, ymax, ylen):\n self.xmax = xmax\n self.xmin = xmin\n self.xmax = xmax\n self.xlen = xlen\n self.ymin = ymin\n self.ymax = ymax\n self.ylen = ylen\n self.refresh()",
"def plot_zoom(ax, xlims):\n\n xmin, xmax, ymin, ymax = get_y_lims(ax, xlims)\n ax.set_xlim(xmin, xmax)\n ax.set_ylim(ymin, ymax)\n\n return ax",
"def change_zoom(self, b):\n\n x_mid = int(self.ff[0].info['xres'] / 2)\n y_mid = int(self.ff[0].info['yres'] / 2)\n\n x = x_mid - self.x_crop_slider.value\n\n if self.y_crop.value is True:\n y = y_mid - self.y_crop_slider.value\n else:\n y = y_mid - self.x_crop_slider.value\n\n x0 = x_mid - x\n x1 = x_mid + x\n y0 = y_mid - y\n y1 = y_mid + y\n\n self.x_range = [x0, x1]\n self.y_range = [y0, y1]\n\n self.ax.set_xlim([x0, x1])\n self.ax.set_ylim([y0, y1])",
"def random_zoom(x, zoom_range, row_axis=0, col_axis=1, channel_axis=2,\n fill_mode='nearest', cval=0., interpolation_order=1):\n if len(zoom_range) != 2:\n raise ValueError('`zoom_range` should be a tuple or list of two'\n ' floats. Received: %s' % (zoom_range,))\n\n if zoom_range[0] == 1 and zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)\n x = apply_affine_transform(x, zx=zx, zy=zy, channel_axis=channel_axis,\n fill_mode=fill_mode, cval=cval,\n order=interpolation_order)\n return x",
"def zoom(x, zoom_range=(0.9, 1.1), flags=None, border_mode='constant'):\n zoom_matrix = affine_zoom_matrix(zoom_range=zoom_range)\n h, w = x.shape[0], x.shape[1]\n transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)\n x = affine_transform_cv2(x, transform_matrix, flags=flags, border_mode=border_mode)\n return x",
"def on_mouse_press(self, event):\n self.on_mouse_wheel(event)",
"def _updateOnMouseState(self, state):\n x = state.X.abs\n y = state.Y.abs\n \n mscale = self.mouse_icon.getScale() \n \n if (x + mscale[0] + self.mouse_offset) > render_engine.Window.width:\n x = x - mscale[0] - 10\n else:\n x += self.mouse_offset\n \n if (y + mscale[1] + self.mouse_offset) > render_engine.Window.height:\n y = y - mscale[1] - 10\n else:\n y += self.mouse_offset\n \n self.mouse_icon.setPosition((x, y))",
"def apply_zoom(self):\n self.maparea.setTransform(self.zoom_levels[self.cur_zoom][1])\n self.scene.draw_visible_area()",
"def normal_mouse_move(self, event):\n plot = self.component\n if plot is not None:\n if isinstance(plot, BaseXYPlot):\n ndx = plot.map_index((event.x, event.y), index_only = True)\n x = plot.index.get_data()[ndx]\n y = plot.value.get_data()[ndx]\n print self.format % (x,y)\n else:\n print \"dataprinter: don't know how to handle plots of type\",\n print plot.__class__.__name__\n return",
"def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,\n fill_mode='nearest', cval=0.):\n if len(zoom_range) != 2:\n raise ValueError('`zoom_range` should be a tuple or list of two floats. '\n 'Received arg: ', zoom_range)\n\n if zoom_range[0] == 1 and zoom_range[1] == 1:\n zx, zy = 1, 1\n else:\n zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)\n zoom_matrix = np.array([[zx, 0, 0],\n [0, zy, 0],\n [0, 0, 1]])\n\n h, w = x.shape[row_axis], x.shape[col_axis]\n transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)\n x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)\n return x",
"def on_zoom_change(self, event) -> None:\r\n\r\n zoom_level = int(self.zoom_scale.get())\r\n self.painter.zoom = zoom_level\r\n self.painter.draw_board()",
"def action_zoom_in(self):\n if self.cur_zoom < len(self.zoom_levels) - 1:\n self.cur_zoom += 1\n self.zoom_widget.setValue(self.cur_zoom)\n self.apply_zoom()",
"def zoom_bbox(self, xmin, ymin, xmax, ymax):\n xleft, ybottom, xright, ytop = xmin, ymin, xmax, ymax\n oldxleft, oldytop, oldxright, oldybottom = self.coordspace_bbox\n # ensure old and zoom axes go in same directions\n if not (xleft < xright) == (oldxleft < oldxright):\n xleft,xright = xright,xleft\n if not (ytop < ybottom) == (oldytop < oldybottom):\n ytop,ybottom = ybottom,ytop\n # zoom it\n self.custom_space(xleft, ytop, xright, ybottom, lock_ratio=True)"
] | [
"0.6173162",
"0.5783437",
"0.5664475",
"0.55626905",
"0.5560327",
"0.5393201",
"0.5381642",
"0.5365337",
"0.53392553",
"0.52633005",
"0.5250451",
"0.5242291",
"0.5166117",
"0.5082881",
"0.50540864",
"0.50444806",
"0.4985049",
"0.49819043",
"0.49564654",
"0.4937829",
"0.49370706",
"0.4925005",
"0.49190736",
"0.491448",
"0.49064583",
"0.4904275",
"0.48961523",
"0.48721847",
"0.4848136",
"0.48446754"
] | 0.71623904 | 0 |
Generic callback for web requests receive a Deferred from the mapped function. This simply json encodes the response and passes it to the request | def deferred_response(response, request):
request.write(simplejson.dumps(response))
request.finish() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def jsonResult(f):\n def _inner(self, request):\n d = maybeDeferred(f, self, request)\n d.addCallback(_writeJSONResponse, request)\n d.addErrback(_writeJSONErrorResponse, request)\n return NOT_DONE_YET\n return _inner",
"def request_callback(request):\n headers = {'content-type': \"application/json\",\n 'cache-control': \"no-cache\"}\n return (200, headers, json.dumps(response_data))",
"def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n\n data = json.dumps(objects)\n if 'callback' in request:\n # a jsonp response!\n data = '%s(%s);' % (request['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n\n return HttpResponse(data, \"application/json\")\n return decorator",
"def json_response(func):\n\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = simplejson.dumps(objects)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except:\n data = simplejson.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n\n return decorator",
"def json_response(func):\n\n async def wrapped(*args, **kwargs):\n content, status = await func(*args, **kwargs)\n return web.json_response(data=content, status=status)\n\n return wrapped",
"def json_response(func):\n async def wrapped(*args, **kwargs):\n content, status = await func(*args, **kwargs)\n return web.json_response(data=content, status=status)\n return wrapped",
"def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = simplejson.dumps(objects)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except:\n data = simplejson.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n return decorator",
"def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = json.dumps(objects, default=json_serialize)\n if 'callback' in request.REQUEST:\n # a jsonp response!\n data = '%s(%s);' % (request.REQUEST['callback'], data)\n return HttpResponse(data, \"text/javascript\")\n except Exception as e:\n print (e)\n data = json.dumps(str(objects))\n return HttpResponse(data, \"application/json\")\n return decorator",
"def json_response(func):\n def decorator(request, *args, **kwargs):\n objects = func(request, *args, **kwargs)\n if isinstance(objects, HttpResponse):\n return objects\n try:\n data = simplejson.dumps(objects)\n if 'callback' in request.GET:\n data = '%s(%s);' % (request.GET['callback'], data)\n except:\n data = simplejson.dumps(str(objects))\n if 'just_the_json_plz' in kwargs:\n return data\n if 'just_the_data_plz' in kwargs:\n return objects\n if 'callback' in request.GET or 'callback' in request.POST:\n #jsonp\n return HttpResponse(data, \"text/javascript\")\n else:\n #json\n return HttpResponse(data, \"application/json\")\n return decorator",
"def response_json(func):\n def wrapper(request):\n try:\n return get_json_response(func(request))\n except Exception as ex:\n return get_json_response({\n \"status\": \"error\",\n \"error_info\": str(ex),\n \"trace_back\": traceback.format_exc()\n })\n\n return wrapper",
"def test_that_method_should_return_json_on_success(self, request):\n result = None\n\n def on_next(v):\n nonlocal result\n result = v\n\n value = MagicMock()\n value.json.return_value = {'json': 'dict'}\n observable = rx.Observable.from_([value])\n request.return_value = observable\n r = rx_json('GET', 'http://google.com')\n\n r.subscribe(on_next=on_next)\n\n self.assertEqual({'json': 'dict'}, result)",
"def response_json(func):\n\n def wrapper(req):\n try:\n\n return get_json_response(func(req))\n except Exception as ex:\n return get_json_response({\n \"status\": \"error\",\n \"error_info\": str(ex),\n \"trace_back\": traceback.format_exc()\n })\n\n return wrapper",
"def inner(*args, **kwargs):\n return Response(\n dumps(function(*args, **kwargs)),\n mimetype='application/json'\n )",
"def auto_jsonp(f):\n def new(*arg, **kw):\n callback = request.GET.get('callback')\n result_data = f(*arg, **kw)\n if callback and isinstance(result_data, dict):\n # We only do JSONP for dicts\n response.headers['Content-type'] = 'text/javascript'\n return makeJSONP(callback, result_data)\n # otherwise, we just return as usual\n return result_data\n\n return new",
"def support_jsonp(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n callback = request.args.get('callback', False)\n if callback:\n content = str(callback) + '(' + str(f().data) + ')'\n return app.response_class(content, mimetype='application/json')\n else:\n return f(*args, **kwargs)\n return decorated_function",
"def _bound_callback(self, operation):\n def callback(**path_args):\n # Dispatch incoming request after applying proxy to request object\n resp = self.dispatch(operation, RequestProxy(request), **path_args)\n\n # Translate standard OdinWeb response into Bottle response.\n response.status = resp.status\n for k, v in resp.headers.items():\n response[k] = v\n\n return resp.body\n return callback",
"def support_jsonp(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n # callback = request.args.get('callback', False) # GET only\n callback = request.values.get('callback', False) # A CombinedMultiDict with the contents of both form and args.\n if callback:\n # content = str(callback) + '(' + str(f(*args,**kwargs).data) + ')'\n # returns dump_json output instead of Dictionary\n content = str(callback) + '(' + str(f(*args,**kwargs)) + ')'\n return current_app.response_class(content, mimetype=JAVASCRIPT_MIMETYPE)\n else:\n return f(*args, **kwargs)\n return decorated_function",
"def jsonify(func):\n\n @functools.wraps(func)\n def convert(*args, **kwargs):\n\n success = True\n code = 200 # default status code - success!\n\n try:\n result = func(*args, **kwargs)\n\n if isinstance(result, BaseResponse):\n return result\n\n except exc.HTTPException as ex:\n # i'd like to be able to just re-raise e here, but the body of the\n # response is e.get_body() instead of e.description - so we have to\n # just set up the response ourselves\n result = { 'message' : ex.description }\n code = ex.code\n\n except Exception as ex:\n result = { 'message' : 'Internal Server Error', 'system_message' : ex.message }\n code = 500\n\n # build a response object, and change the content type header to json\n response = make_response(json.dumps(result))\n response.headers['Content-Type'] = 'application/json'\n response.status_code = code\n\n return response\n\n # return the function that is taking the place of (or masquerading as) our decorated function\n return convert",
"def json_response(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\tdata = func(*args, **kwargs)\n\t\tdata = json.dumps(data)\n\t\tresponse = make_response(data)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\t\treturn response\n\treturn decorated_view",
"def json_response(f):\n \n def wrapped(*args, **kwargs):\n result = f(*args, **kwargs)\n \n response = HttpResponse(json.dumps(result))\n \n if type(result) == dict and \"error\" in result:\n response.status_code = 500\n \n \n return response",
"def decorated_function(request, *args, **kwargs):\n user_for_login(request)\n response['data'] = f(*args, **kwargs)\n response = json.dumps(response)\n return response",
"def json(f):\n if dsettings.DEBUG:\n ct = 'text/plain'\n j = lambda d: simplejson.dumps(d, indent = 2)\n else:\n ct = 'application/json'\n j = simplejson.dumps\n def wrapper(func, *args, **kw):\n try:\n result = func(*args, **kw)\n except Exception, e:\n result = j(str(e))\n status = 500\n else:\n if isinstance(result, http.HttpResponse):\n return result\n else:\n result = j(result)\n status = 200\n return http.HttpResponse(content = result, content_type = ct, status = status)\n return decorator(wrapper, f)",
"def jsonp(func):\n @wraps(func)\n def decorated_function(*args, **kwargs):\n callback = request.args.get('callback', False)\n if callback:\n data = str(jsonify(func(*args, **kwargs)).data)\n content = str(callback) + '(' + data + ')'\n mimetype = 'application/javascript'\n return current_app.response_class(content, mimetype=mimetype)\n else:\n return func(*args, **kwargs)\n return decorated_function",
"def respond(self,result):\n callback = self.request.get('callback')\n self.response.headers['Content-Type'] = 'application/json'\n #self.response.headers['Content-Type'] = '%s; charset=%s' % (config.CONTENT_TYPE, config.CHARSET)\n self.response.headers['Access-Control-Allow-Origin'] = '*'\n self.response.headers['Access-Control-Allow-Methods'] = 'GET, POST, PUT, DELETE, OPTIONS, PATCH, HEAD'\n self.response.headers['Access-Control-Allow-Headers'] = 'Origin, Content-Type, X-Requested-With'\n self.response.headers['Access-Control-Allow-Credentials'] = 'True'\n\n #Add a handler to automatically convert datetimes to ISO 8601 strings. \n dthandler = lambda obj: obj.isoformat() if isinstance(obj, datetime.datetime) else None\n if callback:\n content = str(callback) + '(' + json.dumps(result,default=dthandler) + ')'\n return self.response.out.write(content)\n \n return self.response.out.write(json.dumps(result,default=dthandler))",
"def async_request(self, callback, *args):\r\n seq = self.send_request(*args)\r\n self.async_replies[seq] = callback",
"def jsonp(func):\n @wraps(func)\n def decorated_function(*args, **kwargs):\n callback = request.args.get('callback', False)\n if callback:\n data = str(func(*args, **kwargs).data)\n content = str(callback) + '(' + data + ')'\n mimetype = 'application/javascript'\n return current_app.response_class(content, mimetype=mimetype)\n else:\n return func(*args, **kwargs)\n return decorated_function",
"def response_json(func):\n\n @wraps(func)\n def set_response(*args, **kwargs):\n res = func(*args, **kwargs)\n if type(res) is not dict:\n return res\n else:\n return Response(json.dumps(res), content_type=\"application/json; charset=utf-8\")\n return set_response",
"def jsonify(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n result = f(*args, **kwargs)\n data = json.dumps(result, indent=None if request.is_xhr else 2)\n return app.response_class(data, mimetype='application/json')\n return decorated_function",
"def use_GET_in(fn, request):\n response = fn(request.GET)\n if isinstance(response, dict):\n return HttpResponse(json.dumps(response),\n content_type='application/json')\n else:\n return response",
"def support_jsonp(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n callback = request.args.get('callback', False)\n if callback:\n content = str(callback) + '(' + str(f(*args,**kwargs).data) + ')'\n return current_app.response_class(content, mimetype='application/javascript')\n else:\n return f(*args, **kwargs)\n return decorated_function"
] | [
"0.6835601",
"0.64164114",
"0.62959665",
"0.62895465",
"0.6273546",
"0.6271747",
"0.62673527",
"0.6228324",
"0.6218314",
"0.6022155",
"0.59871244",
"0.5986469",
"0.59107774",
"0.5852411",
"0.58145773",
"0.5737035",
"0.57284033",
"0.5719401",
"0.5699597",
"0.5644658",
"0.56393903",
"0.5616718",
"0.5595544",
"0.5581155",
"0.5571921",
"0.55624866",
"0.55439913",
"0.5530915",
"0.5440447",
"0.5434295"
] | 0.6709971 | 1 |
renders this resource. By default his returns a list of available interfaces. This should be used for things such as configuring clients. | def render(self, request):
return simplejson.dumps(self.module._registered_interfaces.keys()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interface(self):\n\n data = ['[Interface]']\n for item in INTERFACE_KEYS:\n value = getattr(self, item, None)\n if value:\n data.append(value)\n\n return '''\n'''.join(data)",
"def interface(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"interface\"), kwargs)",
"def show_interfaces(self):\n txt = \"Show Interfaces of %s\\n%s has %d interfaces\\n\"%(self.hostname, self.hostname, len(self.interfaces))\n for inf in self.interfaces:\n txt += \"%s IP-Address: %s \\\"%s\\\"\\n\"%(inf, self.interfaces[inf]['ip'], self.interfaces[inf]['status'])\n return txt",
"def get_interfaces(self):\n raise NotImplementedError",
"def list(self, req, resp):\n interfaces = []\n for e in EntryPoints('tachyonic.element.interfaces'):\n interfaces.append({'id': e, 'name': e})\n return raw_list(req, interfaces)",
"def interfaces(self):",
"def interfaces(self):",
"def interfaces(self):\n if self._interfaces is None:\n self._interfaces = list(x[\"interface\"] for x in self._interfaces_detailed_list())\n\n return self._interfaces",
"def _get_interfaces(self):\n return self.__interfaces",
"def _get_interfaces(self):\n return self.__interfaces",
"def _get_interfaces(self):\n return self.__interfaces",
"def show_interface(self, interface=None):\n\n interface = interface.title()\n\n if interface is not None and interface in self.interfaces:\n print self.interfaces[interface]\n else:\n for i in self.interfaces:\n print self.interfaces[i]",
"def show_interface(self, interface=None):\n\n interface = interface.title()\n\n if interface is not None and interface in self.interfaces:\n print self.interfaces[interface]\n else:\n for i in self.interfaces:\n print self.interfaces[i]",
"def list():\n\n\treturn netifaces.interfaces()",
"def render(self):\n if self.can_render():\n output = '<ul>'\n for item in self.items:\n output += \"<li>{0}</li>\".format(item)\n return output + '</ul>'\n return ''",
"def list(self, **kwargs):\n data, self.endpoint = self.data_endpoint(kwargs)\n r = super(Resource, self).list(**data)\n\n # Change display settings and data format for human consumption\n self.configure_display(r)\n return r",
"def getClientInterfaces(self):\n return self.clients",
"def getInterface(self):\n\t\tquery = ''\n\t\tconn = self.get_connection()\n\t\theaders = { 'Content-type' : 'application/json', 'Authorization' : 'A10 %s' %self.sessionid}\n\t\tconn.request('GET', self.get_path() + '/' + query, headers=headers)\n\t\tresponse = conn.getresponse()\n\t\texpected_status = 200\n\t\terrors = {500: 'An unexpected runtime exception', 404: 'Specified interface does not exist'}\n\t\tpayload = self.get_output(response, expected_status, errors)\n\t\tconn.close()\n\t\tif self.debug:\n\t\t\tprint 'payload:', payload\n\t\tif payload == '':\n\t\t\tpayload = None\n\t\tif payload is not None:\n\t\t\tdata = json.loads(payload)\n\t\t\tpayload= data.get('interface')\n\t\treturn deserialize_Interface_json(payload)",
"def ifaces(self):\n return self._ifaces",
"def resources(self):\n return [self]",
"def interface(self):\n return self._interface",
"def render(self):\n raise NotImplementedError()",
"def render(self):\n raise NotImplementedError",
"def backend_getInterface(self):\n\t\treturn describeInterface(self)",
"def app_network_interface_list(self, **kwargs):\n return self._get(\n _name=APINames.Application,\n _method=\"networkInterfaceList\",\n response_class=NetworkInterfaceList,\n **kwargs\n )",
"def show_interface(dut, interface_name = None, cli_type=\"\"):\n cli_type = st.get_ui_type(dut, cli_type=cli_type)\n cli_type = \"klish\" if cli_type in [\"rest-put\", \"rest-patch\"] else cli_type\n output = list()\n if cli_type == \"klish\" or cli_type == \"click\":\n command = \"show sflow interface\"\n if interface_name:\n command = \"{} | grep {}\".format(command, interface_name)\n return st.show(dut, command, type=cli_type)\n elif cli_type == \"rest\":\n if not interface_name:\n url = REST_URI\n else:\n url = \"{}/SFLOW_SESSION/SFLOW_SESSION_TABLE\".format(REST_URI)\n result = st.rest_read(dut, url, SFLOW_SESSION_LIST=interface_name)\n if result and result.get(\"status\") == 200 and result.get(\"output\"):\n if YANG_MODULE in result[\"output\"]:\n data = result[\"output\"][YANG_MODULE]\n if data.get(\"SFLOW_SESSION_TABLE\").get(\"SFLOW_SESSION_LIST\"):\n for intf_list in data.get(\"SFLOW_SESSION_TABLE\").get(\"SFLOW_SESSION_LIST\"):\n response = dict()\n response[\"sampling_rate\"] = intf_list.get(\"sample_rate\")\n response[\"admin_status\"] = intf_list.get(\"admin_state\")\n response[\"interface\"] = intf_list.get(\"ifname\")\n if response:\n output.append(response)\n else:\n st.log(\"{} not observed in ouput\".format(YANG_MODULE))\n else:\n st.log(\"REST show INTERFACE GET CALL --- {}\".format(output))\n return output\n else:\n st.log(\"UNSUPPORTED CLI TYPE {}\".format(cli_type))\n return output",
"def displayable_items(self):\r\n return [self]",
"def get_resources(self):\n return []",
"def _default(self):\n\n self.app.render(infoNetwork.all())",
"def interface(self):\n return self.broker.interface(**{\"DeviceRouteID\": self.DeviceRouteID})"
] | [
"0.669263",
"0.6505434",
"0.63153064",
"0.62008065",
"0.6175075",
"0.6163913",
"0.6163913",
"0.60404605",
"0.60120076",
"0.60120076",
"0.60120076",
"0.5910974",
"0.5910974",
"0.5809516",
"0.57388264",
"0.57246846",
"0.5709058",
"0.5708651",
"0.56945944",
"0.567488",
"0.563987",
"0.55962294",
"0.55872285",
"0.55718535",
"0.55666983",
"0.5564538",
"0.5541856",
"0.55049485",
"0.5488227",
"0.54796624"
] | 0.65413374 | 1 |
constructs a twisted service for Controllers to connect to | def get_controller_service(self, master):
root = InterfaceResource(self)
#setup services
from twisted.internet.ssl import DefaultOpenSSLContextFactory
try:
key = '%s/ca-key.pem' % pydra_settings.RUNTIME_FILES_DIR
cert = '%s/ca-cert.pem' % pydra_settings.RUNTIME_FILES_DIR
context = DefaultOpenSSLContextFactory(key, cert)
except:
logger.critical('Problem loading certificate required for \
ControllerInterface from ca-key.pem and \
ca-cert.pem. Generate certificate with \
gen-cert.sh')
sys.exit()
return internet.SSLServer(pydra_settings.CONTROLLER_PORT, \
server.Site(root), contextFactory=context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.config = get_config()\n self.log = get_logger(self)\n\n self.factory = SugarServerFactory(\"wss://*:5505\")\n self.factory.protocol = SugarServerProtocol\n\n self.console_factory = SugarConsoleServerFactory(\"wss://localhost:5507\")\n self.console_factory.protocol = SugarConsoleServerProtocol\n\n self.api = APIService(self.config)",
"def makeService(config):\n from twisted.internet import reactor\n\n # We need a HTTP connection pool for rproxy.\n pool = HTTPConnectionPool(reactor)\n\n proxyResource = RProxyResource(\n hosts=hosts,\n pool=pool,\n customHeaders=customHeaders,\n reactor=reactor\n )\n redirectResource = RedirectResource()\n\n secureSite = Site(proxyResource)\n insecureSite = Site(redirectResource)\n\n multiService = service.MultiService()\n multiService.addService(\n strports.service('le:/certs:tcp:' + HTTPS_PORT, secureSite)\n )\n multiService.addService(\n strports.service(\"tcp:\" + HTTP_PORT, insecureSite)\n )\n return multiService",
"def test_makeService(self):\n maker = serve.ServiceMaker()\n\n endpoint = object()\n maker._serverFromString = lambda reactor, spec: endpoint\n site = object()\n maker._buildSite = lambda: site\n\n svc = maker.makeService({\"endpoint\": \"something\"})\n self.assertTrue(isinstance(svc, internet.StreamServerEndpointService))\n self.assertIdentical(svc.endpoint, endpoint)\n self.assertIdentical(svc.factory, site)",
"def initService(self):",
"def use_twisted(app):\n activity.EventLoop <<= activity.TwistedEventLoop\n REACTOR_INIT.notify(app)",
"def RemoteRouter(services):\n return PublicController(services)",
"def service(self):\n pass",
"def start(self):\n #url = '{}://{}:{}/'.format('http',\n # self.ip,\n # self.port)\n #self.service_info = ServiceInfo(\n # '_webthing._sub._http._tcp.local.',\n # '{}._http._tcp.local.'.format(self.name),\n # address=socket.inet_aton(self.ip),\n # port=self.port,\n # properties={\n # 'url': url,\n # },\n # server='{}.local.'.format(socket.gethostname()))\n #self.zeroconf = Zeroconf()\n #self.zeroconf.register_service(self.service_info)\n\n # If WebSocketS used and NOT running in thread, and WebServer IS\n # running in thread make shure WebServer has enough stack size to\n # handle also the WebSocket requests.\n log.info('Starting Web Server')\n self.server.Start(threaded=srv_run_in_thread, stackSize=8192)",
"def startService(self):\n from twisted.internet import reactor\n\n def connected(bot):\n self._bot = bot\n\n def failure(err):\n log.err(err, _why='Could not connect to specified server.')\n reactor.stop()\n\n client = clientFromString(reactor, self._endpoint)\n factory = GBRobotFactory(\n self._channel,\n self._nickname,\n self._realname,\n self._password,\n self._triggers,\n )\n\n return client.connect(factory).addCallbacks(connected, failure)",
"def __init__(self, reactor=None):\n self.Setup()\n self.ServiceEnabled = settings.SERVICE_ENABLED\n self.peer_zero_count = 0 # track the number of times PeerCheckLoop saw a Peer count of zero. Reset e.g. after 3 times\n self.connection_queue = []\n self.reactor = twisted_reactor\n self.incoming_server_running = False\n self.forced_disconnect_by_us = 0\n self.peers_connecting = 0\n\n # for testability\n if reactor:\n self.reactor = reactor",
"def makeService(self, options):\n endpoint = self._serverFromString(reactor, options[\"endpoint\"])\n factory = self._buildSite()\n return internet.StreamServerEndpointService(endpoint, factory)",
"def __init__(self):\n\n self.loop = asyncio.get_event_loop()\n self.aiohttp = web.Application(\n loop=self.loop,\n middlewares=[unhandled_route],\n )\n self.client = ClientSession()\n self.ws = WebSocketHandler(self)\n self.cert = self._load_ssl_certificate()\n\n self.config()",
"def client(self, reactor, serverAddress):\n raise NotImplementedError()",
"def test_serviceDefaultReactor(self):\n from twisted.internet import reactor as globalReactor\n aService = strports.service(\"tcp:80\", None)\n self.assertIs(aService.endpoint._reactor, globalReactor)",
"def init_services(self):\n service_prefix = rospy.get_name() + \"/\"\n\n self._request_components_serv = rospy.Service(service_prefix +\n 'list_components',\n ListComponents,\n self.get_components)\n self._request_fields_serv = rospy.Service(service_prefix +\n 'list_fields',\n ListFields,\n self.get_fields)\n self._request_values_serv = rospy.Service(service_prefix +\n 'request_values',\n RequestValues,\n self.get_values)\n self._unsubscribe_values_serv = rospy.Service(service_prefix +\n 'unsubscribe_values',\n UnsubscribeValues,\n self.unsubscribe_values)",
"def makeService(options):\n from twisted.conch.ssh.connection import SSHConnection\n from twisted.conch.ssh.factory import SSHFactory\n from twisted.conch.ssh.keys import Key\n from twisted.cred.portal import Portal\n\n from swftp.realm import SwftpRealm\n from swftp.sftp.server import SwiftSSHUserAuthServer\n from swftp.auth import SwiftBasedAuthDB\n from swftp.utils import (\n log_runtime_info, GLOBAL_METRICS, parse_key_value_config)\n\n c = get_config(options['config_file'], options)\n\n sftp_service = service.MultiService()\n\n # ensure timezone is GMT\n os.environ['TZ'] = 'GMT'\n time.tzset()\n\n print('Starting SwFTP-sftp %s' % VERSION)\n\n # Add statsd service\n if c.get('sftp', 'log_statsd_host'):\n try:\n from swftp.statsd import makeService as makeStatsdService\n makeStatsdService(\n c.get('sftp', 'log_statsd_host'),\n c.getint('sftp', 'log_statsd_port'),\n sample_rate=c.getfloat('sftp', 'log_statsd_sample_rate'),\n prefix=c.get('sftp', 'log_statsd_metric_prefix')\n ).setServiceParent(sftp_service)\n except ImportError:\n sys.stderr.write('Missing Statsd Module. Requires \"txstatsd\" \\n')\n\n if c.get('sftp', 'stats_host'):\n from swftp.report import makeService as makeReportService\n known_fields = [\n 'command.login',\n 'command.logout',\n 'command.gotVersion',\n 'command.openFile',\n 'command.removeFile',\n 'command.renameFile',\n 'command.makeDirectory',\n 'command.removeDirectory',\n 'command.openDirectory',\n 'command.getAttrs',\n ] + GLOBAL_METRICS\n makeReportService(\n c.get('sftp', 'stats_host'),\n c.getint('sftp', 'stats_port'),\n known_fields=known_fields\n ).setServiceParent(sftp_service)\n\n authdb = SwiftBasedAuthDB(\n c.get('sftp', 'auth_url'),\n global_max_concurrency=c.getint('sftp', 'num_persistent_connections'),\n max_concurrency=c.getint('sftp', 'num_connections_per_session'),\n timeout=c.getint('sftp', 'connection_timeout'),\n extra_headers=parse_key_value_config(c.get('sftp', 'extra_headers')),\n verbose=c.getboolean('sftp', 'verbose'),\n rewrite_scheme=c.get('sftp', 'rewrite_storage_scheme'),\n rewrite_netloc=c.get('sftp', 'rewrite_storage_netloc'),\n )\n\n rabbitmq_hosts = c.get('rabbitmq', 'rabbitmq_hosts')\n rabbitmq_cluster = RabbitClusterClient([RabbitReplica(host, port) \\\n for host, port in [(h,int(p)) for h,p in [r.split(':') \\\n for r in rabbitmq_hosts.split(',')]]], \\\n c.get('rabbitmq', 'username'), \\\n c.get('rabbitmq', 'password')) \\\n if rabbitmq_hosts else None\n queue_name = c.get('rabbitmq', 'queue_name')\n\n realm = SwftpRealm(rabbitmq_cluster, queue_name)\n sftpportal = Portal(realm)\n sftpportal.registerChecker(authdb)\n\n sshfactory = SSHFactory()\n protocol = SwiftSSHServerTransport\n protocol.maxConnectionsPerUser = c.getint('sftp', 'sessions_per_user')\n protocol.supportedCiphers = c.get('sftp', 'chiphers')\n protocol.supportedMACs = c.get('sftp', 'macs')\n protocol.supportedCompressions = c.get('sftp', 'compressions')\n sshfactory.protocol = protocol\n sshfactory.noisy = False\n sshfactory.portal = sftpportal\n sshfactory.services['ssh-userauth'] = SwiftSSHUserAuthServer\n sshfactory.services['ssh-connection'] = SSHConnection\n\n pub_key_string = file(c.get('sftp', 'pub_key')).read()\n priv_key_string = file(c.get('sftp', 'priv_key')).read()\n sshfactory.publicKeys = {\n 'ssh-rsa': Key.fromString(data=pub_key_string)}\n sshfactory.privateKeys = {\n 'ssh-rsa': Key.fromString(data=priv_key_string)}\n\n signal.signal(signal.SIGUSR1, log_runtime_info)\n signal.signal(signal.SIGUSR2, log_runtime_info)\n\n internet.TCPServer(\n 
c.getint('sftp', 'port'),\n sshfactory,\n interface=c.get('sftp', 'host')).setServiceParent(sftp_service)\n\n return sftp_service",
"def client():",
"def server(self, reactor):\n raise NotImplementedError()",
"def service(self) -> BaseService:",
"def makeService_Slave(self, options):\n pool, txnFactory = getDBPool(config)\n if config.DirectoryProxy.Enabled:\n store = storeFromConfigWithDPSClient(config, txnFactory)\n else:\n store = storeFromConfigWithoutDPS(config, txnFactory)\n directory = store.directoryService()\n logObserver = AMPCommonAccessLoggingObserver()\n result = self.requestProcessingService(options, store, logObserver)\n\n # SIGUSR1 causes in-process directory cache reset\n def flushDirectoryCache(signalNum, ignored):\n if config.EnableControlAPI:\n directory.flush()\n signal.signal(signal.SIGUSR1, flushDirectoryCache)\n\n if pool is not None:\n pool.setName(\"db\")\n pool.setServiceParent(result)\n\n if config.ControlSocket:\n id = config.ControlSocket\n self.log.info(\"Control via AF_UNIX: {id}\", id=id)\n endpointFactory = lambda reactor: UNIXClientEndpoint(\n reactor, id\n )\n else:\n id = int(config.ControlPort)\n self.log.info(\"Control via AF_INET: {id}\", id=id)\n endpointFactory = lambda reactor: TCP4ClientEndpoint(\n reactor, \"127.0.0.1\", id\n )\n controlSocketClient = ControlSocket()\n\n class LogClient(AMP):\n\n def startReceivingBoxes(self, sender):\n super(LogClient, self).startReceivingBoxes(sender)\n logObserver.addClient(self)\n\n f = Factory()\n f.protocol = LogClient\n\n controlSocketClient.addFactory(_LOG_ROUTE, f)\n\n from txdav.common.datastore.sql import CommonDataStore as SQLStore\n\n if isinstance(store, SQLStore):\n def queueMasterAvailable(connectionFromMaster):\n store.queuer = connectionFromMaster\n queueFactory = QueueWorkerFactory(\n store.newTransaction, queueMasterAvailable\n )\n controlSocketClient.addFactory(_QUEUE_ROUTE, queueFactory)\n\n controlClient = ControlSocketConnectingService(\n endpointFactory, controlSocketClient\n )\n controlClient.setName(\"control\")\n controlClient.setServiceParent(result)\n\n # Optionally set up push notifications\n pushDistributor = None\n if config.Notifications.Enabled:\n observers = []\n if config.Notifications.Services.APNS.Enabled:\n pushSubService = ApplePushNotifierService.makeService(\n config.Notifications.Services.APNS, store)\n observers.append(pushSubService)\n pushSubService.setName(\"APNS\")\n pushSubService.setServiceParent(result)\n if config.Notifications.Services.AMP.Enabled:\n pushSubService = AMPPushForwarder(controlSocketClient)\n observers.append(pushSubService)\n if observers:\n pushDistributor = PushDistributor(observers)\n\n # Optionally set up mail retrieval\n if config.Scheduling.iMIP.Enabled:\n mailRetriever = MailRetriever(\n store, directory, config.Scheduling.iMIP.Receiving\n )\n mailRetriever.setName(\"MailRetriever\")\n mailRetriever.setServiceParent(result)\n else:\n mailRetriever = None\n\n # Optionally set up group cacher\n if config.GroupCaching.Enabled:\n cacheNotifier = MemcacheURLPatternChangeNotifier(\"/principals/__uids__/{token}/\", cacheHandle=\"PrincipalToken\") if config.EnableResponseCache else None\n groupCacher = GroupCacher(\n directory,\n updateSeconds=config.GroupCaching.UpdateSeconds,\n initialSchedulingDelaySeconds=config.GroupCaching.InitialSchedulingDelaySeconds,\n batchSize=config.GroupCaching.BatchSize,\n batchSchedulingIntervalSeconds=config.GroupCaching.BatchSchedulingIntervalSeconds,\n useDirectoryBasedDelegates=config.GroupCaching.UseDirectoryBasedDelegates,\n cacheNotifier=cacheNotifier,\n )\n else:\n groupCacher = None\n\n # Allow worker to post alerts to master\n AlertPoster.setupForWorker(controlSocketClient)\n\n def decorateTransaction(txn):\n txn._pushDistributor = 
pushDistributor\n txn._rootResource = result.rootResource\n txn._mailRetriever = mailRetriever\n txn._groupCacher = groupCacher\n\n store.callWithNewTransactions(decorateTransaction)\n return result",
"def service_bus_server():\n pass",
"def create_servicech(self, conf, params):\n\t\tpass",
"def test_starts_control_amp_service(self):\n options = ControlOptions()\n options.parseOptions(\n [b\"--agent-port\", b\"tcp:8001\", b\"--data-path\", self.mktemp()])\n reactor = MemoryCoreReactor()\n ControlScript().main(reactor, options)\n server = reactor.tcpServers[1]\n port = server[0]\n protocol = server[1].buildProtocol(None)\n self.assertEqual(\n (port, protocol.__class__, protocol.control_amp_service.__class__),\n (8001, ControlAMP, ControlAMPService))",
"def makeService(self, options):\n s = MultiService()\n\n irp = internet.TCPServer(int(options[\"port\"]), IRPServerFactory())\n irp.setServiceParent(s)\n\n manholeFactory = ShellFactory()\n manholeFactory.username = \"admin\"\n manholeFactory.password = \"admin\"\n manholeFactory.namespace[\"foo\"] = 12\n manholeService = internet.TCPServer(8000, manholeFactory)\n manholeService.setServiceParent(s)\n\n return s",
"def service(self):\n self.serviceConnects()\n self.serviceQueries()",
"def test_service(self):\n reactor = object() # the cake is a lie\n aFactory = Factory()\n aGoodPort = 1337\n svc = strports.service(\n 'tcp:' + str(aGoodPort), aFactory, reactor=reactor)\n self.assertIsInstance(svc, internet.StreamServerEndpointService)\n\n # See twisted.application.test.test_internet.EndpointServiceTests.\n # test_synchronousRaiseRaisesSynchronously\n self.assertTrue(svc._raiseSynchronously)\n self.assertIsInstance(svc.endpoint, TCP4ServerEndpoint)\n # Maybe we should implement equality for endpoints.\n self.assertEqual(svc.endpoint._port, aGoodPort)\n self.assertIs(svc.factory, aFactory)\n self.assertIs(svc.endpoint._reactor, reactor)",
"def start( self ):\n\n self.service()",
"def __init__(self, things, port=80, ssl_options=None):\n self.things = things\n self.name = things.get_name()\n self.port = port\n self.ip = get_ip()\n\n if isinstance(self.things, MultipleThings):\n log.info('Registering multiple things')\n for idx, thing in enumerate(self.things.get_things()):\n thing.set_href_prefix('/{}'.format(idx))\n thing.set_ws_href('{}://{}:{}/{}'.format(\n 'wss' if ssl_options is not None else 'ws',\n self.ip,\n self.port,\n idx))\n\n handlers = [\n (\n '/',\n 'GET',\n self.thingsGetHandler\n ),\n (\n '/<thing_id>',\n 'GET',\n self.thingGetHandler\n ),\n (\n '/<thing_id>/properties/<property_name>',\n 'GET',\n self.propertyGetHandler\n ),\n (\n '/<thing_id>/properties/<property_name>',\n 'PUT',\n self.propertyPutHandler\n ),\n ]\n else:\n log.info('Registering a single thing')\n self.things.get_thing(0).set_ws_href('{}://{}:{}'.format(\n 'wss' if ssl_options is not None else 'ws',\n self.ip,\n self.port))\n\n handlers = [\n (\n '/',\n 'GET',\n self.thingGetHandler\n ),\n (\n '/properties/<property_name>',\n 'GET',\n self.propertyGetHandler\n ),\n (\n '/properties/<property_name>',\n 'PUT',\n self.propertyPutHandler\n ),\n ]\n\n self.server = MicroWebSrv(webPath='/flash/www',\n routeHandlers=handlers,\n port=port)\n #self.srv.MaxWebSocketRecvLen = 256\n #self.WebSocketThreaded = ws_run_in_thread\n #self.srv.WebSocketStackSize = 4096\n #self.srv.AcceptWebSocketCallback = _acceptWebSocketCallback",
"def init(loop):\n tasks = JobsHandler()\n config = ConfigHandler()\n task = TaskHandler()\n\n\n\n app = web.Application(loop = loop)\n app.router.add_route('*', '/tasks/{do_something}', tasks.handle)\n app.router.add_route('*', '/config/{do_something}', config.handle)\n app.router.add_route('*', '/task/{id}/{do_something}', task.handle)\n\n handler = app.make_handler()\n srv = yield from loop.create_server(handler, '0.0.0.0', 8080)\n print(\"Server started at http://0.0.0.0:8080\")\n return srv, handler",
"def buildProtocol(self, addr):\n\treactor.callLater(1, self.timesync)\n return KinectServer(self)"
] | [
"0.6571904",
"0.64633",
"0.6196638",
"0.6151193",
"0.6000536",
"0.59042215",
"0.5902473",
"0.5842252",
"0.5804326",
"0.5796099",
"0.57555985",
"0.5744625",
"0.57371265",
"0.5727141",
"0.56836724",
"0.56834257",
"0.56805915",
"0.56765467",
"0.56684613",
"0.5661595",
"0.5650018",
"0.5646265",
"0.56255186",
"0.5624425",
"0.5599175",
"0.5596746",
"0.5578218",
"0.55767846",
"0.5553806",
"0.5550694"
] | 0.66206247 | 0 |
Wraps all registered interfaces in an InterfaceResource. This allows all interfaces to conform to the twisted.web2 API. | def wrap_interface(self, interface, **params):
return FunctionResource(self, interface, params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def register_resources(self):\n raise NotImplementedError",
"def interface(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"interface\"), kwargs)",
"def interfaces(self):",
"def interfaces(self):",
"def _get_interfaces(self):\n return self.__interfaces",
"def _get_interfaces(self):\n return self.__interfaces",
"def _get_interfaces(self):\n return self.__interfaces",
"def Interface(self):\n unpublishedMethods = (\"roots\", \"Interface\",\n \"WishIdBeenAGirlie\")\n methodType = type(self.Interface)\n ifList = []\n for i in dir(self):\n if (type(getattr(self, i)) == methodType\n and not i.startswith('_')):\n ifList.append(i)\n for i in unpublishedMethods:\n ifList.remove(i)\n return LumberjackInterface(ifList)",
"def get_interfaces(self):\n raise NotImplementedError",
"def _resource_factory(self, raw) -> ApiResource:\n raise NotImplemented",
"def ifaces(self, ifaces):\n \n self._ifaces = ifaces",
"def interface(self):\n\n data = ['[Interface]']\n for item in INTERFACE_KEYS:\n value = getattr(self, item, None)\n if value:\n data.append(value)\n\n return '''\n'''.join(data)",
"def register_resources(self, resources):\n from tw.api import merge_resources\n merge_resources(self.request_local.resources, resources)",
"def resource(self, prefix):\n def wrapper(cls):\n # Save the original init\n clsinit = getattr(cls, '__init__', lambda self: None)\n\n # Dirty trick, make the class belong to the type restful.Resource\n cls = type(cls.__name__, (Resource,), dict(cls.__dict__))\n\n aliases = getattr(cls, 'aliases', None)\n if isinstance(aliases, dict) and len(aliases) > 0:\n cls.preparer = FieldsPreparer(fields=aliases)\n\n # Rename self for using inside __init__\n api = self\n\n def __init__(self, *args, **kwargs):\n # Call Resource constructor\n super(cls, self).__init__(api)\n\n # Initialize the instance\n clsinit(self, *args, **kwargs)\n\n cls.__init__ = __init__\n\n # Add the resource to the API\n cls.add_url_rules(self.app, prefix)\n\n return cls\n\n return wrapper",
"def register_resources(self, resources):\n for resource in resources:\n self.register_resource(resource)",
"def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_interfaces__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_interfaces__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()",
"def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"interface_id\",yc_interface_openconfig_qos__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"interface_id\",yc_interface_openconfig_qos__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()",
"def interfaces(self):\n if self._interfaces is None:\n self._interfaces = list(x[\"interface\"] for x in self._interfaces_detailed_list())\n\n return self._interfaces",
"def interface(cls):\n return relationship.many_to_one(cls, 'interface')",
"def _set_interface(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_elements__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"interface must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"interface_id\",yc_interface_openconfig_qos_elements__qos_interfaces_interface, yang_name=\"interface\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-id', extensions=None), is_container='list', yang_name=\"interface\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__interface = t\n if hasattr(self, '_set'):\n self._set()",
"def Interface(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.interface_21173f93b01472152dbb5ea4d71785b0 import Interface\n return Interface(self)",
"def register_interface_magics(self):\n from sage.repl.interface_magic import InterfaceMagic\n InterfaceMagic.register_all(self.shell)",
"def _wrap(self, resource):\n\t\treturn ResourceWrapper(self, resource)",
"def interfaces(self, site_id, element_id, interface_id, data, tenant_id=None, api_version=\"v4.15\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/sites/{}/elements/{}/interfaces/{}\".format(api_version,\n tenant_id,\n site_id,\n element_id,\n interface_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)",
"def fusion_api_create_appliance_interfaces_payload(self, body=None, api=None):\n return self.interfaces.make_body(body, api)",
"def convert_interface(self, access_modifier, intr_name, interfaces):\n\n # Run super definition\n access_modifier, intr_name, interfaces = super().convert_interface(\n access_modifier, intr_name, interfaces\n )\n\n # Run class converter(interface and classes are the same in python)\n class_def = self.convert_class(\n access_modifier, intr_name, [], interfaces\n )[0]\n\n # Add class type to docstring to denote interface\n intr_def, doc_str = class_def[0], class_def[1:]\n intr_dnt = \"class type: interface\"\n\n # Add new line if docstring has separate text\n if doc_str:\n\n # Remove starting quotes\n quotes = doc_str[0][:3]\n doc_str[0] = doc_str[0][3:]\n\n # Insert text into first line\n doc_str.insert(0, quotes + intr_dnt)\n else:\n doc_str = ['\"\"\"' + intr_dnt + '\"\"\"']\n\n # Return processed interface definition\n return [intr_def] + doc_str, []",
"def __prepare__(interface, *args, **kwds):\n # Extract polymorphic parameters.\n dispatch_key = interface.__dispatch__(*args, **kwds)\n # Realize the interface.\n realization = interface.__realize__(dispatch_key)\n # Instantiate and return the realization.\n return realization(*args, **kwds)",
"def _wrap_Interface(self, expr):\n functions = [self.scope.functions[self._wrapper_names_dict[f.name]] for f in expr.functions]\n functions = [f for f in functions if not isinstance(f, EmptyNode)]\n return Interface(expr.name, functions, expr.is_argument)",
"def test_exposeInterfaces(self):\n if self.plugin is None:\n return\n\n cs = settings.Settings()\n results = self.plugin.exposeInterfaces(cs)\n if results is None or not results:\n return\n\n # each plugin should return a list\n self.assertIsInstance(results, list)\n for result in results:\n # Make sure that all elements in the list satisfy the constraints of the\n # hookspec\n self.assertIsInstance(result, tuple)\n self.assertEqual(len(result), 3)\n\n order, interface, kwargs = result\n\n self.assertIsInstance(order, (int, float))\n self.assertTrue(issubclass(interface, interfaces.Interface))\n self.assertIsInstance(kwargs, dict)",
"def attach_interface(func):\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n return Interface(self.session, self, func(self, *args, **kwargs))\n return wrapper"
] | [
"0.6046428",
"0.5889628",
"0.5802768",
"0.5802768",
"0.5750699",
"0.5750699",
"0.5750699",
"0.56934875",
"0.5583384",
"0.5552356",
"0.545261",
"0.54448354",
"0.54429495",
"0.54423016",
"0.54400826",
"0.5387046",
"0.5379664",
"0.5370704",
"0.5367093",
"0.5338333",
"0.5335844",
"0.5333972",
"0.5326647",
"0.5321391",
"0.5318117",
"0.531809",
"0.5304327",
"0.5299198",
"0.5290879",
"0.52575797"
] | 0.6628737 | 0 |
Render paths for operations, comparisons, and multiconditions | def test_path_comparator(renderer):
rating = Document.data["Rating"] > 0.5
no_body = Document.data["Description"]["Body"].is_(None)
stock = Document.data["Stock"].in_([1, 2, 3])
condition = (rating & no_body) | stock
expected = {
'ConditionExpression': (
'(((#n0.#n1 > :v2) AND (attribute_not_exists(#n0.#n3.#n4))) '
'OR (#n0.#n5 IN (:v6, :v7, :v8)))'),
'ExpressionAttributeValues': {
':v8': {'N': '3'}, ':v7': {'N': '2'},
':v6': {'N': '1'}, ':v2': {'N': '0.5'}},
'ExpressionAttributeNames': {
'#n0': 'data', '#n3': 'Description',
'#n5': 'Stock', '#n1': 'Rating', '#n4': 'Body'}}
renderer.render(condition, "condition")
assert renderer.rendered == expected | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_paths(self):\r\n print(\"------------------------\")\r\n print(\"######### ALL PATHS #########\")\r\n\r\n if self.size == 0:\r\n print(\"Empty tree!\")\r\n else:\r\n for i in range(1, self.root.size_tree + 1):\r\n node = self.select(i)\r\n if node.size_tree == 1:\r\n print(\"|\" + self.str_single_path(node))\r\n\r\n print(\"------------------------\")",
"def display_path(self, path):\n graph = path.graph\n if not graph:\n return\n for v in sorted(graph.vertices()):\n p = graph.get_vertex_attribute(v, 'xy')\n x, y = to_geometry(p[0]), to_geometry(p[1])\n print('define v{} ellipse 2 2 c_vertex {} {}'.format(v, x, y))\n #print('define v{0}t text {0} 14 white {1} {2}'.format(v, x, y))\n for u, v in graph.edges():\n print('define - link v{} v{} 1 c_edge'.format(u, v))\n # NOTE: this code assumes paths will not move indefinitely\n print('fix /./')",
"def operator_at_traversal_path(path, op):\n fmt_strs = [path[0]] + ['%s' for leaf in path[1:]]\n traversal = '->'.join(fmt_strs[:-1]) + '{op}%s'.format(op=op)\n return traversal",
"def render_path_visualisation(projectRoot, configName, prepared_paths):\n with open(os.path.join(os.path.dirname(__file__), 'assets/visualizer_template.html')) as file_:\n template = Template(file_.read())\n \n visuFileName = \"pathvisualizer_%s.html\" % configName.replace(\" \",\"\")\n visuFilePath = os.path.join(projectRoot, visuFileName)\n renderedTemplate = template.render(configName=configName, currentDate=datetime.datetime.now(), paths=prepared_paths)\n with open(visuFilePath,'w') as f:\n f.write(renderedTemplate)\n\n logger.info(\"Path visualized - find the file at %s\" % visuFilePath)",
"def path_entries(self):",
"def __repr__(self):\n\n return self.print_path([])",
"def render(self, **kwargs: tp.Any) -> Path:\n token = self.renderer.rendered(**kwargs)\n p = Path()\n\n if self.left:\n p = self.left.render(**kwargs)\n\n p = p / token\n\n if self.right:\n p = p / self.right.render(**kwargs)\n\n return p",
"def print_paths(self):\n for path_key, path_value in self.paths.items():\n # Handler for request in path\n self.current_path = path_key\n for request_key, request_value in path_value.items():\n if request_key == 'parameters':\n continue\n self.get_main_title(path_key, request_key)\n self.get_description(request_value)\n self.get_status_code_and_schema_rst(request_value['responses'])\n self.get_params(path_value['parameters'], 'param')\n self.get_params(request_value['parameters'], 'query')",
"def print_path(self):\n\n grid = tg.Graph.grid_graph(self.graph.rows,self.graph.cols)\n #tg.draw_grid(self.draw_edges_alt,self.graph.rows,self.graph.cols,grid)\n tg.draw_grid(self.edges,self.graph.rows,self.graph.cols,grid)",
"def path_conditions(self) -> [Exp]:\n raise NotImplementedError()",
"def path_condition(self) -> Exp:\n return EAll(self.path_conditions())",
"def __operations(self, conf):\n result = \"\"\"## Operations [back to top](#toc)\nThe operations that this API implements are:\n\"\"\"\n ops = \"\\n\"\n\n for op in conf[\"conf_json\"][1:]:\n params = []\n for p in findall(PARAM_NAME, op[\"url\"]):\n p_type = \"str\"\n p_shape = \".+\"\n if p in op:\n p_type, p_shape = findall(\"^\\s*([^\\(]+)\\((.+)\\)\\s*$\", op[p])[0]\n\n params.append(\n \"<em>%s</em>: type <em>%s</em>, regular expression shape <code>%s</code>\"\n % (p, p_type, p_shape)\n )\n result += \"\\n* [%s](#%s): %s\" % (\n op[\"url\"],\n op[\"url\"],\n op[\"description\"].split(\"\\n\")[0],\n )\n ops += \"\"\"<div id=\"%s\">\n<h3>%s <a href=\"#operations\">back to operations</a></h3>\n\n%s\n\n<p class=\"attr\"><strong>Accepted HTTP method(s)</strong> <span class=\"attr_val method\">%s</span></p>\n<p class=\"attr params\"><strong>Parameter(s)</strong> <span class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Result fields type</strong><span class=\"attr_val\">%s</span></p>\n<p class=\"attr\"><strong>Example</strong><span class=\"attr_val\"><a target=\"_blank\" href=\"%s\">%s</a></span></p>\n<p class=\"ex attr\"><strong>Exemplar output (in JSON)</strong></p>\n<pre><code>%s</code></pre></div>\"\"\" % (\n op[\"url\"],\n op[\"url\"],\n markdown(op[\"description\"]),\n \", \".join(split(\"\\s+\", op[\"method\"].strip())),\n \"</li><li>\".join(params),\n \", \".join(\n [\n \"%s <em>(%s)</em>\" % (f, t)\n for t, f in findall(FIELD_TYPE_RE, op[\"field_type\"])\n ]\n ),\n conf[\"website\"] + conf[\"base_url\"] + op[\"call\"],\n op[\"call\"],\n op[\"output_json\"],\n )\n return markdown(result) + ops",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def test_render_path(renderer):\n path = \"foo bar baz\".split()\n renderer.name_ref(User.email, path=path)\n expected = {'ExpressionAttributeNames':\n {'#n0': 'email', '#n3': 'baz', '#n2': 'bar', '#n1': 'foo'}}\n assert renderer.rendered == expected",
"def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, int):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result",
"def printPaths(graph, data):\n\n # Printing data related to the circuit\n print(f'Module name: {data[\"module_name\"]}')\n print('Input: ', end='')\n print(*data['input'], sep=', ')\n print('Output: ', end='')\n print(*data['output'], sep=', ')\n print('Wire: ', end='')\n print(*data['wire'], sep=', ', end='\\n\\n')\n\n # Printing the paths in the graphical version of the circuit\n print('All paths from input to output')\n for io in [[i, o] for i in data['input'] for o in data['output']]:\n for path in nx.all_simple_paths(graph, source=io[0], target=io[1]):\n print(*path, sep=' --> ')",
"def create_web_output_paths() -> None:\n create_path_and_index(\"\")\n create_path_and_index(\"photos/\")\n create_path_and_index(\"video/\")\n create_path_and_index(\"references/\")\n create_path_and_index(\"names/\")\n create_path_and_index(\"art/\")\n create_path_and_index(\"morphology/\")\n create_path_and_index(\"maps/\")\n create_path_and_index(\"images/\")\n create_path_and_index(\"images/flag-icon-css/\")\n create_path_and_index(\"images/flag-icon-css/css/\")\n create_path_and_index(\"images/flag-icon-css/flags/\")\n create_path_and_index(\"images/flag-icon-css/flags/4x3/\")\n create_path_and_index(\"locations/\")\n create_path_and_index(\"locations/keys/\")\n create_path_and_index(\"js/\")\n create_path_and_index(\"sizes/\")\n create_path_and_index(\"handedness/\")",
"def displaypath():\n\n import pathlib\n pth = pathlib.Path('./')\n pth.is_dir()\n pth.absolute()",
"def test_list_path(renderer):\n condition = Document.numbers[1] >= 3\n expected = {\n 'ExpressionAttributeValues': {':v1': {'N': '3'}},\n 'ConditionExpression': '(#n0[1] >= :v1)',\n 'ExpressionAttributeNames': {'#n0': 'numbers'}}\n renderer.render(condition, \"condition\")\n assert renderer.rendered == expected",
"def visualize(self):\n\n # Tools that will be displayed on the plots\n tools = \"pan,wheel_zoom,reset,save\"\n\n # Plot displaying the optimized path\n result_plot = figure(\n plot_width=1000,\n plot_height=500,\n tools=tools,\n active_scroll='wheel_zoom')\n result_plot.title.text = \"Optimized Path\"\n\n # Plot displaying the non optimized path\n initial_plot = figure(\n plot_width=1000,\n plot_height=500,\n tools=tools,\n active_scroll='wheel_zoom')\n initial_plot.title.text = \"Initial Path\"\n\n # Add the data to the result plot\n result_plot = self.populate_plot(result_plot, self.result)\n result_plot.legend.location = \"bottom_right\"\n\n # Add the data to the initial plot\n initial_plot = self.populate_plot(initial_plot, self.initial)\n initial_plot.legend.location = \"bottom_right\"\n\n # Add cutting tool to plots\n # Generate the points on which the triangle should move on\n result_lines_x, result_lines_y = self.generate_tool_path(self.result, 1)\n initial_lines_x, initial_lines_y = self.generate_tool_path(self.initial, 1)\n\n # Add cutting tool triangle to optimized path\n result_triangle_position = ColumnDataSource(\n data=dict(\n x=[result_lines_x[0]],\n y=[result_lines_y[0]]\n ))\n result_triangle = Triangle(\n x='x', y='y', line_color=Category10_4[3], line_width=3,\n size=20, fill_alpha=0\n )\n result_plot.add_glyph(result_triangle_position, result_triangle)\n\n # Add cutting tool triangle to initial path\n initial_triangle_position = ColumnDataSource(\n data=dict(\n x=[initial_lines_x[0]],\n y=[initial_lines_y[0]]\n ))\n initial_triangle = Triangle(\n x='x', y='y', line_color=Category10_4[3], line_width=3,\n size=20, fill_alpha=0\n )\n initial_plot.add_glyph(initial_triangle_position, initial_triangle)\n\n # Add button to start moving the triangle\n button = Button(label='Start')\n result_num_steps = result_lines_x.shape[0]\n initial_num_steps = initial_lines_x.shape[0]\n num_steps = max(result_num_steps, initial_num_steps)\n\n # JavaScript callback which will be called once the button is pressed\n callback = CustomJS(args=dict(\n result_triangle_position=result_triangle_position,\n result_lines_x=result_lines_x,\n result_lines_y=result_lines_y,\n result_num_steps=result_num_steps,\n initial_triangle_position=initial_triangle_position,\n initial_lines_x=initial_lines_x,\n initial_lines_y=initial_lines_y,\n initial_num_steps=initial_num_steps,\n num_steps=num_steps\n ),\n code=\"\"\"\n // Animate optimal path plot\n for(let i = 0; i < num_steps; i += 50) {\n setTimeout(function() {\n if (i < result_num_steps) {\n result_triangle_position.data['x'][0] = result_lines_x[i]\n result_triangle_position.data['y'][0] = result_lines_y[i]\n }\n\n if (i < initial_num_steps) {\n initial_triangle_position.data['x'][0] = initial_lines_x[i]\n initial_triangle_position.data['y'][0] = initial_lines_y[i]\n }\n\n result_triangle_position.change.emit()\n initial_triangle_position.change.emit()\n\n }, i)\n }\n \"\"\")\n # Add callback function to button, which starts the whole animation\n button.js_on_click(callback)\n\n # Save the plot\n result_plot = row([result_plot, button])\n plot = column([result_plot, initial_plot])\n output_file(\"visualization.html\", title=\"CNC Path Optimization\")\n save(plot)",
"def path_show(args):\n print(header(\"$PATH Components\"))\n loop_fmt = \"{pad}{color}{path}\"\n pad = 4\n\n cnt = 0\n for part in os.environ[\"PATH\"].split(\":\"):\n color = u\"\"\n if args.color:\n color = CODES[cnt]\n cnt = (cnt + 1) % len(CODES)\n\n print(loop_fmt.format(pad=pad * \" \", color=color, path=part))\n if args.nowarn:\n continue\n\n for warn in check_path_folder(part):\n print(\"{}X {}\".format(pad * 2 * \" \", warn))",
"def print_path(self, path, marks = []):\n\n result = ''\n\n for y in range(1, self.height + 1):\n for x in range(1, self.width + 1):\n # Draw top line\n if (x, y - 1) in self.get_reachables(x, y):\n result += '+ '\n else: result += '+--'\n\n result += '+\\n'\n\n for x in range(1, self.width + 1):\n # Draw horizontal passage\n if (x - 1, y) in self.get_reachables(x, y):\n result += ' '\n else: result += '|'\n\n\n if (x, y) in path:\n if (x, y) in path[-1:]:\n result += '(X'\n else: result += ' x'\n elif (x, y) in marks:\n result += ' #'\n else: result += ' '\n\n result += '|\\n'\n\n if y == self.height:\n for x in range(1, self.width + 1):\n # Draw bottom line\n result += '+--'\n\n return result + '+'",
"def __str__(self):\n return super().formatter(\"r301 '{oldPath}', '/{new}'\")",
"def test_name_ref_with_path(renderer, engine):\n class Model(bloop.new_base()):\n id = bloop.Column(bloop.Integer, hash_key=True, name='this.is.id')\n data = bloop.Column(DocumentType)\n engine.bind(base=Model)\n\n no_id = Model.id.is_(None)\n path_condition = Model.data[\"Rating\"] >= 2\n condition = no_id & path_condition\n\n expected = {\n 'ExpressionAttributeNames': {\n '#n0': 'this.is.id', '#n2': 'Rating', '#n1': 'data'},\n 'ExpressionAttributeValues': {':v3': {'N': '2'}},\n 'ConditionExpression':\n '((attribute_not_exists(#n0)) AND (#n1.#n2 >= :v3))'}\n renderer.render(condition, \"condition\")\n assert renderer.rendered == expected",
"def test_pathop12(self):\n xpb = XPathBuilder()\n # braces not needed\n xp = xpb.foo & (xpb.bar.foo).parenthesize() | xpb.foobar\n exp = '/foo and (/bar/foo) or /foobar'\n self.assertEqual(xp.tostring(), exp)",
"def run_path_visualisation(paths, config, modulesConfig):\n all_targets = [os.path.basename(config[s][\"target\"]) for s in config.sections]\n all_target_tasks = {os.path.basename(config[s][\"target\"]):s for s in config.sections}\n \n added_tasks = []\n prepared_paths = []\n for path in paths:\n prepared_tasks = []\n for idx, task in enumerate(list(reversed(path))):\n s_module, s_name, *identifier = task.split(\" \")\n\n # Special Rule For Join Module To Have A Connection To Another Module\n special_connection = False\n if s_module == \"processing_join\":\n args = config[task]\n con_module, con_name, *identifier = all_target_tasks.get(os.path.basename(args[\"joinwith\"]), s_module+\"_SPECIAL \"+s_name+\"_SPECIAL\").split(\" \")\n special_connection = {\n \"connection_to_module\" : con_module,\n \"connection_to_name\" : con_name,\n \"will_be_created\" : (os.path.basename(args[\"joinwith\"]) in all_targets)\n }\n\n prepared_tasks.append({\n 'module':s_module,\n 'name':s_name,\n 'display': (task not in added_tasks),\n 'specialConnection': special_connection,\n 'last': (idx == len(path) - 1),\n 'attributes': config[task]\n })\n added_tasks.append(task)\n prepared_paths.append(prepared_tasks)\n logger.debug(\"Path prepared for visualization!\")\n render_path_visualisation(config['projectRoot'], config['projectName'], prepared_paths)",
"def visualizeWithContents(self, paths):\n return ExpressString(\"A totally mystical rune.\")",
"def _pretty_path(path: Sequence[BaseField]) -> str:\n # pylint: disable=protected-access\n return \"< \" + \" -> \".join(f\"'{field._resolve_field_name()}' ({type(field).__name__})\" for field in path) + \" >\""
] | [
"0.587495",
"0.57889",
"0.56957084",
"0.5665316",
"0.5621274",
"0.5618509",
"0.5589289",
"0.54609245",
"0.5458783",
"0.5372383",
"0.5368813",
"0.53570604",
"0.53273654",
"0.53273654",
"0.53273654",
"0.5310065",
"0.52882737",
"0.52749777",
"0.52570295",
"0.5243752",
"0.523105",
"0.5213479",
"0.5199138",
"0.5196339",
"0.5184393",
"0.5181831",
"0.51437175",
"0.51346415",
"0.512679",
"0.5124598"
] | 0.581619 | 1 |
! Return a pair of GARC/GAFE pairs for a tile name \param tileName A string valid as a tile name \return (garcA,gafeA),(garcB,gafeB) or None if tileName isn't a valid tile name | def getTilePair(cls, tileName):
return ( TILENAMEMAP[tileName]['A'], TILENAMEMAP[tileName]['B'] ) if\
tileName in TILENAMEMAP else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getA(cls, tileName):\n return TILENAMEMAP[tileName]['A'] if tileName in TILENAMEMAP else None",
"def getMatchup(self, name):\n if self.atHome:\n return (name, self.opponent)\n else:\n return (self.opponent, name)",
"def bridges(species1_names, species2_names):\n k12 = filter(lambda s: re.search('K-12',s)!=None, species1_names)[0]\n return [(k12, species2_names[0]), (k12, species2_names[1]), (k12, species2_names[2])]",
"def get_tile(self, char):\n if char == \"#\":\n return self.tiles[0:32, 0:32]\n elif char == \"G\": # gates\n return self.tiles[8 * 32 : 9 * 32, 3 * 32 : 4 * 32] \n elif char == \"W\": # window\n return self.tiles[8 * 32 : 9 * 32, 4 * 32 : 5 * 32]\n elif char == \"C\": # checkout\n return self.tiles[2 * 32 : 3 * 32, 8 * 32 : 9 * 32]\n elif char == \"F\": # fruits\n return self.tiles[1 * 32 : 2 * 32, 4 * 32 : 5 * 32] \n elif char == \"S\": # spices\n return self.tiles[1 * 32 : 2 * 32, 3 * 32 : 4 * 32] \n elif char == \"R\": # dairy\n return self.tiles[8 * 32 : 9 * 32, 7 * 32 : 8 * 32] \n elif char == \"D\": # drinks\n return self.tiles[6 * 32 : 7 * 32, 13 * 32 : 14 * 32] \n elif char == \"c\": # customer/shopping cart\n return self.tiles[8 * 32 : 9 * 32, 6 * 32 : 7 * 32] \n else:\n return self.tiles[32:64, 64:96]",
"def playedMoves(self):\n #Ooof, how do I make it return the tile name? I need to have a method in tile for that.\n List=[]\n for item in [a1, a2, a3, b1, b2, b3, c1, c2, c3]:\n if item.retrieve()!=\"\":\n #List += item #This adds each letter separately...\n List.append((item.name(), item.retrieve())) \n return List",
"def get_gt_map(raster_map, gt_maps):\n\n for gt_m in gt_maps:\n map_name = ntpath.basename(raster_map).split(\".\")[0]\n gt_map_name = ntpath.basename(gt_m).split(\".\")[0].replace(\"_y\", \"\")\n\n if map_name == gt_map_name:\n logger.info(\"X: %s Y: %s\", map_name, gt_map_name)\n\n return gt_m\n\n logger.warning(\"Unable to get ground truth image for %s\", raster_map)\n\n return None",
"def parse(arg: Tuple[str, str, str, str, str]) -> Tuple[str, str, str]:\n return (arg[2], arg[3], arg[4])",
"def get_tuple(self, string):\n a = re.search('\\((\\d+\\.\\d+), (\\d+\\.\\d+)\\)', string)\n if not a:\n return None\n else:\n return (float(a.group(1)), float(a.group(2)))",
"def getB(cls, tileName):\n return TILENAMEMAP[tileName]['B'] if tileName in TILENAMEMAP else None",
"def tile_coordinates(text):\n UL = (text[1]), (text[2]) # Upper Left\n UR = (text[3]), (text[2]) # Upper Right\n LR = (text[3]), (text[4]) # Lower Right\n LL = (text[1]), (text[4]) # Lower Left\n coordinates = (UL, UR, LR, LL)\n return text[0], [tuple(float(x) for x in xs) for xs in coordinates]",
"def GetGeneName(arg):\n\n genbank = ChromUnzip(arg)\n \n p1=re.compile(r'(?:ACCESSION\\s+)(\\w+\\d+)')\n p6=re.compile(r'(?:/gene=\")(.+?)(?:\"\\s+)')\n\n gene_name_dict={}\n \n for entry in genbank:\n gene_list=[] \n gene_it_6=p6.finditer(entry)\n gene_it_1=p1.finditer(entry) \n for hit in gene_it_6:\n gene_list.append(hit.group(1))\n for item in gene_it_1:\n gene_name_dict[item.group(1)]=gene_list[0]\n \n return gene_name_dict",
"def triplet_to_rrggbb(rgbtuple):\n global _tripdict\n hexname = _tripdict.get(rgbtuple)\n if hexname is None:\n hexname = '#%02x%02x%02x' % rgbtuple\n _tripdict[rgbtuple] = hexname\n return hexname",
"def corr_naam(name):\n names = ((\"techtaak\", 'techtask'), (\"programma\", 'procproc'))\n for name1, name2 in names:\n if name == name1:\n return name2\n if name == name2:\n return name1\n return name",
"def make_Gagne18_BANYAN_any_DR2_crossmatch(\n tablepath,\n namestr=None,\n maxsep=10,\n outdir=datadir,\n homedir='/home/luke/'):\n assert type(namestr) == str\n t = Table.read(tablepath, format='ascii.cds')\n\n RAh, RAm, RAs = arr(t['RAh']), arr(t['RAm']), arr(t['RAs'])\n\n RA_hms = [str(rah).zfill(2)+'h'+\n str(ram).zfill(2)+'m'+\n str(ras).zfill(2)+'s'\n for rah,ram,ras in zip(RAh, RAm, RAs)]\n\n DEd, DEm, DEs = arr(t['DEd']),arr(t['DEm']),arr(t['DEs'])\n DEsign = arr(t['DE-'])\n DEsign[DEsign != '-'] = '+'\n\n DE_dms = [str(desgn)+\n str(ded).zfill(2)+'d'+\n str(dem).zfill(2)+'m'+\n str(des).zfill(2)+'s'\n for desgn,ded,dem,des in zip(DEsign, DEd, DEm, DEs)]\n\n coords = SkyCoord(ra=RA_hms, dec=DE_dms, frame='icrs')\n\n RA = coords.ra.value\n dec = coords.dec.value\n pm_RA, pm_dec = arr(t['pmRA']), arr(t['pmDE'])\n u_pm_RA, u_pm_dec = arr(t['e_pmRA']), arr(t['e_pmDE'])\n\n maxsep = (maxsep*u.arcsec).to(u.deg).value\n\n name = t['Main'] if 'XI_' in namestr else t['Name']\n assoc = t['Assoc']\n\n outfile = os.path.join(outdir,'gotmatches_{}.xml.gz'.format(namestr))\n xmltouploadpath = os.path.join(outdir,'toupload_{}.xml'.format(namestr))\n\n if os.path.exists(outfile):\n os.remove(outfile) # NOTE if it's fast, can just do this to overwrite\n if not os.path.exists(outfile):\n _ = make_votable_given_full_cols(name, assoc, RA, dec, pm_RA, pm_dec,\n u_pm_RA, u_pm_dec,\n outpath=xmltouploadpath)\n\n Gaia.login(credentials_file=os.path.join(homedir, '.gaia_credentials'))\n\n # separated less than 10 arcsec.\n jobstr = (\n '''\n SELECT TOP {ncut:d} u.name, u.assoc, u.ra, u.dec, u.pm_ra, u.pm_dec,\n u.err_pm_ra, u.err_pm_dec,\n g.source_id, DISTANCE(\n POINT('ICRS', u.ra, u.dec),\n POINT('ICRS', g.ra,g.dec)) AS dist,\n g.phot_g_mean_mag as gaia_gmag,\n g.pmra AS gaia_pmra,\n g.pmdec AS gaia_pmdec\n FROM tap_upload.foobar as u, gaiadr2.gaia_source AS g\n WHERE 1=CONTAINS(\n POINT('ICRS', u.ra, u.dec),\n CIRCLE('ICRS', g.ra, g.dec, {sep:.8f})\n )\n '''\n )\n maxncut = int(5*len(name)) # to avoid query timeout\n query = jobstr.format(sep=maxsep, ncut=maxncut)\n\n if not os.path.exists(outfile):\n # might do async if this times out. but it doesn't.\n j = Gaia.launch_job(query=query,\n upload_resource=xmltouploadpath,\n upload_table_name=\"foobar\", verbose=True,\n dump_to_file=True, output_file=outfile)\n\n Gaia.logout()\n\n vot = parse(outfile)\n tab = vot.get_first_table().to_table()\n\n if maxncut - len(tab) < 10:\n errmsg = 'ERROR! too many matches'\n raise AssertionError(errmsg)\n\n # if nonzero and finite proper motion, require Gaia pm match to sign\n # of stated Gagne PMs.\n df = tab.to_pandas()\n\n print('\\n'+42*'-')\n print('{} stars in original Gagne table'.format(len(t)))\n print('{} stars in sep < 10 arcsec xmatch'.format(len(df)))\n\n sel = (df['gaia_gmag'] < 18)\n print('{} stars in sep < 10 arcsec, G<18, xmatch'.format(len(df[sel])))\n\n sel &= (\n ( (df['pm_ra'] != 0 ) & (df['pm_dec'] != 0 ) &\n ( np.sign(df['pm_ra']) == np.sign(df['gaia_pmra']) ) &\n ( np.sign(df['pm_dec']) == np.sign(df['gaia_pmdec']) )\n )\n |\n (\n (df['pm_ra'] == 0 ) & (df['pm_dec'] == 0 )\n )\n )\n df = df[sel]\n print('{} stars in sep < 10 as xmatch, G<18, after pm cut (xor zero pm)'.\n format(len(df)))\n\n # make multiplicity column. then sort by name, then by distance. 
then drop\n # name duplicates, keeping the first (you have nearest neighbor saved!)\n _, inv, cnts = np.unique(df['name'], return_inverse=True,\n return_counts=True)\n\n df['n_in_nbhd'] = cnts[inv]\n\n df['name'] = df['name'].str.decode('utf-8')\n df['assoc'] = df['assoc'].str.decode('utf-8')\n\n df = df.sort_values(['name','dist'])\n\n df = df.drop_duplicates(subset='name', keep='first')\n\n df['source_id'] = df['source_id'].astype('int64')\n\n print('{} stars after above cuts + chosing nearest nbhr by spatial sep'.\n format(len(df)))\n\n outpath = os.path.join(outdir,'MATCHED_{}.csv'.format(namestr))\n df.to_csv(outpath, index=False)\n print('made {}'.format(outpath))\n print(79*'=')",
"def get_gtfs_field_tuple_from_table(table_name, gtfs_spec=None):\n if not gtfs_spec:\n gtfs_spec = settings.GTFS_SPEC\n choice_tuple = choice_tuple = (('',''),)\n for t in gtfs_spec['resources']:\n if t['name'] == table_name:\n for f in t['schema']['fields']:\n choice_tuple = choice_tuple + ((f['name'], f['name']),)\n return choice_tuple\n raise ValueError(\"Table name not found in GTFS spec.\")",
"def getGameState(self):\n ### Student code goes here\n row1 = ()\n row2 = ()\n row3 = ()\n for currRow in range(1,4):\n for currCol in range(1,4):\n tileFound = False\n for fact in self.kb.facts:\n if fact.statement.predicate == \"located\":\n tile = fact.statement.terms[0].term.element\n column = fact.statement.terms[1].term.element\n row = fact.statement.terms[2].term.element\n\n tileNumber = int(tile[-1])\n columnNumber = int(column[-1])\n rowNumber = int(row[-1])\n\n if rowNumber == currRow and columnNumber == currCol:\n tileFound = True\n if rowNumber == 1:\n row1 += tuple([tileNumber])\n elif rowNumber == 2:\n row2 += tuple([tileNumber])\n elif rowNumber == 3:\n row3 += tuple([tileNumber])\n \n break\n\n if not tileFound:\n if currRow == 1:\n row1 += tuple([-1])\n elif currRow == 2:\n row2 += tuple([-1])\n elif currRow == 3:\n row3 += tuple([-1])\n\n\n return (row1, row2, row3)",
"def getPair(self, args):\r\n return self.name, self.getValue(args)",
"def getNames(self, resname, atomname):\n rname = None\n aname = None\n if resname in self.map:\n res = self.map[resname]\n if res.hasAtom(atomname):\n atom = res.atoms[atomname]\n aname = atom.name\n rname = atom.resname\n return rname, aname",
"def eq2gal(ra, dec):\n gal=ephem.Galactic(ephem.Equatorial(ra, dec))\n\tgl=180.0*gal.long.real/math.pi\n\tgb=180.0*gal.lat.real/math.pi\n return (gl, gb)",
"def get_2pairs():\n\n done = 0\n while not done:\n r0 = int(random(GRID_CELLS))\n c0 = int(random(GRID_CELLS))\n\n r1 = int(random(GRID_CELLS))\n c1 = int(random(GRID_CELLS))\n done = 1\n\n if random(1) < 0.5:\n # move one cell right\n ra1 = r0 + 1\n rb1 = r1 + 1\n ra0, rb0 = r0, r1\n ca0, cb0 = c0, c1\n ca1, cb1 = c0, c1\n\n if ra1 >= GRID_CELLS or rb1 >= GRID_CELLS:\n done = 0\n else: # move down:\n ca1 = c0 + 1\n cb1 = c1 + 1\n ca0, cb0 = c0, c1\n ra0, rb0 = r0, r1\n ra1, rb1 = r0, r1\n if ca1 >= GRID_CELLS or cb1 >= GRID_CELLS:\n done = 0\n\n return [((ra0, ca0), (rb0, cb0)), ((ra1, ca1), (rb1, cb1))]",
"def parse_galcoord(l, b):\n try:\n if (re.search(r\"[^\\d.+\\-]\", l) is None) and (\n re.search(r\"[^\\d.+\\-]\", b) is None\n ):\n coord = SkyCoord(l, b, unit=\"deg\", frame=\"galactic\")\n else:\n coord = SkyCoord(l, b, frame=\"galactic\")\n except ValueError:\n log.error(\"Unable to parse input coordinates '{},{}'\".format(ra, dec))\n return None\n return coord",
"def partOne(tileList):\n\n blackTiles = set()\n directionsDict = {\n \"e\" : (0, 1),\n \"se\": (-0.5, 0.5),\n \"sw\": (-0.5, -0.5),\n \"w\": (0, -1),\n \"nw\": (0.5, -0.5),\n \"ne\": (0.5, 0.5),\n }\n\n for tile in tileList:\n\n #print(f\"tile: {tile}\")\n index = 0\n currentPosition = (0, 0)\n while index < len(tile):\n\n direction = tile[index]\n\n if direction in [\"s\", \"n\"]:\n index += 1\n direction += tile[index]\n\n #print(f\"Direction: {direction}\")\n currentPosition = tuple(map(add, currentPosition, directionsDict[direction]))\n index += 1\n #print(f\"currentPosition: {currentPosition}\")\n if currentPosition in blackTiles:\n blackTiles.remove(currentPosition)\n else:\n blackTiles.add(currentPosition)\n\n return blackTiles",
"def get_tile_names(aoi):\n # Read AHN grid polygons from API:\n ahn3_api = (\"https://opendata.arcgis.com/datasets\"\n \"/9039d4ec38ed444587c46f8689f0435e_0.geojson\")\n # AHN2_api = (\"https://opendata.arcgis.com/datasets\"\n # \"/6c898cd924c441d5aea33b3bc6cc117a_0.geojson'\")\n try:\n ahn3_gj = gpd.read_file(ahn3_api)\n except URLError:\n # Use backed-up version if API unavailable, might not be up-to-date\n print(\"Problems accessing AHN3 fishnet API. Using backup version.\")\n\n if basename(sys.argv[0]) == \"storm_main_test.py\":\n bup_dir = \".\\\\P1_Geometric\\\\P11_Data_Preparation\\\\anc_bup_files\\\\\"\n else:\n bup_dir = \".\\\\anc_bup_files\\\\\"\n ahn3_gj = gpd.read_file(join(bup_dir, \"ahn3.geojson\"))\n \n # Reproject AOI polygon to WGS84 (if AOI in different CRS):\n ahn_crs = CRS.from_string(ahn3_gj.crs['init']).to_epsg()\n aoi_crs = aoi.crs.to_epsg()\n if ahn_crs != aoi_crs:\n aoi_pr = aoi.to_crs(crs=ahn_crs).envelope\n else:\n aoi_pr = aoi.envelope\n\n # Create a list of tile names covered by the polygon:\n # ---------------------------------------------------\n tiles = {'tile_names': [],\n 'laz_url': [],\n 'dtm_url': [],\n 'ahn2_i': [],\n 'ahn2_r': []}\n for _, row in ahn3_gj.iterrows():\n ints_tile = aoi_pr.intersects(row.geometry)\n if ints_tile[0]:\n tiles['tile_names'].append(row['Kaartblad'])\n tiles['laz_url'].append(row['AHN3_LAZ'])\n tiles['dtm_url'].append(row['AHN3_05m_DTM'])\n tiles['ahn2_i'].append(row['ahn2_05m_i'])\n tiles['ahn2_r'].append(row['ahn2_05m_r'])\n return tiles",
"def get_tile_bitmap(self, char):\n if char == '#':\n return self.tiles[0:32, 0:32, :]\n elif char == 'b':\n return self.tiles[0:32, 128:160, :]\n elif char == 'd':\n return self.tiles[64:96, 128:160, :]\n elif char == 'w':\n return self.tiles[96:128, 128:160, :]\n elif char == 'a':\n return self.tiles[96:128, 160:192, :]\n elif char == 'q':\n return self.tiles[32:64, 128:160, :]\n elif char == 'p':\n return self.tiles[64:96, 192:224, :]\n elif char == 'x':\n return self.tiles[128:160, 128:160, :]\n elif char == 'y':\n return self.tiles[192:224, 96:128, :]\n elif char == 'z':\n return self.tiles[160:192, 96:128, :]\n elif char == 'm':\n return self.tiles[96:128, 224:256, :]\n elif char == 's':\n return self.tiles[32:64, 0:32, :]\n else:\n return self.tiles[32:64, 64:96, :]",
"def shortest_distance(puzzle_input: List[str], satellite_name_a: str, satellite_name_b: str) -> Tuple[int, str]:\n orbit_tree = make_tree(puzzle_input)\n\n distances_satellite_a = distance_to_objects(orbit_tree, satellite_name_a)\n\n distances_satellite_b = distance_to_objects(orbit_tree, satellite_name_b)\n\n # & gives the intersection between the sets of keys, leaving only the objects they both orbit directly/indirectly\n objects_in_common = set(distances_satellite_a.keys()) & set(distances_satellite_b.keys())\n distances = [\n # Sum of distance from satellite a, b to each object, object name\n (distances_satellite_a[obj] + distances_satellite_b[obj], obj)\n for obj in objects_in_common\n ]\n\n min_distance, satellite_name = min(distances)\n return min_distance, satellite_name",
"def name_mapper(tup):\n tup = tuple(ix2name[i] for i in tup)\n\n def s(t):\n return sorted(t, key=self.clade_order)\n left, right = sorted((s(tup[:2]), s(tup[2:])))\n return (*left, *right)",
"def create_tuple4(kmer1, kmer2, kmer1_neg, kmer2_neg):\n return Tuple4(\n ''.join(kmer1),\n ''.join(kmer2),\n ''.join(kmer1_neg),\n ''.join(kmer2_neg))",
"def parse_boarding_pass(boarding_pass: str, x_min=0, x_max=127, y_min=0, y_max=7) -> tuple:\n if not boarding_pass:\n return x_min, y_min\n\n command = boarding_pass[0]\n if command == 'F':\n x_max = int(x_max - ((x_max - x_min + 1) / 2))\n if command == 'B':\n x_min = int(x_min + ((x_max - x_min + 1) / 2))\n if command == 'L':\n y_max = int(y_max - ((y_max - y_min + 1) / 2))\n if command == 'R':\n y_min = int(y_min + ((y_max - y_min + 1) / 2))\n\n return parse_boarding_pass(boarding_pass[1:], x_min, x_max, y_min, y_max)",
"def ftile(self):\n try:\n ftile = \"_\".join([self[\"grid_name\"], self[\"tile_name\"]])\n except TypeError:\n ftile = None\n return ftile",
"def pick(ln, edge, get_edge):\n me = [x for x in edge_map[edge] if x != ln][0]\n mtile = tiles[me]\n for mtile in moves(mtile):\n if edge == get_edge(mtile):\n break\n return me, mtile"
] | [
"0.5906764",
"0.5495438",
"0.5047626",
"0.5033779",
"0.492645",
"0.49116278",
"0.4895632",
"0.4837358",
"0.48061812",
"0.4783528",
"0.47784486",
"0.47697797",
"0.47350457",
"0.47343346",
"0.4715393",
"0.46583953",
"0.46310627",
"0.46287656",
"0.4628159",
"0.4617784",
"0.46046603",
"0.46013236",
"0.4591048",
"0.45772314",
"0.4567225",
"0.45656666",
"0.45607063",
"0.4537056",
"0.45279244",
"0.4523118"
] | 0.71674925 | 0 |
! Return the GARC/GAFE pair on side A for a tile name \param tileName A string valid as a tile name \return (garcA,gafeA) or None if tileName isn't a valid tile name | def getA(cls, tileName):
return TILENAMEMAP[tileName]['A'] if tileName in TILENAMEMAP else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getTilePair(cls, tileName):\n return ( TILENAMEMAP[tileName]['A'], TILENAMEMAP[tileName]['B'] ) if\\\n tileName in TILENAMEMAP else None",
"def getMatchup(self, name):\n if self.atHome:\n return (name, self.opponent)\n else:\n return (self.opponent, name)",
"def getB(cls, tileName):\n return TILENAMEMAP[tileName]['B'] if tileName in TILENAMEMAP else None",
"def _spawn_aircraft() -> Tuple[float, float, float, str]:\n\n # Get aircraft coordinates.\n x = random.uniform(-CONTROL_ZONE_RADIUS, CONTROL_ZONE_RADIUS)\n y = math.sqrt(CONTROL_ZONE_RADIUS ** 2 - x ** 2)\n y = y if random.randint(0, 1) else -y\n\n ang = _get_ac_heading(x, y)\n\n return x, y, ang, \"A\"",
"def matchchar(self, a,b):\n assert len(a) == len(b) == 1\n if a=='a':\n if b=='a':\n return self.AA\n elif b=='t':\n return self.AT\n elif b=='c':\n return self.AC\n elif b=='g':\n return self.AG\n\n elif a=='t':\n if b=='t':\n return self.AA\n elif b=='a':\n return self.AT\n elif b=='c':\n return self.AG\n elif b=='g':\n return self.AC\n\n elif a=='g':\n if b=='g':\n return self.GG\n elif b=='t':\n return self.AC\n elif b=='c':\n return self.GC\n elif b=='a':\n return self.AG\n\n elif a=='c':\n if b=='c':\n return self.GG\n elif b=='a':\n return self.AC\n elif b=='g':\n return self.GC\n elif b=='t':\n return self.AG\n '''if a==b:\n return self.match\n else:\n return self.mismatch'''",
"def get_tile(self, char):\n if char == \"#\":\n return self.tiles[0:32, 0:32]\n elif char == \"G\": # gates\n return self.tiles[8 * 32 : 9 * 32, 3 * 32 : 4 * 32] \n elif char == \"W\": # window\n return self.tiles[8 * 32 : 9 * 32, 4 * 32 : 5 * 32]\n elif char == \"C\": # checkout\n return self.tiles[2 * 32 : 3 * 32, 8 * 32 : 9 * 32]\n elif char == \"F\": # fruits\n return self.tiles[1 * 32 : 2 * 32, 4 * 32 : 5 * 32] \n elif char == \"S\": # spices\n return self.tiles[1 * 32 : 2 * 32, 3 * 32 : 4 * 32] \n elif char == \"R\": # dairy\n return self.tiles[8 * 32 : 9 * 32, 7 * 32 : 8 * 32] \n elif char == \"D\": # drinks\n return self.tiles[6 * 32 : 7 * 32, 13 * 32 : 14 * 32] \n elif char == \"c\": # customer/shopping cart\n return self.tiles[8 * 32 : 9 * 32, 6 * 32 : 7 * 32] \n else:\n return self.tiles[32:64, 64:96]",
"def corr_naam(name):\n names = ((\"techtaak\", 'techtask'), (\"programma\", 'procproc'))\n for name1, name2 in names:\n if name == name1:\n return name2\n if name == name2:\n return name1\n return name",
"def _get_grounding_from_name(self):\n grounding_name = remove_article(self.grounding)\n\n for area_name, area in self.map.areas.iteritems():\n if grounding_name == area_name:\n grounding = area\n\n for object_name, object_ in self.map.objects.iteritems():\n if grounding_name == object_name:\n grounding = object_\n\n for cop_name, cop in self.map.cops.iteritems():\n if grounding_name == cop_name:\n grounding = cop\n break\n else:\n if grounding_name == 'Deckard':\n logging.debug(\"No grounding available for Deckard yet.\")\n return None\n\n try:\n grounding\n except NameError:\n logging.error(\"No grounding available for {}\".format(grounding_name))\n return None\n\n return grounding",
"def eq2gal(ra, dec):\n gal=ephem.Galactic(ephem.Equatorial(ra, dec))\n\tgl=180.0*gal.long.real/math.pi\n\tgb=180.0*gal.lat.real/math.pi\n return (gl, gb)",
"def parse_galcoord(l, b):\n try:\n if (re.search(r\"[^\\d.+\\-]\", l) is None) and (\n re.search(r\"[^\\d.+\\-]\", b) is None\n ):\n coord = SkyCoord(l, b, unit=\"deg\", frame=\"galactic\")\n else:\n coord = SkyCoord(l, b, frame=\"galactic\")\n except ValueError:\n log.error(\"Unable to parse input coordinates '{},{}'\".format(ra, dec))\n return None\n return coord",
"def parse_IAU_name(name):\n # First see if there is a source type acronym\n if diag:\n print \"parse_IAU_name: received\",name\n parts = name.split()\n if len(parts) == 1:\n designation = parts[0]\n elif len(parts) == 2:\n acronym, designation = parts\n else:\n raise(\"Invalid format: \"+name)\n # Now process the designation\n flag = designation[0].upper()\n if flag == \"G\":\n # Galactic coordinates\n longitude,latitude,sign = split_on_sign(name[1:])\n X = parse_decimal_angle(longitude)\n Y = parse_decimal_angle(latitude)\n elif flag == \"J\":\n # Julian epoch celestial coordinates\n ra,dec,sign = split_on_sign(name[1:])\n X = parse_sexagesimal_angle(ra)\n Y = parse_sexagesimal_angle(dec)\n elif flag == \"B\":\n # Besselian epoch celestial coordinates\n ra,dec,sign = split_on_sign(name[1:])\n X = parse_sexagesimal_angle(ra)\n Y = parse_sexagesimal_angle(dec)\n elif designation[0].isdigit():\n # This should be Besselian but who knows?\n # If it is Besselian there should be at least four digits in RA\n # otherwise it could be galactic\n x,y,sign = split_on_sign(name)\n if len(x) > 3:\n X = parse_sexagesimal_angle(x)\n Y = parse_sexagesimal_angle(y)\n flag = \"B\"\n else:\n X = parse_decimal_angle(x)\n Y = parse_decimal_angle(y)\n flag = \"G\"\n else:\n return \"?\",None,None\n if sign == \"-\":\n Y = -Y\n return flag,X,Y",
"def get_aa (tRNA):\n\tpass",
"def getASGbyName( self, metaModelName ):\r\n name = self.__sanitizeMetaModelName( metaModelName )\r\n if( self.__trackASG.has_key( name ) ):\r\n return self.__trackASG[ name ][0] \r\n return None",
"def get_gt_map(raster_map, gt_maps):\n\n for gt_m in gt_maps:\n map_name = ntpath.basename(raster_map).split(\".\")[0]\n gt_map_name = ntpath.basename(gt_m).split(\".\")[0].replace(\"_y\", \"\")\n\n if map_name == gt_map_name:\n logger.info(\"X: %s Y: %s\", map_name, gt_map_name)\n\n return gt_m\n\n logger.warning(\"Unable to get ground truth image for %s\", raster_map)\n\n return None",
"def get_tile_radec(tileid):\n tiles = io.load_tiles()\n if tileid in tiles['TILEID']:\n i = np.where(tiles['TILEID'] == tileid)[0][0]\n return tiles[i]['RA'], tiles[i]['DEC']\n else:\n return (0.0, 0.0)",
"def get_HA(HA_):\n HA, Self, HA_nt, HA_vt, HA_zt = 0, 0, 0, 0, 0\n if HA_ == \"HA\":\n HA = 1\n if HA_ == \"Self\":\n Self = 1\n if HA_ == \"nt\":\n HA_nt = 1\n if HA_ == \"vt\":\n HA_vt = 1\n if HA_ == \"zt\":\n HA_zt = 1\n\n return HA, Self, HA_nt, HA_vt, HA_zt",
"def calc_next_alt_loc_id(self, atom):\n if len(self) == 0:\n return \"A\"\n for alt_loc in string.uppercase:\n if not self.has_key(alt_loc):\n return alt_loc\n\n raise AtomOverwrite(\"exhausted availible alt_loc labels for \"+str(atom))",
"def find_asg(client, name):\n describe = client.describe_auto_scaling_groups()\n matches = []\n for row in describe['AutoScalingGroups']:\n _name = row['AutoScalingGroupName']\n if _name == name:\n matches.append((0, row))\n else:\n match = re.match(re.escape(name) + r'\\-([0-9]+)', _name)\n if match:\n ts = match.group(1)\n matches.append((ts, row))\n if len(matches) == 0:\n return None\n else:\n return sorted(matches, key=lambda x: x[0])[-1][1]",
"def base_pair(c):\n dna_complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}\n return dna_complement[c.upper()].lower() if c.upper() in dna_complement else 'unknown'",
"def _get_gid(name):\n if getgrnam is None or name is None:\n return None\n try:\n result = getgrnam(name)\n except KeyError:\n result = None\n if result is not None:\n return result[2]\n return None",
"def make_Gagne18_BANYAN_any_DR2_crossmatch(\n tablepath,\n namestr=None,\n maxsep=10,\n outdir=datadir,\n homedir='/home/luke/'):\n assert type(namestr) == str\n t = Table.read(tablepath, format='ascii.cds')\n\n RAh, RAm, RAs = arr(t['RAh']), arr(t['RAm']), arr(t['RAs'])\n\n RA_hms = [str(rah).zfill(2)+'h'+\n str(ram).zfill(2)+'m'+\n str(ras).zfill(2)+'s'\n for rah,ram,ras in zip(RAh, RAm, RAs)]\n\n DEd, DEm, DEs = arr(t['DEd']),arr(t['DEm']),arr(t['DEs'])\n DEsign = arr(t['DE-'])\n DEsign[DEsign != '-'] = '+'\n\n DE_dms = [str(desgn)+\n str(ded).zfill(2)+'d'+\n str(dem).zfill(2)+'m'+\n str(des).zfill(2)+'s'\n for desgn,ded,dem,des in zip(DEsign, DEd, DEm, DEs)]\n\n coords = SkyCoord(ra=RA_hms, dec=DE_dms, frame='icrs')\n\n RA = coords.ra.value\n dec = coords.dec.value\n pm_RA, pm_dec = arr(t['pmRA']), arr(t['pmDE'])\n u_pm_RA, u_pm_dec = arr(t['e_pmRA']), arr(t['e_pmDE'])\n\n maxsep = (maxsep*u.arcsec).to(u.deg).value\n\n name = t['Main'] if 'XI_' in namestr else t['Name']\n assoc = t['Assoc']\n\n outfile = os.path.join(outdir,'gotmatches_{}.xml.gz'.format(namestr))\n xmltouploadpath = os.path.join(outdir,'toupload_{}.xml'.format(namestr))\n\n if os.path.exists(outfile):\n os.remove(outfile) # NOTE if it's fast, can just do this to overwrite\n if not os.path.exists(outfile):\n _ = make_votable_given_full_cols(name, assoc, RA, dec, pm_RA, pm_dec,\n u_pm_RA, u_pm_dec,\n outpath=xmltouploadpath)\n\n Gaia.login(credentials_file=os.path.join(homedir, '.gaia_credentials'))\n\n # separated less than 10 arcsec.\n jobstr = (\n '''\n SELECT TOP {ncut:d} u.name, u.assoc, u.ra, u.dec, u.pm_ra, u.pm_dec,\n u.err_pm_ra, u.err_pm_dec,\n g.source_id, DISTANCE(\n POINT('ICRS', u.ra, u.dec),\n POINT('ICRS', g.ra,g.dec)) AS dist,\n g.phot_g_mean_mag as gaia_gmag,\n g.pmra AS gaia_pmra,\n g.pmdec AS gaia_pmdec\n FROM tap_upload.foobar as u, gaiadr2.gaia_source AS g\n WHERE 1=CONTAINS(\n POINT('ICRS', u.ra, u.dec),\n CIRCLE('ICRS', g.ra, g.dec, {sep:.8f})\n )\n '''\n )\n maxncut = int(5*len(name)) # to avoid query timeout\n query = jobstr.format(sep=maxsep, ncut=maxncut)\n\n if not os.path.exists(outfile):\n # might do async if this times out. but it doesn't.\n j = Gaia.launch_job(query=query,\n upload_resource=xmltouploadpath,\n upload_table_name=\"foobar\", verbose=True,\n dump_to_file=True, output_file=outfile)\n\n Gaia.logout()\n\n vot = parse(outfile)\n tab = vot.get_first_table().to_table()\n\n if maxncut - len(tab) < 10:\n errmsg = 'ERROR! too many matches'\n raise AssertionError(errmsg)\n\n # if nonzero and finite proper motion, require Gaia pm match to sign\n # of stated Gagne PMs.\n df = tab.to_pandas()\n\n print('\\n'+42*'-')\n print('{} stars in original Gagne table'.format(len(t)))\n print('{} stars in sep < 10 arcsec xmatch'.format(len(df)))\n\n sel = (df['gaia_gmag'] < 18)\n print('{} stars in sep < 10 arcsec, G<18, xmatch'.format(len(df[sel])))\n\n sel &= (\n ( (df['pm_ra'] != 0 ) & (df['pm_dec'] != 0 ) &\n ( np.sign(df['pm_ra']) == np.sign(df['gaia_pmra']) ) &\n ( np.sign(df['pm_dec']) == np.sign(df['gaia_pmdec']) )\n )\n |\n (\n (df['pm_ra'] == 0 ) & (df['pm_dec'] == 0 )\n )\n )\n df = df[sel]\n print('{} stars in sep < 10 as xmatch, G<18, after pm cut (xor zero pm)'.\n format(len(df)))\n\n # make multiplicity column. then sort by name, then by distance. 
then drop\n # name duplicates, keeping the first (you have nearest neighbor saved!)\n _, inv, cnts = np.unique(df['name'], return_inverse=True,\n return_counts=True)\n\n df['n_in_nbhd'] = cnts[inv]\n\n df['name'] = df['name'].str.decode('utf-8')\n df['assoc'] = df['assoc'].str.decode('utf-8')\n\n df = df.sort_values(['name','dist'])\n\n df = df.drop_duplicates(subset='name', keep='first')\n\n df['source_id'] = df['source_id'].astype('int64')\n\n print('{} stars after above cuts + chosing nearest nbhr by spatial sep'.\n format(len(df)))\n\n outpath = os.path.join(outdir,'MATCHED_{}.csv'.format(namestr))\n df.to_csv(outpath, index=False)\n print('made {}'.format(outpath))\n print(79*'=')",
"def shortest_distance(puzzle_input: List[str], satellite_name_a: str, satellite_name_b: str) -> Tuple[int, str]:\n orbit_tree = make_tree(puzzle_input)\n\n distances_satellite_a = distance_to_objects(orbit_tree, satellite_name_a)\n\n distances_satellite_b = distance_to_objects(orbit_tree, satellite_name_b)\n\n # & gives the intersection between the sets of keys, leaving only the objects they both orbit directly/indirectly\n objects_in_common = set(distances_satellite_a.keys()) & set(distances_satellite_b.keys())\n distances = [\n # Sum of distance from satellite a, b to each object, object name\n (distances_satellite_a[obj] + distances_satellite_b[obj], obj)\n for obj in objects_in_common\n ]\n\n min_distance, satellite_name = min(distances)\n return min_distance, satellite_name",
"def ftile(self):\n try:\n ftile = \"_\".join([self[\"grid_name\"], self[\"tile_name\"]])\n except TypeError:\n ftile = None\n return ftile",
"def get_gaia_data(ra, dec, search_radius=10.):\n # Get the positions of the Gaia sources\n c1 = SkyCoord(ra, dec, frame='icrs', unit='deg')\n # We are querying with a diameter as the radius, overfilling by 2x.\n from astroquery.vizier import Vizier\n Vizier.ROW_LIMIT = -1\n if args.DR2:\n gaia_cat, catID = \"I/345/gaia2\", \"DR2\"\n print('\\t --> Using Gaia DR2 as requested by user...')\n else:\n gaia_cat, catID = \"I/355/gaiadr3\", \"DR3\"\n\n result = Vizier.query_region(c1, catalog=[gaia_cat],\n radius=Angle(search_radius, \"arcsec\"))\n try:\n \tresult = result[gaia_cat]\n except:\n print('Not in Gaia '+catID+'. If you know the Gaia ID and Gmag, try the options --gid and --gmag.')\n print('Exiting without finishing...')\n sys.exit()\n\n no_targets_found_message = ValueError('Either no sources were found in the query region '\n 'or Vizier is unavailable')\n too_few_found_message = ValueError('No sources found closer than 1 arcsec to TPF coordinates')\n if result is None:\n raise no_targets_found_message\n elif len(result) == 0:\n raise too_few_found_message\n\n if len(result)>1:\n dist = np.sqrt((result['RA_ICRS']-ra)**2 + (result['DE_ICRS']-dec)**2)\n idx = np.where(dist == np.min(dist))[0][0]\n return result[idx]['Source'], result[idx]['Gmag']\n else:\n return result[0]['Source'], result[0]['Gmag']",
"def get_atom(self, name, alt_loc = None):\n if alt_loc:\n if self.alt_loc_dict.has_key(name):\n altloc = self.alt_loc_dict[name]\n if altloc.has_key(alt_loc):\n return altloc[alt_loc]\n return None\n else:\n if not self.atom_dict.has_key(name):\n return None\n return self.atom_dict[name]",
"def getMAXASA(s=None):\r\n MAX_ACC={}\r\n MAX_ACC[\"ALA\"]=106.0\r\n MAX_ACC[\"CYS\"]=135.0\r\n MAX_ACC[\"ASP\"]=163.0\r\n MAX_ACC[\"GLU\"]=194.0\r\n MAX_ACC[\"PHE\"]=197.0\r\n MAX_ACC[\"GLY\"]=84.0\r\n MAX_ACC[\"HIS\"]=184.0\r\n MAX_ACC[\"ILE\"]=169.0\r\n MAX_ACC[\"LYS\"]=205.0\r\n MAX_ACC[\"LEU\"]=164.0\r\n MAX_ACC[\"MET\"]=188.0\r\n MAX_ACC[\"ASN\"]=157.0\r\n MAX_ACC[\"PRO\"]=136.0\r\n MAX_ACC[\"GLN\"]=198.0\r\n MAX_ACC[\"ARG\"]=248.0\r\n MAX_ACC[\"SER\"]=130.0\r\n MAX_ACC[\"THR\"]=142.0\r\n MAX_ACC[\"VAL\"]=142.0\r\n MAX_ACC[\"TRP\"]=227.0\r\n MAX_ACC[\"TYR\"]=222.0\r\n if s is not None and s is 'single': \r\n for k in MAX_ACC.keys():\r\n MAX_ACC[to_one_letter_code[k]]=MAX_ACC[k]\r\n return MAX_ACC",
"def gmt_element(bitmap_a, bitmap_b, sig_array, bitmap_to_linear_mapping):\n output_sign = canonical_reordering_sign(bitmap_a, bitmap_b, sig_array)\n output_bitmap = bitmap_a^bitmap_b\n idx = bitmap_to_linear_mapping[output_bitmap]\n return idx, output_sign",
"def get_complement(nucleotide):\n\t# TODO: implement this\n\tletter = str(nucleotide)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# set letter = parameter (make sure it's a string)\n\tif letter == 'A':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is A\n\t\treturn 'T'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return T\n\telif letter == 'T':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is T\n\t\treturn 'A'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return A\n\telif letter == 'G':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is G\n\t\treturn 'C'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return C\n\telif letter == 'C':\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# check if letter is C\n\t\treturn 'G'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# return G\n\telse:\n\t\treturn None",
"def get_by_name(self, name: str) -> Tuple[str, str]:\n name_bytes = str_to_bytes_pad(name, MAX_NAME_LEN)\n r = self.dev.apdu_exchange(0x05, name_bytes)\n login = bytes_to_str(r[:32])\n password = bytes_to_str(r[32:32+64])\n return (login, password)",
"def parse_reference_element(element: ET.Element) -> Tuple[Optional[str], str]:\n name_attribute = element.attrib[\"name\"]\n if \"#\" in name_attribute:\n scenario_id, name = name_attribute.split(\"#\", 1)\n return scenario_id, name\n else:\n return None, name_attribute"
] | [
"0.71642846",
"0.5385321",
"0.52549493",
"0.5067009",
"0.5058756",
"0.4986926",
"0.4952591",
"0.4868478",
"0.475587",
"0.47441566",
"0.47377697",
"0.47101924",
"0.47093934",
"0.46922797",
"0.46634054",
"0.4657869",
"0.46430093",
"0.4626833",
"0.46260536",
"0.46144578",
"0.45757553",
"0.45723367",
"0.4517889",
"0.45061073",
"0.45040435",
"0.44975644",
"0.44777256",
"0.44772318",
"0.44694233",
"0.44649357"
] | 0.7107519 | 1 |
! Return the GARC/GAFE pair on side B for a tile name \param tileName A string valid as a tile name \return (garcB,gafeB) or None if tileName isn't a valid tile name | def getB(cls, tileName):
return TILENAMEMAP[tileName]['B'] if tileName in TILENAMEMAP else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getTilePair(cls, tileName):\n return ( TILENAMEMAP[tileName]['A'], TILENAMEMAP[tileName]['B'] ) if\\\n tileName in TILENAMEMAP else None",
"def getA(cls, tileName):\n return TILENAMEMAP[tileName]['A'] if tileName in TILENAMEMAP else None",
"def m_get(state,b1,b2):\n if b2 == 'hand' and state.clear[b1] and state.holding['hand'] == False:\n if state.pos[b1] == 'table':\n return [('pickup',b1)]\n else:\n return [('unstack',b1,state.pos[b1])]\n else:\n return False",
"def getMatchup(self, name):\n if self.atHome:\n return (name, self.opponent)\n else:\n return (self.opponent, name)",
"def bridges(species1_names, species2_names):\n k12 = filter(lambda s: re.search('K-12',s)!=None, species1_names)[0]\n return [(k12, species2_names[0]), (k12, species2_names[1]), (k12, species2_names[2])]",
"def get_gt_map(raster_map, gt_maps):\n\n for gt_m in gt_maps:\n map_name = ntpath.basename(raster_map).split(\".\")[0]\n gt_map_name = ntpath.basename(gt_m).split(\".\")[0].replace(\"_y\", \"\")\n\n if map_name == gt_map_name:\n logger.info(\"X: %s Y: %s\", map_name, gt_map_name)\n\n return gt_m\n\n logger.warning(\"Unable to get ground truth image for %s\", raster_map)\n\n return None",
"def findBorehole(strg, fld_name, bh_name):\r\n\r\n #print(fld_name, bh_name)\r\n\r\n reg_fld = strg.getRegHelper().getFieldRegistry()\r\n fld = reg_fld.findByName(fld_name)\r\n if fld is None:\r\n return None\r\n\r\n reg_bh = strg.getRegHelper().getBoreholeRegistry()\r\n bh = reg_bh.findByNameAndField(fld.getID(), bh_name)\r\n\r\n return bh",
"def make_Gagne18_BANYAN_any_DR2_crossmatch(\n tablepath,\n namestr=None,\n maxsep=10,\n outdir=datadir,\n homedir='/home/luke/'):\n assert type(namestr) == str\n t = Table.read(tablepath, format='ascii.cds')\n\n RAh, RAm, RAs = arr(t['RAh']), arr(t['RAm']), arr(t['RAs'])\n\n RA_hms = [str(rah).zfill(2)+'h'+\n str(ram).zfill(2)+'m'+\n str(ras).zfill(2)+'s'\n for rah,ram,ras in zip(RAh, RAm, RAs)]\n\n DEd, DEm, DEs = arr(t['DEd']),arr(t['DEm']),arr(t['DEs'])\n DEsign = arr(t['DE-'])\n DEsign[DEsign != '-'] = '+'\n\n DE_dms = [str(desgn)+\n str(ded).zfill(2)+'d'+\n str(dem).zfill(2)+'m'+\n str(des).zfill(2)+'s'\n for desgn,ded,dem,des in zip(DEsign, DEd, DEm, DEs)]\n\n coords = SkyCoord(ra=RA_hms, dec=DE_dms, frame='icrs')\n\n RA = coords.ra.value\n dec = coords.dec.value\n pm_RA, pm_dec = arr(t['pmRA']), arr(t['pmDE'])\n u_pm_RA, u_pm_dec = arr(t['e_pmRA']), arr(t['e_pmDE'])\n\n maxsep = (maxsep*u.arcsec).to(u.deg).value\n\n name = t['Main'] if 'XI_' in namestr else t['Name']\n assoc = t['Assoc']\n\n outfile = os.path.join(outdir,'gotmatches_{}.xml.gz'.format(namestr))\n xmltouploadpath = os.path.join(outdir,'toupload_{}.xml'.format(namestr))\n\n if os.path.exists(outfile):\n os.remove(outfile) # NOTE if it's fast, can just do this to overwrite\n if not os.path.exists(outfile):\n _ = make_votable_given_full_cols(name, assoc, RA, dec, pm_RA, pm_dec,\n u_pm_RA, u_pm_dec,\n outpath=xmltouploadpath)\n\n Gaia.login(credentials_file=os.path.join(homedir, '.gaia_credentials'))\n\n # separated less than 10 arcsec.\n jobstr = (\n '''\n SELECT TOP {ncut:d} u.name, u.assoc, u.ra, u.dec, u.pm_ra, u.pm_dec,\n u.err_pm_ra, u.err_pm_dec,\n g.source_id, DISTANCE(\n POINT('ICRS', u.ra, u.dec),\n POINT('ICRS', g.ra,g.dec)) AS dist,\n g.phot_g_mean_mag as gaia_gmag,\n g.pmra AS gaia_pmra,\n g.pmdec AS gaia_pmdec\n FROM tap_upload.foobar as u, gaiadr2.gaia_source AS g\n WHERE 1=CONTAINS(\n POINT('ICRS', u.ra, u.dec),\n CIRCLE('ICRS', g.ra, g.dec, {sep:.8f})\n )\n '''\n )\n maxncut = int(5*len(name)) # to avoid query timeout\n query = jobstr.format(sep=maxsep, ncut=maxncut)\n\n if not os.path.exists(outfile):\n # might do async if this times out. but it doesn't.\n j = Gaia.launch_job(query=query,\n upload_resource=xmltouploadpath,\n upload_table_name=\"foobar\", verbose=True,\n dump_to_file=True, output_file=outfile)\n\n Gaia.logout()\n\n vot = parse(outfile)\n tab = vot.get_first_table().to_table()\n\n if maxncut - len(tab) < 10:\n errmsg = 'ERROR! too many matches'\n raise AssertionError(errmsg)\n\n # if nonzero and finite proper motion, require Gaia pm match to sign\n # of stated Gagne PMs.\n df = tab.to_pandas()\n\n print('\\n'+42*'-')\n print('{} stars in original Gagne table'.format(len(t)))\n print('{} stars in sep < 10 arcsec xmatch'.format(len(df)))\n\n sel = (df['gaia_gmag'] < 18)\n print('{} stars in sep < 10 arcsec, G<18, xmatch'.format(len(df[sel])))\n\n sel &= (\n ( (df['pm_ra'] != 0 ) & (df['pm_dec'] != 0 ) &\n ( np.sign(df['pm_ra']) == np.sign(df['gaia_pmra']) ) &\n ( np.sign(df['pm_dec']) == np.sign(df['gaia_pmdec']) )\n )\n |\n (\n (df['pm_ra'] == 0 ) & (df['pm_dec'] == 0 )\n )\n )\n df = df[sel]\n print('{} stars in sep < 10 as xmatch, G<18, after pm cut (xor zero pm)'.\n format(len(df)))\n\n # make multiplicity column. then sort by name, then by distance. 
then drop\n # name duplicates, keeping the first (you have nearest neighbor saved!)\n _, inv, cnts = np.unique(df['name'], return_inverse=True,\n return_counts=True)\n\n df['n_in_nbhd'] = cnts[inv]\n\n df['name'] = df['name'].str.decode('utf-8')\n df['assoc'] = df['assoc'].str.decode('utf-8')\n\n df = df.sort_values(['name','dist'])\n\n df = df.drop_duplicates(subset='name', keep='first')\n\n df['source_id'] = df['source_id'].astype('int64')\n\n print('{} stars after above cuts + chosing nearest nbhr by spatial sep'.\n format(len(df)))\n\n outpath = os.path.join(outdir,'MATCHED_{}.csv'.format(namestr))\n df.to_csv(outpath, index=False)\n print('made {}'.format(outpath))\n print(79*'=')",
"def gmt_element(bitmap_a, bitmap_b, sig_array, bitmap_to_linear_mapping):\n output_sign = canonical_reordering_sign(bitmap_a, bitmap_b, sig_array)\n output_bitmap = bitmap_a^bitmap_b\n idx = bitmap_to_linear_mapping[output_bitmap]\n return idx, output_sign",
"def get_tile(self, char):\n if char == \"#\":\n return self.tiles[0:32, 0:32]\n elif char == \"G\": # gates\n return self.tiles[8 * 32 : 9 * 32, 3 * 32 : 4 * 32] \n elif char == \"W\": # window\n return self.tiles[8 * 32 : 9 * 32, 4 * 32 : 5 * 32]\n elif char == \"C\": # checkout\n return self.tiles[2 * 32 : 3 * 32, 8 * 32 : 9 * 32]\n elif char == \"F\": # fruits\n return self.tiles[1 * 32 : 2 * 32, 4 * 32 : 5 * 32] \n elif char == \"S\": # spices\n return self.tiles[1 * 32 : 2 * 32, 3 * 32 : 4 * 32] \n elif char == \"R\": # dairy\n return self.tiles[8 * 32 : 9 * 32, 7 * 32 : 8 * 32] \n elif char == \"D\": # drinks\n return self.tiles[6 * 32 : 7 * 32, 13 * 32 : 14 * 32] \n elif char == \"c\": # customer/shopping cart\n return self.tiles[8 * 32 : 9 * 32, 6 * 32 : 7 * 32] \n else:\n return self.tiles[32:64, 64:96]",
"def get_g(r, g, b):\n\n color = Color(r, g, b)\n return color.get_g()",
"def pick(ln, edge, get_edge):\n me = [x for x in edge_map[edge] if x != ln][0]\n mtile = tiles[me]\n for mtile in moves(mtile):\n if edge == get_edge(mtile):\n break\n return me, mtile",
"def _get_gid(name):\n if getgrnam is None or name is None:\n return None\n try:\n result = getgrnam(name)\n except KeyError:\n result = None\n if result is not None:\n return result[2]\n return None",
"def _get_grounding_from_name(self):\n grounding_name = remove_article(self.grounding)\n\n for area_name, area in self.map.areas.iteritems():\n if grounding_name == area_name:\n grounding = area\n\n for object_name, object_ in self.map.objects.iteritems():\n if grounding_name == object_name:\n grounding = object_\n\n for cop_name, cop in self.map.cops.iteritems():\n if grounding_name == cop_name:\n grounding = cop\n break\n else:\n if grounding_name == 'Deckard':\n logging.debug(\"No grounding available for Deckard yet.\")\n return None\n\n try:\n grounding\n except NameError:\n logging.error(\"No grounding available for {}\".format(grounding_name))\n return None\n\n return grounding",
"def get( self, b1, b2 ):\n\t\t\n\t\tsim = self.database_sim[ self.unique_biz_id[b1] ][ self.unique_biz_id[b2] ]\n\t\tnsup = self.database_sup[ self.unique_biz_id[b1] ][ self.unique_biz_id[b2] ]\n\t\treturn ( sim, nsup )",
"def condensate_abovedew(Bg, Bgi, Gp, Gpi):\n Eg = Bg - Bgi\n F = Bg * (Gp - Gpi)\n return(F, Eg)",
"def parse_galcoord(l, b):\n try:\n if (re.search(r\"[^\\d.+\\-]\", l) is None) and (\n re.search(r\"[^\\d.+\\-]\", b) is None\n ):\n coord = SkyCoord(l, b, unit=\"deg\", frame=\"galactic\")\n else:\n coord = SkyCoord(l, b, frame=\"galactic\")\n except ValueError:\n log.error(\"Unable to parse input coordinates '{},{}'\".format(ra, dec))\n return None\n return coord",
"def triplet_to_rrggbb(rgbtuple):\n global _tripdict\n hexname = _tripdict.get(rgbtuple)\n if hexname is None:\n hexname = '#%02x%02x%02x' % rgbtuple\n _tripdict[rgbtuple] = hexname\n return hexname",
"def get(self, r, g, b):\n\n return self.map[(r << 2 * self.bits) + (g << self.bits) + b]",
"def get_tile_bitmap(self, char):\n if char == '#':\n return self.tiles[0:32, 0:32, :]\n elif char == 'b':\n return self.tiles[0:32, 128:160, :]\n elif char == 'd':\n return self.tiles[64:96, 128:160, :]\n elif char == 'w':\n return self.tiles[96:128, 128:160, :]\n elif char == 'a':\n return self.tiles[96:128, 160:192, :]\n elif char == 'q':\n return self.tiles[32:64, 128:160, :]\n elif char == 'p':\n return self.tiles[64:96, 192:224, :]\n elif char == 'x':\n return self.tiles[128:160, 128:160, :]\n elif char == 'y':\n return self.tiles[192:224, 96:128, :]\n elif char == 'z':\n return self.tiles[160:192, 96:128, :]\n elif char == 'm':\n return self.tiles[96:128, 224:256, :]\n elif char == 's':\n return self.tiles[32:64, 0:32, :]\n else:\n return self.tiles[32:64, 64:96, :]",
"async def get_split(self, name):\n\n # Generates list\n splits_list = await self._get_all_splits()\n # Grabs index based on fuzzy search match of provided name\n index = await self._get_name_index(name, splits_list)\n # If player index is found, returns split. If not found, returns None\n if index == -1:\n return None\n else:\n return splits_list[index][1], splits_list[index][0]",
"def naive_celeb(g):\n n = len(g)\n for u in range(n):\n for v in range(n):\n if u == v: continue\n if g[u][v]: break\n if not g[v][u]: break\n else:\n return u\n return None",
"def get_blue():\n # return name of actor, grazing speed, self defense\n return 'Piggy', 2",
"def battle(first, second):\n\n print(get_catchphrase(first))\n print(get_catchphrase(second))\n\n if get_damage(second) > get_damage(first):\n return second\n else:\n return first",
"def ECABG(first_piece, second_piece, s, position):\n rows, columns, _ = first_piece.shape()\n value = None\n if position == 'L':\n value = 0.5 * (epxl(first_piece, s, columns - 1, 'D') - epxl(first_piece, s, columns - 2, 'D')\n + epxl(second_piece, s, 1, 'D') - epxl(second_piece, s, 0, 'D'))\n elif position == 'R':\n value = 0.5 * (epxl(first_piece, s, columns - 2, 'D') - epxl(first_piece, s, columns - 1, 'D')\n + epxl(second_piece, s, 0, 'D') - epxl(second_piece, s, 1, 'D'))\n elif position == 'T':\n value = 0.5 * (epxl(first_piece, columns - 1, s, 'H') - epxl(first_piece, columns - 2, s, 'H')\n + epxl(second_piece, 1, s, 'H') - epxl(second_piece, 0, s, 'H'))\n elif position == 'D':\n value = 0.5 * (epxl(first_piece, columns - 2, s, 'H') - epxl(first_piece, columns - 1, s, 'H')\n + epxl(second_piece, 0, s, 'H') - epxl(second_piece, 1, s, 'H'))\n return value",
"def get_wire_in_tile_from_pin_name(conn, tile_type_str, wire_str):\n # Find the generic wire_in_tile_pkey for the specified tile_type name and\n # wire name.\n c = conn.cursor()\n\n # Find if this tile_type is a split tile.\n c.execute(\n \"\"\"\nSELECT\n site_pkey\nFROM\n site_as_tile\nWHERE\n parent_tile_type_pkey = (\n SELECT\n pkey\n FROM\n tile_type\n WHERE\n name = ?\n );\n \"\"\", (tile_type_str, )\n )\n result = c.fetchone()\n wire_is_pin = result is not None\n\n if wire_is_pin:\n # This tile is a split tile, lookup for the wire_in_tile_pkey is based\n # on the site pin name, rather than the wire name.\n site_pkey = result[0]\n c.execute(\n \"\"\"\nSELECT\n pkey,\n site_pin_pkey,\n site_pkey\nFROM\n wire_in_tile\nWHERE\n site_pin_pkey = (\n SELECT\n pkey\n FROM\n site_pin\n WHERE\n site_type_pkey = (\n SELECT\n site_type_pkey\n FROM\n site\n WHERE\n pkey = ?\n )\n AND name = ?\n );\"\"\", (site_pkey, wire_str)\n )\n else:\n c.execute(\n \"\"\"\nSELECT\n pkey,\n site_pin_pkey,\n site_pkey\nFROM\n wire_in_tile\nWHERE\n name = ?\n and tile_type_pkey = (\n SELECT\n pkey\n FROM\n tile_type\n WHERE\n name = ?\n );\n\"\"\", (wire_str, tile_type_str)\n )\n\n wire_in_tile_pkeys = {}\n the_site_pin_pkey = None\n for wire_in_tile_pkey, site_pin_pkey, site_pkey in c:\n wire_in_tile_pkeys[site_pkey] = wire_in_tile_pkey\n\n if the_site_pin_pkey is not None:\n assert the_site_pin_pkey == site_pin_pkey, (\n tile_type_str, wire_str\n )\n else:\n the_site_pin_pkey = site_pin_pkey\n\n assert the_site_pin_pkey is not None, (tile_type_str, wire_str)\n\n return wire_in_tile_pkeys, the_site_pin_pkey",
"def get_cell(self, x, y):\n if y < 0 or y >= len(self.g): return None\n if x < 0 or x >= len(self.g[y]): return None\n return self.g[y][x]",
"def Gd():\n Pz=[8]\n Pp=[1,1]\n return Pz, Pp",
"def matchchar(self, a,b):\n assert len(a) == len(b) == 1\n if a=='a':\n if b=='a':\n return self.AA\n elif b=='t':\n return self.AT\n elif b=='c':\n return self.AC\n elif b=='g':\n return self.AG\n\n elif a=='t':\n if b=='t':\n return self.AA\n elif b=='a':\n return self.AT\n elif b=='c':\n return self.AG\n elif b=='g':\n return self.AC\n\n elif a=='g':\n if b=='g':\n return self.GG\n elif b=='t':\n return self.AC\n elif b=='c':\n return self.GC\n elif b=='a':\n return self.AG\n\n elif a=='c':\n if b=='c':\n return self.GG\n elif b=='a':\n return self.AC\n elif b=='g':\n return self.GC\n elif b=='t':\n return self.AG\n '''if a==b:\n return self.match\n else:\n return self.mismatch'''",
"def get_edge(self, node1, node2):\n assert node1 in self.nodes, \"No node \"+str(node1)+\" in graph \"+str(self)\n assert node2 in self.nodes, \"No node \"+str(node2)+\" in graph \"+str(self)\n node_names = ( node1, node2 )\n for edge in self.edges:\n if ((edge.node1, edge.node2) == node_names or \n (edge.node2, edge.node1) == node_names):\n return edge\n return None"
] | [
"0.7218316",
"0.58944786",
"0.52915806",
"0.51511645",
"0.5137124",
"0.5098144",
"0.5010399",
"0.49642134",
"0.4842259",
"0.4833628",
"0.48233297",
"0.48232582",
"0.48086935",
"0.47859505",
"0.47837585",
"0.47637662",
"0.47624096",
"0.4733318",
"0.47020528",
"0.46869817",
"0.4634261",
"0.46331272",
"0.46061474",
"0.45973817",
"0.4557355",
"0.45250666",
"0.45173022",
"0.45120114",
"0.45104283",
"0.44925028"
] | 0.6829084 | 1 |
! Return the canonical tile number for a tile name \param tileName A string valid as a tile name \return integer tile number or None if tileName isn't a valid tile name | def number(cls, tileName):
return TILENAMEMAP[tileName]['Number'] if tileName in TILENAMEMAP else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def findTile(self, tileImage):\n str = tileImage.tostring()\n if str in self.TileDict:\n return self.TileDict[str] + 1\n else:\n return 0",
"def getTilePair(cls, tileName):\n return ( TILENAMEMAP[tileName]['A'], TILENAMEMAP[tileName]['B'] ) if\\\n tileName in TILENAMEMAP else None",
"def tile_name(string):\n for expr in REGEX.values():\n match = re.search(expr, string)\n if match:\n return match.group()\n\n raise ValueError(\"Tile name identier not detected in string\")",
"def tile_to_index(name, northing_origin, easting_origin):\n if not validate_name(name):\n raise ValueError(\"Invalid tile name\")\n\n tile = _parse_name(name)\n\n # round origin to nearest multiple of tile unit\n easting_rounded = round(easting_origin / tile.size) * tile.size\n northing_rounded = round(northing_origin / tile.size) * tile.size\n\n idx = (tile.easting - easting_rounded) / tile.size\n idy = (northing_rounded - tile.northing) / tile.size\n\n return idy, idx",
"def getA(cls, tileName):\n return TILENAMEMAP[tileName]['A'] if tileName in TILENAMEMAP else None",
"def name_to_number(name):\n if (name == 'rock' or name == 'Rock'):\n return 0\n elif (name == 'Spock' or name == 'spock'):\n return 1\n elif (name == 'paper' or name == 'Paper'):\n return 2\n elif (name == 'lizard' or name == 'Lizard'):\n return 3\n elif (name == 'scissors' or name == 'Scissors'):\n return 4\n else:\n return -1",
"def get_image_index(name: str):\n base_name = os.path.basename(name)\n nums = pattern.findall(base_name)\n if len(nums) != num_count:\n raise BaseException(f\"can't exact index from the string: {name}\")\n return float(nums[num_sort_index])",
"def _toTileNum(self, x, y, transpose=False):\n # TIFFCheckTile and TIFFComputeTile require pixel coordinates\n if not transpose:\n pixelX = int(x * self._tileWidth)\n pixelY = int(y * self._tileHeight)\n if x < 0 or y < 0 or pixelX >= self._imageWidth or pixelY >= self._imageHeight:\n raise InvalidOperationTiffError(\n 'Tile x=%d, y=%d does not exist' % (x, y))\n else:\n pixelX = int(x * self._tileHeight)\n pixelY = int(y * self._tileWidth)\n if x < 0 or y < 0 or pixelX >= self._imageHeight or pixelY >= self._imageWidth:\n raise InvalidOperationTiffError(\n 'Tile x=%d, y=%d does not exist' % (x, y))\n # We had been using TIFFCheckTile, but with z=0 and sample=0, this is\n # just a check that x, y is within the image\n # if libtiff_ctypes.libtiff.TIFFCheckTile(\n # self._tiffFile, pixelX, pixelY, 0, 0) == 0:\n # raise InvalidOperationTiffError(\n # 'Tile x=%d, y=%d does not exist' % (x, y))\n if self._tiffInfo.get('istiled'):\n tileNum = libtiff_ctypes.libtiff.TIFFComputeTile(\n self._tiffFile, pixelX, pixelY, 0, 0).value\n else:\n # TIFFComputeStrip with sample=0 is just the row divided by the\n # strip height\n tileNum = int(pixelY // self._stripHeight)\n return tileNum",
"def tile_id(self):\n return self._tile_id",
"def name_to_number(name):\n if name == \"rock\":\n number = 0\n elif name == \"Spock\":\n number = 1\n elif name == \"paper\":\n number = 2\n elif name == \"lizard\":\n number = 3\n elif name == \"scissors\":\n number = 4\n else:\n print \"Name is invalid!\"\n return 1\n return number",
"def name_to_number(name):\n\n # A simple if/elif/else game...\n\n if name == \"rock\":\n number = 0\n elif name == \"Spock\":\n number = 1\n elif name == \"paper\":\n number = 2\n elif name == \"lizard\":\n number = 3\n else:\n number = 4\n return number",
"def _name2idx(name):\n match = re.search(r\"eth(\\d+)\", name, re.I)\n if not match:\n raise exception.CloudbaseInitException(\n \"invalid NetworkDetails name {!r}\"\n .format(name)\n )\n return int(match.group(1))",
"def file_name_to_radar_number(prediction_file_name):\n\n error_checking.assert_is_string(prediction_file_name)\n pathless_file_name = os.path.split(prediction_file_name)[-1]\n radar_word = pathless_file_name.split('.')[0].split('_')[-1]\n\n if 'radar' not in radar_word:\n return None\n\n return int(radar_word.replace('radar', ''))",
"def getNumberFromName(self, idx):\n\t\tfile = self.all_file_names[idx]\n\t\tnumber = file[4]\n\t\tif file[5].isdigit(): \n\t\t\tnumber += file[5]\n\t\treturn int(number)",
"def name_to_id(player_name):\n # This is fairly unsophisticated, just does a CRC32 on the name. Can be\n # optimized both for compute requirements and collision frequency using\n # another hashing algorithm.\n return binascii.crc32(player_name) & 0xFFFFFFFF",
"def _parse_name(string):\n\n name = tile_name(string)\n\n if not validate_name(name):\n raise ValueError(\"Not a valid tile name!\")\n\n (unit, northing, easting) = name.split(\"_\")\n northing = _enlarge_ordinate(northing, unit)\n easting = _enlarge_ordinate(easting, unit)\n size = TILE_SIZES[unit]\n\n return TileInfo(northing, easting, size, unit)",
"def atomic_number(name):\n try:\n return symbols.index(name.capitalize()) + 1\n except ValueError:\n return lower_names.index(name.lower()) + 1",
"def score_name(name: Text) -> Optional[int]:\n\n # Accept both ways of naming the manifest asset, even though\n # there's no longer a reason to include the commit sha.\n if name.startswith(\"MANIFEST-\") or name.startswith(\"MANIFEST.\"):\n if zstandard and name.endswith(\"json.zst\"):\n return 1\n if name.endswith(\".json.bz2\"):\n return 2\n if name.endswith(\".json.gz\"):\n return 3\n return None",
"def _natural_sort_worksheet(x):\n l = re.findall(r\"\\d+$\", x.title)\n if l:\n return int(l[0])\n\n return -1",
"def get_num_from_file(file_name):\n basename = file_name.partition('.')[0]\n first, second = basename.split('_')\n num = second.replace(\"genome\", '')\n num = num[1:]\n return int(num)",
"def get_layer_index(self, layer_name):\n for i, layer in enumerate(self.tmx_data.layers):\n if layer.name == layer_name:\n return i\n return -1",
"def decode_tilename(self, tilename):\n tf = self.core.tile_ysize_m // 100000\n\n # allow short-form of tilename (e.g. \"E012N018T6\")\n if len(tilename) == 10:\n tile_size_m = int(tilename[-1]) * 100000\n if tile_size_m != self.core.tile_xsize_m:\n raise ValueError(self.msg1)\n llx = int(tilename[1:4])\n if llx % tf:\n raise ValueError(self.msg2)\n lly = int(tilename[5:8])\n if lly % tf:\n raise ValueError(self.msg2)\n tilecode = tilename[-2:]\n if tilecode != self.core.tiletype:\n raise ValueError(self.msg1)\n subgrid_id = self.core.tag\n sampling = self.core.sampling\n\n # allow long-form of tilename (e.g. \"EU500M_E012N018T6\")\n elif len(tilename) == 17:\n subgrid_id = tilename[0:2]\n if subgrid_id != self.core.tag:\n raise ValueError(self.msg1)\n sampling = Equi7Grid.decode_sampling(tilename[2:5])\n if sampling != self.core.sampling:\n raise ValueError(self.msg1)\n tile_size_m = int(tilename[-1]) * 100000\n if tile_size_m != self.core.tile_xsize_m:\n raise ValueError(self.msg1)\n llx = int(tilename[8:11])\n if llx % tf:\n raise ValueError(self.msg2)\n lly = int(tilename[12:15])\n if lly % tf:\n raise ValueError(self.msg2)\n tilecode = tilename[-2:]\n if tilecode != self.core.tiletype:\n raise ValueError(self.msg1)\n\n # wrong length\n else:\n raise ValueError(self.msg1)\n\n return subgrid_id, sampling, tile_size_m, llx * 100000, lly * 100000, tilecode",
"def get_layer_uid(layer_name=''):\n if layer_name not in _LAYER_UIDS:\n _LAYER_UIDS[layer_name] = 1\n return 1\n else:\n _LAYER_UIDS[layer_name] += 1\n return _LAYER_UIDS[layer_name]",
"def get_layer_uid(layer_name=''):\n if layer_name not in _LAYER_UIDS:\n _LAYER_UIDS[layer_name] = 1\n return 1\n else:\n _LAYER_UIDS[layer_name] += 1\n return _LAYER_UIDS[layer_name]",
"def get_layer_uid(layer_name=''):\n if layer_name not in _LAYER_UIDS:\n _LAYER_UIDS[layer_name] = 1\n return 1\n else:\n _LAYER_UIDS[layer_name] += 1\n return _LAYER_UIDS[layer_name]",
"def get_layer_uid(layer_name=''):\r\n if layer_name not in _LAYER_UIDS:\r\n _LAYER_UIDS[layer_name] = 1\r\n return 1\r\n else:\r\n _LAYER_UIDS[layer_name] += 1\r\n return _LAYER_UIDS[layer_name]",
"def find_TPnum(self, stateName):\n try:\n stateNum = self.stateDict[stateName]\n except:\n raise KeyError(\"find_TPnum: StateName not found!!\")\n return stateNum",
"def name_to_number(name):\r\n \r\n if name == \"rock\":\r\n return 0\r\n elif name == \"Spock\":\r\n return 1\r\n elif name == \"paper\":\r\n return 2\r\n elif name == \"lizard\":\r\n return 3\r\n elif name == \"scissors\":\r\n return 4\r\n else:\r\n return \"Invalid!Enter any one of the following: rock,Spock,paper,lizard,scissors\"",
"def get_canonical_identifier(self):\n if self.hash_func:\n return self.hash_func(self.mol)\n return self.smiles",
"def regToInt(name):\n match = re.match(r\"r([0-9]+)\", name)\n if match:\n index = int(match.group(1))\n if 0 <= index <= 15:\n return index\n raise AsmException(\"incorrect register %s\" % name)"
] | [
"0.61193204",
"0.5630647",
"0.56111467",
"0.5555727",
"0.54503417",
"0.5445518",
"0.5364563",
"0.5335817",
"0.5333355",
"0.5324129",
"0.53230995",
"0.5309463",
"0.5303459",
"0.5206147",
"0.5185187",
"0.51694244",
"0.51635987",
"0.51442856",
"0.5140779",
"0.5137861",
"0.5107929",
"0.5103241",
"0.51007193",
"0.51007193",
"0.51007193",
"0.50713676",
"0.5050156",
"0.5019819",
"0.4993395",
"0.49912938"
] | 0.7870066 | 0 |
! Return a sorted list of tile names | def getTileNames(cls):
return sorted(TILENAMEMAP.keys()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tile_data_sorted(self) -> List[TileData]:\n return sorted(\n self._tiles.values(),\n key=lambda x: x.octree_chunk.location.level_index,\n reverse=True,\n )",
"def sort_name(sprite):\n return sprite.name",
"def sorted_gnames():\n return sorted(group_names.keys())",
"def getNames():\n imgs = Image.objects.raw({})\n ans = []\n for img in imgs:\n ans.append(img.name)\n ans.sort()\n return ans",
"def sorted_nodes_names(self):\n return [nd.name for nd in self._sorted_nodes]",
"def tiles_by_score(self):\n sorted_list = sorted(self.tiles, key=lambda t: t.score, reverse=True)\n return sorted_list",
"def playedMoves(self):\n #Ooof, how do I make it return the tile name? I need to have a method in tile for that.\n List=[]\n for item in [a1, a2, a3, b1, b2, b3, c1, c2, c3]:\n if item.retrieve()!=\"\":\n #List += item #This adds each letter separately...\n List.append((item.name(), item.retrieve())) \n return List",
"def tiles_by_tissue_percentage(self):\n sorted_list = sorted(self.tiles, key=lambda t: t.tissue_percentage, reverse=True)\n return sorted_list",
"def get_flagged_tile_list ( self ) :\n tile_list = []\n stmt = \"select name from sdb_product where sys003 =\\'T\\'\"\n self.oracle_cursor.arraysize = 100000\n self.oracle_cursor.execute(stmt)\n resultset = self.oracle_cursor.fetchmany()\n if resultset :\n for row in resultset :\n tile_list.append(str(row[0]))\n return tile_list",
"def get_tile_names(aoi, data_type):\n # Read fishnet from URL:\n fishnet_url = (\"http://gis.arso.gov.si/\"\n \"related/lidar_porocila/lidar_fishnet_D96TM.zip\")\n try:\n fishnet = gpd.read_file(fishnet_url)\n except HTTPError:\n # Use backed-up version if API unavailable, might not be up-to-date\n print(\"Problems accessing Fishnet from web...\")\n print(\"Using local (backup) version instead.\")\n fishnet = gpd.read_file(\".\\\\P1_Geometric\\\\P11_Data_Preparation\"\n \"\\\\anc_bup_files\\\\SI\\\\LIDAR_FISHNET_D96.shp\")\n\n # Reproject AOI polygon local CRS if needed (EPSG 3794):\n net_crs = CRS.from_string(fishnet.crs['init']).to_epsg()\n aoi_crs = aoi.crs.to_epsg()\n if aoi_crs != net_crs:\n aoi_pr = aoi.to_crs(crs=net_crs).envelope\n else:\n aoi_pr = aoi.envelope\n\n # Create a list of tile names covered by the polygon:\n if data_type == \"DTM\":\n url_main = \"http://gis.arso.gov.si/lidar/dmr1/\"\n url_mid = \"/D96TM/TM1_\"\n url_ext = \".asc\"\n elif data_type == \"LAZ\":\n url_main = \"http://gis.arso.gov.si/lidar/gkot/laz/\"\n url_mid = \"/D96TM/TM_\"\n url_ext = \".laz\"\n else:\n raise ValueError(f\"Unrecognized data type '{data_type}' dwn_SI.py.\")\n tiles = []\n for _, row in fishnet.iterrows():\n ints_tile = aoi_pr.intersects(row.geometry)\n if ints_tile[0]:\n tiles.append(url_main\n + row['BLOK']\n + url_mid\n + row['NAME']\n + url_ext)\n return tiles",
"def meters_names(self):\n return sorted(self.meters.keys())",
"def get_tiles():\n\t\t\n\tcursor = get_cursor()\n\t\n\tcursor.execute(\"SELECT * FROM fitmeimages ORDER BY shade ASC, id ASC\")\n\treturn cursor.fetchall();",
"def tile_list(tilefile):\n\t\n\ttf=file(tilefile,\"r\")\n\t\n\ttd=pickle.load(tf)\n\n\ttf.close()\n\treturn td",
"def readTiles(self):\n TileImage = Image.open(self.Filename).convert(\"RGB\")\n TileIW, TileIH = TileImage.size\n TilesetW, TilesetH = TileIW // self.TileWidth, TileIH // self.TileHeight\n\n for y in range(TilesetH):\n for x in range(TilesetW):\n box = self.TileWidth * x, self.TileHeight * y, self.TileWidth * (x+1), self.TileHeight * (y+1)\n tile = TileImage.crop(box)\n self.List.append(tile)\n\n str = tile.tostring()\n if not str in self.TileDict:\n #print(\"add tile: \", str)\n self.TileDict[str] = len(self.List) - 1\n print(\"tile count: {}, unique count: {}\".format(len(self.List),len(self.TileDict.values())))",
"def names(self) -> list[str]:",
"def all_machines():\n return sorted(MACHINES, key=str)",
"def order_2d(tiles):\n\n sorted_vals = sorted(tiles, key=lambda x: x[0][1])\n grouped = groupby(sorted_vals, lambda x: x[0][1])\n\n return [sorted(vals, key=lambda x: x[0][0]) for _, vals in grouped]",
"def get_ordered_names(self):\n nodes = self.get_ordered_nodes()\n return [node.name for node in nodes if node in self.leaves]",
"def getNames(self) -> List[unicode]:\n ...",
"def monomers(self):\n return sorted(set([self[x.split(\"_\")[-1]][\"name\"] for x in self.keys]), key=lambda x: -len(x))",
"def tile_set():\n TILES = {\n \"ocean\":\"~\"\n ,\"rock\":\"R\"\n ,\"mountain\":\"M\"\n ,\"player\":\"X\"\n ,\"end\":\"⋆\"\n ,\"npc\":\"I\"\n ,\"cave\":\"C\"\n ,\"dirt\":\"+\"\n ,\"sign\":\"!\"\n }\n\n return TILES",
"def tank_name_list(self):\n return list(self._node_reg.tank_names)",
"def get_names(source):\n names = [row[\"name\"] for row in source]\n return sorted(names)",
"def mapping_names(self):\n return sorted([self.basename] + [name for selector in self.selections.normal_values() for name in selector.mapping_names()])",
"def nameList(self, excludeFileInfo=False):\n names = self.keys()\n if excludeFileInfo and nodeformat.FileInfoFormat.name in self:\n names.remove(nodeformat.FileInfoFormat.name)\n names.sort()\n return names",
"def get_sorted_img_list():\n dirPath=settings.BASE_DIR\n imgdir=\"/pttWeb/static/topicmodel\"\n fileID=glob.glob(dirPath+imgdir+\"/*.png\")\n fileID=[i.replace('/home/stream/Documents/minimum_django/pttWeb/static/','') for i in fileID]\n fileID=[Week_Image(i) for i in fileID]\n fileID.sort(key=lambda x: x.date, reverse=True)\n #translate . to / since javascript parsing date has some issue!\n fileID=[(i.filename,date_trans_z(i.date.strftime(\"%Y.%m.%d\"))) for i in fileID]\n return fileID",
"def names(self) -> List:\n ...",
"def get_list(self):\n return sorted(self.__entries.keys())",
"def tileTypes(self):\n types = []\n\n for type_ in getAllUtilitiesRegisteredFor(ITileType):\n if checkPermission(type_.add_permission, self.context):\n types.append(type_)\n\n types.sort(self.sortKey)\n return types",
"def tank_names(self):\n return self._tanks"
] | [
"0.68089956",
"0.64828146",
"0.63991916",
"0.6382159",
"0.6380592",
"0.63060194",
"0.62003714",
"0.616663",
"0.6013075",
"0.59970295",
"0.5937122",
"0.5923884",
"0.5878749",
"0.58711106",
"0.585727",
"0.5845981",
"0.5824051",
"0.5812356",
"0.57984525",
"0.5756947",
"0.57562923",
"0.5725014",
"0.5723808",
"0.57104206",
"0.5700128",
"0.56961834",
"0.56955725",
"0.5684123",
"0.5682659",
"0.56727624"
] | 0.86915565 | 0 |
transforms date in human format to date used in sql query | def sql_date(date):
return "to_date('{}', 'dd.mm.yyyy')".format(date) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def transform_date(date):\n if type(date) == str:\n return date\n\n formatted_string = date.strftime(\"%d/%m/%Y\")\n\n return formatted_string",
"def convert_date(date):\n\n if len(date) > 10: date = date[:date.rfind(\"-\")]\n return convf(date)",
"def convert_date(raw_date):\n if raw_date:\n date = datetime.strptime(raw_date, \"%Y-%m-%d\")\n return date.strftime(\"%m/%d/%YZ\")",
"def sas_date_converter(row, base_date='1960-01-01'):\n if row is None:\n return row\n return datetime.strptime(base_date, '%Y-%m-%d') + timedelta(int(row))",
"def convert_date_column(datestring):\n return datetime.datetime.strptime(datestring.strip(), \"%b-%Y\").date()",
"def format_date(self, date_val):\n try:\n if type(date_val) is not datetime:\n d = date.fromisoformat(date_val[0:10])\n else:\n d = date_val\n return d.strftime('%Y-%m-%d')\n except Exception as e:\n self.error((str(e)))",
"def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x",
"def convert(date):\n converted_date = datetime.datetime.strptime(date, \n \"%Y-%m-%d\").date()\n return converted_date",
"def reformatdate(self, date):\n# print('DATE', self.__str__())\n if 'dummy' in date:\n return '1970_01_01'\n# datesplit = date.split('/')\n datesplit = date.split('-') # Really? This had to be changed?!\n# print('DATE', date, datesplit)\n\n # dates used to be as follows\n# month = datesplit[0]\n# day = datesplit[1]\n# year = datesplit[2]\n\n # dates as of 12 June 2018 now done this way\n year = datesplit[0]\n month = datesplit[1]\n day = datesplit[2]\n\n return year + '_' + month + '_' + day",
"def convert(v):\n\n if type(v) is str and rexp.match(v):\n return as_date(v)\n return v",
"def convertDate(indate):\n a = datetime.datetime.fromtimestamp(indate / 1000.0)\n a_str = a.strftime('%m/%d/%y')\n return datetime.datetime.strptime(a_str, '%m/%d/%y').date()",
"def convert_eu_to_us_date(date):\n dt = datetime.strptime(date, \"%d/%m/%Y\")\n return dt.strftime(\"%m/%d/%Y\")",
"def convert_date(adate):\n\tprint \"date given: \" + adate\n\t# stuff\n\tprint \"epoch time for date: \"",
"def convertSODate(datenum):\n #Date numbers seem to start with 0 = 2001-01-01\n base_date = datetime.date(2001, 1, 1)\n #add key from the spot on object to this base date to get the date\n record_date = base_date + datetime.timedelta(days=int(datenum))\n record_date = record_date.isoformat()\n return record_date",
"def preprocess_date(date_):\n if 'JAN' in date_:\n date_ = date_.replace('JAN', '01')\n elif 'FEB' in date_:\n date_ = date_.replace('FEB', '02')\n elif 'MAR' in date_:\n date_ = date_.replace('MAR', '03')\n elif 'APR' in date_:\n date_ = date_.replace('APR', '04')\n elif 'MAY' in date_:\n date_ = date_.replace('MAY', '05')\n elif 'JUN' in date_:\n date_ = date_.replace('JUN', '06')\n elif 'JUL' in date_:\n date_ = date_.replace('JUL', '07')\n elif 'AUG' in date_:\n date_ = date_.replace('AUG', '08')\n elif 'SEP' in date_:\n date_ = date_.replace('SEP', '09')\n elif 'OCT' in date_:\n date_ = date_.replace('OCT', '10')\n elif 'NON' in date_:\n date_ = date_.replace('NON', '11')\n elif 'DEC' in date_:\n date_ = date_.replace('DEC', '12')\n if date_[-2:] > '17':\n date_ = date_[:6] + '19' + date_[-2:]\n else:\n date_ = date_[:6] + '20' + date_[-2:]\n return datetime.strptime(date_, '%d-%m-%Y')",
"def unify_date_format(date):\n if type(date) == str:\n try:\n date = dateutil.parser.parse(date) \n except:\n pass\n return date",
"def get_date(date):\n return date",
"def date_to_operate_format(self, date):\n date = date.replace(\" \", \"\")\n date = date.split(',')\n day = date[1]\n month = date[2]\n\n day = self.check_and_repair_right_format(day)\n month = self.check_and_repair_right_format(month)\n\n right_format = date[0] + month + day\n return right_format",
"def interpret_date( text ):\n try:\n as_arrow = arrow.get(text, \"MM/DD/YYYY\").replace(\n tzinfo=tz.tzlocal())\n except:\n flask.flash(\"Date '{}' didn't fit expected format 12/31/2001\")\n raise\n return as_arrow.isoformat()",
"def convert_date(self, date=None):\n if date is not None:\n format_str = '%d/%m/%Y'\n converted_date = datetime.strptime(date, format_str)\n return converted_date.date()",
"def _process_date(self, data):\n def helper(val):\n # Sometime the date has a (1) or (2) following it. Strip that off\n # so that we can successful convert to date.\n s = val.find(\" (\")\n if s >= 0:\n val = val[0:s]\n dv = dt.datetime.strptime(val, '%A, %b %d')\n dv = dv.replace(year=self.start_date.year)\n return dv\n data['Date'] = data['Date'].apply(helper)\n return data",
"def fix_date(oldfmt):\n dval = oldfmt.split('/')[-1]\n datev = datetime.strptime(dval, \"%Y-%m-%d\")\n return datev.strftime(\"%B %-d, %Y\")",
"def convert_date_to_string(date_input):\n if isinstance(date_input, date):\n return date_input.strftime(\"%Y-%m-%d\")\n else:\n raise TypeError(\"Input {0} is not a date object\".format(type(date_input)))",
"def reformat_subway_dates(date):\n date_formatted = datetime.datetime.strptime(date, '%m-%d-%y')\n date_formatted = date_formatted.strftime('%Y-%m-%d')\n return date_formatted",
"def convert_date_string(df,col_name):\n df[col_name] = pd.to_datetime(df[col_name], infer_datetime_format=True)\n return df",
"def __replaceDate(self, hql, date):\n #%%escapa\n hql = hql.replace(\"<date>\", date).replace('%', '%%')\n # gerp date-n\n #Re = re.compile(r'<date\\s*([-+]\\s*\\d+)')\n Re = re.compile(r'<date\\s*([-+]\\s*\\d+)\\|?(\\S*?\\s*\\S*?)>')\n l = Re.findall(hql)\n if not l:\n return hql\n\n l = map(lambda x: (int(x[0]), x[1]), l)\n for x in l:\n if x[1]:\n f = ''.join(\n map(lambda c: '%' + c if re.match('^[A-Za-z]', c) else c, x[1]))\n else:\n f = '%Y%m%d'\n stamp = int(time.mktime(time.strptime(\n date, '%Y%m%d'))) + 86400 * x[0]\n\n match = Re.search(hql)\n if not match:\n continue\n\n # replace <date-n|[Ymd]> to specific time.\n sdate = time.strftime(f, time.localtime(stamp))\n hql = hql.replace(match.group(), str(sdate))\n\n return hql",
"def insure_date(d):\n if isinstance(d, BeautifulDate):\n return date(year=d.year, month=d.month, day=d.day)\n else:\n return d",
"def convert_date_time(self, dt):\n return datetime.fromtimestamp(dt).strftime(\"%Y-%m-%d\")",
"def modis_to_from_pydatetime(date):\n \n if isinstance(date, (str, unicode)): \n return dt.datetime.strptime(date[1:], '%Y%j').date()\n return dt.datetime.strftime(date, 'A%Y%j')",
"def _format_date(self, date, humanize=True):\n if date:\n if humanize and date in self.special_dates:\n rv = self.special_dates[date]\n else:\n rv = date.strftime(self.date_format)\n return rv\n else:\n return ''"
] | [
"0.7220046",
"0.7032278",
"0.6782775",
"0.6732669",
"0.6480039",
"0.64462054",
"0.6420159",
"0.6401254",
"0.6381813",
"0.63493663",
"0.6343099",
"0.63151354",
"0.6298463",
"0.6281442",
"0.62560976",
"0.6254294",
"0.6253558",
"0.61913806",
"0.61866134",
"0.61836547",
"0.6163623",
"0.6160323",
"0.61417496",
"0.6132531",
"0.6130727",
"0.61254245",
"0.6119978",
"0.6117758",
"0.61175424",
"0.6107465"
] | 0.73328876 | 0 |
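
The sql_date helper in the record above builds a to_date(...) literal by string interpolation, which assumes the input is already trusted. A common alternative in application code is to parse the human-readable date once and bind the resulting value as a query parameter; the sketch below only illustrates that idea, is not part of the source record, and the cursor and table names in the usage comment are hypothetical.

from datetime import date, datetime

def to_sql_date(human_date: str) -> date:
    # Parse "dd.mm.yyyy" (e.g. "12.11.2019") into a date object that a
    # database driver can bind directly, instead of building a SQL literal.
    return datetime.strptime(human_date, "%d.%m.%Y").date()

# Hypothetical usage with a DB-API style cursor:
# cursor.execute("SELECT * FROM orders WHERE order_date = :d",
#                {"d": to_sql_date("12.11.2019")})
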
Listener called when a user creates an invite in a guild | async def on_invite_create(self, invite):
await add_invite_in_db(invite)
logger.info(f'{invite.guild}: user {invite.inviter} create invite') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def invite(self, ctx):\n await ctx.send(f'🐱You can invite me to your server using the following url:\\n{self.invite_url}'\n '\\n\\nYou will need the **Manage Server** permission to add me to a server. '\n f'Run `{self.heleus.command_prefix[0]}help` to see what you can customise!')",
"def invite(self):\n pass",
"async def invite(self):\n link = \"https://discordapp.com/oauth2/authorize?client_id=282765243862614016&scope=bot&permissions=19456\"\n await self.bot.say(\"Invite me to your server with this link!\\n\" + link)",
"async def invite(ctx):\n permissions = 2134207679\n url = discord.utils.oauth_url(client_id=bot.user.id, permissions=discord.Permissions(permissions=permissions),\n scopes=(\"bot\", \"applications.commands\"))\n view = discord.ui.View()\n view.add_item(discord.ui.Button(label=\"Invite\", url=url))\n await ctx.respond(\"I'm glad you want to add me to your server, here's a link!\", view=view)",
"def invite_user(session, invitee):\n session.invite_event.clear()\n key = b64encode(messaging.common.pkc_encrypt(\n session.get_channel_key(), session.get_encryption_cert(invitee))).decode()\n msg = {\n kk.typ: kk.add_user,\n kk.inviter: session.user,\n kk.invitee: invitee,\n kk.chid: session.chan,\n kk.chkey: key,\n }\n msg[kk.signature] = b64encode(\n messaging.common.create_msg_sig(session, msg)).decode()\n messaging.common.send_msg(session.sock, msg, key=session.symkey)",
"def invite(self,roomName,user):\n\n self.sendCommand(roomName +\" /invite\",user)",
"async def invite(self, ctx):\r\n myInvite = discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(permissions=8))\r\n await ctx.channel.send('Invite me to *your* server with this link: \\n\\n<{}>'.format(myInvite))",
"async def invite(self, ctx):\n embed = discord.Embed(title='Invite links for NOVA',\n description='[<:news:730866149109137520> Required Permissions](https://discord.com/api/'\n 'oauth2/authorize?client_id=709922850953494598&permissions=1573252215&scope='\n 'bot)\\n'\n '[<:news:730866149109137520> No Permissions]'\n '(https://discord.com/api/oauth2/authorize?client_id=709922850953494598&permi'\n 'ssions=0&scope=bot)\\n[<:news:730866149109137520> All Permissions (admin)]'\n '(https://discord.com/api/oauth2/authorize?client_id=709922850953494598&perm'\n 'issions=8&scope=bot)', color=0x5643fd)\n embed.set_footer(text='Developed by YeetVegetabales', icon_url='https://cdn.discordapp.com/avatars'\n '/569374429218603019'\n '/a_6dac6946906e498650f6c2466aa82200.gif?size'\n '=256&f=.gif')\n embed.set_thumbnail(url='https://images-ext-2.discordapp.net/external/54Mim4lahztGCP4hgmpy4lOdEUc4'\n '-dOeNA_x6hVHMlc/%3Fsize%3D4096/https/cdn.discordapp.com/avatars/709922850953494598'\n '/f78ed19924e8c95abc30f406d47670d7.png')\n await ctx.send(embed=embed)",
"async def invite(self, ctx):\n link = \"https://discordapp.com/oauth2/authorize?client_id=282765243862614016&scope=bot&permissions=19456\"\n await ctx.send(\"Invite me to your server with this link!\\n\" + link)",
"def on_station_user_invite_received(self, func):\n self._set_event_handler(\"stations\")\n self._events.on_station_user_invite_received(func)",
"async def invite(self, ctx):\n perms = discord.Permissions.text()\n perms.update(read_messages=True, manage_messages=True,\n mention_everyone=False, send_tts_messages=False)\n await ctx.send(f'Invite me here:\\n<{discord.utils.oauth_url(self.bot.user.id, perms)}>')",
"def on_station_user_invite_accepted(self, func):\n self._set_event_handler(\"stations\")\n self._events.on_station_user_invite_accepted(func)",
"async def invite(self, ctx):\n await ctx.send(f\"**{ctx.author.name}**, use this URL to invite me\\n<{discord.utils.oauth_url(self.bot.user.id)}>\")",
"async def invite(self, context: Context) -> None:\n embed = discord.Embed(\n description=f\"Invite me by clicking [here](https://discordapp.com/oauth2/authorize?&client_id={self.bot.config['application_id']}&scope=bot+applications.commands&permissions={self.bot.config['permissions']}).\",\n color=0xD75BF4,\n )\n try:\n # To know what permissions to give to your bot, please see here: https://discordapi.com/permissions.html and remember to not give Administrator permissions.\n await context.author.send(embed=embed)\n await context.send(\"I sent you a private message!\")\n except discord.Forbidden:\n await context.send(embed=embed)",
"async def _invite(self, ctx: Context):\n\n # read_messages=True,\n # send_messages=True,\n # manage_messages=True,\n # embed_links=True,\n # attach_files=True,\n # external_emojis=True,\n # add_reactions=True\n perms = discord.Permissions(322624)\n\n try:\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perms)\n value = (\n \"Add Brawlcord to your server by **[clicking here]\"\n f\"({invite_url})**.\\n\\n**Note:** By using the link\"\n \" above, Brawlcord will be able to\"\n \" read messages,\"\n \" send messages,\"\n \" manage messages,\"\n \" embed links,\"\n \" attach files,\"\n \" add reactions,\"\n \" and use external emojis\"\n \" wherever allowed.\\n\\n*You can remove the permissions manually,\"\n \" but that may break the bot.*\"\n )\n except Exception as exc:\n invite_url = None\n value = (\n f\"Error \\\"{exc}\\\" while generating invite link.\"\n \" Notify bot owner using the `-report` command.\"\n )\n\n embed = discord.Embed(color=EMBED_COLOR, description=value)\n embed.set_author(\n name=f\"Invite {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n # embed.add_field(name=\"__**Invite Link:**__\", value=value)\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n return await ctx.send(\n \"I do not have the permission to embed a link.\"\n \" Please give/ask someone to give me that permission.\"\n )",
"def invite(id, adminId, userId):\n db = core.connect();\n permission.create({\n \"streamId\": id,\n \"createdBy\": adminId,\n \"userId\": userId,\n \"level\": 0\n })\n event.create({\n \"createdBy\": userId,\n \"streamId\": user.messageStream(userId),\n \"displayString\": \"%s has invited you to the %s %s\" % (user.nameForId(adminId), meta(id), displayName(id)),\n \"unread\": True\n })",
"async def invite(self, ctx):\n embed = discord.Embed(title=\"Invite\", description=f\"**{ctx.author.name}**, use this URL to invite me\\n[link](https://discord.com/oauth2/authorize?client_id=749629426777456691&permissions=8&scope=bot)\", color=0xeff0f1)\n await ctx.send(embed=embed)",
"async def botinvite_command(self, ctx):\n invite = f\"https://discord.com/api/oauth2/authorize?client_id={self.client.user.id}&permissions=1374809815&scope=bot\"\n await ctx.send(invite)",
"async def join(self, ctx, invite : discord.Invite):\r\n if ctx.message.author.id == \"481270883701358602\":\r\n await self.client.accept_invite(invite)\r\n await self.client.say(\"Joined the server.\")\r\n else:\r\n await self.client.say(\"**Owner only command.**\")",
"async def invite(self, ctx):\n invite = f\"https://discordapp.com/api/oauth2/authorize?client_id={self.bot.user.id}&permissions=67584&scope=bot\"\n await ctx.send(embed=discord.Embed(\n color=discord.colour.Colour.teal(),\n description=f\":mailbox_with_mail: [Invite]({invite}) me to your server!\"))",
"def on_station_admin_invite_sent(self, func):\n self._set_event_handler(\"stations\")\n self._events.on_station_admin_invite_sent(func)",
"def invite_user(request):\n moderator = request.user\n site = get_current_site(request)\n\n invitation_form = InviteMemberForm(request.POST)\n\n if invitation_form.is_valid():\n\n # Invite user\n full_name = invitation_form.cleaned_data['full_name']\n email = invitation_form.cleaned_data['email']\n new_user = moderator.invite_new_user(email, full_name)\n\n # Log moderation event\n msg_type = ModerationLogMsg.INVITATION\n log_comment = _('{} invited {}'.format(moderator.get_full_name(),\n new_user.get_full_name()))\n log_moderator_event(msg_type=msg_type,\n user=new_user,\n moderator=moderator,\n comment=log_comment)\n\n # Send email\n subject = _('Welcome to {}'.format(site.name))\n template = 'moderation/emails/invite_new_user.html'\n token = new_user.auth_token\n url = request.build_absolute_uri(\n reverse('accounts:activate-account', args=[token]))\n send_connect_email(subject=subject,\n template=template,\n recipient=new_user,\n sender=moderator,\n site=site,\n url=url)\n\n messages.success(request, _('{} has been invited to {}.'.format(\n new_user.get_full_name(), site.name)))\n\n return redirect('moderation:moderators')\n\n else:\n return moderation_home(request, invitation_form=invitation_form)",
"async def invite(self, ctx):\n lang = getLang(ctx.message.guild.id)\n\n with open(f\"embeds/{lang}/inviting.json\", \"r\") as f:\n inviting = json.load(f)\n\n await ctx.reply(embed=discord.Embed.from_dict(inviting[0]), components=[\n ActionRow(\n Button(label=inviting[1],\n url=\"https://discord.com/api/oauth2/authorize?client_id=878533674042294292&permissions=537259248&scope=bot\",\n style=ButtonStyle.url\n ),\n Button(label=inviting[2],\n url=\"https://discord.com/api/oauth2/authorize?client_id=878533674042294292&permissions=8&scope=bot\",\n style=ButtonStyle.url\n )\n )\n ], mention_author=False, delete_after=20)",
"def testInviteCreatesUser(self):\r\n me = User()\r\n me.username = u'me'\r\n me.email = u'me.com'\r\n me.invite_ct = 2\r\n you = me.invite(u'you.com')\r\n\r\n self.assertEqual(\r\n 'you.com',\r\n you.username,\r\n 'The email should be the username')\r\n self.assertEqual(\r\n 'you.com',\r\n you.email,\r\n 'The email should be the email')\r\n self.assertTrue(\r\n len(you.api_key),\r\n 'The api key should be generated for the user')\r\n self.assertFalse(\r\n you.activated,\r\n 'The new user should not be activated')\r\n self.assertEqual(\r\n 1,\r\n me.invite_ct,\r\n 'My invite count should be deprecated')",
"async def _invite(self, ctx: Context):\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n value = (\n f\"Invite TvM Assistant to your bot by [clicking here]({invite_url}).\"\n \"\\n\\nInviting the bot will give it some management permissions. You can\"\n \" review them when you use the link.\"\n )\n\n embed = discord.Embed(color=await ctx.embed_colour(), description=value)\n embed.set_author(name=f\"Invite TvM Assistant\", icon_url=ctx.me.avatar_url)\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n return await ctx.send(\n f\"{invite_url}\\n\\nInviting the bot will give it some management permissions.\"\n \" You can review them when you use the link.\"\n )",
"def on_station_admin_invite_accepted(self, func):\n self._set_event_handler(\"stations\")\n self._events.on_station_admin_invite_accepted(func)",
"async def invite(ctx):\r\n await ctx.send(\"https://discordapp.com/oauth2/authorize?client_id=457903893079392256&scope=bot&permissions=2146958591\")\r\n ctx.counter(n)",
"def invite(self, eventid, aid, uid):\n\n u_id = EventId()\n u_id.setHashed(uid)\n\n a_id = EventId()\n a_id.setHashed(aid)\n\n e_id = EventId()\n e_id.setHashed(eventid)\n\n event = Event.getById(e_id)\n admin = User(id=a_id)\n\n if not event.authorized(admin):\n raise EventError(EventError.NO_ADMIN)\n\n user = User.getById(u_id)\n\n invitation = Invitation(user=user, event=event)\n invitation.create()\n\n return",
"def on_user_create(self, user):",
"def irc_INVITE(self, prefix, (user, channel)):\n self.join(channel)"
] | [
"0.7003141",
"0.6786849",
"0.6755741",
"0.66672224",
"0.659637",
"0.65516216",
"0.65509593",
"0.6550935",
"0.652335",
"0.6459283",
"0.644018",
"0.64129496",
"0.6411395",
"0.64018804",
"0.6391156",
"0.63838065",
"0.62951034",
"0.6294505",
"0.6286033",
"0.628171",
"0.6219996",
"0.6197645",
"0.61920786",
"0.6146751",
"0.6141416",
"0.611062",
"0.6015591",
"0.5989801",
"0.597245",
"0.59559506"
] | 0.7565148 | 0 |
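
The on_invite_create coroutine in the record above is a discord.py event listener; in a full bot it would normally live inside a cog and be registered with the listener decorator. The sketch below shows that minimal wiring under the assumption of a standard discord.py setup; the add_invite_in_db helper from the record is the source project's own and is therefore only referenced in a comment.

import logging
from discord.ext import commands

logger = logging.getLogger(__name__)

class InviteTracker(commands.Cog):
    def __init__(self, bot: commands.Bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_invite_create(self, invite):
        # The record's add_invite_in_db(invite) call would go here; it is the
        # source project's own persistence helper and is not reproduced.
        logger.info("%s: user %s created invite %s",
                    invite.guild, invite.inviter, invite.code)
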
Test parseLineCSV with good data (all fields present) | def test_parseLine1(mocker):
# given: setup test framework
worker = Worker()
testString = "12Nov2019,Teacher,Brighter Futures,12000"
expectedResult = {
'date': '2019-11-12',
'job_title': 'Teacher',
'company_name': 'Brighter Futures',
'salary': 12000
}
# when:
result = worker.parseLineCSV(testString)
# then:
assert result == expectedResult | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_parseLine2(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"11/11/19,Brighter Futures,12000\"\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then: (Using PyTruth assertions)\n AssertThat(result).IsNone()",
"def test_multiple_lines():\n\n # Multi-line file\n test_file = StringIO(\n u'fri,wed\\n1,1\\n2,2'\n )\n\n csv_parser = CSVParser(test_file)\n\n expected = [\n {'day': 'wed', 'description': 'N/A 1', 'square': 1, 'value': 1},\n {'day': 'fri', 'description': 'N/A 2', 'double': 2, 'value': 1},\n ]\n\n assert csv_parser.parse() == expected",
"def test_csv_row_bug(script_runner, tmpdir, test_dir):\n csv_file = tmpdir / 'csv_file.csv'\n\n ret = script_runner.run(\n 'mwcp-tool', '-p', 'foo', '-c', str(csv_file), str(test_dir), cwd=str(tmpdir))\n print(ret.stdout)\n print(ret.stderr, file=sys.stderr)\n assert ret.success\n\n assert csv_file.exists()\n\n with csv_file.open('r') as fo:\n reader = csv.reader(fo)\n rows = list(reader)\n assert len(rows) == len(test_dir.listdir()) + 1\n assert rows[0] == ['scan_date', 'inputfilename', 'outputfile.name',\n 'outputfile.description', 'outputfile.md5', 'address', 'debug', 'url']\n for i, row in enumerate(rows[1:]):\n assert row[0] and row[1]\n # Test entries except the timestamp and full file path.\n assert row[2:] == [\n 'fooconfigtest.txt',\n 'example output file',\n '5eb63bbbe01eeed093cb22bb8f5acdc3',\n '127.0.0.1',\n ('[+] File test_{0}.txt identified as Foo.\\n'\n '[+] size of inputfile is 23 bytes\\n'\n '[+] operating on inputfile test_{0}.txt').format(i),\n 'http://127.0.0.1',\n ]",
"def csv_parser_test():\n data = csv_parser(myspreadsheet)\n print 'Your data object:'\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(data) \n # Did your parser work?\n for row_num, row in enumerate(data):\n try:\n assert len(row) == 3\n except AssertionError:\n print \"Row %s seems to be misparsed; its length is %s\" % (row_num, len(row))\n # Check on one of the values:\n try:\n assert data[4][2] == 'Linguist'\n except AssertionError:\n print \"Error: data[4][2] should equal 'Linguist'; actual value is %s\" % data[4][2]\n # Did you remember your int conversions?\n try:\n assert isinstance(data[0][0], int)\n except AssertionError:\n print \"Error: data[0][0] should be an int\"\n # Did you remember your float conversions?\n try:\n assert isinstance(data[6][1], float)\n except AssertionError:\n print \"Error: data[6][1] should be a float\"",
"def csv_parser_test():\r\n data = csv_parser(myspreadsheet)\r\n print('Your data object:')\r\n pp = pprint.PrettyPrinter(indent=4)\r\n pp.pprint(data) \r\n # Did your parser work?\r\n for row_num, row in enumerate(data):\r\n try:\r\n assert len(row) == 3\r\n except AssertionError:\r\n print ((\"Row %s seems to be misparsed; its length is %s\") % (row_num, len(row)))\r\n # Check on one of the values:\r\n try:\r\n assert data[4][2] == 'Linguist'\r\n except AssertionError:\r\n print ((\"Error: data[4][2] should equal 'Linguist'; actual value is %s\") % data[4][2])\r\n # Did you remember your int conversions?\r\n try:\r\n assert isinstance(data[0][0], int)\r\n except AssertionError:\r\n print (\"Error: data[0][0] should be an int\")\r\n # Did you remember your float conversions?\r\n try:\r\n assert isinstance(data[6][1], float)\r\n except AssertionError:\r\n print (\"Error: data[6][1] should be a float\")",
"def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'",
"def test_misc_csv_read():\n r = csv_reader(\"../test/test.csv\")\n fields = r.hdr()\n data = str(fields)\n while True:\n row = r.row()\n if not row: break\n data += '\\n' + str(row)\n\n assert(data == \"\"\"\n['EVT_CODE*', 'EVT_DATE.DE', 'CODE', 'AGE', 'FRST', 'LST', 'SPEC', 'de.id']\n['tea', '2018/01/01', 'X', '35', 'PRE', 'WHO', 'BUG', '1']\n['coffee', '2018/05/05', 'X', '35', 'JAN,Z', 'WHO', 'FRG', '1']\n['water', '2018/01/01', 'Y', '35', 'TAN', 'POST', 'CAT', '2']\n \"\"\".strip())",
"def test_csvfile_single_row_of_data(fs: FakeFilesystem) -> None:\n contents = \"\"\"\"a\",\"b\"\n1,2\"\"\"\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n \"b\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }\n assert list(adapter.get_data({}, [])) == [{\"a\": 1.0, \"b\": 2.0, \"rowid\": 0}]",
"def test_itercsv_emits_data_lines():\n expected = [\n b'Hello,World\\r\\n',\n b'1,2\\r\\n',\n b'3,4\\r\\n'\n ]\n assert list(itercsv(['Hello', 'World'], [[1, 2], [3, 4]])) == expected",
"def test_read_line(self):\n\n expected_data = ['\\\"lu, jr\\\"','ming-yuan','\\\"DRUG,1\\\"',135.999,True,3]\n input_string = '001,\\\"LU, JR\\\",MING-YUAN,\\\"DRUG,1\\\",135.999\\n'\n data = read_line(input_string)\n self.assertEqual(expected_data[0],data[0])\n self.assertEqual(expected_data[1],data[1])\n self.assertEqual(expected_data[2],data[2])\n self.assertAlmostEqual(expected_data[3],data[3])\n self.assertEqual(expected_data[4],data[4])\n self.assertAlmostEqual(expected_data[5],data[5])\n\n #Check for odd numers of quotation marks\n input_string = '001,\\\"LU\\\",\\\"MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for missing fields\n input_string = '001,,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for corrupted fields\n input_string = '001x,LU,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,1ag5\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])",
"def test_parser():\n data = parse_csv(TEST_DATA)\n assert data['2020-01-03'] == ['recycle']\n assert data['2020-01-08'] == ['bio', 'trash']\n assert data['2021-01-09'] == ['christmas']",
"def test_misc_csv_read_inmemory():\n r = csv_reader([\"fieldname_a,fieldname_b\",\n \"mo,bo\",\n \"go,zo\",\n \"fo,po\"])\n fields = r.hdr()\n data = str(fields)\n while True:\n row = r.row()\n if not row: break\n data += '\\n' + str(row)\n assert(data==\"\"\"\n['fieldname_a', 'fieldname_b']\n['mo', 'bo']\n['go', 'zo']\n['fo', 'po']\n \"\"\".strip())",
"def test_valid_csv(self):\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_csv(url, fname='wine2')\n self.assertTrue(str(type(readerobject)),\"_csv.reader\")",
"def test_csv_simple_input(self):\n\n # Mix of integer and string data. Ensure that commas and\n # quotes are escaped properly.\n data = [\n {\n 'name': 'Normal string',\n 'item_num': 1,\n },\n {\n 'name': 'String, with, commas',\n 'item_num': 2,\n },\n {\n 'name': 'String with \" quote',\n 'item_num': 3,\n },\n ]\n\n table = TableReportForTesting(data)\n response = table.as_csv(HttpRequest())\n self.assertEqual(response.status_code, 200)\n # Expect cells containing commas to be escaped with quotes.\n content = response.content\n if PY3:\n content = content.decode(settings.DEFAULT_CHARSET).replace('\\x00', '')\n self.assertEqual(\n content,\n 'Name,Item Num\\r\\n'\n 'Normal string,1\\r\\n'\n '\"String, with, commas\",2\\r\\n'\n '\"String with \"\" quote\",3\\r\\n')",
"def test_csv_reader_header_fields(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n header_fields = list(data[0].keys())\n assert header_fields == [\n 'Country',\n 'City',\n 'State_Or_Province',\n 'Lat',\n 'Long',\n 'Altitude'\n ]",
"def test_parse_invalid_file(self):\n with pytest.raises(ParserError):\n self.parser.parse(\"invalid csv\")",
"def check_valid_csvformat(self, csv_path):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.reader(file_obj, delimiter=',') # CSV DictReader object\n self.check_valid_csv_header(reader.next())\n self.check_valid_csv_data(reader.next())",
"def test_row_parsing(self, tmpdir):\n json_file = str(tmpdir.join(\"f.json\"))\n with open(json_file, \"w\") as f:\n json.dump({\"ds\": [{\"file\": \"data.nc\", \"size\": 0, \"mtime\": 0, \"sha256\": 0}]}, f)\n\n create = CsvRowDataset.from_strings\n\n # Check extraneous whitespace is ignored and Yes/No to boolean\n # conversion\n got1 = create([\"ds\", \"100\", \" url\", \"title, here\", \"no\", \"Yes\", json_file])\n expected1 = CsvRowDataset(\"ds\", 100, \"url\", \"title, here\", False, True, json_file)\n assert got1 == expected1\n\n got2 = create([\"ds\", \"100\", \" url\", \"title, here\", \"No\", \"yes\", json_file])\n expected2 = CsvRowDataset(\"ds\", 100, \"url\", \"title, here\", False, True, json_file)\n assert got2 == expected2\n\n # Check invalid int and bool values\n assert pytest.raises(ValueError, create, [\"ds\", \"blah\", \"url\", \"title\", \"Yes\", json_file])\n assert pytest.raises(ValueError, create, [\"ds\", \"200\", \"url\", \"title\", \"blah\", json_file])",
"def test_csvfile_different_types(fs: FakeFilesystem) -> None:\n contents = '''\"a\"\n1\n2.0\n\"test\"'''\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": String(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }",
"def test_csv_comment_default(parallel, read_csv):\n text = \"a,b,c\\n#1,2,3\\n4,5,6\"\n table = read_csv(text, parallel=parallel)\n expected = Table([[\"#1\", \"4\"], [2, 5], [3, 6]], names=(\"a\", \"b\", \"c\"))\n assert_table_equal(table, expected)",
"def test_csv(inpath, outpath, line_width=0):\n test = SimpleCSVReporter.SimpleCSVReporter()\n test.readCSV(inpath)\n indent_tool = IndentMessages.IndentMessages()\n if line_width > 0:\n indent_tool.total_width = line_width\n output = open(outpath, 'w')\n test.report_fd = output\n test.indenter = indent_tool\n test.default_report()\n output.close()",
"def test_csv(self, input_file_path: str, answer_file_path: List[Dict]):\n with open(attach_path(answer_file_path), 'r') as answer_file:\n csv_file = open(attach_path(input_file_path))\n assert str(read_csv(csv_file)) == answer_file.read().strip()",
"def test_data_creation_from_base_row(self, mock_read_csv):\n f = StringIO(self.data_header + self.data_row)\n reader = csv.DictReader(f)\n mock_read_csv.return_value = reader\n load_values()\n self.assertEqual(CountyMortgageData.objects.count(), 1)\n county = CountyMortgageData.objects.first()\n fields = reader.fieldnames\n fields.pop(fields.index('fips')) # test string separately\n fields.pop(fields.index('open')) # 'open' is stored as 'total'\n fields.pop(fields.index('date')) # date must be parsed before testing\n self.assertEqual(county.fips, self.data_row_dict.get('fips'))\n open_value = int(self.data_row_dict.get('open'))\n self.assertEqual(county.total, open_value)\n target_date = parser.parse(self.data_row_dict['date']).date()\n self.assertEqual(county.date, target_date)\n for field in fields: # remaining fields can be tested in a loop\n self.assertEqual(\n getattr(county, field), int(self.data_row_dict.get(field)))\n # test computed values\n self.assertEqual(\n county.epoch,\n int(target_date.strftime('%s')) * 1000)\n self.assertEqual(\n county.percent_90,\n int(self.data_row_dict.get('ninety')) * 1.0 / open_value)\n self.assertEqual(\n county.percent_30_60,\n (int(self.data_row_dict.get('thirty')) +\n int(self.data_row_dict.get('sixty'))) * 1.0 / open_value)",
"def test_validate_csv():\n duplicate_keys_file_path = os.path.join(\n TEST_DATA_DIR, \"clubs_invalid_duplicate_keys.csv\"\n )\n\n invalid_headers_file_path = os.path.join(\n TEST_DATA_DIR, \"membership_invalid_syntax.csv\"\n )\n\n # Test duplicate keys\n with open(duplicate_keys_file_path) as test_file:\n test_file = test_file.read()\n\n rows = list(csv.DictReader(StringIO(test_file)))\n\n with pytest.raises(ValidationFailed) as v_error:\n validate_csv(rows)\n\n validation_resp = v_error.value.errors[0]\n assert \"error\" in validation_resp\n duplicate_keys = validation_resp[\"detail\"]\n assert \"5\" in duplicate_keys\n assert \"2\" in duplicate_keys\n\n # Test invalid syntax\n with open(invalid_headers_file_path) as test_file:\n test_file = test_file.read()\n\n rows = list(csv.DictReader(StringIO(test_file)))\n with pytest.raises(ValidationFailed) as v_error:\n validate_csv(rows)\n\n validation_resp = v_error.value.errors[0]\n invalid_rows = [x[\"row\"] for x in validation_resp[\"detail\"]]\n assert \"error\" in validation_resp\n assert 3 in invalid_rows\n assert 4 in invalid_rows\n assert 5 in invalid_rows\n\n # Test unicode decode errors\n test_data = b\"\\xff\\xfe_\\x00k\\x00e\\x00y\\x00,\\x00n\\x00a\\x00m\\x00e\\x00\\n\"\n pytest.raises(DecodeFailed, decode_data, test_data)",
"def test_read_in_file(self):\r\n filename = \"CrimeDataSmall.csv\"\r\n\r\n lst = cds.read_in_file(filename)\r\n\r\n self.assertIsInstance(lst, list, \"Returned datatype should be a list\")\r\n self.assertEqual(len(lst), 4, \"There should be 4 rows returned from CrimeDataSmall 1 header and 3 data rows\")\r\n self.assertEqual(len(lst[0]), 23, \"Each row should have 23 columns\")\r\n self.assertEqual(lst[0][1], \"Reported_Date\", \"Column 1 was incorrect header\")\r\n self.assertEqual(lst[0][7], \"Offense\", \"Column 7 was incorrect header\")\r\n self.assertEqual(lst[0][13], \"Zip Code\", \"Column 13 header was incorrect\")\r\n self.assertEqual(lst[1][1], \"03/19/2019\", \"Column 1 was incorrect in first data row\")\r\n self.assertEqual(lst[1][7], \"Vehicular – Non-Injury\", \"Column 7 was incorrect in first data row\")\r\n self.assertEqual(lst[1][13], \"64161\", \"Column 13 in first data row was incorrect\")\r\n self.assertEqual(lst[3][1], \"03/27/2019\", \"Column 1 was incorrect in 3rd data row\")\r\n self.assertEqual(lst[3][7], \"Embezzlement\", \"Column 7 was incorrect 3rd data row\")\r\n self.assertEqual(lst[3][13], \"64112\", \"Column 13 3rd data row was incorrect\")\r\n self.assertEqual(lst[3][11], \"4600, S WORNALL RD\", \"Column 11 3rd data row was incorrect. Use csv module to read \")",
"def check_valid_csv_data(self, row):\n obj = re.match(re.compile('^[0-9]{4}\\,[A-Z]{1}[a-z]{2}\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Data String must be like `1990` `Jan` Check Sample file\")",
"def process_line(line):\n\n name_comp_list = []\n givenname_comp_list = []\n surname_comp_list = []\n geocode_comp_list = []\n locality_comp_list = []\n date1_comp_list = []\n date2_comp_list = []\n\n # Split the line into the basic fields - - - - - - - - - - - - - - - - - - -\n #\n if (config.in_file_type in ['CSV','CSVQ','TAB','TABQ']):\n # Comma or tabulator separated\n try:\n line_list = config.line_parser.parse(line)\n except:\n log_message('CSV line parsing failed with inout: '+line,'err')\n\n if (len(line_list) < config.input_len):\n log_message('Input line does not contain enough fields,' +\\\n 'fill up with empty fields','warn')\n while (len(line_list) < config.input_len):\n line_list.append('')\n\n config.curr_line_list = line_list # Save current line list\n\n # Extract fields into different component lists - - - - - - - - - - - - - -\n #\n if (config.input_component['name'] != []): # Extract name fields\n for i in config.input_component['name']:\n name_comp_list.append(line_list[i])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for i in config.input_component['givenname']:\n givenname_comp_list.append(line_list[i])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for i in config.input_component['surname']:\n surname_comp_list.append(line_list[i])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for i in config.input_component['geocode']:\n geocode_comp_list.append(line_list[i])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for i in config.input_component['locality']:\n locality_comp_list.append(line_list[i])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for i in config.input_component['date1']:\n date1_comp_list.append(line_list[i])\n\n if (config.input_component['date2'] != []): # Extract date2 fields\n for i in config.input_component['date2']:\n date2_comp_list.append(line_list[i])\n\n elif (config.in_file_type == 'COL'): # Column based input file - - - - - - -\n\n if (len(line) < config.input_len):\n log_message('Input line is not long enough, fill up with spaces','warn')\n line += ' '*(config.input_len-len(line))\n\n if (config.input_component['name'] != []): # Extract name fields\n for (col_start,length) in config.input_component['name']:\n name_comp_list.append(line[col_start,col_start+length])\n\n else: # Extract givenname and surname into separate components - - - - - -\n if (config.input_component['givenname'] != []): # Extract g-name fields\n for (col_start,length) in config.input_component['givenname']:\n givenname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['surname'] != []): # Extract surname fields\n for (col_start,length) in config.input_component['surname']:\n surname_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['geocode'] != []): # Extract geocode fields\n for (col_start,length) in config.input_component['geocode']:\n geocode_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['locality'] != []): # Extract locality fields\n for (col_start,length) in config.input_component['locality']:\n locality_comp_list.append(line[col_start,col_start+length])\n\n if (config.input_component['date1'] != []): # Extract date1 fields\n for (col_start,length) in config.input_component['date1']:\n date1_comp_list.append(line[col_start,col_start+length])\n\n 
if (config.input_component['date2'] != []): # Extract date2 fields\n for (col_start,length) in config.input_component['date2']:\n date2_comp_list.append(line[col_start,col_start+length])\n\n # elif (config.in_file_type == 'SQL'): # - - - - - - - - - - - - - - - - - -\n\n ################################\n # Add later: SQL database access\n ################################\n\n msg = [' Component basic field lists:', \\\n ' Name: '+str(name_comp_list), \\\n ' Given name: '+str(givenname_comp_list), \\\n ' Surname: '+str(surname_comp_list), \\\n ' Geocode: '+str(geocode_comp_list), \\\n ' Locality: '+str(locality_comp_list), \\\n ' Date1: '+str(date1_comp_list), \\\n ' Date2: '+str(date2_comp_list)]\n log_message(msg,'v2')\n\n name_comp = ''\n givenname_comp = ''\n surname_comp = ''\n geocode_comp = ''\n locality_comp = ''\n date1_comp = ''\n date2_comp = ''\n\n # Now clean and then concatenate component lists into strings - - - - - - - -\n #\n if (name_comp_list != []): # Name component\n name_comp = name_comp_list[0] # Start with first field in list\n\n for f in name_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['name'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['name'] == 1):\n sep = check_field_spill(name_comp, f)\n\n name_comp = name_comp+sep+f # Append separator and field\n\n if (givenname_comp_list != []): # Givenname component - - - - - - - - - - -\n givenname_comp = givenname_comp_list[0] # Start with first field in list\n\n for f in givenname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['givenname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['givenname'] == 1):\n sep = check_field_spill(givenname_comp, f)\n\n givenname_comp = givenname_comp+sep+f # Append separator and field\n\n if (surname_comp_list != []): # Surname component - - - - - - - - - - - - -\n surname_comp = surname_comp_list[0] # Start with first field in list\n\n for f in surname_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['surname'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['surname'] == 1):\n sep = check_field_spill(surname_comp, f)\n\n surname_comp = surname_comp+sep+f # Append separator and field\n\n if (geocode_comp_list != []): # Geocode component - - - - - - - - - - - - -\n geocode_comp = geocode_comp_list[0] # Start with first field in list\n\n for f in geocode_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['geocode'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['geocode'] == 1):\n sep = check_field_spill(geocode_comp, f)\n\n geocode_comp = geocode_comp+sep+f # Append separator and field\n\n if (locality_comp_list != []): # Locality component - - - - - - - - - - - -\n locality_comp = 
locality_comp_list[0] # Start with first field in list\n\n for f in locality_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['locality'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['locality'] == 1):\n sep = check_field_spill(locality_comp, f)\n\n locality_comp = locality_comp+sep+f # Append separator and field\n\n if (date1_comp_list != []): # Date1 component - - - - - - - - - - - - - - -\n date1_comp = date1_comp_list[0] # Start with first field in list\n\n for f in date1_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date1'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date1'] == 1):\n if (date1_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date1_comp = date1_comp+sep+f # Append separator and field\n\n if (date2_comp_list != []): # Date2 component - - - - - - - - - - - - - - -\n date2_comp = date2_comp_list[0] # Start with first field in list\n\n for f in date2_comp_list[1:]: # Loop over following fields (if any)\n if (f != ''):\n if (config.input_space_sep['date2'] == 1):\n sep = ' ' # Set separator to space between fields\n else:\n sep = '' # No space between fields\n\n # Check field spilling only if space separator is set to ' ' \n #\n if (sep == ' ') and (config.input_check_spilling['date2'] == 1):\n if (date2_comp[-1] != ' ') and (f[0] != ' '):\n tmp_list0 = date1_comp.split()\n tmp_list1 = f.split()\n check_word = tmp_list0[-1]+tmp_list1[0]\n\n if (check_word in ['jan','feb','mar','apr','may','jun','jul','aug', \\\n 'sep','oct','nov','dec','january','february','march','april', \\\n 'may','june','july','august','september','october','november', \\\n 'december']):\n\n sep = '' # Set separator to no space\n msg = ' Correct date1 word spilling: \"'+date1_comp+'\",\"'+f+'\"'\n log_message(msg,'v1')\n\n date2_comp = date2_comp+sep+f # Append separator and field\n\n # Check if name component is given or givenname and surname separately - - -\n #\n if (config.input_component['givenname'] != []) or \\\n (config.input_component['surname'] != []):\n name_comp = [givenname_comp, surname_comp]\n\n msg = [' Components:', \\\n ' Name: \"'+str(name_comp)+'\"', \\\n ' Geocode: \"'+geocode_comp+'\"', \\\n ' Locality: \"'+locality_comp+'\"', \\\n ' Date1: \"'+date1_comp+'\"', \\\n ' Date2: \"'+date2_comp+'\"']\n log_message(msg,'v1')\n\n return [name_comp, geocode_comp, locality_comp, date1_comp, date2_comp]",
"def test_csvfile_unordered(fs: FakeFilesystem) -> None:\n contents = \"\"\"\"a\"\n1\n2\n1\"\"\"\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }",
"def test_basic_dummy_no_match(self):\n self.assertLines([\"a\", \";\", \"examples/dummy.csv\"], [\"a,b,c,a_xfind\", \"1,2,3,\",])",
"def read_csv():"
] | [
"0.78631055",
"0.75031304",
"0.7422704",
"0.73800725",
"0.7375032",
"0.7319514",
"0.7308899",
"0.7113874",
"0.7112114",
"0.7110554",
"0.71081805",
"0.7057524",
"0.6904167",
"0.6871032",
"0.6755092",
"0.65902114",
"0.65860367",
"0.65798193",
"0.6544577",
"0.65245825",
"0.6504139",
"0.64892817",
"0.64721876",
"0.6448786",
"0.6389549",
"0.6336897",
"0.6335581",
"0.63092864",
"0.6297593",
"0.6280933"
] | 0.77302605 | 1 |
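
The test in the record above (and the companion bad-data test in the next record) exercises a Worker.parseLineCSV method whose implementation is not included in the dump. Purely as an illustration of a parser that would satisfy both expectations, and not the source project's actual code, a standalone version could look like this:

from datetime import datetime

def parse_line_csv(line: str):
    # Expected input: "12Nov2019,Teacher,Brighter Futures,12000"
    parts = line.split(",")
    if len(parts) != 4:
        return None  # e.g. a row with a missing field, as in the bad-data test
    try:
        parsed_date = datetime.strptime(parts[0], "%d%b%Y").date().isoformat()
        salary = int(parts[3])
    except ValueError:
        return None  # unparseable date or salary
    return {
        "date": parsed_date,        # "2019-11-12"
        "job_title": parts[1],
        "company_name": parts[2],
        "salary": salary,
    }
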
Test parseLineCSV with bad data (some fields missing) | def test_parseLine2(mocker):
# given: setup test framework
worker = Worker()
testString = "11/11/19,Brighter Futures,12000"
# when:
result = worker.parseLineCSV(testString)
# then: (Using PyTruth assertions)
AssertThat(result).IsNone() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_parse_invalid_file(self):\n with pytest.raises(ParserError):\n self.parser.parse(\"invalid csv\")",
"def csv_parser_test():\n data = csv_parser(myspreadsheet)\n print 'Your data object:'\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(data) \n # Did your parser work?\n for row_num, row in enumerate(data):\n try:\n assert len(row) == 3\n except AssertionError:\n print \"Row %s seems to be misparsed; its length is %s\" % (row_num, len(row))\n # Check on one of the values:\n try:\n assert data[4][2] == 'Linguist'\n except AssertionError:\n print \"Error: data[4][2] should equal 'Linguist'; actual value is %s\" % data[4][2]\n # Did you remember your int conversions?\n try:\n assert isinstance(data[0][0], int)\n except AssertionError:\n print \"Error: data[0][0] should be an int\"\n # Did you remember your float conversions?\n try:\n assert isinstance(data[6][1], float)\n except AssertionError:\n print \"Error: data[6][1] should be a float\"",
"def test_csv_row_bug(script_runner, tmpdir, test_dir):\n csv_file = tmpdir / 'csv_file.csv'\n\n ret = script_runner.run(\n 'mwcp-tool', '-p', 'foo', '-c', str(csv_file), str(test_dir), cwd=str(tmpdir))\n print(ret.stdout)\n print(ret.stderr, file=sys.stderr)\n assert ret.success\n\n assert csv_file.exists()\n\n with csv_file.open('r') as fo:\n reader = csv.reader(fo)\n rows = list(reader)\n assert len(rows) == len(test_dir.listdir()) + 1\n assert rows[0] == ['scan_date', 'inputfilename', 'outputfile.name',\n 'outputfile.description', 'outputfile.md5', 'address', 'debug', 'url']\n for i, row in enumerate(rows[1:]):\n assert row[0] and row[1]\n # Test entries except the timestamp and full file path.\n assert row[2:] == [\n 'fooconfigtest.txt',\n 'example output file',\n '5eb63bbbe01eeed093cb22bb8f5acdc3',\n '127.0.0.1',\n ('[+] File test_{0}.txt identified as Foo.\\n'\n '[+] size of inputfile is 23 bytes\\n'\n '[+] operating on inputfile test_{0}.txt').format(i),\n 'http://127.0.0.1',\n ]",
"def csv_parser_test():\r\n data = csv_parser(myspreadsheet)\r\n print('Your data object:')\r\n pp = pprint.PrettyPrinter(indent=4)\r\n pp.pprint(data) \r\n # Did your parser work?\r\n for row_num, row in enumerate(data):\r\n try:\r\n assert len(row) == 3\r\n except AssertionError:\r\n print ((\"Row %s seems to be misparsed; its length is %s\") % (row_num, len(row)))\r\n # Check on one of the values:\r\n try:\r\n assert data[4][2] == 'Linguist'\r\n except AssertionError:\r\n print ((\"Error: data[4][2] should equal 'Linguist'; actual value is %s\") % data[4][2])\r\n # Did you remember your int conversions?\r\n try:\r\n assert isinstance(data[0][0], int)\r\n except AssertionError:\r\n print (\"Error: data[0][0] should be an int\")\r\n # Did you remember your float conversions?\r\n try:\r\n assert isinstance(data[6][1], float)\r\n except AssertionError:\r\n print (\"Error: data[6][1] should be a float\")",
"def test_multiple_lines():\n\n # Multi-line file\n test_file = StringIO(\n u'fri,wed\\n1,1\\n2,2'\n )\n\n csv_parser = CSVParser(test_file)\n\n expected = [\n {'day': 'wed', 'description': 'N/A 1', 'square': 1, 'value': 1},\n {'day': 'fri', 'description': 'N/A 2', 'double': 2, 'value': 1},\n ]\n\n assert csv_parser.parse() == expected",
"def test_misc_csv_read():\n r = csv_reader(\"../test/test.csv\")\n fields = r.hdr()\n data = str(fields)\n while True:\n row = r.row()\n if not row: break\n data += '\\n' + str(row)\n\n assert(data == \"\"\"\n['EVT_CODE*', 'EVT_DATE.DE', 'CODE', 'AGE', 'FRST', 'LST', 'SPEC', 'de.id']\n['tea', '2018/01/01', 'X', '35', 'PRE', 'WHO', 'BUG', '1']\n['coffee', '2018/05/05', 'X', '35', 'JAN,Z', 'WHO', 'FRG', '1']\n['water', '2018/01/01', 'Y', '35', 'TAN', 'POST', 'CAT', '2']\n \"\"\".strip())",
"def test_parseLine1(mocker):\n \n # given: setup test framework\n worker = Worker()\n testString = \"12Nov2019,Teacher,Brighter Futures,12000\"\n expectedResult = {\n 'date': '2019-11-12',\n 'job_title': 'Teacher',\n 'company_name': 'Brighter Futures',\n 'salary': 12000\n }\n \n # when:\n result = worker.parseLineCSV(testString)\n \n # then:\n assert result == expectedResult",
"def test_read_line(self):\n\n expected_data = ['\\\"lu, jr\\\"','ming-yuan','\\\"DRUG,1\\\"',135.999,True,3]\n input_string = '001,\\\"LU, JR\\\",MING-YUAN,\\\"DRUG,1\\\",135.999\\n'\n data = read_line(input_string)\n self.assertEqual(expected_data[0],data[0])\n self.assertEqual(expected_data[1],data[1])\n self.assertEqual(expected_data[2],data[2])\n self.assertAlmostEqual(expected_data[3],data[3])\n self.assertEqual(expected_data[4],data[4])\n self.assertAlmostEqual(expected_data[5],data[5])\n\n #Check for odd numers of quotation marks\n input_string = '001,\\\"LU\\\",\\\"MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for missing fields\n input_string = '001,,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for corrupted fields\n input_string = '001x,LU,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,1ag5\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])",
"def test_invalid_header(self, tmpdir):\n path1 = tmpdir.join(\"invalid.csv\")\n path1.write(\"not,a,valid,header,row\")\n with pytest.raises(ValueError):\n parse_file(str(path1))\n\n path2 = tmpdir.join(\"valid.csv\")\n path2.write(\",\".join(HEADER_ROW))\n try:\n parse_file(str(path2))\n except ValueError:\n assert False, \"Unexpected ValueError\"",
"def test_schema_invalid_format(self):\n bad_schema = [int, int, float, float, str]\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)",
"def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'",
"def test_csvfile_get_data_impossible_filter(fs: FakeFilesystem) -> None:\n fs.create_file(\"test.csv\", contents=CONTENTS)\n\n adapter = CSVFile(\"test.csv\")\n assert list(adapter.get_data({\"index\": Impossible()}, [])) == []",
"def test_validate_csv():\n duplicate_keys_file_path = os.path.join(\n TEST_DATA_DIR, \"clubs_invalid_duplicate_keys.csv\"\n )\n\n invalid_headers_file_path = os.path.join(\n TEST_DATA_DIR, \"membership_invalid_syntax.csv\"\n )\n\n # Test duplicate keys\n with open(duplicate_keys_file_path) as test_file:\n test_file = test_file.read()\n\n rows = list(csv.DictReader(StringIO(test_file)))\n\n with pytest.raises(ValidationFailed) as v_error:\n validate_csv(rows)\n\n validation_resp = v_error.value.errors[0]\n assert \"error\" in validation_resp\n duplicate_keys = validation_resp[\"detail\"]\n assert \"5\" in duplicate_keys\n assert \"2\" in duplicate_keys\n\n # Test invalid syntax\n with open(invalid_headers_file_path) as test_file:\n test_file = test_file.read()\n\n rows = list(csv.DictReader(StringIO(test_file)))\n with pytest.raises(ValidationFailed) as v_error:\n validate_csv(rows)\n\n validation_resp = v_error.value.errors[0]\n invalid_rows = [x[\"row\"] for x in validation_resp[\"detail\"]]\n assert \"error\" in validation_resp\n assert 3 in invalid_rows\n assert 4 in invalid_rows\n assert 5 in invalid_rows\n\n # Test unicode decode errors\n test_data = b\"\\xff\\xfe_\\x00k\\x00e\\x00y\\x00,\\x00n\\x00a\\x00m\\x00e\\x00\\n\"\n pytest.raises(DecodeFailed, decode_data, test_data)",
"def test_misc_csv_read_inmemory():\n r = csv_reader([\"fieldname_a,fieldname_b\",\n \"mo,bo\",\n \"go,zo\",\n \"fo,po\"])\n fields = r.hdr()\n data = str(fields)\n while True:\n row = r.row()\n if not row: break\n data += '\\n' + str(row)\n assert(data==\"\"\"\n['fieldname_a', 'fieldname_b']\n['mo', 'bo']\n['go', 'zo']\n['fo', 'po']\n \"\"\".strip())",
"def test_schema_invalid_type(self):\n bad_schema = -77\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)",
"def test_non_int_value_raises_an_exception():\n test_file = StringIO(\n u'fri,wed\\na,6'\n )\n\n csv_parser = CSVParser(test_file)\n\n with pytest.raises(ValueError):\n csv_parser.parse()",
"def test_parser():\n data = parse_csv(TEST_DATA)\n assert data['2020-01-03'] == ['recycle']\n assert data['2020-01-08'] == ['bio', 'trash']\n assert data['2021-01-09'] == ['christmas']",
"def test_delimiter_none(self):\n with self.assertRaisesRegexp(Exception, \"delimiter\"):\n self.context.frame.import_csv(self.dataset,\n self.schema, delimiter=None)",
"def test_itercsv_emits_data_lines():\n expected = [\n b'Hello,World\\r\\n',\n b'1,2\\r\\n',\n b'3,4\\r\\n'\n ]\n assert list(itercsv(['Hello', 'World'], [[1, 2], [3, 4]])) == expected",
"def test_basic_dummy_no_match(self):\n self.assertLines([\"a\", \";\", \"examples/dummy.csv\"], [\"a,b,c,a_xfind\", \"1,2,3,\",])",
"def test_delimiter_empty(self):\n with self.assertRaisesRegexp(Exception, \"delimiter\"):\n self.context.frame.import_csv(self.dataset,\n self.schema, delimiter=\"\")",
"def test_csvfile_single_row_of_data(fs: FakeFilesystem) -> None:\n contents = \"\"\"\"a\",\"b\"\n1,2\"\"\"\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n \"b\": Float(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }\n assert list(adapter.get_data({}, [])) == [{\"a\": 1.0, \"b\": 2.0, \"rowid\": 0}]",
"def test_valid_csv(self):\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_csv(url, fname='wine2')\n self.assertTrue(str(type(readerobject)),\"_csv.reader\")",
"def test_parse_no_fields(self):\n received = self._p.parse_line(self._line)\n expected = {}\n msg = 'Line parse with no fields should return None'\n self.assertDictEqual(received, expected, msg)",
"def check_valid_csvformat(self, csv_path):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.reader(file_obj, delimiter=',') # CSV DictReader object\n self.check_valid_csv_header(reader.next())\n self.check_valid_csv_data(reader.next())",
"def __init__(self, message, file_handle, format):\n oh = open(file_handle, \"rU\")\n config.log.error(\"csv/tsv file did not pass the csv parser\")\n config.log.error(\"Message: %s\" % message)\n print(\"-----------------------\")\n print(\"CSV Diagnostic:\")\n if \"skiplines\" in format: # skip the lines.\n if format[\"skiplines\"] != -1:\n for n in range(format[\"skiplines\"]):\n oh.readline().rstrip(\"\\r\\n\")\n\n print(\"0:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"1:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"2:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"3:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"-----------------------\")\n print(\"Format Specifier: %s\" % (\" \".join([\"%s:%s\\t\" % (key, format[key]) for key in format])))\n print(\"Expected Format, based on the format specifier:\")\n oh.close()\n\n # This is a safe-ish version of loadCSV() that intelligently fails.\n\n if \"sniffer\" not in format:\n oh = open(file_handle, \"rU\")\n if \"dialect\" in format:\n reader = csv.reader(oh, dialect=format[\"dialect\"])\n else:\n reader = csv.reader(oh)\n\n try:\n if \"skiplines\" in format:\n skiplines = format[\"skiplines\"]\n else:\n skiplines = 0 # skip any header row by default.\n except:\n print(\"Error: End of File\") # premature end of file, skip out.\n print(\"-----------------------\")\n print(\"Error: %s\" % (message))\n return\n\n for index, column in enumerate(reader): # This is cryptically called column, when it is actually row.\n if index > skiplines:\n if column: # list is empty, so omit.\n if (not (column[0] in typical_headers)):\n d = {}\n for key in format:\n if not (key in ignorekeys): # ignore these tags\n try:\n if not key in d:\n d[key] = {}\n if isinstance(format[key], dict) and \"code\" in format[key]:\n # a code block insertion goes here - any valid lib and one line python code fragment\n # store it as a dict with the key \"code\"\n d[key] = eval(format[key][\"code\"]) # this always fails for some reason...\n else:\n d[key] = str(column[format[key]])\n except:\n d[key] = \"mangled\"\n print(\"%s\" % (\" \".join([\"%s:%s\" % (key, d[key]) for key in d])))\n if index > 3:\n break\n else:\n print(\" No specified format (glbase will guess)\")\n\n print(\"-----------------------\")\n config.log.error(\"End of error output\")",
"def test_row_parsing(self, tmpdir):\n json_file = str(tmpdir.join(\"f.json\"))\n with open(json_file, \"w\") as f:\n json.dump({\"ds\": [{\"file\": \"data.nc\", \"size\": 0, \"mtime\": 0, \"sha256\": 0}]}, f)\n\n create = CsvRowDataset.from_strings\n\n # Check extraneous whitespace is ignored and Yes/No to boolean\n # conversion\n got1 = create([\"ds\", \"100\", \" url\", \"title, here\", \"no\", \"Yes\", json_file])\n expected1 = CsvRowDataset(\"ds\", 100, \"url\", \"title, here\", False, True, json_file)\n assert got1 == expected1\n\n got2 = create([\"ds\", \"100\", \" url\", \"title, here\", \"No\", \"yes\", json_file])\n expected2 = CsvRowDataset(\"ds\", 100, \"url\", \"title, here\", False, True, json_file)\n assert got2 == expected2\n\n # Check invalid int and bool values\n assert pytest.raises(ValueError, create, [\"ds\", \"blah\", \"url\", \"title\", \"Yes\", json_file])\n assert pytest.raises(ValueError, create, [\"ds\", \"200\", \"url\", \"title\", \"blah\", json_file])",
"def check_valid_csv_data(self, row):\n obj = re.match(re.compile('^[0-9]{4}\\,[A-Z]{1}[a-z]{2}\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Data String must be like `1990` `Jan` Check Sample file\")",
"def test_not_enough_cols(parallel, read_csv):\n text = \"\"\"\nA,B,C\n1,2,3\n4,5\n6,7,8\n\"\"\"\n table = read_csv(text, parallel=parallel)\n assert table[\"B\"][1] is not ma.masked\n assert table[\"C\"][1] is ma.masked\n\n with pytest.raises(InconsistentTableError):\n table = FastBasic(delimiter=\",\").read(text)",
"def test_csv_simple_input(self):\n\n # Mix of integer and string data. Ensure that commas and\n # quotes are escaped properly.\n data = [\n {\n 'name': 'Normal string',\n 'item_num': 1,\n },\n {\n 'name': 'String, with, commas',\n 'item_num': 2,\n },\n {\n 'name': 'String with \" quote',\n 'item_num': 3,\n },\n ]\n\n table = TableReportForTesting(data)\n response = table.as_csv(HttpRequest())\n self.assertEqual(response.status_code, 200)\n # Expect cells containing commas to be escaped with quotes.\n content = response.content\n if PY3:\n content = content.decode(settings.DEFAULT_CHARSET).replace('\\x00', '')\n self.assertEqual(\n content,\n 'Name,Item Num\\r\\n'\n 'Normal string,1\\r\\n'\n '\"String, with, commas\",2\\r\\n'\n '\"String with \"\" quote\",3\\r\\n')"
] | [
"0.744",
"0.7357617",
"0.73319453",
"0.7304873",
"0.7091926",
"0.70851797",
"0.7049803",
"0.70358664",
"0.69938964",
"0.694792",
"0.6922245",
"0.68506056",
"0.6797974",
"0.6778899",
"0.67742884",
"0.67630935",
"0.6757114",
"0.67115325",
"0.668969",
"0.6673625",
"0.66619074",
"0.6625474",
"0.6607659",
"0.6575898",
"0.65252995",
"0.6519504",
"0.6515851",
"0.6507197",
"0.64988637",
"0.6492905"
] | 0.76332486 | 0 |
Takes a current board and a grid of scores. Find all of the empty squares with the maximum score and randomly return one of them as a (row, column) tuple. It is an error to call this function with a board that has no empty squares (there's no possible next move), so your function may do whatever it wants in that case. The case where the board is full will NOT be tested. | def get_best_move(board, scores):
empty_squares = board.get_empty_squares()
max_score = -float("inf")
max_list = []
if len(board.get_empty_squares()) == 0:
print "No Empty Tiles Left!"
else:
for row in range(board.get_dim()):
for col in range(board.get_dim()):
if scores[row][col] > max_score and (row, col) in empty_squares:
max_score = scores[row][col]
max_list = [row, col]
max_tuple = (max_list[0], max_list[1])
return max_tuple | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_best_move(board, scores):\n all_scores = {}\n for row_num in range(board.get_dim()):\n for col_num in range(board.get_dim()):\n if board.square(row_num, col_num) == provided.EMPTY:\n all_scores [(row_num, col_num)] = scores[row_num][col_num]\n max_score = max(all_scores.values())\n max_sqrs = []\n for sqr in all_scores:\n if all_scores[sqr] == max_score:\n max_sqrs.append(sqr)\n return random.choice(max_sqrs)",
"def get_best_move(board, scores):\n best_score = -50000000\n\n # iterates through board spaces and finds the max score\n for col in range(board.get_dim()):\n for row in range(board.get_dim()):\n if scores[row][col] > best_score and board.square(row, col) == provided.EMPTY:\n best_score = scores[row][col]\n\n # finds any empty space that matches the best score\n empty_spaces = board.get_empty_squares()\n move_list = []\n for space in empty_spaces:\n if scores[space[0]][space[1]] == best_score:\n move_list.append(space)\n\n best_move = random.choice(move_list)\n row = best_move[0]\n col = best_move[1]\n\n return row, col",
"def get_best_move(board, scores): \n empty_squares = board.get_empty_squares()\n highest_score = None\n best_pos = []\n \n for empty in range(len(empty_squares)):\n pos = empty_squares[empty] \n if highest_score == None:\n highest_score = scores[pos[0]][pos[1]]\n if scores[pos[0]][pos[1]] >= highest_score:\n highest_score = scores[pos[0]][pos[1]]\n \n for empty in range(len(empty_squares)):\n pos = empty_squares[empty]\n if scores[pos[0]][pos[1]] == highest_score:\n best_pos.append(pos) \n return random.choice(best_pos)",
"def get_best_move(board, scores):\n if board.check_win()==None:\n maximum=-999\n ret_location=(0,0)\n empty=board.get_empty_squares()\n #print empty\n for position in empty:\n if scores[position[0]][position[1]]>maximum:\n maximum=scores[position[0]][position[1]]\n #print max\n ret_location=(position[0],position[1])\n return ret_location\n else:\n return None",
"def maxit(board):\n maxval = -2\n\n row_index = None\n col_index = None\n # if terminal board, terminate the function.\n if terminal(board) == True:\n result = utility(board)\n return (result, 0, 0) \n # for each possible move, calculate its utility, saving the maximum.\n for i in range(0, 3):\n for j in range(0, 3):\n if board[i][j] == EMPTY:\n board[i][j] = X\n (m, mini, minj) = minit(board)\n if m > maxval:\n maxval=m\n row_index=i\n col_index=j\n board[i][j] = EMPTY\n return (maxval, row_index, col_index)",
"def get_best_move(board, scores):\r\n blankies = board.get_empty_squares()\r\n max_score = scores[blankies[0][0]][blankies[0][1]]\r\n result = []\r\n for current in blankies:\r\n if scores[current[0]][current[1]] > max_score:\r\n max_score = scores[current[0]][current[1]]\r\n for current in blankies:\r\n if scores[current[0]][current[1]] == max_score:\r\n result.append(current)\r\n return random.choice(result)",
"def get_best_move(board, scores):\n empty = board.get_empty_squares()\n if len(empty) == 0:\n return\n best_move = None\n best_score = None\n for square in empty:\n if best_move == None or scores[square[0]][square[1]] > best_score:\n best_move = square\n best_score = scores[square[0]][square[1]]\n return best_move",
"def find_empty_squares(board):\n for i in range(len(board)):\n for j in range(len(board[0])):\n if board[i][j] == 0:\n return (i,j) #row , column\n\n #if there are no blank squres\n return None",
"def pick_best_move(self, board, piece):\n valid_locations = self.get_valid_locations(board)\n best_score = -8000 # start with something very low so it doesn't mess up the \"evaluate score\" function\n best_column = random.choice(valid_locations) # random column in case the scores are all equal\n\n for col in valid_locations:\n row = self.get_available_row(board, col)\n temp_board = copy.deepcopy(board)\n self.drop_piece(temp_board, row, col, piece)\n score = self.score_position(temp_board, piece, \"medium\")\n if score > best_score:\n best_score = score\n best_column = col\n\n return best_column",
"def find_empty(game_board):\n for row in range(len(game_board)):\n for col in range(len(game_board[row])):\n if len(game_board[row][col]) == 2:\n return row, col\n for row in range(len(game_board)):\n for col in range(len(game_board[row])):\n if len(game_board[row][col]) >= 3:\n return row, col\n\n return None",
"def find_best_move(board):\n new_board = board.get_board()\n\n # X | X | X <-- Check for win on this row\n # ---------\n # 3 | 4 | 5\n # ---------\n # 6 | 7 | 9\n if new_board[0] == new_board[1] and new_board[2] == \"2\":\n return 2\n elif new_board[0] == new_board[2] and new_board[1] == \"1\":\n return 1\n elif new_board[1] == new_board[2] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | 2\n # ---------\n # X | X | X <-- Check for win on this row\n # ---------\n # 6 | 7 | 9\n elif new_board[3] == new_board[4] and new_board[5] == \"5\":\n return 5\n elif new_board[3] == new_board[5] and new_board[4] == \"4\":\n return 4\n elif new_board[4] == new_board[5] and new_board[3] == \"3\":\n return 3\n\n # 0 | 1 | 2\n # ---------\n # 3 | 4 | 5\n # ---------\n # X | X | X <-- Check for win on this row\n elif new_board[6] == new_board[7] and new_board[8] == \"8\":\n return 8\n elif new_board[6] == new_board[8] and new_board[7] == \"7\":\n return 7\n elif new_board[7] == new_board[8] and new_board[6] == \"6\":\n return 6\n\n # X | 1 | 2 Check for win on column one\n # ---------\n # X | 4 | 5\n # ---------\n # X | 7 | 9\n elif new_board[0] == new_board[3] and new_board[6] == \"6\":\n return 6\n elif new_board[0] == new_board[6] and new_board[3] == \"3\":\n return 3\n elif new_board[6] == new_board[3] and new_board[0] == \"0\":\n return 0\n\n # 0 | X | 2 Checks for win on column two\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | X | 9\n elif new_board[1] == new_board[4] and new_board[7] == \"7\":\n return 7\n elif new_board[1] == new_board[7] and new_board[4] == \"4\":\n return 4\n elif new_board[7] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | 4 | X\n # ---------\n # 6 | 7 | X\n elif new_board[2] == new_board[5] and new_board[8] == \"8\":\n return 8\n elif new_board[2] == new_board[8] and new_board[5] == \"5\":\n return 5\n elif new_board[8] == new_board[5] and new_board[2] == \"2\":\n return 2\n\n # X | 1 | 2\n # ---------\n # 3 | X | 5\n # ---------\n # 6 | 7 | X\n elif new_board[0] == new_board[4] and new_board[8] == \"8\":\n return 8\n elif new_board[0] == new_board[8] and new_board[4] == \"4\":\n return 4\n elif new_board[8] == new_board[4] and new_board[0] == \"0\":\n return 0\n\n # 0 | 1 | X\n # ---------\n # 3 | X | 5\n # ---------\n # X | 7 | 9\n elif new_board[2] == new_board[4] and new_board[6] == \"6\":\n return 6\n elif new_board[2] == new_board[6] and new_board[4] == \"4\":\n return 4\n elif new_board[6] == new_board[4] and new_board[2] == \"2\":\n return 2\n\n # If corners are empty, play there\n elif new_board[0] == \"0\" or new_board[2] == \"2\" or new_board[6] == \"6\" or new_board[8] == \"8\":\n try_spot = 0\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2\n\n # If middle is empty, play there\n elif new_board[4] == \"4\":\n return 4\n\n # Finally if edges are empty try there\n elif new_board[1] == \"1\" or new_board[3] == \"3\" or new_board[5] == \"5\" or new_board[7] == \"7\":\n try_spot = 1\n while True:\n if new_board[try_spot] != \"X\" and new_board[try_spot] != \"O\":\n return try_spot\n else:\n try_spot = try_spot + 2",
"def winner(board):\n # return 0[[0EMPTY, 1EMPTY, 2EMPTY],\n # 1[EMPTY, EMPTY, EMPTY],\n # 2[EMPTY, EMPTY, EMPTY]]\n # Check columns\n if board[0][0] == board[1][0] and board[1][0] == board[2][0]:\n return board[0][0]\n elif board[0][1] == board[1][1] and board[1][1] == board[2][1]:\n return board[0][1]\n elif board[0][2] == board[1][2] and board[1][2] == board[2][2]:\n return board[0][2]\n # Check rows\n elif all(i == board[0][0] for i in board[0]):\n return board[0][0]\n elif all(i == board[1][0] for i in board[1]):\n return board[1][0]\n elif all(i == board[2][0] for i in board[2]):\n return board[2][0]\n # Check diagonals\n elif board[0][0] == board[1][1] and board[1][1] == board[2][2]:\n return board[0][0]\n elif board[0][2] == board[1][1] and board[1][1] == board[2][0]:\n return board [0][2]\n else:\n return None",
"def winner(board):\n\n # Check none empty horizontals\n for i in range(3):\n if board[i][0] and board[i][0] == board[i][1] == board[i][2]:\n return board[i][0]\n\n # Check none empty verticals\n for j in range(3):\n if board[0][j] and board[0][j] == board[1][j] == board[2][j]:\n return board[0][j]\n\n # Check none empty L-R diagonal\n if board[0][0] and board[0][0] == board[1][1] == board[2][2]:\n return board[0][0]\n\n # Check none empty R-L diagonal\n if board[0][2] and board[0][2] == board[1][1] == board[2][0]:\n return board[0][2]",
"def winner(board):\n if board[0][0] != EMPTY and (board[0][0] == board[0][1] == board[0][2] \n or board[0][0] == board[1][1] == board[2][2] \n or board[0][0] == board[1][0] == board[2][0]):\n return board[0][0]\n\n elif board[1][1] != EMPTY and (board[1][0] == board[1][1] == board[1][2]\n or board[0][1] == board[1][1] == board[2][1]):\n return board[1][1]\n \n elif board[2][2] != EMPTY and (board[0][2] == board[1][2] == board[2][2]\n or board[2][0] == board[2][1] == board[2][2]):\n return board[2][2]\n \n elif board[2][0] != EMPTY and (board[2][0] == board[1][1] == board[0][2]):\n return board[2][0]\n \n else:\n None",
"def find_max_score_location(grid, shape):",
"def minimax(state, depth, player):\n if depth == 9:\n row = choice([0, 1, 2])\n col = choice([0, 1, 2])\n return row, col, ''\n\n if player == COMP:\n best = [-1, -1, float(\"-inf\")]\n else:\n best = [-1, -1, float(\"inf\")]\n\n if depth == 0 or state.has_tic_tac_toe(COMP) or state.has_tic_tac_toe(HUMAN):\n score = heuristic(state, depth)\n return [-1, -1, score]\n \"\"\"\n Checks if any of the player is one away from winning in any board and make the appropriate move.\n \"\"\"\n if player==COMP:\n empty_cells=get_empty_cells(state)\n dangerous_cells=state.is_one_away_from_tic_tac_toe((player%2)+1)\n if dangerous_cells:\n found_dangerous_cells=True\n else:\n found_dangerous_cells=False\n print \"no dangerous local boards\"\n favoring_cells=state.is_one_away_from_tic_tac_toe(player)\n if favoring_cells:\n found_favoring_cells=True\n else:\n found_favoring_cells=False\n print \"no favoring local boards\"\n if found_dangerous_cells==False and found_favoring_cells==False:\n pass\n if found_dangerous_cells==False and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in favoring_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==False:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n if found_dangerous_cells==True and found_favoring_cells==True:\n empty_cells[:]=[]\n for cell in dangerous_cells:\n empty_cells.append(cell)\n else:\n empty_cells=get_empty_cells(state)\n for cell in empty_cells:\n row, col = cell[0], cell[1]\n state.board[row][col] = player\n score = minimax(state, depth - 1, (player % 2) + 1)\n state.board[row][col] = 0\n score[0], score[1] = row, col\n if player == COMP:\n if score[2] >= best[2]:\n if score[2]==best[2]:\n \"\"\"\n Favors middle positions over sides or corners\n MIDDLE > CORNERS > SIDES\n \"\"\"\n if (best[0]==0 and best[1]==0) or (best[0]==0 and best[1]==2) or (best[0]==2 and best[1]==0) or (best[0]==2 and best[1]==2):\n if score[0]==0 and score[1]==0: #favoring centre position over diagonal position\n best=score\n print(\"centre position chosen over diagonal positions\")\n else:\n if ((score[0]==0 and score[1]==1) or (score[0]==1 and score[1]==0) or (score[0]==1 and score[1]==2) or (score[0]==2 and score[1]==1))==0:\n best=score #favoring any position over side position as long as the new position is not a side position too\n print(\"diagonal and centre positions chosen over side positions\")\n else:\n best = score\n else:\n bestMoves=[]\n if score[2] < best[2]:\n best=score\n return best",
"def largest_tile(self, board):\n return max(get_state(board))",
"def winner(board):\n # Checking for 3 in a row\n for row in board:\n if row[0] is not EMPTY and row[0] == row[1] == row[2]:\n return row[0]\n\n # Checking for 3 in a col\n for col in range(len(board)):\n if board[0][col] is not EMPTY and board[0][col] == board[1][col] == board[2][col]:\n return board[0][col]\n\n # Checking for Diagonals\n if board[0][0] is not EMPTY and board[0][0] == board[1][1] == board[2][2]:\n return board[0][0]\n \n if board[0][2] is not EMPTY and board[0][2] == board[2][0] == board[1][1]:\n return board[0][2]\n\n return None",
"def _max(self, board: Board) -> (float, int):\n\n #\n # First we check if we have seen this board position before, and if yes just return the cached value\n #\n board_hash = board.hash_value()\n if board_hash in self.cache:\n return self.cache[board_hash]\n\n #\n # Init the min value as well as action. Min value is set to DRAW as this value will pass through in case\n # of a draw\n #\n max_value = self.DRAW_VALUE\n action = -1\n\n #\n # If the game has already finished we return. Otherwise we look at possible continuations\n #\n winner = board.who_won()\n if winner == self.side:\n max_value = self.WIN_VALUE\n action = -1\n elif winner == board.other_side(self.side):\n max_value = self.LOSS_VALUE\n action = -1\n else:\n for index in [i for i, e in enumerate(board.state) if board.state[i] == EMPTY]:\n b = Board(board.state)\n b.move(index, self.side)\n\n res, _ = self._min(b)\n if res > max_value or action == -1:\n max_value = res\n action = index\n\n # Shortcut: Can't get better than that, so abort here and return this move\n if max_value == self.WIN_VALUE:\n self.cache[board_hash] = (max_value, action)\n return max_value, action\n\n self.cache[board_hash] = (max_value, action)\n return max_value, action",
"def winner(board):\n # check columns\n for j in range(3):\n if board[1][j] == board[0][j] and board[0][j] == board[2][j] and board[1][j] != EMPTY:\n return board[1][j]\n # check rows\n for i in range(3):\n if board[i][0] == board[i][1] and board[i][1] == board[i][2] and board[i][0] != EMPTY:\n return board[i][0]\n # check diagnols\n if board[0][0] == board[1][1] and board[1][1] == board[2][2] and board[0][0] != EMPTY:\n return board[1][1]\n if board[0][2] == board[1][1] and board[1][1] == board[2][0] and board[0][2] != EMPTY:\n return board[1][1]\n return None",
"def winner(board):\n for i in range(len(board)):\n\n # Check rows\n if board[i][0] == board[i][1] == board[i][2] and not board[i][1] == EMPTY:\n return board[i][1]\n\n # Check columns\n elif board[0][i] == board[1][i] == board[2][i] and not board[1][i] == EMPTY:\n return board[1][i]\n\n # Check diagonals\n if board[0][0] == board[1][1] == board[2][2] and not board[1][1] == EMPTY:\n return board[1][1]\n\n if board[2][0] == board[1][1] == board[0][2] and not board[1][1] == EMPTY:\n return board[1][1]\n\n # No winner if get to this point\n return None",
"def test_minimax_def_col():\n game_board = [[PLAYERX, PLAYERO, PLAYERX], [PLAYERX, PLAYERX, PLAYERO],\n [EMPTY, EMPTY, PLAYERO]]\n board = TTTBoard(board=game_board)\n move = get_move(board, PLAYERO)\n assert move == (2, 0), \"Bad Move: \" + str(move)\n\n game_board = [[PLAYERO, PLAYERX, PLAYERO], [PLAYERX, PLAYERX, PLAYERO],\n [EMPTY, EMPTY, PLAYERX]]\n board = TTTBoard(board=game_board)\n move = get_move(board, PLAYERO)\n assert move == (2, 1), \"Bad Move: \" + str(move)\n\n game_board = [[PLAYERO, PLAYERO, PLAYERX], [PLAYERX, PLAYERO, PLAYERX],\n [EMPTY, PLAYERX, EMPTY]]\n board = TTTBoard(board=game_board)\n move = get_move(board, PLAYERO)\n assert move == (2, 2), \"Bad Move: \" + str(move)",
"def winner(board):\n # Check Rows\n for row in board:\n if row[0] != EMPTY and row[0] == row[1] and row[0] == row[2]:\n return row[0]\n \n # Check Columns\n for j in range(3):\n if board[0][j] != EMPTY and board[0][j] == board[1][j]:\n if board[0][j] == board[2][j]:\n return board[0][j]\n \n # Check Diagonals\n if board[1][1] != EMPTY:\n if board[0][0] == board[1][1] and board[0][0] == board[2][2]:\n return board[0][0]\n if board[0][2] == board[1][1] and board[0][2] == board[2][0]:\n return board[0][2]\n\n return None",
"def find_blank_cell(self, board: list):\n cells = {}\n for i in range(9): # Iterate over rows\n for j in range(9): # Iterate over columns\n if board[i][j] == 0:\n cells[str(i) + ' ' + str(j)] = self.count_numbers(board, j, i)\n m = max(cells.values())\n for k in cells:\n if cells[k] == m:\n s = k.split()\n x, y = int(s[1]), int(s[0])\n return x, y",
"def firstEmptyCell(board):\r\n for i in range(9):\r\n for j in range(9):\r\n if board[i][j] == 0:\r\n return (i, j) # row, col\r\n return None",
"def negamax(self):\n if self.check_winner():\n return 1\n elif self.full():\n return 0\n else:\n bestScore = -10\n for r, c in self.empty_cells():\n self.grid[r][c] = self.player\n self.next_player() \n score = -self.negamax()\n if score > bestScore:\n bestScore = score\n self.grid[r][c] = GameModel.EMPTY\n self.next_player()\n return bestScore",
"def find_empty_space(board: list) -> tuple:\n board_length = len(board)\n for i in range(board_length):\n for j in range(board_length):\n if board[i][j] == 0:\n return (i,j)",
"def maximum_sub_square(square_matrix):\n if not square_matrix:\n return (0, 0, 0)\n n = len(square_matrix)\n start_r, start_c, size = 0, 0, 0\n for i in range(n):\n # if there is no hope to find larger one, then break\n if i + size >= n:\n break\n # O(n^n)\n new_c, new_size = get_max_black_square(square_matrix, i, size)\n if new_size > size:\n start_r = i\n start_c = new_c\n size = new_size\n return (start_r, start_c, size)",
"def findEmpty(grid):\n for x in range(len(grid.board)):\n for y in range(len(grid.board[0])):\n if grid.board[x][y] == 0:\n return [x,y]",
"def get_move(self, board):\n while True:\n col = random.randint(0, board.width)\n row = board.try_move(col)\n\n if row >= 0:\n break\n\n return row, col"
] | [
"0.7876179",
"0.77359706",
"0.77333856",
"0.76157695",
"0.754121",
"0.7529987",
"0.7299099",
"0.72018903",
"0.68783015",
"0.68334043",
"0.6773583",
"0.67494977",
"0.67163146",
"0.6645171",
"0.66307026",
"0.66291565",
"0.6628532",
"0.6612501",
"0.6598513",
"0.6596278",
"0.65574205",
"0.6520754",
"0.65175307",
"0.6452703",
"0.6446695",
"0.6445103",
"0.64218986",
"0.6411768",
"0.6400169",
"0.6384641"
] | 0.8180115 | 0 |
Return the total utilization for ALL TASKS, INCLUDING MIGRATORY | def util(self):
total = 0.0
for task in self._slices:
total += task.util()
return self._nonmigutil + total | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def totaltasks(conn):\n c = conn.cursor()\n r = c.execute(\"SELECT count(id) as total_tasks FROM event WHERE type_id = \" + taskid(\"run_task\")).fetchall()\n return r[0]['total_tasks']",
"def report_total_usage(self):\n work_time = 0\n if self.type == 'normal':\n work_time = self.fwk.fwk_global_time - self.start_exec_time\n elif self.type == 'sandia_work':\n self.total_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_usage = self.total_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.completed_work += self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.sim.rework_todo += self.fwk.fwk_global_time - self.start_exec_time\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_rework':\n self.total_rework_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n if self.state == \"running\":\n # update total work done\n self.sim.next_ckpt = self.sim.ckpt_interval - (self.fwk.fwk_global_time - self.start_exec_time)\n self.sim.rework_todo -= self.fwk.fwk_global_time - self.start_exec_time\n elif self.state == \"failed\":\n # add this work to the work to be redone\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_ckpt':\n self.total_ckpt_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_ckpt_usage = self.total_ckpt_time * self.nproc\n if self.state == \"running\":\n # update last ckpt\n self.sim.last_ckpt = self.sim.completed_work\n elif self.state == \"failed\":\n # add work to rework\n self.sim.rework_todo += self.sim.next_ckpt\n self.state = \"not_ready\"\n self.num_faults += 1\n elif self.type == 'sandia_restart':\n print(\"time spent in rework\", self.fwk.fwk_global_time - self.start_exec_time)\n self.total_restart_time += self.fwk.fwk_global_time - self.start_exec_time\n self.total_restart_usage = self.total_restart_time * self.nproc\n #if self.state == \"running\":\n # nothing to do?\n # pass\n if self.state == \"failed\":\n # gotta try again\n self.state = \"ready\"\n self.num_faults += 1\n else:\n print(\"problems updating state in report_total_usage\")\n raise\n if self.type == 'normal':\n if self.sim.state == 'rework':\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else: # sim.state == 'work'\n if self.retry:\n self.total_rework_time += work_time\n self.total_rework_usage = self.total_rework_time * self.nproc\n else:\n self.total_time += work_time\n self.total_usage = self.total_time * self.nproc",
"def calculate_task_volatile_size(**kwargs):\n task = kwargs.get(\"data\", {})\n memory_members = kwargs.get(\"resources\", {}).get(\"memory\", [])\n interleave_sets = task.get(\"Payload\").get(\"JsonBody\").get(\"InterleaveSets\", [])\n selected_members = []\n for interleave_set in interleave_sets:\n for pmem in memory_members:\n if PmemHelpers.compare_id(\n interleave_set.get(\"Memory\").get(\"@odata.id\"), pmem.get(\"@odata.id\")\n ):\n selected_members.append(pmem)\n # finding total capacity\n total_capacity = Mapper.get_single_attribute(\n selected_members,\n \"TotalCapacity\",\n MappingTable.summary.value,\n output_as_json=True,\n )\n total_capacity = total_capacity.get(\"TotalCapacity\", {}).get(\"Value\", 0)\n volatile_size = total_capacity\n # finding memory chunk size\n memory_chunk_size = Mapper.get_single_attribute(\n task, \"MemoryChunkSize\", MappingTable.tasks.value, output_as_json=True\n )\n memory_chunk_size = memory_chunk_size.get(\"MemoryChunkSize\", {}).get(\n \"Value\", None\n )\n if memory_chunk_size is not None:\n size = memory_chunk_size\n volatile_size = total_capacity - size\n else:\n # finding memory chunk size percentage\n memory_chunk_size_percentage = Mapper.get_single_attribute(\n task,\n \"MemoryChunkSizePercentage\",\n MappingTable.tasks.value,\n output_as_json=True,\n )\n memory_chunk_size_percentage = memory_chunk_size_percentage.get(\n \"MemoryChunkSizePercentage\", {}\n ).get(\"Value\", None)\n if memory_chunk_size_percentage is not None:\n size = total_capacity * memory_chunk_size_percentage / 100\n volatile_size = total_capacity - size\n # returning value in MiB\n return volatile_size * 1024",
"def get_overall_cpu_util(dut, exclude_proc_name=None):",
"def test_task_count_total(self):\r\n tasks.count_total()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.TOTAL_CT)\r\n self.assertEqual(stat.data, 4)",
"def num_tasks(self) -> int:\n return 1",
"def task_summary_dict(request, tasks, fieldlist=None):\n sumd = {}\n numeric_fields_task = ['reqid', 'corecount', 'taskpriority', 'workqueue_id']\n\n if fieldlist:\n flist = fieldlist\n else:\n flist = copy.deepcopy(const.TASK_FIELDS_STANDARD)\n\n for task in tasks:\n for f in flist:\n if 'tasktype' in request.session['requestParams'] and request.session['requestParams']['tasktype'].startswith('analy'):\n # Remove the noisy useless parameters in analysis listings\n if flist in ('reqid', 'stream', 'tag'):\n continue\n\n if 'taskname' in task and len(task['taskname'].split('.')) == 5:\n if f == 'project':\n try:\n if not f in sumd:\n sumd[f] = {}\n project = task['taskname'].split('.')[0]\n if not project in sumd[f]:\n sumd[f][project] = 0\n sumd[f][project] += 1\n except:\n pass\n if f == 'stream':\n try:\n if not f in sumd:\n sumd[f] = {}\n stream = task['taskname'].split('.')[2]\n if not re.match('[0-9]+', stream):\n if not stream in sumd[f]:\n sumd[f][stream] = 0\n sumd[f][stream] += 1\n except:\n pass\n if f == 'tag':\n try:\n if not f in sumd:\n sumd[f] = {}\n tags = task['taskname'].split('.')[4]\n if not tags.startswith('job_'):\n tagl = tags.split('_')\n tag = tagl[-1]\n if not tag in sumd[f]:\n sumd[f][tag] = 0\n sumd[f][tag] += 1\n except:\n pass\n if f in task:\n val = task[f]\n if val is None or val == '':\n val = 'Not specified'\n if val == 'anal':\n val = 'analy'\n if f not in sumd:\n sumd[f] = {}\n if val not in sumd[f]:\n sumd[f][val] = 0\n sumd[f][val] += 1\n\n # convert to ordered lists\n suml = []\n for f in sumd:\n itemd = {}\n itemd['field'] = f\n iteml = []\n kys = sumd[f].keys()\n if f != 'ramcount':\n for ky in kys:\n iteml.append({'kname': ky, 'kvalue': sumd[f][ky]})\n iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())\n else:\n newvalues = {}\n for ky in kys:\n if ky != 'Not specified':\n roundedval = int(ky / 1000)\n else:\n roundedval = -1\n if roundedval in newvalues:\n newvalues[roundedval] += sumd[f][ky]\n else:\n newvalues[roundedval] = sumd[f][ky]\n for ky in newvalues:\n if ky >= 0:\n iteml.append({'kname': str(ky) + '-' + str(ky + 1) + 'GB', 'kvalue': newvalues[ky]})\n else:\n iteml.append({'kname': 'Not specified', 'kvalue': newvalues[ky]})\n iteml = sorted(iteml, key=lambda x: str(x['kname']).lower())\n itemd['list'] = iteml\n suml.append(itemd)\n suml = sorted(suml, key=lambda x: x['field'])\n return suml",
"def compute_forgetting_metric(self, task_results, task_steps, task_id, num_tasks, num_cycles, return_scale):\n per_run_forgetting_per_subsequent = {id: {} for id in range(num_tasks)} # Inner dict maps cycle to total\n \n for run_id, task_result in enumerate(task_results):\n xs = np.array([t[0] for t in task_result])\n ys = np.array([t[1] for t in task_result]) * return_scale\n \n # Select only the rewards from the region up to and including the training of the given task\n task_rewards = self.get_rewards_for_region(xs, ys, [None, (task_id+1) * task_steps])\n max_task_value = task_rewards.max()\n \n for cycle_id in range(num_cycles):\n for subsequent_task_id in range(num_tasks):\n # It's not really \"catastrophic forgetting\" if we haven't seen the task yet, so skip the early tasks\n if cycle_id == 0 and subsequent_task_id <= task_id:\n continue\n \n offset = cycle_id * num_tasks\n \n if USE_ISOLATED_FORGETTING:\n task_rewards = self.get_rewards_for_region(xs, ys, [None, (subsequent_task_id + offset) * task_steps])\n max_task_value = task_rewards[-1]\n \n subsequent_region = [(subsequent_task_id + offset) * task_steps,\n (subsequent_task_id + offset + 1) * task_steps]\n subsequent_task_rewards = self.get_rewards_for_region(xs, ys, subsequent_region)\n last_reward = subsequent_task_rewards[-1]\n forgetting = max_task_value - last_reward\n \n if cycle_id not in per_run_forgetting_per_subsequent[subsequent_task_id]:\n per_run_forgetting_per_subsequent[subsequent_task_id][cycle_id] = []\n per_run_forgetting_per_subsequent[subsequent_task_id][cycle_id].append(forgetting)\n \n return per_run_forgetting_per_subsequent",
"def calc_task_budget(self, task, assignment):\n # Get the agents assigned to this task\n agents_assigned = {a for a, tasks in assignment.items() if task in tasks}\n budget_spent = sum([self.task_cost(a, task) for a in agents_assigned])\n return self.task_budget(task) - budget_spent",
"def total_cost(self, system=None):\n system = system or self.system()\n if system == 'grid':\n cost = self['system (grid)']['internal system nodal cost']\n else:\n cost = self['system (%s)' % system]['system nodal cost']\n cost = float(cost)\n return cost",
"def total_management_cost(self):\n total = 0\n total += self.output_dict['insurance_usd']\n total += self.output_dict['construction_permitting_usd']\n total += self.output_dict['bonding_usd']\n total += self.output_dict['project_management_usd']\n total += self.output_dict['markup_contingency_usd']\n total += self.output_dict['engineering_usd']\n total += self.output_dict['site_facility_usd']\n return total",
"def estimate(self):\n self.p_est = sum([t.estimate() for t in self.tasks])\n return self.p_est",
"def execution_cost(self):\n #TODO: Compute execution cost\n total_cost = 0\n for res_cfg in self.resource_configurations:\n mapped_tasks = list(filter(lambda x: x.resource_schedule.config == res_cfg, self.schedule_mapping_list))\n makespan_res = SchedulePlan._makespan(mapped_tasks)\n total_cost += makespan_res * res_cfg.cost_hour_usd\n return total_cost",
"def get_utilization(self, current_time):\n\n # Calculate utilization for all servers.\n for server in self.server_list:\n server.get_utilization(current_time)",
"def overall_progress(app_id):\r\n sql = text('''SELECT task.id, n_answers,\r\n COUNT(task_run.task_id) AS n_task_runs\r\n FROM task LEFT OUTER JOIN task_run ON task.id=task_run.task_id\r\n WHERE task.app_id=:app_id GROUP BY task.id''')\r\n results = db.engine.execute(sql, app_id=app_id)\r\n n_expected_task_runs = 0\r\n n_task_runs = 0\r\n for row in results:\r\n tmp = row[2]\r\n if row[2] > row[1]:\r\n tmp = row[1]\r\n n_expected_task_runs += row[1]\r\n n_task_runs += tmp\r\n pct = float(0)\r\n if n_expected_task_runs != 0:\r\n pct = float(n_task_runs) / float(n_expected_task_runs)\r\n return (pct * 100)",
"def get_total_assigned(self):\n return sum(self.n_assigned_list)",
"def get_utilization(self, node: int) -> float:\n return self.busy[node].pmf(1)",
"def get_current_task_value(self, task):\n stats = self.get_statistics(task)\n if stats is None:\n return None\n mem_rss_bytes = int(stats['mem_rss_bytes'])\n mem_limit_bytes = int(stats['mem_limit_bytes'])\n return 100 * (float(mem_rss_bytes) / float(mem_limit_bytes))",
"def rule(model):\n ind_i = model.timeslots\n ind_j = model.tasks\n total = sum(self.task_willpower_load[j] * (\n model.A[i, j] + 2 * model.A2[i, j] + 3 * model.A3[i, j] + 4 *\n model.A4[i, j]) for i in ind_i for j in ind_j)\n return None, total, 0",
"def num_tasks(self) -> int:\n return self.data[0].num_tasks() if len(self.data) > 0 else None",
"def get_total_n_cpu(self) -> int:",
"def wg_task_summary(request, fieldname='workinggroup', view='production', taskdays=3):\n query = {}\n hours = 24 * taskdays\n startdate = datetime.now() - timedelta(hours=hours)\n startdate = startdate.strftime(settings.DATETIME_FORMAT)\n enddate = datetime.now().strftime(settings.DATETIME_FORMAT)\n query['modificationtime__castdate__range'] = [startdate, enddate]\n if fieldname == 'workinggroup':\n query['workinggroup__isnull'] = False\n if view == 'production':\n query['tasktype'] = 'prod'\n elif view == 'analysis':\n query['tasktype'] = 'anal'\n\n if 'processingtype' in request.session['requestParams']:\n query['processingtype'] = request.session['requestParams']['processingtype']\n\n if 'workinggroup' in request.session['requestParams']:\n query['workinggroup'] = request.session['requestParams']['workinggroup']\n\n if 'project' in request.session['requestParams']:\n query['taskname__istartswith'] = request.session['requestParams']['project']\n\n summary = JediTasks.objects.filter(**query).values(fieldname, 'status').annotate(Count('status')).order_by(\n fieldname, 'status')\n totstates = {}\n tottasks = 0\n wgsum = {}\n for state in const.TASK_STATES:\n totstates[state] = 0\n for rec in summary:\n wg = rec[fieldname]\n status = rec['status']\n count = rec['status__count']\n if status not in const.TASK_STATES:\n continue\n tottasks += count\n totstates[status] += count\n if wg not in wgsum:\n wgsum[wg] = {}\n wgsum[wg]['name'] = wg\n wgsum[wg]['count'] = 0\n wgsum[wg]['states'] = {}\n wgsum[wg]['statelist'] = []\n for state in const.TASK_STATES:\n wgsum[wg]['states'][state] = {}\n wgsum[wg]['states'][state]['name'] = state\n wgsum[wg]['states'][state]['count'] = 0\n wgsum[wg]['count'] += count\n wgsum[wg]['states'][status]['count'] += count\n\n # convert to ordered lists\n suml = []\n for f in wgsum:\n itemd = {}\n itemd['field'] = f\n itemd['count'] = wgsum[f]['count']\n kys = copy.deepcopy(const.TASK_STATES)\n iteml = []\n for ky in kys:\n iteml.append({'kname': ky, 'kvalue': wgsum[f]['states'][ky]['count']})\n itemd['list'] = iteml\n suml.append(itemd)\n suml = sorted(suml, key=lambda x: x['field'])\n return suml",
"def total_te(self):\r\n return sum(map(lambda x: self.times[x]['te'], self.times))",
"def summary(self, **kwargs):\n rows = self.api.query(None, None, self.Task.TASKSUMMARY_sql)\n return rows",
"def get_current_task_value(self, task):\n stats = self.get_statistics(task)\n if stats is None:\n return None\n return stats['cpus_system_time_secs'] + stats['cpus_user_time_secs']",
"def sum(self):\n\n return time_stat(self, stat=\"sum\")",
"def n_available_tasks(app_id, user_id=None, user_ip=None):\r\n\r\n if user_id and not user_ip:\r\n query = text('''SELECT COUNT(id) AS n_tasks FROM task WHERE NOT EXISTS\r\n (SELECT task_id FROM task_run WHERE\r\n app_id=:app_id AND user_id=:user_id AND task_id=task.id)\r\n AND app_id=:app_id AND state !='completed';''')\r\n result = db.engine.execute(query, app_id=app_id, user_id=user_id)\r\n else:\r\n if not user_ip:\r\n user_ip = '127.0.0.1'\r\n query = text('''SELECT COUNT(id) AS n_tasks FROM task WHERE NOT EXISTS\r\n (SELECT task_id FROM task_run WHERE\r\n app_id=:app_id AND user_ip=:user_ip AND task_id=task.id)\r\n AND app_id=:app_id AND state !='completed';''')\r\n result = db.engine.execute(query, app_id=app_id, user_ip=user_ip)\r\n n_tasks = 0\r\n for row in result:\r\n n_tasks = row.n_tasks\r\n return n_tasks",
"def _get_number_of_subtasks(total_num_items, items_per_query, items_per_task):\r\n total_num_tasks = 0\r\n num_queries = int(math.ceil(float(total_num_items) / float(items_per_query)))\r\n num_items_remaining = total_num_items\r\n for _ in range(num_queries):\r\n num_items_this_query = min(num_items_remaining, items_per_query)\r\n num_items_remaining -= num_items_this_query\r\n num_tasks_this_query = int(math.ceil(float(num_items_this_query) / float(items_per_task)))\r\n total_num_tasks += num_tasks_this_query\r\n\r\n return total_num_tasks",
"def utilization(user, ressource):\n if ressource == 'accounts':\n return Account.objects.filter(vhost__in=list(get_vhosts(user))).count()\n return None",
"def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))"
] | [
"0.62125945",
"0.61996835",
"0.61156285",
"0.592562",
"0.59167385",
"0.58263236",
"0.57747537",
"0.5767992",
"0.5742559",
"0.573287",
"0.5701323",
"0.56973714",
"0.5653184",
"0.56467575",
"0.560962",
"0.5592798",
"0.5591778",
"0.55893356",
"0.558803",
"0.5564105",
"0.55607414",
"0.55564547",
"0.55474705",
"0.5546989",
"0.55280274",
"0.5493713",
"0.5476718",
"0.54758215",
"0.5460825",
"0.5455109"
] | 0.652446 | 0 |
Return a list with all slices | def slices(self):
return self._slices | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def img_to_slices(img):\n res = []\n\n for i, slice_img in enumerate(img):\n res.append(slice_img)\n return res",
"def _slice(self, start, stop, step=None):\n\n slices = [slice(None)] * self.data.ndim\n slices[self.axis] = slice(start, stop, step)\n return tuple(slices)",
"def slice(list, point):\n index = list.index(point)\n slices = []\n \n slices.append(list[:index])\n slices.append(list[index + 1:])\n \n return slices",
"def get_slices(arr, centroid):\n assert len(centroid) == arr.ndim\n assert arr.ndim == 3\n slices = [np.rot90(np.rollaxis(arr, i)[v, ...])\n for i, v in enumerate(centroid)]\n return(slices)",
"def _get_slice(series, start, length):\n return [ int(s) for s in series[start:start+length] ]",
"def slices(series, length):\n if length == 0:\n raise ValueError(\"Slice length may not be 0\")\n num_slices = len(series)-length+1\n if num_slices <= 0:\n raise ValueError(\"Slice length may not be longer than series\")\n return [ _get_slice(series, i, length) for i in range(0, num_slices) ]",
"def carve_slice(\n self, x_index=0, width=config()[\"panel\"][\"width\"],\n ):\n piece = []\n for row in self.grid:\n piece.append(row[x_index : x_index + width])\n\n return piece",
"def indices(self):\n slice_list = []\n for axis in range(self.ndim):\n if axis in self.displayed:\n slice_list.append(slice(None))\n else:\n if self.clip:\n p = np.clip(\n self.point[axis],\n np.round(self.range[axis][0]),\n np.round(self.range[axis][1]) - 1,\n )\n else:\n p = self.point[axis]\n p = np.round(p / self.range[axis][2]).astype(int)\n slice_list.append(p)\n return tuple(slice_list)",
"def _conv_slice_to_list(slice_obj, start_def=0, stop_def=100, step_def=1):\n if slice_obj.start is None:\n start = start_def\n else:\n start = slice_obj.start\n if slice_obj.stop is None:\n stop = stop_def\n else:\n stop = slice_obj.stop\n if slice_obj.step is None:\n step = step_def\n else:\n step = slice_obj.step\n return list(range(start, stop, step))",
"def make_slices(data, win_size):\n rows = data.shape[0] - win_size[0] + 1\n cols = data.shape[1] - win_size[1] + 1\n slices = []\n for i in range(win_size[0]):\n for j in range(win_size[1]):\n slices.append(data[i:rows+i, j:cols+j])\n return slices",
"def slices(series: str, length: int) -> List[str]:\n if length > len(series) or length <= 0:\n raise ValueError(f\"Error Length: {length}\")\n\n return [series[i:i + length] for i in range(len(series) - length + 1)]",
"def _slice_index(self, slicer):\n start = self.index_location(slicer.start) if slicer.start is not None else 0\n end = self.index_location(slicer.stop) if slicer.stop is not None else self.size\n return list(range(start, end))",
"def as_slice(self):\n # slice for accessing arrays of values\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)",
"def to_slice(self):\n return np.index_exp[self.start[2]:self.end[2], #\n self.start[1]:self.end[1], #\n self.start[0]:self.end[0]]",
"def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))",
"def core_slices(self, borders=None):\n if borders is None:\n borders = self.all_borders\n\n core_slices = list(self.slices)\n for border, direction in borders:\n core_slice = core_slices[border]\n if direction < 0:\n core_slice = slice(core_slice.start + self.overlap[border], core_slice.stop)\n else:\n core_slice = slice(core_slice.start, core_slice.stop - self.overlap[border])\n core_slices[border] = core_slice\n\n return tuple(core_slices)",
"def get_slice(self):\n return self.locs[tuple(self.indices), :]",
"def get_time_slices(self):\n tot = []\n for clu in self._clusters:\n tot.extend(self._clusters[clu].to_dict()[:])\n #tot.sort()\n return tot",
"def list(self):\n return [self[i,j] for i in range(self._d) for j in range(self._d)]",
"def _calc_slices(X):\n\n n_rows = X.shape[0]\n slices = [n_rows // comm.size for _ in range(comm.size)]\n count = n_rows % comm.size\n for i in range(count):\n slices[i] += 1\n\n return np.array(slices, dtype=np.int64)",
"def get_slice(dimensions, x=None, y=None):\n All = slice(None)\n\n if not dimensions:\n return All # so that it does not break processing \"mapping\"\n\n index_list = [All] * len(dimensions)\n\n if x != None:\n try:\n index_list[dimensions.index('x')] = x\n except:\n pass\n\n if y != None:\n try:\n index_list[dimensions.index('y')] = y\n except:\n pass\n\n return index_list",
"def slice(n, m):\n chunks = []\n for piece in islice(n, m):\n chunks.append(piece)\n return chunks",
"def slice(self) -> Tuple[slice, ...]:\n\n total_slice = tuple(slice(None) for _ in self.collection_shape)\n for obj in self.objects.flat:\n for i, current_slice in enumerate(obj.slices):\n if total_slice[i].start is None:\n total_slice = total_slice[:i] + (current_slice,) + total_slice[i + 1:]\n else:\n if current_slice.start < total_slice[i].start:\n total_slice = total_slice[:i] + (\n slice(current_slice.start, total_slice[i].stop, total_slice[i].step),) + total_slice[i + 1:]\n if current_slice.stop > total_slice[i].stop:\n total_slice = total_slice[:i] + (\n slice(total_slice[i].start, current_slice.stop, total_slice[i].step),) + total_slice[i + 1:]\n return total_slice",
"def divide_with_stride(arr: np.ndarray) -> List[np.ndarray]:\n\n result_list: List[np.ndarray] = []\n # slice by z axis\n for z in range(0, z_len := arr.shape[0], 16):\n if z + 31 >= z_len:\n z = z_len - 16\n z_arr: np.ndarray = arr[z:z+16]\n\n # slice by y axis\n for y in range(0, y_len := arr.shape[1], 16):\n y_arr: np.ndarray = z_arr[:, y:y+16]\n\n # slice by x axis\n for x in range(0, x_len := arr.shape[2], 16):\n x_arr: np.ndarray = y_arr[:, :, x:x+16]\n if len(set(x_arr.shape)) == 1 and x_arr.shape[0] == 16:\n result_list.append(x_arr)\n \n return result_list",
"def splitLayer(self, src, dire):\n\n (rowN, colN) = src.shape\n res = []\n ## UNSURE ABOUT SLICING\n if (dire == self.VERTICAL):\n # range(start, stop, step)\n for i in range(0, rowN - self.slideThickness, self.slideThickness):\n # croping is much easier in Python, it is basically just slicing\n tmp = src[i:i+self.slideThickness, 0:colN]\n \n res.append(tmp)\n\n else:\n\n for i in range(0, colN - self.slideThickness, self.slideThickness):\n # croping is much easier in Python, it is basically just slicing\n tmp = src[0:self.slideThickness, i:i+rowN]\n res.append(tmp)\n\n return res",
"def _build_slices(dataset, patch_shape, stride_shape):\n slices = []\n if dataset.ndim == 4:\n in_channels, i_z, i_y, i_x = dataset.shape\n else:\n i_z, i_y, i_x = dataset.shape\n\n k_z, k_y, k_x = patch_shape\n s_z, s_y, s_x = stride_shape\n z_steps = SliceBuilder._gen_indices(i_z, k_z, s_z)\n for z in z_steps:\n y_steps = SliceBuilder._gen_indices(i_y, k_y, s_y)\n for y in y_steps:\n x_steps = SliceBuilder._gen_indices(i_x, k_x, s_x)\n for x in x_steps:\n slice_idx = (\n slice(z, z + k_z),\n slice(y, y + k_y),\n slice(x, x + k_x)\n )\n if dataset.ndim == 4:\n slice_idx = (slice(0, in_channels),) + slice_idx\n slices.append(slice_idx)\n return slices",
"def indices(self):\n return tuple([slice(*r) for r in self.location])",
"def get_ids_as_slice_or_list(self):\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)",
"def _slice_at_axis(sl, axis):\n return (slice(None),) * axis + (sl,) + (...,)",
"def core_slices(self, chunk):\n intersect_slices = []\n for s, b, olap, idx in zip(chunk.slices, self.bounds, self.overlap, range(0, len(chunk.slices))):\n if s.start == b.start:\n intersect_slices.append(slice(s.start + olap, s.stop))\n elif s.stop == b.stop:\n intersect_slices.append(slice(s.start, s.stop - olap))\n else:\n intersect_slices.append(s)\n\n return tuple(self.remove_chunk_overlap(chunk, intersect_slices))"
] | [
"0.7073947",
"0.6884096",
"0.6862915",
"0.6702075",
"0.6666773",
"0.66185397",
"0.66151047",
"0.66034317",
"0.653593",
"0.6535644",
"0.64788175",
"0.6473772",
"0.6450125",
"0.64454293",
"0.6411023",
"0.64006263",
"0.63614595",
"0.63422865",
"0.6328264",
"0.6324742",
"0.6320165",
"0.6306155",
"0.63058716",
"0.6221467",
"0.6216027",
"0.62107575",
"0.61959475",
"0.61904466",
"0.61539364",
"0.61138433"
] | 0.7627962 | 0 |
Return a list with all tasks and slices | def tasks(self):
return self._tasks + self._slices | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def list_tasks():",
"def get_task_list(self):\n raise NotImplementedError()",
"def tasks():",
"def get_tasks(self):\n return self.stn.get_tasks()",
"def get_tasks_list(self):\n return self.task_controller.get_list()",
"def normalTasks(self):\n return self._tasks",
"def list_tasks(ctx):\n ctx.run(\"invoke --list\")",
"def get_tasks(self):\n return self.tasks",
"def get_tasks(self):\n return self.task_collection",
"def tasks(self):\n args = Namespace(rev=self.rev)\n data = run_query('push_results', args)['data']\n\n tasks = []\n for kwargs in data:\n # Do a bit of data sanitization.\n if any(a not in kwargs for a in ('label', 'duration', 'result', 'classification')):\n continue\n\n if kwargs['duration'] <= 0:\n continue\n\n tasks.append(Task(**kwargs))\n\n return tasks",
"def get_tasks(loop):\n tasks = asyncio.all_tasks(loop)\n return \"Tasks: \" + \", \".join(\n [f\"{task.get_name()}: {task.get_coro().__name__}\" for task in tasks]\n )",
"def view_tasks():\n task_list = []\n incomplete_task_list = Tasks.objects.filter(is_complete=False)\n for task in incomplete_task_list:\n tasks = [] #create data structure\n tasks.append(task.id) #add ID \n tasks.append(task.task_text) #add text\n task_list.append(tasks) #append data structure\n\n return task_list",
"def get_tasks(self):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks\")\n return res.fetchall()",
"def list():\n manager = Actions()\n tasks_list = manager.get_tasks_list()\n console_utils.print_tree(manager, tasks_list)",
"def subtasks(self):\n return tuple(self._tasks)",
"def task_list(self) -> List[\"Task\"]: # noqa: F821\n return list(self.tasks.values())",
"def get_tasks(self):\n return self.tasks.all()",
"def get_all_tasks(self):\n \n sql = \"select * from tasks;\"\n return self._query_all(sql)",
"def get_tasks():\n tasks = []\n example_dir = os.path.normpath(os.path.join(\n os.path.dirname(__file__), '../../openshift/ansiblegen/examples/')\n )\n yaml_names = os.listdir(example_dir)\n for yaml_name in yaml_names:\n _, api_version, resource = yaml_name.split('_', 2)\n resource = resource[0:-4]\n yaml_path = os.path.join(example_dir, yaml_name)\n\n with open(yaml_path, 'r') as f:\n data = yaml.load(f)\n\n tasks.append(((api_version, resource), data))\n return tasks",
"def get_tasks(self):\n return [getattr(self, k).value() for k in self._node_dict.values()]",
"def run(self):\n results = []\n for task in self.tasks:\n results.append(task.run())\n self.tasks = []\n return results",
"def get_archieve(self):\n all_tasks = self.task_controller.get_list()\n return [task for task in all_tasks if task.is_completed == Status.DONE]",
"async def list_tasks(fields: Set[str] = None):\n tasks = celery_app.describe_tasks()\n tasks = [TaskOut(**task).dict(include=fields) for task in tasks]\n return tasks",
"def get_all():\n return list(tasks.find({}))",
"def list(self, name=None):\n if name is not None:\n tasks = self._list_all_tasks_from_single_dataset(name)\n else:\n tasks = self._list_all_tasks_from_all_datasets()\n return tasks",
"def fetch_tasks(swarming, start, end, state, tags, parallel):\n def process(data):\n \"\"\"Returns the list of flattened dimensions for these tasks.\"\"\"\n items = data.get('items', [])\n logging.info('- processing %d items', len(items))\n return [_flatten_dimensions(t['properties']['dimensions']) for t in items]\n\n delta = datetime.timedelta(hours=1)\n return _fetch_daily_internal(\n delta, swarming, process, 'tasks/requests', start, end, state, tags,\n parallel)",
"def get_tasks_summary(self):\n columns = [\"id\", \"name\", \"state\", \"warning\", \"warning_message\", \"parent_job\", \"tags\"]\n \n cur = self.conn.cursor()\n cur.execute(\"SELECT \" + \", \".join(columns) + \" FROM tangerine;\")\n self.conn.commit()\n \n return [Task([(column,) for column in columns], task, interpolate=False) for task in cur.fetchall()]",
"def tasks(self, flags=gdef.TASK_ENUM_HIDDEN):\n tasks = TaskCollection()\n self.GetTasks(flags, tasks)\n return tasks",
"def get_tasks(cls):\n def _get_tasks():\n members = inspect.getmembers(cls, predicate=inspect.isfunction)\n for _, member in members:\n annotations = getattr(member, '__annotations__', {})\n if annotations.get('return', None) == Task:\n yield member\n return list(_get_tasks())",
"def get_subtasks(self, tid):\n return self.task_controller.get_subtasks(tid)"
] | [
"0.7359236",
"0.71100533",
"0.69691336",
"0.6961912",
"0.6829681",
"0.6800806",
"0.678541",
"0.6761252",
"0.6731112",
"0.67283285",
"0.66204923",
"0.6614988",
"0.6608938",
"0.66082376",
"0.660442",
"0.658818",
"0.6573101",
"0.6568259",
"0.6560644",
"0.6559535",
"0.65567577",
"0.65383446",
"0.6521678",
"0.64555144",
"0.6440592",
"0.64201874",
"0.6402355",
"0.6391982",
"0.638037",
"0.6375419"
] | 0.8244439 | 0 |
Apply schedulability test. Assumes that only migratory tasks can have restricted deadlines (due to parameter modification). | def _schedTest(self):
if not self._hasSlices(): # There are no migratory tasks, so let's check utilization
return self.util() <= 1.0
else:
return self._qpa() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def checkUpstreamScheduler():",
"def testJobTRSetRestrictTask(databases):\n gen = DataGenerator(databases)\n fwName = gen.createFramework('testfw1')\n taskName = gen.createTask('task1', fwName)\n tr1Name = gen.createTaskRunner(capabilities=[fwName])\n tr2Name = gen.createTaskRunner(capabilities=[fwName])\n config = gen.createConfiguration()\n config.getTask(taskName)._setRunners([tr2Name])\n config._notify()\n def simulate(config):\n sanityCheck(gen, config)\n job, = config.createJobs(gen.owner)\n task = job.assignTask(databases.resourceDB[tr1Name])\n assert task is None\n assert not job.isExecutionFinished()\n assert not job.hasFinalResult()\n task = job.assignTask(databases.resourceDB[tr2Name])\n assert task is not None\n taskDone(job, task.getName())\n assert job.isExecutionFinished()\n assert job.hasFinalResult()\n runWithReload(databases, config, simulate)",
"def is_task_stagnant(task):",
"def task_stagnant(task):",
"def test_set_power_schedule_for_deployment_run(self):\n pass",
"def test_ensure_not_ts_pass(self):\n self.assertEqual(ensure_not_ts(self.jobset1), 'completed')",
"def _constraints_task_valid(self):\n def rule(model):\n \"\"\"\n Bind the tail entries to zero\n \"\"\"\n num = self.num_timeslots\n ind_j = model.tasks\n total = sum(model.A2[num-1, j] for j in ind_j)\n total += sum(model.A3[num-1, j] for j in ind_j)\n total += sum(model.A4[num-1, j] for j in ind_j)\n total += sum(model.A3[num-2, j] for j in ind_j)\n total += sum(model.A4[num-2, j] for j in ind_j)\n total += sum(model.A4[num-3, j] for j in ind_j)\n return None, total, 0\n\n self.model.constrain_tail = Constraint(rule=rule)\n\n def rule(model):\n \"\"\"\n Only permit \"valid\" allocation on A, A2, A3, etc.\n \"\"\"\n ind_i = model.timeslots\n ind_j = model.tasks\n total = sum(model.A[i, j] * (1-self.valid[i, j]) for i in ind_i\n for j in ind_j)\n total += sum(model.A2[i, j] * (1 - self.valid[i, j]) for i in\n ind_i for j in ind_j)\n total += sum(model.A3[i, j] * (1 - self.valid[i, j]) for i in\n ind_i for j in ind_j)\n\n return None, total, 0\n\n self.model.constrain_valid0 = Constraint(rule=rule)\n\n def rule(model):\n \"\"\"\n Only permit \"valid\" allocation on A, A2, A3, etc.\n \"\"\"\n ind_i = model.timeslots2\n ind_j = model.tasks\n inv = 1-self.valid\n total = sum(\n model.A2[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A3[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A4[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n\n ind_i = model.timeslots3\n ind_j = model.tasks\n total += sum(\n model.A3[i, j] * inv[i + 2, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A4[i, j] * inv[i + 2, j] for i in ind_i for j in ind_j)\n\n ind_i = model.timeslots4\n ind_j = model.tasks\n total += sum(\n model.A4[i, j] * inv[i + 3, j] for i in ind_i for j in ind_j)\n\n return None, total, 0\n\n self.model.constrain_valid1 = Constraint(rule=rule)",
"def test_user_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # We need one extra loop to allow the scheduler to mark a task as completed\r\n for i in range(11):\r\n self.register(fullname=self.user.username + str(i),\r\n name=self.user.username + str(i),\r\n password=self.user.username + str(i))\r\n self.signin()\r\n # Get Task until scheduler returns None\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n self.app.post('/api/taskrun', data=tr)\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n self.signout()\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, t.task_runs\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same User\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_id, t.task_runs), err_msg\r\n # Check that task.state is updated to completed\r\n for t in tasks:\r\n assert t.state == \"completed\", t.state",
"def ddl_guard(self):\n for _ in range(self.ddl_guard_attempts):\n result = self.query(sql.show_status, (\"Threads_running\",))\n if result:\n threads_running = int(result[0][\"Value\"])\n if threads_running > self.max_running_before_ddl:\n log.warning(\n \"Threads running: {}, bigger than allowed: {}. \"\n \"Sleep 1 second before check again.\".format(\n threads_running, self.max_running_before_ddl\n )\n )\n time.sleep(1)\n else:\n log.debug(\n \"Threads running: {}, less than: {}. We are good \"\n \"to go\".format(threads_running, self.max_running_before_ddl)\n )\n return\n log.error(\n \"Hit max attempts: {}, but the threads running still don't drop\"\n \"below: {}.\".format(self.ddl_guard_attempts, self.max_running_before_ddl)\n )\n raise OSCError(\"DDL_GUARD_ATTEMPTS\")",
"def test_check_contributing_state_ongoing_tasks_contributed(self):\r\n app = AppFactory.create()\r\n task = TaskFactory.create(app=app, n_answers=3)\r\n user = UserFactory.create()\r\n TaskRunFactory.create(task=task, user=user)\r\n contributing_state = helpers.check_contributing_state(app_id=app.id,\r\n user_id=user.id)\r\n\r\n assert contributing_state == 'cannot_contribute', contributing_state",
"def testJobTRSetRestrictJob(databases):\n gen = DataGenerator(databases)\n fwName = gen.createFramework('testfw1')\n taskName = gen.createTask('task1', fwName)\n tr1Name = gen.createTaskRunner(capabilities=[fwName])\n tr2Name = gen.createTaskRunner(capabilities=[fwName])\n config = gen.createConfiguration()\n config._setRunners([tr2Name])\n config._notify()\n def simulate(config):\n sanityCheck(gen, config)\n job, = config.createJobs(gen.owner)\n task = job.assignTask(databases.resourceDB[tr1Name])\n assert task is None\n assert not job.isExecutionFinished()\n assert not job.hasFinalResult()\n task = job.assignTask(databases.resourceDB[tr2Name])\n assert task is not None\n taskDone(job, task.getName())\n assert job.isExecutionFinished()\n assert job.hasFinalResult()\n runWithReload(databases, config, simulate)",
"def dumb_task():\n return True",
"def applyFeatureTest(tgen):\n\n if not _shared.withTests:\n # Ignore all build tasks for tests in this case\n for task in tgen.tasks:\n task.runnable_status = lambda: Task.SKIP_ME",
"def flag_tasks_with_scouting_failures(tasks, ds_dict):\n taskl = []\n for task in tasks:\n if task['jeditaskid'] in ds_dict:\n # tasks suspected in failures during scouting\n if task['status'] in ('failed', 'broken') and True in [\n (ds['nfilesfailed'] > ds['nfilestobeused']) for ds in ds_dict[task['jeditaskid']] if ds['type'] == 'input' and ds['nfilesfailed'] and ds['nfilestobeused']]:\n task['failedscouting'] = True\n # scouting has critical failures\n elif task['status'] == 'scouting' and True in [\n (ds['nfilesfailed'] > 0) for ds in ds_dict[task['jeditaskid']] if ds['type'] == 'input' and ds['nfilesfailed'] ]:\n task['scoutinghascritfailures'] = True\n elif task['status'] == 'scouting' and sum(\n [ds['nfilesfailed'] for ds in ds_dict[task['jeditaskid']] if ds['type'] == 'input' and ds['nfilesfailed']]) == 0:\n # potentially non-critical failures during scouting, flag it, and will check if there are job retries\n task['scoutinghasnoncritfailures'] = True\n taskl.append(task['jeditaskid'])\n\n tquery = {\n 'relationtype': 'retry'\n }\n extra_str = '(1=1)'\n if len(taskl) > settings.DB_N_MAX_IN_QUERY:\n tmp_table_name = get_tmp_table_name()\n transaction_key = insert_to_temp_table(taskl)\n extra_str += \" and jeditaskid in (select id from {} where transactionkey={})\".format(tmp_table_name, transaction_key)\n else:\n tquery['jeditaskid__in'] = taskl\n retries = JediJobRetryHistory.objects.filter(**tquery).extra(where=[extra_str]).values('jeditaskid')\n retries_dict = {}\n for r in retries:\n if r['jeditaskid'] not in retries_dict:\n retries_dict[r['jeditaskid']] = True\n\n for task in tasks:\n if 'scoutinghasnoncritfailures' in task and task['scoutinghasnoncritfailures']:\n task['scoutinghasnoncritfailures'] = retries_dict[task['jeditaskid']] if task['jeditaskid'] in retries_dict else False\n\n return tasks",
"def test_list_of_tasks():\n\n with Flow(name=\"test\") as flow:\n condition = Condition()\n true_branch = [SuccessTask(), SuccessTask()]\n false_branch = SuccessTask()\n ifelse(condition, true_branch, false_branch)\n\n with prefect.context(CONDITION=True):\n state = flow.run()\n\n for t in true_branch:\n assert isinstance(state.result[t], Success)\n assert isinstance(state.result[false_branch], Skipped)\n\n with prefect.context(CONDITION=False):\n state = flow.run()\n\n for t in true_branch:\n # the tasks in the list ran becuase they have no upstream dependencies.\n assert isinstance(state.result[t], Success)\n list_task = next(\n t for t in flow.tasks if isinstance(t, prefect.tasks.core.collections.List)\n )\n # but the list itself skipped\n assert isinstance(state.result[list_task], Skipped)\n assert isinstance(state.result[false_branch], Success)",
"def test_set_deployment_run_lock(self):\n pass",
"def skip_or_run_constraints_test(func):\n\n return skip_or_run_test_tarantool(func, '2.10.0',\n 'does not support schema constraints')",
"def can_dry_run(self, task: \"TaskView\") -> bool:\n return False",
"def task_status():\n pass",
"def test_anonymous_03_respects_limit_tasks(self):\r\n # Del previous TaskRuns\r\n self.del_task_runs()\r\n\r\n assigned_tasks = []\r\n # Get Task until scheduler returns None\r\n for i in range(10):\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n while data.get('info') is not None:\r\n # Check that we received a Task\r\n assert data.get('info'), data\r\n\r\n # Save the assigned task\r\n assigned_tasks.append(data)\r\n\r\n # Submit an Answer for the assigned task\r\n tr = TaskRun(app_id=data['app_id'], task_id=data['id'],\r\n user_ip=\"127.0.0.\" + str(i),\r\n info={'answer': 'Yes'})\r\n db.session.add(tr)\r\n db.session.commit()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check if there are 30 TaskRuns per Task\r\n tasks = db.session.query(Task).filter_by(app_id=1).all()\r\n for t in tasks:\r\n assert len(t.task_runs) == 10, len(t.task_runs)\r\n # Check that all the answers are from different IPs\r\n err_msg = \"There are two or more Answers from same IP\"\r\n for t in tasks:\r\n for tr in t.task_runs:\r\n assert self.is_unique(tr.user_ip, t.task_runs), err_msg",
"def test_task_add():\n pytest.fail('Not implemented yet.')",
"def testJobTRLostWhileRunning(databases):\n gen = DataGenerator(databases)\n fwName = gen.createFramework('testfw1')\n taskName = gen.createTask('task1', fwName)\n trName = gen.createTaskRunner(capabilities=[fwName])\n config = gen.createConfiguration()\n\n sanityCheck(gen, config)\n job, = config.createJobs(gen.owner)\n runner = databases.resourceDB[trName]\n task = job.assignTask(runner)\n assert task is not None\n assert task.isRunning()\n runner.markLost()\n assert not task.isRunning()\n assert task.result == ResultCode.ERROR",
"def _check_and_apply_migrations(self) -> None:\n from hathor.transaction.storage.exceptions import OutOfOrderMigrationError, PartialMigrationError\n db_is_empty = self.is_empty()\n self.log.debug('step through all migrations', count=len(self._migrations))\n migrations_to_run = []\n # XXX: this is used to ensure migrations don't advance out of order\n previous_migration_state = MigrationState.COMPLETED\n for migration in self._migrations:\n migration_name = migration.get_db_name()\n self.log.debug('step migration', migration=migration_name)\n\n # short-cut to avoid running migrations on empty database\n if migration.skip_empty_db() and db_is_empty:\n self.log.debug('migration is new, but does not need to run on an empty database',\n migration=migration_name)\n self.set_migration_state(migration_name, MigrationState.COMPLETED)\n continue\n\n # get the migration state to decide whether to run, skip or error\n migration_state = self.get_migration_state(migration_name)\n\n if migration_state > previous_migration_state:\n raise OutOfOrderMigrationError(f'{migration_name} ran after a migration that wasn\\'t advanced')\n previous_migration_state = migration_state\n\n should_run_migration: bool\n if migration_state is MigrationState.NOT_STARTED:\n self.log.debug('migration is new, will run', migration=migration_name)\n should_run_migration = True\n elif migration_state is MigrationState.STARTED:\n self.log.warn('this migration was started before, but it is not marked as COMPLETED or ERROR, '\n 'it will run again but might fail', migration=migration_name)\n should_run_migration = True\n elif migration_state is MigrationState.COMPLETED:\n self.log.debug('migration is already complete', migration=migration_name)\n should_run_migration = False\n elif migration_state is MigrationState.ERROR:\n self.log.error('this migration was run before but resulted in an error, the database will need to be '\n 'either manually fixed or discarded', migration=migration_name)\n raise PartialMigrationError(f'Migration error state previously: {migration_name}')\n else:\n raise ValueError(f'Unexcepted migration state: {migration_state!r}')\n\n # run if needed, updating the state along the way\n if should_run_migration:\n migrations_to_run.append(migration)\n self.log.debug('stepped through all migrations')\n if migrations_to_run:\n self.log.info('there are migrations that need to be applied')\n migrations_to_run_count = len(migrations_to_run)\n for i, migration in enumerate(migrations_to_run):\n migration_name = migration.get_db_name()\n self.log.info(f'running migration {i+1} out of {migrations_to_run_count}', migration=migration_name)\n self.set_migration_state(migration_name, MigrationState.STARTED)\n try:\n migration.run(self)\n # XXX: we catch \"any\" exception because just we want to mark the state as \"ERROR\"\n except Exception as exc:\n self.set_migration_state(migration_name, MigrationState.ERROR)\n raise PartialMigrationError(f'Migration error state: {migration_name}') from exc\n else:\n self.set_migration_state(migration_name, MigrationState.COMPLETED)\n if migrations_to_run:\n self.log.info('all migrations have been applied')",
"def schedule_task(self, task):\n if self.time_based:\n minimum_wait_server = float('inf')\n for uid, server in self.all_servers.items():\n if server.status:\n if minimum_wait_server > server.waiting_time:\n target_server = server\n minimum_wait_server = server.waiting_time\n try:\n target_server.jobs.append(task)\n target_server.waiting_time += task.task_time\n self.servers_jobs_list[target_server.server_id].append(task)\n except Exception:\n print(\"There are no servers left to reassign\")\n raise Exception(\"################# CHAOS MONKEY WON ####################\")\n else:\n minimum_jobs = float('inf')\n for uid, server in self.all_servers.items():\n if server.status:\n if minimum_jobs > len(server.jobs):\n minimum_jobs = len(server.jobs)\n target_server = server\n try:\n target_server.jobs.append(task)\n target_server.waiting_time += task.task_time\n self.servers_jobs_list[target_server.server_id].append(task)\n except Exception:\n print(\"There are no servers left to reassign\")\n raise Exception(\"################# CHAOS MONKEY WON ####################\")",
"def test_check_contributing_state_ongoing_tasks_not_contributed(self):\r\n app = AppFactory.create()\r\n task = TaskFactory.create(app=app)\r\n user = UserFactory.create()\r\n\r\n contributing_state = helpers.check_contributing_state(app_id=app.id,\r\n user_id=user.id)\r\n\r\n assert contributing_state == 'can_contribute', contributing_state",
"def prepare_rt_taskset(tcconfig):\n\n core_assignment = schf.get_rt_core_assignment(rt_taskset=tcconfig.rt_taskset, n_core=tcconfig.n_core)\n\n if core_assignment is not None:\n # run schedulability test for safety\n is_sched = schf.check_rt_schedulability(rt_taskset=tcconfig.rt_taskset, core_assignment=core_assignment)\n if is_sched:\n # print \"RT Tasks schedulable!\"\n # update the wcrt variable in RT taskset\n schf.set_wcrt_all_rt_task(rt_taskset=tcconfig.rt_taskset, core_assignment=core_assignment)\n tcconfig.rt_core_assignment = copy.deepcopy(core_assignment)\n return True\n else:\n print (\"RT Taskset is not schedulable. Continue...\")\n return False\n\n else:\n print (\"Unable to find core assignment for RT Tasks. Continue...\")\n return False",
"def dummy_update_subtask_status(entry_id, _current_task_id, new_subtask_status):\r\n bogus_task_id = \"this-is-bogus\"\r\n update_subtask_status(entry_id, bogus_task_id, new_subtask_status)",
"def testJobTRRemovedWhileRunning(databases):\n gen = DataGenerator(databases)\n fwName = gen.createFramework('testfw1')\n taskName = gen.createTask('task1', fwName)\n trName = gen.createTaskRunner(capabilities=[fwName])\n config = gen.createConfiguration()\n\n sanityCheck(gen, config)\n job, = config.createJobs(gen.owner)\n runner = databases.resourceDB[trName]\n task = job.assignTask(runner)\n assert task is not None\n assert task.isRunning()\n databases.resourceDB.remove(runner)\n assert not task.isRunning()\n assert task.result == ResultCode.ERROR",
"def test_dag_tasks_present(self):\n self.assertEqual(self.tasks, [\n \"harvest_oai\",\n \"create_collection\",\n \"combine_index\",\n \"solr_alias_swap\",\n \"success_slack_trigger\"\n ])",
"def test_dag_tasks_present(self):\n self.assertEqual(self.tasks, [\n \"harvest_oai\",\n \"create_collection\",\n \"combine_index\",\n \"solr_alias_swap\",\n \"success_slack_trigger\"\n ])"
] | [
"0.60494995",
"0.59117544",
"0.58629143",
"0.5710559",
"0.5710446",
"0.56452847",
"0.5637244",
"0.5563337",
"0.5528159",
"0.54618025",
"0.5455458",
"0.54490566",
"0.54092175",
"0.5387321",
"0.53783834",
"0.5360575",
"0.534527",
"0.5332817",
"0.5274864",
"0.52744997",
"0.5274138",
"0.52689856",
"0.5268083",
"0.5252858",
"0.52367985",
"0.5231493",
"0.5225256",
"0.5224163",
"0.51831627",
"0.51831627"
] | 0.6594417 | 0 |
Return the free execution time for a migratory task in this partition | def _calcExecTime(self, migTask, dPrime):
#print "ae", self
# Let's start making U = 0.9999 (which probably causes deadline misses).
# If we force U = 1, we won't be able to use La.
if self.util() >= 0.9999:
self._lastCost = 0.0
return 0.0
cPrime = (0.9999 - self.util())*migTask.period()
# Temporarily add the slice
tempSlice = WmSlice(-1, cPrime, dPrime, migTask)
self._addSlice(tempSlice)
L = self._L()
min_d = self._minDeadline()
#print "L", L
#print self
#print "Calculating cost. dPrime", dPrime
# QPA
t = self._lastDeadline(L)
h = self._h(t)
#print t
while round(t,12) >= round(min_d,12): # We are checking demand only for the migratory task
            # We round the checking to 12 decimal places. Otherwise, it could make the algorithm repeat indefinitely, in
# case new calculated cost is not 100% precise. We do the same when applying floor(). The other comparisons don't
# need this correction, since they are not so critical.
if round(h,12) > round(t,12):
#print "HIGH. t %.15f" % t, "h(t) %.15f" % h, ". C was", cPrime
cPrime = (t - self._h_oth(t, tempSlice)) / floor(round((t + migTask.period() - dPrime)/migTask.period(), 12))
#print "New C is", cPrime
tempSlice._wcet = cPrime # Update slice cost to fix demand
if cPrime <= 0.0: # Stop if the cost gets negative
self._removeLastSlice()
self._lastCost = 0.0
return 0.0
#print "OK. t", t, "h(t)",h, "new t",
t = self._lastDeadline(t)
#print t
h = self._h(t)
#print "OK. t", t, "h(t)",h
#print self
#print "Final cost", cPrime
#if not self._qpa():
# print self.tasks()
#assert self._qpa()
self._removeLastSlice()
self._lastCost = cPrime
return cPrime | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def estimated_time(self):\n self._update()\n if not self.running_mode:\n return 0 if self._is_finished() else float(\"nan\")\n elif self.running_mode == \"local\":\n start = self.processes[0].create_time()\n elif self.running_mode == \"grid\":\n start = self.job[\"start_time\"]\n if start == 0:\n # Queued, but not started\n return float(\"nan\")\n else:\n logger.warning(\"Invalid running_mode attribute\")\n return float(\"nan\")\n current = self.current_step()\n if current <= 0: # If not dumped yet or error\n return float('nan')\n else:\n elapsed = time() - start\n return elapsed * (self.total_steps / current - 1)",
"def compute_time_without_gc(self):\n compute_time = (self.runtime() - self.scheduler_delay - self.gc_time -\n self.shuffle_write_time - self.input_read_time - self.output_write_time)\n if self.has_fetch:\n # Subtract off of the time to read local data (which typically comes from disk) because\n # this read happens before any of the computation starts.\n compute_time = compute_time - self.fetch_wait - self.local_read_time\n return compute_time",
"def queued_time(self):\r\n return (self.node_monitor_launch_time - self.node_monitor_submit_time)",
"def used_time(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"used_time\")",
"def compute_time(self):\n return self.compute_time_without_gc() + self.gc_time",
"def execution_time_sec(self):\n if self.job_started_at is not None:\n if self.job_completed_at is not None:\n return (self.job_completed_at -\n self.job_started_at).total_seconds()\n return None",
"def adjusted_completion_time(self):\r\n expected_scheduler_get_task_time = (self.node_monitor_get_task_time +\r\n self.node_monitor_launch_time) / 2.0\r\n skew = self.scheduler_launch_time - expected_scheduler_get_task_time\r\n return self.completion_time + skew",
"def free_flight_time(self):\n return self._free_flight_time",
"def runtime_no_compute(self):\n # Time the task spent reading data over the network or from disk for the shuffle.\n # Computation happens during this time, but if the computation were infinitely fast,\n # this phase wouldn't have sped up because it was ultimately waiting on the network.\n # This is an approximation because tasks don't currently log the amount of time where\n # the network is stopped, waiting for the computation to speed up.\n # We're also approximating because there's some disk writing that happens in parallel\n # via the OS buffer cache. It's basically impossible for us to account for that so\n # we ignore it.\n # The final reason that this is an approximation is that the shuffle write time could overlap with\n # the shuffle time (if a task is both reading shuffle inputs and writing shuffle outputs).\n # We should be able to fix the logging to correct this issue.\n compute_wait_time = self.finish_time - self.start_time - self.shuffle_write_time - self.scheduler_delay - self.gc_time - self.input_read_time\n if self.has_fetch:\n #compute_wait_time = compute_wait_time - shuffle_time\n compute_wait_time = compute_wait_time - self.fetch_wait\n return self.runtime() - compute_wait_time",
"def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt",
"def duration(self):\n return int(sum(t.duration for t in self.tasks) / 3600)",
"def get_execution_time(self):\n self.execution_time = self.end_time - self.start_time\n\n print('\\n')\n self.message('**[OPERATION COMPLETE]**********************************************************************')\n if self.arg_data:\n self.message(' Execution Time: {} ms'.format(self.execution_time))\n self.message('********************************************************************************************')\n else:\n self.message(' Cell Updates: {}'.format(self.cells_updated))\n self.message(' Cell Additions: {}'.format(self.rows_appended))\n self.message(' Errors: {}'.format(self.errors))\n self.message(' Warnings: {}'.format(self.warnings))\n self.message(' Execution Time: {} ms'.format(self.execution_time))\n self.message('********************************************************************************************')",
"def max_node_provision_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"max_node_provision_time\")",
"def response_time(self):\r\n if self.__arrival_time == INVALID_TIME:\r\n self.__logger.debug(\"Request %s missing arrival time\" % self.__id)\r\n return INVALID_TIME_DELTA\r\n completion_time = self.__arrival_time\r\n for task_id, task in self.__tasks.items():\r\n if task.completion_time == INVALID_TIME:\r\n self.__logger.debug((\"Task %s in request %s missing completion \"\r\n \"time\") % (task_id, self.__id))\r\n return INVALID_TIME_DELTA\r\n task_completion_time = task.adjusted_completion_time()\r\n #if task.scheduler_launch_time > task.node_monitor_launch_time:\r\n #self.__logger.warn((\"Task %s suggests clock skew: scheduler launch time %d, node \"\r\n # \"monitor launch time %d\") %\r\n\r\n #(task_id, task.scheduler_launch_time,\r\n # task.node_monitor_launch_time))\r\n completion_time = max(completion_time, task_completion_time)\r\n return completion_time - self.__arrival_time",
"def used_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"used_time\")",
"def used_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"used_time\")",
"def get_task_time(self, task):\n task.task_time = TASK_TYPES[task.task_type]\n print(\"Fetched task time\")",
"def _estimate_free(self):\n # Query the information we need for this task's channel and package.\n capacity_deferred = self.channel.total_capacity()\n open_tasks_deferred = self.channel.tasks(state=[task_states.OPEN])\n avg_delta_deferred = self.estimate_duration()\n deferreds = [capacity_deferred,\n open_tasks_deferred,\n avg_delta_deferred]\n results = yield defer.gatherResults(deferreds, consumeErrors=True)\n capacity, open_tasks, avg_delta = results\n # Ensure this task's channel has spare capacity for this task.\n open_weight = sum([task.weight for task in open_tasks])\n if open_weight >= capacity:\n # TODO: Evaluate all tasks in the channel and\n # determine when enough OPEN tasks will complete so that we can\n # get to OPEN.\n raise NotImplementedError('channel %d is at capacity' %\n self.channel_id)\n # A builder will pick up this task and start it within SLEEPTIME.\n # start_time is the maximum amount of time we expect to wait here.\n start_time = self.created + SLEEPTIME\n if avg_delta is None:\n defer.returnValue(None)\n est_completion = start_time + avg_delta\n defer.returnValue(est_completion)",
"def service_time(self):\r\n return (self.completion_time - self.node_monitor_launch_time)",
"def get_exec_time(self):\n return self._exec_time",
"def get_current_task_value(self, task):\n stats = self.get_statistics(task)\n if stats is None:\n return None\n return stats['cpus_system_time_secs'] + stats['cpus_user_time_secs']",
"def free_offer_expiration_time(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"free_offer_expiration_time\")",
"def service_time(self):\r\n #print self.node_monitor_address, self.completion_time - self.node_monitor_launch_time\r\n return (self.completion_time - self.node_monitor_launch_time)",
"def check(self):\r\n boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))\r\n\r\n if self.hourly and not self.last_executed:\r\n return 0\r\n \r\n if self.daily and not self.last_executed:\r\n if int(self.hour) == self.now.hour:\r\n return 0\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60\r\n\r\n delta = self.now - self.last_executed\r\n if self.hourly:\r\n if delta.seconds >= 60*60:\r\n return 0\r\n else:\r\n return 60*60 - delta.seconds\r\n else:\r\n if int(self.hour) == self.now.hour:\r\n if delta.days >= 1:\r\n return 0\r\n else:\r\n return 82800 # 23 hours, just to be safe\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60",
"def get_curr_exec_time(self):\n if self.type == 'normal':\n try:\n self.curr_exec_time = self.my_rand.gauss(self.runtime, self.stddev)\n except:\n if self.fwk.debug:\n print(\"not varying the execution time\")\n self.curr_exec_time = self.runtime\n raise\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_work':\n # this is a sandia style work task\n next_ckpt = self.sim.next_ckpt # relative work time\n work_todo = self.sim.total_work - self.sim.completed_work\n self.curr_exec_time = min(work_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_rework':\n next_ckpt = self.sim.next_ckpt # relative work time\n self.curr_exec_time = min(self.sim.rework_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_ckpt' or self.type == 'sandia_restart':\n self.curr_exec_time = self.runtime\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n else:\n print('error error error!!! problem with component type in get_curr_exec_time')\n raise",
"def get_free_space(config, task):\n if 'host' in config:\n import paramiko\n\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n try:\n ssh.connect(\n config.get('host'),\n config.get('port', 22),\n config.get('user'),\n config.get('password', None),\n config.get('pkey', None),\n config.get('ssh_key_filepath'),\n timeout=5000,\n )\n except Exception as e:\n logger.error(\"Issue connecting to remote host. {}\", e)\n task.abort('Error with remote host.')\n if config['allotment'] != -1:\n stdin, stdout, stderr = ssh.exec_command(f\"du -s {config['path']} | cut -f 1\")\n else:\n stdin, stdout, stderr = ssh.exec_command(\n f\"df -k {config['path']} | tail -1 | tr -s ' ' | cut -d' ' -f4\"\n )\n outlines = stdout.readlines()\n resp = ''.join(outlines)\n ssh.close()\n try:\n if config['allotment'] != -1:\n free = int(config['allotment']) - ((int(resp.strip()) * 1024) / 1000000)\n else:\n free = int(resp.strip()) / 1000\n except ValueError:\n logger.error('Non-integer was returned when calculating disk usage.')\n task.abort('Error with remote host.')\n return free\n elif os.name == 'nt':\n import ctypes\n\n free_bytes = ctypes.c_ulonglong(0)\n ctypes.windll.kernel32.GetDiskFreeSpaceExW(\n ctypes.c_wchar_p(config['path']), None, None, ctypes.pointer(free_bytes)\n )\n return free_bytes.value / (1024 * 1024)\n else:\n stats = os.statvfs(config['path'])\n return (stats.f_bavail * stats.f_frsize) / (1024 * 1024)",
"def runtime_no_disk(self):\n disk_time = self.output_write_time + self.shuffle_write_time + self.input_read_time\n if self.has_fetch:\n disk_time += self.local_read_time + self.fetch_wait\n return self.runtime() - disk_time",
"def pc_work_time_total(self):\n return _spacegrant_swig.G3RUH_descramble_sptr_pc_work_time_total(self)",
"def calculate_eft_and_cost(self, task, resource_id, arrival_time=0):\r\n start_time, eft, runtime_on_resource, place_id = self.calculate_eft(task, resource_id, arrival_time=arrival_time)\r\n if task.dummy_task:\r\n return start_time, eft, runtime_on_resource, place_id, 0\r\n else:\r\n cost = self.calculate_share_cost_change(resource_id, start_time, eft, task.graph.name, True)\r\n return start_time, eft, runtime_on_resource, place_id, cost",
"def ingestion_wait_time_in_hours(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"ingestion_wait_time_in_hours\")"
] | [
"0.6263205",
"0.60351074",
"0.59931386",
"0.5978507",
"0.5912567",
"0.5891301",
"0.58882743",
"0.5881097",
"0.5842023",
"0.5826707",
"0.5752384",
"0.5737175",
"0.57205445",
"0.5720036",
"0.5698623",
"0.5698623",
"0.5694059",
"0.5669578",
"0.5656949",
"0.5654609",
"0.56376934",
"0.56307167",
"0.5623396",
"0.561967",
"0.5618293",
"0.5617561",
"0.55900145",
"0.5571121",
"0.5567003",
"0.5564072"
] | 0.6479419 | 0 |
Eliminate values using the naked twins strategy. | def naked_twins(values):
# Find all instances of naked twins
# Eliminate the naked twins as possibilities for their peers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def purge_outlying_trials(self, trial_nums, thresh=5.0):\n for injkey in self.values.keys():\n for fit_key in self.values[injkey].keys():\n points = np.array(self.values[injkey][\n fit_key]['metric_val']['vals'])\n if len(points.shape) == 1:\n points = points[:, None]\n median = np.median(points, axis=0)\n diff = np.sum((points - median)**2, axis=-1)\n diff = np.sqrt(diff)\n med_abs_deviation = np.median(diff)\n modified_z_score = 0.6745 * diff / med_abs_deviation\n good_trials = modified_z_score < thresh\n if not np.all(good_trials):\n bad_trials = np.where(not good_trials)[0]\n logging.warning(\n 'Outlier(s) detected for %s in trial(s) %s. Will be '\n 'removed. If you think this should not happen, please '\n 'change the value of the threshold used for the '\n 'decision (currently set to %.2e).'%(\n fit_key, trial_nums[bad_trials], thresh\n )\n )\n for fitkey in self.values[injkey].keys():\n for param in self.values[injkey][fitkey].keys():\n new_vals = np.delete(\n np.array(self.values[injkey][\n fitkey][param]['vals']),\n bad_trials\n )\n self.values[injkey][\n fitkey][param]['vals'] = new_vals",
"def naked_twins(values):\n # TODO: Implement this function!\n \n # First select boxes with 2 entries\n potential_twins = [box for box in values.keys() if len(values[box]) == 2]\n # Collect boxes that have the same elements\n naked_twins = [[box1,box2] for box1 in potential_twins for box2 in peers[box1] if set(values[box1])==set(values[box2]) ]\n #print(naked_twins)\n \n for twins in naked_twins:\n box1 = twins[0]\n box2 = twins[1]\n # 1- compute intersection of peers\n peers1 = set(peers[box1])\n peers2 = set(peers[box2])\n peers_int = peers1 & peers2\n # 2- Delete the two digits in naked twins from all common peers.\n for box in peers_int:\n if len(values[box])>=2:\n for rm_val in list(set(values[box1]+values[box2])):\n #print (box, \"=>\", values[box], \"removed\", rm_val)\n values = assign_value(values, box, values[box].replace(rm_val,''))\n\n return values",
"def naked_twins(values):\n\n # Find all instances of naked twins. Here we are solving only for twins, not for triplets or quads\n from collections import Counter\n for unit in unitlist:\n val_l = []\n for y in unit:\n if len(values[y]) == 2:\n val_l += [values[y]]\n a = [k for k, v in Counter(val_l).items() if v == 2]\n # Eliminate the naked twins as possibilities for their peers\n for y in unit:\n for z in a:\n ##Deparce twins into simple numbers\n for yy in z:\n # Eliminate the naked twins\n if yy in values[y] and len(values[y])>2:\n values[y] = values[y].replace(yy,'')\n return values",
"def row_naked_twins(values):\n dual_values = [box for box in values.keys() if len(values[box]) == 2]\n \n for box in dual_values:\n for row_boxes in row_dict[box]:\n if values[row_boxes] == values[box]:\n loc_1 = values[box][0]\n loc_2 = values[box][1]\n \n modified_row = list(row_dict[box])\n modified_row.remove(row_boxes) #we do not want to remove the values from naked twins\n \n for modified in modified_row: #for all the OTHER columns:\n if len(values[modified]) == 1: #we do not want to remove values from solved entries\n modified_row.remove(modified)\n \n for row_boxes_2 in modified_row:\n try:\n values[row_boxes_2].remove(loc_1)\n except:\n pass\n try:\n values[row_boxes_2].remove(loc_2)\n except:\n pass\n \n\n \n return values",
"def naked_twins(values):\n \n #display(values)\n # Find all instances of naked twins\n naked_twins = []\n for unit in unitlist:\n for box in unit:\n if len(values[box]) == 2:\n for other_box in peers[box]:\n if values[box] == values[other_box]:\n \n naked_twins.append((box,other_box))\n # Eliminate the naked twins as possibilities for their peers\n #print(set(naked_twins))\n for unit in unitlist:\n for naked_twin in set(naked_twins):\n if naked_twin[0] in unit and naked_twin[1] in unit:\n for box in unit:\n if box not in naked_twin:\n values = assign_value(values, box, values[box].replace(values[naked_twin[0]][0],\"\"))\n values = assign_value(values, box, values[box].replace(values[naked_twin[0]][1],\"\"))\n #print()\n #display(values)\n #print()\n #from solution_test import after_naked_twins\n #display(after_naked_twins)\n return values",
"def naked_twins(values):\n\n # Find all instances of naked twins. Twins are found in each kind of peer separately.\n # This is done by searching for and putting all two-digit box values in a list.\n # When a duplicate is encountered, that box and its value are added to a list of twins.\n\n row_twins = []\n row_twins_values = []\n for row in row_units:\n two_digit_values = []\n for box in row:\n if len(values[box]) == 2:\n if values[box] in two_digit_values:\n if row not in row_twins:\n row_twins.append(row)\n row_twins_values.append(values[box])\n else:\n two_digit_values.append(values[box])\n\n column_twins = []\n column_twins_values = []\n for column in column_units:\n two_digit_values = []\n for box in column:\n if len(values[box]) == 2:\n if values[box] in two_digit_values:\n if column not in column_twins:\n column_twins.append(column)\n column_twins_values.append(values[box])\n else:\n two_digit_values.append(values[box])\n\n square_twins = []\n square_twins_values = []\n for square in square_units:\n two_digit_values = []\n for box in square:\n if len(values[box]) == 2:\n if values[box] in two_digit_values:\n if square not in square_twins:\n square_twins.append(square)\n square_twins_values.append(values[box])\n else:\n two_digit_values.append(values[box])\n\n # Eliminate the naked twins as possibilities for their peers.\n # Look for all boxes that have a value with a digit included in a naked twin value for that peer,\n # and remove that digit form that box. Naked twins themselves are excluded from the elimination, of course.\n\n elimination_count = 0\n\n index = 0\n for row in row_twins:\n for box in row:\n for digit in row_twins_values[index]:\n if digit in values[box] and values[box] != row_twins_values[index]:\n values[box] = values[box].replace(digit, '')\n elimination_count += 1\n index += 1\n\n index = 0\n for column in column_twins:\n for box in column:\n for digit in column_twins_values[index]:\n if digit in values[box] and values[box] != column_twins_values[index]:\n values[box] = values[box].replace(digit, '')\n elimination_count += 1\n index += 1\n\n index = 0\n for square in square_twins:\n for box in square:\n for digit in square_twins_values[index]:\n if digit in values[box] and values[box] != square_twins_values[index]:\n values[box] = values[box].replace(digit, '')\n elimination_count += 1\n index += 1\n\n # The following condition is meant to cause constraint propagation to continue for new naked twins that are\n # revealed after the most recent round of twin identification and value elimination.\n # However, even though the resulting boards ARE in the possible solutions in solution_test.py\n # (the print statement below prints 'True' when the conditional statements aren't commented),\n # the unit tests do not pass. Therefore, this condition and its else are commented out.\n # if elimination_count == 0:\n print(\"Is board in solutions? \" + str(values in solution_test.TestNakedTwins.possible_solutions_1\n or values in solution_test.TestNakedTwins.possible_solutions_2))\n return values\n # else:\n # naked_twins(values)",
"def removeOutliers(self):\n #With the DSFPlate object, we can just use self.wells.pop() to remove outliers\n visited = []\n discard = []\n for well in self.wells:\n if well not in visited:\n reps = []\n reps += self.originalPlate.repDict[well]\n pairs = combinations(reps,2)\n distMatrix = [[0 for x in range(len(reps))] for y in range(len(reps))]\n for pair in pairs:\n dist = sqrDiffWellFluoro(self.wells[pair[0]].fluorescence,self.wells[pair[1]].fluorescence)\n distMatrix[reps.index(pair[0])][reps.index(pair[1])] = dist\n distMatrix[reps.index(pair[1])][reps.index(pair[0])] = dist\n keep = rh.discardBad(reps,distMatrix,SIMILARITY_THRESHOLD)\n for rep in reps:\n visited.append(rep)\n if rep not in keep:\n discard.append(rep)\n for well in discard:\n self.wells[well].fluorescence = None\n self.delCurves.append(well)\n return",
"def naked_twins(values):\n\n # Find all instances of naked twins\n # Eliminate the naked twins as possibilities for their peers\n for unit in unitlist:\n for i in range(len(unit)):\n if len(values[unit[i]]) == 2:\n for j in range(i+1, len(unit)):\n if values[unit[i]] == values[unit[j]]:\n a = values[unit[i]][0]\n b = values[unit[i]][1]\n #print (a,b)\n for box in unit:\n if values[box] != values[unit[i]]:\n if a in values[box]:\n values = assign_value(values,box,values[box].replace(a,''))\n if b in values[box]:\n values = assign_value(values,box,values[box].replace(b,''))\n return values",
"def naked_twins(values):\n\n # Find all instances of naked twins\n # Eliminate the naked twins as possibilities for their peers\n\n # dictionary for naked twins with 'value'of grid as key \n twins = dict()\n for unit in unitlist:\n # dictionary to find possible twins with 'value' of grid as key\n dups = dict()\n # loop to find possible twins\n for box in unit:\n if len(values[box]) == 2:\n if values[box] not in dups:\n dups[values[box]] = [box]\n else:\n dups[values[box]].append(box)\n # make sure the twins are naked twins\n for key,value in dups.items():\n if len(value) == 2:\n if key not in twins:\n twins[key] = [unit]\n else:\n twins[key].append(unit)\n # remove the matched character as per the units.\n for key in twins:\n for unit in twins[key]:\n for box in unit:\n for key_element in list(key):\n if values[box] != key and len(values[box])>=2:\n assign_value(values, box, values[box].replace(key_element, ''))\n\n\n\n return values",
"def _prune(self, idx):\n idx = list(idx)\n neurons = []\n for nold in self.neurons:\n k = nold[1] # number of neurons\n ix1 = [i for i in idx if i < k] # index for current neuron type\n idx = [i-k for i in idx if i >= k]\n func = nold[0]\n number = len(ix1)\n W = nold[2][:, ix1]\n bias = nold[3][ix1]\n neurons.append((func, number, W, bias))\n self.neurons = neurons",
"def eliminate(values):\n\tsolved = [box for box in boxes if len(values[box]) == 1]\n\tempties = [box for box in boxes if len(values[box]) == 0]\n\n\tfor empty in empties:\n\t\tvalues[empty] = '123456789'\n\n\tfor box in solved:\n\n\t\tfor peer in peers[box]:\n\t\t\tvalues = assign_value(values, peer, values[peer].replace(values[box], ''))\n\n\treturn values",
"def row_inout_eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n \n location = values[box][0]\n \n if location in location_dict.keys():\n outside = location_dict[location][0]\n \n if str(6) not in box: #only look at periods 1-5\n \n following_activity = inout_dict[box][0]\n if following_activity not in solved_values:\n temp_list = list(values[following_activity])\n \n for locations_next in values[following_activity]:\n \n if location_dict[locations_next][0] == outside and outside == True:\n \n try:\n temp_list.remove(locations_next)\n except:\n pass\n \n \n values[following_activity] = temp_list\n\n return values",
"def eliminate(values):\n complete_boxes = [box for box in values.keys() if len(values[box])==1]\n for box in complete_boxes:\n for peer in peers[box]:\n values = assign_value(values, peer, values[peer].replace(values[box], \"\"))\n \n return values",
"def eliminate(values):\n # TODO: Copy your code from the classroom to complete this function\n for box,value in values.items():\n #print (box,value)\n if len(values[box]) == 1:\n for peer in peers[box]:\n if value in values[peer]:\n values[peer] = values[peer].replace(value,'')\n return values",
"def prune_losers(self):\n self.log.debug(\"PRUNE LOSERS\")\n # check to see if people i followed follow me back\n cutoff_time = (datetime.now()\n - timedelta(hours=self.reciprocation_window))\n ingrates = Target.objects.filter(\n hunter=self.user, status=Target.PURGATORY,\n modified__lt=cutoff_time) # They didn't follow back in time\n\n for ingrate in ingrates:\n ingrate.status = Target.INGRATE\n ingrate.save()\n self.log.debug(\" => Unfollowed %s\" % ingrate.hunted.screen_name)\n try:\n self.api.destroy_friendship(ingrate.hunted)\n except Exception, e:\n print e\n return\n finally:\n pass\n #self.contact(ingrate)",
"def eliminate(values):\n for b in boxes:\n if len(values[b]) == 1:\n for p in peers[b]:\n values = assign_value(values, p, values[p].replace(values[b], ''))\n return values",
"def noiseReduction(self):\n pass",
"def eliminate(self):\n deleteKey = []\n for key,value in self._sets[self._currentSet].items():\n if value < self._minSupport:\n deleteKey.append(key)\n \n for key in deleteKey:\n del self._sets[self._currentSet][key]",
"def discard(self):\n for f in self.featureNames:\n self.data = self.data[self.data[:,self._getFIdx(f)] != '-99999']\n return",
"def discard(self, discard_set, check=True):\n if check:\n value = 0\n for domino in discard_set:\n value += domino.get_value\n\n if value != self.number_point:\n raise BadSumException(self.number_point)\n\n for domino in discard_set:\n self.hand.remove(domino)",
"def discard(self, value):\r\n raise NotImplementedError",
"def naked_twins(values):\n\n # values.keys() creates a dictionary of keys of all of the boxes ['A1', 'A2', 'A3', ...]\n # then we iterate through all of those boxes & check of the length of the value in that box is 2\n # this will return an array of two_number_boxes\n\n #two_number_boxes = [box for box in values.keys() if len(values[box]) == 2]\n two_number_boxes = ['A1', 'B3', 'B5', 'A7']\n # iterate through the two_number_boxes\n for box in two_number_boxes:\n # This gets the columns of the selected box from the two_number_boxes array. For example, if the box is\n # 'A1' then [int(box[1]) - 1] will return 0 (1 - 1). That will then take the first element in the column_units\n # array which in this case would be ['A1', 'B1', 'C1', 'D1', 'E1', 'F1', 'G1', 'H1', 'I1']\n the_cols = column_units[int(box[1]) - 1]\n for col_box in the_cols:\n # Get the value of the column box and see if it's the same as the two digit number and ensure\n # it's not the same box :)\n if values[col_box] == values[box] and box != col_box:\n # If it matches, then record the value to remove\n to_remove = values[col_box]\n # Then loop through the inner_boxes and remove the to_remove value from others in that column\n for inner_box in the_cols:\n if len(values[inner_box]) > 1 and inner_box != col_box and inner_box != box:\n values[inner_box] = values[inner_box].replace(to_remove[0], '')\n values[inner_box] = values[inner_box].replace(to_remove[1], '')\n\n # This takes the letter of the box in the two_number_boxes element and then finds the index of that letter in\n # the rows_array. For ex: if box[0] = 'A' then the letter_index would be zero.\n letter_index = rows_array.index(box[0])\n\n # This then returns an array of the row. For ex, if the box is 'A1' then this would return\n # ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9']\n the_rows = row_units[letter_index]\n\n # Same process as above for columns\n for row_box in the_rows:\n if values[row_box] == values[box] and box != row_box:\n to_remove_row = values[row_box]\n for inner_row_box in the_rows:\n if len(values[inner_row_box]) > 1 and inner_row_box != row_box and inner_row_box != box:\n values[inner_row_box] = values[inner_row_box].replace(to_remove_row[0], '')\n values[inner_row_box] = values[inner_row_box].replace(to_remove_row[1], '')\n\n return values",
"def naked_twins(values):\n\n # Find all instances of naked twins\n for curr_box, units_for_box in units.items(): # looking through all the hashed units for a given box\n for idx, unit_for_box in enumerate(units_for_box): # looping through each unit\n naked_twin_boxes = [curr_box] # we'll add the box to an array to keep as reference for later \n for unit_box in unit_for_box: # looping through each box\n if unit_box != curr_box and values[unit_box] == values[curr_box] and len(values[unit_box]) == 2:\n # if the box we're checking have the same values\n # and the length of the values is two (a naked twin)\n # then we have a naked twin!\n naked_twin_boxes.append(unit_box)\n naked_twin_values_set = set(values[curr_box])\n if len(naked_twin_boxes) > 1:\n # if we have any naked twin boxes\n for unit_box in unit_for_box:\n if unit_box not in naked_twin_boxes:\n new_possibilities = list(set(values[unit_box]).difference(naked_twin_values_set)) # eliminate the naked twin values from the other boxes\n new_possibilities.sort()\n values[unit_box] = \"\".join(new_possibilities)\n\n\n # Eliminate the naked twins as possibilities for their peers\n return values",
"def CleanUp(self):\n for Ind in self.IndList():\n if amax(abs(self[Ind]))<1e-10:\n del self[Ind]",
"def remove_unimproved_species(self):\n for spec_num, spec in list(self.species.items()):\n if self.gen_num - spec.gen_last_improved > self.species_dropoff_age:\n self.species.pop(spec_num)",
"def remove_sgons(s_value, candidates):\n return list(filter(lambda x: x.s != s_value,\n candidates))",
"def noiseRemoval(array, minSize, classes):\n img=array.astype('int')\n for i in range(classes):\n B=(img!=i) # return a bool array\n B = morphology.remove_small_objects(B, min_size=minSize, connectivity=1) \n img[B==False]=i\n \n return img",
"def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)",
"def perform_noise_removal(mask):\n trans1 = cv.dilate(mask, KERNEL, iterations=4)\n trans1 = cv.erode(trans1, KERNEL, iterations=5)\n return cv.dilate(trans1, KERNEL, iterations=7)",
"def test_negate_tips_to_keep(self):\r\n t = DndParser(\"((S5:0.00014,S7:0.00015)0.752:0.45762,(S3:0.00014,\"\r\n \"seq6:0.00014)0.180:0.00015,(Seq1:0.00014,s2:0.00014)0.528:1.0466);\")\r\n\r\n tips_to_keep = [\"S5\", \"Seq1\", \"s2\"]\r\n expected = [\"S7\", \"S3\", \"seq6\"]\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)\r\n\r\n tips_to_keep = [\"S5\", \"Seq1\"]\r\n expected = [\"S7\", \"S3\", \"seq6\", \"s2\"]\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)\r\n\r\n tips_to_keep = []\r\n expected = [\"S7\", \"S3\", \"seq6\", \"s2\", \"S5\", \"Seq1\"]\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)\r\n\r\n tips_to_keep = [\"S7\", \"S3\", \"seq6\", \"s2\", \"S5\", \"Seq1\"]\r\n expected = []\r\n self.assertItemsEqual(negate_tips_to_keep(tips_to_keep, t), expected)"
] | [
"0.6560856",
"0.63050807",
"0.6227025",
"0.61716145",
"0.6147236",
"0.61047584",
"0.61028165",
"0.6063567",
"0.60429865",
"0.59686977",
"0.59654444",
"0.5957118",
"0.5945127",
"0.5822696",
"0.5820852",
"0.58206373",
"0.58202213",
"0.58170843",
"0.5815434",
"0.58153576",
"0.5801343",
"0.5796",
"0.5732593",
"0.5730055",
"0.5721578",
"0.5715104",
"0.57130265",
"0.57101196",
"0.5702027",
"0.56656766"
] | 0.67530364 | 0 |
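The document field in the row above is only a two-comment stub; a minimal sketch of the naked-twins elimination it describes, assuming the usual Sudoku helpers seen in the surrounding snippets (a global `unitlist` of units and a `values` dict mapping each box to its candidate string), could look like this — it is a sketch, not the repository's actual implementation:

def naked_twins(values):
    # For every unit, collect boxes with exactly two candidates and pair up identical ones (the naked twins).
    for unit in unitlist:
        pairs = [box for box in unit if len(values[box]) == 2]
        twins = [(a, b) for i, a in enumerate(pairs) for b in pairs[i + 1:]
                 if values[a] == values[b]]
        # Remove the twins' two digits from every other box in the same unit.
        for a, b in twins:
            for box in unit:
                if box not in (a, b):
                    for digit in values[a]:
                        values[box] = values[box].replace(digit, '')
    return values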
Variance-Covariance calculation of daily Value-at-Risk using confidence level 'c', with mean of returns 'mu' and standard deviation of returns 'sigma', on a portfolio of value 'P'. | def var_cov_var(P, c, mu, sigma):
alpha = norm.ppf(1-c, mu, sigma)
return P - P * (alpha + 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def var_cov_var_normal(P, c, mu=0, sigma=1):\n\n alpha = sp.sp.stats.norm.ppf(1 - c, mu, sigma)\n return P - P * (alpha + 1)",
"def compute_measurement_covariance(jacobian, oldCovariance, sigmaObservation): \n\n return None",
"def compute_initial_covariance(jacobian, sigmaObservation):\n\n return None",
"def variance(X, C):\n try:\n if not isinstance(X, np.ndarray) or len(X.shape) != 2:\n return None\n if not isinstance(C, np.ndarray) or len(C.shape) != 2:\n return None\n distances = np.sqrt(((X - C[:, np.newaxis])**2).sum(axis=-1))\n distancia_min = np.min(distances, axis=0)\n var = np.sum(distancia_min ** 2)\n return var\n except Exception:\n return None",
"def get_covariance(self):\n x = self.particles[:, 0]\n y = self.particles[:, 1]\n X = np.stack((x, y), axis=0)\n return np.cov(X)",
"def get_ci_sigma_unknown(p_data, p_alpha):\n\n # -- -------------------------------------------\n # Gather the core statistic values\n sample_mean = np.mean(p_data)\n sample_std = np.std(p_data, ddof=1) # using ddof=1 for sample std\n sample_size = len(p_data)\n\n df = sample_size - 1\n # -- -------------------------------------------\n\n\n # -- -------------------------------------------\n # Get the standard error of the mean \n sem = get_sem(\n p_provided_std = sample_std,\n p_sample_size = sample_size\n )\n # -- -------------------------------------------\n\n\n # -- ------------------------------------------- \n # Calculate the Margin of Error and Confidence Interval\n _, upper_critical_value = get_two_tailed_critical_values(p_alpha = p_alpha)\n\n upper_critical_t = stats.t.ppf(upper_critical_value, df)\n\n # Get the margin of error:\n moe = upper_critical_t * sem\n\n # Calculate the confidence interval:\n ci = np.array([sample_mean - moe, sample_mean + moe])\n # -- -------------------------------------------\n\n\n # -- -------------------------------------------\n Result = namedtuple('Result', 'sample_mean sample_std sample_size alpha sem confidence_level_pct critical_t_statistic margin_of_error confidence_interval')\n\n result = Result(\n sample_mean,\n sample_std,\n sample_size,\n p_alpha,\n sem,\n upper_critical_value * 100,\n upper_critical_t,\n moe,\n ci\n )\n\n return result\n # -- -------------------------------------------",
"def get_pca_variances(self):\n return self.get_pca_images()[3]",
"def sigma_from_cov(params, cov):\n rands = np.random.multivariate_normal(params, cov, 10000)\n breakdowns = -1*rands[:, 1]/rands[:, 0]\n return np.std(breakdowns)",
"def empirical_covariance(system, excitation, m):\n observations = [system() @ excitation() for _ in range(m)]\n return np.cov(np.array(observations).T)",
"def covariance(x, mean_x, y, mean_y):\r\n \r\n covar = 0.0\r\n for i in range(len(x)):\r\n covar += (x[i] - mean_x) * (y[i] - mean_y)\r\n return covar",
"def variance(X, C):\r\n if ((type(X) is not np.ndarray or X.ndim != 2 or\r\n type(C) is not np.ndarray or C.ndim != 2)):\r\n return None\r\n try:\r\n return (np.square(np.apply_along_axis(np.subtract, 1, X, C))\r\n .sum(axis=2).min(axis=1).sum())\r\n except Exception:\r\n return None",
"def semicovariance(prices, benchmark=0, frequency=252):\n if not isinstance(prices, pd.DataFrame):\n warnings.warn(\"prices are not in a dataframe\", RuntimeWarning)\n prices = pd.DataFrame(prices)\n daily_returns = daily_price_returns(prices)\n drops = np.fmin(daily_returns - benchmark, 0)\n return drops.cov() * frequency",
"def _calculate_covariance_error(self):\n for energy in self.covar_error.keys():\n lc, lc_ref = self._create_lc_and_lc_ref(energy, self.energy_events)\n\n xs_y = self._calculate_excess_variance(lc_ref)\n\n err_x = self._calculate_std(lc)\n err_y = self._calculate_std(lc_ref)\n\n covar = self.energy_covar[energy]\n\n num = (covar**2)*err_y + xs_y*err_x + err_x*err_y\n denom = 2*self.nbins*xs_y\n\n self.covar_error[energy] = (num / denom)**0.5",
"def variance(G, variables = [], conditionants = []):\n \n cov = covariance(G, variables = variables, \n conditionants = conditionants)\n \n if len(variables) == 1: \n return cov[0]\n else: \n return cov",
"def variance(self):\n return (math.exp(self.sigma ** 2) - 1.0) \\\n * math.exp(2.0 * self.mu + self.sigma ** 2)",
"def sigma(self):\n with ops.name_scope(self.name):\n return self._cov.to_dense()",
"def variance(self):\n return self.sigma",
"def variance(self):\n sum_sqdif = 0 # initialize sum of squared differences\n # Calculate sum of squared differences\n for site in self.sites:\n sqdif = (site.siteZmArea - self.meanZmArea()) ** 2\n sum_sqdif = sqdif + sum_sqdif \n # Standard Deviation\n stddev = ((1 / ( float(self.ni) - 1 )) * sum_sqdif ) ** 0.5\n # Variance\n var = stddev ** 2\n return var",
"def cpca_cov(sigma2, d, old=False):\n\n n_ = sigma2.shape[0]\n k_ = d.shape[0]\n i_n = np.eye(n_)\n lambda2_d = np.empty((n_, 1))\n e_d = np.empty((n_, n_))\n\n # Step 0. initialize constraints\n m_ = n_ - k_\n a_n = np.copy(d)\n\n for n in range(n_):\n # Step 1. orthogonal projection matrix\n p_n = [email protected](a_n@a_n.T)@a_n\n\n # Step 2. conditional dispersion matrix\n s2_n = p_n @ sigma2 @ p_n\n\n # Step 3. conditional principal directions/variances\n e_d[:, [n]], lambda2_d[n] = pca_cov(s2_n, 1)\n\n # Step 4. Update augmented constraints matrix\n if n+1 <= m_-1:\n a_n = np.concatenate((a_n.T, sigma2 @ e_d[:, [n]]), axis=1).T\n elif m_ <= n+1 <= n_-1:\n a_n = (sigma2 @ e_d[:, :n+1]).T\n\n return e_d, lambda2_d.squeeze()",
"def sigma(self, x, derivative=False, i=None):\n\n assert isinstance(x, list)\n assert len(x) > 0\n assert isinstance(x[0], tuple)\n if derivative:\n if len(x) > 1:\n error_msg = 'Derivatives of the variance'\n error_msg += ' has not been tested for a vector input'\n raise NotTestedFeature(error_msg)\n assert 0 <= i < len(x[0])\n cov_function = partial(\n self._compute_covariance_matrix_pd, pd_dim=i\n )\n else:\n cov_function = self._compute_covariance_matrix\n\n # assert the Gaussian process is up to date\n self._gp_up_to_date()\n\n current_sigma = cov_function(x, x)\n # Compute the correlation between the parameter x and the observation\n current_cov = self._compute_covariance_matrix(\n x, self.list_observations\n )\n # Solve the linear system\n y = np.linalg.solve(self.cov_matrix, current_cov.T)\n # Assert the resolution of the linear system went well\n assert np.allclose(current_cov.T, self.cov_matrix @ y)\n\n if derivative:\n current_cov_pd = self._compute_covariance_matrix_pd(\n x, self.list_observations, pd_dim=i\n )\n # Solve the linear system\n y_2 = np.linalg.solve(self.cov_matrix, current_cov_pd.T)\n # Assert the resolution of the linear system went well\n assert np.allclose(current_cov_pd.T, self.cov_matrix @ y_2)\n second_term = -current_cov @ y_2 - current_cov_pd @ y\n else:\n second_term = - current_cov @ y\n\n return current_sigma + second_term",
"def _get_standard_concentration_variables(\n self, c_s, c_s_xav=None, c_s_rav=None, c_s_av=None, c_s_surf=None\n ):\n domain, Domain = self.domain_Domain\n phase_name = self.phase_name\n\n # Get surface concentration if not provided as fundamental variable to\n # solve for\n if c_s_surf is None:\n c_s_surf = pybamm.surf(c_s)\n c_s_surf_av = pybamm.x_average(c_s_surf)\n\n c_scale = self.phase_param.c_max\n\n # Get average concentration(s) if not provided as fundamental variable to\n # solve for\n if c_s_xav is None:\n c_s_xav = pybamm.x_average(c_s)\n if c_s_rav is None:\n c_s_rav = pybamm.r_average(c_s)\n if c_s_av is None:\n c_s_av = pybamm.r_average(c_s_xav)\n\n variables = {\n f\"{Domain} {phase_name}particle stoichiometry\": c_s / c_scale,\n f\"{Domain} {phase_name}particle concentration\": c_s / c_scale,\n f\"{Domain} {phase_name}particle concentration [mol.m-3]\": c_s,\n f\"X-averaged {domain} {phase_name}particle concentration\": c_s_xav\n / c_scale,\n f\"X-averaged {domain} {phase_name}particle \"\n \"concentration [mol.m-3]\": c_s_xav,\n f\"R-averaged {domain} {phase_name}particle concentration\": c_s_rav\n / c_scale,\n f\"R-averaged {domain} {phase_name}particle \"\n \"concentration [mol.m-3]\": c_s_rav,\n f\"Average {domain} {phase_name}particle concentration\": c_s_av / c_scale,\n f\"Average {domain} {phase_name}particle concentration [mol.m-3]\": c_s_av,\n f\"{Domain} {phase_name}particle surface stoichiometry\": c_s_surf / c_scale,\n f\"{Domain} {phase_name}particle surface concentration\": c_s_surf / c_scale,\n f\"{Domain} {phase_name}particle surface concentration [mol.m-3]\": c_s_surf,\n f\"X-averaged {domain} {phase_name}particle \"\n \"surface concentration\": c_s_surf_av / c_scale,\n f\"X-averaged {domain} {phase_name}particle \"\n \"surface concentration [mol.m-3]\": c_s_surf_av,\n f\"{Domain} electrode extent of lithiation\": c_s_rav / c_scale,\n f\"X-averaged {domain} electrode extent of lithiation\": c_s_av / c_scale,\n f\"Minimum {domain} {phase_name}particle concentration\": pybamm.min(c_s)\n / c_scale,\n f\"Maximum {domain} {phase_name}particle concentration\": pybamm.max(c_s)\n / c_scale,\n f\"Minimum {domain} {phase_name}particle concentration [mol.m-3]\"\n \"\": pybamm.min(c_s),\n f\"Maximum {domain} {phase_name}particle concentration [mol.m-3]\"\n \"\": pybamm.max(c_s),\n f\"Minimum {domain} {phase_name}particle \"\n \"surface concentration\": pybamm.min(c_s_surf) / c_scale,\n f\"Maximum {domain} {phase_name}particle \"\n \"surface concentration\": pybamm.max(c_s_surf) / c_scale,\n f\"Minimum {domain} {phase_name}particle \"\n \"surface concentration [mol.m-3]\": pybamm.min(c_s_surf),\n f\"Maximum {domain} {phase_name}particle \"\n \"surface concentration [mol.m-3]\": pybamm.max(c_s_surf),\n }\n\n return variables",
"def _get_cum_variance(self) -> np.ndarray:\n return np.cumsum(self.pca.explained_variance_ratio_)",
"def estimateCovariance(df):\n import numpy as np\n m = df.select(df['scaledFeatures']).map(lambda x: x[0]).mean()\n dfZeroMean = df.select(df['scaledFeatures']).map(lambda x: x[0]).map(lambda x: x-m) # subtract the mean\n\n return dfZeroMean.map(lambda x: np.outer(x,x)).sum()/df.count()",
"def extract_covariance(self, block):\n raise RuntimeError(\"You need to implement the method \"\n \"'extract_covariance' if you set constant_covariance=False \"\n \"in a gaussian likelihood\")",
"def test_calculate_variance_covariance(self):\n\n _var_covar = calculate_variance_covariance(22, 620.0, 0.4239, 0.6142)\n self.assertAlmostEqual(_var_covar[0][0], 0.1351777)\n self.assertAlmostEqual(_var_covar[0][1], -0.04660735)\n self.assertAlmostEqual(_var_covar[1][0], -0.04660735)\n self.assertAlmostEqual(_var_covar[1][1], 0.01710296)\n self.assertEqual(_var_covar[0][1], _var_covar[1][0])",
"def prior_variance(self):\n S = self.eval_S(self.kappa, self.sigma_f)\n variance = np.sum((self.eigenfunctions * S[None, :]).T *\n self.eigenfunctions.T, axis=0)\n return variance",
"def variance(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n mean = self.mean()\n weighted_central_moment = sum(\n count * (value - mean) ** 2 for value, count in clean.items()\n )\n return weighted_central_moment / total",
"def covariance(mtrx):\r\n\r\n # Average column of matrix\r\n T = np.transpose(mtrx)\r\n ave = np.zeros(len(mtrx))\r\n mtrx = np.asarray(mtrx)\r\n\r\n if isinstance(mtrx, np.ndarray):\r\n ave = average(T)\r\n\r\n for col in T:\r\n if type(mtrx) == list:\r\n # If data isn't standardized\r\n ave += np.asarray(col)\r\n\r\n\r\n if len(mtrx[0]) > len(mtrx):\r\n for moreRows in range(len(mtrx[0]), len(mtrx)):\r\n mtrx[moreRows] = np.asarray(mtrx[moreRows])\r\n\r\n ave /= len(mtrx[0])\r\n\r\n\r\n phi = T - ave\r\n # Covariance matrix\r\n return np.dot(np.transpose(phi), phi)",
"def solutionCovariance(self):\n return self.standardError2()*self.AtAinv",
"def get_crude_mc_variance(num_samples=10000):\n int_max = 5 # the max of our integration range\n\n # find the average of squares\n running_total = 0\n for i in range(num_samples):\n x = get_random_number(0, int_max)\n running_total += f_of_x(x)**2\n sum_of_sqs = (int_max*running_total/num_samples)\n\n # find square of ave\n running_total = 0\n for i in range(num_samples):\n x = get_random_number(0, int_max)\n running_total += f_of_x(x)\n sq_ave = (int_max*running_total/num_samples)**2\n\n return math.fabs(sum_of_sqs - sq_ave)"
] | [
"0.6847843",
"0.6526581",
"0.6272541",
"0.6260589",
"0.62504464",
"0.61954904",
"0.61854136",
"0.61352956",
"0.61109257",
"0.6048535",
"0.60057604",
"0.5979164",
"0.59760386",
"0.59689605",
"0.5947482",
"0.5946931",
"0.59439176",
"0.5938865",
"0.5895788",
"0.58956015",
"0.5891865",
"0.5890291",
"0.587464",
"0.58666724",
"0.5865938",
"0.58544713",
"0.5852226",
"0.5851096",
"0.58419114",
"0.5833203"
] | 0.7584912 | 0 |
Given kl-get-components command. When Running `run` command on it with json-to-outputs flag and no prefix argument. Then Ensure the json_to_outputs command is failing due to no prefix argument provided. | def test_json_to_outputs_flag_fail_no_prefix(
mocker, monkeypatch, set_environment_variables
):
logger_info = mocker.patch.object(logging.getLogger("demisto-sdk"), "info")
monkeypatch.setenv("COLUMNS", "1000")
# mocks to allow the command to run locally
mocker.patch.object(Runner, "_get_playground_id", return_value="pg_id")
mocker.patch.object(Runner, "_run_query", return_value=["123"])
# mock to get test log file
mocker.patch.object(DefaultApi, "download_file", return_value=DEBUG_FILE_PATH)
# mock to set prefix instead of getting it from input
command = "!kl-get-records"
run_result = CliRunner(
mix_stderr=False,
).invoke(main, ["run", "-q", command, "--json-to-outputs"])
assert 1 == run_result.exit_code
assert str_in_call_args_list(
logger_info.call_args_list,
"A prefix for the outputs is needed for this command. Please provide one",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def output_from_json(self, output: Dict[str, Any]) -> OutputInfo:",
"def test_output_invalid(self):\n assert (\n self.route.output_invalid(hug_core.output_format.json).route[\"output_invalid\"]\n == hug_core.output_format.json\n )",
"def xcresulttool_json(*args):\n args = list(args) + ['--format', 'json']\n contents = xcresulttool(*args)\n return json.loads(contents)",
"def xcresulttool_json(*args):\n args = list(args) + ['--format', 'json']\n contents = xcresulttool(*args)\n return json.loads(contents)",
"def test_run_prefix__bad_json(mocker):\n runner = CliRunner()\n mocked_login = mocker.patch.object(APIClient, \"login\", return_value=None)\n mocked_get_sample_sheet = mocker.patch.object(\n APIClient,\n \"get_sample_sheet\",\n )\n res = runner.invoke(\n run_prefix,\n [\n str(uuid4()),\n \"gncv://batch\",\n \"--metadata-json\",\n \"{bad:\",\n \"--email\",\n \"[email protected]\",\n \"--password\",\n \"123\",\n ],\n )\n assert res.exit_code == 1\n mocked_login.assert_called_once()\n mocked_get_sample_sheet.assert_not_called()\n assert \"Metadata JSON is not valid\" in res.output",
"def testGetOutput(self):\n #f = open(\"src_output.root\", 'w')\n #f.close()\n\n #1) missing required -d option (the other required option, -r, is ignored)\n go = getoutput(self.logger, self.maplistopt)\n res = go()\n expRes = CommandResult(2001, 'ERROR: Task option is required')\n self.assertEquals(expRes, res)\n\n #2) -d option is present but -r is missing\n analysisDir = self.reqarea\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir])\n res = go()\n expRes = CommandResult(2002, 'ERROR: Range option is required')\n self.assertEquals(expRes, res)\n\n #3) request passed with the -d option does not exist\n #res = go([\"-d\", analysisDir + \"asdf\"])\n #TODO we expect an appropriate answer from the server.\n #By now, the server just answer an empty list\n\n #4) check correct behaviour without specifying output directory\n #N.B.: -p options is required for tests to skip proxy creation and delegation\n destDir = os.path.join(analysisDir, 'results')\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir(destDir))\n self.assertTrue(os.path.isfile(os.path.join(destDir, '1.root')))\n #Remove the directory\n shutil.rmtree(destDir)\n self.assertFalse(os.path.isdir(destDir))\n self.assertEquals(expRes, res)\n\n #5) correct behavior and output directory specified which exists\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"/tmp\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('/tmp'))\n destFile = os.path.join('/tmp', '1.root')\n self.assertTrue(os.path.isfile(destFile))\n os.remove(destFile)\n self.assertFalse(os.path.isfile(destFile))\n self.assertEquals(expRes, res)\n\n #6) correct behavior and output directory specified which does not exists\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"/tmp/asdf/qwerty\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('/tmp/asdf/qwerty'))\n #Remove the directory\n shutil.rmtree('/tmp/asdf/qwerty')\n self.assertEquals(expRes, res)\n\n #7) correct behavior and output directory specified which does not exists (relative path)\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"qwerty\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('qwerty'))\n #Remove the directory\n shutil.rmtree('qwerty')\n self.assertEquals(expRes, res)",
"def parse_output(use_json, output):\n return json.loads(output[0]) if use_json else parse_info.construct_tree(output)",
"def test_json_to_outputs_flag(mocker, monkeypatch, set_environment_variables):\n logger_info = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"info\")\n logger_warning = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"warning\")\n logger_error = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"error\")\n monkeypatch.setenv(\"COLUMNS\", \"1000\")\n\n # mocks to allow the command to run locally\n mocker.patch.object(Runner, \"_get_playground_id\", return_value=\"pg_id\")\n mocker.patch.object(Runner, \"_run_query\", return_value=[\"123\"])\n # mock to get test log file\n mocker.patch.object(DefaultApi, \"download_file\", return_value=DEBUG_FILE_PATH)\n # mock to set prefix instead of getting it from input\n\n command = \"!kl-get-records\"\n run_result = CliRunner(\n mix_stderr=False,\n ).invoke(main, [\"run\", \"-q\", command, \"--json-to-outputs\", \"-p\", \"Keylight\", \"-r\"])\n\n assert run_result.exit_code == 0\n assert not run_result.stderr\n assert not run_result.exception\n\n assert str_in_call_args_list(logger_info.call_args_list, YAML_OUTPUT)\n assert logger_warning.call_count == 0\n assert logger_error.call_count == 0",
"def output_to_cwl_json(\n galaxy_output, get_metadata, get_dataset, get_extra_files, pseduo_location=False,\n):\n def element_to_cwl_json(element):\n element_output = GalaxyOutput(\n galaxy_output.history_id,\n element[\"object\"][\"history_content_type\"],\n element[\"object\"][\"id\"],\n )\n return output_to_cwl_json(element_output, get_metadata, get_dataset, get_extra_files)\n\n output_metadata = get_metadata(galaxy_output.history_content_type, galaxy_output.history_content_id)\n\n def dataset_dict_to_json_content(dataset_dict):\n if \"content\" in dataset_dict:\n return json.loads(dataset_dict[\"content\"])\n else:\n with open(dataset_dict[\"path\"]) as f:\n return json.load(f)\n\n if output_metadata[\"history_content_type\"] == \"dataset\":\n ext = output_metadata[\"file_ext\"]\n assert output_metadata[\"state\"] == \"ok\"\n if ext == \"expression.json\":\n dataset_dict = get_dataset(output_metadata)\n return dataset_dict_to_json_content(dataset_dict)\n else:\n file_or_directory = \"Directory\" if ext == \"directory\" else \"File\"\n if file_or_directory == \"File\":\n dataset_dict = get_dataset(output_metadata)\n properties = output_properties(pseduo_location=pseduo_location, **dataset_dict)\n basename = properties[\"basename\"]\n extra_files = get_extra_files(output_metadata)\n found_index = False\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n if path == SECONDARY_FILES_INDEX_PATH:\n found_index = True\n\n if found_index:\n ec = get_dataset(output_metadata, filename=SECONDARY_FILES_INDEX_PATH)\n index = dataset_dict_to_json_content(ec)\n for basename in index[\"order\"]:\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n if path == os.path.join(SECONDARY_FILES_EXTRA_PREFIX, basename):\n ec = get_dataset(output_metadata, filename=path)\n if not STORE_SECONDARY_FILES_WITH_BASENAME:\n ec[\"basename\"] = basename + os.path.basename(path)\n else:\n ec[\"basename\"] = os.path.basename(path)\n ec_properties = output_properties(pseduo_location=pseduo_location, **ec)\n if \"secondaryFiles\" not in properties:\n properties[\"secondaryFiles\"] = []\n\n properties[\"secondaryFiles\"].append(ec_properties)\n else:\n basename = output_metadata.get(\"cwl_file_name\")\n if not basename:\n basename = output_metadata.get(\"name\")\n\n listing = []\n properties = {\n \"class\": \"Directory\",\n \"basename\": basename,\n \"listing\": listing,\n }\n\n extra_files = get_extra_files(output_metadata)\n for extra_file in extra_files:\n if extra_file[\"class\"] == \"File\":\n path = extra_file[\"path\"]\n ec = get_dataset(output_metadata, filename=path)\n ec[\"basename\"] = os.path.basename(path)\n ec_properties = output_properties(pseduo_location=pseduo_location, **ec)\n listing.append(ec_properties)\n\n return properties\n\n elif output_metadata[\"history_content_type\"] == \"dataset_collection\":\n if output_metadata[\"collection_type\"] == \"list\":\n rval = []\n for element in output_metadata[\"elements\"]:\n rval.append(element_to_cwl_json(element))\n elif output_metadata[\"collection_type\"] == \"record\":\n rval = {}\n for element in output_metadata[\"elements\"]:\n rval[element[\"element_identifier\"]] = element_to_cwl_json(element)\n return rval\n else:\n raise NotImplementedError(\"Unknown history content type encountered\")",
"def test_missing_output_workspace(self):\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n config = copy.deepcopy(self.configuration)\n config['output_workspaces'] = {}\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertFalse(results['is_valid'])\n self.assertEqual(len(results['errors']), 1)\n self.assertEqual(results['errors'][0]['name'], 'MISSING_WORKSPACE')",
"def noop_merge(output_json, jsons_to_merge):\n if len(jsons_to_merge) > 1:\n print('Multiple JSONs provided: %s' % (','.join(jsons_to_merge)),\n file=sys.stderr)\n return 1\n if jsons_to_merge:\n shutil.copyfile(jsons_to_merge[0], output_json)\n else:\n with open(output_json, 'w') as f:\n json.dump({}, f)\n return 0",
"def json_formatter(components):\n columns = cfg['columns']\n\n newList = [] # New list of only dictionaries with column attributes to marshall\n\n for component in components:\n newComp = {}\n\n for column in columns:\n try:\n newComp[column] = component[column]\n except:\n newComp[column] = cfg['emptyValue']\n\n newList.append(newComp)\n\n result = json.dumps(newList)\n\n # Save the json file\n save_path = args.output_file\n try:\n with open(save_path, \"w\") as file:\n file.write(result)\n\n Logger.Debug(\"Output saved to\", save_path)\n\n return save_path\n\n except:\n Logger.Error(\"Could not save output to\", save_path)",
"def _json_output(ctx, json, *args, **kwargs):\n # call early if --json wasn't given as a cmd line arg\n if not json:\n return f(ctx, *args, **kwargs)\n\n # gets the original level so the decorator can restore it\n original_level = logging.getLogger('').manager.disable\n\n # disables ALL log messages critical and below\n logging.disable(logging.CRITICAL)\n\n try:\n result = f(ctx, *args, **kwargs)\n except exceptions.Two1Error as ex:\n # sets the level back to original\n logging.disable(original_level)\n\n err_json = ex._json\n err_json[\"error\"] = ex._msg\n\n # dumps the json error\n logger.info(jsonlib.dumps(err_json, indent=4, separators=(',', ': ')))\n\n raise ex\n else:\n # sets the level back to original\n logging.disable(original_level)\n\n # dumps the json result\n logger.info(jsonlib.dumps(result, indent=4, separators=(',', ': ')))\n\n return result",
"def test_no_output_option_accepted():\n kgo_dir = acc.kgo_root() / \"interpret_metadata\"\n input_path = kgo_dir / \"temperature_realizations.nc\"\n args = [input_path, \"--output\", \"/dev/null\"]\n with pytest.raises(UnknownOption, match=\".*Unknown option '--output'.*\"):\n run_cli(args)",
"def create_output_for_success(returned_result):\r\n # In future, there should be a check here that the resulting JSON\r\n # will fit in the column. In the meantime, just return an exception.\r\n json_output = json.dumps(returned_result)\r\n if len(json_output) > 1023:\r\n raise ValueError(\"Length of task output is too long: {0}\".format(json_output))\r\n return json_output",
"def main():\n if config.command == \"list-groups\":\n # Get the list of policies in JSON format for the given network\n if hasattr(config, 'accountSwitchKey'):\n groupList = listGroups(config.accountSwitchKey)\n else:\n groupList = listGroups()\n formatOutputGroupList(groupList, config.output_type)\n\n elif config.command == \"list-connectors\":\n if hasattr(config, 'accountSwitchKey'):\n connectorList = listConnectors(config.accountSwitchKey)\n else:\n connectorList = listConnectors()\n formatOutputConnectorList(connectorList, config.output_type)\n\n elif config.command == \"list-products\":\n if hasattr(config, 'accountSwitchKey'):\n productsList = listProducts(config.accountSwitchKey)\n else:\n productsList = listProducts()\n formatOutputProductList(productsList, config.output_type)\n\n elif config.command == \"list-stream-types\":\n if hasattr(config, 'accountSwitchKey'):\n streamTypeList = listStreamTypes(config.accountSwitchKey)\n else:\n streamTypeList = listStreamTypes()\n formatOutputStreamTypeList(streamTypeList, config.output_type)\n\n elif config.command == \"list-streams\":\n if hasattr(config, 'accountSwitchKey'):\n streamList = listStreams(config.groupid,config.streamstatus,config.accountSwitchKey)\n else:\n streamList = listStreams(config.groupid,config.streamstatus)\n formatOutputStreamList(streamList, config.output_type)\n\n elif config.command == \"list-properties\":\n if hasattr(config, 'accountSwitchKey'):\n propertiesList = listProperties(config.groupid,config.productId,config.accountSwitchKey)\n else:\n propertiesList = listProperties(config.groupid,config.productId)\n formatOutputPropertiesList(propertiesList, config.output_type)\n\n elif config.command == \"list-error-streams\":\n if hasattr(config, 'accountSwitchKey'):\n errorstreamList = listErrorStreams(config.groupid,config.accountSwitchKey)\n else:\n errorstreamList = listErrorStreams(config.groupid)\n formatOutputErrorStreamList(errorstreamList, config.output_type)\n\n elif config.command == \"create\":\n # Opening JSON file\n f = open(config.file.name,'r')\n data = json.load(f)\n json_string = json.dumps(data) #Very Important since when you read it will be in single quotes, it need to be dumped to json and strings are only valid only in double quotes\n\n if hasattr(config, 'accountSwitchKey'):\n createResponse = createStream(json_string,config.accountSwitchKey)\n else:\n createResponse = createStream(json_string)\n formatOutputActDeactResp(createResponse)\n\n elif config.command == \"update\":\n # Opening JSON file\n f = open(config.file.name,'r')\n data = json.load(f)\n json_string = json.dumps(data) #Very Important since when you read it will be in single quotes, it need to be dumped to json and strings are only valid only in double quotes\n print(json_string)\n if hasattr(config, 'accountSwitchKey'):\n updateResponse = updateStream(json_string,config.streamid,config.accountSwitchKey)\n else:\n updateResponse = updateStream(json_string,config.streamid)\n formatOutputActDeactResp(updateResponse)\n\n\n elif config.command == \"get-stream\":\n if hasattr(config, 'accountSwitchKey'):\n streamDetail = getStream(config.streamid,config.accountSwitchKey)\n else:\n streamDetail = getStream(config.streamid)\n formatOutputStreamDetail(streamDetail, config.output_type)\n\n elif config.command == \"activation-history\":\n if hasattr(config, 'accountSwitchKey'):\n activationHistory = getStreamActHistory(config.streamid,config.accountSwitchKey)\n else:\n activationHistory = getStreamActHistory(config.streamid)\n 
formatOutputActHistory(activationHistory, config.output_type)\n\n elif config.command == \"stream-history\":\n if hasattr(config, 'accountSwitchKey'):\n streamHistory = getStreamHistory(config.streamid,config.accountSwitchKey)\n else:\n streamHistory = getStreamHistory(config.streamid)\n formatOutputStreamHistory(streamHistory, config.output_type)\n\n elif config.command == \"list-datasets\":\n if hasattr(config, 'accountSwitchKey'):\n datasetList = getDatasets(config.template,config.accountSwitchKey)\n else:\n datasetList = getDatasets(config.template)\n formatOutputDatasetList(datasetList, config.output_type)\n\n elif config.command == \"activate\":\n if hasattr(config, 'accountSwitchKey'):\n activateResponse = activateStream(config.streamid,config.accountSwitchKey)\n else:\n activateResponse = activateStream(config.streamid)\n formatOutputActDeactResp(activateResponse)\n\n elif config.command == \"deactivate\":\n if hasattr(config, 'accountSwitchKey'):\n deactivateResponse = deActivateStream(config.streamid,config.accountSwitchKey)\n else:\n deactivateResponse = deActivateStream(config.streamid)\n formatOutputActDeactResp(deactivateResponse)\n\n elif config.command == \"delete\":\n if hasattr(config, 'accountSwitchKey'):\n deleteResponse = deleteStream(config.streamid,config.accountSwitchKey)\n else:\n deleteResponse = deleteStream(config.streamid)\n formatOutputActDeactResp(deleteResponse)",
"def test_CLI_user_json(self, capsys):\n sys.argv = (self.common_args + [\"-l\", \"Berger_POPC\"]\n + [\"-lt\", str(PATH_ROOT_DATA / \"Berger_POPC.json\")])\n UI.entry_point()\n captured = capsys.readouterr().out\n assert \"Results written to OP_buildH.out\" in captured",
"def test_to_json():\n mock_json = ['foo', {'bar': ['baz', None, 1.0, 2]}]\n output = sh.to_json(mock_json)\n assert output == '[\"foo\", {\"bar\": [\"baz\", null, 1.0, 2]}]'",
"def run_json(self, cmd):\n\n try:\n loaded = json.loads(self.run(cmd + ' J'))\n except ValueError as decode_error:\n raise StorcliException('Problem processing output: {}'.format(decode_error))\n return loaded",
"def test_empty_output_successful(self):\n\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['job']['interface']['outputs'] = {}\n\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n url = '/%s/job-types/validation/' % self.api\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})",
"def test_to_json_with_non_result(self):\n actual_result = ResultBuilder(None,\n ERROR_MESSAGE,\n ERROR_RETURN_CODE).build_json()\n self.assertMultiLineEqual(actual_result, EXPECTED_OUTPUT_BUILDER_ERROR)",
"def test_run_prefix__success_with_json(mocker):\n runner = CliRunner()\n mocked_login = mocker.patch.object(APIClient, \"login\", return_value=None)\n mocked_get_sample_sheet = mocker.patch.object(\n APIClient,\n \"get_sample_sheet\",\n return_value=SampleSheet(**MOCKED_UPLOADS),\n )\n mocked_add_samples_to_project = mocker.patch.object(\n APIClient,\n \"add_samples_to_project\",\n return_value=UploadSamples(**{}),\n )\n\n res = runner.invoke(\n run_prefix,\n [\n str(uuid4()),\n \"gncv://batch\",\n \"--metadata-json\",\n '{\"somekey\": \"somevalue\"}',\n \"--email\",\n \"[email protected]\",\n \"--password\",\n \"123\",\n ],\n )\n assert res.exit_code == 0\n mocked_login.assert_called_once()\n mocked_get_sample_sheet.assert_called_once()\n mocked_add_samples_to_project.assert_called_once()\n assert \"Assigning metadata to the uploaded samples.\" in res.output",
"def handle_unsuccessful_cmd(out, error_template, missing_resource_template):\n if MISSING_RESOURCE in out.lower() or UNRECOGNIZED_RESOURCE in out.lower():\n logger.info(missing_resource_template)\n else:\n logger.warning(error_template.format(out.rstrip()))",
"def test_bcl_convert_workflow_output_not_json(self):\n mock_sqr = SequenceRunFactory()\n\n mock_workflow = Workflow()\n mock_workflow.wfr_id = f\"wfr.{_rand(32)}\"\n mock_workflow.type_name = WorkflowType.BCL_CONVERT.value\n mock_workflow.end_status = WorkflowStatus.SUCCEEDED.value\n mock_workflow.sequence_run = mock_sqr\n mock_workflow.output = \"\"\"\n \"main/fastq_list_rows\": [\n {\n \"rgid\": \"THIS_DOES_NOT_MATTER_AS_ALREADY_MALFORMED_JSON\",\n }\n ]\n \"\"\"\n try:\n orchestrator.next_step(mock_workflow, {'global': [], 'by_run': {}}, None)\n except Exception as e:\n logger.exception(f\"THIS ERROR EXCEPTION IS INTENTIONAL FOR TEST. NOT ACTUAL ERROR. \\n{e}\")\n self.assertRaises(json.JSONDecodeError)",
"def call_py(self, command, *args, **kwargs):\n\tif self.output_format and self.output_format != 'json':\n\t raise RuntimeError, \"output_format must be 'json' for this to work\"\n\treturn simplejson.loads(self.call(command, *args, **kwargs))",
"def check_output(*args, **kwargs):\n if isinstance(args[0], list):\n new_args = args\n else:\n if len(args) == 1 and isinstance(args[0], basestring):\n args = args[0].split()\n new_args = [args]\n string_in = kwargs.pop('input', None)\n expected_return_code = kwargs.pop('return_code', [])\n if not isinstance(expected_return_code, list):\n expected_return_code = [expected_return_code]\n #print 'string_in', string_in\n #print 'new_args', new_args\n # possible pop extra arguments for Popen\n # newargs.extend(kwargs.pop('args', [])\n process = subprocess.Popen(stdin=subprocess.PIPE if string_in else None,\n stdout=subprocess.PIPE, *new_args, **kwargs)\n output, unused_err = process.communicate(input=string_in)\n retcode = process.poll()\n if retcode and not retcode in expected_return_code:\n cmd = args[0]\n raise subprocess.CalledProcessError(retcode, cmd)\n return output\n #return subprocess.check_output(*new_args, **kwargs)",
"def svn_diff_output_fns_invoke_output_conflict(_obj, output_baton, original_start, original_length, modified_start, modified_length, latest_start, latest_length, resolved_diff):\n return _diff.svn_diff_output_fns_invoke_output_conflict(_obj, output_baton, original_start, original_length, modified_start, modified_length, latest_start, latest_length, resolved_diff)",
"def writeJobJSON(self):\n\n with self.c.prefix(f\"source {self.hostDefn[self.host]['condaPath']} {self.hostDefn[self.host]['condaEnv']}\"):\n result = self.c.run(f\"python {Path(self.hostDefn[self.host]['repoScpPath'], self.scpDefnRepo['jobJSON']).as_posix()} {self.hostDefn[self.host]['nbProcDir']/self.jsonProcFile.name}\")\n\n return result",
"def check_output(out: Union[str, bytes], fmt: str) -> None:\n if fmt in [\"png\", \"pdf\"]:\n assert isinstance(out, bytes)\n elif fmt in [\"vega\", \"vega-lite\"]:\n assert isinstance(out, str)\n dct = json.loads(out)\n assert len(dct) > 0\n else:\n assert isinstance(out, str)\n assert len(out) > 0",
"def output_run(run_data, name):\n\n print(json.dumps(run_data, indent=4))\n ret = run_data.get('return', {})\n display_output(\n {name: ret}, \n\tout=run_data.get('out', 'nested'),\n\topts = salt.config.minion_config('/dev/null'))"
] | [
"0.55570996",
"0.54450536",
"0.54315525",
"0.54315525",
"0.542043",
"0.5378358",
"0.5360909",
"0.53541315",
"0.5313696",
"0.5289341",
"0.52827024",
"0.5255093",
"0.5228024",
"0.5222124",
"0.5177748",
"0.51397806",
"0.51249546",
"0.5121813",
"0.51027524",
"0.5102597",
"0.50788975",
"0.50523734",
"0.5030504",
"0.5006405",
"0.4989626",
"0.49753234",
"0.49398234",
"0.4939619",
"0.49347565",
"0.49318045"
] | 0.6202639 | 0 |
Given kl-get-components command and incident-id argument. When Running `run` command on it. Then Ensure the investigation_id is set from the incident-id. | def test_incident_id_passed_to_run(mocker, monkeypatch, set_environment_variables):
logger_debug = mocker.patch.object(logging.getLogger("demisto-sdk"), "debug")
logger_warning = mocker.patch.object(logging.getLogger("demisto-sdk"), "warning")
logger_error = mocker.patch.object(logging.getLogger("demisto-sdk"), "error")
monkeypatch.setenv("COLUMNS", "1000")
# mocks to allow the command to run locally
mocker.patch.object(Runner, "_run_query", return_value=["123"])
# mock to get test log file
mocker.patch.object(DefaultApi, "download_file", return_value=DEBUG_FILE_PATH)
# mock to set prefix instead of getting it from input
command = "!kl-get-records"
run_result = CliRunner(
mix_stderr=False,
).invoke(main, ["run", "-q", command, "--incident-id", "pg_id"])
assert run_result.exit_code == 0
assert str_in_call_args_list(
logger_debug.call_args_list, "running command in investigation_id='pg_id'"
)
assert logger_warning.call_count == 0
assert logger_error.call_count == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_installments_id_get(self):\n pass",
"def test_id_is_set_without_original_id(mocker):\n runner = CliRunner()\n result = runner.invoke(main, ['id', 'abcd'])\n assert result.exit_code != 0",
"def main():\n demisto.info('Command being called is ' + demisto.command())\n\n \"\"\"\n PARSE AND VALIDATE INTEGRATION PARAMS\n \"\"\"\n\n rest_client = RestClient(\n base_url=BASE_URL,\n verify=VERIFY_CERT,\n )\n\n try:\n if demisto.command() == 'test-module':\n test_module(rest_client)\n demisto.results('ok')\n\n elif demisto.command() == 'fetch-incidents':\n # get all tenant ids\n next_run, incidents = fetch_incidents(rest_client, demisto.getLastRun())\n demisto.setLastRun(next_run)\n demisto.incidents(incidents)\n\n elif demisto.command() == 'mad-close-incident':\n return_outputs(close_incident_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'mad-assign-user':\n return_outputs(assign_user_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'mad-remove-user':\n return_outputs(remove_user_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'mad-get-incident':\n return_results(get_incident_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'update-remote-system':\n return_results(update_remote_system_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'get-mapping-fields':\n return_results(get_mapping_fields_command())\n\n elif demisto.command() == 'get-remote-data':\n return_results(get_remote_data_command(rest_client, demisto.args()))\n\n elif demisto.command() == 'mad-get-escalations':\n return_results(get_escalations_command(rest_client, demisto.args()))\n\n else:\n raise NotImplementedError('Command not implemented')\n\n except NotImplementedError:\n raise\n except Exception as err:\n demisto.error(traceback.format_exc()) # print the traceback\n return_error(f'Failed to execute {demisto.command()} command.\\nError:\\n{str(err)}')",
"def maincli():\n parser = argparse.ArgumentParser(description=\"query SBR cases\")\n parser.add_argument('-c', '--config-file', default='rh_auth.yaml',\n help='Path to config file')\n parser.add_argument('-t', '--sbr-team',\n help='SBR team to query')\n parser.add_argument('-s', '--status', action='append',\n help='case status query param')\n parser.add_argument('-i', '--internal-status', action='append',\n help='case internal status query param')\n parser.add_argument('-p', '--product', action='append',\n help='product query param')\n parser.add_argument('-e', '--endpoint',\n default='https://unified.gsslab.rdu2.redhat.com',\n help='Unified API endpoint URL')\n parser.add_argument('-d', '--debug', action='store_true',\n help='Unified API endpoint URL')\n\n # My preferrred defaults\n parser.set_defaults(sbr_team='Shift',\n status=['Waiting on Red Hat'],\n internal_status=['Waiting on Collaboration'],\n product=['OpenShift Online'])\n args = parser.parse_args()\n cfg = yaml.load(open(args.config_file).read())\n cfg['unified_endpoint'] = args.endpoint\n client = UnifiedClient(cfg)\n if args.debug:\n import logging\n logging.basicConfig(level=logging.DEBUG)\n cases = client.get_sbr_cases(args.sbr_team, status=args.status,\n internal_status=args.internal_status,\n product=[x.lower() for x in args.product])\n\n print json.dumps(cases, indent=4)",
"def test_id_nonexistent(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"e-dad\"\n self.assertRaises(InvalidInputError, self.command.run)",
"def test_intercommunalitys_id_get(self):\n pass",
"def main():\n params = demisto.params()\n service_principal = params.get('credentials', {}).get('identifier')\n secret = params.get('credentials', {}).get('password')\n\n # Remove trailing slash to prevent wrong URL path to service\n server_url = params['url'][:-1] if (params['url'] and params['url'].endswith('/')) else params['url']\n api_version = params.get('api_version')\n\n verify_certificate = not params.get('insecure', False)\n # How many time before the first fetch to retrieve incidents\n fetch_time = params.get('fetch_time', '60 minutes')\n\n threat_status = argToList(params.get('threat_status'))\n\n threat_type = argToList(params.get('threat_type'))\n\n event_type_filter = params.get('events_type')\n\n fetch_limit = 50\n # Remove proxy if not set to true in params\n proxies = handle_proxy()\n\n command = demisto.command()\n LOG(f'Command being called is {command}')\n\n try:\n client = Client(server_url, api_version, verify_certificate, service_principal, secret, proxies)\n commands = {\n 'proofpoint-get-events': get_events_command,\n 'proofpoint-get-forensics': get_forensic_command\n }\n if command == 'test-module':\n results = test_module(client, fetch_time, event_type_filter)\n return_outputs(results)\n\n elif demisto.command() == 'fetch-incidents':\n integration_context = demisto.getIntegrationContext()\n next_run, incidents, remained_incidents = fetch_incidents(\n client=client,\n last_run=demisto.getLastRun(),\n first_fetch_time=fetch_time,\n event_type_filter=event_type_filter,\n threat_status=threat_status,\n threat_type=threat_type,\n limit=fetch_limit,\n integration_context=integration_context\n )\n # Save last_run, incidents, remained incidents into integration\n demisto.setLastRun(next_run)\n demisto.incidents(incidents)\n # preserve context dict\n integration_context['incidents'] = remained_incidents\n demisto.setIntegrationContext(integration_context)\n\n elif command in commands:\n return_outputs(*commands[command](client, demisto.args()))\n\n except Exception as e:\n return_error(str(e))",
"def __init__(self, revision_id, incident_id, dao, controller, workbook):\r\n\r\n self._incident_id = incident_id\r\n self._controller = controller\r\n self._workbook = workbook\r\n\r\n self.assistant = gtk.Assistant()\r\n self.assistant.set_title(_(u\"RTK Add Affected Component Assistant\"))\r\n self.assistant.connect('apply', self._add_component)\r\n self.assistant.connect('cancel', self._cancel)\r\n self.assistant.connect('close', self._cancel)\r\n\r\n # Create the introduction page.\r\n _fixed = gtk.Fixed()\r\n _label = Widgets.make_label(_(u\"This is the RTK incident affected \"\r\n u\"component assistant. It will help \"\r\n u\"you add an affected component to the \"\r\n u\"currently selected program Incident. \"\r\n u\"Press 'Forward' to continue or \"\r\n u\"'Cancel' to quit the assistant.\"),\r\n width=-1, height=-1, wrap=True)\r\n _fixed.put(_label, 5, 5)\r\n self.assistant.append_page(_fixed)\r\n self.assistant.set_page_type(_fixed, gtk.ASSISTANT_PAGE_INTRO)\r\n self.assistant.set_page_title(_fixed, _(u\"Introduction\"))\r\n self.assistant.set_page_complete(_fixed, True)\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Create the incident information page. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n self.cmbHardware = Widgets.make_combo(simple=False)\r\n # self.cmbSoftware = Widgets.make_combo(simple=False)\r\n\r\n # Load the gtk.ComboBox() widgets.\r\n _query = \"SELECT fld_name, fld_hardware_id, fld_description \\\r\n FROM rtk_hardware \\\r\n WHERE fld_revision_id={0:d} \\\r\n AND fld_part=1\".format(revision_id)\r\n (_results, _error_code, __) = dao.execute(_query, commit=False)\r\n Widgets.load_combo(self.cmbHardware, _results, simple=False)\r\n\r\n # Create and place the labels.\r\n self.fxdPageGeneral = gtk.Fixed()\r\n\r\n _labels = [_(u\"Select Component*:\")]\r\n (_x_pos, _y_pos) = Widgets.make_labels(_labels,\r\n self.fxdPageGeneral, 5, 5)\r\n _x_pos += 40\r\n\r\n self.fxdPageGeneral.put(self.cmbHardware, _x_pos, _y_pos[0])\r\n\r\n # Connect widget signals to callback functions.\r\n self.cmbHardware.connect('changed', self._check_ready, None, 2)\r\n\r\n self.assistant.append_page(self.fxdPageGeneral)\r\n self.assistant.set_page_type(self.fxdPageGeneral,\r\n gtk.ASSISTANT_PAGE_CONTENT)\r\n self.assistant.set_page_title(self.fxdPageGeneral, _(u\"Select \"\r\n u\"Component\"))\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Create the confirmation page. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _fixed = gtk.Fixed()\r\n\r\n _label = Widgets.make_label(_(u\"Press 'Apply' to add the affected \"\r\n u\"component or 'Cancel' to quit the \"\r\n u\"assistant without adding the \"\r\n u\"component.\"),\r\n width=-1, height=-1, wrap=True)\r\n _fixed.put(_label, 5, 5)\r\n\r\n self.assistant.append_page(_fixed)\r\n self.assistant.set_page_type(_fixed,\r\n gtk.ASSISTANT_PAGE_CONFIRM)\r\n self.assistant.set_page_title(_fixed, _(u\"Confirm Addition of the New \"\r\n u\"Component\"))\r\n self.assistant.set_page_complete(_fixed, True)\r\n\r\n self.assistant.show_all()",
"def test_no_identities(dummy_command):\n # get_identities will return some options.\n dummy_command.get_identities.return_value = {}\n\n # Return option 1\n dummy_command.input.values = [\"1\"]\n\n result = dummy_command.select_identity()\n\n assert result == (\n \"-\",\n (\n \"Ad-hoc identity. The resulting package will run but cannot be \"\n \"re-distributed.\"\n ),\n )\n\n # User input was solicited\n assert dummy_command.input.prompts",
"def run(self):\n investigation_id = self.incident_id or self._get_playground_id()\n logger.debug(f\"running command in {investigation_id=}\")\n\n try:\n log_ids = self._run_query(investigation_id)\n except DemistoRunTimeError as err:\n log_ids = None\n logger.info(f\"[red]{err}[/red]\")\n\n if self.debug:\n if not log_ids:\n logger.info(\"[yellow]Entry with debug log not found[/yellow]\")\n else:\n self._export_debug_log(log_ids)\n\n if self.json2outputs:\n if not self.prefix:\n logger.info(\n \"[red]A prefix for the outputs is needed for this command. Please provide one[/red]\"\n )\n return 1\n else:\n raw_output_json = self._return_context_dict_from_log(log_ids)\n if raw_output_json:\n with tempfile.NamedTemporaryFile(mode=\"w+\") as f:\n if isinstance(raw_output_json, dict):\n f.write(json.dumps(raw_output_json))\n if isinstance(raw_output_json, list):\n f.write(json.dumps(raw_output_json[0]))\n f.seek(0)\n file_path = f.name\n command = self.query.split(\" \")[0]\n json_to_outputs(command, json=file_path, prefix=self.prefix)\n else:\n logger.info(\n \"[red]Could not extract raw output as JSON from command[/red]\"\n )\n return 1",
"def main():\n params = demisto.params()\n service_principal = params.get('credentials', {}).get('identifier')\n secret = params.get('credentials', {}).get('password')\n\n # Remove trailing slash to prevent wrong URL path to service\n server_url = params['url'][:-1] if (params['url'] and params['url'].endswith('/')) else params['url']\n api_version = params.get('api_version')\n\n verify_certificate = not params.get('insecure', False)\n # How many time before the first fetch to retrieve incidents\n fetch_time = params.get('fetch_time', '60 minutes')\n\n threat_status = argToList(params.get('threat_status'))\n\n threat_type = argToList(params.get('threat_type'))\n\n event_type_filter = params.get('events_type')\n\n raw_json_encoding = params.get('raw_json_encoding')\n\n fetch_limit = min(int(params.get('limit', DEFAULT_LIMIT)), DEFAULT_LIMIT)\n # Remove proxy if not set to true in params\n proxies = handle_proxy()\n\n command = demisto.command()\n args = demisto.args()\n demisto.info(f'Command being called is {command}')\n demisto.debug(f'{fetch_time=}')\n try:\n client = Client(server_url, api_version, verify_certificate, service_principal, secret, proxies)\n commands = {\n 'proofpoint-get-events': get_events_command,\n 'proofpoint-get-forensics': get_forensic_command\n }\n if command == 'test-module':\n return_outputs(test_module(client))\n\n elif demisto.command() == 'fetch-incidents':\n integration_context = demisto.getIntegrationContext()\n next_run, incidents, remained_incidents = fetch_incidents(\n client=client,\n last_run=demisto.getLastRun(),\n first_fetch_time=fetch_time,\n event_type_filter=event_type_filter,\n threat_status=threat_status,\n threat_type=threat_type,\n limit=fetch_limit,\n integration_context=integration_context,\n raw_json_encoding=raw_json_encoding,\n )\n # Save last_run, incidents, remained incidents into integration\n demisto.setLastRun(next_run)\n demisto.incidents(incidents)\n # preserve context dict\n integration_context['incidents'] = remained_incidents\n demisto.setIntegrationContext(integration_context)\n\n elif command in commands:\n return_outputs(*commands[command](client, args))\n\n elif command == 'proofpoint-get-events-clicks-blocked':\n return_results(get_clicks_command(client, is_blocked=True, **args))\n\n elif command == 'proofpoint-get-events-clicks-permitted':\n return_results(get_clicks_command(client, is_blocked=False, **args))\n\n elif command == 'proofpoint-get-events-messages-blocked':\n return_results(get_messages_command(client, is_blocked=True, **args))\n\n elif command == 'proofpoint-get-events-messages-delivered':\n return_results(get_messages_command(client, is_blocked=False, **args))\n\n elif command == 'proofpoint-list-campaigns':\n return_results(list_campaigns_command(client, **args))\n\n elif command == 'proofpoint-get-campaign':\n return_results(get_campaign_command(client, **args))\n\n elif command == 'proofpoint-list-most-attacked-users':\n return_results(list_most_attacked_users_command(client, **args))\n\n elif command == 'proofpoint-get-top-clickers':\n return_results(get_top_clickers_command(client, **args))\n\n elif command == 'proofpoint-url-decode':\n return_results(url_decode_command(client, **args))\n\n elif command == 'proofpoint-list-issues':\n return_results(list_issues_command(client, **args))\n\n except Exception as exception:\n if command == 'test-module':\n return_error(str(exception))\n return_error(f'Failed to execute {command} command. Error: {str(exception)}')",
"def main():\n\n parser = argparse.ArgumentParser(prog='glpi-cli',\n usage='%(prog)s --item item_name '\n '--command cmd [options]')\n\n parser.add_argument(\"-i\", \"--item\", metavar='i', dest=\"item_name\",\n required=True,\n help=\"GLPI Item Name. [ticket, knownbase]\")\n\n parser.add_argument(\"-c\", \"--command\", metavar='c', dest=\"command\",\n required=True,\n help=\"Command could be: [get|get_all].\")\n\n parser.add_argument(\"-id\", \"--id\", metavar='id', dest=\"item_id\",\n type=int, required=False,\n help=\"GLPI Item ID.\")\n\n parser.add_argument(\"-f\", \"--force\", action=\"store_true\",\n dest=\"flag_force\", required=False, default=False,\n help=\"Force changes.\")\n\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",\n dest=\"flag_verbose\", required=False, default=False,\n help=\"Verbose.\")\n\n parser.add_argument(\"-p\", \"--payload\", metavar='p', dest=\"item_payload\",\n help=\"GLPI Item Payload to be updated.\")\n\n args = parser.parse_args()\n\n # ID should be defined in...\n if (args.command == 'get') or \\\n (args.command == 'delete') or \\\n (args.command == 'update'):\n if not args.item_id:\n print '{ \"error_message\": \"This command requires option --id ID\" }'\n sys.exit(1)\n\n cli = CLI()\n item_dict = {}\n\n if (args.command == 'get'):\n print json.dumps(cli.get(args.item_name, args.item_id),\n indent=4,\n separators=(',', ': '),\n sort_keys=True)\n\n elif (args.command == 'get_all'):\n try:\n print json.dumps(cli.get_all(args.item_name),\n indent=4,\n separators=(',', ': '),\n sort_keys=True)\n except Exception as e:\n print('{ \"error_message\": \"get_all: {}\".format(e) }')\n sys.exit(1)\n\n elif (args.command == 'delete'):\n\n try:\n item = cli.get(args.item_name, args.item_id)\n\n if 'id' not in item:\n print(\"ID not found in GLPI server. Aborting...\")\n sys.exit(1)\n\n print json.dumps(item,\n indent=4,\n separators=(',', ': '),\n sort_keys=True)\n\n if not args.flag_force:\n msg = \"The item will deleted, do you want to continue? [y/n]\"\n rc, rm = get_prompt_yes_or_no(msg)\n if rc > 0:\n print(rm)\n sys.exit(1)\n\n print(\"Deleting item ID {}\".format(args.item_id))\n print json.dumps(cli.delete(args.item_name, args.item_id),\n indent=4,\n separators=(',', ': '),\n sort_keys=True)\n except Exception as e:\n print('{ \"error_message\": \"delete: %s\" }' % e)\n\n elif (args.command == 'update'):\n try:\n item = cli.get(args.item_name, args.item_id)\n k_update = {}\n\n if 'id' not in item:\n print(\"ID not found in GLPI server. 
Aborting...\")\n sys.exit(1)\n\n payload = json.loads(args.item_payload)\n\n # looking for changes\n for k in payload:\n if k not in item:\n if 'notFound' not in k_update:\n k_update['notFound'] = {}\n k_update['notFound'].update({k: payload[k]})\n continue\n\n if k == 'id':\n continue\n\n if payload[k] == item[k]:\n if 'notChanged' not in k_update:\n k_update['notChanged'] = {}\n k_update['notChanged'].update({k: payload[k]})\n continue\n\n if 'change' not in k_update:\n k_update['change'] = {}\n\n k_update['change'].update({k: payload[k]})\n\n if args.flag_verbose:\n print(\"Original Item: \")\n print json.dumps(item,\n indent=4,\n separators=(',', ': '),\n sort_keys=True)\n\n if 'notFound' in k_update:\n print(\"The key(s) bellow was not found: \")\n print(json.dumps(k_update['notFound'], indent=4))\n\n if 'notChanged' in k_update:\n print(\"The key(s) bellow was not changed: \")\n print(json.dumps(k_update['notChanged'], indent=4))\n\n if 'change' not in k_update:\n print(\"Nothing to change, exiting...\")\n sys.exit(0)\n\n print(\"Changing the key(s) bellow: \")\n print(json.dumps(k_update['change'], indent=4))\n if args.flag_verbose:\n print(\"Detailed changes: \")\n change_log = []\n for k in k_update['change']:\n changes = {\n k: {\n \"current\": item[k],\n \"new\": k_update['change'][k]\n }\n }\n change_log.append(changes)\n print(json.dumps(change_log, indent=4))\n\n if not args.flag_force:\n rc, rm = get_prompt_yes_or_no(\"Do you want to continue? [y/n]\")\n if rc > 0:\n print(rm)\n sys.exit(1)\n\n k_update['change'].update({\"id\": args.item_id})\n payload = k_update['change']\n\n print(\"Updating the item ID {}\".format(args.item_id))\n print(json.dumps(cli.update(args.item_name, payload),\n indent=4,\n separators=(',', ': '),\n sort_keys=True))\n\n except Exception as e:\n print('{ \"error_message\": \"update: %s\" }' % e)\n\n else:\n msg = (\"Command [{}] not found\".format(args.command))\n print('{ \"error_message\": \"%s\" }' % msg)\n sys.exit(1)\n\n sys.exit(0)",
"def test_cyclingleagues_id_get(self):\n pass",
"async def importIncident(self, incident: Incident) -> None:",
"def __init__(self, revision_id, dao, modulebook):\r\n\r\n self._dao = dao\r\n self._revision_id = revision_id\r\n self._modulebook = modulebook\r\n\r\n self.assistant = gtk.Assistant()\r\n self.assistant.set_title(_(u\"RTK Add Incident Assistant\"))\r\n self.assistant.connect('apply', self._add_incident)\r\n self.assistant.connect('cancel', self._cancel)\r\n self.assistant.connect('close', self._cancel)\r\n\r\n # Create the introduction page.\r\n _fixed = gtk.Fixed()\r\n _label = Widgets.make_label(_(u\"This is the RTK incident addition \"\r\n u\"assistant. It will help you add a \"\r\n u\"new hardware or software incident to \"\r\n u\"the database. Press 'Forward' to \"\r\n u\"continue or 'Cancel' to quit the \"\r\n u\"assistant.\"),\r\n width=-1, height=-1, wrap=True)\r\n _fixed.put(_label, 5, 5)\r\n self.assistant.append_page(_fixed)\r\n self.assistant.set_page_type(_fixed, gtk.ASSISTANT_PAGE_INTRO)\r\n self.assistant.set_page_title(_fixed, _(u\"Introduction\"))\r\n self.assistant.set_page_complete(_fixed, True)\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Create the incident information page. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n self.cmbCategory = Widgets.make_combo()\r\n self.cmbType = Widgets.make_combo()\r\n self.cmbCriticality = Widgets.make_combo()\r\n self.cmbLifeCycle = Widgets.make_combo()\r\n self.cmbHardware = Widgets.make_combo(simple=False)\r\n self.cmbSoftware = Widgets.make_combo(simple=False)\r\n self.cmbUnit = Widgets.make_combo()\r\n self.cmbReportedBy = Widgets.make_combo()\r\n self.cmbDetectMethod = Widgets.make_combo()\r\n\r\n self.txtIncidentDate = Widgets.make_entry(width=100)\r\n self.txtTestProcedure = Widgets.make_entry()\r\n self.txtTestCase = Widgets.make_entry()\r\n self.txtExecutionTime = Widgets.make_entry(width=100)\r\n\r\n # Load the gtk.ComboBox() widgets.\r\n self.cmbReportedBy.append_text(\"\")\r\n for i in range(len(Configuration.RTK_USERS)):\r\n self.cmbReportedBy.append_text(Configuration.RTK_USERS[i])\r\n self.cmbCategory.append_text(\"\")\r\n for i in range(len(Configuration.RTK_INCIDENT_CATEGORY)):\r\n self.cmbCategory.append_text(\r\n Configuration.RTK_INCIDENT_CATEGORY[i])\r\n self.cmbType.append_text(\"\")\r\n for i in range(len(Configuration.RTK_INCIDENT_TYPE)):\r\n self.cmbType.append_text(Configuration.RTK_INCIDENT_TYPE[i])\r\n self.cmbCriticality.append_text(\"\")\r\n for i in range(len(Configuration.RTK_INCIDENT_CRITICALITY)):\r\n self.cmbCriticality.append_text(\r\n Configuration.RTK_INCIDENT_CRITICALITY[i])\r\n self.cmbLifeCycle.append_text(\"\")\r\n for i in range(len(Configuration.RTK_LIFECYCLE)):\r\n self.cmbLifeCycle.append_text(Configuration.RTK_LIFECYCLE[i])\r\n\r\n _query = \"SELECT fld_name, fld_hardware_id, fld_description \\\r\n FROM rtk_hardware \\\r\n WHERE fld_revision_id={0:d} \\\r\n AND fld_part=0\".format(revision_id)\r\n (_results, _error_code, __) = self._dao.execute(_query, commit=False)\r\n Widgets.load_combo(self.cmbHardware, _results, simple=False)\r\n\r\n _query = \"SELECT fld_description, fld_software_id, fld_description \\\r\n FROM rtk_software \\\r\n WHERE fld_revision_id={0:d}\".format(revision_id)\r\n (_results, _error_code, __) = self._dao.execute(_query, commit=False)\r\n Widgets.load_combo(self.cmbSoftware, _results, simple=False)\r\n\r\n _results = [[_(u\"Code Review\")], [_(u\"Error/Anomaly Analysis\")],\r\n [_(u\"Structure Analysis\")], [_(u\"Random Testing\")],\r\n [_(u\"Functional Testing\")], [_(u\"Branch 
Testing\")]]\r\n Widgets.load_combo(self.cmbDetectMethod, _results)\r\n\r\n # Create and place the labels.\r\n self.fxdPageGeneral = gtk.Fixed()\r\n\r\n _labels = [_(u\"Incident Date*:\"), _(u\"Reported By*:\"),\r\n _(u\"Incident Category*:\"), _(u\"Incident Type:\"),\r\n _(u\"Incident Criticality:\"), _(u\"Life Cycle:\"),\r\n _(u\"Affected Unit:\"), _(u\"Affected Assembly*:\"),\r\n _(u\"Affected Software:\"), _(u\"Detection Method*:\"),\r\n _(u\"Test Procedure:\"), _(u\"Test Case:\"),\r\n _(u\"Execution Time*:\")]\r\n (_x_pos, _y_pos) = Widgets.make_labels(_labels,\r\n self.fxdPageGeneral, 5, 5)\r\n _x_pos += 40\r\n\r\n self.txtIncidentDate.set_tooltip_text(_(u\"Enter the date the incident \"\r\n u\"occurred.\"))\r\n self.cmbReportedBy.set_tooltip_text(_(u\"Enter the name of the person \"\r\n u\"reporting the incident. \"\r\n u\"Defaults to currently logged \"\r\n u\"in user.\"))\r\n self.cmbCategory.set_tooltip_text(_(u\"Select the category this \"\r\n u\"incident represents.\"))\r\n self.cmbType.set_tooltip_text(_(u\"Select the type of problem this \"\r\n u\"incident represents.\"))\r\n self.cmbCriticality.set_tooltip_text(_(u\"Select the severity of the \"\r\n u\"discrepancy.\"))\r\n self.txtTestProcedure.set_tooltip_text(_(u\"Enter the test procedure \"\r\n u\"being run when the \"\r\n u\"incident occurred.\"))\r\n self.txtTestCase.set_tooltip_text(_(u\"Enter the test case being run \"\r\n u\"when the incident occurred.\"))\r\n self.txtExecutionTime.set_tooltip_text(_(u\"Enter the execution time \"\r\n u\"when the incident \"\r\n u\"occurred.\"))\r\n\r\n # Add a calendar widget for date selection if we are on a posix\r\n # platform. The calendar widget doesn't work for shit on Windoze.\r\n if name == 'posix':\r\n self.btnCalendar = Widgets.make_button(height=25, width=25,\r\n label=\"...\", image=None)\r\n self.btnCalendar.set_tooltip_text(_(u\"Launch a calendar to select \"\r\n u\"the incident date\"))\r\n self.btnCalendar.connect('clicked', Widgets.date_select, None,\r\n self.txtIncidentDate)\r\n self.fxdPageGeneral.put(self.btnCalendar, _x_pos + 105, _y_pos[0])\r\n\r\n self.fxdPageGeneral.put(self.txtIncidentDate, _x_pos, _y_pos[0])\r\n self.fxdPageGeneral.put(self.cmbReportedBy, _x_pos, _y_pos[1])\r\n self.fxdPageGeneral.put(self.cmbCategory, _x_pos, _y_pos[2])\r\n self.fxdPageGeneral.put(self.cmbType, _x_pos, _y_pos[3])\r\n self.fxdPageGeneral.put(self.cmbCriticality, _x_pos, _y_pos[4])\r\n self.fxdPageGeneral.put(self.cmbLifeCycle, _x_pos, _y_pos[5])\r\n self.fxdPageGeneral.put(self.cmbUnit, _x_pos, _y_pos[6])\r\n self.fxdPageGeneral.put(self.cmbHardware, _x_pos, _y_pos[7])\r\n self.fxdPageGeneral.put(self.cmbSoftware, _x_pos, _y_pos[8])\r\n self.fxdPageGeneral.put(self.cmbDetectMethod, _x_pos, _y_pos[9])\r\n self.fxdPageGeneral.put(self.txtTestProcedure, _x_pos, _y_pos[10])\r\n self.fxdPageGeneral.put(self.txtTestCase, _x_pos, _y_pos[11])\r\n self.fxdPageGeneral.put(self.txtExecutionTime, _x_pos, _y_pos[12])\r\n\r\n # Connect widget signals to callback functions.\r\n # self.txtIncidentDate.connect('focus_out_event', self._check_ready, 2)\r\n self.cmbReportedBy.connect('changed', self._check_ready, None, 2)\r\n self.cmbCategory.connect('changed', self._check_ready, None, 2)\r\n self.cmbHardware.connect('changed', self._check_ready, None, 2)\r\n self.cmbSoftware.connect('changed', self._check_ready, None, 2)\r\n self.cmbDetectMethod.connect('changed', self._check_ready, None, 2)\r\n\r\n self.assistant.append_page(self.fxdPageGeneral)\r\n 
self.assistant.set_page_type(self.fxdPageGeneral,\r\n gtk.ASSISTANT_PAGE_CONTENT)\r\n self.assistant.set_page_title(self.fxdPageGeneral, _(u\"Program \"\r\n u\"Incident: \"\r\n u\"General \"\r\n u\"Information\"))\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Create the incident descriptions page. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n self.txtDescription = Widgets.make_entry(width=595)\r\n self.txtDetails = gtk.TextBuffer()\r\n self.txtRemarks = gtk.TextBuffer()\r\n\r\n # Assign tooltips to the widgets.\r\n self.txtDescription.set_tooltip_text(_(u\"Enter a brief description of \"\r\n u\"the incident being \"\r\n u\"reported.\"))\r\n\r\n # Place the widgets.\r\n self.fxdPageDescription = gtk.Fixed()\r\n\r\n _label = Widgets.make_label(_(u\"Brief Description*\"))\r\n _x_pos = _label.size_request()[0]\r\n self.fxdPageDescription.put(_label, 5, 5)\r\n\r\n _label = Widgets.make_label(_(u\"Detailed Description*\"))\r\n self.fxdPageDescription.put(_label, 5, 35)\r\n\r\n _label = Widgets.make_label(_(u\"Remarks\"))\r\n self.fxdPageDescription.put(_label, 5, 370)\r\n\r\n self.fxdPageDescription.put(self.txtDescription, _x_pos, 5)\r\n _textview = Widgets.make_text_view(txvbuffer=self.txtDetails,\r\n width=795, height=300)\r\n _textview.set_tooltip_text(_(u\"Enter a detailed description of the \"\r\n u\"incident being reported.\"))\r\n self.fxdPageDescription.put(_textview, 5, 65)\r\n\r\n _textview = Widgets.make_text_view(txvbuffer=self.txtRemarks,\r\n width=795, height=150)\r\n _textview.set_tooltip_text(_(u\"Enter any additional, pertinent \"\r\n u\"remarks related to the incident being \"\r\n u\"reported.\"))\r\n self.fxdPageDescription.put(_textview, 5, 400)\r\n\r\n self.txtDescription.connect('focus_out_event', self._check_ready, 3)\r\n self.txtDetails.connect('changed', self._check_ready, None, 3)\r\n\r\n self.assistant.append_page(self.fxdPageDescription)\r\n self.assistant.set_page_type(self.fxdPageDescription,\r\n gtk.ASSISTANT_PAGE_CONTENT)\r\n self.assistant.set_page_title(self.fxdPageDescription,\r\n _(u\"Program Incident: Incident \"\r\n u\"Description\"))\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Create the confirmation page. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _fixed = gtk.Fixed()\r\n\r\n _label = Widgets.make_label(_(u\"Press 'Apply' to create the incident \"\r\n u\"or 'Cancel' to quit the assistant \"\r\n u\"without adding the incident.\"),\r\n width=-1, height=-1, wrap=True)\r\n _fixed.put(_label, 5, 5)\r\n\r\n self.assistant.append_page(_fixed)\r\n self.assistant.set_page_type(_fixed,\r\n gtk.ASSISTANT_PAGE_CONFIRM)\r\n self.assistant.set_page_title(_fixed, _(u\"Incident: Confirm Addition \"\r\n u\"of New Incident\"))\r\n self.assistant.set_page_complete(_fixed, True)\r\n\r\n self.assistant.show_all()\r\n\r\n # hide the widgets that are specific to software unless a software\r\n # module is selected.\r\n self.cmbDetectMethod.set_sensitive(False)\r\n self.txtTestProcedure.set_sensitive(False)\r\n self.txtTestCase.set_sensitive(False)\r\n self.txtExecutionTime.set_sensitive(False)",
"def test_env_app_identifier(context):\n os.environ[config.FLOWSERV_APP] = '0000'\n assert context.get_workflow(dict()) == '0000'\n assert context.get_group(dict()) == '0000'\n os.environ[config.FLOWSERV_GROUP] = '000A'\n assert context.get_workflow(dict()) == '0000'\n assert context.get_group(dict()) == '000A'\n del os.environ[config.FLOWSERV_APP]\n del os.environ[config.FLOWSERV_GROUP]\n with pytest.raises(err.MissingConfigurationError):\n context.get_workflow(dict())\n with pytest.raises(err.MissingConfigurationError):\n context.get_group(dict())",
"def find_issue_id(self):",
"def main(job_id, params):\n cfg = load_experiment_config_file()\n log_level = cfg.getint(\"HPOLIB\", \"HPOlib_loglevel\")\n logging.basicConfig(format='[%(levelname)s] [%(asctime)s:%(name)s] %('\n 'message)s', datefmt='%H:%M:%S')\n logger.setLevel(log_level)\n\n cli_target = \"HPOlib.optimization_interceptor\"\n result = command_line_function(params, cli_target)\n return result",
"def test_integration_run_non_existing_command(\n mocker, monkeypatch, set_environment_variables\n):\n logger_info = mocker.patch.object(logging.getLogger(\"demisto-sdk\"), \"info\")\n monkeypatch.setenv(\"COLUMNS\", \"1000\")\n mocker.patch.object(DefaultApi, \"investigation_add_entries_sync\", return_value=None)\n mocker.patch.object(Runner, \"_get_playground_id\", return_value=\"pg_id\")\n result = CliRunner(mix_stderr=False,).invoke(\n main,\n [\n \"run\",\n \"-q\",\n \"!non-existing-command\",\n \"-D\",\n ],\n )\n assert 0 == result.exit_code\n assert not result.exception\n assert str_in_call_args_list(\n logger_info.call_args_list,\n \"Command did not run, make sure it was written correctly.\",\n )",
"def run(config):\n locator = cea.inputlocator.InputLocator(config.scenario)\n print('Key in run')\n print(config.bigmacc.key)\n i = config.bigmacc.key\n print(i)\n # SCENARIO SETUP ---\n config.general.project = os.path.join(config.bigmacc.data, config.general.parent, i)\n print(config.general.project)\n cea.datamanagement.data_initializer.main(config)\n # use the scenario code to set the year for the lca and other operations that need the current year\n pathway_code = config.general.parent\n pathway_items = pathway_code.split('_')\n scenario_year = int(pathway_items[1])\n config.emissions.year_to_calculate = scenario_year\n\n bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out', config.bigmacc.round)\n\n scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0')\n experiment_key = 'exp_{}'.format(i)\n print(experiment_key)\n keys = [int(x) for x in str(i)]\n if experiment_key in scen_check['Experiments'].values.tolist():\n print('Experiment was finished previously, moving to next.')\n pass\n else:\n print('START: experiment {}.'.format(i))\n\n # INITIALIZE TIMER ---\n t0 = time.perf_counter()\n if os.path.exists(os.path.join(config.bigmacc.data, config.general.parent, i)):\n print(' - Folder exists for experiment {}.'.format(i))\n else:\n os.mkdir(os.path.join(config.bigmacc.data, config.general.parent, i))\n print(' - Folder does not exist for experiment {}, creating now.'.format(i))\n\n # run the archetype mapper to leverage the newly loaded typology file and set parameters\n print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i))\n cea.datamanagement.archetypes_mapper.main(config)\n\n # run the rule checker to set the scenario parameters\n print(' - Running rule checker for experiment {}.'.format(i))\n cea.bigmacc.bigmacc_rules.main(config)\n\n # SIMULATIONS ---\n\n print(' - Run radiation is {}.'.format(config.bigmacc.runrad))\n print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data))\n # checking on need for radiation simulation\n\n if config.bigmacc.runrad == True:\n # this nested statement is for when we rerun the simulations and no longer need to run the unique radiation\n if config.bigmacc.rerun != True:\n print(' - Running radiation simulation for experiment {}.'.format(i))\n if os.path.exists(locator.get_radiation_building('B000')):\n print(' - Radiation folder exists for experiment {}, copying.'.format(i))\n else:\n print(' - Radiation running for experiment {}.'.format(i))\n cea.resources.radiation_daysim.radiation_main.main(config)\n else:\n # print(' - Copying radiation simulation data from previous run for experiment {}.'.format(i))\n old_rad_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data', 'solar-radiation')\n # distutils.dir_util.copy_tree(old_rad_files, locator.get_solar_radiation_folder())\n else:\n radfiles = config.bigmacc.copyrad\n # print(' - Copying radiation results from {}.'.format(radfiles))\n # distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder())\n print(' - Experiment {} does not require new radiation simulation.'.format(i))\n\n # running demand forecasting\n if os.path.exists(locator.get_schedule_model_file('B000')):\n print(' - Schedules exist for experiment {}.'.format(i))\n else:\n print(' - Schedule maker running for experiment {}.'.format(i))\n schedule_maker.main(config)\n\n # check to see if we need 
to rerun demand or if we can copy\n if config.bigmacc.rerun != True:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n if keys[0] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n elif keys[6] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n cea.demand.demand_main.main(config)\n # print(' - Looking for demand results data from previous run for experiment {}.'.format(i))\n # old_demand_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n # config.general.scenario_name, 'outputs', 'data', 'demand')\n # if os.path.exists(old_demand_files):\n # # print(' - Copy demand results files from previous run of experiment {}.'.format(i))\n # # distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder())\n # pass\n # else:\n # print(' - No results found.')\n # print(' - Running demand simulation for experiment {}.'.format(i))\n # cea.demand.demand_main.main(config)\n\n if config.bigmacc.pv == True:\n print(' - Run PV is {}.'.format(config.bigmacc.pv))\n if config.bigmacc.rerun == True:\n print(' - Looking for radiation simulation data from previous run for experiment {}.'.format(i))\n old_pv_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data', 'potentials', 'solar')\n if os.path.exists(old_pv_files):\n # print(' - Copying PV files from previous run of experiment {}.'.format(i))\n # distutils.dir_util.copy_tree(old_pv_files, locator.solar_potential_folder())\n pass\n else:\n print(' - PV files do not exist for previous run of experiment {} at {}.'.format(i, old_pv_files))\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n else:\n # if PV simulation is needed, run it.\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n\n print('Run water-body exchange is {}.'.format(config.bigmacc.water))\n # if water-body simulation is needed, run it.\n if config.bigmacc.water == True:\n print(' - Running water body simulation for experiment {}.'.format(i))\n water.main(config)\n\n # recalculating the supply split between grid and ng in the websrook DH\n if keys[4] == 1:\n print(' - Do not run district heat recalculation.')\n else:\n print(' - Run district heat recalculation.')\n cea.bigmacc.wesbrook_DH.main(config)\n\n if keys[7] == 1:\n print(' - PV use detected. 
Adding PV generation to demand files.')\n util.write_pv_to_demand(config)\n else:\n print(' - No PV use detected.')\n\n # running the emissions and costing calculations\n print(' - Run cost and emissions scripts.')\n cea.analysis.costs.system_costs.main(config)\n cea.analysis.lca.main.main(config)\n\n # clone out the simulation inputs and outputs directory\n print(' - Transferring results directory for experiment {}.'.format(i))\n\n new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'inputs')\n new_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data')\n\n if config.bigmacc.rerun != True:\n distutils.dir_util.copy_tree(locator.get_data_results_folder(), new_outputs_path)\n distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path)\n\n time_elapsed = time.perf_counter() - t0\n\n # save log information\n log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'),\n index_col='Unnamed: 0')\n log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i),\n 'Completed': 'True',\n 'Experiment Time': '%d.2 seconds' % time_elapsed,\n 'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True)\n log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'))\n log_df.to_csv(r\"C:\\Users\\justi\\Desktop\\126logger_backup.csv\", )\n\n # write netcdf of hourly_results\n netcdf_writer.main(config, time='hourly')\n\n if config.bigmacc.rerun != True:\n shutil.rmtree(locator.get_costs_folder())\n shutil.rmtree(locator.get_demand_results_folder())\n shutil.rmtree(locator.get_lca_emissions_results_folder())\n shutil.rmtree(locator.get_solar_radiation_folder())\n shutil.rmtree(locator.get_potentials_folder())\n else:\n print(' - Rerun does not require purging of the files.')\n\n # when the setpoint is changed it is in a deeper database than the archetypes mapper can reach so reset it here\n if keys[0] == 1:\n cea.datamanagement.data_initializer.main(config)\n else:\n pass\n print('END: experiment {}. \\n'.format(i))",
"def __init__(self, id, aca_options):\n super().__init__(id)\n self.aca_command = f'''./aca_build/bin/AlcorControlAgent {aca_options}'''",
"def testNeedClientIDSetup(self):\n # Test project changed.\n self.assertTrue(self.gcp_env_runner._NeedClientIDSetup(True))\n # Test project is not changed but client_id or client_secret is empty.\n self.gcp_env_runner.client_id = \"\"\n self.gcp_env_runner.client_secret = \"\"\n self.assertTrue(self.gcp_env_runner._NeedClientIDSetup(False))\n # Test no need client_id setup.\n self.gcp_env_runner.client_id = \"test_client_id\"\n self.gcp_env_runner.client_secret = \"test_client_secret\"\n self.assertFalse(self.gcp_env_runner._NeedClientIDSetup(False))",
"def test_variables_id_get(self):\n pass",
"def test_variablepresentations_id_get(self):\n pass",
"def validateIOmoduleId(output ,arg_dict , key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric \" % (key,id))) \n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict",
"def test_get_pipeline_by_id(self):\n response = self.client.get_pipeline_by_id(2)\n self.assertEqual(response['id'], 2)",
"def test_context_id(self):\n assert str(self.system.course_id) == self.xmodule.context_id",
"def run_scenario(self, run, run_id):\n\n raise NotImplementedError",
"def run_id() -> int:\n return sg_covid_impact.config[\"flows\"][\"glass\"][\"run_id\"]",
"def test_get_case_by_id(self):\n pass"
] | [
"0.530482",
"0.5264318",
"0.49025062",
"0.48202714",
"0.4806076",
"0.4735691",
"0.4722407",
"0.47197405",
"0.47131798",
"0.46948573",
"0.46831575",
"0.4672817",
"0.46602222",
"0.46404818",
"0.4632036",
"0.4598876",
"0.4589877",
"0.4585696",
"0.4569471",
"0.45632952",
"0.45611808",
"0.45510596",
"0.45382473",
"0.4535718",
"0.45221198",
"0.45155755",
"0.45125765",
"0.45089367",
"0.4496826",
"0.44870403"
] | 0.6833365 | 0 |
Appends File info to input arrays | def appendFileInfo(File, params, extractedValues, names):
for p in params:
extractedValues[p].append(getValue(File, p))
names.append(getName(File)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def associate_files(self):\n # Open starinfo file and define structured array\n starinfo_file = self.starinfo_file\n nstar = sum(1 for line in open(starinfo_file))\n infoname = ['obj', 'std', 'caldir', 'altname']\n infofmt = ['|S25', '|S25', '|S25', '|S25']\n starinfo = np.zeros(nstar, dtype={\n 'names': infoname, 'formats': infofmt})\n with open(starinfo_file, 'r') as arq:\n for i in range(nstar):\n linelist = arq.readline().split()\n for j in range(len(infoname)):\n starinfo[i][j] = linelist[j]\n\n if self.stored_sens:\n self.load_storedsens()\n\n os.chdir(self.raw_dir)\n\n l = glob.glob('*.fits')\n l.sort()\n\n headers = []\n headers_ext1 = []\n for i in l:\n try:\n headers.append(fits.getheader(i, ext=0))\n headers_ext1.append(fits.getheader(i, ext=1))\n except IOError:\n print('IOError reading file {:s}.'.format(i))\n raise SystemExit(0)\n\n oversc = np.array(\n [('overscan') in i for i in headers_ext1], dtype='bool')\n\n mjds = np.array([i['mjd-obs'] for i in headers_ext1], dtype='float32')\n idx = np.arange(len(l))\n\n images = np.array([\n l[i] for i in idx if (\n (headers[i]['obstype'] == 'OBJECT') &\n (headers[i]['object'] != 'Twilight') &\n (headers[i]['obsclass'] != 'acq'))])\n\n field_names = [\n 'filename', 'observatory', 'instrument', 'detector',\n 'grating', 'filter1', 'obsclass', 'object', 'obstype',\n 'grating_wl', 'overscan', 'mjd', 'ccdsum']\n types = [\n 'S120', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60',\n 'float32', 'bool', 'float32', 'S60']\n hdrkeys = [\n 'observat', 'instrume', 'detector', 'grating', 'filter1',\n 'obsclass', 'object', 'obstype', 'grwlen']\n\n hdrpars_type = [\n (field_names[i], types[i]) for i in range(len(field_names))]\n\n hdrpars = np.array([\n ((l[i],) + tuple([headers[i][j] for j in hdrkeys]) +\n (oversc[i],) + (mjds[i],) + (headers_ext1[i]['ccdsum'],))\n for i in idx], dtype=hdrpars_type)\n\n associated = []\n\n for i, j in enumerate(images):\n\n # Take great care when changing this.\n hdr = fits.getheader(j, ext=0)\n hdr_ext1 = fits.getheader(j, ext=1)\n mjd = hdr_ext1['mjd-obs']\n\n element = {\n 'image': j, 'observatory': hdr['observat'],\n 'instrument': hdr['instrume'],\n 'detector': hdr['detector'], 'grating_wl': hdr['grwlen'],\n 'mjd': mjd, 'grating': hdr['grating'],\n 'filter1': hdr['filter1'], 'obsclass': hdr['obsclass'],\n 'object': hdr['object']}\n\n if self.stored_sens:\n ssf = self.stored_sensfunc\n element['standard_star'] = ssf['filename'][\n (ssf['observatory'] == hdr['observat']) &\n (ssf['detector'] == hdr['detector']) &\n (ssf['grating'] == hdr['grating']) &\n (ssf['instrument'] == hdr['instrume']) &\n (ssf['filter1'] == hdr['filter1']) &\n (ssf['maskname'] == hdr['maskname'])]\n else:\n element['standard_star'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'OBJECT') &\n (np.array([k in ['partnerCal', 'progCal']\n for k in hdrpars['obsclass']], dtype='bool')) &\n (hdrpars['object'] != 'Twilight') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['filter1'] == hdr['filter1']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'stdstar_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'stdstar_ttol'))]\n\n element['flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (hdrpars['detector'] == hdr['detector']) &\n 
(abs(mjds - mjd) <= self.cfg.getfloat('associations',\n 'flat_ttol'))]\n\n element['twilight'] = hdrpars['filename'][\n (hdrpars['object'] == 'Twilight') &\n (hdrpars['obstype'] == 'OBJECT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'twilight_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'twilight_ttol'))]\n\n c = 'twilight'\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 1:\n element[c] = element[c][0]\n elif len(element[c]) == 0:\n element[c] = ''\n\n # A flat close to the twilight observation for a better\n # response function.\n if element['twilight']:\n twipars = hdrpars[hdrpars['filename'] == element['twilight']]\n element['twilight_flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == twipars['observatory']) &\n (hdrpars['detector'] == twipars['detector']) &\n (hdrpars['grating'] == twipars['grating']) &\n (hdrpars['grating_wl'] == twipars['grating_wl']) &\n (abs(mjds - twipars['mjd']) <= self.cfg.getfloat(\n 'associations', 'twilight_ttol'))]\n else:\n element['twilight_flat'] = np.array([], dtype='S60')\n\n element['arc'] = hdrpars['filename'][\n # (hdrpars['object'] == 'CuAr') &\n (hdrpars['obstype'] == 'ARC') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'arc_ttol'))]\n\n element['bias'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BIAS') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'bias_ttol')) &\n (\n (hdrpars['overscan'] & (self.fl_over == 'yes')) |\n (~hdrpars['overscan'] & (self.fl_over == 'no'))\n )]\n\n im = fits.open(element['image'])\n ishape = np.array(im[1].data.shape, dtype='float32')\n im.close()\n del(im)\n\n validBiases = np.ones(len(element['bias']), dtype='bool')\n k = 0\n\n for biasImage in element['bias']:\n\n bias = fits.open(biasImage)\n bshape = np.array(bias[1].data.shape, dtype='float32')\n bias.close()\n del(bias)\n\n #\n # Elinates biases if they differ in array size from\n # the science image. 
Small differences are normal due to\n # the overscan subtraction in processed bias frames.\n #\n if np.any(np.abs(bshape / ishape - 1.0) > 0.10):\n validBiases[k] = False\n\n k += 1\n\n element['bias'] = element['bias'][validBiases]\n del(k)\n\n element['bpm'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BPM') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['ccdsum'] == hdr_ext1['ccdsum'])]\n\n categories = ['flat', 'bias', 'arc', 'standard_star',\n 'bpm', 'twilight_flat']\n\n for c in categories:\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 0:\n element[c] = ''\n elif len(element[c]) == 1:\n element[c] = (element[c])[0]\n\n associated.append(element)\n\n # Define mdf filename\n # Based in gprepare.cl\n # Did not account for observation in Nod-and-Shuffle\n for i in associated:\n header_flat = [\n k for j, k in enumerate(headers) if l[j] == i['flat']\n ]\n if len(header_flat):\n header_flat = header_flat[0]\n MaskName = header_flat['maskname']\n if MaskName == \"IFU-2\":\n slits = 'both'\n elif MaskName == \"IFU-B\":\n slits = 'blue'\n elif MaskName == \"IFU-R\":\n slits = 'red'\n i['slits'] = slits\n\n if self.object_filter:\n objs = self.object_filter.split(',')\n sci_ims = [\n i for i in associated if (\n (i['obsclass'] == 'science') &\n (i['object'] in objs))]\n else:\n sci_ims = [i for i in associated if i['obsclass'] == 'science']\n\n if self.all_stars:\n std_ims = [\n i for i in associated if i['obsclass'] in ['partnerCal',\n 'progCal']]\n else:\n used_stds = [i['standard_star'] for i in sci_ims]\n std_ims = [i for i in associated if i['image'] in used_stds]\n\n # Get star info from starinfo.dat\n possible_names = np.concatenate((starinfo['obj'], starinfo['std'],\n starinfo['altname']))\n n_names = len(possible_names)\n\n for i, j in enumerate(possible_names):\n possible_names[i] = (j.lower()).replace(' ', '')\n\n for i in std_ims:\n # Removes the 'standard_star' key if the dictionary\n # element in question refers to a standard star.\n del i['standard_star']\n starname = (i['object'].lower()).replace(' ', '')\n\n try:\n stdstar_idx = (\n np.arange(n_names)[possible_names == starname] %\n (n_names / 3))[0]\n except:\n raise Exception(\n 'Standard star named {:s} not found in file {:s}'.\n format(starname, starinfo_file))\n\n i['stdstar'] = starinfo[stdstar_idx]['std']\n\n if starinfo[stdstar_idx]['caldir'] == 'gireds_data':\n i['caldir'] = pkg_resources.resource_filename(\n 'gireds', 'data/')\n else:\n i['caldir'] = starinfo[stdstar_idx]['caldir']\n\n self.sci = sci_ims\n self.std = std_ims\n\n # Writes the file association dictionary to an ASCII file\n # in the run directory.\n\n if not self.dry_run:\n try:\n os.mkdir(self.products_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n try:\n os.mkdir(self.run_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n\n if not self.dry_run:\n os.chdir(self.run_dir)\n json.dump(\n sci_ims, open('file_associations_sci.dat', 'w'),\n sort_keys=True, indent=4)\n json.dump(\n std_ims, open('file_associations_std.dat', 'w'),\n sort_keys=True, indent=4)",
"def __setitem__(self, filenr, data_arr):\n cvcfile = self.filenames[filenr]\n cvcpath = os.path.join(self.filefolder, cvcfile)\n data_arr.tofile(cvcpath)",
"def _merge_fileinfos(self, hard_infos, infos):\n new_infos = copy.deepcopy(hard_infos)\n for info in infos:\n new_infos[info['name']] = new_infos.get(info['name'], {})\n new_infos[info['name']].update(info)\n return new_infos",
"def add_files(self, *files):\n for f in files:\n # if file contains actual aperture magnitudes\n if \"mag_calib_unc\" in Table.read(f, format=\"ascii\").colnames:\n LightCurve.__mag_file_append(self, f)\n # if table contains limiting magnitudes\n else: \n LightCurve.__limmag_file_append(self, f)",
"def ingest(self, files):\n for file in files:\n self.files.add(file)",
"def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if filename is None:\n return\n\n file_counter = self.file_counter\n # count = file_counter.get(filename, 0)\n # file_counter[filename] = count + i\n # if file_counter[filename] <= 0:\n count = file_counter.get(filename, 0) + i\n if count <= 0:\n # Remove the file from the dictionary if its count has\n # dropped to zero\n file_counter.pop(filename, None)\n else:\n file_counter[filename] = count\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass",
"def CHANGE_appendAll(self):\r\n # Separate new files to be loaded\r\n FoI = list(set(self.listenFiles)-set(self.logFiles))\r\n FoI.sort()\r\n for file in FoI:\r\n print(\"Loading {}\".format(file))\r\n filePath = os.path.join(self.listenDir, file)\r\n\r\n try:\r\n (newProj, newAngle) = self.read_projection_image(filePath)\r\n\r\n self.logTiltAngles = np.append(self.logTiltAngles, newAngle)\r\n\r\n # Invert Contrast for BF-TEM\r\n if self.invert:\r\n newProj *= -1\r\n\r\n newProj = self.background_subtract(newProj)\r\n\r\n # Apply Center of Mass (if selected)\r\n if self.alignMethod == 'CoM':\r\n newProj = self.center_of_mass_align(newProj)\r\n\r\n # Account for Python's disdain for AxAx1 arrays\r\n # (compresses to 2D)\r\n if (len(self.logTiltSeries0) == 0):\r\n dataDim = np.shape(newProj)\r\n self.logTiltSeries0 = np.zeros([dataDim[0], dataDim[1], 1])\r\n self.logTiltSeries0[:, :, 0] = newProj\r\n self.wbp = wbp.WBP(dataDim[0], dataDim[1], 1)\r\n else:\r\n self.logTiltSeries0 = np.dstack((self.logTiltSeries0,\r\n newProj))\r\n\r\n self.logFiles = np.append(self.logFiles, file)\r\n\r\n except Exception:\r\n print('Could not read : {}, will proceed with reconstruction\\\r\n and re-download on next pass'.format(file))\r\n break\r\n\r\n # Apply Cross-Correlation after reading images (if selected)\r\n if self.alignMethod == 'xcor':\r\n self.logTiltSeries = self.xcorr_align(self.logTiltSeries0)\r\n # update tilt angles and sinogram\r\n self.wbp.set_tilt_series(self.logTiltSeries, self.logTiltAngles)\r\n # re-center tilt axis\r\n self.logTiltSeries = self.shift_tilt_axis(self.logTiltSeries,\r\n self.logTiltAngles)\r\n else:\r\n self.logTiltSeries = self.logTiltSeries0",
"def add(self, file_infos):\n self._check_writable_()\n \n for file_info in file_infos:\n #columns = mesh_id, value, date_data, lon, lat, date_added_to_db, sv_name, info\n #add file to db with status adding\n file_info['date_added_to_db'] = datetime.now()\n list_write = [file_info[el] if el in file_info else None for el in self._columns.keys()]\n #check for proper inputs\n self.check_column_values(list_write)\n \n #add to db\n self._cursor.execute('INSERT INTO FILEINFO VALUES (%s)'%(','.join(['?' for el in self._columns.keys()])), tuple(self.convert_column_dates2str(list_write)))\n self._conn.commit()",
"def insert_good_data():\n get_file_reply(files[0][0], files[0][1])\n get_file_reply(files[1][0], files[1][1])",
"def readFile(self, files):\n files = np.atleast_1d(files) # allow scalar input\n\n events = list()\n groups = list()\n flashes = list()\n one_sec = list()\n\n ev_id_ctr = 0\n gr_id_ctr = 0\n fl_id_ctr = 0\n\n for _file in files:\n # todo: with...open\n nc = Dataset(_file)\n\n this_ev = _extract_events(nc)\n this_grp = _extract_groups(nc)\n this_fl = _extract_flashes(nc)\n this_one_sec = _extract_one_second(nc, background=False)\n\n nc.close()\n\n # TODO: do we need check for \"empty\" files like w/GLM?\n\n # IDs are not necessarily unique. We'll modify them so they are.\n # Similar to what is done with GLM data (glm.py in this package)\n # See there for details, but the gist is get unique values and map\n # TODO: refactor?\n\n this_ev.sort_values('id', inplace=True)\n this_grp.sort_values('id', inplace=True)\n this_fl.sort_values('id', inplace=True)\n\n new_flash_id = np.arange(len(this_fl))\n this_fl.id = new_flash_id\n flash_id_map = dict(zip(this_fl._orig_id.values, new_flash_id))\n\n # Update group parent\n new_id = this_grp.parent_id.map(flash_id_map.get)\n this_grp.parent_id = new_id\n\n # New id for the group:\n new_group_id = np.arange(len(this_grp))\n this_grp.id = new_group_id\n group_id_map = dict(zip(this_grp._orig_id.values, new_group_id))\n\n # Update event parent\n this_ev.parent_id = this_ev.parent_id.map(group_id_map.get)\n\n # New event ID (although I don't think is really necessary)\n new_event_id = np.arange(len(this_ev))\n this_ev.id = new_event_id\n\n # Add in an offset to get unique values across files\n this_ev['id'] += ev_id_ctr\n this_grp['id'] += gr_id_ctr\n this_fl['id'] += fl_id_ctr\n\n # Offset the parent IDs for the children too:\n this_ev['parent_id'] += gr_id_ctr\n this_grp['parent_id'] += fl_id_ctr\n\n # Next, update the counters\n ev_id_ctr = this_ev['id'].iloc[-1]+1\n gr_id_ctr = this_grp['id'].iloc[-1]+1\n fl_id_ctr = this_fl['id'].iloc[-1]+1\n\n # Modify the times to UTC:\n for val in [this_ev, this_grp, this_fl]: # one seconds already converted\n val.time = tai93_to_utc(val.time)\n\n # todo: add option to not sort by time\n # this_event.sort_values('time', inplace=True)\n # this_group.sort_values('time', inplace=True)\n # this_flash.sort_values('time', inplace=True)\n\n # Finally, add \"this\" data\n events.append(this_ev)\n groups.append(this_grp)\n flashes.append(this_fl)\n one_sec.append(this_one_sec)\n\n # Put these as attributes of the class\n self.events = Ltg(pd.concat(events))\n self.groups = Ltg(pd.concat(groups))\n self.flashes = Ltg(pd.concat(flashes))\n self.one_second = Ltg(pd.concat(one_sec))",
"def _push_one(self, f, **kwargs):\n\n # Copy the metadata for modifying and open the ann file\n meta = kwargs.copy()\n desc = read_InSar_annotation(f)\n\n # Expand the path for the geotiffs\n tiff_dir = abspath(expanduser(self.geotiff_dir))\n\n # form the pattern to look for and grab the tifs\n pattern = '.'.join(basename(f).split('.')[0:-1]) + '*.tif'\n rasters = glob.glob(join(tiff_dir, pattern))\n\n # Submit each geotif, modifying meta on the fly\n for r in rasters:\n # Grab information from the filename\n f_pieces = r.split('.')\n component = f_pieces[-2] # Real or imaginary component\n data_abbr = f_pieces[-3] # Key to the data name\n dname = self.dname_map[data_abbr] # Data type in db\n\n # For the data type\n meta['type'] = 'insar ' + dname.split(' ')[0]\n\n if dname == 'interferogram':\n meta['type'] += (' ' + component)\n\n # Assign the date for the respective flights\n if 'amplitude' in dname:\n meta['date'] = desc['start time of acquisition for pass {}'.format(\n dname.split(' ')[-1])]['value']\n\n # Derived products always receive the date of the last overpass\n else:\n meta['date'] = desc['start time of acquisition for pass 2']['value']\n\n # Assign only the date not the date and time\n meta['date'] = meta['date'].date()\n\n # Assign units\n meta['units'] = desc['{} units'.format(\n dname.split(' ')[0])]['value']\n\n # Flexibly form a comment for each of the products for dates\n comment = get_InSar_flight_comment(dname, desc)\n # add which dem was used which dictates the file name convert e.g.\n # ...VV_01.int.grd\n comment += ', DEM used = {}'.format(\n desc['dem used in processing']['value'])\n # Add the polarization to the the comments\n comment += ', Polarization = {}'.format(\n desc['polarization']['value'])\n meta['description'] = comment\n\n self.log.info('Uploading {} as {}...'.format(r, meta['type']))\n\n d = self.UploaderClass(r, **meta)\n\n # Submit the data to the database\n d.submit(self.session)\n\n # Uploaded set\n self.uploaded += 1",
"def process_all(fileinfos, args):\n # create overall figure\n count_and_draw(fileinfos,args)\n # create figures for all the files\n for key in fileinfos:\n count_and_draw(fileinfos,args,key)\n # create figures for all the elements\n els_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_el\"]:\n if key not in els_processed:\n count_and_draw(fileinfos,args,key)\n els_processed.append(key)\n # create figures for all the attributes\n atts_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_att\"]:\n if key not in atts_processed:\n count_and_draw(fileinfos,args,\"@\"+key)\n atts_processed.append(key)",
"def addFileNames(self, fileNames):\n with Tracer(traceLogger):\n infos = []\n\n oldNumFiles = len(self.topLevelOperator.Dataset)\n # HACK: If the filePath isn't valid, replace it\n # This is to work around the scenario where two independent data selection applets are coupled, causing mutual resizes.\n # This will be fixed when a multi-file data selection applet gui replaces this gui. \n for i in reversed( range( oldNumFiles ) ):\n if not self.topLevelOperator.Dataset[i].ready():\n oldNumFiles -= 1\n else:\n break\n \n \n # Assign values to the new inputs we just allocated.\n # The GUI will be updated by callbacks that are listening to slot changes\n for i, filePath in enumerate(fileNames):\n datasetInfo = DatasetInfo()\n cwd = self.topLevelOperator.WorkingDirectory.value\n \n if not areOnSameDrive(filePath,cwd):\n QMessageBox.critical(self, \"Drive Error\",\"Data must be on same drive as working directory.\")\n return\n \n absPath, relPath = getPathVariants(filePath, cwd)\n \n # Relative by default, unless the file is in a totally different tree from the working directory.\n if len(os.path.commonprefix([cwd, absPath])) > 1:\n datasetInfo.filePath = relPath\n else:\n datasetInfo.filePath = absPath\n\n h5Exts = ['.ilp', '.h5', '.hdf5']\n if os.path.splitext(datasetInfo.filePath)[1] in h5Exts:\n datasetNames = self.getPossibleInternalPaths( absPath )\n if len(datasetNames) > 0:\n datasetInfo.filePath += str(datasetNames[0])\n else:\n raise RuntimeError(\"HDF5 file %s has no image datasets\" % datasetInfo.filePath)\n\n # Allow labels by default if this gui isn't being used for batch data.\n datasetInfo.allowLabels = ( self.guiMode == GuiMode.Normal )\n infos.append(datasetInfo)\n\n #if no exception was thrown, set up the operator now\n self.topLevelOperator.Dataset.resize( oldNumFiles+len(fileNames) )\n for i in range(len(infos)):\n self.topLevelOperator.Dataset[i+oldNumFiles].setValue( infos[i] )",
"def AggregateFileInfos(file_info_list):\n infos = []\n for file_info in file_info_list:\n if file_info['info']:\n infos.append(file_info['info'])\n\n return {\n 'file': file_info_list[0]['file'],\n 'blame_url': file_info_list[0]['blame_url'],\n 'info': '\\n'.join(infos)\n }",
"def _add_files(self, index_key, media_key,\n new_list, fundamentals):\n _index=fundamentals.get(index_key, {})\n _media=fundamentals.get(media_key, {})\n for _file in new_list:\n _data=self._item_from_index(_file, 'data', _media)\n if not _data:\n self.log('Failed to write file %s due to no data'%_file)\n continue\n if self._item_from_index(_file, None, _index) is None:\n _origin=self._item_from_index(_file, 'origin', _media)\n if _origin=='ringers':\n _path=self.protocolclass.RT_PATH\n elif _origin=='sounds':\n _path=self.protocolclass.SND_PATH\n elif _origin=='images':\n _path=self.protocolclass.PIC_PATH\n else:\n selg.log('File %s has unknown origin, skip!'%_file)\n continue\n _file_name=_path+'/'+_file\n try:\n self.writefile(_file_name, _data)\n except:\n self.log('Failed to write file '+_file_name)\n if __debug__:\n raise",
"def addfiles(self, filelist):\r\n for tmpc in filelist:\r\n self._filelist.append(tmpc)\r\n tmp_energy=self.readenergy(filelist)\r\n for tmpdat in tmp_energy:\r\n self._energy.append(tmpdat)\r\n return tmp_energy",
"def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()",
"def readdata(self, filepaths):\n pass",
"def add_to_db(self):\r\n for filename in self.new_data_files:\r\n unique_name = form_unique_name(filename)\r\n extracted_date = extract_date(filename)\r\n if extracted_date is not None:\r\n # If we can parse the date from the filename we parse the file\r\n file_ = File(filename, unique_name, extracted_date)\r\n content = file_.get_content()\r\n for element in content:\r\n # If each of the spectra in the file has data, we\r\n # add it to the data base\r\n if element[1] is not None:\r\n self.add_to_db_single(element)\r\n status_msg('Elements of file {0} added to db'.format(\r\n unique_name), True)\r\n else:\r\n status_msg('File {0} not added, unknown filename format'.\r\n format(unique_name), False)",
"def update_args_with_file(files, args):\n args['files'] = {}\n for file_name in files:\n file = files[file_name]\n filename = file.filename\n args['files'][file_name] = filename\n return args",
"def updateBaseFiles(self):\n for filename, filetype in self._get_base_files():\n lines = open(filename).readlines()\n\n if self.Verbose:\n print 'Reading %s' % filename\n\n if filetype is 'Python':\n lines, write_out = self._update_python_file(lines, filename) \n elif filetype is 'Properties':\n lines, write_out = self._update_properties_file(lines,filename)\n else:\n raise TypeError, \"Unknown base file type %s\" % filetype\n\n if write_out:\n self._file_writer(lines, filename)",
"def do_f(self, parms):\n\t\tprint self.files",
"def write_to_file(info: List[str]) -> None:\n return",
"def _add_tag_files(\n zip_file, dir_name, payload_info_list, payload_byte_count, payload_file_count\n):\n tag_info_list = []\n _add_tag_file(zip_file, dir_name, tag_info_list, _gen_bagit_text_file_tup())\n _add_tag_file(\n zip_file,\n dir_name,\n tag_info_list,\n _gen_bag_info_file_tup(payload_byte_count, payload_file_count),\n )\n _add_tag_file(\n zip_file, dir_name, tag_info_list, _gen_pid_mapping_file_tup(payload_info_list)\n )\n return tag_info_list",
"def inputFiles(self, filesizelist):\n self.inputs = filesizelist\n self.inputSize = reduce(lambda x,y: x + y[1], filesizelist, 0)",
"def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]",
"def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict",
"def get_output_data(filenames):\n output = []\n for filename in filenames:\n file_info = get_file_info(filename)\n output.append(file_info)\n return output",
"def extract_info_from_arguments(self):\r\n\r\n for sample_name in self.arguments['--sample_name']:\r\n self.list_of_samples_to_be_combined.append(sample_name)\r\n\r\n for file_path in self.arguments['--input_file']:\r\n file_object = Input_file(file_path, self.list_of_samples_to_be_combined)\r\n self.indices.update(file_object.indices)\r\n self.list_of_input_files.append(file_object)\r\n self.list_of_input_files_paths.append(file_path)\r\n\r\n if self.arguments['--out']:\r\n if self.arguments['--output_format'] == 'COMPRESSED':\r\n self.compressed = True\r\n elif self.arguments['--output_format'] == 'UNCOMPRESSED':\r\n self.compressed = False\r\n else:\r\n if self.list_of_input_files[0].compressed:\r\n self.compressed = True\r\n else:\r\n self.compressed = False",
"def __concatonate_files_controller(self):\n\n # find all barcode file paths\n barcode_directories = []\n for root, directory, files in os.walk(self.input_directory):\n for name in directory:\n barcode_directories.append( os.path.join(root, name) )\n\n # iterate through each barcode directory, item is the file path\n for item in barcode_directories:\n file = os.listdir(item)[0]\n path = item\n\n new_file_name = self.__return_new_file_name(file_name=file, file_path=path)\n self.__concatonate_files(new_file_name=new_file_name, parent_folder=path)\n self.__write_logs_to_file(new_file_name)"
] | [
"0.6511712",
"0.6023056",
"0.59897375",
"0.58889556",
"0.5849256",
"0.5835569",
"0.58307076",
"0.5823733",
"0.5703966",
"0.5639739",
"0.5636581",
"0.5610219",
"0.5601616",
"0.55730885",
"0.55701035",
"0.55312705",
"0.5526952",
"0.5526862",
"0.55259305",
"0.5499703",
"0.5496988",
"0.54943854",
"0.5486146",
"0.5479475",
"0.5464378",
"0.54551685",
"0.5450003",
"0.54388237",
"0.54354954",
"0.5433382"
] | 0.6418849 | 1 |
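A minimal usage sketch for appendFileInfo, assuming hypothetical getValue/getName helpers and dict-like File records; none of these helpers are defined in this excerpt, so treat them as stand-ins only.

# Sketch only: getValue/getName and the sample File dicts below are assumptions,
# not the dataset's own helpers.
def getValue(File, param):
    return File.get(param)            # assumed: File behaves like a dict of parsed fields

def getName(File):
    return File.get('name', 'unnamed')

def appendFileInfo(File, params, extractedValues, names):   # as defined in the row above
    for p in params:
        extractedValues[p].append(getValue(File, p))
    names.append(getName(File))

params = ['voltage', 'current']       # hypothetical parameter names
extractedValues = {p: [] for p in params}
names = []

for File in ({'name': 'run01', 'voltage': 3.7, 'current': 0.5},
             {'name': 'run02', 'voltage': 3.6, 'current': 0.4}):
    appendFileInfo(File, params, extractedValues, names)

# extractedValues -> {'voltage': [3.7, 3.6], 'current': [0.5, 0.4]}
# names           -> ['run01', 'run02']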
Appends File info to input arrays for cycles mode | def appendFileInfoCycles(File, params, extractedValues, names, cyclesColumn):
for cycle in getCycleRange(File):
for p in params:
extractedValues[p].append(getValue(File, p, cycle))
names.append('{}_cycle{}'.format(getName(File),
makeCycleSortable(cycle)))
cyclesColumn.append(cycle + 1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def associate_files(self):\n # Open starinfo file and define structured array\n starinfo_file = self.starinfo_file\n nstar = sum(1 for line in open(starinfo_file))\n infoname = ['obj', 'std', 'caldir', 'altname']\n infofmt = ['|S25', '|S25', '|S25', '|S25']\n starinfo = np.zeros(nstar, dtype={\n 'names': infoname, 'formats': infofmt})\n with open(starinfo_file, 'r') as arq:\n for i in range(nstar):\n linelist = arq.readline().split()\n for j in range(len(infoname)):\n starinfo[i][j] = linelist[j]\n\n if self.stored_sens:\n self.load_storedsens()\n\n os.chdir(self.raw_dir)\n\n l = glob.glob('*.fits')\n l.sort()\n\n headers = []\n headers_ext1 = []\n for i in l:\n try:\n headers.append(fits.getheader(i, ext=0))\n headers_ext1.append(fits.getheader(i, ext=1))\n except IOError:\n print('IOError reading file {:s}.'.format(i))\n raise SystemExit(0)\n\n oversc = np.array(\n [('overscan') in i for i in headers_ext1], dtype='bool')\n\n mjds = np.array([i['mjd-obs'] for i in headers_ext1], dtype='float32')\n idx = np.arange(len(l))\n\n images = np.array([\n l[i] for i in idx if (\n (headers[i]['obstype'] == 'OBJECT') &\n (headers[i]['object'] != 'Twilight') &\n (headers[i]['obsclass'] != 'acq'))])\n\n field_names = [\n 'filename', 'observatory', 'instrument', 'detector',\n 'grating', 'filter1', 'obsclass', 'object', 'obstype',\n 'grating_wl', 'overscan', 'mjd', 'ccdsum']\n types = [\n 'S120', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60',\n 'float32', 'bool', 'float32', 'S60']\n hdrkeys = [\n 'observat', 'instrume', 'detector', 'grating', 'filter1',\n 'obsclass', 'object', 'obstype', 'grwlen']\n\n hdrpars_type = [\n (field_names[i], types[i]) for i in range(len(field_names))]\n\n hdrpars = np.array([\n ((l[i],) + tuple([headers[i][j] for j in hdrkeys]) +\n (oversc[i],) + (mjds[i],) + (headers_ext1[i]['ccdsum'],))\n for i in idx], dtype=hdrpars_type)\n\n associated = []\n\n for i, j in enumerate(images):\n\n # Take great care when changing this.\n hdr = fits.getheader(j, ext=0)\n hdr_ext1 = fits.getheader(j, ext=1)\n mjd = hdr_ext1['mjd-obs']\n\n element = {\n 'image': j, 'observatory': hdr['observat'],\n 'instrument': hdr['instrume'],\n 'detector': hdr['detector'], 'grating_wl': hdr['grwlen'],\n 'mjd': mjd, 'grating': hdr['grating'],\n 'filter1': hdr['filter1'], 'obsclass': hdr['obsclass'],\n 'object': hdr['object']}\n\n if self.stored_sens:\n ssf = self.stored_sensfunc\n element['standard_star'] = ssf['filename'][\n (ssf['observatory'] == hdr['observat']) &\n (ssf['detector'] == hdr['detector']) &\n (ssf['grating'] == hdr['grating']) &\n (ssf['instrument'] == hdr['instrume']) &\n (ssf['filter1'] == hdr['filter1']) &\n (ssf['maskname'] == hdr['maskname'])]\n else:\n element['standard_star'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'OBJECT') &\n (np.array([k in ['partnerCal', 'progCal']\n for k in hdrpars['obsclass']], dtype='bool')) &\n (hdrpars['object'] != 'Twilight') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['filter1'] == hdr['filter1']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'stdstar_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'stdstar_ttol'))]\n\n element['flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (hdrpars['detector'] == hdr['detector']) &\n 
(abs(mjds - mjd) <= self.cfg.getfloat('associations',\n 'flat_ttol'))]\n\n element['twilight'] = hdrpars['filename'][\n (hdrpars['object'] == 'Twilight') &\n (hdrpars['obstype'] == 'OBJECT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'twilight_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'twilight_ttol'))]\n\n c = 'twilight'\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 1:\n element[c] = element[c][0]\n elif len(element[c]) == 0:\n element[c] = ''\n\n # A flat close to the twilight observation for a better\n # response function.\n if element['twilight']:\n twipars = hdrpars[hdrpars['filename'] == element['twilight']]\n element['twilight_flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == twipars['observatory']) &\n (hdrpars['detector'] == twipars['detector']) &\n (hdrpars['grating'] == twipars['grating']) &\n (hdrpars['grating_wl'] == twipars['grating_wl']) &\n (abs(mjds - twipars['mjd']) <= self.cfg.getfloat(\n 'associations', 'twilight_ttol'))]\n else:\n element['twilight_flat'] = np.array([], dtype='S60')\n\n element['arc'] = hdrpars['filename'][\n # (hdrpars['object'] == 'CuAr') &\n (hdrpars['obstype'] == 'ARC') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'arc_ttol'))]\n\n element['bias'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BIAS') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'bias_ttol')) &\n (\n (hdrpars['overscan'] & (self.fl_over == 'yes')) |\n (~hdrpars['overscan'] & (self.fl_over == 'no'))\n )]\n\n im = fits.open(element['image'])\n ishape = np.array(im[1].data.shape, dtype='float32')\n im.close()\n del(im)\n\n validBiases = np.ones(len(element['bias']), dtype='bool')\n k = 0\n\n for biasImage in element['bias']:\n\n bias = fits.open(biasImage)\n bshape = np.array(bias[1].data.shape, dtype='float32')\n bias.close()\n del(bias)\n\n #\n # Elinates biases if they differ in array size from\n # the science image. 
Small differences are normal due to\n # the overscan subtraction in processed bias frames.\n #\n if np.any(np.abs(bshape / ishape - 1.0) > 0.10):\n validBiases[k] = False\n\n k += 1\n\n element['bias'] = element['bias'][validBiases]\n del(k)\n\n element['bpm'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BPM') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['ccdsum'] == hdr_ext1['ccdsum'])]\n\n categories = ['flat', 'bias', 'arc', 'standard_star',\n 'bpm', 'twilight_flat']\n\n for c in categories:\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 0:\n element[c] = ''\n elif len(element[c]) == 1:\n element[c] = (element[c])[0]\n\n associated.append(element)\n\n # Define mdf filename\n # Based in gprepare.cl\n # Did not account for observation in Nod-and-Shuffle\n for i in associated:\n header_flat = [\n k for j, k in enumerate(headers) if l[j] == i['flat']\n ]\n if len(header_flat):\n header_flat = header_flat[0]\n MaskName = header_flat['maskname']\n if MaskName == \"IFU-2\":\n slits = 'both'\n elif MaskName == \"IFU-B\":\n slits = 'blue'\n elif MaskName == \"IFU-R\":\n slits = 'red'\n i['slits'] = slits\n\n if self.object_filter:\n objs = self.object_filter.split(',')\n sci_ims = [\n i for i in associated if (\n (i['obsclass'] == 'science') &\n (i['object'] in objs))]\n else:\n sci_ims = [i for i in associated if i['obsclass'] == 'science']\n\n if self.all_stars:\n std_ims = [\n i for i in associated if i['obsclass'] in ['partnerCal',\n 'progCal']]\n else:\n used_stds = [i['standard_star'] for i in sci_ims]\n std_ims = [i for i in associated if i['image'] in used_stds]\n\n # Get star info from starinfo.dat\n possible_names = np.concatenate((starinfo['obj'], starinfo['std'],\n starinfo['altname']))\n n_names = len(possible_names)\n\n for i, j in enumerate(possible_names):\n possible_names[i] = (j.lower()).replace(' ', '')\n\n for i in std_ims:\n # Removes the 'standard_star' key if the dictionary\n # element in question refers to a standard star.\n del i['standard_star']\n starname = (i['object'].lower()).replace(' ', '')\n\n try:\n stdstar_idx = (\n np.arange(n_names)[possible_names == starname] %\n (n_names / 3))[0]\n except:\n raise Exception(\n 'Standard star named {:s} not found in file {:s}'.\n format(starname, starinfo_file))\n\n i['stdstar'] = starinfo[stdstar_idx]['std']\n\n if starinfo[stdstar_idx]['caldir'] == 'gireds_data':\n i['caldir'] = pkg_resources.resource_filename(\n 'gireds', 'data/')\n else:\n i['caldir'] = starinfo[stdstar_idx]['caldir']\n\n self.sci = sci_ims\n self.std = std_ims\n\n # Writes the file association dictionary to an ASCII file\n # in the run directory.\n\n if not self.dry_run:\n try:\n os.mkdir(self.products_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n try:\n os.mkdir(self.run_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n\n if not self.dry_run:\n os.chdir(self.run_dir)\n json.dump(\n sci_ims, open('file_associations_sci.dat', 'w'),\n sort_keys=True, indent=4)\n json.dump(\n std_ims, open('file_associations_std.dat', 'w'),\n sort_keys=True, indent=4)",
"def CHANGE_appendAll(self):\r\n # Separate new files to be loaded\r\n FoI = list(set(self.listenFiles)-set(self.logFiles))\r\n FoI.sort()\r\n for file in FoI:\r\n print(\"Loading {}\".format(file))\r\n filePath = os.path.join(self.listenDir, file)\r\n\r\n try:\r\n (newProj, newAngle) = self.read_projection_image(filePath)\r\n\r\n self.logTiltAngles = np.append(self.logTiltAngles, newAngle)\r\n\r\n # Invert Contrast for BF-TEM\r\n if self.invert:\r\n newProj *= -1\r\n\r\n newProj = self.background_subtract(newProj)\r\n\r\n # Apply Center of Mass (if selected)\r\n if self.alignMethod == 'CoM':\r\n newProj = self.center_of_mass_align(newProj)\r\n\r\n # Account for Python's disdain for AxAx1 arrays\r\n # (compresses to 2D)\r\n if (len(self.logTiltSeries0) == 0):\r\n dataDim = np.shape(newProj)\r\n self.logTiltSeries0 = np.zeros([dataDim[0], dataDim[1], 1])\r\n self.logTiltSeries0[:, :, 0] = newProj\r\n self.wbp = wbp.WBP(dataDim[0], dataDim[1], 1)\r\n else:\r\n self.logTiltSeries0 = np.dstack((self.logTiltSeries0,\r\n newProj))\r\n\r\n self.logFiles = np.append(self.logFiles, file)\r\n\r\n except Exception:\r\n print('Could not read : {}, will proceed with reconstruction\\\r\n and re-download on next pass'.format(file))\r\n break\r\n\r\n # Apply Cross-Correlation after reading images (if selected)\r\n if self.alignMethod == 'xcor':\r\n self.logTiltSeries = self.xcorr_align(self.logTiltSeries0)\r\n # update tilt angles and sinogram\r\n self.wbp.set_tilt_series(self.logTiltSeries, self.logTiltAngles)\r\n # re-center tilt axis\r\n self.logTiltSeries = self.shift_tilt_axis(self.logTiltSeries,\r\n self.logTiltAngles)\r\n else:\r\n self.logTiltSeries = self.logTiltSeries0",
"def __setitem__(self, filenr, data_arr):\n cvcfile = self.filenames[filenr]\n cvcpath = os.path.join(self.filefolder, cvcfile)\n data_arr.tofile(cvcpath)",
"def readFile(self, files):\n files = np.atleast_1d(files) # allow scalar input\n\n events = list()\n groups = list()\n flashes = list()\n one_sec = list()\n\n ev_id_ctr = 0\n gr_id_ctr = 0\n fl_id_ctr = 0\n\n for _file in files:\n # todo: with...open\n nc = Dataset(_file)\n\n this_ev = _extract_events(nc)\n this_grp = _extract_groups(nc)\n this_fl = _extract_flashes(nc)\n this_one_sec = _extract_one_second(nc, background=False)\n\n nc.close()\n\n # TODO: do we need check for \"empty\" files like w/GLM?\n\n # IDs are not necessarily unique. We'll modify them so they are.\n # Similar to what is done with GLM data (glm.py in this package)\n # See there for details, but the gist is get unique values and map\n # TODO: refactor?\n\n this_ev.sort_values('id', inplace=True)\n this_grp.sort_values('id', inplace=True)\n this_fl.sort_values('id', inplace=True)\n\n new_flash_id = np.arange(len(this_fl))\n this_fl.id = new_flash_id\n flash_id_map = dict(zip(this_fl._orig_id.values, new_flash_id))\n\n # Update group parent\n new_id = this_grp.parent_id.map(flash_id_map.get)\n this_grp.parent_id = new_id\n\n # New id for the group:\n new_group_id = np.arange(len(this_grp))\n this_grp.id = new_group_id\n group_id_map = dict(zip(this_grp._orig_id.values, new_group_id))\n\n # Update event parent\n this_ev.parent_id = this_ev.parent_id.map(group_id_map.get)\n\n # New event ID (although I don't think is really necessary)\n new_event_id = np.arange(len(this_ev))\n this_ev.id = new_event_id\n\n # Add in an offset to get unique values across files\n this_ev['id'] += ev_id_ctr\n this_grp['id'] += gr_id_ctr\n this_fl['id'] += fl_id_ctr\n\n # Offset the parent IDs for the children too:\n this_ev['parent_id'] += gr_id_ctr\n this_grp['parent_id'] += fl_id_ctr\n\n # Next, update the counters\n ev_id_ctr = this_ev['id'].iloc[-1]+1\n gr_id_ctr = this_grp['id'].iloc[-1]+1\n fl_id_ctr = this_fl['id'].iloc[-1]+1\n\n # Modify the times to UTC:\n for val in [this_ev, this_grp, this_fl]: # one seconds already converted\n val.time = tai93_to_utc(val.time)\n\n # todo: add option to not sort by time\n # this_event.sort_values('time', inplace=True)\n # this_group.sort_values('time', inplace=True)\n # this_flash.sort_values('time', inplace=True)\n\n # Finally, add \"this\" data\n events.append(this_ev)\n groups.append(this_grp)\n flashes.append(this_fl)\n one_sec.append(this_one_sec)\n\n # Put these as attributes of the class\n self.events = Ltg(pd.concat(events))\n self.groups = Ltg(pd.concat(groups))\n self.flashes = Ltg(pd.concat(flashes))\n self.one_second = Ltg(pd.concat(one_sec))",
"def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if filename is None:\n return\n\n file_counter = self.file_counter\n # count = file_counter.get(filename, 0)\n # file_counter[filename] = count + i\n # if file_counter[filename] <= 0:\n count = file_counter.get(filename, 0) + i\n if count <= 0:\n # Remove the file from the dictionary if its count has\n # dropped to zero\n file_counter.pop(filename, None)\n else:\n file_counter[filename] = count\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass",
"def read_data_files(self):\n\n for name, snap in zip(self.names, self.snaps):\n # build the very important dictionary:\n key = f'{name}_{snap:03}' # e.g 'MW_000'\n self.galaxies[key] = Galaxy(name, snap, self.path, \n self.usesql, self.ptype, self.stride)\n self.time = self.galaxies[key].time\n\n # bits of minor housekeeping:\n # self.path = self.galaxies[key].filepath # may speed up next search\n self.filenames.append(key)",
"def process_all(fileinfos, args):\n # create overall figure\n count_and_draw(fileinfos,args)\n # create figures for all the files\n for key in fileinfos:\n count_and_draw(fileinfos,args,key)\n # create figures for all the elements\n els_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_el\"]:\n if key not in els_processed:\n count_and_draw(fileinfos,args,key)\n els_processed.append(key)\n # create figures for all the attributes\n atts_processed = []\n for key in fileinfos:\n for key in fileinfos[key][\"usage_att\"]:\n if key not in atts_processed:\n count_and_draw(fileinfos,args,\"@\"+key)\n atts_processed.append(key)",
"def seqff(self):\r\n\r\n start = time.time()\r\n\r\n # load bininfo\r\n bininfo = load_bininfo(self.bininfodata_loc)\r\n\r\n # load input files\r\n if os.path.isdir(self.input_loc):\r\n input_list = [self.input_loc + x for x in os.listdir(self.input_loc)]\r\n\r\n elif os.path.isfile(self.input_loc):\r\n input_list = [self.input_loc]\r\n\r\n else:\r\n raise FileNotFoundError(\"error occurred : inputData is not a Directory or File\")\r\n\r\n for i, file in enumerate(input_list):\r\n filetype = file.split(\".\")[-1]\r\n # filetype : 'sam' or 'bam' or 'newtemp'\r\n if 'sam' in filetype:\r\n bincount = load_sam(file)\r\n\r\n elif 'newtemp' in filetype:\r\n bincount = load_counts(file)\r\n file = file.replace(\".newtemp\", \"\") # TEMP .newtemp -> .bam\r\n\r\n elif 'bam' in filetype:\r\n bincount = load_bam(file)\r\n\r\n else:\r\n continue\r\n\r\n #CREATE newtemp file in \"output_loc\"/newtemp/\r\n create_newtemp(bincount, file, self.newtemp_loc)\r\n\r\n newtemp = pd.DataFrame.from_dict(bincount, orient='index')\r\n newtemp.reset_index(level=0, inplace=True)\r\n newtemp.rename(columns={'index': 'binName', 0: 'counts'}, inplace=True)\r\n\r\n temp_bininfo = bininfo.copy(deep=True)\r\n temp_bininfo = temp_bininfo.merge(newtemp, on='binName',\r\n how='left') # missing value : NaN, not NA in pandas\r\n temp_bininfo['counts'] = temp_bininfo['counts'].fillna(0)\r\n\r\n temp_bininfo.sort_values(by='binorder', inplace=True)\r\n temp_bininfo.reset_index(drop=True)\r\n\r\n ####DATA PROCESSING #######################\r\n autosomebinsonly = []\r\n for index in range(61927):\r\n boolean = (temp_bininfo['FRS'][index] != 'NA') and \\\r\n (float(temp_bininfo['GC'][index]) > 0.316) and \\\r\n (temp_bininfo['CHR'][index] != 'chrX') and \\\r\n (temp_bininfo['CHR'][index] != 'chrY')\r\n autosomebinsonly.append(boolean)\r\n autosomebinsonly = pd.Series(autosomebinsonly)\r\n\r\n alluseablebins = []\r\n for index in range(61927):\r\n boolean = (temp_bininfo['FRS'][index] != \"NA\") and (float(temp_bininfo['GC'][index]) > 0.316)\r\n alluseablebins.append(boolean)\r\n alluseablebins = pd.Series(alluseablebins)\r\n\r\n #CREATE alluseablebins file in \"output_loc\"/alluseablebins\r\n #create_alluseablebins(alluseablebins, file, self.alluseablebins_loc)\r\n\r\n sum_counts = pd.Series(temp_bininfo['counts'])\r\n sum_counts = sum_counts[autosomebinsonly].sum(skipna=True)\r\n\r\n autoscaledtemp = pd.Series(temp_bininfo['counts'].loc[(autosomebinsonly)],\r\n copy=True) / sum_counts # NA-related code removed\r\n allscaledtemp = pd.Series(temp_bininfo['counts'].loc[(alluseablebins)], copy=True) / sum_counts\r\n\r\n gc_index = {}\r\n cnt = 0\r\n for index, isauto in enumerate(autosomebinsonly):\r\n if isauto:\r\n if temp_bininfo['GC'].iat[index] in gc_index:\r\n gc_index[temp_bininfo['GC'].iat[index]].append(float(autoscaledtemp.iat[cnt]))\r\n cnt += 1\r\n\r\n else:\r\n gc_index[temp_bininfo['GC'].iat[index]] = [float(autoscaledtemp.iat[cnt])]\r\n cnt += 1\r\n\r\n key_list = []\r\n val_list = []\r\n for key, val in gc_index.items():\r\n key_list.append(key)\r\n val_list.append(np.median(val))\r\n\r\n loess_var = loess(key_list, val_list) # default span : 0.75\r\n loess_var.fit()\r\n # y = loess.loess_prediction(newData, loessVar)\r\n # temp_loessPredict.loess_debugging(loessVar)\r\n\r\n ###prediction###\r\n loess_x = [float(gc) for index, gc in enumerate(temp_bininfo['GC']) if (alluseablebins[index])]\r\n # print(temp_bininfo['GC'])\r\n loess_fitted = loess_var.predict(loess_x)\r\n loess_fitted = list(loess_fitted.values)\r\n # 
print(loess_fitted)\r\n\r\n median_autoscaledtemp = np.median(autoscaledtemp)\r\n median_autoscaledtemp = float(median_autoscaledtemp) # for fixed constant\r\n\r\n normalizedbincount = [(x + (median_autoscaledtemp - loess_fitted[index])) for index, x in\r\n enumerate(allscaledtemp)]\r\n\r\n #CREATE normalizedbincount in \"output_loc\"/normalizedbincount\r\n create_normalizedbincount(normalizedbincount, file, self.normalizedbincount_loc)\r\n\r\n bincounts = pd.Series(data=np.repeat(a=0.0, repeats=61927), index=temp_bininfo['binName'], dtype=np.float64)\r\n\r\n sum_normalizedbincount = sum([val for val in normalizedbincount if not math.isnan(val)])\r\n sum_normalizedbincount = float(sum_normalizedbincount) # deep copy temporarily\r\n\r\n cnt = 0\r\n for index, x in enumerate(alluseablebins):\r\n if x == True:\r\n data = (normalizedbincount[cnt] / sum_normalizedbincount) * len(normalizedbincount)\r\n bincounts.iat[index] = data\r\n cnt += 1\r\n\r\n #CREATE bincounts in \"output_loc\"/bincounts\r\n create_bincounts(bincounts, file, self.bincounts_loc)\r\n\r\n wrsc = self.prediction(bincounts, self.B, self.mu, self.parameter_1, self.parameter_2)\r\n enet = np.dot(bincounts, (self.elnetbeta)) + (self.elnetintercept)\r\n ff = (wrsc+enet) / 2\r\n\r\n result_lines = list()\r\n result_lines.append(\"SeqFF\\tEnet\\tWRSC\")\r\n result_lines.append(\"{}\\t{}\\t{}\".format(ff, enet, wrsc))\r\n\r\n #CREATE results of seqff (seqff paper result covered) in \"output_loc\"/results\r\n create_results(result_lines, file, self.results_loc)\r\n\r\n end = time.time()\r\n elapsed = end - start\r\n h = int(elapsed) // 3600\r\n m = (int(elapsed) - (h * 3600)) // 60\r\n s = (int(elapsed) % 60)\r\n print(\"elapsed time: %d hr %d min %d sec\" % (h, m, s))\r\n print(\"elapsed :\", elapsed)\r\n print(\"progress : {} / {}\".format(i + 1, self.progress))",
"def __concatonate_files_controller(self):\n\n # find all barcode file paths\n barcode_directories = []\n for root, directory, files in os.walk(self.input_directory):\n for name in directory:\n barcode_directories.append( os.path.join(root, name) )\n\n # iterate through each barcode directory, item is the file path\n for item in barcode_directories:\n file = os.listdir(item)[0]\n path = item\n\n new_file_name = self.__return_new_file_name(file_name=file, file_path=path)\n self.__concatonate_files(new_file_name=new_file_name, parent_folder=path)\n self.__write_logs_to_file(new_file_name)",
"def _create_txt_from_list(in_path, time_array, channels, new_path):\n\n header = [\"# OpenSignals Text File Format\"]\n files = [load(p) for p in in_path]\n with open(in_path[0], encoding=\"latin-1\") as opened_p:\n with open(in_path[1], encoding=\"latin-1\") as opened_p_1:\n # append both headers\n header.append(opened_p.readlines()[1][:-2] + ', ' + opened_p_1.readlines()[1][3:])\n\n header.append(\"# EndOfHeader\")\n\n print('2')\n # lists for holding the read data\n data = []\n nr_channels = []\n\n # read the data\n for i, file in enumerate(files):\n device = list(file.keys())\n nr_channels.append(len(list(file[device[0]])))\n data.append(file[device[0]][channels[i]])\n print('3')\n # calculate the delay between both signals\n dephase, _, _ = synchronise_signals(data[0], data[1],time_array, time_interval = 300, fs = 100)\n print('4')\n # load original data\n data_1 = np.loadtxt(in_path[0])\n data_2 = np.loadtxt(in_path[1])\n\n # Check which device lags\n if dephase < 0:\n\n # second device lags\n # slice the data\n data_2 = data_2[np.abs(dephase):]\n\n elif dephase > 0:\n\n # first device lags\n # slice the data\n data_1 = data_1[np.abs(dephase):]\n\n else:\n # dephase == 0 ---> devices were already syncronised\n print(\"The devices were already synchronised.\")\n\n print(len(data_1))\n print(len(data_2))\n\n # pad data so that both devices are of the same length\n # in case that phase = 0 the data will only be concatenated horizontally\n print('5')\n new_file = _shape_array(data_1, data_2, dephase)\n print('6')\n\n # write header to file\n new_header = [h.replace(\"\\n\", \"\") for h in header]\n sync_file = open(new_path, 'w')\n sync_file.write('\\n'.join(new_header) + '\\n')\n\n # writing synchronised data to file\n for line in new_file:\n sync_file.write('\\t'.join(str(i) for i in line) + '\\t\\n')\n\n # close the file\n sync_file.close()",
"def openfile(self,files):\n for f in files:\n if f in self.fmap:\n continue\n try:\n fd=open(f,'r');\n self.files.append(fd)\n self.fmap[f]=fd\n if len(self.handle)<2:\n self.handle.append(len(self.files)-1)\n self.fname.append(f)\n self.total+=[0]\n self.inst+=[{}]\n self.excl+=[{}]\n self.incl+=[{}]\n self.caller_callee+=[{}]\n self.loadfile(fd)\n except IOError:\n pass\n print('%s not exist!!'%(f))",
"def fillSongsArray():\r\n counter = 1\r\n notealt = 0.0\r\n frequenz = 0\r\n notencounter = 0\r\n\r\n file2write.write(\"\\n{\")\r\n for instrument in midi_data.instruments:\r\n while counter == 1:#first line of the instrument e.g piano it will only save the treble clef and NOT the bass clef\r\n for note in instrument.notes:\r\n if note.start - notealt >= 0.15: #If the note is a break it will save it as such\r\n value = dauer/((note.start - notealt)*1000)\r\n y = round(value)\r\n file2write.write(\"{0,\")\r\n file2write.write(str(y+1))\r\n file2write.write(\"},\")\r\n\r\n else:\r\n frequenz = int(pretty_midi.note_number_to_hz(note.pitch)) #convert the midi-note-number to a frequency with function of the library\r\n value = dauer/((note.end - note.start)*1000) #calculates the duration of the note\r\n x = round(value)\r\n file2write.write(\"{\")\r\n file2write.write(str(frequenz))\r\n file2write.write(\",\")\r\n file2write.write(str(x))\r\n file2write.write(\"},\")\r\n notealt = note.end\r\n counter += 1\r\n file2write.write(\"},\")\r\n #file2write.write(\"};\\n\")\r",
"def readdata(self, filepaths):\n pass",
"def add_files(self, file_dict):\n from xeye_calib import resize_rgb_b64\n if self.src_keys is None:\n self.src_keys, self.rgb_cam_list, self.rgb_of_depth_cam_list = init_cam_set(file_dict)\n self.src_keys_dict = {v: i for i, v in enumerate(self.src_keys)}\n logger.info('Init Calibrator done.')\n logger.info('src_keys_dict, {}'.format(self.src_keys_dict))\n logger.info('file_dict.keys, {}'.format(file_dict.keys()))\n for k, v in file_dict.items():\n filename = str(10000000 + self.counter)[1:]\n if k.startswith('cam'):\n if 'dept' in k:\n continue\n print(self.src_keys_dict.keys())\n cam_id = self.src_keys_dict[k]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.png')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n # print('calib data copy', v, dst_path)\n # print('calib data copy', v, dst_path, file=sys.stderr)\n # with open(self.record_path, 'a') as fout:\n # fout.write('cp ' + v + ' ' + dst_path + '\\n')\n with open(dst_path, 'wb') as fout:\n fout.write(base64.b64decode(v))\n elif k.startswith('rgb'):\n cam_id = self.src_keys_dict[k]\n dst_path = os.path.join(self.calib_data_dir, str(\n cam_id), 'cam0', filename + '.jpg')\n if not os.path.exists(os.path.dirname(dst_path)):\n os.makedirs(os.path.dirname(dst_path))\n if self.resize_xeye:\n resize_rgb_b64(v, dst_path)\n else:\n with open(dst_path, 'wb') as fout:\n fout.write(base64.b64decode(v))\n\n else:\n logger.warn('Unrocognize key: {}'.format(k))\n return\n self.counter += 1",
"def build(self):\r\n self.dirty = 0\r\n \r\n # Files first\r\n for output in self.files.keys():\r\n params = self.files[output]\r\n if (params[1] != -1):\r\n filename = params[0]\r\n freq = params[1]\r\n if (output == 'energies'):\r\n self.myOutputs.append(OutputEnergies.OutputEnergies(filename, freq, 1,0,1.0,0))\r\n elif (output == 'dcdtrajpos'):\r\n if (os.path.exists(filename)): # Continue\r\n self.myOutputs.append(OutputDCDTrajectory.OutputDCDTrajectory(filename, freq, 1, 1))\r\n else: # Overwrite\r\n self.myOutputs.append(OutputDCDTrajectory.OutputDCDTrajectory(filename, freq, 1, 0))\r\n elif (output == 'dcdtrajvel'):\r\n if (os.path.exists(filename)):\r\n self.myOutputs.append(OutputDCDTrajectoryVel.OutputDCDTrajectoryVel(filename, freq, 1, 1))\r\n else:\r\n self.myOutputs.append(OutputDCDTrajectoryVel.OutputDCDTrajectoryVel(filename, freq, 1, 0))\r\n elif (output == 'xyztrajforce'):\r\n self.myOutputs.append(OutputXYZTrajectoryForce.OutputXYZTrajectoryForce(filename, freq))\r\n elif (output == 'xyztrajpos'):\r\n self.myOutputs.append(OutputXYZTrajectoryPos.OutputXYZTrajectoryPos(filename, freq, 1))\r\n elif (output == 'xyztrajvel'):\r\n self.myOutputs.append(OutputXYZTrajectoryVel.OutputXYZTrajectoryVel(filename, freq))\r\n elif (output == 'gui'):\r\n self.myOutputs.append(OutputFAHGUI.OutputFAHGUI(filename, freq, 52753, 1, \"MDL_3.0\", 0.0, 0))\r\n\r\n if (self.screen != -1):\r\n self.myOutputs.append(OutputScreen.OutputScreen(self.screen))\r\n\r\n\r\n # Now plots\r\n for plot in self.plots.keys():\r\n freq = self.plots[plot]\r\n if (freq != -1):\r\n\r\n # Initialize a plot\r\n if (not self.doMPL): # Gnuplot\r\n self.xyData[plot] = []\r\n self.graphs[plot] = Gnuplot(debug=0)\r\n else: # Matplotlib\r\n self.xData[plot] = []\r\n self.yData[plot] = []\r\n self.figures[plot] = 0\r\n\r\n # Add the function to plot the data,\r\n # and the frequency at which to execute it\r\n self.myPlots.append([self.plotFunctions[plot], freq])",
"def initiallize_buffer(self):\n assert os.path.isdir(self.directory)\n #sorting files topologically, files' format is -> data_num.h5 \n files_list = sorted(os.listdir(self.directory + '/' + self.name + '/'), key = lambda x: int(x.split(\"_\")[1].split(\".\")[0]))\n self.files_counter = 0\n if files_list != []: \n for file_name in files_list:\n self.memorize(name = file_name, error = 1)\n self.files_counter += 1\n self.files_tracker = file_name\n else:\n self.files_tracker = 'data_-1.h5'",
"def appendFileInfo(File, params, extractedValues, names):\n for p in params:\n extractedValues[p].append(getValue(File, p))\n names.append(getName(File))",
"def _merge_fileinfos(self, hard_infos, infos):\n new_infos = copy.deepcopy(hard_infos)\n for info in infos:\n new_infos[info['name']] = new_infos.get(info['name'], {})\n new_infos[info['name']].update(info)\n return new_infos",
"def add_files(self, *files):\n for f in files:\n # if file contains actual aperture magnitudes\n if \"mag_calib_unc\" in Table.read(f, format=\"ascii\").colnames:\n LightCurve.__mag_file_append(self, f)\n # if table contains limiting magnitudes\n else: \n LightCurve.__limmag_file_append(self, f)",
"def addFileNames(self, fileNames):\n with Tracer(traceLogger):\n infos = []\n\n oldNumFiles = len(self.topLevelOperator.Dataset)\n # HACK: If the filePath isn't valid, replace it\n # This is to work around the scenario where two independent data selection applets are coupled, causing mutual resizes.\n # This will be fixed when a multi-file data selection applet gui replaces this gui. \n for i in reversed( range( oldNumFiles ) ):\n if not self.topLevelOperator.Dataset[i].ready():\n oldNumFiles -= 1\n else:\n break\n \n \n # Assign values to the new inputs we just allocated.\n # The GUI will be updated by callbacks that are listening to slot changes\n for i, filePath in enumerate(fileNames):\n datasetInfo = DatasetInfo()\n cwd = self.topLevelOperator.WorkingDirectory.value\n \n if not areOnSameDrive(filePath,cwd):\n QMessageBox.critical(self, \"Drive Error\",\"Data must be on same drive as working directory.\")\n return\n \n absPath, relPath = getPathVariants(filePath, cwd)\n \n # Relative by default, unless the file is in a totally different tree from the working directory.\n if len(os.path.commonprefix([cwd, absPath])) > 1:\n datasetInfo.filePath = relPath\n else:\n datasetInfo.filePath = absPath\n\n h5Exts = ['.ilp', '.h5', '.hdf5']\n if os.path.splitext(datasetInfo.filePath)[1] in h5Exts:\n datasetNames = self.getPossibleInternalPaths( absPath )\n if len(datasetNames) > 0:\n datasetInfo.filePath += str(datasetNames[0])\n else:\n raise RuntimeError(\"HDF5 file %s has no image datasets\" % datasetInfo.filePath)\n\n # Allow labels by default if this gui isn't being used for batch data.\n datasetInfo.allowLabels = ( self.guiMode == GuiMode.Normal )\n infos.append(datasetInfo)\n\n #if no exception was thrown, set up the operator now\n self.topLevelOperator.Dataset.resize( oldNumFiles+len(fileNames) )\n for i in range(len(infos)):\n self.topLevelOperator.Dataset[i+oldNumFiles].setValue( infos[i] )",
"def treat(input, output):\n files = find(input)\n acc = []\n for file in files:\n fileInfo = extract(file)\n out = makeOutputPath(output, fileInfo[\"path\"], fileInfo[\"filename\"])\n if not out == None:\n fileInfo[\"outPath\"] = out\n acc += [fileInfo]\n return acc",
"def _update_bpm_file(ffile):\n # Loop over extensions\n # ASSUMES all extensions are image extensions AND first extension is a PHU\n\n # Unbinned versions are in entire detector and individual ccd frames\n unbinned_section_keywords = ['detsec', 'ccdsec']\n binned_section_keywords = ['datasec', 'trimsec']\n section_keywords = []\n section_keywords.extend(unbinned_section_keywords)\n section_keywords.extend(binned_section_keywords)\n\n print(\"Working on...\\n{}:\\n\".format(ffile.filename()))\n\n ffile_slice = ffile[1:]\n for ext in ffile_slice:\n\n if VERBOSE:\n print(\"{}, {}\\n\".format(ext.name, ext.ver))\n\n # Require shape for some reason...\n old_shape = ext.data.shape\n old_y_size, old_x_size = old_shape\n\n # Parse sections and binning\n for key in section_keywords:\n vars()[key] = _parse_iraf_section(_get_key_value(ext, key.upper()))\n [x_bin, y_bin] = [int(value)\n for value in _get_key_value(ext, 'CCDSUM').split()]\n\n # Updated array must cover entire raw amplifier\n # Set default array value to 1 == bad pixels\n # Add the bottom __TRIMMED_ROWS__ / y_bin\n old_array_start = __TRIMMED_ROWS__ / y_bin\n (new_y_size, new_x_size) = (old_y_size + old_array_start, old_x_size)\n new_size = (new_y_size, new_x_size)\n new_array = np.ones(new_size, dtype=__DEFAULT_BPM_DTYPE__)\n\n # Insert old data into new_array\n y_slice = slice(old_array_start, new_y_size)\n new_array[y_slice, :] = ext.data\n ext.data = new_array\n\n\n # Update keywords\n #\n # Binned versions\n for section in binned_section_keywords:\n value = vars()[section]\n old_str_value = _get_key_value(ext, section.upper())\n new_value = [0, new_y_size, value[2], value[3]]\n _set_key_value(ext, section.upper(),\n _set_iraf_section(new_value))\n if VERBOSE:\n print(\"{}: {} -> {}\\n\".format(section.upper(),\n old_str_value, _get_key_value(ext, section.upper())))\n\n # Unbinned version\n # ASSUMES that original y section end at physical end of CCD\n for section in unbinned_section_keywords:\n value = vars()[section]\n new_value = [0, value[1], value[2], value[3]]\n old_str_value = _get_key_value(ext, section.upper())\n _set_key_value(ext, section.upper(),\n _set_iraf_section(new_value))\n if VERBOSE:\n print(\"{}: {} -> {}\\n\".format(section.upper(),\n old_str_value, _get_key_value(ext, section.upper())))\n\n return ffile",
"def load_files(self):\n print('Saving numpy mask arrays in {0}'.format(self.ProcDir))\n\n if not os.path.isdir(self.ProcDir): os.mkdir(self.ProcDir)\n if not os.path.isdir(self.OutDir): os.mkdir(self.OutDir)\n\n self.Files = {}\n for ig in self.Set:\n phase = roipy.tools.load_half(ig,2)\n # convert wavelength to displacements\n # NOTE: make attributes of commonly used values in rsc: float(ig.Rsc['WAVELENGTH'])\n disp = phase * (ig.Wavelength / (4*np.pi))\n igram = ma.array(disp, mask=ma.nomask)\n name = self.save_ma(ig, igram) #Mask_ array is just zeros at this point..\n self.Files[ig.ID] = name\n\n print('load_files() complete: {0} interferograms'.format(self.Set.Nig))",
"def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict",
"def add_crds_reffile_names(self):\n all_obs_info, unique_obs_info = self.info_for_all_observations()\n\n # Add empty placeholders for reference file entries\n empty_col = np.array([' ' * 500] * len(self.info['Instrument']))\n superbias_arr = deepcopy(empty_col)\n linearity_arr = deepcopy(empty_col)\n saturation_arr = deepcopy(empty_col)\n gain_arr = deepcopy(empty_col)\n distortion_arr = deepcopy(empty_col)\n photom_arr = deepcopy(empty_col)\n ipc_arr = deepcopy(empty_col)\n ipc_invert = np.array([True] * len(self.info['Instrument']))\n transmission_arr = deepcopy(empty_col)\n badpixmask_arr = deepcopy(empty_col)\n pixelflat_arr = deepcopy(empty_col)\n\n # Loop over combinations, create metadata dict, and get reffiles\n for status in unique_obs_info:\n updated_status = deepcopy(status)\n (instrument, detector, filtername, pupilname, readpattern, exptype) = status\n\n # Make sure NIRISS filter and pupil values are in the correct wheels\n if instrument == 'NIRISS':\n filtername, pupilname = utils.check_niriss_filter(filtername, pupilname)\n\n # Create metadata dictionary\n date = datetime.date.today().isoformat()\n current_date = datetime.datetime.now()\n time = current_date.time().isoformat()\n status_dict = {'INSTRUME': instrument, 'DETECTOR': detector,\n 'FILTER': filtername, 'PUPIL': pupilname,\n 'READPATT': readpattern, 'EXP_TYPE': exptype,\n 'DATE-OBS': date, 'TIME-OBS': time,\n 'SUBARRAY': 'FULL'}\n if instrument == 'NIRCAM':\n if detector in ['NRCA5', 'NRCB5', 'NRCALONG', 'NRCBLONG', 'A5', 'B5']:\n status_dict['CHANNEL'] = 'LONG'\n else:\n status_dict['CHANNEL'] = 'SHORT'\n if instrument == 'FGS':\n if detector in ['G1', 'G2']:\n detector = detector.replace('G', 'GUIDER')\n status_dict['DETECTOR'] = detector\n updated_status = (instrument, detector, filtername, pupilname, readpattern, exptype)\n\n # Query CRDS\n # Exclude transmission file for now\n files_no_transmission = list(CRDS_FILE_TYPES.values())\n files_no_transmission.remove('transmission')\n reffiles = crds_tools.get_reffiles(status_dict, files_no_transmission,\n download=not self.offline)\n\n # If the user entered reference files in self.reffile_defaults\n # use those over what comes from the CRDS query\n if self.reffile_overrides is not None:\n manual_reffiles = self.reffiles_from_dict(updated_status)\n\n for key in manual_reffiles:\n if manual_reffiles[key] != 'none':\n if key == 'badpixmask':\n crds_key = 'mask'\n elif key == 'pixelflat':\n crds_key = 'flat'\n elif key == 'astrometric':\n crds_key = 'distortion'\n else:\n crds_key = key\n reffiles[crds_key] = manual_reffiles[key]\n\n # Transmission image file\n # For the moment, this file is retrieved from NIRCAM_GRISM or NIRISS_GRISM\n # Down the road it will become part of CRDS, at which point\n if 'transmission' not in reffiles.keys():\n reffiles['transmission'] = get_transmission_file(status_dict)\n self.logger.info('Using transmission file: {}'.format(reffiles['transmission']))\n\n # Check to see if a version of the inverted IPC kernel file\n # exists already in the same directory. 
If so, use that and\n # avoid having to invert the kernel at run time.\n inverted_file, must_invert = SimInput.inverted_ipc_kernel_check(reffiles['ipc'])\n if not must_invert:\n reffiles['ipc'] = inverted_file\n reffiles['invert_ipc'] = must_invert\n\n # Identify entries in the original list that use this combination\n match = [i for i, item in enumerate(all_obs_info) if item==status]\n\n # Populate the reference file names for the matching entries\n superbias_arr[match] = reffiles['superbias']\n linearity_arr[match] = reffiles['linearity']\n saturation_arr[match] = reffiles['saturation']\n gain_arr[match] = reffiles['gain']\n distortion_arr[match] = reffiles['distortion']\n photom_arr[match] = reffiles['photom']\n ipc_arr[match] = reffiles['ipc']\n ipc_invert[match] = reffiles['invert_ipc']\n transmission_arr[match] = reffiles['transmission']\n badpixmask_arr[match] = reffiles['mask']\n pixelflat_arr[match] = reffiles['flat']\n\n self.info['superbias'] = list(superbias_arr)\n self.info['linearity'] = list(linearity_arr)\n self.info['saturation'] = list(saturation_arr)\n self.info['gain'] = list(gain_arr)\n self.info['astrometric'] = list(distortion_arr)\n self.info['photom'] = list(photom_arr)\n self.info['ipc'] = list(ipc_arr)\n self.info['invert_ipc'] = list(ipc_invert)\n self.info['transmission'] = list(transmission_arr)\n self.info['badpixmask'] = list(badpixmask_arr)\n self.info['pixelflat'] = list(pixelflat_arr)",
"def read_adas(self):\n for name in self.files_atte:\n self.beam_atte.append(adas.ADAS21(name))\n for name in self.files_emis:\n self.beam_emis.append(adas.ADAS22(name))",
"def readdata(self, reflist , comment = '#' , regexp = None , substr = None, filename = True):\n self.kpunten = []\n datalist = []\n prefixlist = []\n if os.path.isfile(str(reflist)):\n reflist = [reflist] #if we work with only one file this wraps it automatically in right format\n for ref in reflist:\n print('start with the collection of data from file %s' %ref)\n plotf = open(ref, 'r')\n if not filename:\n prefixlist.append( os.path.dirname(ref) + '/')\n else:\n prefixlist.append(re.sub('\\.dat$' , '' , ref))\n try:\n if regexp != None:\n raise ValueError\n dataf = np.loadtxt(plotf,comments = comment)\n print 'we readed data in with np.loadtxt'\n except:\n print('reading in data with numpy loadtxt failed or use reg exp to extract information')\n dataf = np.array([])\n kpuntenf = []\n plotf.seek(0) #go back to beginning of file\n for line in plotf:\n if regexp is not None:\n analyse = re.search(regexp,line)\n if analyse:\n kpuntenf.append((analyse.group(1), len(dataf)-1 ))\n print 'we found the following matches: %s' % analyse.group(0)\n if substr != None: \n line = re.sub(substr, '' , line)\n if line[0] != comment:\n #print line\n pline = np.array(map(float,line.split()))\n if len(dataf) <= 1:\n dataf = pline\n else:\n try:\n dataf = np.vstack((dataf,pline))\n except:\n continue\n self.kpunten.append(kpuntenf)\n datalist.append(dataf)\n\n plotf.close()\n self.datarg = datalist\n self.prefix = prefixlist\n self.reader = dr.ReaderOutput(reflist[0]) #Some plotting functions need a bit more information this info is extracted from the header of the files\n self.reader.depvar['depvar'] += ' (a.u.)'",
"def eeg_loaddata(filedir,filemask):\t\n\tfiles = glob.glob1(filedir,filemask)\n\tprint \"loading %d files\" %len(files)\n\teeg,tim,nchan,ntpts = eeg_readavr(op.join(filedir,files[0])) #just to initialize the next line\n\tdata = np.zeros((len(files),eeg.shape[0],eeg.shape[1]))\n\tfor i in range(len(files)):\n\t\teeg,tim,nchan,ntpts = eeg_readavr(op.join(filedir,files[i]))\n\t\tdata[i,:,0:ntpts]=eeg[:,0:ntpts]\n\t\tcnt = 100.0*i/len(files)\t\n\t\tsys.stdout.write(\"progress: \\r%d%%\" %cnt)\n\n return data,tim,nchan,files",
"def setESFiles(self, eSourceDir = None, verbose = False):\n\n print('\\n***Setting electronic structure files')\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc':\n # Check and set electronic structure file for packaging.\n if '***Missing' in self.nbDetails[key]['jobInfo'][2]:\n self.nbDetails[key]['elecStructure'] = None\n else:\n if eSourceDir is not None:\n # Copy electronic structure files to package using supplied path\n fileName = Path(self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\"))\n self.nbDetails[key]['elecStructure'] = Path(eSourceDir, fileName.name).as_posix()\n\n else:\n # Copy electronic structure files to package, based on full path from original job\n self.nbDetails[key]['elecStructure'] = self.nbDetails[key]['jobInfo'][-1].split()[-1].strip(\"'\")\n\n checkList = self.checkFiles(self.nbDetails[key]['elecStructure'])\n\n # If file is missing, set to \"missing\"\n if not checkList[0]:\n self.nbDetails[key]['elecStructure'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {self.nbDetails[key]['elecStructure']}\"\n\n # If file is present, check also for corresponding files\n else:\n # Assuming above is molden file, check also for corresponding Gamess file\n gFile = Path(self.nbDetails[key]['elecStructure']).with_suffix('.log')\n checkList = self.checkFiles(gFile)\n if checkList[0]:\n # self.nbDetails[key]['elecStructure'].append(gFile.as_posix()) # Set here to append... hopefully works OK with arch update code...\n self.nbDetails[key]['elecStructureGamess'] = gFile.as_posix() # Set here as separate item\n else:\n self.nbDetails[key]['elecStructureGamess'] = f\"***Missing file: {gFile.as_posix()}\"\n #\n\n if verbose:\n print(f\"Job {key}: {self.nbDetails[key]['title']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructure']}\")\n print(f\"Set file: {self.nbDetails[key]['elecStructureGamess']}\")",
"def write_files(neither, both, c_only, f_only, fh_c, data_c_start):\n\n # output arrays for all pixels in input datasets\n write_to_file(neither.astype(np.int16), 'neither_cube.fits')\n write_to_file(both.astype(np.int16), 'both_cube.fits')\n write_to_file(c_only.astype(np.int16), 'c_only_cube.fits')\n write_to_file(f_only.astype(np.int16), 'f_only_cube.fits')\n\n # output arrays for pixels in 2d array\n print(' Within the 2d arrays:')\n if (fh_c[0].header['NAXIS'] == 3): # for nirspec with 1 integration\n write_to_file(neither.sum(axis=0), 'neither_2d.fits')\n write_to_file(both.sum(axis=0), 'both_2d.fits')\n write_to_file(c_only.sum(axis=0), 'c_only_2d.fits')\n write_to_file(f_only.sum(axis=0), 'f_only_2d.fits')\n print(' The fraction of pixels in the 2d array having true CRs:',\\\n float(len(np.where(both.sum(axis=0) != 0.)[0])) / data_c_start.size)\n elif (fh_c[1].header['NAXIS'] == 4): # for miri or nircam cases\n write_to_file(neither.sum(axis=1).sum(axis=0), 'neither_2d.fits')\n write_to_file(both.sum(axis=1).sum(axis=0), 'both_2d.fits')\n write_to_file(c_only.sum(axis=1).sum(axis=0), 'c_only_2d.fits')\n write_to_file(f_only.sum(axis=1).sum(axis=0), 'f_only_2d.fits')\n print(' The fraction of pixels in the 2d array having true CRs:',\\\n float(len(np.where(both.sum(axis=1).sum(axis=0) != 0.)[0])) / \\\n data_c_start.size)\n else:\n print('FATAL ERROR - unexpected case in write_file()')"
] | [
"0.62886095",
"0.59401524",
"0.5788338",
"0.57699704",
"0.57441694",
"0.5552395",
"0.55344814",
"0.5515049",
"0.549071",
"0.5488106",
"0.54504967",
"0.5429675",
"0.5382784",
"0.53214943",
"0.5320739",
"0.52730936",
"0.52273995",
"0.52198684",
"0.52138954",
"0.52115697",
"0.5198478",
"0.51881826",
"0.51803905",
"0.5178411",
"0.5159162",
"0.514749",
"0.51430374",
"0.5142291",
"0.51361036",
"0.51338416"
] | 0.6070096 | 1 |
Verify that /products/ page renders topics. | def test_product_landing(self, flag_is_active):
flag_is_active.return_value = True
# Create a product
p = product(save=True)
# Create some topics
topics = []
for i in range(11):
topics.append(topic(save=True))
# Create a document and assign the product and 10 topics.
doc = revision(is_approved=True, save=True).document
doc.products.add(p)
for i in range(10):
doc.topics.add(topics[i])
self.refresh()
# GET the topic page and verify the content
url = reverse('products.product', args=[p.slug])
r = self.client.get(url, follow=True)
eq_(200, r.status_code)
doc = pq(r.content)
eq_(10, len(doc('#help-topics li'))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_topics_for_products(self):\n desktop_topics = topics_for(product=self.desktop)\n eq_(len(desktop_topics), 3)\n\n mobile_topics = topics_for(product=self.mobile)\n eq_(len(mobile_topics), 2)",
"def test_topic_list_view_unauthenticated(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context[\"topics\"]), 3)",
"def test_topic_list_view_authenticated(self):\n self.assertTrue(self.client.login(username=\"test\", password=\"test\"))\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.context[\"topics\"]), 4)",
"def test_question_topics(self):\n p = ProductFactory()\n t1 = TopicFactory(slug='doesnotexist', product=p)\n t2 = TopicFactory(slug='cookies', product=p)\n t3 = TopicFactory(slug='sync', product=p)\n\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t2)\n QuestionFactory(topic=t3)\n\n self.refresh()\n\n topic_vals = (\n (t1.slug, 0),\n (t2.slug, 2),\n (t3.slug, 1),\n )\n\n qs = {'a': 1, 'w': 2, 'format': 'json'}\n for topics, number in topic_vals:\n qs.update({'topics': topics})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(number, json.loads(response.content)['total'])",
"def test_list_products(self):\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Check that the context contains 2 products\n self.assertEqual(len(response.context['products']),2)\n\n # Check that the product title appears in the rendered HTML content\n self.assertIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)",
"def test_wiki_topics(self):\n t1 = TopicFactory(slug='doesnotexist')\n t2 = TopicFactory(slug='extant')\n t3 = TopicFactory(slug='tagged')\n\n doc = DocumentFactory(locale=u'en-US', category=10)\n doc.topics.add(t2)\n RevisionFactory(document=doc, is_approved=True)\n\n doc = DocumentFactory(locale=u'en-US', category=10)\n doc.topics.add(t2)\n doc.topics.add(t3)\n RevisionFactory(document=doc, is_approved=True)\n\n self.refresh()\n\n topic_vals = (\n (t1.slug, 0),\n (t2.slug, 2),\n (t3.slug, 1),\n ([t2.slug, t3.slug], 1),\n )\n\n qs = {'a': 1, 'w': 1, 'format': 'json'}\n for topics, number in topic_vals:\n qs.update({'topics': topics})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(number, json.loads(response.content)['total'])",
"def test_list_posts_on_topic(self):\n url = reverse('post-list', args=[self.topic1.url_name])\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 2)",
"def test_topic_retrieval_authenticated(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.free_token.key)\n response = self.client.get('/topics/', format='json')\n data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data['count'],3)\n self.assertTrue({'name': 'Topic 1', 'description': 'The first topic.'} in data['results'])\n self.assertTrue({'name': 'Topic 2', 'description': 'The second topic.'} in data['results'])",
"def test_retrieve_topic_viewset(self):\n\n topic = TopicFactory(author=self.user)\n response = self.client.get(reverse('api:topics-detail', kwargs={'topic_id': topic.id}))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('title'), topic.title)",
"def test_get_product_detail(self):\n\n response = self.client.get(reverse('website:product_details', args=(1,)))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Product title appears in HTML response content\n self.assertIn('<h1>Test Product</h1>'.encode(), response.content)\n self.assertNotIn('<h1>Test Product2</h1>'.encode(), response.content)",
"def test_question_feed_with_product_and_topic(self):\n p = ProductFactory()\n t = TopicFactory(product=p)\n url = urlparams(reverse(\"questions.list\", args=[p.slug]), topic=t.slug)\n res = self.client.get(url)\n self.assertEqual(200, res.status_code)\n doc = pq(res.content)\n\n feed_links = doc('link[type=\"application/atom+xml\"]')\n feed = feed_links[0]\n self.assertEqual(1, len(feed_links))\n self.assertEqual(\"Recently updated questions\", feed.attrib[\"title\"])\n self.assertEqual(\n urlparams(\"/en-US/questions/feed\", product=p.slug, topic=t.slug), feed.attrib[\"href\"]\n )\n self.assertEqual(200, self.client.get(feed.attrib[\"href\"]).status_code)",
"def test_59_help_tos(self):\r\n url = \"/help/terms-of-use\"\r\n res = self.app.get(url, follow_redirects=True)\r\n err_msg = \"There should be a TOS page\"\r\n assert \"Terms for use\" in res.data, err_msg",
"def test_get_posts_by_topic(self):\n data = {'topic': self.topic.id}\n response = self.client.get(reverse('api:posts-list'), data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data.get('results')), 2)",
"def test_view_product_description_page(self):\r\n response = self.client.get('/product_description/nutella/')\r\n self.assertEqual(response.status_code, 200)",
"def test_wiki_topics_inherit(self):\n doc = DocumentFactory(locale=u'en-US', category=10)\n doc.topics.add(TopicFactory(slug='extant'))\n RevisionFactory(document=doc, is_approved=True)\n\n translated = DocumentFactory(locale=u'es', parent=doc, category=10)\n RevisionFactory(document=translated, is_approved=True)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 1, 'format': 'json', 'topics': 'extant'}\n response = self.client.get(reverse('search.advanced', locale='es'), qs)\n eq_(1, json.loads(response.content)['total'])",
"def test_list_products_logged_in(self):\n\n # Log in seller\n self.client.login(username=\"test_seller\", password=\"secret\")\n\n # Issue a GET request\n response = self.client.get(reverse('website:products'))\n\n # Check that the response is 200\n self.assertEqual(response.status_code, 200)\n\n # Check that the logged in user does not recieve any products to view because the only products available are the ones they have for sale\n self.assertEqual(len(response.context['products']),0)\n\n # Check that the product title appears in the rendered HTML content\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product</h5>'.encode(), response.content)\n self.assertNotIn('<h5 class=\"card-title mb-0\">Test Product2</h5>'.encode(), response.content)",
"def test_single_topic_retrieval_authenticated(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.free_token.key)\n response = self.client.get('/topic/Topic 1/', format='json')\n data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data['name'],'Topic 1')\n self.assertEqual(data['description'],'The first topic.')",
"def test_product_category_view(self):\n\n response = self.client.get(reverse('website:product_categories'))\n\n # Check that the response is 200 ok\n self.assertEqual(response.status_code, 200)\n\n # Check that the rendered context contains 2 product types\n self.assertEqual(len(response.context['product_categories']),2)\n\n # Product title appears in HTML response content\n self.assertIn('<h6 class=\"mb-1\">Test Product</h6>'.encode(), response.content)\n self.assertIn('<h6 class=\"mb-1\">Test Product2</h6>'.encode(), response.content)\n self.assertIn('<h4 class=\"card-title\">Test Product Type <p class=\"badge badge-primary ml-2\">1</p></h4>'.encode(), response.content)\n self.assertIn('<h4 class=\"card-title\">Test Product Type2 <p class=\"badge badge-primary ml-2\">1</p></h4>'.encode(), response.content)",
"def test_get_post_on_topic(self):\n url = reverse(\n 'post-detail',\n args=[\n self.topic1.url_name,\n self.post1.id\n ]\n )\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n data = response.data\n self.assertEqual(data['id'], self.post1.id)\n self.assertEqual(data['title'], self.post1.title)",
"def test_topic_viewset_list(self):\n TopicFactory()\n TopicFactory(title='Test Title2',\n body='Test body',\n description='Test description',\n section=Topic.CONVERSATION)\n TopicFactory(title='Test Title3',\n body='Test body',\n description='Test description',\n section=Topic.CONVERSATION)\n data = {'section': Topic.CONVERSATION}\n response = self.client.get(reverse('api:topics-by-section'), data=data)\n self.assertTrue(response.status_code == status.HTTP_200_OK)\n self.assertEqual(len(response.data), 3)\n data = {'section': Topic.IDEAS}\n response = self.client.get(reverse('api:topics-by-section'), data)\n self.assertTrue(response.status_code == status.HTTP_200_OK)\n self.assertEqual(len(response.data), 0)",
"def test_questions_page(self):\n # import pdb\n # pdb.set_trace()\n\n result = self.client.get('/questions')\n self.assertIn('<h2>Submit A Question</h2>', result.data)\n\n print \"DONE WITH QUESTIONS PAGE CHECK\"",
"def test_single_topic_retrieval_unauthenticated(self):\n response = self.client.get('/topic/Topic 1/', format='json')\n data = json.loads(response.content)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertTrue('detail' in data)",
"def test_product_detail_view(client, sample_product, user_company, authenticated_user):\n products = Product.objects.all()\n for product in products:\n product_detail_view = reverse('product-detail', kwargs={'pk': product.pk})\n response = client.get(product_detail_view)\n #The view should return 200 for each product that exists\n assert response.status_code == 200\n content = response.content.decode(response.charset)\n #With content specific for each product\n assert product.name in content\n #checking for \"page not found\" if product does not exist\n product_not_exist_detail_view = reverse('product-detail', kwargs={'pk':104})\n response = client.get(product_not_exist_detail_view)\n assert response.status_code == 404 \n #Authenticated user but not the owner of the product returns 404\n if authenticated_user and not user_company:\n product_detail_view = reverse('product-detail', kwargs={'pk': 6})\n response = client.get(product_detail_view)\n assert response.status_code == 404",
"def test_publication_view(self):\n \n test_response = self.client.get('/papers/14-3-3-proteins-a-number-of-functions-for-a-numbered-protein/')\n self.assertEqual(test_response.status_code, 200)\n self.assertTrue('publication' in test_response.context) \n self.assertTemplateUsed(test_response, 'paper-detail.html')\n self.assertTemplateUsed(test_response, 'base.html') \n self.assertTemplateUsed(test_response, 'disqus_snippet.html') \n self.assertTemplateUsed(test_response, 'paper_sharing_widgets.html')\n self.assertTemplateUsed(test_response, 'altmetric_snippet.html') \n self.assertEqual(test_response.context['publication'].pk, 1)\n self.assertEqual(test_response.context['publication'].title, u'14-3-3 proteins: a number of functions for a numbered protein.')",
"def test_product_list(self):\n self.url = reverse(\"product-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)",
"def test_list_product(self):\n url = reverse('products:list')\n response = self.client.get(url)\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['name'], 'Eggs')",
"def test_faq_view(self):\n response = self.client.get(url_for('main.faq'))\n self.assertEqual(response.status_code, 200)",
"def test_positive(self):\n self._login()\n url = self._reverse_topic(self.test_topic.id)\n response = self.client.post(\n url,\n {\"text\": \"Create topic via API\"},\n )\n self.assertIn(reverse(\"learning_logs:topics\"), response.headers[\"location\"])\n self.assertEqual(response.status_code, 302)",
"def test_is_product_show(self):\n\n self.selenium.get(\"http://localhost:8000/\")\n response = self.selenium.find_element(By.ID, \"id_product_name\")\n response.send_keys(\"frosties\")\n response.send_keys(Keys.ENTER)\n self.assertTemplateUsed('selected_product.html')",
"def test_get_all_topics(mock_send_message_json):\n assert OranDmaap.get_all_topics_url == f\"{BASE_URL}/topics/listAll\""
] | [
"0.73147404",
"0.68919265",
"0.67828816",
"0.673037",
"0.6644931",
"0.6602935",
"0.658075",
"0.65539205",
"0.6488625",
"0.6419945",
"0.63692284",
"0.62638444",
"0.6259497",
"0.6241161",
"0.6187955",
"0.61411256",
"0.6132648",
"0.6115197",
"0.61077875",
"0.61029375",
"0.609717",
"0.60721636",
"0.6040843",
"0.6030072",
"0.6013212",
"0.60033256",
"0.5979004",
"0.59692276",
"0.59518373",
"0.5951207"
] | 0.7508032 | 0 |
Return time as a string | def timestr():
return dt.strftime(dt.now(),'%H:%M:%S') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_time() -> str:\n return strftime(\"%H:%M:%S\")",
"def get_time() -> str:\r\n return time.strftime(TIMEFORMAT)",
"def _time_str(self):\n try:\n if not self._time:\n raise ValueError\n format_ = '%a, %d %b %Y %H:%M:%S'\n return datetime.fromtimestamp(float(self._time)).strftime(format_)\n except ValueError:\n return plastic_date()",
"def get_time_str():\n return time.strftime(time_fmt)",
"def time():\n return datetime.datetime.now().strftime(\"%Y%m%dT%H%M%SZ\")",
"def get_time(self):\n return time.strftime(\"%d/%m/%y %M:%H:%S\", self.time)",
"def get_time(self):\n x = time.localtime()\n return ''.join([\n str(x[0]).rjust(4, '0'), '/', str(x[1]).rjust(2, '0'), '/',\n str(x[2]).rjust(2, '0'), ' ', str(x[3]).rjust(2, '0'), ':',\n str(x[4]).rjust(2, '0'), ':', str(x[5]).rjust(2, '0')])",
"def get_time(self):\n return \"%02u:%02u:%02u (%d)\" % self.rtc.datetime()[4:8]",
"def getTimeString():\n\tfrom time import strftime\n\treturn strftime(\"%d-%m-%Y__%H-%M-%S\")",
"def time_str(self):\n return datetime.now().strftime('%c')",
"def get_time():\n return time.strftime(\"%Y%m%d-%H%M%S\")",
"def get_time():\r\n return datetime.datetime.now().strftime(\"%H\")+\":\"+datetime.datetime.now().strftime(\"%M\")+\":\"+datetime.datetime.now().strftime(\"%S\")",
"def get_time(self):\n return ''",
"def get_time_string(self):\n return f\"{self.year} {self.month:02} \" \\\n f\"{self.start_day:02} {self.start_hour:02} 00 {self.get_duration():6}\"",
"def time(self) -> str:\n return typing.cast(\n str,\n self._properties.get(\"time\"),\n )",
"def friendly_time(time=None):\n if time is None:\n time = pass_good_until()\n return time.strftime(config.TIME_PRINT_FORMAT)",
"def datetime_to_str(time):\n return '{hour}:{min}:{second}:{millisecond}'.format(\n hour=time.hour,\n min=time.minute, \n second=time.second,\n millisecond=str(int(round(time.microsecond / 1000.0))),\n )",
"def get_time_human_readable():\n return time.strftime(\"%A, %H:%M\")",
"def get_time_string(time):\r\n mins = time // 60\r\n secs = time % 60\r\n time_string = ''\r\n\r\n if mins < 10:\r\n time_string += ' '\r\n elif mins < 100:\r\n time_string += ' '\r\n\r\n time_string += '%dm ' % mins\r\n\r\n if secs < 10:\r\n time_string += ' '\r\n\r\n time_string += '%ds' % secs\r\n\r\n return time_string",
"def format_time(self, time):\n hours = time // 3600\n time = time - hours*3600\n minutes = time // 60\n seconds = time - minutes*60\n return ('%d:%d:%d' %(hours, minutes, seconds))",
"def time(self):\r\n now = datetime.datetime.now()\r\n month = rfc822._monthnames[now.month - 1].capitalize()\r\n return ('[%02d/%s/%04d:%02d:%02d:%02d]' %\r\n (now.day, month, now.year, now.hour, now.minute, now.second))",
"def time_to_string(value):\n if value == gst.CLOCK_TIME_NONE:\n return \"--:--:--.---\"\n ms = value / gst.MSECOND\n sec = ms / 1000\n ms = ms % 1000\n mins = sec / 60\n sec = sec % 60\n hours = mins / 60\n mins = mins % 60\n return \"%02d:%02d:%02d.%03d\" % (hours, mins, sec, ms)",
"def nowStr(time=None):\n if time is None:\n time = datetime.now().time()\n if time.minute < 10:\n return time.strftime(\"%H ноль %m\")\n else:\n return time.strftime(\"%H %M\")",
"def create_time_str(self):\n ts = time.localtime(self._create_ts)\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", ts)",
"def time_now() -> str:\n return datetime_to_str(datetime_now())",
"def get_time(self):\n now = datetime.datetime.now()\n hour = 12 if now.hour % 12 == 0 else now.hour % 12\n meridiem = \"AM\" if now.hour < 12 else \"PM\"\n return \"%d:%02d %s\" % (hour, now.minute, meridiem)",
"def time_str(num):\n if num > 3600:\n return \"%0.2f hrs\" % (num / 3600)\n elif num > 60:\n return \"%0.2f mins\" % (num / 60)\n else:\n return \"%d seconds\" % num",
"def time2shortstr(time):\n return time.strftime(MEM_SHORT_TIME_FORMAT)",
"def format_time(self, time):\n hh = time[0:2]\n mm = time[2:4]\n ss = time[4:]\n return \"%s:%s:%s UTC\" % (hh,mm,ss)",
"def get_time():\r\n \r\n dt = datetime.datetime.now()\r\n dt_parsed = dt.strftime(\"%Y-%m-%d %H:%M:%S\")\r\n return dt_parsed"
] | [
"0.8559975",
"0.8342602",
"0.8303421",
"0.8176281",
"0.80305576",
"0.7968672",
"0.79652303",
"0.79423606",
"0.79199684",
"0.79052913",
"0.7892776",
"0.7873904",
"0.7864454",
"0.785743",
"0.7819786",
"0.7692401",
"0.76851183",
"0.76786953",
"0.7673926",
"0.760527",
"0.7554719",
"0.7543063",
"0.75088876",
"0.7483111",
"0.74726576",
"0.74669933",
"0.7454152",
"0.74520653",
"0.7433724",
"0.7419259"
] | 0.83846116 | 1 |
Create or replace file indicated by filename, as a yaml serialization of dict d. | def save_file(filename,d):
f = open(filename, 'w')
yaml.dump(d, f)
f.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_file(filename,d):\n if os.path.exists(filename):\n f_old = open(filename,'r')\n d_old = yaml.load(f_old)\n f_old.close()\n d_old.update(d)\n d = d_old\n f = open(filename, 'w')\n yaml.dump(d, f)\n f.close()",
"def save(self, filename):\n with open(filename, 'w') as f:\n yaml.dump(self.to_dict(), f, sort_keys=False)",
"def write_yaml(fname: str, data: dict) -> None:\n try:\n with open(fname, 'w') as f:\n yaml.safe_dump(data, f, default_flow_style=False)\n except IOError as e:\n print(f\"Cannot write YAML file {fname}\")\n print(f\"IOError: {e}\")",
"def write_yaml(yaml_config: Dict[str, Any], filename: str) -> None:\n\n with open(filename, 'w') as outfile:\n yaml.dump(yaml_config, outfile, default_flow_style=False,\n sort_keys=False)",
"def save(self, filename=None):\n name = filename or self.filename\n with open(name, \"w\") as stream:\n yaml.dump(self.data, stream, default_flow_style=False)",
"def generate_write_yaml_to_file(file_name):\n def write_yaml(config):\n with open(file_name, 'w+') as fh:\n fh.write(yaml.dump(config))\n return write_yaml",
"def write_yaml_file(loaded_yaml, filepath, default_flow_style=False):\n with open(filepath, 'w') as outfile:\n yaml.dump(loaded_yaml, outfile, default_flow_style=default_flow_style)",
"def save(self, filepath):\n writer = json.dump if Config.isjson(filepath) else yaml.dump\n with open(filepath, 'w') as f:\n writer(dict(self), f)",
"def save_yaml(dict_file, yaml_path):\n with open(yaml_path, \"w\") as file:\n documents = yaml.dump(dict_file, file)",
"def write_data(filename: str, old_position: dict, new_position: dict) -> None:\n\n combined = {\"old_positions\": old_position, \"new_positions\": new_position}\n\n with open(filename, \"w\") as f:\n yaml.dump(combined, f)\n\n return",
"def dump_yaml(file_path, data):\n\n with open(os.path.abspath(os.path.expanduser(file_path)), \"w\") as f:\n yaml.safe_dump(data, f, default_flow_style=False)\n\n return file_path",
"def to_yaml(self, filepath: typing.Union[str, pathlib.Path]) -> None:\n spec = self.to_spec()\n yaml = YAML()\n yaml.default_flow_style = False\n with open(filepath, \"w\") as fd:\n yaml.dump(spec, fd)",
"def dump(filename: Path) -> None:\n import yaml\n\n dumped_str = yaml.dump_all(\n [data_dict],\n Dumper=RegressionYamlDumper,\n default_flow_style=False,\n allow_unicode=True,\n indent=2,\n encoding=\"utf-8\",\n )\n with filename.open(\"wb\") as f:\n f.write(dumped_str)",
"def create_yaml_files(target_directory, data):\n import os\n import yaml\n\n for k, v in data.items():\n file = os.path.join(target_directory, '.'.join((k, \"yml\")))\n\n with open(file, 'w') as yaml_file:\n yaml.dump(v, yaml_file, default_flow_style=False)",
"def save(config: dict, out_dir: str, filename: str = \"config.yaml\"):\n assert filename.endswith(\".yaml\")\n with open(os.path.join(out_dir, filename), \"w+\") as f:\n f.write(yaml.dump(config))",
"def save(dikt):\n with open(SAVE_FILE_NAME, 'w') as save_file:\n yaml.safe_dump(dikt, save_file)",
"def write(self, fname=None):\n fname = fname or self.path\n with open(fname, \"w\") as fl:\n yaml.dump(self._as_dict(), fl)\n self.path = Path(fname)",
"def _load_yaml(source_dir, file_name):\n return yaml.dump(utils.load_yaml_dict(os.path.join(source_dir, file_name)))",
"def write(self, filename, mode=\"w\"):\n d = self._result_dict\n val = yaml.safe_dump(d, default_flow_style=False)\n\n with open(str(filename), mode) as outfile:\n outfile.write(val)",
"def create_yaml(response, file):\n\n output_file = open(file, \"w\")\n dest_file = os.path.join(export_path, file)\n yaml.dump(response, output_file, default_flow_style=False, allow_unicode=True)\n\n if os.path.exists(dest_file):\n os.remove(dest_file)\n\n shutil.move(file, export_path)\n\n return",
"def to_yaml_file(self, file_path, **kwargs):\n Params._check_yaml_import()\n import yaml\n\n try:\n with Params._open_file(file_path, \"w\") as fp:\n yaml.safe_dump(dict(self), stream=fp, **kwargs)\n return file_path\n except Exception as err:\n print(\"Failed to write {} instance to: {}\".format(self.__class__.__name__, file_path), err)\n return None",
"def save_yaml_to_file(i):\n\n import yaml\n\n fn = i['yaml_file']\n d = i['dict']\n\n try:\n # If using just dump and keys are in unicode,\n # pyyaml adds warning and makes produced yaml unparsable\n s = yaml.safe_dump(d)\n except Exception as e:\n return {'return': 1, 'error': 'problem converting dict to YAML ('+format(e)+')'}\n\n return save_text_file({'text_file': fn, 'string': s})",
"def saveToFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n stream = open(path,\"w\")\n yaml.dump(self.parameters(),stream)",
"def store_as_yaml(dataset, dataset_file):\n\n with open(dataset_file, 'w') as outfile:\n yaml.safe_dump(dataset, outfile, default_flow_style=False)",
"def load(self, filename=None):\n prefix = os.path.dirname(filename)\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n\n name = filename or self.filename\n\n if os.path.exists(name):\n with open(name, 'rb') as dbfile:\n self.data = yaml.safe_load(dbfile) or dict()",
"def conversion_yaml():\r\n data ={\r\n 'name': 'george',\r\n 'age': 16,\r\n 'friends':\r\n [{'name': 'marry', 'age': 16}, {'name': 'jack', 'age': 17}]\r\n }\r\n yaml_data = yaml.dump(data)\r\n dirname = os.path.dirname(os.path.dirname(__file__))\r\n # data_dir = os.path.join(dirname, 'data')\r\n data_dir = '/'.join([dirname, 'data'])\r\n file_path = data_dir + '/' + 'test.yaml'\r\n with open(file_path, 'w') as fw:\r\n fw.write(yaml_data)\r\n print(yaml_data)",
"def save_to_yml_file(self):\n yml_filename = self.get_yml_filename()\n\n if os.path.exists(yml_filename) and not self.force:\n logger.warning(\n f\"[red]File {yml_filename} already exists, not writing. To override add --force.[/red]\"\n )\n else:\n if self.force:\n logger.info(\n f\"[yellow]Force flag is used. Overriding {yml_filename} if it exists.[/yellow]\"\n )\n if self.metadata:\n self.metadata.save_dict_as_yaml_integration_file(yml_filename)",
"def append_dict_to_file(path, d):\n if os.path.isfile(path):\n with open(path, 'r') as f:\n data = eval(f.read())\n data = dict(data.items() + d.items())\n else:\n data = d\n with open(path, 'w') as f:\n f.write(str(data))",
"def cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n\n _merge_a_into_b(yaml_cfg, cfg)",
"def cfg_from_file(filename):\n import yaml\n with open(filename, 'r') as f:\n yaml_cfg = edict(yaml.load(f))\n _merge_a_into_b(yaml_cfg, __C)"
] | [
"0.79054904",
"0.7014788",
"0.6703491",
"0.66639715",
"0.6618562",
"0.65786463",
"0.6543921",
"0.6513153",
"0.64326257",
"0.6422186",
"0.64148986",
"0.63806546",
"0.636776",
"0.62917686",
"0.6282388",
"0.62787414",
"0.62067103",
"0.61989194",
"0.61850554",
"0.6184453",
"0.6181673",
"0.61710757",
"0.61543494",
"0.61318934",
"0.6114263",
"0.6096166",
"0.60941905",
"0.60761595",
"0.6067438",
"0.603749"
] | 0.7766528 | 1 |
Check key validity. Keys may contain upper case letters, lower case letters, numbers, dashes (-), underscores (_), and periods (.). Any whitespace or any character in the string.punctuation library (other than -, _, or .) results in an invalid key. | def is_key_valid(self,key):
if not key or any(map(lambda s: s in key,space_chars))\
or any(map(lambda s: s in key,bad_chars)):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_valid_key(self, key):\r\n\r\n # Check the length\r\n if len(key) > 250:\r\n return False\r\n\r\n # Check that there are no spaces or control characters\r\n for char in key:\r\n if ord(char) < 33 or ord(char) == 127:\r\n return False\r\n\r\n return True",
"def isValidKey(key):\n return True",
"def _check_key(key): # type: (str) -> None\n if not key:\n raise ValueError('Key must not be empty.')\n if '.' in key:\n raise ValueError('Key must not contain dots.')",
"def key_error_message(self,key):\n if not key:\n return 'key is blank.'\n elif any(map(lambda s: s in key,space_chars)):\n return '\"{}\" contains whitespace.'.format(key)\n elif any(map(lambda s: s in key,bad_chars)):\n return '\"{}\" contains special characters.'.format(key)",
"def check_valid_key_name(name):\n if type(name) not in [str]:\n return False\n bad_chars = [\"*\", \".\", \"&&&&\"]\n for k in bad_chars:\n if k in name:\n return False\n return True",
"def _is_valid_key(self, key):\n\t\t\n\t\t# If the key is not a string\n\t\tif not isinstance(key, str):\n\t\t\treturn False\n\t\telse:\n\t\t\tkey = str.upper(key)\n\t\t\n\t\t# If the given key does not match the standard notation XY\n\t\tif len(key) != 2:\n\t\t\treturn False\n\t\t\n\t\t# If the key is out of the board\n\t\tif key[0] not in self.columns or key[1] not in self.rows:\n\t\t\treturn False\n\t\t\n\t\t# Otherwise the key is valid\n\t\treturn True",
"def test_is_valid_label_key_invalid_input():\n # test length violations\n assert not is_valid_label_key(key=None) # Too short\n assert not is_valid_label_key(key=\"\") # Too short\n assert not is_valid_label_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_label_key(key=\"/n\") # prefix too short\n assert not is_valid_label_key(key=\"p/\") # name too short\n assert not is_valid_label_key(key=\"a\" * 254) # name too long\n assert not is_valid_label_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_label_key(key=\"-a\")\n assert not is_valid_label_key(key=\".b\")\n assert not is_valid_label_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_key(key=\"a-\")\n assert not is_valid_label_key(key=\"b.\")\n assert not is_valid_label_key(key=\"c \")\n assert not is_valid_label_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_key(key=\"a$$a\")\n assert not is_valid_label_key(key=\"b b\")",
"def test_is_valid_label_key_valid_input():\n # test valid label keys\n assert is_valid_label_key(key=\"l0l\")\n assert is_valid_label_key(key=\"l0L\")\n assert is_valid_label_key(key=\"L-l\")\n assert is_valid_label_key(key=\"L.L\")\n assert is_valid_label_key(key=\"4-you\")\n assert is_valid_label_key(key=\"you.2\")\n assert is_valid_label_key(key=\"p/n\")\n assert is_valid_label_key(key=\"prefix/you.2\")\n assert is_valid_label_key(key=\"how.sad/to-see\")\n assert is_valid_label_key(key=f\"{'d'*253}/{'n'*63}\")",
"def IsKey(possibleKey):\n if FirstCharIsPunctuationChar(possibleKey):\n return possibleKey[1] == '|'\n else:\n return possibleKey[0] == '|'",
"def test_invalid_chars_ssck(self):\r\n valid_base = SlashSeparatedCourseKey(u'org.dept-1%2', u'course.sub-2%3', u'run.faster-4%5')\r\n for key in SlashSeparatedCourseKey.KEY_FIELDS:\r\n with self.assertRaises(InvalidKeyError):\r\n # this ends up calling the constructor where the legality check should occur\r\n valid_base.replace(**{key: u'funny thing'})",
"def test_is_valid_annotation_key_invalid_input():\n # test length violations\n assert not is_valid_annotation_key(key=None) # Too short\n assert not is_valid_annotation_key(key=\"\") # Too short\n assert not is_valid_annotation_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_annotation_key(key=\"/n\") # prefix too short\n assert not is_valid_annotation_key(key=\"p/\") # name too short\n assert not is_valid_annotation_key(key=\"a\" * 254) # name too long\n assert not is_valid_annotation_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_annotation_key(key=\"-a\")\n assert not is_valid_annotation_key(key=\".b\")\n assert not is_valid_annotation_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_annotation_key(key=\"a-\")\n assert not is_valid_annotation_key(key=\"b.\")\n assert not is_valid_annotation_key(key=\"c \")\n assert not is_valid_annotation_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_annotation_key(key=\"a$$a\")\n assert not is_valid_annotation_key(key=\"b b\")",
"def clean_key_name(self):\n key = self.cleaned_data['key_name']\n # Ensure key starts with prefix\n if not key.startswith(self.get_key_prefix()):\n raise forms.ValidationError('Key does not have required prefix.')\n # Ensure key exists\n if not self.get_upload_key():\n raise forms.ValidationError('Key does not exist.')\n return key",
"def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]",
"def test_is_valid_annotation_key_valid_input():\n # test valid label keys\n assert is_valid_annotation_key(key=\"l0l\")\n assert is_valid_annotation_key(key=\"l0L\")\n assert is_valid_annotation_key(key=\"L-l\")\n assert is_valid_annotation_key(key=\"L.L\")\n assert is_valid_annotation_key(key=\"4-you\")\n assert is_valid_annotation_key(key=\"you.2\")\n assert is_valid_annotation_key(key=\"p/n\")\n assert is_valid_annotation_key(key=\"prefix/you.2\")\n assert is_valid_annotation_key(key=\"how.sad/to-see\")\n assert is_valid_annotation_key(key=f\"{'d'*253}/{'n'*63}\")",
"def verify_hack_key(self):\r\n\t\tself.percent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. \r\n\t\tif self.percent_english > 50:\r\n\t\t\tself.hack_plausible = True",
"def _check_key_val(key, val):\n if any(ii in val for ii in ['-', '_', '/']):\n raise ValueError(\"Unallowed `-`, `_`, or `/` found in key/value pair\"\n \" %s: %s\" % (key, val))\n return key, val",
"def _validate_key(self, key):\n if isinstance(key, str):\n key = unicode(key, 'utf-8')\n elif not isinstance(key, unicode):\n raise TypeError(\n \"`key` must be `str` or `unicode`, not `{}`\".format(\n key.__class__.__name__)\n )\n return key",
"def _validate_string(self, path, value, value_is_key=False):\r\n value = re.sub('[/$#{}._|*=\\-]', ' ', value)\r\n\r\n tokens = nltk.tokenize.word_tokenize(value)\r\n for raw_token in tokens:\r\n if raw_token.startswith(\"'\"):\r\n raw_token = raw_token[1:]\r\n if self.corpus.validate_token(raw_token):\r\n continue\r\n sub_tokens = Validator.camel_case_split(raw_token)\r\n ret = True\r\n for sub_token in sub_tokens:\r\n ret = ret and self.corpus.validate_token(sub_token)\r\n\r\n if not ret:\r\n self.errors.append({\r\n \"isKey\": value_is_key,\r\n \"path\": path,\r\n \"typo\": raw_token,\r\n })",
"def test_valid_key(self):\n f = lws.valid_data_key\n assert f('string', int, r'string') is False\n assert f('string', str, r'test') is False\n assert f(123, int, '123') is False\n assert f(123.00, float, '123') is False\n assert f('123', str, r'[0-9]*') is True",
"def test_invalid_chars_location(self):\r\n course_key = SlashSeparatedCourseKey(u'org.dept-1%2', u'course.sub-2%3', u'run.faster-4%5')\r\n valid_base = course_key.make_usage_key('tomato-again%9', 'block-head:sub-4%9')\r\n for key in SlashSeparatedCourseKey.KEY_FIELDS:\r\n with self.assertRaises(InvalidKeyError):\r\n # this ends up calling the constructor where the legality check should occur\r\n valid_base.replace(**{key: u'funny thing'})",
"def test_invalid_key(self):\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('221b=\"starts with number\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('_=\"not assignable\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('o-o=\"invalid character\"')\n assert 'Invalid key' in str(err.value)",
"def _check_special_token_identifier(key):\n if not (key.endswith('_token') and key != '_token'):\n raise ValueError('Each key needs to have the form \"name_token\".'\n ' Received {}'.format(key))",
"def _check_key_name(cls, name):\n return (isinstance(name, basestring) and\n re.match('^[A-Za-z][A-Za-z0-9_]*$', name) and\n not hasattr(cls, name))",
"def test_validate_bookmark_key(self):\n valid_names = ['hodor', 'ostrich', 'potato123', 'dashy-key']\n invalid_names = ['thisnameisabittoolong', 'funny/characters', '-flag']\n\n for n in valid_names:\n self.assertTrue(bookmarks.BookmarkManager.validate_key(n))\n\n for n in invalid_names:\n self.assertFalse(bookmarks.BookmarkManager.validate_key(n))",
"def _is_valid(key):\n is_valid = False\n for valid_key in VALID_KEYS:\n if valid_key in key:\n is_valid = True\n for invalid_key in INVALID_KEYS:\n if invalid_key in key:\n is_valid = False\n return is_valid",
"def ShouldCapitalizeKey(keyString):\n if FirstCharIsPunctuationChar(keyString):\n return keyString[2] == \"!\"\n else:\n return keyString[1] == \"!\"",
"def check_symbols(self):\n # this method has a bug in that it never raises KeyError, it raises \n # ValueError instead.\n \n def is_valid(sym):\n # what symbols are valid? (, ), digits, atoms\n if sym in \"()\": return True\n #if sym.isdigit(): return True\n #if sym in _atomic_mass: return True\n if sym.isalnum(): return True\n return False\n\n for t in self._gettokens():\n if not is_valid(t): raise ValueError(\"bad symbol \" + t)\n if t.isalpha() and t not in _atomic_mass: raise KeyError(\"key error \" + t)\n return True",
"def is_key_string(string):\r\n return len(string) > 1 and string[0] == '_'",
"def validate_authkey(value):\n if not len(value) == 32:\n raise ValidationError(\n 'Value must be a string containing 32 alphanumeric characters')",
"def validate_key(key):\r\n try:\r\n secret.Secret(key)\r\n except secret.Secret.InvalidSecret as e:\r\n raise KeyIsInvalid(e.message)"
] | [
"0.7808719",
"0.7333275",
"0.7270658",
"0.69943017",
"0.691718",
"0.68068576",
"0.6782772",
"0.67734516",
"0.6724122",
"0.6623195",
"0.6615757",
"0.6496824",
"0.6456321",
"0.6442815",
"0.64237046",
"0.6417285",
"0.6412873",
"0.6395467",
"0.638066",
"0.6371316",
"0.630625",
"0.62583077",
"0.6243868",
"0.6241449",
"0.6239327",
"0.6236835",
"0.6217854",
"0.6217122",
"0.621302",
"0.62111294"
] | 0.76165247 | 1 |
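For context, a minimal self-contained sketch of the validity check described in the record above; the definitions of space_chars and bad_chars are assumptions inferred from the docstring (whitespace, and string.punctuation minus -, _, .), not values taken from the dataset:

import string

space_chars = list(string.whitespace)                          # assumed definition
bad_chars = [c for c in string.punctuation if c not in "-_."]  # assumed definition

def is_key_valid(key):
    # A key is valid only if it is non-empty and contains no whitespace
    # and no punctuation other than '-', '_' or '.'.
    if not key or any(s in key for s in space_chars) \
            or any(s in key for s in bad_chars):
        return False
    return True

# is_key_valid("my-key_1.0") -> True; is_key_valid("my key!") -> False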
Provide a human-readable error message for bad keys. | def key_error_message(self,key):
if not key:
return 'key is blank.'
elif any(map(lambda s: s in key,space_chars)):
return '"{}" contains whitespace.'.format(key)
elif any(map(lambda s: s in key,bad_chars)):
return '"{}" contains special characters.'.format(key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _KeyMissing(side):\n return 'Key missing from %s' % side",
"def format_error(invalid, doc_type):\n # using string for checking is probably not ideal,\n # but voluptuous does not have specific sub error\n # types for these errors\n if invalid.error_message == 'extra keys not allowed':\n msg = \"Key '{}' is not allowed\".format(invalid.path[0])\n elif invalid.error_message == 'required key not provided':\n msg = \"{} '{}' is missing\".format(doc_type, invalid.path[0])\n else:\n msg = invalid.message\n return {'message': msg, 'field': str(invalid.path[0])}",
"def format_errordict(self, errordict):\n errormsg = f'Comparison between {self.ad1.filename} and {self.ad2.filename}'\n for k, v in errordict.items():\n errormsg += f'\\nComparison failure in {k}'\n errormsg += '\\n' + ('-' * (22 + len(k))) + '\\n'\n errormsg += '\\n '.join(v)\n return errormsg",
"def error(self, key, **kwargs):\n try:\n msg = self.error_messages[key]\n except KeyError:\n class_name = self.__class__.__name__\n raise AssertionError('Error with key={} is not found for class={}'.format(key, class_name))\n message_string = msg.format(**kwargs)\n raise ValidationError(message_string, code=key)",
"def __str__(self):\n base_message = self.base_message.format(filename=self.yaml_file_path)\n error_message = ERROR_MESSAGE.format(key=self.key, expected=self.expected)\n return base_message + error_message",
"def built_error_message(self, key: str, params: List[str]) -> str:\n if key in self.errors:\n error_msg = self.errors[key]\n error_msg = re.sub(\"{..}\", \"\", error_msg)\n return error_msg.format(*params)\n else:\n return \"\"",
"def error_message(self) -> str:\n return pulumi.get(self, \"error_message\")",
"def error_message(self) -> str:\n return pulumi.get(self, \"error_message\")",
"def error_message(self) -> str:\n return pulumi.get(self, \"error_message\")",
"def raise_validation_error(\n problematic_key_set,\n problem_message,\n exception_class\n):\n stringified_keys = '{0} '.format(linesep).join(sorted(problematic_key_set))\n\n tags_error_message = '{problem_message}{linesep}{linesep} {stringified_keys}{linesep}'.format(\n problem_message=problem_message,\n linesep=linesep,\n stringified_keys=stringified_keys\n )\n\n raise exception_class(tags_error_message)",
"def test_is_valid_label_key_invalid_input():\n # test length violations\n assert not is_valid_label_key(key=None) # Too short\n assert not is_valid_label_key(key=\"\") # Too short\n assert not is_valid_label_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_label_key(key=\"/n\") # prefix too short\n assert not is_valid_label_key(key=\"p/\") # name too short\n assert not is_valid_label_key(key=\"a\" * 254) # name too long\n assert not is_valid_label_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_label_key(key=\"-a\")\n assert not is_valid_label_key(key=\".b\")\n assert not is_valid_label_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_key(key=\"a-\")\n assert not is_valid_label_key(key=\"b.\")\n assert not is_valid_label_key(key=\"c \")\n assert not is_valid_label_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_key(key=\"a$$a\")\n assert not is_valid_label_key(key=\"b b\")",
"def get_or_raise(self, key: str, error_message: str = None) -> str:\n v = self.get_or_default(key, None)\n if v is None:\n if error_message is None:\n print(\"Error, '\" + key + \"' is required.\")\n else:\n print(error_message)\n raise CLIMissingKeyError(error_message)\n\n else:\n return v",
"def error_message(self):\n if len(self.unmapped):\n return _('file(s) not in client view')\n if len(self.author_denied):\n restricted_user = self.author\n elif len(self.pusher_denied):\n restricted_user = self.pusher\n elif len(self.foruser_denied):\n restricted_user = self.foruser\n elif len(self.fusion_denied):\n restricted_user = p4gf_const.P4GF_USER\n else:\n restricted_user = _('<unknown>')\n return _(\"user '{user}' not authorized to submit file(s) in git commit\").format(\n user=restricted_user)",
"def test_invalid_key(self):\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('221b=\"starts with number\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('_=\"not assignable\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('o-o=\"invalid character\"')\n assert 'Invalid key' in str(err.value)",
"def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)",
"def error(self, key, value, context, errorclass=InvalidDataError, **values):\n msg_template = self.message_for_key(key, context)\n raise errorclass(msg_template % values, value, key=key, context=context)",
"def errmsg(self, str, prefix=\"** \"):\n raise NotImplementedError(NotImplementedMessage)",
"def error_message(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"error_message\")",
"def make_error(self, key: str, **kwargs) -> ValidationError:\n try:\n msg = self.error_messages[key]\n except KeyError as error:\n class_name = self.__class__.__name__\n message = (\n \"ValidationError raised by `{class_name}`, but error key `{key}` does \"\n \"not exist in the `error_messages` dictionary.\"\n ).format(class_name=class_name, key=key)\n raise AssertionError(message) from error\n if isinstance(msg, (str, bytes)):\n msg = msg.format(**kwargs)\n return ValidationError(msg)",
"def display_errors(self):\r\n\r\n def format_name(field_name):\r\n \"\"\"Formats field names for error display\"\"\"\r\n if field_name == \"celebration_tier\":\r\n return \"{wLargesse{n\"\r\n return \"{w%s{n\" % field_name.capitalize()\r\n\r\n msg = \"Please correct the following errors:\\n\"\r\n msg += \"\\n\".join(\r\n \"%s: {r%s{n\" % (format_name(field), \", \".join(errs))\r\n for field, errs in self.errors.items()\r\n )\r\n return msg",
"def __str__(self):\n return \"Improperly formatted request: \" + self.source + \", resulting in exception: \" + self.bad",
"def error_msg(self) -> str:\n return self.__error_msg",
"def __str__(self):\n\n return _(\"Error creating device mapper device %(dev)r: %(msg)s\") % {\n 'dev': self.dm_name, 'msg': self.err_msg}",
"def get_expected_type_error_message(key, val, expected_type):\n\n return \"Invalid type at key '%s'. Expected '%s' got '%s'.\" \\\n % (str(key), str(expected_type), str(type(val)))",
"def validate_key_throw(*args):\n validation_result = validate_key(*args)\n if not validation_result:\n raise ValueError(str(validation_result))\n return validation_result",
"def test_wrong_key(self):\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro(\"\")\n assert FinderInsideProException.EXCEPTION_TEXT_KEY_NOT_SET in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG\n\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro('aaa')\n assert FinderInsideProException.EXCEPTION_TEXT_WRONG_KEY in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG",
"def error(self, e):\n return \"{}: {} ({})\".format(e.__class__.__name__, e.__doc__, e.message)",
"def keyError():\n d = {}\n d['cat']",
"def __str__(self):\n return \"ERROR: \" + self.error_message",
"def ErrorString(self): # real signature unknown; restored from __doc__\n pass"
] | [
"0.7176447",
"0.65737236",
"0.6520215",
"0.6431213",
"0.63895214",
"0.6355162",
"0.6349505",
"0.6349505",
"0.6349505",
"0.62899137",
"0.6279792",
"0.6276709",
"0.62599",
"0.6225085",
"0.6197143",
"0.61797196",
"0.61726445",
"0.6146756",
"0.6134861",
"0.61106735",
"0.6099486",
"0.60589087",
"0.60280895",
"0.6025504",
"0.6005605",
"0.60007185",
"0.59928566",
"0.5991659",
"0.597775",
"0.59671277"
] | 0.79099184 | 0 |
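As a companion sketch to the record above (same assumed space_chars/bad_chars definitions as in the previous sketch; not part of the dataset), the error-message helper returns a reason string for invalid keys:

import string

space_chars = list(string.whitespace)                          # assumed definition
bad_chars = [c for c in string.punctuation if c not in "-_."]  # assumed definition

def key_error_message(key):
    # Report why a key failed validation, mirroring the record's document.
    if not key:
        return 'key is blank.'
    elif any(s in key for s in space_chars):
        return '"{}" contains whitespace.'.format(key)
    elif any(s in key for s in bad_chars):
        return '"{}" contains special characters.'.format(key)
    return None  # key is valid

# key_error_message("bad key")  -> '"bad key" contains whitespace.'
# key_error_message("bad$key")  -> '"bad$key" contains special characters.'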
Add dictionary data to an HDF5 group, using the same keys as the dictionary. See data_to_h5 for how datatypes are handled. | def dict_to_h5(data, grp, **kwargs):
for key in data:
s_key = str(key)
sub_data = data[key]
data_to_h5(sub_data, grp, s_key, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _save_h5_r(data_dict, h5obj, dlen):\n for key, val in data_dict.items():\n if isinstance(val, dict):\n h5group = h5obj[key] if key in h5obj.keys() else h5obj.create_group(key)\n _save_h5_r(val, h5group, dlen)\n else:\n if val.dtype == 'object':\n sub_dtype = f'float{dlen}' if val[0].dtype == np.float else f'int{dlen}' if val[0].dtype == np.int else val[0].dtype\n dtype = h5py.vlen_dtype(sub_dtype)\n else:\n dtype = f'float{dlen}' if val.dtype == np.float else f'int{dlen}' if val.dtype == np.int else val.dtype\n h5obj.create_dataset(key, data=val, dtype=dtype)",
"def write_h5(fname: str, data: dict) -> None:\n try:\n with h5py.File(fname, 'w') as f:\n recursively_save_dict_contents_to_group(f,'/',data)\n except IOError as e:\n print(f\"Cannot write HDF5 file {fname}\")\n print(f\"IOError: {e}\")",
"def write_dict_h5(dfile, f, k, fbencodings, var_selection=[], mode='a', attrs={}):\n\n #attrs= {'date_time':('units','seconds since 1900-01-01 00:00:00')}\n #attrs = {'observation_id': ('description', 'unique ID for observation'), 'report_id': ('description', 'Link to header information') , 'date_time':('units','seconds since 1900-01-01 00:00:00') }\n \n with h5py.File(dfile,mode) as fd:\n try:\n fd.create_group(k)\n index=numpy.zeros (f[list(f.keys())[0]].shape[0], dtype='S1')\n fd[k].create_dataset('index', data=index)\n except:\n pass\n if not var_selection:\n var_selection=list(f.keys())\n \n string10=numpy.zeros(fixed_string_len,dtype='S1')\n sdict={}\n slist=[]\n\n #groupencodings \n \n for v in var_selection: \n #variables_dic[v] = ''\n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n \n if type(fvv[0]) not in [str,bytes,numpy.bytes_]:\n\n if fvv.dtype !='S1':\n \n fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv[:]\n if attrs: # attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')}\n if v in attrs.keys():\n for kk,vv in attrs[v].items():\n if type(vv) is str: \n fd[k][v].attrs[kk]=numpy.bytes_(vv)\n else:\n fd[k][v].attrs[kk]=vv\n \n if v in ['date_time','report_timestamp','record_timestamp']:\n fd[k][v].attrs['units']=numpy.bytes_('seconds since 1900-01-01 00:00:00') #print ( fk, ' ' , v , ' ' , ) \n \n else:\n fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv[:]\n slen=fvv.shape[1]\n sdict[v]=slen\n if slen not in slist:\n slist.append(slen)\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] )\n except:\n pass \n if v in attrs.keys():\n fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table'])\n \n else:\n sleno=len(fvv[0])\n slen=sleno\n try:\n slen=int(fvv.dtype.descr[0][1].split('S')[1])\n except: \n pass\n\n sdict[v]=slen\n if slen not in slist:\n slist.append(slen)\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] )\n except:\n pass \n \n #x=x.reshape(fvv.shape[0],slen)\n fd[k].create_dataset(v,data=fvv.view('S1').reshape(fvv.shape[0],slen),compression=fbencodings[v]['compression'],chunks=True)\n if v in attrs.keys():\n fd[k][v].attrs['description'] =numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table']) \n \n #variables_dic[v] = f[v].values.dtype\n \n for v in fd[k].keys(): #var_selection:\n l=0 \n \n '''\n if v == 'primary_station_id':\n try:\n fd[k][v].dims[l].attach_scale(fd[k]['index'])\n except:\n pass\n \n try:\n slen = len( fd[k][v][0] )\n stringa=numpy.zeros( slen , dtype='S1')\n fd[k].create_dataset( 'string{}'.format(slen), data= stringa ) \n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n except:\n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n \n \n if v == 'station_name':\n try:\n fd[k][v].dims[l].attach_scale(fd[k]['index'])\n slen = len( fd[k][v][0][0])\n stringa=numpy.zeros( slen , dtype='S1')\n except:\n pass\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data= stringa )\n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n print('done attaching')\n except:\n print('not working')\n \n ''' \n try:\n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n if 'string' not in v and v!='index': \n 
fd[k][v].dims[l].attach_scale(fd[k]['index'])\n #print(v,fvv.ndim,type(fvv[0]))\n if fvv.ndim==2 or type(fvv[0]) in [str,bytes,numpy.bytes_]:\n slen=sdict[v]\n #slen=10\n fd[k][v].dims[1].attach_scale(fd[k]['string{}'.format(slen)])\n except:\n pass\n \n \n \n i=4 \n for v in slist:\n s='string{}'.format(v)\n for a in ['NAME']:\n fd[k][s].attrs[a]=numpy.bytes_('This is a netCDF dimension but not a netCDF variable.')\n \n i+=1\n \n return",
"def h5_to_dict(grp, **kwargs):\n data = {}\n for key in grp.keys():\n try:\n e_key = eval(key, {})\n except:\n e_key = key\n\n data[e_key] = h5_to_data(grp[key], **kwargs)\n \n return data",
"def _write(self, h5_group, _) -> None:\n # Convert text from unicode to byte-string to avoid error in h5py\n data = np.asarray(self.data, dtype=np.string_)\n h5_field = h5_group.create_dataset(h5_group.attrs[\"fieldname\"], self.data.shape, dtype=data.dtype)\n h5_field[...] = data",
"def add_table_to_hdf(self, run_group, type_dict, data, name = 'bla',filename = []):\n\t\tif filename == []:\n\t\t\tfilename = self.edf_operator.inputFileName\n\t\t\t\n\t\tthis_table = self.h5f.createTable(run_group, name, type_dict, '%s in file %s' % (name, self.edf_operator.inputFileName))\n\t\t\n\t\trow = this_table.row\n\t\tfor r in data:\n\t\t\tfor par in r.keys():\n\t\t\t\trow[par] = r[par]\n\t\t\trow.append()\n\t\tthis_table.flush()",
"def write_dict_h6(dfile, f, k, fbencodings, var_selection=[], mode='a', attrs={}):\n\n #attrs= {'date_time':('units','seconds since 1900-01-01 00:00:00')}\n #attrs = {'observation_id': ('description', 'unique ID for observation'), 'report_id': ('description', 'Link to header information') , 'date_time':('units','seconds since 1900-01-01 00:00:00') }\n \n with h5py.File(dfile,mode) as fd:\n try:\n fd.create_group(k)\n# if type(f[v]) == pd.core.frame.DataFrame:\n# index=numpy.zeros (f[f.columns[0]].shape[0], dtype='S1')\n# else:\n index=numpy.zeros (f[list(f.keys())[0]].shape[0], dtype='S1')\n \n fd[k].create_dataset('index', data=index)\n except:\n pass\n if not var_selection:\n var_selection=list(f.keys())\n \n string10=numpy.zeros(fixed_string_len,dtype='S1')\n sdict={}\n slist=[]\n\n #groupencodings \n \n for v in var_selection: \n #variables_dic[v] = ''\n \n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n if type(fvv[0]) not in [str,bytes,numpy.bytes_]:\n if fvv.dtype !='S1':\n \n fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv #f[v][:]\n if attrs: # attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')}\n if v in attrs.keys():\n fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table'])\n \n if v == 'date_time':\n fd[k][v].attrs['units']=numpy.bytes_('seconds since 1900-01-01 00:00:00') #print ( fk, ' ' , v , ' ' , ) \n \n else:\n fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv #f[v][:]\n if v in attrs.keys():\n fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table'])\n \n else:\n sleno=len(fvv[0])\n slen=sleno\n x=numpy.array(fvv,dtype='S').view('S1')\n slen=x.shape[0]//fvv.shape[0]\n sdict[v]=slen\n if slen not in slist:\n slist.append(slen)\n \n \n try:\n fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] )\n except:\n pass \n \n x=x.reshape(fvv.shape[0],slen)\n fd[k].create_dataset(v,data=x,compression=fbencodings[v]['compression'],chunks=True)\n if v in attrs.keys():\n fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table']) \n \n #variables_dic[v] = fvv.dtype\n \n for v in fd[k].keys(): #var_selection:\n l=0 \n try:\n if 'string' not in v and v!='index': \n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n fd[k][v].dims[l].attach_scale(fd[k]['index'])\n if type(fvv[0]) in [str,bytes,numpy.bytes_]:\n slen=sdict[v]\n #slen=10\n fd[k][v].dims[1].attach_scale(fd[k]['string{}'.format(slen)])\n except MemoryError:\n pass\n \n \n \n i=4 \n for v in slist:\n s='string{}'.format(v)\n for a in ['NAME']:\n fd[k][s].attrs[a]=numpy.bytes_('This is a netCDF dimension but not a netCDF variable.')\n \n i+=1\n \n return",
"def __setitem__(self, key, value):\n if hasattr(value, \"to_hdf\") & (\n not isinstance(value, (pandas.DataFrame, pandas.Series))\n ):\n value.to_hdf(self, key)\n return\n\n use_json = True\n if (\n isinstance(value, (list, np.ndarray))\n and len(value) > 0\n and isinstance(value[0], (list, np.ndarray))\n and len(value[0]) > 0\n and not isinstance(value[0][0], str)\n and _is_ragged_in_1st_dim_only(value)\n ):\n # if the sub-arrays in value all share shape[1:], h5io comes up with a more efficient storage format than\n # just writing a dataset for each element, by concatenating along the first axis and storing the indices\n # where to break the concatenated array again\n value = np.array([np.asarray(v) for v in value], dtype=object)\n use_json = False\n elif isinstance(value, tuple):\n value = list(value)\n write_hdf5(\n self.file_name,\n value,\n title=self._get_h5_path(key),\n overwrite=\"update\",\n use_json=use_json,\n )",
"def HDF5_to_HDF5(self, **kwds):\n # split extension from HDF5 file\n if isinstance(self.filename, str):\n fileBasename,fileExtension=os.path.splitext(self.filename)\n else:\n fileBasename,fileExtension=os.path.splitext(self.filename.filename)\n # output HDF5 file\n hdf5_file = os.path.expanduser(f'{fileBasename}.h5')\n # copy everything from the HDF5 file\n with h5py.File(self.filename,mode='r') as source:\n dest = h5py.File(hdf5_file,mode='w')\n # value checks on output HDF5\n if not hasattr(dest, 'create_dataset'):\n raise ValueError('dest must be a group, got {!r}'.format(dest))\n # for each key in the root of the hdf5 file structure\n for k in source.keys():\n self.copy_from_HDF5(source[k], dest, name=k, **kwds)",
"def to_hdf5(self, path: Union[str, Path]):\n import h5py # : import-outside-toplevel\n\n with h5py.File(path, \"w\") as hdf:\n for k, v in self._to_list_dict().items():\n if k in self._cal_paras:\n hdf.create_dataset(k, data=v.tolist())\n elif v:\n hdf[k] = v",
"def read_generic_hdf5(fname):\n f = h5py.File(fname, \"r\")\n fcontent = {}\n\n def filldict(x, y):\n # create a new container\n tmp = {}\n # add attributes if present\n if len(y.attrs) > 0:\n tmp['attrs'] = dict(y.attrs)\n # add data if it is a dataset\n if isinstance(y, h5py.Dataset):\n tmp['data'] = np.array(y)\n # only add to the dictionary, if we have something meaningful to add\n if tmp != {}:\n fcontent[x] = tmp\n\n f.visititems(filldict)\n\n f.close()\n return fcontent",
"def spatialimg_to_hdfgroup(h5group, spatial_img):\n try:\n h5group['data'] = spatial_img.get_data()\n h5group['affine'] = spatial_img.get_affine()\n\n if hasattr(h5group, 'get_extra'):\n h5group['extra'] = spatial_img.get_extra()\n\n hdr = spatial_img.get_header()\n for k in list(hdr.keys()):\n h5group['data'].attrs[k] = hdr[k]\n\n except ValueError as ve:\n raise Exception('Error creating group ' + h5group.name) from ve",
"def to_hdf(self, hdf=None, group_name=None):\n super(SxUniqDispl, self).to_hdf(hdf=hdf, group_name=group_name)\n with self.project_hdf5.open(\"input\") as hdf5_input:\n self.input.to_hdf(hdf5_input)",
"def write_hdf5(filename, data):\n \n if '.h5' in filename:\n fid = h5py.File(filename, 'w')\n else:\n filename = filename+'.h5'\n fid = h5py.File(filename, 'w')\n\n print('Writing %s...'%filename)\n\n write_hdf5_group(fid, data)\n\n fid.close()\n print('Finished writting %s.'%filename)\n return",
"def to_hdf(self, hdf=None, group_name=None):\n super(ParameterMaster, self).to_hdf(hdf=hdf, group_name=group_name)\n with self.project_hdf5.open(\"input\") as hdf5_input:\n hdf5_input[\"dataframe\"] = self.iteration_frame.to_dict(orient=\"list\")",
"def save(self, h5group):\n subGroup = createH5Group(h5group, self.savePath)\n for key, vals in self.logWeights.items():\n subGroup[key] = vals",
"def recursively_save_dict_contents_to_group(h5file: \"h5py.File\", \n path: str, \n dic: dict,\n ) -> None:\n for key, item in dic.items():\n if isinstance(item, (np.ndarray, np.int64, np.float64, str, bytes,int,float,np.bool_)):\n h5file[path + key] = item\n elif isinstance(item, dict):\n recursively_save_dict_contents_to_group(h5file, path + key + '/', item)\n else:\n raise ValueError(f'Cannot save {item} type')",
"def addData(self,data):\n\t\tif isinstance(data,list):\n\t\t\tif isinstance(data[0],dict):\n\t\t\t\tself.data.extend(data)\n\t\t\telif isinstance(data[0],list):\t\n\t\t\t\tfor r in data:\n\t\t\t\t\tacc= dict()\n\t\t\t\t\tfor h in self.header:\n\t\t\t\t\t\tacc[h]=r[self.header.index(h)]\t\n\t\t\t\t\tself.data.append(acc) \n\t\t\telse:\n\t\t\t\tself.data.append(dict(zip(self.header,data)))\n\t\telif isinstance(data,dict):\n\t\t\tself.data.append(data)\n\t\telse:\n\t\t\traise datatools.WrongTypeError(data)",
"def addGroup(self, group):\n self._model.insertH5pyObject(group)",
"def loadhdf5file(file_h5, key='data'):\n\n with h5py.File(file_h5, 'r') as data:\n # Add datasets to dictionary\n info_value = {}\n info_attrs = {}\n\n for i in np.arange(len(data.items())):\n info_value.update({str(list(data.items())[i][0]): data[str(list(data.items())[i][0])].value})\n\n for i in np.arange(len(data[key].attrs)):\n info_attrs.update({list(data[key].attrs.keys())[i]: list(data[key].attrs.values())[i]})\n\n return info_value, info_attrs",
"def to_hdf(d, filename):\n if not isinstance(d, dict):\n errmsg = 'Only dictionaries may be written to HDF5 files.'\n logging.error(errmsg)\n raise TypeError(errmsg)\n\n # Define a function for iteratively doing the work\n def store_recursively(fhandle, node, path=[], node_hashes={}):\n full_path = '/' + '/'.join(path)\n if isinstance(node, dict):\n try:\n fhandle.create_group(full_path)\n except ValueError:\n pass\n for key in sorted(node.iterkeys()):\n key_str = str(key)\n if not isinstance(key, str):\n logging.warn('Stringifying key \"' + key_str +\n '\"for use as name in HDF5 file')\n val = node[key]\n new_path = path + [key_str]\n store_recursively(fhandle=fhandle, node=val, path=new_path,\n node_hashes=node_hashes)\n else:\n # Check for existing node\n node_hash = utils.utils.hash_obj(node)\n if node_hash in node_hashes:\n # Hardlink the matching existing dataset\n fhandle[full_path] = fhandle[node_hashes[node_hash]]\n return\n node_hashes[node_hash] = full_path\n # \"Scalar datasets don't support chunk/filter options\"; extra\n # checking that a sequence isn't a string, also. Shuffling is\n # a good idea since subsequent compression will generally benefit;\n # shuffling requires chunking. Compression is not done here\n # since it is slow.\n if hasattr(node, '__iter__') and not isinstance(node, basestring):\n shuffle = True\n chunks = True\n else:\n shuffle = False\n chunks = None\n fhandle.create_dataset(name=full_path, data=node, chunks=chunks,\n compression=None, shuffle=shuffle,\n fletcher32=False)\n \n # Perform the actual operation using the dict passed in by user\n try:\n h5file = h5py.File(os.path.expandvars(filename), 'w')\n store_recursively(fhandle=h5file, node=d)\n except IOError, e:\n logging.error(\"Unable to write to HDF5 file \\'%s\\'\" % filename)\n logging.error(e)\n raise e\n finally:\n h5file.close()",
"def FromH5(self,h5File=None):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n\r\n if h5File == None:\r\n h5File=self.h5File\r\n\r\n #Check if h5File exists\r\n if not os.path.exists(h5File): \r\n logStrFinal=\"{0:s}{1:s}: Not Existing!\".format(logStr,h5File) \r\n raise XmError(logStrFinal) \r\n \r\n try:\r\n self.dataFrames={} \r\n with pd.HDFStore(h5File) as h5Store:\r\n h5Keys=sorted(h5Store.keys())\r\n for h5Key in h5Keys:\r\n match=re.search('(/)(\\w+$)',h5Key)\r\n key=match.group(2)\r\n logger.debug(\"{0:s}{1:s}: Reading h5Key {2:s} to tableName {3:s}.\".format(logStr,h5File,h5Key,key)) \r\n self.dataFrames[key]=h5Store[h5Key]\r\n \r\n\r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n \r\n finally:\r\n h5Store.close()\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))",
"def add_metadata(self, key, value):\n self._h5.attrs[key] = value",
"def _put(self, name, chunk, row_offset):\n grp = self.grp[name]\n lo = row_offset\n if isinstance(chunk, pd.Series):\n chunk = chunk.to_frame()\n n_rows = len(chunk)\n else:\n n_rows = len(chunk[next(iter(chunk.keys()))])\n hi = lo + n_rows\n\n for name in chunk.keys():\n\n x = np.asarray(chunk[name])\n\n data, dtype, fillvalue = self._normalize_column(x, x.dtype)\n\n if name in grp.keys():\n dset = grp[name]\n if hi > len(dset):\n dset.resize((hi,))\n dset[lo:hi] = data\n else:\n try:\n enum_dict = h5py.check_dtype(enum=dtype)\n except AttributeError:\n enum_dict = None\n dset = grp.create_dataset(\n name,\n shape=(hi,),\n dtype=dtype,\n data=data,\n fillvalue=fillvalue,\n **self.storage_options\n )\n if enum_dict is not None:\n # store enum dictionary as attribute\n dset.attrs[\"categories\"] = sorted(\n enum_dict, key=enum_dict.__getitem__\n )",
"def add_data(self, key, data):\n with self.write():\n # index all columns if possible\n try:\n # FIXME: band-aid heuristic to catch a known corner case that\n # HDFStore doesn't catch; see ``Issue 20``\n if (isinstance(data, pd.DataFrame) and\n data.columns.dtype == np.dtype('int64')):\n raise AttributeError\n\n self.handle.put(\n key, data, format='table', data_columns=True, complevel=5,\n complib='blosc')\n except AttributeError:\n self.handle.put(\n key, data, format='table', complevel=5, complib='blosc')",
"def add_group_data(self, group_name):\n self.sorted = False\n self.grouped = False\n self.labels_to_add = []\n for path in self.all_groups.get(group_name):\n io = NWBHDF5IO(path, 'r')\n nwb_file = io.read()\n # self.labels.append(nwb_file.identifier)\n self.nwb_path_list.update({nwb_file.identifier: path})\n self.labels_to_add.append(nwb_file.identifier)\n self.musketeers_widget.session_widget.populate(self.labels_to_add, 'add')\n self.musketeers_widget.session_widget.update_text_filter()\n self.groupMenu.setEnabled(True)\n self.sortMenu.setEnabled(True)",
"def recursively_load_dict_contents_from_group(h5file: \"h5py.File\", \n path: str,\n ) -> dict:\n ans = {}\n for key, item in h5file[path].items():\n if isinstance(item, h5py._hl.dataset.Dataset):\n ans[key] = item.value\n elif isinstance(item, h5py._hl.group.Group):\n ans[key] = recursively_load_dict_contents_from_group(h5file, f\"{path}{key}/\")\n return ans",
"def _setup_h5(self, data_gen_parms):\n\n '''\n Build the group structure down to the channel group\n '''\n # Set up the basic group structure\n root_grp = VirtualGroup('')\n root_parms = dict()\n root_parms['translator'] = 'FAKEBEPS'\n root_parms['data_type'] = data_gen_parms['data_type']\n root_grp.attrs = root_parms\n\n meas_grp = VirtualGroup('Measurement_')\n chan_grp = VirtualGroup('Channel_')\n\n meas_grp.attrs.update(data_gen_parms)\n\n # Create the Position and Spectroscopic datasets for the Raw Data\n ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals = self._build_ancillary_datasets()\n\n raw_chunking = calc_chunks([self.n_pixels,\n self.n_spec_bins],\n np.complex64(0).itemsize,\n unit_chunks=[1, self.n_bins])\n\n ds_raw_data = VirtualDataset('Raw_Data', data=None,\n maxshape=[self.n_pixels, self.n_spec_bins],\n dtype=np.complex64,\n compression='gzip',\n chunking=raw_chunking,\n parent=meas_grp)\n\n chan_grp.add_children([ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals,\n ds_raw_data])\n meas_grp.add_children([chan_grp])\n root_grp.add_children([meas_grp])\n\n hdf = HDFwriter(self.h5_path)\n hdf.delete()\n h5_refs = hdf.write(root_grp)\n\n # Delete the MicroDatasets to save memory\n del ds_raw_data, ds_spec_inds, ds_spec_vals, ds_pos_inds, ds_pos_vals\n\n # Get the file and Raw_Data objects\n h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]\n h5_chan_grp = h5_raw.parent\n\n # Get the Position and Spectroscopic dataset objects\n h5_pos_inds = get_h5_obj_refs(['Position_Indices'], h5_refs)[0]\n h5_pos_vals = get_h5_obj_refs(['Position_Values'], h5_refs)[0]\n h5_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_refs)[0]\n h5_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_refs)[0]\n\n # Link the Position and Spectroscopic datasets as attributes of Raw_Data\n link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)\n\n '''\n Build the SHO Group\n '''\n sho_grp = VirtualGroup('Raw_Data-SHO_Fit_', parent=h5_chan_grp.name)\n\n # Build the Spectroscopic datasets for the SHO Guess and Fit\n sho_spec_starts = np.where(h5_spec_inds[h5_spec_inds.attrs['Frequency']].squeeze() == 0)[0]\n sho_spec_labs = get_attr(h5_spec_inds, 'labels')\n ds_sho_spec_inds, ds_sho_spec_vals = build_reduced_spec_dsets(h5_spec_inds,\n h5_spec_vals,\n keep_dim=sho_spec_labs != 'Frequency',\n step_starts=sho_spec_starts)\n\n sho_chunking = calc_chunks([self.n_pixels,\n self.n_sho_bins],\n sho32.itemsize,\n unit_chunks=[1, 1])\n ds_sho_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n ds_sho_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n\n sho_grp.add_children([ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals])\n\n # Write the SHO group and datasets to the file and delete the MicroDataset objects\n h5_sho_refs = hdf.write(sho_grp)\n del ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals\n\n # Get the dataset handles for the fit and guess\n h5_sho_fit = get_h5_obj_refs(['Fit'], h5_sho_refs)[0]\n h5_sho_guess = get_h5_obj_refs(['Guess'], h5_sho_refs)[0]\n\n # Get the dataset handles for the SHO Spectroscopic datasets\n h5_sho_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_sho_refs)[0]\n h5_sho_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_sho_refs)[0]\n\n # Link the Position and Spectroscopic 
datasets as attributes of the SHO Fit and Guess\n link_as_main(h5_sho_fit, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n\n '''\n Build the loop group\n '''\n loop_grp = VirtualGroup('Fit-Loop_Fit_', parent=h5_sho_fit.parent.name)\n\n # Build the Spectroscopic datasets for the loops\n loop_spec_starts = np.where(h5_sho_spec_inds[h5_sho_spec_inds.attrs['DC_Offset']].squeeze() == 0)[0]\n loop_spec_labs = get_attr(h5_sho_spec_inds, 'labels')\n ds_loop_spec_inds, ds_loop_spec_vals = build_reduced_spec_dsets(h5_sho_spec_inds,\n h5_sho_spec_vals,\n keep_dim=loop_spec_labs != 'DC_Offset',\n step_starts=loop_spec_starts)\n\n # Create the loop fit and guess MicroDatasets\n loop_chunking = calc_chunks([self.n_pixels, self.n_loops],\n loop_fit32.itemsize,\n unit_chunks=[1, 1])\n ds_loop_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n ds_loop_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n # Add the datasets to the loop group then write it to the file\n loop_grp.add_children([ds_loop_fit, ds_loop_guess, ds_loop_spec_inds, ds_loop_spec_vals])\n h5_loop_refs = hdf.write(loop_grp)\n\n # Delete the MicroDatasets\n del ds_loop_spec_vals, ds_loop_spec_inds, ds_loop_guess, ds_loop_fit\n\n # Get the handles to the datasets\n h5_loop_fit = get_h5_obj_refs(['Fit'], h5_loop_refs)[0]\n h5_loop_guess = get_h5_obj_refs(['Guess'], h5_loop_refs)[0]\n h5_loop_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_loop_refs)[0]\n h5_loop_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_loop_refs)[0]\n\n # Link the Position and Spectroscopic datasets to the Loop Guess and Fit\n link_as_main(h5_loop_fit, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n link_as_main(h5_loop_guess, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n\n self.h5_raw = USIDataset(h5_raw)\n self.h5_sho_guess = USIDataset(h5_sho_guess)\n self.h5_sho_fit = USIDataset(h5_sho_fit)\n self.h5_loop_guess = USIDataset(h5_loop_guess)\n self.h5_loop_fit = USIDataset(h5_loop_fit)\n self.h5_spec_vals = h5_spec_vals\n self.h5_spec_inds = h5_spec_inds\n self.h5_sho_spec_inds = h5_sho_spec_inds\n self.h5_sho_spec_vals = h5_sho_spec_vals\n self.h5_loop_spec_inds = h5_loop_spec_inds\n self.h5_loop_spec_vals = h5_loop_spec_vals\n self.h5_file = h5_raw.file\n\n return",
"def push(self, data: Dict[str, np.ndarray]) -> None:\n for key, value in data.items():\n self.data[key].extend(value)\n\n if self._keys is None:\n self._keys = list(self.data.keys())",
"def writeH5Dataset( self, foldername, time, nameConvention = \"grid\" ):\n filename = \"{0}/{1}_{2:06}.h5\".format(foldername,nameConvention,time)\n file = h5py.File(filename,'w',driver='mpio',comm=self.global_comm)\n dset = file.create_dataset(\"dset\",self._layout.fullShape, dtype = self._f.dtype)\n slices = tuple([slice(s,e) for s,e in zip(self._layout.starts,self._layout.ends)])\n dset[slices]=self._f[:]\n attr_data = np.array(self._layout.dims_order)\n dset.attrs.create(\"Layout\", attr_data, (self._nDims,), h5py.h5t.STD_I32BE)\n file.close()"
] | [
"0.7259529",
"0.67084503",
"0.6640664",
"0.66189474",
"0.65644884",
"0.65580463",
"0.6532053",
"0.63650477",
"0.634904",
"0.6265646",
"0.61399484",
"0.6080792",
"0.60668784",
"0.60593045",
"0.6021528",
"0.5996762",
"0.59759545",
"0.59177995",
"0.58672106",
"0.5858261",
"0.58332354",
"0.5766916",
"0.57339966",
"0.5725382",
"0.5696723",
"0.5691544",
"0.5671822",
"0.5668732",
"0.56350917",
"0.56246656"
] | 0.75657034 | 0 |
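For context, a minimal self-contained sketch of the dict-to-HDF5-group pattern this record illustrates; data_to_h5 is not shown in the dataset, so the flat create_dataset body below is an assumption rather than the record's actual helper:

import h5py
import numpy as np

def dict_to_h5(data, grp):
    # Write each dictionary entry as a dataset in the given h5py group,
    # using the stringified key as the dataset name.
    for key, value in data.items():
        grp.create_dataset(str(key), data=np.asarray(value))

# Example usage (illustrative file/group names):
# with h5py.File("example.h5", "w") as f:
#     dict_to_h5({"a": [1, 2, 3], "b": 4.5}, f.create_group("results"))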
Function which takes a list of class attributes and stores them in a provided h5py group. See data_to_h5 for how datatypes are handled. | def attributes_to_h5(obj, grp, lst_attr=None, priv=False, dpriv=False,
**kwargs):
if lst_attr is None:
if dpriv:
lst_attr = list(obj.__dict__.keys())
elif priv:
lst_attr = [x for x in obj.__dict__.keys() if '__' not in x]
else:
lst_attr = [x for x in obj.__dict__.keys() if '_' not in x]
for attr in lst_attr:
if attr in grp.keys():
del(grp[attr])
data = getattr(obj, attr)
data_to_h5(data, grp, attr, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_hdf5_attributes(dset, attributes):\n for key in attributes.iterkeys():\n dset.attrs[key] = attributes[key]\n\n return dset",
"def h5_to_dict(grp, **kwargs):\n data = {}\n for key in grp.keys():\n try:\n e_key = eval(key, {})\n except:\n e_key = key\n\n data[e_key] = h5_to_data(grp[key], **kwargs)\n \n return data",
"def testAttributes(self):\n ddict = {\n \"group\": {\"dataset\": 100, \"@group_attr1\": 10},\n \"dataset\": 200,\n \"@root_attr\": 11,\n \"dataset@dataset_attr\": \"12\",\n \"group@group_attr2\": 13,\n }\n with h5py.File(self.h5_fname, \"w\") as h5file:\n dictdump.dicttonx(ddict, h5file)\n self.assertEqual(h5file[\"group\"].attrs['group_attr1'], 10)\n self.assertEqual(h5file.attrs['root_attr'], 11)\n self.assertEqual(h5file[\"dataset\"].attrs['dataset_attr'], \"12\")\n self.assertEqual(h5file[\"group\"].attrs['group_attr2'], 13)",
"def spatialimg_to_hdfgroup(h5group, spatial_img):\n try:\n h5group['data'] = spatial_img.get_data()\n h5group['affine'] = spatial_img.get_affine()\n\n if hasattr(h5group, 'get_extra'):\n h5group['extra'] = spatial_img.get_extra()\n\n hdr = spatial_img.get_header()\n for k in list(hdr.keys()):\n h5group['data'].attrs[k] = hdr[k]\n\n except ValueError as ve:\n raise Exception('Error creating group ' + h5group.name) from ve",
"def testAttributes(self):\n ddict = {\n \"group\": {\"datatset\": \"hmmm\", (\"\", \"group_attr\"): 10},\n \"dataset\": \"aaaaaaaaaaaaaaa\",\n (\"\", \"root_attr\"): 11,\n (\"dataset\", \"dataset_attr\"): 12,\n (\"group\", \"group_attr2\"): 13,\n }\n with h5py.File(self.h5_fname, \"w\") as h5file:\n dictdump.dicttoh5(ddict, h5file)\n self.assertEqual(h5file[\"group\"].attrs['group_attr'], 10)\n self.assertEqual(h5file.attrs['root_attr'], 11)\n self.assertEqual(h5file[\"dataset\"].attrs['dataset_attr'], 12)\n self.assertEqual(h5file[\"group\"].attrs['group_attr2'], 13)",
"def dict_to_h5(data, grp, **kwargs):\n for key in data:\n s_key = str(key)\n sub_data = data[key]\n data_to_h5(sub_data, grp, s_key, **kwargs)",
"def _write(self, h5_group, _) -> None:\n # Convert text from unicode to byte-string to avoid error in h5py\n data = np.asarray(self.data, dtype=np.string_)\n h5_field = h5_group.create_dataset(h5_group.attrs[\"fieldname\"], self.data.shape, dtype=data.dtype)\n h5_field[...] = data",
"def set_h5py_attr(attrs, key, val):\n if isinstance(val, basestring):\n val = np.string_(val)\n elif isinstance(val, Iterable) and len(val) > 0:\n if isinstance(val[0], basestring):\n val = np.array(val, dtype='S')\n attrs[key] = val",
"def to_hdf(cls, store, attributes_dict, key):\n\n for k, v in attributes_dict.items():\n next_key = key + '/' + k\n if hasattr(v, 'to_hdf'):\n v.to_hdf(store, next_key)\n elif isinstance(v, cls._scalar_types):\n cls._scalar_to_hdf(store, v, next_key)\n elif isinstance(v, cls._list_types):\n cls._list_to_hdf(store, v, next_key)\n elif isinstance(v, dict):\n cls._dict_to_hdf(store, v, next_key)\n elif isinstance(v, np.ndarray):\n cls._list_to_hdf(store, v, next_key)\n else:\n raise TypeError(\"Unable to handle type {}\".format(v.__class__.__name__))",
"def save(self, h5group):\n subGroup = createH5Group(h5group, self.savePath)\n for key, vals in self.logWeights.items():\n subGroup[key] = vals",
"def _save_h5_r(data_dict, h5obj, dlen):\n for key, val in data_dict.items():\n if isinstance(val, dict):\n h5group = h5obj[key] if key in h5obj.keys() else h5obj.create_group(key)\n _save_h5_r(val, h5group, dlen)\n else:\n if val.dtype == 'object':\n sub_dtype = f'float{dlen}' if val[0].dtype == np.float else f'int{dlen}' if val[0].dtype == np.int else val[0].dtype\n dtype = h5py.vlen_dtype(sub_dtype)\n else:\n dtype = f'float{dlen}' if val.dtype == np.float else f'int{dlen}' if val.dtype == np.int else val.dtype\n h5obj.create_dataset(key, data=val, dtype=dtype)",
"def df_to_h5(df, h5_store, group_name):\n # delete store store if exists \n if group_name in h5_store:\n del h5_store[group_name]\n my_group = h5_store.create_group(group_name)\n print \"Group \" + group_name + \" Exists. Group deleted then created\"\n #If not there, create the group\n else:\n my_group = h5_store.create_group(group_name)\n print \"Group \" + group_name + \" Created\"\n for col in df.columns:\n h5_store[group_name].create_dataset(col, data=df[col].values.astype('int32'))",
"def generate_attributes(self):\n for group in self.dict:\n for param in self.dict[group]:\n if group in self.group_douplicate and param in self.name_douplicate:\n setattr(self, group+'_'+param, self(group, param))\n else:\n setattr(self, param, self(group, param))",
"def to_hdf(self, hdf=None, group_name=None):\n super(SxUniqDispl, self).to_hdf(hdf=hdf, group_name=group_name)\n with self.project_hdf5.open(\"input\") as hdf5_input:\n self.input.to_hdf(hdf5_input)",
"def export_to_hdf5(cls, h5_file, model, loads):\n #encoding = model._encoding\n #comments = []\n sid = []\n node = []\n cid = []\n mag = []\n xyz = []\n for load in loads:\n #comments.append(loads.comment)\n sid.append(load.sid)\n node.append(load.node)\n cid.append(load.cid)\n mag.append(load.mag)\n xyz.append(load.xyz)\n\n #h5_file.create_dataset('_comment', data=comments)\n h5_file.create_dataset('sid', data=sid)\n h5_file.create_dataset('node', data=node)\n h5_file.create_dataset('cid', data=cid)\n h5_file.create_dataset('mag', data=mag)\n h5_file.create_dataset('xyz', data=xyz)",
"def testAttributeAlreadyExists(self):\n ddict = {\n \"group\": {\"dataset\": \"hmmm\", (\"\", \"attr\"): 10},\n (\"group\", \"attr\"): 10,\n }\n with h5py.File(self.h5_fname, \"w\") as h5file:\n dictdump.dicttoh5(ddict, h5file)\n self.assertEqual(h5file[\"group\"].attrs['attr'], 10)",
"def testAttributes(self):\n ddict = {\n \"group\": {\"dataset\": 100, \"@group_attr1\": 10},\n \"dataset\": 200,\n \"@root_attr\": 11,\n \"dataset@dataset_attr\": \"12\",\n \"group@group_attr2\": 13,\n }\n dictdump.dicttonx(ddict, self.h5_fname)\n ddict = dictdump.nxtodict(self.h5_fname, include_attributes=True)\n self.assertEqual(ddict[\"group\"][\"@group_attr1\"], 10)\n self.assertEqual(ddict[\"@root_attr\"], 11)\n self.assertEqual(ddict[\"dataset@dataset_attr\"], \"12\")\n self.assertEqual(ddict[\"group\"][\"@group_attr2\"], 13)",
"def setattrs(self, data, attrlist, id_array=None):\n\t\tassert len(data) == len(attrlist)\n\t\tfor d, attr in zip(data, attrlist):\n\t\t\tif id_array == None: setattr(self, attr, d)\n\t\t\telse:getattr(self, attr)[id_array] = d # Setting 1d array elements",
"def from_hdf(self, hdf=None, group_name=None):\n super(SxUniqDispl, self).from_hdf(hdf=hdf, group_name=group_name)\n with self.project_hdf5.open(\"input\") as hdf5_input:\n self.input.from_hdf(hdf5_input)\n if \"output\" in self.project_hdf5.list_groups():\n with self.project_hdf5.open(\"output\") as hdf5_output:\n self.structure_lst = [\n Atoms().from_hdf(hdf5_output, group_name)\n for group_name in hdf5_output.list_groups()\n ]",
"def save_object(self, data):\n return GroupAttribute(**data)",
"def from_hff(cls, hff_data, name=None, args=None):\n assert isinstance(hff_data, h5py.Group)\n obj = cls()\n obj.name = _decode(hff_data[u'name'][()], u'utf-8')\n obj.version = _decode(hff_data[u'version'][()], u'utf-8')\n if u'processingDetails' in hff_data:\n obj.processing_details = _decode(hff_data[u'processingDetails'][()], u'utf-8')\n return obj",
"def saveTrainingData(model, hdf5, view_names=None, sample_names=None, feature_names=None, likelihoods=None):\n data = model.getTrainingData()\n data_grp = hdf5.create_group(\"data\")\n featuredata_grp = hdf5.create_group(\"features\")\n hdf5.create_dataset(\"samples\", data=np.array(sample_names, dtype='S50'))\n\n if likelihoods is not None:\n data_grp.attrs['likelihood'] = np.array(likelihoods, dtype='S50')\n\n for m in range(len(data)):\n view = view_names[m] if view_names is not None else str(m)\n data_grp.create_dataset(view, data=data[m].data.T)\n if feature_names is not None:\n # data_grp.attrs['features'] = np.array(feature_names[m], dtype='S')\n featuredata_grp.create_dataset(view, data=np.array(feature_names[m], dtype='S50'))",
"def to_hdf(self, hdf=None, group_name=None):\n super(ParameterMaster, self).to_hdf(hdf=hdf, group_name=group_name)\n with self.project_hdf5.open(\"input\") as hdf5_input:\n hdf5_input[\"dataframe\"] = self.iteration_frame.to_dict(orient=\"list\")",
"def get_data(self, grp, class_type=\"NXdata\"):\n coll = [grp[name] for name in grp\n if isinstance(grp[name], h5py.Dataset) and\n self.get_attr(grp[name], \"NX_class\") == class_type]\n return coll",
"def HDF5_to_HDF5(self, **kwds):\n # split extension from HDF5 file\n if isinstance(self.filename, str):\n fileBasename,fileExtension=os.path.splitext(self.filename)\n else:\n fileBasename,fileExtension=os.path.splitext(self.filename.filename)\n # output HDF5 file\n hdf5_file = os.path.expanduser(f'{fileBasename}.h5')\n # copy everything from the HDF5 file\n with h5py.File(self.filename,mode='r') as source:\n dest = h5py.File(hdf5_file,mode='w')\n # value checks on output HDF5\n if not hasattr(dest, 'create_dataset'):\n raise ValueError('dest must be a group, got {!r}'.format(dest))\n # for each key in the root of the hdf5 file structure\n for k in source.keys():\n self.copy_from_HDF5(source[k], dest, name=k, **kwds)",
"def _setup_h5(self, data_gen_parms):\n\n '''\n Build the group structure down to the channel group\n '''\n # Set up the basic group structure\n root_grp = VirtualGroup('')\n root_parms = dict()\n root_parms['translator'] = 'FAKEBEPS'\n root_parms['data_type'] = data_gen_parms['data_type']\n root_grp.attrs = root_parms\n\n meas_grp = VirtualGroup('Measurement_')\n chan_grp = VirtualGroup('Channel_')\n\n meas_grp.attrs.update(data_gen_parms)\n\n # Create the Position and Spectroscopic datasets for the Raw Data\n ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals = self._build_ancillary_datasets()\n\n raw_chunking = calc_chunks([self.n_pixels,\n self.n_spec_bins],\n np.complex64(0).itemsize,\n unit_chunks=[1, self.n_bins])\n\n ds_raw_data = VirtualDataset('Raw_Data', data=None,\n maxshape=[self.n_pixels, self.n_spec_bins],\n dtype=np.complex64,\n compression='gzip',\n chunking=raw_chunking,\n parent=meas_grp)\n\n chan_grp.add_children([ds_pos_inds, ds_pos_vals, ds_spec_inds, ds_spec_vals,\n ds_raw_data])\n meas_grp.add_children([chan_grp])\n root_grp.add_children([meas_grp])\n\n hdf = HDFwriter(self.h5_path)\n hdf.delete()\n h5_refs = hdf.write(root_grp)\n\n # Delete the MicroDatasets to save memory\n del ds_raw_data, ds_spec_inds, ds_spec_vals, ds_pos_inds, ds_pos_vals\n\n # Get the file and Raw_Data objects\n h5_raw = get_h5_obj_refs(['Raw_Data'], h5_refs)[0]\n h5_chan_grp = h5_raw.parent\n\n # Get the Position and Spectroscopic dataset objects\n h5_pos_inds = get_h5_obj_refs(['Position_Indices'], h5_refs)[0]\n h5_pos_vals = get_h5_obj_refs(['Position_Values'], h5_refs)[0]\n h5_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_refs)[0]\n h5_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_refs)[0]\n\n # Link the Position and Spectroscopic datasets as attributes of Raw_Data\n link_as_main(h5_raw, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)\n\n '''\n Build the SHO Group\n '''\n sho_grp = VirtualGroup('Raw_Data-SHO_Fit_', parent=h5_chan_grp.name)\n\n # Build the Spectroscopic datasets for the SHO Guess and Fit\n sho_spec_starts = np.where(h5_spec_inds[h5_spec_inds.attrs['Frequency']].squeeze() == 0)[0]\n sho_spec_labs = get_attr(h5_spec_inds, 'labels')\n ds_sho_spec_inds, ds_sho_spec_vals = build_reduced_spec_dsets(h5_spec_inds,\n h5_spec_vals,\n keep_dim=sho_spec_labs != 'Frequency',\n step_starts=sho_spec_starts)\n\n sho_chunking = calc_chunks([self.n_pixels,\n self.n_sho_bins],\n sho32.itemsize,\n unit_chunks=[1, 1])\n ds_sho_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n ds_sho_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_sho_bins],\n dtype=sho32,\n compression='gzip',\n chunking=sho_chunking,\n parent=sho_grp)\n\n sho_grp.add_children([ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals])\n\n # Write the SHO group and datasets to the file and delete the MicroDataset objects\n h5_sho_refs = hdf.write(sho_grp)\n del ds_sho_fit, ds_sho_guess, ds_sho_spec_inds, ds_sho_spec_vals\n\n # Get the dataset handles for the fit and guess\n h5_sho_fit = get_h5_obj_refs(['Fit'], h5_sho_refs)[0]\n h5_sho_guess = get_h5_obj_refs(['Guess'], h5_sho_refs)[0]\n\n # Get the dataset handles for the SHO Spectroscopic datasets\n h5_sho_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_sho_refs)[0]\n h5_sho_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_sho_refs)[0]\n\n # Link the Position and Spectroscopic 
datasets as attributes of the SHO Fit and Guess\n link_as_main(h5_sho_fit, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n link_as_main(h5_sho_guess, h5_pos_inds, h5_pos_vals, h5_sho_spec_inds, h5_sho_spec_vals)\n\n '''\n Build the loop group\n '''\n loop_grp = VirtualGroup('Fit-Loop_Fit_', parent=h5_sho_fit.parent.name)\n\n # Build the Spectroscopic datasets for the loops\n loop_spec_starts = np.where(h5_sho_spec_inds[h5_sho_spec_inds.attrs['DC_Offset']].squeeze() == 0)[0]\n loop_spec_labs = get_attr(h5_sho_spec_inds, 'labels')\n ds_loop_spec_inds, ds_loop_spec_vals = build_reduced_spec_dsets(h5_sho_spec_inds,\n h5_sho_spec_vals,\n keep_dim=loop_spec_labs != 'DC_Offset',\n step_starts=loop_spec_starts)\n\n # Create the loop fit and guess MicroDatasets\n loop_chunking = calc_chunks([self.n_pixels, self.n_loops],\n loop_fit32.itemsize,\n unit_chunks=[1, 1])\n ds_loop_fit = VirtualDataset('Fit', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n ds_loop_guess = VirtualDataset('Guess', data=None,\n maxshape=[self.n_pixels, self.n_loops],\n dtype=loop_fit32,\n compression='gzip',\n chunking=loop_chunking,\n parent=loop_grp)\n\n # Add the datasets to the loop group then write it to the file\n loop_grp.add_children([ds_loop_fit, ds_loop_guess, ds_loop_spec_inds, ds_loop_spec_vals])\n h5_loop_refs = hdf.write(loop_grp)\n\n # Delete the MicroDatasets\n del ds_loop_spec_vals, ds_loop_spec_inds, ds_loop_guess, ds_loop_fit\n\n # Get the handles to the datasets\n h5_loop_fit = get_h5_obj_refs(['Fit'], h5_loop_refs)[0]\n h5_loop_guess = get_h5_obj_refs(['Guess'], h5_loop_refs)[0]\n h5_loop_spec_inds = get_h5_obj_refs(['Spectroscopic_Indices'], h5_loop_refs)[0]\n h5_loop_spec_vals = get_h5_obj_refs(['Spectroscopic_Values'], h5_loop_refs)[0]\n\n # Link the Position and Spectroscopic datasets to the Loop Guess and Fit\n link_as_main(h5_loop_fit, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n link_as_main(h5_loop_guess, h5_pos_inds, h5_pos_vals, h5_loop_spec_inds, h5_loop_spec_vals)\n\n self.h5_raw = USIDataset(h5_raw)\n self.h5_sho_guess = USIDataset(h5_sho_guess)\n self.h5_sho_fit = USIDataset(h5_sho_fit)\n self.h5_loop_guess = USIDataset(h5_loop_guess)\n self.h5_loop_fit = USIDataset(h5_loop_fit)\n self.h5_spec_vals = h5_spec_vals\n self.h5_spec_inds = h5_spec_inds\n self.h5_sho_spec_inds = h5_sho_spec_inds\n self.h5_sho_spec_vals = h5_sho_spec_vals\n self.h5_loop_spec_inds = h5_loop_spec_inds\n self.h5_loop_spec_vals = h5_loop_spec_vals\n self.h5_file = h5_raw.file\n\n return",
"def encode_group_class(group_class):\n\titemized = [[list(key), val] for key, val in group_class.iteritems()]\n\titemized.sort()\n\treturn json.dumps(itemized)",
"def from_hff(cls, hff_data, name=None, args=None):\n assert isinstance(hff_data, h5py.Group)\n obj = cls(new_obj=False)\n obj.vertices = SFFVertexList.from_hff(hff_data[u'vertices'], args=args)\n obj.polygons = SFFPolygonList.from_hff(hff_data[u'polygons'], args=args)\n return obj",
"def load(self):\n\n or_none = lambda x: x if x != \"none\" else None\n with h5py.File(self.filename, \"r\") as hf:\n for attr, val in hf.attrs.items():\n setattr(self, attr, or_none(val))",
"def to_object(self, class_name=None, **qwargs):\n if \"TYPE\" not in self.list_nodes() and class_name is None:\n raise ValueError(\"Objects can be only recovered from hdf5 if TYPE is given\")\n elif class_name is not None and class_name != self.get(\"TYPE\"):\n raise ValueError(\n \"Object type in hdf5-file must be identical to input parameter\"\n )\n class_name = class_name or self.get(\"TYPE\")\n class_path = class_name.split(\"<class '\")[-1].split(\"'>\")[0]\n class_convert_dict = { # Fix backwards compatibility\n \"pyiron_base.generic.datacontainer.DataContainer\": \"pyiron_base.storage.datacontainer.DataContainer\",\n \"pyiron_base.generic.inputlist.InputList\": \"pyiron_base.storage.inputlist.InputList\",\n \"pyiron_base.generic.flattenedstorage.FlattenedStorage\": \"pyiron_base.storage.flattenedstorage.FlattenedStorage\",\n }\n if class_path in class_convert_dict.keys():\n class_name_new = \"<class '\" + class_convert_dict[class_path] + \"'>\"\n class_object = self.import_class(class_name_new)\n elif not class_path.startswith(\"abc.\"):\n class_object = self.import_class(class_name)\n else:\n class_object = class_constructor(cp=JOB_DYN_DICT[class_path.split(\".\")[-1]])\n\n # Backwards compatibility since the format of TYPE changed\n if class_name != str(class_object):\n self[\"TYPE\"] = str(class_object)\n\n obj = self.create_instance(class_object, **qwargs)\n obj.from_hdf(hdf=self.open(\"..\"), group_name=self.h5_path.split(\"/\")[-1])\n return obj"
] | [
"0.59058964",
"0.5679107",
"0.5664282",
"0.5615434",
"0.5584132",
"0.5538772",
"0.5410745",
"0.53981966",
"0.5395402",
"0.5338279",
"0.5302236",
"0.5268607",
"0.51904416",
"0.51860553",
"0.5129682",
"0.5127386",
"0.50981534",
"0.50860167",
"0.5078723",
"0.5053184",
"0.5039512",
"0.49792984",
"0.49767768",
"0.49395198",
"0.49387804",
"0.49208698",
"0.49177256",
"0.4914209",
"0.49133286",
"0.49093884"
] | 0.70967436 | 0 |
Converts an h5py group to a dictionary. See h5_to_data for how the different datatypes are handled. | def h5_to_dict(grp, **kwargs):
data = {}
for key in grp.keys():
try:
e_key = eval(key, {})
except:
e_key = key
data[e_key] = h5_to_data(grp[key], **kwargs)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_subtree(group: h5py.Group, refs: h5py.Group):\n d = {}\n for key in group:\n if key == \"#refs#\":\n continue\n value = group[key]\n if isinstance(value, h5py.Group):\n d[key] = convert_subtree(value, refs=refs)\n elif isinstance(value, h5py.Dataset):\n d[key] = convert_dataset(value, refs=refs)\n else:\n raise ValueError(f\"Can't convert {value} of type {type(value)}.\")\n return d",
"def spatialimg_to_hdfgroup(h5group, spatial_img):\n try:\n h5group['data'] = spatial_img.get_data()\n h5group['affine'] = spatial_img.get_affine()\n\n if hasattr(h5group, 'get_extra'):\n h5group['extra'] = spatial_img.get_extra()\n\n hdr = spatial_img.get_header()\n for k in list(hdr.keys()):\n h5group['data'].attrs[k] = hdr[k]\n\n except ValueError as ve:\n raise Exception('Error creating group ' + h5group.name) from ve",
"def recursively_load_dict_contents_from_group(h5file: \"h5py.File\", \n path: str,\n ) -> dict:\n ans = {}\n for key, item in h5file[path].items():\n if isinstance(item, h5py._hl.dataset.Dataset):\n ans[key] = item.value\n elif isinstance(item, h5py._hl.group.Group):\n ans[key] = recursively_load_dict_contents_from_group(h5file, f\"{path}{key}/\")\n return ans",
"def proc_group(inp):\n dic = {}\n dic.update(proc_attr(inp))\n for key in inp.keys():\n if isinstance(inp[key], h5py.Group):\n dic.update({key:proc_group(inp[key])})\n else:\n dic[key] = inp[key][()]\n pass\n return dic",
"def dict_to_h5(data, grp, **kwargs):\n for key in data:\n s_key = str(key)\n sub_data = data[key]\n data_to_h5(sub_data, grp, s_key, **kwargs)",
"def _write(self, h5_group, _) -> None:\n # Convert text from unicode to byte-string to avoid error in h5py\n data = np.asarray(self.data, dtype=np.string_)\n h5_field = h5_group.create_dataset(h5_group.attrs[\"fieldname\"], self.data.shape, dtype=data.dtype)\n h5_field[...] = data",
"def HDF5_to_HDF5(self, **kwds):\n # split extension from HDF5 file\n if isinstance(self.filename, str):\n fileBasename,fileExtension=os.path.splitext(self.filename)\n else:\n fileBasename,fileExtension=os.path.splitext(self.filename.filename)\n # output HDF5 file\n hdf5_file = os.path.expanduser(f'{fileBasename}.h5')\n # copy everything from the HDF5 file\n with h5py.File(self.filename,mode='r') as source:\n dest = h5py.File(hdf5_file,mode='w')\n # value checks on output HDF5\n if not hasattr(dest, 'create_dataset'):\n raise ValueError('dest must be a group, got {!r}'.format(dest))\n # for each key in the root of the hdf5 file structure\n for k in source.keys():\n self.copy_from_HDF5(source[k], dest, name=k, **kwds)",
"def h5_to_df(h5_file, group_name):\n col_dict = {}\n h5_set = hdf_file[group_name]\n for col in h5_set.keys():\n my_array = np.asarray(h5_set[col])\n col_dict[col] = my_array\n df = pd.DataFrame(col_dict)\n return df",
"def serialized(self):\r\n return {'name':self._group.name, 'ip':self._ip}",
"def _read(cls, h5_group, memo) -> \"FieldType\":\n name = h5_group.attrs[\"fieldname\"]\n if name in memo:\n val = memo[name]\n else:\n # Convert back from byte-string to unicode\n val = np.asarray(h5_group[name][...], dtype=np.str_)\n return cls(num_obs=len(val), name=name.split(\".\")[-1], val=val)",
"def get_calibrate(hdf5_group):\n\n return dictionary_from_attributes(hdf5_group)",
"def _get_group_example_data(self, data_group_id: str) -> Dict[\n str, dict\n ]:\n return {\n e['example_id']: self._get_example_data(e['example_id'])\n for e in self.tasks['data_groups'][data_group_id]\n }",
"def from_hdf(filename):\n # Function for iteratively parsing the file to create the dictionary\n def visit_group(obj, sdict):\n name = obj.name.split('/')[-1]\n #indent = len(obj.name.split('/'))-1\n #print \" \"*indent,name, obj.value if (type(obj) == h5py.Dataset) else \":\"\n if type(obj) in [ h5py.Dataset ]:\n sdict[name] = obj.value\n if type(obj) in [ h5py.Group, h5py.File ]:\n sdict[name] = {}\n for sobj in obj.values():\n visit_group(sobj, sdict[name])\n\n data = {}\n try:\n h5file = h5py.File(os.path.expandvars(filename), 'r')\n # Run over the whole dataset\n for obj in h5file.values():\n visit_group(obj, data)\n except IOError, e:\n logging.error(\"Unable to read HDF5 file \\'%s\\'\" % filename)\n logging.error(e)\n raise e\n finally:\n h5file.close()\n\n return data",
"def from_hff(cls, hff_data, name=None, args=None):\n assert isinstance(hff_data, h5py.Group)\n obj = cls()\n obj.name = _decode(hff_data[u'name'][()], u'utf-8')\n obj.version = _decode(hff_data[u'version'][()], u'utf-8')\n if u'processingDetails' in hff_data:\n obj.processing_details = _decode(hff_data[u'processingDetails'][()], u'utf-8')\n return obj",
"def _save_h5_r(data_dict, h5obj, dlen):\n for key, val in data_dict.items():\n if isinstance(val, dict):\n h5group = h5obj[key] if key in h5obj.keys() else h5obj.create_group(key)\n _save_h5_r(val, h5group, dlen)\n else:\n if val.dtype == 'object':\n sub_dtype = f'float{dlen}' if val[0].dtype == np.float else f'int{dlen}' if val[0].dtype == np.int else val[0].dtype\n dtype = h5py.vlen_dtype(sub_dtype)\n else:\n dtype = f'float{dlen}' if val.dtype == np.float else f'int{dlen}' if val.dtype == np.int else val.dtype\n h5obj.create_dataset(key, data=val, dtype=dtype)",
"def from_hdf5(cls, group_or_filename):\n\n if isinstance(group_or_filename, h5py.Group):\n group = group_or_filename\n need_to_close = False\n else:\n h5file = h5py.File(str(group_or_filename), 'r')\n need_to_close = True\n\n # Make sure version matches\n if 'version' in h5file.attrs:\n major, minor = h5file.attrs['version']\n if major != WMP_VERSION_MAJOR:\n raise DataError(\n 'WMP data format uses version {}. {} whereas your '\n 'installation of the OpenMC Python API expects version '\n '{}.x.'.format(major, minor, WMP_VERSION_MAJOR))\n else:\n raise DataError(\n 'WMP data does not indicate a version. Your installation of '\n 'the OpenMC Python API expects version {}.x data.'\n .format(WMP_VERSION_MAJOR))\n\n group = list(h5file.values())[0]\n\n name = group.name[1:]\n out = cls(name)\n\n # Read scalars.\n\n out.spacing = group['spacing'][()]\n out.sqrtAWR = group['sqrtAWR'][()]\n out.E_min = group['E_min'][()]\n out.E_max = group['E_max'][()]\n\n # Read arrays.\n\n err = \"WMP '{}' array shape is not consistent with the '{}' array shape\"\n\n out.data = group['data'][()]\n\n out.windows = group['windows'][()]\n\n out.broaden_poly = group['broaden_poly'][...].astype(np.bool)\n if out.broaden_poly.shape[0] != out.windows.shape[0]:\n raise ValueError(err.format('broaden_poly', 'windows'))\n\n out.curvefit = group['curvefit'][()]\n if out.curvefit.shape[0] != out.windows.shape[0]:\n raise ValueError(err.format('curvefit', 'windows'))\n\n # _broaden_wmp_polynomials assumes the curve fit has at least 3 terms.\n if out.fit_order < 2:\n raise ValueError(\"Windowed multipole is only supported for \"\n \"curvefits with 3 or more terms.\")\n\n # If HDF5 file was opened here, make sure it gets closed\n if need_to_close:\n h5file.close()\n\n return out",
"def read_hdf5_group(filename, gname, vars_name=None):\n fid = h5py.File(filename, 'r')\n gid = fid.get(gname)\n if vars_name is None: vars_name = list(gid.keys())\n\n data = {}\n for var_name in vars_name:\n try:\n dset = gid.get(var_name)\n shape = dset.shape\n data[var_name] = np.zeros(shape)\n dset.read_direct(data[var_name])\n except:\n pass\n fid.close()\n print('Read from ', ''.join((filename,'/',gname)))\n print('Variables names = ')\n print('\\n'.join(vars_name))\n\n return data, vars_name",
"def load_h5(fname: str, path: str='/') -> dict:\n try:\n with h5py.File(fname, 'r') as f:\n dataMap = recursively_load_dict_contents_from_group(f, path)\n except IOError as e:\n print(f\"Cannot open HDF5 file {fname}\")\n print(f\"IOError: {e}\")\n\n return dataMap",
"def testFlatDict(self):\n ddict = {\n \"group/group/dataset\": 10,\n (\"group/group/dataset\", \"attr\"): 11,\n (\"group/group\", \"attr\"): 12,\n }\n with h5py.File(self.h5_fname, \"w\") as h5file:\n dictdump.dicttoh5(ddict, h5file)\n self.assertEqual(h5file[\"group/group/dataset\"][()], 10)\n self.assertEqual(h5file[\"group/group/dataset\"].attrs['attr'], 11)\n self.assertEqual(h5file[\"group/group\"].attrs['attr'], 12)",
"def HDF5_to_zarr(self, **kwds):\n # split extension from HDF5 file\n if isinstance(self.filename, str):\n fileBasename,fileExtension=os.path.splitext(self.filename)\n else:\n fileBasename,fileExtension=os.path.splitext(self.filename.filename)\n # output zarr file\n zarr_file = os.path.expanduser(f'{fileBasename}.zarr')\n # copy everything from the HDF5 file to the zarr file\n with h5py.File(self.filename, mode='r') as source:\n dest = zarr.open_group(zarr_file,mode='w')\n # value checks on output zarr\n if not hasattr(dest, 'create_dataset'):\n raise ValueError('dest must be a group, got {!r}'.format(dest))\n # for each key in the root of the hdf5 file structure\n for k in source.keys():\n self.copy_from_HDF5(source[k], dest, name=k, **kwds)",
"def _get_group_data(self, group_name):\n if self.plotter.plot_hues is None:\n data = self._get_group_data_without_hue(group_name)\n else:\n data = self._get_group_data_with_hue(group_name)\n\n group_data = remove_null(data)\n\n return group_data",
"def save(self, h5group):\n subGroup = createH5Group(h5group, self.savePath)\n for key, vals in self.logWeights.items():\n subGroup[key] = vals",
"def to_hdf5(self, group, energy):\n\n group.attrs['mt'] = self.mt\n group.attrs['redundant'] = 1 if self.redundant else 0\n\n if self.mt in _REACTION_NAME:\n group.attrs['label'] = np.string_(_REACTION_NAME[self.mt])\n else:\n group.attrs['label'] = np.string_(self.mt)\n\n dset = group.create_dataset('xs', data=self.xs(energy))\n threshold_idx = getattr(self.xs, '_threshold_idx', 0)\n dset.attrs['threshold_idx'] = threshold_idx",
"def attributes_to_h5(obj, grp, lst_attr=None, priv=False, dpriv=False,\n **kwargs):\n if lst_attr is None:\n if dpriv:\n lst_attr = list(obj.__dict__.keys())\n elif priv:\n lst_attr = [x for x in obj.__dict__.keys() if '__' not in x]\n else:\n lst_attr = [x for x in obj.__dict__.keys() if '_' not in x]\n for attr in lst_attr:\n if attr in grp.keys():\n del(grp[attr])\n data = getattr(obj, attr)\n data_to_h5(data, grp, attr, **kwargs)",
"def load_h5_file(file_path):\n # load\n fr = h5py.File(file_path, 'r')\n a_group_key = list(fr.keys())[0]\n data = list(fr[a_group_key])\n # transform to appropriate numpy array \n data=data[0:]\n data = np.stack(data, axis=0)\n return data",
"def load_h5_file(file_path):\n # load\n fr = h5py.File(file_path, 'r')\n a_group_key = list(fr.keys())[0]\n data = list(fr[a_group_key])\n # transform to appropriate numpy array \n data=data[0:]\n data = np.stack(data, axis=0)\n return data",
"def write_dict_h5(dfile, f, k, fbencodings, var_selection=[], mode='a', attrs={}):\n\n #attrs= {'date_time':('units','seconds since 1900-01-01 00:00:00')}\n #attrs = {'observation_id': ('description', 'unique ID for observation'), 'report_id': ('description', 'Link to header information') , 'date_time':('units','seconds since 1900-01-01 00:00:00') }\n \n with h5py.File(dfile,mode) as fd:\n try:\n fd.create_group(k)\n index=numpy.zeros (f[list(f.keys())[0]].shape[0], dtype='S1')\n fd[k].create_dataset('index', data=index)\n except:\n pass\n if not var_selection:\n var_selection=list(f.keys())\n \n string10=numpy.zeros(fixed_string_len,dtype='S1')\n sdict={}\n slist=[]\n\n #groupencodings \n \n for v in var_selection: \n #variables_dic[v] = ''\n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n \n if type(fvv[0]) not in [str,bytes,numpy.bytes_]:\n\n if fvv.dtype !='S1':\n \n fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv[:]\n if attrs: # attrs={'date_time':('units','seconds since 1900-01-01 00:00:00')}\n if v in attrs.keys():\n for kk,vv in attrs[v].items():\n if type(vv) is str: \n fd[k][v].attrs[kk]=numpy.bytes_(vv)\n else:\n fd[k][v].attrs[kk]=vv\n \n if v in ['date_time','report_timestamp','record_timestamp']:\n fd[k][v].attrs['units']=numpy.bytes_('seconds since 1900-01-01 00:00:00') #print ( fk, ' ' , v , ' ' , ) \n \n else:\n fd[k].create_dataset(v,fvv.shape,fvv.dtype,compression=fbencodings[v]['compression'], chunks=True)\n fd[k][v][:]=fvv[:]\n slen=fvv.shape[1]\n sdict[v]=slen\n if slen not in slist:\n slist.append(slen)\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] )\n except:\n pass \n if v in attrs.keys():\n fd[k][v].attrs['description']=numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table'])\n \n else:\n sleno=len(fvv[0])\n slen=sleno\n try:\n slen=int(fvv.dtype.descr[0][1].split('S')[1])\n except: \n pass\n\n sdict[v]=slen\n if slen not in slist:\n slist.append(slen)\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data=string10[:slen] )\n except:\n pass \n \n #x=x.reshape(fvv.shape[0],slen)\n fd[k].create_dataset(v,data=fvv.view('S1').reshape(fvv.shape[0],slen),compression=fbencodings[v]['compression'],chunks=True)\n if v in attrs.keys():\n fd[k][v].attrs['description'] =numpy.bytes_(attrs[v]['description'])\n fd[k][v].attrs['external_table']=numpy.bytes_(attrs[v]['external_table']) \n \n #variables_dic[v] = f[v].values.dtype\n \n for v in fd[k].keys(): #var_selection:\n l=0 \n \n '''\n if v == 'primary_station_id':\n try:\n fd[k][v].dims[l].attach_scale(fd[k]['index'])\n except:\n pass\n \n try:\n slen = len( fd[k][v][0] )\n stringa=numpy.zeros( slen , dtype='S1')\n fd[k].create_dataset( 'string{}'.format(slen), data= stringa ) \n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n except:\n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n \n \n if v == 'station_name':\n try:\n fd[k][v].dims[l].attach_scale(fd[k]['index'])\n slen = len( fd[k][v][0][0])\n stringa=numpy.zeros( slen , dtype='S1')\n except:\n pass\n try:\n fd[k].create_dataset( 'string{}'.format(slen), data= stringa )\n fd[k][v].dims[1].attach_scale( fd[k]['string{}'.format(slen)] ) \n print('done attaching')\n except:\n print('not working')\n \n ''' \n try:\n if type(f[v]) == pd.core.series.Series:\n fvv=f[v].values\n else:\n fvv=f[v]\n if 'string' not in v and v!='index': \n 
fd[k][v].dims[l].attach_scale(fd[k]['index'])\n #print(v,fvv.ndim,type(fvv[0]))\n if fvv.ndim==2 or type(fvv[0]) in [str,bytes,numpy.bytes_]:\n slen=sdict[v]\n #slen=10\n fd[k][v].dims[1].attach_scale(fd[k]['string{}'.format(slen)])\n except:\n pass\n \n \n \n i=4 \n for v in slist:\n s='string{}'.format(v)\n for a in ['NAME']:\n fd[k][s].attrs[a]=numpy.bytes_('This is a netCDF dimension but not a netCDF variable.')\n \n i+=1\n \n return",
"def df_to_h5(df, h5_store, group_name):\n # delete store store if exists \n if group_name in h5_store:\n del h5_store[group_name]\n my_group = h5_store.create_group(group_name)\n print \"Group \" + group_name + \" Exists. Group deleted then created\"\n #If not there, create the group\n else:\n my_group = h5_store.create_group(group_name)\n print \"Group \" + group_name + \" Created\"\n for col in df.columns:\n h5_store[group_name].create_dataset(col, data=df[col].values.astype('int32'))",
"def from_hff(cls, hff_data, name=None, args=None):\n assert isinstance(hff_data, h5py.Group)\n obj = cls()\n obj.lattice_id = hff_data[u'latticeId'][()]\n obj.value = hff_data[u'value'][()]\n if u'transformId' in hff_data:\n obj.transform_id = hff_data[u'transformId'][()]\n return obj",
"def read_generic_hdf5(fname):\n f = h5py.File(fname, \"r\")\n fcontent = {}\n\n def filldict(x, y):\n # create a new container\n tmp = {}\n # add attributes if present\n if len(y.attrs) > 0:\n tmp['attrs'] = dict(y.attrs)\n # add data if it is a dataset\n if isinstance(y, h5py.Dataset):\n tmp['data'] = np.array(y)\n # only add to the dictionary, if we have something meaningful to add\n if tmp != {}:\n fcontent[x] = tmp\n\n f.visititems(filldict)\n\n f.close()\n return fcontent"
] | [
"0.65968794",
"0.65202844",
"0.64542043",
"0.6409155",
"0.62138003",
"0.6023547",
"0.5987318",
"0.5958378",
"0.59519386",
"0.59472114",
"0.58982855",
"0.58897305",
"0.5867148",
"0.58339214",
"0.5762532",
"0.5750586",
"0.5739597",
"0.57342637",
"0.5716493",
"0.5699773",
"0.56248766",
"0.5586285",
"0.55825",
"0.5568207",
"0.555155",
"0.555155",
"0.55461943",
"0.5534207",
"0.55253106",
"0.5490575"
] | 0.8002928 | 0 |
Tries to create a list of evaluated items in data. If an exception is thrown by eval, it just adds the element as-is to the list. | def soft_list_eval(data):
out = []
for x in data:
try:
out.append(eval(x, {}))
except:
try:
out.append(x.decode())
except (AttributeError, SyntaxError):
out.append(x)
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_literal_eval(data):\n if type(data) != type([]):\n try:\n return ast.literal_eval(data)\n except:\n return [data] if data is not np.nan else []\n\n return data",
"def my_evalf(expr, chop=False):\r\n if type(expr) == list:\r\n try:\r\n return [x.evalf(chop=chop) for x in expr]\r\n except:\r\n return expr\r\n try:\r\n return expr.evalf(chop=chop)\r\n except:\r\n return expr",
"def create_list(self, data):\n\n temp = []\n for item in data:\n if len(item) > 2:\n i = 0\n while i < len(item):\n temp.append(item[i:min(i+2, len(item))])\n i += 2\n else:\n temp.append(item)\n data = temp\n\n temp = []\n for item in data:\n if item[-1] == \"{\":\n temp.append(\"[\" + item[0][:-1] + \",\")\n elif item[-1] == \"}\":\n temp.append(\"],\")\n else:\n temp.append(\"[\" + \" \".join(item).replace(\":\", \",\") + \"],\")\n return ast.literal_eval(\"\".join(temp))",
"def eval_list(self, exps: ExperimentList):\n for e in exps:\n self.eval(e)",
"def process_exp_values(exp_data_list):\n exp_data_values = []\n for exp_data in exp_data_list:\n exp_data_values.append(process_exp_value(exp_data))\n return exp_data_values",
"def ex_list(data):\n return tuple(data)",
"def add_input_eval_data(self, *eval_data):\n self._component._input_eval_data.extend([d for d in eval_data])\n return self",
"def to_internal_value(self, data):\n if html.is_html_input(data):\n data = html.parse_html_list(data)\n\n if not isinstance(data, list):\n message = self.error_messages['not_a_list'].format(\n input_type=type(data).__name__\n )\n raise ValidationError(detail={\n 'not_field_errors': [message, ]\n }, api_code='not_a_list')\n\n if not self.allow_empty and len(data) == 0:\n message = self.error_messages['empty']\n raise ValidationError(detail={\n NON_FIELD_ERRORS_KEY: [message]\n }, code='empty')\n\n ret = []\n errors = []\n\n for item in data:\n try:\n validated = self.child.run_validation(item)\n except ValidationError as exc:\n errors.append(exc.detail)\n else:\n ret.append(validated)\n errors.append({})\n\n if any(errors):\n raise ValidationError(detail=errors)\n\n return ret",
"def eval_f(f, xs):\n l = []\n for x in xs:\n l.append(f(x))\n return l",
"def __expression_list(self):\n exprs = []\n try:\n expr = self.expression()\n exprs.extend(expr)\n\n while True:\n self.match_value(Punctuator, \",\")\n expr = self.expression()\n exprs.extend(expr)\n except ParseError:\n return exprs",
"def zzX_eval_list(f, A):\n def rec_eval(g, l, L):\n if l == L:\n return zzx_eval(g, A[-1])\n else:\n h = [ rec_eval(h, l+1, L) for h in g ]\n\n if l <= L - len(A):\n return h\n else:\n return zzx_eval(h, A[-L+l-1])\n\n if not A:\n return f\n\n L = poly_level(f)\n\n if zzX_zero_p(f):\n return zzX_zero(L - len(A))\n\n e = rec_eval(f, 1, L)\n\n if L == len(A):\n return e\n else:\n return zzX_strip(e)",
"def genAssign(self, varList, dataVariableName):\n\n tmpVar = self.genVar('tmp')\n #tmpVar = iter(dataVariableName)\n initPart = [tmpVar.assign( Call(Name('iter', Load()), [Name(dataVariableName, Load())], [], None, None))]\n\n moreVars = []\n affectations = []\n assignations = []\n for i, n in enumerate(varList.elts):\n myTmpName = self.genVar(i)\n affectations += [\n #tmpVar_<i> = tmpVar.next()\n myTmpName.assign( Call(tmpVar.load('next'), [], [], None, None ))\n ]\n\n if isinstance(n, Tuple) or isinstance(n, List):\n moreVars.append( (n, myTmpName.name) )\n else:\n assignations += [\n #var = tmpVar_<i>\n Assign( [n], myTmpName.load() )\n ]\n\n tryAssign = [\n # #try: affectations\n # TryExcept(\n # affectations,\n # #except StopIteration:\n # [ExceptHandler( Name('StopIteration', Load()), None, [\n # #raise ValueError(\"need more value to unpack\")\n # Raise(Call(Name('ValueError', Load()), [Str(\"need more value to unpack\")], [], None, None), None, None),\n # ]\n # )],\n # [])\n ]\n\n testMoreValue = [\n #try:\n TryExcept(\n #tmpVar.next()\n [ Expr(Call(tmpVar.load('next'), [], [], None, None )) ],\n #except StopIteration: pass\n [ExceptHandler( Name('StopIteration', Load()), None, [Pass()]) ],\n #else : raise ValueError(\"too many values to unpack\")\n [\n Raise(Call(Name('ValueError', Load()), [Str(\"too many values to unpack\")], [], None, None), None, None),\n\n ])\n ]\n\n return (initPart + tryAssign + testMoreValue, assignations, moreVars)",
"def visit_List(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n return to_call(to_attribute(self.operator, '__list__'), node.elts)\n return node",
"def _data_sanity_checks(self, explore_iterable):\n data_list = []\n\n for val in explore_iterable:\n\n if not self.f_supports(val):\n raise TypeError(\n \"%s is of not supported type %s.\" % (repr(val), str(type(val)))\n )\n\n if not self._values_of_same_type(val, self._default):\n raise TypeError(\n \"Data of `%s` is not of the same type as the original entry value, \"\n \"new type is %s vs old type %s.\"\n % (self.v_full_name, str(type(val)), str(type(self._default)))\n )\n\n data_list.append(val)\n\n if len(data_list) == 0:\n raise ValueError(\"Cannot explore an empty list!\")\n\n return data_list",
"def test_create_results_data_list(self):\n user_created = self.create_user()\n list_return = self.new_calculation.create_results_data_list(user_created)\n\n list_data = [['Semaine', 'Poids'], [0.0, 100.0], [1.0, 95.0]]\n\n self.assertEqual(list, type(list_return))\n self.assertEqual(str, type(list_return[0][0]))\n self.assertEqual(str, type(list_return[0][1]))\n self.assertEqual(list_data, list_return)\n for elt in list_return:\n self.assertEqual(list, type(elt))\n self.assertEqual(2, len(elt))",
"def list_func(lst: List[valueType]) -> List[valueType]:\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp",
"def _generate_evaluaters(self):\n evaluators = []\n for para_key in self.parameter[1]:\n for value in self.parameter[1][para_key]:\n evaluators.append(evaluaterSearch.evaluaterSearch(self.parameter[2], [para_key, value]))\n self.evaluators = evaluators",
"def eval_list(self, value):\n\n okay = True\n count = 0\n for v in value.elts:\n if not self.eval_value(v):\n okay = False\n break\n count += 1\n return okay",
"def task_5_append_str_to_list_and_return(input_data: List, elem: str):\n my_list = input_data.copy()\n my_list.append(elem)\n return my_list",
"def eval(self):\n raise NotImplementedError",
"def _populate_old_exceptions_list(self, operator_id, executor):\n with create_db_connection(self._config.db_config) as conn, conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._exceptions_lists_old_part_tblname(operator_id)\n cursor.execute(sql.SQL(\"\"\"INSERT INTO {0}(operator_id, imei_norm, virt_imei_shard, imsi)\n SELECT %s, imei_norm, virt_imei_shard, imsi\n FROM gen_exceptions_list(%s)\n \"\"\").format(sql.Identifier(tblname)),\n [operator_id, operator_id])\n num_records = cursor.rowcount\n self._add_pk(conn, tblname=tblname, pk_columns=['imei_norm', 'imsi'])\n\n return num_records, cp.duration",
"def evaluated_data(self) -> Dict:\n if not self.eval_data:\n raise Exception(\"Evaluation Failed\")\n\n statistics = [stat['statistics'] for stat in self.eval_data]\n for count, stat in enumerate(statistics):\n if stat:\n eval_data = stat.split(\"=\")\n char_error = re.findall(r\"\\d+\\.\\d+\", eval_data[-2])[0]\n word_error = eval_data[-1]\n\n self.eval_data[count]['character_error'] = float(char_error)\n self.eval_data[count]['word_error'] = float(word_error)\n del self.eval_data[count]['statistics']\n\n return self.eval_data",
"def _eval(self, node, ctx):\n if node is None:\n return None\n elif isinstance(node, ast.Name): # <identifier>\n # lookup identifiers in local namespace\n if node.id in ctx['locals']:\n _local = ctx['locals'][node.id]\n\n # if local variable contains a list, evaluate each element by threading 'get_expr' over it\n if isinstance(_local, list):\n _retlist = []\n for _local_el in _local:\n # non-string elements are simply passed through\n if not isinstance(_local_el, str):\n _retlist.append(_local_el)\n continue\n\n # string-valued elements are evaluated\n try:\n # NOTE: local variable lookup is disabled when threading\n # over lists that were stored in local variables themselves.\n # This is done to prevent infinite recursion errors for\n # expressions which may reference themselves\n _ret_el = self.get_expr(_local_el, locals=None)\n except NameError as e:\n # one element of the list references a local variable\n # -> stop evaluation and return dummy\n # use NameError object instead of None to identifiy\n # dummy elements unambiguously later\n _retlist.append(e)\n else:\n # evaluation succeeded\n _retlist.append(_ret_el)\n return _retlist\n # local variables containing strings are parsed\n elif isinstance(_local, str):\n return self.get_expr(_local, locals=None)\n # all other types are simply passed through\n else:\n return _local\n\n # if no local is found, try a few builtin Python literals\n elif node.id in ('True', 'False', 'None'): # restrict subset of supported literals\n return ast.literal_eval(node.id) # returns corresponding Python literal from string\n\n # if nothing above matched, assume mistyped identifier and give up\n # NOTE: do *not* assume identifier is a ROOT file path. ROOT file paths\n # must be given explicitly as strings.\n else:\n raise NameError(\"Cannot resolve identifier '{}': not a valid Python literal or a registered local variable!\".format(node.id))\n elif isinstance(node, ast.Str): # <string> : array column\n if ctx['input']:\n # lookup in ROOT file\n return self.get(node.s)\n else:\n # return string as-is\n return node.s\n elif isinstance(node, ast.Num): # <number>\n return node.n\n elif isinstance(node, ast.Call): # node names containing parentheses (interpreted as 'Call' objects)\n # -- determine function to call\n\n # function handle is a simple identifier\n if isinstance(node.func, ast.Name):\n\n # handle special functions\n if node.func.id in self.special_functions:\n _spec_func_spec = self.special_functions[node.func.id]\n # callable for special function (default to no-op)\n _callable = _spec_func_spec.get('func', lambda x: x)\n # modify avaluation context for special function\n ctx = dict(ctx, **_spec_func_spec.get('ctx', {}))\n\n # call a registered input function\n else:\n try:\n _callable = ctx['functions'][node.func.id]\n except KeyError as e:\n raise KeyError(\n \"Cannot call input function '{}': no such \"\n \"function!\".format(node.func.id))\n\n # function handle is an expression\n else:\n # evaluate 'func' as any other node\n _callable = self._eval(node.func, ctx)\n\n # evaluate unpacked positional arguments, if any\n _starargs_values = []\n if node.starargs is not None:\n _starargs_values = self._eval(node.starargs, ctx)\n\n # starred kwargs (**) not supported for the moment\n if node.kwargs:\n raise NotImplementedError(\n \"Unpacking keyword arguments in expressions via \"\n \"** is not supported. 
Expression was: '{}'\".format(\n ast.dump(node, annotate_fields=False)))\n\n # evaluate arguments\n _args = map(lambda _arg: self._eval(_arg, ctx), node.args) + _starargs_values\n _kwargs = {\n _keyword.arg : self._eval(_keyword.value, ctx)\n for _keyword in node.keywords\n }\n\n # call function\n return _callable(*_args, **_kwargs)\n elif isinstance(node, ast.BinOp): # <left> <operator> <right>\n return ctx['operators'][type(node.op)](self._eval(node.left, ctx), self._eval(node.right, ctx))\n elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1\n return ctx['operators'][type(node.op)](self._eval(node.operand, ctx))\n elif isinstance(node, ast.Subscript): # <operator> <operand> e.g., -1\n if isinstance(node.slice, ast.Index): # support subscripting via simple index\n return self._eval(node.value, ctx)[self._eval(node.slice.value, ctx)]\n elif isinstance(node.slice, ast.Slice): # support subscripting via slice\n return self._eval(node.value, ctx)[self._eval(node.slice.lower, ctx):self._eval(node.slice.upper, ctx):self._eval(node.slice.step, ctx)]\n else:\n raise TypeError(node)\n elif isinstance(node, ast.Attribute): # <value>.<attr>\n return getattr(self._eval(node.value, ctx), node.attr)\n elif isinstance(node, ast.List): # list of node names\n return [self._eval(_el, ctx) for _el in node.elts]\n elif isinstance(node, ast.Tuple): # tuple of node names\n return tuple(self._eval(_el, ctx) for _el in node.elts)\n else:\n raise TypeError(node)",
"def __parse_list(self) -> list:\r\n self.idx += 1\r\n l = []\r\n while self.data[self.idx: self.idx + 1] != b'e':\r\n l.append(self.__parse())\r\n self.idx += 1\r\n return l",
"def eval(self):\n raise NotImplemented()",
"def __call__(self, X, Y=None, eval_gradient=False):\n return [f(X, Y=Y, eval_gradient=eval_gradient) for f in self.list_func]",
"def eval(self):\n pass",
"def eval(self):\n pass",
"def eval(self):\n pass",
"def list_value(self) -> global___Expression.RepeatedValue:"
] | [
"0.5927537",
"0.56138974",
"0.5492731",
"0.5462819",
"0.54572475",
"0.54434615",
"0.5393195",
"0.531069",
"0.5254724",
"0.5226049",
"0.5118209",
"0.50996184",
"0.50991887",
"0.50744665",
"0.50701755",
"0.50688696",
"0.5019691",
"0.500871",
"0.5003541",
"0.49978063",
"0.49603376",
"0.4956619",
"0.4954231",
"0.49122322",
"0.49057838",
"0.48883572",
"0.48830733",
"0.48830733",
"0.48830733",
"0.48367772"
] | 0.71692944 | 0 |
Function k_means applies the k-means clustering algorithm on the dataset and prints the crosstab of cluster and actual labels and clustering performance parameters. | def k_means(n_clust, data_frame, true_labels):
k_means = KMeans(n_clusters=n_clust, random_state=123, n_init=30)
k_means.fit(data_frame)
c_labels = k_means.labels_
df = pd.DataFrame({'clust_label': c_labels, 'orig_label': true_labels.tolist()})
ct = pd.crosstab(df['clust_label'], df['orig_label'])
y_clust = k_means.predict(data_frame)
display(ct)
print('% 9s' % 'inertia homo compl v-meas ARI AMI silhouette')
print('%i %.3f %.3f %.3f %.3f %.3f %.3f'
% (k_means.inertia_,
homogeneity_score(true_labels, y_clust),
completeness_score(true_labels, y_clust),
v_measure_score(true_labels, y_clust),
adjusted_rand_score(true_labels, y_clust),
adjusted_mutual_info_score(true_labels, y_clust),
silhouette_score(data_frame, y_clust, metric='euclidean'))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def kmeans_clustering(self,k):\r\n \r\n print(colored(\"Performing K-means clustering with %d clusters\\n\"%k,color = 'yellow', attrs=['bold']))\r\n kmeans = KMeans(n_clusters=k, random_state=0, n_init=10, max_iter=100, n_jobs=-1, ).fit(self.X)\r\n self.labels = kmeans.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The k-means inertia is %0.002f\\n\" %(kmeans.inertia_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels , kmeans.cluster_centers_,kmeans",
"def K_Means(self, n_clusters: int=150):\n start_time = datetime.datetime.now()\n self.func_log(\"\\n\\tIn K-Measn()\")\n \n kmeans = KMeans(n_clusters = n_clusters)\n kmeans.fit(self.descriptor_list)\n self.visual_words = kmeans.cluster_centers_ \n \n end_time = datetime.datetime.now() \n self.func_log(\"\\n\\t\\tTime Cost: {}\\n\".format(end_time-start_time))",
"def kmeans(k, descriptor_list):\r\n kmeans = KMeans(n_clusters = k, n_init=10, verbose = 1) \r\n kmeans.fit(descriptor_list)\r\n visual_words = kmeans.cluster_centers_ \r\n return visual_words",
"def calculate_kmeans(df, clusters=10):\r\n kmeans = KMeans(n_clusters=clusters)\r\n labels = kmeans.fit_predict(df)\r\n\r\n return kmeans, labels",
"def kmeans(X, n_clust):\n\n X = scale(X)\n estimator = KMeans(init = 'k-means++', n_clusters = n_clust, n_init = 10, verbose = 2)\n \n estimator.fit(X)\n labels = estimator.predict(X)\n return labels",
"def k_means_cluster(datapoint, *args, n_clusters = 54):\n\n # run k-means\n km = KMeans(n_clusters = n_clusters, random_state = 1)\n # fit the data to the k-means model\n km.fit(datapoint)\n # obtain labels for the resulting clusters\n labels = km.labels_\n plt.figure(figsize=(9,5))\n # plot the data, coloring points based on the cluster\n #for label in np.unique(labels):\n #plt.scatter(datapoint[labels==label,0], datapoint[labels==label,1],c=label,label=label)\n plt.set_cmap('Set2')\n \n plt.scatter(datapoint[:,0], datapoint[:,1], label = labels, c = labels) \n try:\n plt.scatter(args[0][:,0],args[0][:,1],c=\"red\")\n plt.legend()\n \"\"\" for cat, (x,y) in zip(labels, args[0]):\n #plt.scatter(args[0][:,0],args[0][:,1],c=colors[cat])\n plt.text(x+0.1, y+0.1, cat)\"\"\"\n except IndexError:\n plt.title(\"K-means, {} clusters\".format(n_clusters))\n plt.legend(loc = \"lower right\")\n plt.show()",
"def kmeans(points,n_clusters):\n # create kmeans object\n kmeans = KMeans(n_clusters=n_clusters)\n # fit kmeans object to data\n kmeans.fit(points)\n # print location of clusters learned by kmeans object\n print(kmeans.cluster_centers_)\n # save new clusters for chart\n y_km = kmeans.fit_predict(points)\n\n print('Clusters partition: ', Counter(y_km))\n \n return y_km, kmeans",
"def kmeans_classification(nb_clusters, data2d):\n num_clusters = nb_clusters\n model = KMeans(n_clusters=num_clusters, verbose=0, random_state=6)\n model.fit(data2d)\n labels = model.labels_\n # Clustering assign random labels to female (0 or 1), the prior knowledge tells us male are half of the samples\n # we use the knowledge to assign 0 to female, 1 to male\n if sum(labels) > (data2d.shape[0]) / 2:\n labels = 1 - labels\n print(\"First 30 result, female = 0\")\n print(*labels[:30])\n return labels",
"def UI_KMeans_Orch(\n\t\t\t\t train_data,\n\t\t\t\t orig_data,\n\t\t\t\t cluster_range,\n\t\t\t\t silhouette_analysis = False,\n\t\t\t\t silhouette_cluster_range = range(0,0),\n\t\t\t\t train_col_names = None, \n\t\t\t\t x_feature_index = 0,\n\t\t\t\t y_feature_index = 1,\n\t\t\t\t viz = True,\n\t\t\t\t show = False,\n\t\t\t\t viz_name = \"\",\n\t\t\t\t test_name = \"\"):\n\n\t#Make directory on the users desktop\n\tsegmentation_folder_name = \"Customer-Segmentation-Test\" + str(dt.datetime.now().strftime(\"_%Y-%m-%d_%H.%M.%S\"))\n\tos.makedirs(str(Path.home()) + \"\\\\Desktop\\\\\" + segmentation_folder_name)\n\n\t#Make the log\n\tlog = Log(\"Master-Log\", \"Preprocess-Log\", \"SegMethod-Log\", directory = str(Path.home()) + \"\\\\Desktop\\\\\" + segmentation_folder_name)\n\t\n\tprint(\"\\nData\\n\")\n\t#Strip and replace off any spaces\n\ttest_name = test_name.strip().replace(\" \",\"_\")\n\n\t#Initialize customer segmentation test\n\ttest = CustomerSegmentation(Method = KMeans(), \n\t\t\t\t\t\t\t\tdata = train_data,\n\t\t\t\t\t\t\t\torig_data = orig_data,\n\t\t\t\t\t\t\t\tlog = log, \n\t\t\t\t\t\t\t\ttest_name = test_name)\n\n\t# Set train data and class labels\n\ttest.Preprocess.set_train_data(train_data)\n\n\tprint(\"\\nPCA\\n\")\n\t# Conduct PCA, fit and transformation\n\ttest.Preprocess.PCA_fit(viz = viz, viz_name = viz_name, show = show)\n\ttest.Preprocess.PCA_transform()\n\n\tprint(\"\\nElbow Chart Analysis\\n\")\n\t#Conduct elbow chart analysis\n\ttest.SegMethod.elbow_chart_test(cluster_range, viz = viz,show = show, viz_name = viz_name, profile = True)\n\n\tif silhouette_analysis:\n\t\tprint(\"\\nSilhouette Analysis\\n\")\n\t\t#Conduct Silhouette analysis\n\t\ttest.Preprocess.silhouette_analysis(silhouette_cluster_range, viz = viz, viz_name = viz_name, show = show)\n\n\tprint(\"\\nLog Saving\\n\")\n\t#Save Preprocess and Method logs\n\ttest.Preprocess.PreprocessLog.savePreprocessLog()\n\ttest.SegMethod.MethodLog.saveMethodLog()\n\n\t#Add final masterlog record\n\tlog.addMasterLogRecord(test)\n\tlog.saveMasterLog()",
"def kmeans_clustering(proj_df, k):\r\n k_means= k_means = KMeans(random_state=25, n_clusters=k)\r\n k_means.fit(proj_df)\r\n labels= k_means.predict(proj_df)\r\n \r\n return labels",
"def k_means_PCA(k_means_model, X_emb, y, display_k_means=True):\n\n pca = PCA(n_components=2)\n\n X_emb = StandardScaler().fit_transform(X_emb)\n X_emb = pca.fit_transform(X_emb)\n\n centroids = k_means_model.cluster_centers_\n centroids_pca = pca.transform(centroids)\n n_clusters = centroids_pca.shape[0]\n\n if display_k_means:\n\n labels = k_means_model.labels_\n colors = cm.rainbow(np.linspace(0, 1, n_clusters))\n\n plt.scatter(X_emb[:, 0], X_emb[:, 1], c=colors[labels])\n plt.scatter(centroids_pca[:, 0], centroids_pca[:, 1], c=colors, marker=\"x\", s=25)\n plt.savefig(\"k_means_colors.png\")\n\n else:\n\n labels = utils.one_hot_to_index(y)\n class_colors = cm.rainbow(np.linspace(0, 1, y.shape[1]))\n cluster_colors = cm.rainbow(np.linspace(0, 1, n_clusters))\n\n plt.scatter(X_emb[:, 0], X_emb[:, 1], c=class_colors[labels])\n plt.scatter(centroids_pca[:, 0], centroids_pca[:, 1], c=cluster_colors, marker=\"x\", s=25)\n plt.savefig(\"class_colors.png\")",
"def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)",
"def Demo_K_Means_Orch(log,\n\t\t\t\t train_data,\n\t\t\t\t class_label,\n\t\t\t\t cluster_range,\n\t\t\t\t silhouette_cluster_range,\n\t\t\t\t train_col_names = None, \n\t\t\t\t x_feature_index = 0,\n\t\t\t\t y_feature_index = 1,\n\t\t\t\t viz = False,\n\t\t\t\t show = False,\n\t\t\t\t viz_name = \"\",\n\t\t\t\t test_name = \"\"):\n\n\t\t\n\t#Strip and replace off any spaces\n\ttest_name = test_name.strip().replace(\" \",\"_\")\n\n\t#Initialize customer segmentation test\n\ttest = CustomerSegmentation(Method = KMeans(), \n\t\t\t\t\t\t\t\tdata = train_data,\n\t\t\t\t\t\t\t\tlog = log, \n\t\t\t\t\t\t\t\ttest_name = test_name)\n\n\n\t# Set train data and class labels\n\ttest.Preprocess.set_train_data(train_data, \n\t\t\t\t\t\t\t\t col_names = train_col_names)\n\ttest.Preprocess.set_class_label(class_label)\n\n\t# Conduct PCA, fit and transformation\n\ttest.Preprocess.PCA_fit(viz = viz, viz_name = viz_name, show = show)\n\ttest.Preprocess.PCA_transform()\n\n\n\tif viz:\n\t\t#Create cluster plot visualization if requested\n\t\tcluster_plot = cluster_viz(test.train_data, test.class_label, x_feature_index = x_feature_index, y_feature_index = y_feature_index)\n\t\t\n\t\t#Show the plot at runtime if requested\n\t\tif show:\n\t\t\tcluster_plot.show()\n\n\t\t#Save the image\n\t\ttest.Log.saveImage(cluster_plot, \"cluster_plot\", test.viz_folder_name)\n\n\t#Conduct elbow chart analysis\n\ttest.SegMethod.elbow_chart_test(cluster_range, viz = viz,show = show, viz_name = viz_name, profile = True)\n\n\t#Conduct Silhouette analysis\n\t#test.Preprocess.silhouette_analysis(silhouette_cluster_range, viz = viz, viz_name = viz_name, show = show)\n\n\t#Save Preprocess and Method logs\n\ttest.Preprocess.PreprocessLog.savePreprocessLog()\n\ttest.SegMethod.MethodLog.saveMethodLog()\n\n\t#Add final masterlog record\n\tlog.addMasterLogRecord(test)",
"def kmeans(self, dims, k):\n points = self.get_points(*dims)\n from mlabwrap import mlab\n idx = mlab.kmeans(points, k, nout=1)\n \n # we need to convert the index array from matlab to python (and remember\n # that python is 0-based and not 1-based)\n idx = idx.astype('int')\n \n new_data = self.data[idx]\n tables = [DataTable(\n self.data[idx==(i+1)],\n self.dims,\n self.legends,\n self.tags,\n self.sub_name('kmeans cluster %d' % i)) for i in xrange(k)]\n #for t in tables:\n # t.properties['original_table'] = self\n return tables",
"def KMeansCluster(matrix):\n\n # Possibly need to scale the data first\n data = scale(matrix)\n\n # Approximate the number of clusters using c = root(n/2)\n # num_clusters = int(sqrt(len(matrix) / 2))\n num_clusters = 5\n number_init = 10 # Default\n number_iter = 300\n num_cpus = 2\n\n print \"===================\"\n print \"Training KMeans with (num_clusters, num_init, num_iters, num_cpus)\"\n print num_clusters, number_init, number_iter, num_cpus\n\n # estimator = KMeans(init='k-means++', n_clusters = num_clusters, n_init = number_init)\n # estimator.fit(data)\n # clusters = k_means(data, n_clusters = num_clusters, max_iter=number_iter, n_init = number_iter, \n # init='k-means++', n_jobs = num_cpus)\n clusters = k_means(data, n_clusters = num_clusters, max_iter=number_iter, n_init = number_iter, n_jobs = num_cpus)\n\n\n return clusters",
"def plot_kmeans(df, k_start=1, k_end=10, step=2):\r\n scores = []\r\n # loop through the range start/end\r\n for i in range(k_start, k_end, step):\r\n # new up a kmeans, with clusters\r\n kmeans = KMeans(n_clusters=i)\r\n km = kmeans.fit(df)\r\n # append the scores\r\n scores.append(abs(km.score(df)))\r\n\r\n # create a plot\r\n plt.figure(figsize=(14, 7))\r\n plt.plot(range(k_start, k_end, step), scores, marker='h', color='r')\r\n plt.xlabel('Cluster Count')\r\n plt.ylabel('Errors')\r\n plt.title('Errors by Cluster')\r\n plt.show()\r\n # plt.savefig('cluster_plot.png')\r",
"def clustering_and_visulization(self):\n try:\n centroids, _ = kmeans(self.data_mat, self.k)\n except ValueError:\n print(\"The number of clusters is more than the data points\")\n self.idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[self.idx == i, 0])\n self.plot_list1.append(self.data_mat[self.idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n\n for i in range(self.k):\n self.cluster = self.data_mat[self.idx == i]\n self.clusterlist.append(self.cluster)\n print(self.clusterlist)\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n index_dict ={}\n for i in self.clusterdict:\n index_dict[i] = []\n for i in range(len(self.data_mat)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n index_dict[j].append(i)\n print(\"drugs cluster dict\", index_dict)\n\n self.drugsdict = {}\n for i in index_dict:\n self.drugsdict[i] = []\n drugslist = list(self.df.columns.values)\n print(\"drugs list from dataframe\", drugslist)\n\n for i in index_dict:\n self.drugsdict[i] = [drugslist[index] for index in index_dict[i]]\n\n print(\"drugs cluster dict\", self.drugsdict)\n########################################################################################################################\n clusterdict_from_df_as_drug_frequency = {}\n clusterdict_from_df_as_drug_non_O_frequency = {}\n\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i] = []\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_frequency[i].append(self.df.iloc[i].to_dict()) #\n print(\"packs in dict form of drugs frequency\", clusterdict_from_df_as_drug_frequency)\n\n for i in range(len(self.df)):\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_frequency[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n for i in range(len(self.df)):\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in range(len(self.df)):\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in range(len(self.df)):\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse(\n [list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n robot_for_packs_dict = {}\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = []\n\n # for i in range(len(self.df)):\n for i in range(len(self.df)):\n for j in clusterdict_of_non_repeated_drugs[i]:\n if j in self.drugsdict[0]:\n robot_for_packs_dict[i].append(0)\n elif j in self.drugsdict[1]:\n robot_for_packs_dict[i].append(1)\n for i in range(len(self.df)):\n robot_for_packs_dict[i] = set(robot_for_packs_dict[i])\n\n for i in range(len(self.df)):\n 
robot_for_packs_dict[i] = list(more_itertools.collapse(robot_for_packs_dict[i]))\n print('\\n')\n print(\"clusterdict_of_non_repeated_drugs\", robot_for_packs_dict)",
"def clustering_and_visulization(self):\n centroids, _ = kmeans(self.data_mat, self.k)\n idx, _ = vq(self.data_mat, centroids)\n for i in range(self.k):\n\n self.plot_list.append(self.data_mat[idx == i, 0])\n self.plot_list1.append(self.data_mat[idx == i, 1])\n\n for j in range(self.k):\n plot(self.plot_list[j], self.plot_list1[j], self.color_code[j])\n plot(centroids[:, 0], centroids[:, 1], 'sg', markersize=8)\n show()\n for i in range(self.k):\n self.cluster = self.data_mat[idx == i]\n self.clusterlist.append(self.cluster)\n\n for i in range(len(self.clusterlist)):\n self.clusterdict[i] = self.clusterlist[i]\n print(self.clusterdict)\n\n\n self.indexdict = {}\n for i in self.clusterdict:\n self.indexdict[i] = []\n print(len(self.clusterdict))\n for i in range(len(idx)):\n for j in range(len(self.clusterdict)):\n if (self.clusterdict[j][:] == self.data_mat[i]).any():\n self.indexdict[j].append(i)\n print(\"cluster dict of packs\",self.indexdict)\n\n self.drugdict = {}\n for i in self.clusterdict:\n self.drugdict[i] = []\n self.drug=[]\n for i in range(len(self.indexdict.keys())):\n for j in range(len(self.indexdict[i])):\n self.drugdict[i].append(self.df.iloc[self.indexdict[i][j]].to_dict())\n print(\"drugs dict with their frequencies\",self.drugdict)\n clusterdict_from_df_as_drug_non_O_frequency = {}\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs ={}\n for i in self.drugdict:\n clusterdict_from_df_as_drug_non_O_frequency[i] = []\n for i in self.drugdict:\n for j in self.drugdict[i]:\n clusterdict_from_df_as_drug_non_O_frequency[i].append({x: y for x, y in j.items() if y != 0})\n print(\"clusterdict_from_df_as_drug_non_O_frequency\", clusterdict_from_df_as_drug_non_O_frequency)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in clusterdict_from_df_as_drug_non_O_frequency[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i]=list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n\n\n print(\"only drugs only\", clusterdict_of_non_repeated_drugs)\n\n########################################################################################################################\n try:\n common_drug_list = [x for x in clusterdict_of_non_repeated_drugs[0] if x in clusterdict_of_non_repeated_drugs[1]]\n print('\\n')\n print(\"common drug list\", common_drug_list)\n total_frequency_of_drugs_dict = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict[i] = []\n\n for drug in common_drug_list:\n\n for cluster_keys in clusterdict_from_df_as_drug_non_O_frequency.keys():\n temp_list = []\n for cluster_values_as_list in clusterdict_from_df_as_drug_non_O_frequency[cluster_keys]:\n try:\n temp_list.append(cluster_values_as_list[str(drug)])\n except KeyError:\n print(\"\\t\")\n total_frequency_of_drugs_dict[cluster_keys].append(np.sum(temp_list))\n print(\"total drugs frequency\",total_frequency_of_drugs_dict)\n total_frequency_of_drugs_dict_with_drugs = {}\n for i in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[i] = []\n temp_list1 = []\n temp_list2 = []\n for keys in self.drugdict.keys():\n temp_list1.append(clusterdict_of_non_repeated_drugs[keys])\n for keys in self.drugdict.keys():\n 
temp_list2.append(total_frequency_of_drugs_dict[keys])\n temp_list3 = []\n for i in temp_list1:\n for j in temp_list2:\n temp_list3.append(dict(zip(i,j)))\n temp_list4 = temp_list3[:2]\n print('\\n')\n for keys in self.drugdict:\n total_frequency_of_drugs_dict_with_drugs[keys].append(temp_list4[keys])\n print(\"total frequency with drugs dict\",total_frequency_of_drugs_dict_with_drugs)\n\n final_drugs_in_clusters_dict = {}\n for i in self.drugdict:\n final_drugs_in_clusters_dict[i] = []\n compare_list = []\n for drug in common_drug_list:\n compare_list.append(min(total_frequency_of_drugs_dict_with_drugs[0][0][drug], total_frequency_of_drugs_dict_with_drugs[1][0][drug]))\n print(\"compare list\",compare_list)\n for values in total_frequency_of_drugs_dict_with_drugs.values():\n for key1, value1 in values[0].items():\n if value1 in compare_list:\n\n key2 =values[0].keys()[values[0].values().index(value1)]\n values[0].pop(key2, None)\n\n\n print('final dict with deleted keys', total_frequency_of_drugs_dict_with_drugs)\n\n clusterdict_from_as_drugs_only_as_list = {}\n clusterdict_of_non_repeated_drugs = {}\n\n for i in self.drugdict:\n clusterdict_from_as_drugs_only_as_list[i] = []\n\n for i in self.drugdict:\n for j in total_frequency_of_drugs_dict_with_drugs[i]:\n clusterdict_from_as_drugs_only_as_list[i].append(j.keys())\n print(\"only keys drugs with drugs name\", clusterdict_from_as_drugs_only_as_list)\n print('\\n')\n\n for i in self.drugdict:\n clusterdict_of_non_repeated_drugs[i] = list(more_itertools.collapse([list(x) for x in set([tuple(x) for x in clusterdict_from_as_drugs_only_as_list[i]])]))\n print(\"only drugs\",clusterdict_of_non_repeated_drugs)\n\n final_robot_packs_dict = {}\n for i in self.drugdict:\n final_robot_packs_dict[i] = []\n\n winner_drug_dict = {}\n for i in common_drug_list:\n winner_drug_dict[i] = []\n for drug in common_drug_list:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n winner_drug_dict[str(drug)].append(0)\n if drug in clusterdict_of_non_repeated_drugs[1]:\n winner_drug_dict[str(drug)].append(1)\n print(\"winner drug dict\",winner_drug_dict)\n\n for i in self.indexdict:\n print(i)\n for pack in self.indexdict[i]:\n packdict = self.df.iloc[pack].to_dict()\n packdict_non_0 = {x: y for x, y in packdict.items() if y != 0}\n packdict_non_0_key = packdict_non_0.keys()\n for drug in packdict_non_0_key:\n if drug in clusterdict_of_non_repeated_drugs[0]:\n final_robot_packs_dict[0].append(pack)\n elif drug in clusterdict_of_non_repeated_drugs[1]:\n final_robot_packs_dict[1].append(pack)\n\n final_robot_packs_dict[i].append(pack)\n for commondrugs in winner_drug_dict:\n for winnercluster in winner_drug_dict[commondrugs]:\n if winnercluster==0:\n loosercluster =1\n if winnercluster == 1:\n loosercluster = 0\n if commondrugs in packdict_non_0_key and i==loosercluster:\n try:\n final_robot_packs_dict[i].remove(pack)\n final_robot_packs_dict[winnercluster].append(pack)\n except ValueError:\n print('\\t')\n\n for i in self.indexdict:\n final_robot_packs_dict[i] = set(final_robot_packs_dict[i])\n\n print(\"final which pack which robot dict\",final_robot_packs_dict)\n\n except IndexError:\n print(\"No common drugs\")",
"def kmeans_vis(Df):\n data_pca = scale(Df)\n k_means = KMeans(n_clusters=3)\n k_means.fit(data_pca)\n k_means_predicted = k_means.predict(data_pca)\n plt.figure(\"K-Means on 3 component analysis\", figsize=(7, 7))\n ax_2 = plt.axes(projection=\"3d\")\n ax_2.scatter(\n data_pca[:1500, 0],\n data_pca[:1500, 1],\n data_pca[:1500, 2],\n c=k_means_predicted[:1500],\n cmap=\"Set2\",\n s=20,\n )\n ax_2.set_title(\"K-means on PCA result\", fontsize=20)\n ax_2.set_xlabel(\"Principal Component 1\", fontsize=15)\n ax_2.set_ylabel(\"Principal Component 2\", fontsize=15)\n ax_2.set_zlabel(\"Principal Component 3\", fontsize=15)\n plt.show()",
"def distortion_of_kmeans_clustering(data_table):\n num_iritations = 5\n singleton_list = []\n for line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n distortion_list = []\n for num in range(20, 5, -1):\n cluster_list = kmeans_clustering(singleton_list,num, num_iritations)\n distortion = compute_distortion(data_table, cluster_list)\n distortion_list.append(distortion)\n return distortion_list\n\n#####################################################################\n# Code to load cancer data, compute a clustering and\n# visualize the results\n\n\n# def run_example():\n# \"\"\"\n# Load a data table, compute a list of clusters and\n# plot a list of clusters\n#\n# Set DESKTOP = True/False to use either matplotlib or simplegui\n# \"\"\"\n# data_table = load_data_table(DATA_3108_URL)\n# singleton_list = []\n# for line in data_table:\n# singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n num_clusters = 16\n # cluster_list = sequential_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"sequential clusters\")\n #\n # cluster_list = alg_project3_solution.hierarchical_clustering(singleton_list, num_clusters)\n # print(\"Displaying\", len(cluster_list), \"hierarchical clusters\")\n #\n # cluster_list = alg_project3_solution.kmeans_clustering(singleton_list, num_clusters, 5)\n # print(\"Displaying\", len(cluster_list), \"k-means clusters\")\n\n # draw the clusters using matplotlib or simplegui\n #\n # if DESKTOP:\n # # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, False)\n # alg_clusters_matplotlib.plot_clusters(data_table, cluster_list, True) #add cluster centers\n\n # else:\n # alg_clusters_simplegui.PlotClusters(data_table, cluster_list) # use toggle in GUI to add cluster centers",
"def main():\n data = Dummy(n_samples=500, n_dim=3)\n X = data.get_dummy()\n clustering = Kmeans(X, K=5, display=False)\n clustering.run()\n print(f\"Number of iterations: {clustering.num_iterations}\\n\")\n\n \"\"\" Test example of clustering_kmeans with unknown number of clusters K \"\"\"\n clustering = Kmeans(X,)\n clustering.silhouette_find_k()\n print(f\"Number of centroids found: {clustering.num_K}\")",
"def start_algorithm(self):\r\n vectors = self.vectorize_data()\r\n kmeans = KMeans(init='k-means++', n_clusters=self.cluster_amount, n_init=10)\r\n kmeans.fit(vectors)\r\n return self.cluster_tweet(kmeans.labels_)",
"def evaluation_k_means(X_selected, n_clusters, y, n_jobs = 1):\n k_means = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10, max_iter=300,\n tol=0.0001, precompute_distances=True, verbose=0,\n random_state=None, copy_x=True, n_jobs=n_jobs)\n \n k_means.fit(X_selected)\n y_predict = k_means.labels_\n \n # calculate NMI\n nmi = normalized_mutual_info_score(y, y_predict)\n \n # calculate ACC\n y_permuted_predict = best_map(y, y_predict)\n acc = accuracy_score(y, y_permuted_predict)\n \n return nmi, acc",
"def print_cluster_summary(algo, i):\n assert algo in ['DBSCAN', 'KMeans', 'DBSCAN_topics', 'KMeans_topics']\n \n cluster_df = apps_df.copy()\n cluster_df = cluster_df[cluster_df[algo] == i]\n print('Cluster {} consists out of {} apps.'.format(str(i), str(cluster_df.shape[0])))\n titles = list(cluster_df['title'])\n print('The apps are:\\n {}'.format('\\n\\t'.join(titles)))",
"def cluster_kmeans(self, data, n_clusters):\n km = cl.KMeans(n_clusters)\n kmf = km.fit(data)\n\n labels = kmf.labels_\n\n return labels, [np.nan]",
"def kmeansClustering(data, x_scaled, clust, random_s):\n np.random.seed(random_s)\n #Performs clustering with the right number of clusters\n kmeans = KMeans(n_clusters=clust, random_state=random_s, n_jobs=-1).fit(x_scaled)\n kmeans = pd.DataFrame(kmeans.labels_, index=data.index, columns=[\"Clusters\"])\n #Merge on our main dataframe for better vizualisation of the clusters\n data_clust = pd.merge(data, kmeans, left_index=True, right_index=True, how='left')\n return data_clust",
"def kmeansWithpca(df, plot=False):\n ipca = IncrementalPCA(n_components=2).fit(df)\n X = ipca.transform(df)\n kmeans = KMeans(n_clusters=5, random_state=0, init='k-means++', n_init=10).fit(X)\n if plot == True:\n # Step size of the mesh. Decrease to increase the quality of the VQ.\n h = .02\t# point in the mesh [x_min, x_max]x[y_min, y_max].\n\n # Plot the decision boundary. For that, we will assign a color to each\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n # Obtain labels for each point in mesh. Use last trained model.\n Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n plt.figure(1)\n plt.clf()\n plt.imshow(Z, interpolation='nearest', extent=(xx.min(), xx.max(), yy.min(), yy.max()), cmap=plt.cm.Paired, aspect='auto', origin='lower')\n plt.plot(X[:, 0], X[:, 1], 'k.', markersize=2)\n\n # Plot the centroids as a white X\n centroids = kmeans.cluster_centers_\n plt.scatter(centroids[:, 0], centroids[:, 1], marker='x', s=169, linewidths=3, color='w', zorder=10)\n plt.title('K-means clustering on the digits dataset (PCA-reduced data)\\n' 'Centroids are marked with white cross')\n plt.xlim(x_min, x_max); plt.ylim(y_min, y_max)\n plt.xticks(()); plt.yticks(()); plt.show()\n print(kmeans.labels_)\n\n return kmeans.labels_, kmeans.cluster_centers_",
"def kmean_test_n_clusters(data, n_clusters):\n n_clusters += 1\n kmeans_per_k = [KMeans(n_clusters=k, random_state=42).fit(data)for k in range(1, n_clusters)]\n inertias = [model.inertia_ for model in kmeans_per_k]\n silhouette_scores = [silhouette_score(data, model.labels_)\n for model in kmeans_per_k[1:]]\n\n fig, (ax1, ax2) = plt.subplots(2,1, figsize=(8, 3.5))\n\n ax1.plot(range(1, n_clusters), inertias, \"bo-\")\n ax1.set_xlabel(\"$k$\", fontsize=14)\n ax1.set_ylabel(\"Inertia\", fontsize=14)\n #ax1.annotate('Elbow',\n # xy=(4, inertias[3]),\n # xytext=(0.55, 0.55),\n # textcoords='figure fraction',\n # fontsize=16,\n # arrowprops=dict(facecolor='black', shrink=0.1)\n # )\n ax2.plot(range(2, n_clusters), silhouette_scores, \"bo-\")\n ax2.set_xlabel(\"$k$\", fontsize=14)\n ax2.set_ylabel(\"Silhouette score\", fontsize=14)\n #plt.axis([2, 8, 0.3, 0.475])\n plt.show()",
"def cluster_feature(feature_mat, k):\n whitened = whiten(feature_mat.transpose())\n centroid, distortion = kmeans(whitened, k)\n\n return centroid, distortion",
"def kmean(encoder,tsne,true_data,true_label):\n enc_output = encoder.predict(true_data)\n kmean = KMeansClustering()\n kmean.fit(enc_output)\n pred = kmean.predict(enc_output)\n accuracy(true_label,pred)\n confusion_matrix(true_label,pred, save_name = \"confusion_matrix_kmean.png\")\n tsne.tsne_plot(true_data,pred,save_data_dir =\"kmean\",save_name=\"kmean\")"
] | [
"0.7423728",
"0.687318",
"0.6822117",
"0.6821187",
"0.65778583",
"0.6511952",
"0.6426332",
"0.64061534",
"0.63889825",
"0.6368608",
"0.6357942",
"0.6357617",
"0.6327497",
"0.6320824",
"0.62938166",
"0.6260522",
"0.6256508",
"0.6255686",
"0.6255126",
"0.6225553",
"0.61986464",
"0.6193358",
"0.61849445",
"0.6179392",
"0.6176688",
"0.6151357",
"0.61359113",
"0.61029",
"0.60897607",
"0.60759294"
] | 0.7953019 | 0 |
Displays the list of options. | def display_options(self):
print()
options = list(self.get_commands().values())
options.sort(key=lambda op: int(op.name))
for option in options:
print(f'{"%3d" % int(option.name)}. {option.description}') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def display(self):\n\n print('\\n')\n for key, val in self.option.items():\n print(key, val, '\\n') # make it more confortable to read\n self.get_choice() # launch automaticly the choice method after display",
"def show_menu():\r\n print(\"Write a number of the next options:\")\r\n for key, value in enumerate(options):\r\n print(\"{}. {}\".format(key, value))",
"def getopt_display(self):\n self._print_enum_opt(\"display\", DISPLAYS)",
"def help_opt(self):\n print(OPTIONS)",
"def do_list(self, args):\n if args.option == 'config':\n print(list_config())\n if args.option == 'queries':\n for k,v in list_queries().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'jobs':\n update_jobs(CLI_GLOBALS.ENGAGEMENT)\n for k,v in list_jobs().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'results':\n for i in list_results():\n print(i)\n if args.option == 'key':\n for k,v in list_key().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'engagement':\n print(list_engagement())",
"def display_menu_options(length):\r\n print('\\n***********************************************\\nVeuillez choisir une option entre 1 et', str(length))",
"def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')",
"def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')",
"def display_menu():\n print(\"\"\"\\nChoose option:\n (1) List statistics\n (2) Display 3 cities with longest names\n (3) Display county's name with the largest number of communities\n (4) Display locations, that belong to more than one category\n (5) Advanced search\n (0) Exit program\"\"\")",
"def print_options(order_list, option_list):\n menu = ''\n for order, text in zip(order_list, option_list):\n menu += (str(order) + ' - ' + text + '\\n')\n return menu",
"def print_options(self):\n for option in self._options.items():\n print \"{0} = {1}\".format(option[0], option[1])",
"def list(self):\n return self._options",
"def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"",
"def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()",
"def show(self):\n # Display the menu.\n self._print_menu()\n\n # Wait for input.\n selection = None\n while selection not in self.__options:\n selection = input(\"(Choose an option): \")\n\n # Perform the command.\n _, command = self.__options[selection]\n return command(selection)",
"def options_menu(title, options):\n\tprint width_screen * \"-\"\n\tprint(title.center(width_screen))\n #\tprint '{:^{width_screen}}'.format(title,width_screen)\n\tprint width_screen * \"-\"\n\tfor x in range(len(options)):\n\t\tprint str(x+1) + \". {}\".format(options[x])\n\tprint width_screen * \"-\"\n\treturn(options)",
"def listOptions(lst):\n for k, e in enumerate(lst,1):\n print(\"{:^15}{:<10}\".format(k,e))",
"def test_list_options(self):\n pass",
"def display_other_options():\n print(\"> - Next Song page.\")\n print(\"< - Previous song page.\")\n print(\"q - to quit\")",
"async def list(self, ctx: MyContext):\n if ctx.subcommand_passed is None:\n await ctx.send_help(\"wormhole list\")",
"def display(self):\n while (True):\n self.print()\n choice = self.get_choice()\n if (choice == len(self.options)):\n break\n else:\n self.options[choice].function()",
"def printCurrentOptions(self):\n if self.comm.rank == 0:\n print('+---------------------------------------+')\n print('| All %s Options: |' % self.name)\n print('+---------------------------------------+')\n # Need to assemble a temporary dictionary\n tmpDict = {}\n for key in self.options:\n tmpDict[key] = self.getOption(key)\n pp(tmpDict)",
"def print_menu(title, list_options, exit_message):\n print((\"\\n\" + title + \":\"))\n for i in range(1, len(list_options) + 1):\n print(\"(\" + str(i) + \") \" + list_options[i - 1])\n print(\"(0) \" + exit_message)",
"def print_menu():\n print(\"\\nMenu:\")\n print(\"\\t\" + colored('+', 'red') + \" for adding a complex number\")\n print(\"\\t\" + colored('s', 'red') + \" for showing the list of all complex numbers\")\n print(\"\\t\" + colored('f', 'red') + \" for filtering the list\")\n print(\"\\t\\t-the new list will contain only the numbers between indices `start` and `end`\")\n print(\"\\t\" + colored('u', 'red') + \" to undo the last operation\")\n print(\"\\t\" + colored('x', 'red') + \" to close the calculator\")",
"def menuItem(*args):\n\toptionsWindow()",
"def display_collected():\n os.system('clear') # clearscreen\n print('BS4 widget generator')\n print('-' * 20)\n print('options selected:')\n for col in collected:\n print(col)\n\n print('-' * 20)\n\n return",
"def display_help(self):\n pass",
"def show_help():\n clear_screen()\n print(\"\"\"\n What should we pick up at the store?\\n\n Enter 'DONE' or 'QUIT' to stop adding items.\n Enter 'HELP' for this help.\n Enter 'SHOW' to see your current list.\n Enter 'REMOVE' to remove an item from the list.\n \"\"\")",
"def menu_cust(self):\n intro = \"Here are the options available for you to choose from:\"\n option1 = \"[1] UNLOCK THE CAR\"\n option2 = \"[2] RETURN THE CAR\"\n option3 = \"[3] BACK\"\n print(intro, option1, option2, option3, sep='\\n')",
"def _print_menu(self):\n # Create header line.\n header = \"%s Menu:\" % (self.__name)\n header = header.title()\n print(header)\n\n # Show the iterations counter.\n iterations = self._status.get_value(\"iterations\")\n print(\"(Iteration %d)\" % (iterations))\n\n self._print_custom()\n\n # Display the options alphabetically.\n option_names = list(self.__options.keys())\n option_names.sort()\n for option in option_names:\n desc, command = self.__options[option]\n print(\"\\t%s: %s\" % (option, desc))"
] | [
"0.7148875",
"0.7040174",
"0.69901764",
"0.6922297",
"0.68601656",
"0.66925323",
"0.6622868",
"0.6622868",
"0.6617802",
"0.6555089",
"0.6488466",
"0.6478217",
"0.6464231",
"0.64514065",
"0.6409447",
"0.6362477",
"0.63184685",
"0.6313654",
"0.6292926",
"0.6234333",
"0.62332445",
"0.62302506",
"0.623007",
"0.6228186",
"0.6209911",
"0.6191491",
"0.6169082",
"0.6166833",
"0.6132914",
"0.6120027"
] | 0.7489189 | 0 |
Vary a (sub)query represented by the given operator node | def vary_query(operator_node, num_columns, tables_to_colums):
# Adapt predicates of this Operator Node
if operator_node.original_predicates_list is not None and len(operator_node.original_predicates_list) > 0:
valid_ids = tables_to_colums[operator_node.table_id]
new_predicates = []
# For every predicate...
for (p_column_id, p_operator, p_value) in operator_node.original_predicates_list:
# ... choose a new column id randomly...
p_column_id = random.choice(valid_ids)
new_predicates.append((p_column_id, p_operator, p_value))
# ... and use the new predicates afterwards
operator_node.set_predicates(new_predicates, num_columns)
# Vary sub queries
for child in operator_node.children:
vary_query(child, num_columns, tables_to_colums) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mutate_single_node(self, node, operator):\n if node.__class__ is operator[0] or (operator[1] is StatementDeletion and node.__class__ is ast.Pass):\n mutated_node = operator[1].mutate(node)\n node = mutated_node\n\n return node",
"def mutate_bySingleOperator(self, root, operator):\n self.operator = operator\n\n ast.fix_missing_locations(root)\n # traverse the target ast tree and mutate interesting node\n mutated_ast = self.visit(root)\n ast.fix_missing_locations(root)\n\n return mutated_ast",
"def modify_rhs(self, expr, operator, var):\n assert isinstance(var, cellml_variable)\n # Ensure var is available in expr's component\n local_var_name = var.name\n source_comp = var.component\n expr_comp = expr.component\n if source_comp != expr_comp:\n local_var = self.connect_variables(var, (expr_comp.name, var.fullname(cellml=True)))\n local_var_name = local_var.name\n # Change expr\n rhs = expr.eq.rhs\n expr.safe_remove_child(rhs)\n new_rhs = mathml_apply.create_new(var.model, operator, [rhs, local_var_name])\n expr.xml_append(new_rhs)\n return expr",
"def __rsub__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(sub, other)",
"def mod_op_eval(node, table):\n\n expr_value = expr_eval(node.expr, table)\n\n if node.var.kind == \"ARRAY_REF\":\n # TODO: Refactor to use the Array object's fetch.\n array = table[node.var.name]\n index = expr_eval(node.var.expr, table)\n\n # A[x] += 1 expands into A[x] = A[x] + 1.\n array[index] = bin_ops[node.op](array[index], expr_value)\n\n elif node.var.kind == \"VAR_REF\":\n # x += 1 expands into x = x + 1.\n old_value = table[node.var.name]\n table[node.var.name] = bin_ops[node.op](old_value, expr_value)\n\n return table",
"def operation(self, other=None, operator=None):\n terms = [self]\n if other is not None and operator is not EmptyQuery:\n terms.append(other)\n return Operation(terms, operator=operator)",
"def assign_operator(cls, quad):\n\t\tvalue = cls.get_address_value(quad.left_operand)\n\t\tif quad.right_operand :\n\t\t\tcls.set_arr_value(quad.result, quad.right_operand, value)\n\t\telse:\n\t\t\tcls.set_address_value(quad.result, value)",
"def __sub__(self, other: Any) -> ColumnOperators:\n return self.operate(sub, other)",
"def _append_operator(self, operator):",
"def RewriteTerm(self, key, op, operand, key_type):\n if key not in self._keys or op != '=':\n return None\n return operand",
"def _remove_operator(self, operator):",
"def operator_to_vector(op):\n if op.type in ['super', 'operator-ket', 'operator-bra']:\n raise TypeError(\"Cannot convert object already \"\n \"in super representation\")\n return Qobj(stack_columns(op.data),\n dims=[op.dims, [1]],\n type='operator-ket',\n superrep=\"super\",\n copy=False)",
"def test_operator_adapt(self):\n\n # test string concatenation\n expr = test_table.c.data + \"somedata\"\n assert testing.db.execute(select([expr])).scalar() == \"somedatasomedata\"\n\n expr = test_table.c.id + 15\n assert testing.db.execute(select([expr])).scalar() == 16\n\n # test custom operator conversion\n expr = test_table.c.avalue + 40\n assert expr.type.__class__ is test_table.c.avalue.type.__class__\n\n # value here is calculated as (250 - 40) / 10 = 21\n # because \"40\" is an integer, not an \"avalue\"\n assert testing.db.execute(select([expr.label('foo')])).scalar() == 21\n\n expr = test_table.c.avalue + literal(40, type_=MyCustomType)\n \n # + operator converted to -\n # value is calculated as: (250 - (40 * 10)) / 10 == -15\n assert testing.db.execute(select([expr.label('foo')])).scalar() == -15\n\n # this one relies upon anonymous labeling to assemble result\n # processing rules on the column.\n assert testing.db.execute(select([expr])).scalar() == -15",
"def swap_op_eval(node, table):\n # TODO: This is a mess. Array object simplification?\n\n if node.left.kind == \"VAR_REF\" and node.right.kind == \"VAR_REF\":\n\n l, r = node.left.name, node.right.name\n table[l], table[r] = table[r], table[l]\n\n if node.left.kind == \"ARRAY_REF\" and node.right.kind == \"VAR_REF\":\n\n l_array, r = table[node.left.name], node.right.name\n l_index = expr_eval(node.left.expr, table)\n\n l_array[l_index], table[r] = table[r], l_array[l_index]\n\n if node.left.kind == \"VAR_REF\" and node.right.kind == \"ARRAY_REF\":\n\n l, r_array = node.left.name, table[node.right.name]\n r_index = expr_eval(node.right.expr, table)\n\n table[l], r_array[r_index] = r_array[r_index], table[l]\n\n if node.left.kind == \"ARRAY_REF\" and node.right.kind == \"ARRAY_REF\":\n\n L, R = left_array, right_array = table[node.left.name], table[node.right.name]\n i = left_index = expr_eval(node.left.expr, table)\n j = right_index = expr_eval(node.right.expr, table)\n\n L[i], R[j] = R[j], L[i]\n\n return table",
"def vector_to_operator(op):\n if not op.isoperket:\n raise TypeError(\"only defined for operator-kets\")\n if op.superrep != \"super\":\n raise TypeError(\"only defined for operator-kets in super format\")\n dims = op.dims[0]\n return Qobj(unstack_columns(op.data, (np.prod(dims[0]), np.prod(dims[1]))),\n dims=dims,\n copy=False)",
"def _reduce_expr(tree, tok):\n second = tree.pop()\n if len(tree) > 0 and not Parser._is_unary_op(tok):\n first = tree.pop()\n expr = BinaryExpression(first, tok, second)\n else:\n expr = UnaryExpression(second, tok)\n tree.append(expr)",
"def __rtruediv__(self, other: Any) -> ColumnOperators:\n return self.reverse_operate(truediv, other)",
"def visitor(node: NodeT, left_distribute: bool) -> NodeT:\n if isinstance(node, ir.AddSub):\n items = OrderedDict() # type: Dict[ir.Node, List[Tuple[str, ir.Node]]]\n new_operators = []\n new_operands = []\n for operator, operand in zip(('+',) + getattr(node, 'operator'),\n getattr(node, 'operand')):\n if (operator == '+' and isinstance(operand, ir.MulDiv) and\n getattr(operand, 'operator') == ('*',)):\n if left_distribute:\n coeff, item = getattr(operand, 'operand')\n else:\n item, coeff = getattr(operand, 'operand')\n items.setdefault(coeff, []).append((operator, item))\n else:\n new_operators.append(operator)\n new_operands.append(operand)\n for coeff, item in items.items():\n operator, operand = zip(*item)\n assert operator[0] == '+'\n new_operators.append(operator[0])\n if len(operand) > 1:\n new_item = ir.AddSub(operator=operator[1:], operand=operand)\n else:\n new_item = operand[0]\n if left_distribute:\n children = coeff, new_item\n else:\n children = new_item, coeff\n new_operands.append(ir.MulDiv(operator=('*',), operand=children))\n if len(new_operands) > 1:\n assert new_operators[0] == '+'\n new_node = ir.AddSub(operator=tuple(new_operators[1:]),\n operand=tuple(new_operands))\n if new_node != node:\n return new_node # type: ignore\n elif new_operands and new_operands[0] != node:\n return new_operands[0]\n return node",
"def __eq__(self, other: Any) -> ColumnOperators: # type: ignore[override]\n return self.operate(eq, other)",
"def __sub__(self, other):\r\n if isinstance(other, Node):\r\n new_node = sub_op(self, other)\r\n else:\r\n # Add by a constant stores the constant in the new node's const_attr field.\r\n # 'other' argument is a constant\r\n new_node = sub_byconst_op(self, other)\r\n return new_node",
"def applyOperator(self, operator, operand):\n if self.currentTotal == None:\n self.currentTotal = operand\n elif operator == \"=\":\n self.equalsOp(operand)\n elif self.previousOperand:\n self.previousOperand = None\n else:\n self.computeTotal(operator, operand)\n if operator != \"=\":\n self.previousOperator = operator",
"def where(self, value, operator=\"\"):\n return f\"\"\"\nto_tsvector('english', json->>'{sqlq(self.name)}') @@ plainto_tsquery(${{arg}}::text)\"\"\"",
"def pull_out_quantifications_from_left_across_binary_operator(formula:\r\n Formula) -> \\\r\n Tuple[Formula, Proof]:\r\n assert has_uniquely_named_variables(formula)\r\n assert is_binary(formula.root)\r\n # Task 11.7.1\r\n\r\n\r\n prover = Prover(Prover.AXIOMS.union(ADDITIONAL_QUANTIFICATION_AXIOMS))\r\n\r\n # Basic Case - No quantifier to change n = 0 and no n = 1\r\n if not is_quantifier(formula.first.root):\r\n ccl = equivalence_of(formula, formula)\r\n prover.add_tautology(ccl)\r\n return formula, prover.qed()\r\n\r\n\r\n # Without the predicate\r\n form = Formula(formula.root, formula.first.predicate, formula.second)\r\n pred, proof = pull_out_quantifications_from_left_across_binary_operator(form)\r\n\r\n my_quantifier = formula.first.root\r\n\r\n # Define (or change) the quantifier and define the axioms depending on the binary operator\r\n if formula.root == \"->\":\r\n if my_quantifier == \"A\":\r\n my_quantifier = \"E\"\r\n axiom_scd = 10\r\n else: # \"E\"\r\n my_quantifier = \"A\"\r\n axiom_scd = 11\r\n\r\n elif formula.root == \"&\":\r\n axiom_scd = 2 if my_quantifier == \"A\" else 3\r\n\r\n else: # \"|\" or\r\n axiom_scd = 6 if my_quantifier == \"A\" else 7\r\n\r\n\r\n\r\n # proof for changing quantifier\r\n # because add_proof() is my friend\r\n step1 = prover.add_proof(proof.conclusion, proof)\r\n\r\n form2 = Formula(\"->\", proof.conclusion, equivalence_of(Formula(my_quantifier, formula.first.variable, form),\r\n Formula(my_quantifier, formula.first.variable, pred)))\r\n my_map2 = {'R': str(form.substitute({formula.first.variable: Term(\"_\")})),\r\n 'Q': str(pred.substitute({formula.first.variable: Term(\"_\")})), \"x\": formula.first.variable, \"y\": formula.first.variable}\r\n\r\n step2 = prover.add_instantiated_assumption(form2,\r\n ADDITIONAL_QUANTIFICATION_AXIOMS[14 if my_quantifier==\"A\" else 15], my_map2)\r\n\r\n step3 = prover.add_mp(equivalence_of(Formula(my_quantifier, formula.first.variable, form),\r\n Formula(my_quantifier, formula.first.variable, pred)), step1, step2)\r\n\r\n\r\n my_map4 = {'R': str(formula.first.predicate.substitute({formula.first.variable: Term(\"_\")})), \"x\": formula.first.variable, \"Q\" : str(formula.second)}\r\n form4 = equivalence_of(formula, Formula(my_quantifier, formula.first.variable, form))\r\n step4 = prover.add_instantiated_assumption(form4,\r\n ADDITIONAL_QUANTIFICATION_AXIOMS[axiom_scd], my_map4)\r\n\r\n prover.add_tautological_implication(equivalence_of(formula, Formula(my_quantifier, formula.first.variable, pred)), [step3, step4])\r\n\r\n return Formula(my_quantifier, formula.first.variable, pred), prover.qed()",
"def set_operator(self, op):\n self.operator = op",
"def transform_query(self, node: Tree) -> Query:\n assert node.data == 'query' and len(node.children) == 2\n\n tuple_vars_node: Tree = node.children[0]\n predicate_node: Tree = node.children[1]\n\n tuple_vars = tuple(tuple_vars_node.children)\n self.validate_scope(predicate_node, set(tuple_vars))\n\n return Query(tuple(tuple_vars_node.children), predicate_node)",
"def expression(self, expr):\n self.set(expression=expr)",
"def operator(self, operator):\n\n self._operator = operator",
"def where(self, value, operator=\">\"):\n assert operator in self.operators\n return f\"\"\"\nf_cast_isots(json->>'{sqlq(self.name)}') {sqlq(operator)} ${{arg}}::{sqlq(self.cast_type)}\"\"\"",
"def selection(self, clause):\n result = DBTable()\n result.columnNames = self.columnNames\n if clause.operator == '=':\n for rec in self.records:\n if rec[clause.operand1] == clause.operand2:\n result.records.append(rec)\n return result",
"def binary_operator(cls, quad):\n\t\tleft_op = cls.get_address_value(quad.left_operand)\n\t\tright_op = cls.get_address_value(quad.right_operand)\n\t\tresult = cls.execute_binary_operator(quad.operator, left_op, right_op)\n\t\tcls.set_address_value(quad.result, result)"
] | [
"0.611861",
"0.6039169",
"0.5810166",
"0.57214564",
"0.5701275",
"0.56353176",
"0.5629461",
"0.557142",
"0.55459064",
"0.5535041",
"0.54918355",
"0.5490138",
"0.54886895",
"0.5476805",
"0.54562044",
"0.5445115",
"0.5409686",
"0.53946817",
"0.5333961",
"0.53089803",
"0.52976656",
"0.5253855",
"0.5212939",
"0.51890004",
"0.5183178",
"0.5175257",
"0.51502526",
"0.51159084",
"0.5111939",
"0.5092207"
] | 0.62107515 | 0 |
Save indices of train and test queries into a json file | def save_train_test_split(base_path, experiment_name, train_queries, test_queries):
filename_prefix = path.join(base_path, "data", experiment_name)
train_indices = list(set(query[2] for query in train_queries))
test_indices = list(set(query[2] for query in test_queries))
with open(filename_prefix + ".traintest.json", "w") as tt_json:
json.dump({"train": train_indices, "test": test_indices}, tt_json, indent=2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_target_data(term_to_index, output_prefix):\n index_to_term = dict((i,t) for t,i in term_to_index.iteritems())\n names = sorted(index_to_term.iteritems(), key=itemgetter(0))\n names = [n[1] for n in names]\n target_data = {}\n target_data['y'] = {}\n target_data['y']['names'] = names\n target_data['y']['weights'] = dict(zip(range(len(names)), [1] * len(names)))\n json.dump(target_data, open(output_prefix + 'target-data.json', 'w'))",
"def save_index(self):\n vsn_objs = [dict(Id = v['id'], Name = v['name']) for v in self.versions]\n self.backend.write_json(dict(\n Versions = vsn_objs,\n Channels = [], # This is unused.\n ApiVersion = 0,\n ), self.index_path())",
"def save_index_config(index_dir, data):\n\n with open(os.path.join(index_dir, 'index.json'), \"w+\") as data_file:\n json.dump(data, data_file, indent=4)",
"def save(self):\n path = self.get_benchmark_file_path(self._conf.results_dir)\n util.write_json(path, self._all_benchmarks, self.api_version)",
"def save_training(self):\n\n filename = str(hashlib.sha1(str(self.training_data).encode(\"utf-8\"))\n .hexdigest())\n path = \"./training/\" + filename + \".json\"\n\n data = {\n \"states\": self.states,\n \"transitions\": self.transitions,\n \"matrix\": self.matrix.tolist()\n }\n\n with open(path, \"w\") as outfile:\n json.dump(data, outfile)",
"def test_export_index(self):",
"def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)",
"def save(self):\n self.index.saveIndex(c.index_path('hnsw.index'))\n joblib.dump(self.ys, \"%s.ys\" % self.index_file_prefix)",
"def save_results(results):\n json.dump(results, open(\"results.json\", \"w\"))",
"def save_predictions(path: str, wrapper, results: Dict):\n predictions_with_idx = []\n\n if wrapper.task_helper and wrapper.task_helper.output:\n predictions_with_idx = wrapper.task_helper.output\n else:\n inv_label_map = {idx: label for label,\n idx in wrapper.label_map.items()}\n for idx, prediction_idx in zip(results['indices'], results['predictions']):\n prediction = inv_label_map[prediction_idx]\n idx = idx.tolist() if isinstance(idx, np.ndarray) else int(idx)\n predictions_with_idx.append({'idx': idx, 'label': prediction})\n\n with open(path, 'w', encoding='utf8') as fh:\n for line in predictions_with_idx:\n fh.write(json.dumps(line) + '\\n')",
"def to_file(self, fn):\n store.store_dict(fn, 'trainalgorithm', self.to_dict())",
"def save_results(self, *args):\n try:\n filename = args[0]\n except IndexError:\n filename = self.filename\n results = {}\n results['gp_pred'] = self.gp_predictions\n results['func_val'] = self.target_func_vals\n results['inds_all'] = np.array(self.indices_all)\n results['vals_all'] = np.array(self.vals_all)\n np.save(filename+\".npy\", results)",
"def write_result_to_file(self):\n self.__test_result[Result.__RUN] = self.__run\n with open(self.__json_file_path, \"w+\") as outfile:\n json.dump(self.__test_result, outfile,\n ensure_ascii=False, indent=2)",
"def save(self):\n filename = os.path.join(self.directory, 'experiment.json')\n with open(filename, 'w') as f:\n json.dump(self.report, f, indent=2, sort_keys=True)\n filename = os.path.join(self.directory, 'training_progress.csv')\n with open(filename, 'w') as csvfile:\n csv.writer(csvfile).writerows(self.history)\n filename = os.path.join(self.directory, 'learned_parameters.npy')\n parameters = lasagne.layers.get_all_param_values(self.__network)\n parameters = parameters\n numpy.save(filename, parameters)",
"def save_results(predictions, filename):\n with open(filename, 'w') as f:\n f.write(\"id,ACTION\\n\")\n for i, pred in enumerate(predictions):\n f.write(\"%d,%f\\n\" % (i + 1, pred))",
"def write_main_index(self):\n\n for miEntry in self.mainIndex:\n self.db_file.write(miEntry.get_representation())",
"def _write_keypoint_results(keypoint_results, gt_folder, pred_folder):",
"def save(self) -> None:\n try:\n js = json.loads(\n self.reset_index().to_json(orient=\"records\", date_format=\"iso\")\n )\n\n with open(self._fp, \"w\") as f:\n f.writelines(json.dumps(js, indent=4))\n logger.debug(f\"Saved index to {self._fp}\")\n except Exception as e:\n logger.error(f\"Could not update database -- {e}\")",
"def save_index(self, index_path: str = \"annoy_index.bin\"):\n if index_path:\n if self.index is None:\n self.build_index()\n self.index.save(index_path)\n corpus_emb_json_path = index_path + \".json\"\n super().save_index(corpus_emb_json_path)\n logger.info(f\"Saving Annoy index to: {index_path}, corpus embedding to: {corpus_emb_json_path}\")\n else:\n logger.warning(\"No index path given. Index not saved.\")",
"def _index_sub(self, uri_list, num, batch_num):\n bname = '%s-%s' % (batch_num, num)\n log.debug(\"batch_num '%s' starting es_json conversion\",\n bname)\n qry_data = get_all_item_data([item[0] for item in uri_list],\n self.tstore_conn,\n rdfclass=self.rdf_class)\n log.debug(\"batch_num '%s-%s' query_complete | count: %s\",\n batch_num,\n num,\n len(qry_data))\n # path = os.path.join(CFG.dirs.cache, \"index_pre\")\n # if not os.path.exists(path):\n # os.makedirs(path)\n # with open(os.path.join(path, bname + \".json\"), \"w\") as fo:\n # fo.write(json.dumps(qry_data))\n data = RdfDataset(qry_data)\n del qry_data\n log.debug(\"batch_num '%s-%s' RdfDataset Loaded\", batch_num, num)\n for value in uri_list:\n try:\n\n self.batch_data[batch_num]['main'].append(\\\n data[value[0]].es_json())\n self.count += 1\n except KeyError:\n pass\n for name, indexer in self.other_indexers.items():\n for item in data.json_qry(\"$.:%s\" % name.pyuri):\n val = item.es_json()\n if val:\n self.batch_data[batch_num][name].append(val)\n self.batch_uris[batch_num].append(item.subject)\n del data\n del uri_list\n log.debug(\"batch_num '%s-%s' converted to es_json\", batch_num, num)",
"def save_performances(self):\r\n nb_datasets = len(self.results)\r\n resu = [[] for k in range(nb_datasets)]\r\n\r\n # fetch results\r\n for k in range(nb_datasets):\r\n best = np.argmax(self.results[k]['mean_test_score'])\r\n resu[k].append(('score', self.results[k]['mean_test_score'][best]))\r\n resu[k] = resu[k] + list(self.results[k]['params'][best].items())\r\n\r\n # write results in csv\r\n for k, resu in enumerate(resu):\r\n with open('results/final_results_{}.csv'.format(k), 'a') as file:\r\n writer = csv.writer(file)\r\n writer.writerow(resu)",
"def save_results(self, export_json_path):\n with open(export_json_path, 'w') as f:\n json.dump(self.results, f)",
"def retrieve_saved_data():\n \n sequences = np.load('sequences.npy')\n test_sequences = np.load('test_sequences.npy')\n labels = np.load('labels.npy')\n \n iw = []\n with open('index_word.json', 'r') as f:\n for l in f:\n iw.append(json.loads(l))\n\n index_word = iw[0]\n index_word = {int(key): word for key, word in index_word.items()}\n\n wi = []\n with open('word_index.json', 'r') as f:\n for l in f:\n wi.append(json.loads(l))\n\n word_index = wi[0]\n word_index = {word: int(index) for word, index in word_index.items()}\n \n vs = len(word_index) + 1\n \n return sequences, labels, test_sequences, word_index, index_word, vs",
"def write_sub_index(self):\n for sie in self.subIndex:\n self.db_file.write(sie.get_representation())",
"def save(statistic_entries):\n with open('learn.json', 'w') as file:\n json.dump(statistic_entries, file, indent=2)",
"def save_index(self, fn):\n utils.save_obj(self.tweetTerms, \"TweetTerm_%s\" % (self.counterOfTweetTermsFiles))\n self.computeTfIdf(self.counterOfTweets)\n self.deleteSingleEntities()\n inv_dict = {'inverted_idx': self.inverted_idx, 'posting': self.postingFiles}\n utils.save_obj(inv_dict, fn)",
"def data():\n print (\"&\")\n res = {}\n\t\n # Load Data\n with open(DATA_PATH_TRAIN, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t \t\n with open(DATA_PATH_TEST, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t\t\t\n with open('tasks/env/data/data.json', 'w') as outfile:\n json.dump(res, outfile)",
"def buildFeatureList():\n with open('./feature_list.txt', 'w')as out:\n res = es.search(index=indexName, doc_type=document,\n body={\n 'query': {\n 'query_string': {\n \"default_field\": \"split\",\n \"query\": \"training\"\n }\n },\n \"size\": indexSize\n })\n ids = [d['_id'] for d in res['hits']['hits']]\n for id in ids:\n text = es.get(index=indexName, doc_type=document, id=id)['_source']['body']\n terms = text.split()\n for term in terms:\n features[term] = term\n count = 0\n for term in features:\n count += 1\n out.write(str(count)+ \" \" + term + '\\n')",
"def index():\n data = te.getMarketsData(marketsField='index', output_type='df')\n return jsonify(data.to_dict(orient='records'))",
"def export_documents(self, index, filename, **kwargs):\n documentsGenerator = self.get_documents(index, **kwargs)\n documents = []\n format=kwargs.get('format','json')\n for doc in documentsGenerator:\n doc_with_id={**doc.to_dict(),'_id':doc.meta.id}\n documents.append(doc_with_id)\n self.__export_documents(documents,filename,exportformat=format)"
] | [
"0.6417914",
"0.62603885",
"0.6204297",
"0.6181379",
"0.61736476",
"0.5953024",
"0.5868456",
"0.5851617",
"0.5841516",
"0.57562774",
"0.5734983",
"0.57247096",
"0.5689729",
"0.5672398",
"0.5640475",
"0.563358",
"0.5581112",
"0.55707127",
"0.55586636",
"0.55359846",
"0.55289686",
"0.55210364",
"0.5503634",
"0.5499756",
"0.54908234",
"0.54788095",
"0.5474028",
"0.5444488",
"0.5437347",
"0.54309005"
] | 0.77021956 | 0 |
Subclass the save method, to hash ndarray subclasses, rather than pickling them. Of course, this is a total abuse of the Pickler class. | def save(self, obj):
if isinstance(obj, self.np.ndarray):
# Compute a hash of the object:
try:
self._hash.update(self.np.getbuffer(obj))
except TypeError:
# Cater for non-single-segment arrays: this creates a
            # copy, and thus alleviates this issue.
# XXX: There might be a more efficient way of doing this
self._hash.update(self.np.getbuffer(obj.flatten()))
# We store the class, to be able to distinguish between
# Objects with the same binary content, but different
# classes.
if self.coerce_mmap and isinstance(obj, self.np.memmap):
            # We don't distinguish between memmap and
# normal ndarrays, to be able to reload previously
# computed results with memmap.
klass = self.np.ndarray
else:
klass = obj.__class__
# We also return the dtype and the shape, to distinguish
# different views on the same data with different dtypes.
# The object will be pickled by the pickler hashed at the end.
obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))
Hasher.save(self, obj) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _save(self, data: np.ndarray) -> None:\n ...",
"def save(self, obj):\r\n if self.np is not None and type(obj) in (self.np.ndarray,\r\n self.np.matrix, self.np.memmap):\r\n size = obj.size * obj.itemsize\r\n if self.compress and size < self.cache_size * _MEGA:\r\n # When compressing, as we are not writing directly to the\r\n # disk, it is more efficient to use standard pickling\r\n if type(obj) is self.np.memmap:\r\n # Pickling doesn't work with memmaped arrays\r\n obj = self.np.asarray(obj)\r\n return Pickler.save(self, obj)\r\n self._npy_counter += 1\r\n try:\r\n filename = '%s_%02i.npy' % (self._filename,\r\n self._npy_counter)\r\n # This converts the array in a container\r\n obj, filename = self._write_array(obj, filename)\r\n self._filenames.append(filename)\r\n except:\r\n self._npy_counter -= 1\r\n # XXX: We should have a logging mechanism\r\n print('Failed to save %s to .npy file:\\n%s' % (\r\n type(obj),\r\n traceback.format_exc()))\r\n return Pickler.save(self, obj)",
"def save(self, obj):\r\n if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:\r\n # Compute a hash of the object:\r\n try:\r\n self._hash.update(self._getbuffer(obj))\r\n except (TypeError, BufferError, ValueError):\r\n # Cater for non-single-segment arrays: this creates a\r\n # copy, and thus aleviates this issue.\r\n # XXX: There might be a more efficient way of doing this\r\n # Python 3.2's memoryview raise a ValueError instead of a\r\n # TypeError or a BufferError\r\n self._hash.update(self._getbuffer(obj.flatten()))\r\n\r\n # We store the class, to be able to distinguish between\r\n # Objects with the same binary content, but different\r\n # classes.\r\n if self.coerce_mmap and isinstance(obj, self.np.memmap):\r\n # We don't make the difference between memmap and\r\n # normal ndarrays, to be able to reload previously\r\n # computed results with memmap.\r\n klass = self.np.ndarray\r\n else:\r\n klass = obj.__class__\r\n # We also return the dtype and the shape, to distinguish\r\n # different views on the same data with different dtypes.\r\n\r\n # The object will be pickled by the pickler hashed at the end.\r\n obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))\r\n Hasher.save(self, obj)",
"def save(self, timestamp: int, data_path: str, file_base: str):\n file_name = os.path.join(data_path,\n '{}-{}.pkl'.format(file_base, timestamp))\n pickle.dump(self.as_numpy_array(),\n open(file_name, 'wb'),\n protocol=pickle.HIGHEST_PROTOCOL)",
"def _store(self):\n if type(self._data) not in (np.ndarray, tuple, np.matrix, list):\n return super(ArrayParameter, self)._store()\n else:\n store_dict = {\"data\" + ArrayParameter.IDENTIFIER: self._data}\n\n if self.f_has_range():\n # Supports smart storage by hashable arrays\n # Keys are the hashable arrays or tuples and values are the indices\n smart_dict = {}\n\n store_dict[\"explored_data\" + ArrayParameter.IDENTIFIER] = ObjectTable(\n columns=[\"idx\"], index=list(range(len(self)))\n )\n\n count = 0\n for idx, elem in enumerate(self._explored_range):\n\n # First we need to distinguish between tuples and array and extract a\n # hashable part of the array\n if isinstance(elem, np.ndarray):\n # You cannot hash numpy arrays themselves, but if they are read only\n # you can hash array.data\n hash_elem = HashArray(elem)\n elif isinstance(elem, list):\n hash_elem = tuple(elem)\n else:\n hash_elem = elem\n\n # Check if we have used the array before,\n # i.e. element can be found in the dictionary\n if hash_elem in smart_dict:\n name_idx = smart_dict[hash_elem]\n add = False\n else:\n name_idx = count\n add = True\n\n name = self._build_name(name_idx)\n # Store the reference to the array\n store_dict[\"explored_data\" + ArrayParameter.IDENTIFIER][\"idx\"][\n idx\n ] = name_idx\n\n # Only if the array was not encountered before,\n # store the array and remember the index\n if add:\n store_dict[name] = elem\n smart_dict[hash_elem] = name_idx\n count += 1\n\n self._locked = True\n\n return store_dict",
"def store(obj, filename, suffix = ''):\n # It is a numpy array\n if type(obj) == np.ndarray:\n path,f = writefile(filename, obj_id='numpy_objs', suffix=suffix)\n json.dump(obj, fp=f, cls=NumpyEncoder,\n separators=(',', ':'), sort_keys=True, indent=4)\n print '> saved with JSON to {}'.format(path)\n else:\n path, f = writefile(filename, obj_id='other_objs', suffix=suffix)\n pickle.dump(obj, file=f)\n print '> saved with dill (pickled) to {}'.format(path)\n return path",
"def save(self, *args, **kwargs):\n raise NotImplementedError('missing data mixin')",
"def save(file, arr, allow_pickle=True, fix_imports=True):\n\n return numpy.save(file, array_create.array(arr, bohrium=False), allow_pickle, fix_imports)",
"def save(self, filename = 'array_zest', path = '/home/eric/dev/insitu/data/zs_recovery/'):\n filename = filename# + '_Lx_' + str(self.Lx) + 'm_Ly_' + str(self.Ly) + 'm'\n self.path_filename = path + filename + '.pkl'\n f = open(self.path_filename, 'wb')\n pickle.dump(self.__dict__, f, 2)\n f.close()",
"def dump_data(self):\n attr_names = [field for field in self.unique_together if field != 'parent']\n save_ndarrays_to_hdf5(\n self.data_path,\n [getattr(self, data_field) for data_field in self.data_fields],\n [self._get_dataset_path(field) for field in self.data_fields],\n attr_names,\n [getattr(self, attr_name) for attr_name in attr_names],\n )",
"def _serialize_array(self, array):\n buffer = io.BytesIO()\n np.save(buffer, array)\n return buffer.getvalue()",
"def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_h16fh8f().pack(_x.id, _x.age, _x.velocidad_relativa_x, _x.velocidad_relativa_y, _x.velocidad_absoluta_x, _x.velocidad_absoluta_y, _x.velocidad_absoluta_sigma_x, _x.velocidad_absoluta_sigma_y, _x.bounding_box_centro_x, _x.bounding_box_centro_y, _x.bounding_box_largo, _x.bounding_box_ancho, _x.object_box_centro_x, _x.object_box_centro_y, _x.object_box_orientacion, _x.object_box_size_x, _x.object_box_size_y, _x.clasificacion, _x.clasificacion_age, _x.clasificacion_certeza, _x.punto_cercano_x, _x.punto_cercano_y, _x.punto_referencia_x, _x.punto_referencia_y, _x.punto_referencia_sigma_x, _x.punto_referencia_sigma_y))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))",
"def save_obsarray_to_pickle(self, obs_array, dest_dir):\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n dump_array = obs_array\n pkl_name = 'batch' + str(len(os.listdir(dest_dir)))\n dump_path = os.path.join(dest_dir, pkl_name)\n self.pickledump(dump_array, dump_path)\n return pkl_name",
"def serialize_numpy(self, buff, numpy):\n try:\n buff.write(self.thumb.tostring())\n buff.write(self.index.tostring())\n buff.write(self.middle.tostring())\n buff.write(self.ring.tostring())\n buff.write(self.little.tostring())\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))",
"def save_to_array(arr_name, arr_object):\n return np.save(arr_name, arr_object)",
"def save_pickle(self, filename):\n x, y, _ = self.get_coords_enu()\n cx, cy = self.get_centres_enu()\n coords = dict(x=x, y=y, cx=cx, cy=cy)\n pickle.dump(coords, open(filename, 'wb'))",
"def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_12d2f3d().pack(_x.position.x, _x.position.y, _x.position.z, _x.approach.x, _x.approach.y, _x.approach.z, _x.binormal.x, _x.binormal.y, _x.binormal.z, _x.axis.x, _x.axis.y, _x.axis.z, _x.width.data, _x.score.data, _x.sample.x, _x.sample.y, _x.sample.z))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))",
"def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)",
"def save(self, path):\n np.save(path, self.q)",
"def save(fname, data):\n from ..numpy import ndarray as np_ndarray\n if isinstance(data, NDArray):\n data = [data]\n handles = c_array(NDArrayHandle, [])\n if isinstance(data, dict):\n str_keys = data.keys()\n nd_vals = data.values()\n if any(not isinstance(k, string_types) for k in str_keys) or \\\n any(not isinstance(v, NDArray) for v in nd_vals):\n raise TypeError('save only accept dict str->NDArray or list of NDArray')\n if any(isinstance(v, np_ndarray) for v in nd_vals):\n raise TypeError('cannot save mxnet.numpy.ndarray using mxnet.ndarray.save;'\n ' use mxnet.numpy.save instead.')\n keys = c_str_array(str_keys)\n handles = c_handle_array(nd_vals)\n elif isinstance(data, list):\n if any(not isinstance(v, NDArray) for v in data):\n raise TypeError('save only accept dict str->NDArray or list of NDArray')\n if any(isinstance(v, np_ndarray) for v in data):\n raise TypeError('cannot save mxnet.numpy.ndarray using mxnet.ndarray.save;'\n ' use mxnet.numpy.save instead.')\n keys = None\n handles = c_handle_array(data)\n else:\n raise ValueError(\"data needs to either be a NDArray, dict of str, NDArray pairs \"\n \"or a list of NDarrays.\")\n check_call(_LIB.MXNDArraySave(c_str(fname),\n mx_uint(len(handles)),\n handles,\n keys))",
"def save(self):\n #--Data file exists?\n filePath = self.path\n if os.path.exists(filePath):\n ins = open(filePath)\n outData = compat.uncpickle(ins)\n ins.close()\n #--Delete some data?\n for key in self.deleted:\n if key in outData:\n del outData[key]\n else:\n outData = {}\n #--Write touched data\n for key in self.changed:\n outData[key] = self.data[key]\n #--Pickle it\n tempPath = filePath+'.tmp'\n cPickle.dump(outData,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)",
"def saveData(self):\n pass",
"def save_vals (self):\n raise NotImplementedError",
"def save(self, path, name):\n if not self._frozen:\n raise Exception(\"Dataset must be frozen\")\n # create directory\n pathlib.Path(os.path.join(path,name)).mkdir(parents=True, exist_ok=True)\n self._raw_data.to_hdf(os.path.join(path,name,\"dataset.h5\"), key=\"raw_data\")\n self._proc_data.to_hdf(os.path.join(path,name,\"dataset.h5\"), key=\"proc_data\")\n np.save(os.path.join(path,name,\"_X_train.npy\"), self._X_train)\n np.save(os.path.join(path,name,\"_X_test.npy\"), self._X_test)\n np.save(os.path.join(path,name,\"_y_train.npy\"), self._y_train)\n np.save(os.path.join(path,name,\"_y_test.npy\"), self._y_test)\n \n np.save(os.path.join(path,name,\"_X_mean.npy\"), self._X_mean)\n np.save(os.path.join(path,name,\"_X_std.npy\"), self._X_std)\n np.save(os.path.join(path,name,\"_y_mean.npy\"), self._y_mean)\n np.save(os.path.join(path,name,\"_y_std.npy\"), self._y_std)\n \n with open(os.path.join(path,name,\"_seed.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._seed, fp)\n with open(os.path.join(path,name,\"_train_part.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._train_part, fp)\n with open(os.path.join(path,name,\"_test_part.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._test_part, fp)\n with open(os.path.join(path,name,\"_columns.pkl\"), \"wb\") as fp: #Pickling\n pickle.dump(self._columns, fp)",
"def dump_npy(filename: str, obj, **kwargs):\n return np.save(filename, obj)",
"def save(self):\n pickle.dump(self, open(self.path, \"wb\"))",
"def _save_numpy(self, folderpath: str):\n\n if not os.path.exists(folderpath):\n raise EnvironmentError('Unable to save numpy data to {}, does not exist'.format(folderpath))\n da.to_npy_stack(folderpath + '/data', self.data)\n da.to_npy_stack(folderpath + '/node_data', self.node_data)",
"def _save(self, dataset, path, files, copy_files=False):\n raise NotImplementedError('Loader {} does not support saving datasets.'.format(self.type()))",
"def save(self, base_path, time_stamp):\r\n cdef np.ndarray temp = np.zeros([self.maxInd,2])\r\n cdef np.ndarray temp1 = np.zeros((self.maxInd,2),dtype=np.int)\r\n cdef np.ndarray temp2 = np.zeros(self.cce.shape)\r\n cdef np.ndarray temp3 = np.zeros(self.inhibition.shape)\r\n cdef np.ndarray temp4 = np.zeros(self.sync_t.shape,dtype=np.int)\r\n temp+=self.C\r\n temp1+=self.state\r\n temp2+=self.cce\r\n temp3+=self.inhibition\r\n temp4+=self.sync_t\r\n self.C=temp\r\n self.state=temp1\r\n self.cce=temp2\r\n self.inhibition=temp3\r\n self.sync_t=temp4\r\n path = base_path + self.name + \"_\" + time_stamp\r\n f = open(path, \"wb\")\r\n pickle.dump(self, f,2)\r\n f.close()\r\n \r\n return path",
"def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_get_struct_6d2IB().pack(_x.position.x, _x.position.y, _x.position.z, _x.position.roll, _x.position.pitch, _x.position.yaw, _x.position.stamp.secs, _x.position.stamp.nsecs, _x.is_Known))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))"
] | [
"0.73430663",
"0.7268807",
"0.72580737",
"0.6872324",
"0.67022103",
"0.63646066",
"0.6179825",
"0.60192174",
"0.60099596",
"0.6003321",
"0.5994046",
"0.5980241",
"0.5976691",
"0.5966399",
"0.59421",
"0.5908745",
"0.587527",
"0.58730143",
"0.5858162",
"0.585457",
"0.58540595",
"0.5836554",
"0.582165",
"0.5820673",
"0.58199036",
"0.5817729",
"0.5816731",
"0.58125675",
"0.5805494",
"0.5802004"
] | 0.76393694 | 0 |
This routine must be implemented by subclasses. Returns an array of binaries that need to be symbolized. | def _binaries_to_symbolize(self):
raise NotImplementedError() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GetSymbolBinaries(self, minidump):\n libraries = self._ExtractLibraryNamesFromDump(minidump)\n symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)\n if not symbol_binary_dir:\n return []\n\n return [os.path.join(symbol_binary_dir, lib) for lib in libraries]",
"def get_password_binaries_array(password):\r\n password_binary_array = []\r\n\r\n # Create array of binaries from the password\r\n for character in password:\r\n password_binary_array.append(get_binary(character))\r\n\r\n # Join it together for parsing\r\n binary = \"\".join(password_binary_array)\r\n\r\n # Start the array off with the actual padded password binary\r\n rebuild_binaries = [binary]\r\n\r\n # This loops through the binary string, reducing it by\r\n # one (in length) with each pass appending string to array\r\n # Stops once the binary length is 1 (one)\r\n while len(binary) > 1:\r\n # Use the function logic to reduce the binary by one based on simple logic\r\n binary = binary_reduction(binary)\r\n # Add that new binary to this array for later usage\r\n rebuild_binaries.append(binary)\r\n\r\n return rebuild_binaries",
"def bin_code(self):\n self.alphabet = np.unique(self.sequence)\n\n for s, n in zip([chr(k + ord('a') - 1) for k in self.alphabet], self.alphabet):\n self.alphabet_symbol[s] = n\n\n sigm = len(self.alphabet)\n bin_code = []\n for i, e in enumerate(self.alphabet):\n em = [0] * sigm\n em[sigm - 1 - i] = 1\n bin_code.append(em)\n\n for i in range(len(bin_code)):\n self.alphabet_dict[self.alphabet[i]] = bin_code[i]\n\n return reduce(lambda r, e: r + self.alphabet_dict[e], self.sequence, [])",
"def symLoad(loadFile='symsave.npy'):\n loaded = np.load(loadFile)\n n = -1/2.+np.sqrt(1/4.+2*loaded.size)\n array = np.zeros([n,n])\n indices = np.triu_indices(n)\n array[indices]=loaded\n return array+np.transpose(array)-np.diag(np.diag(array))",
"def sym(self) -> np.ndarray:\n if self._sym is None:\n self._sym = symmetrize_discrete_vector_field(self.F, mode=\"sym\")\n return self._sym",
"def get_symmetries(self):\n temp = self._properties.get('symmetries', [])\n return temp",
"def z2_symmetries(self) -> \"Z2Symmetries\":\n return self._z2_symmetries",
"def xbin_states(n, sym=False):\n\n assert n>0, \"n cannot be <0\"\n \n def v():\n for i in range(2**n):\n if sym is False:\n yield np.array(list(np.binary_repr(i,width=n))).astype('int')\n else:\n yield np.array(list(np.binary_repr(i,width=n))).astype('int')*2-1\n\n return v()",
"def bin_states(n, sym=False):\n\n if n<0:\n raise Exception(\"n cannot be <0\")\n if n>30:\n raise Exception(\"n is too large to enumerate all states.\")\n \n v = np.array([list(np.binary_repr(i,width=n)) for i in range(2**n)]).astype(int)\n\n if sym is False:\n return v\n return v*2-1",
"def bin_binarise(self):\n pass",
"def binary_encode(self, literals):\n arr = np.zeros(len(self.encoder), dtype='bool')\n for p in literals:\n assert isinstance(p, Literal)\n arr[self.encoder[p]] = True\n return arr",
"def binary_bases(cls):\n return cls._BINARY_BASES",
"def get_binaries(kdb,entry):\n xml = objectify.fromstring(entry.dump_xml())\n binaries = list(xml.xpath('./Binary'))\n for binary in binaries:\n yield (binary.Key.text, Binary(kdb,binary))",
"def get_binaries(name_only=False):\n\n bins = list()\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n # This just returns the name\n if name_only:\n\n sql = ('SELECT name '\n 'FROM binaries ')\n\n for binary in cur.execute(sql):\n bins.append(binary[0])\n\n # This returns a list of items\n else:\n\n sql = ('SELECT name, version, '\n 'about, author '\n 'FROM binaries '\n 'ORDER BY name')\n\n cur.execute(sql)\n\n while True:\n\n item = dtf.core.item.Item()\n line = cur.fetchone()\n if line is None:\n break\n\n item.type = dtf.core.item.TYPE_BINARY\n item.name = line[0]\n item.version = line[1]\n item.about = line[2]\n item.author = line[3]\n\n bins.append(item)\n\n return bins",
"def get_data():\n state = np.array([1,1,1,1,1,1,1,1,1], dtype=np.bool)\n taps = np.array([0,0,0,0,1,0,0,0,1], dtype=np.bool)\n p = np.zeros(176, dtype=np.uint8)\n for i in range(176):\n p[i] = np.sum(state[-3:]*[4,2,1])\n for _ in range(3):\n state = np.concatenate(([np.sum(state&taps)&1], state[0:-1]))\n a = np.zeros(176, common.SYMB_SCRAMBLE_DTYPE)\n ## 8PSK modulation\n constellation = PhysicalLayer.make_psk(8,range(8))['points']\n a['scramble'] = constellation[p,]\n known_symbols = np.mod(range(176),48)>=32\n a['symb'][known_symbols] = a['scramble'][known_symbols]\n return a",
"def mk_bitvecs(self):\n self.bitvec = ''.join([f'{b:#010b}'[2:] for b in self.code ][::-1])\n self.bitvec_data = ''.join([f'{b:#010b}'[2:] for b in self.input][::-1])\n\n # Pad with some zeros to catch the last instructions.\n self.bitvec = '0'*64 + self.bitvec",
"def list_syms():\n\tSymStringVec=[];\n\tSymStringVec.append(\"CSYM\");\n\tSymStringVec.append(\"DSYM\");\n\tSymStringVec.append(\"TET_SYM\");\n\tSymStringVec.append(\"OCT_SYM\");\n\tSymStringVec.append(\"ICOS_SYM\");\n\tSymStringVec.append(\"ISYM\");\n\treturn SymStringVec",
"def get_binary(self):\n\n assert len(self._pattern) <= 128\n\n rtr = bytes()\n\n for word in self._pattern:\n rtr += struct.pack(\"<H\", word)\n return rtr",
"def build_weak_versioned_sym_gtirb() -> gtirb.IR:\n (ir, module) = gth.create_test_module(\n gtirb.Module.FileFormat.ELF,\n gtirb.Module.ISA.X64,\n )\n (text_section, text_bi) = gth.add_text_section(module)\n\n gth.add_section(module, \".dynamic\")\n\n proxy_a = gth.add_proxy_block(module)\n symbol_a = gth.add_symbol(module, \"a\", proxy_a)\n se_a = gtirb.SymAddrConst(\n 0, symbol_a, {gtirb.SymbolicExpression.Attribute.PLT}\n )\n\n module.aux_data[\"elfSymbolVersions\"] = gtirb.AuxData(\n type_name=(\n \"tuple<mapping<uint16_t,tuple<sequence<string>,uint16_t>>,\"\n \"mapping<string,mapping<uint16_t,string>>,\"\n \"mapping<UUID,tuple<uint16_t,bool>>>\"\n ),\n data=(\n # ElfSymVerDefs\n {},\n # ElfSymVerNeeded\n {\"libmya.so\": {1: \"LIBA_1.0\"}},\n # ElfSymbolVersionsEntries\n {symbol_a.uuid: (1, False)},\n ),\n )\n\n # For the following code:\n # e8 00 00 00 00 callq a@LIBA_1.0@plt\n # 48 31 c0 xor %rax,%rax\n # 48 c7 c0 3c 00 00 00 mov $0x3c,%rax\n # 48 31 ff xor %rdi,%rdi\n # 0f 05 syscall\n cb = gth.add_code_block(\n text_bi,\n b\"\\xe8\\x00\\x00\\x00\\x00\"\n b\"\\x48\\x31\\xc0\"\n b\"\\x48\\xc7\\xc0\\x3c\\x00\\x00\\x00\"\n b\"\\x48\\x31\\xff\"\n b\"\\x0f\\x05\",\n {1: se_a},\n )\n symbol_start = gth.add_symbol(module, \"_start\", cb)\n\n module.aux_data[\"libraries\"].data.extend([\"libmya.so\"])\n\n module.aux_data[\"elfSymbolInfo\"].data[symbol_start.uuid] = (\n 0,\n \"FUNC\",\n \"GLOBAL\",\n \"DEFAULT\",\n 0,\n )\n module.aux_data[\"elfSymbolInfo\"].data[symbol_a.uuid] = (\n 0,\n \"FUNC\",\n \"WEAK\",\n \"DEFAULT\",\n 0,\n )\n\n return ir",
"def get_symbols(obj_path):\n cmd = ['nm', obj_path]\n res = subprocess.run(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, check=True)\n\n return res.stdout.decode()",
"def create_binaries(num_sys=1, ecc_prob='thermal', a_prob='log_flat'):\n\n M1 = get_M1(num_sys=num_sys)\n M2 = get_M2(M1, num_sys=num_sys)\n a = get_a(num_sys=num_sys, prob=a_prob)\n e = get_e(num_sys=num_sys, prob=ecc_prob)\n M = get_M(num_sys=num_sys)\n omega = get_omega(num_sys=num_sys)\n Omega = get_Omega(num_sys=num_sys)\n inc = get_inc(num_sys=num_sys)\n\n return M1, M2, a, e, M, Omega, omega, inc",
"def tobinary_multiples(arr):\n return [np.array(arr_i).tobytes() for arr_i in arr]",
"def make_sym(self):\n\n phi_list = []\n sym_vars = sym.zeros(len(self.rigid_body_list) * 12, 1)\n\n for i, rigid_body in enumerate(self.rigid_body_list): # Global symbolic variable matrix (column vector)\n for j, sim_var in enumerate(rigid_body.symbolic_variables):\n sym_vars[i * 12 + j, 0] = rigid_body.symbolic_variables[j]\n phi_list.extend(rigid_body.constraints)\n\n self.r = sym_vars\n\n for i in self.constraint_list:\n phi_list.append(i.symbolic)\n\n phi = sym.zeros(len(phi_list), 1)\n\n for i, constraint in enumerate(phi_list):\n phi[i] = constraint\n\n self.phi = phi\n self.phi_t = self.phi.diff(t)\n\n rows = len(self.r)\n columns = len(phi)\n\n phi_r = sym.zeros(rows, columns)\n\n for j, constraint in enumerate(phi):\n for i, var in enumerate(sym_vars):\n phi_r[i, j] = constraint.diff(var)\n\n self.phi_r = phi_r\n\n self.b = sym.Matrix([- (self.phi_r.diff(t)).T*self.r.diff(t) - self.phi_t.diff(t) -\n 2*self.alpha*(self.phi_r.T*self.r.diff(t) + self.phi_t) -\n (self.beta**2)*self.phi])",
"def find_binaries():\n\n builddir = Path(__file__).parent.parent / \"builddir\"\n\n bins = []\n\n for folder in [\"examples\", \"tests\", \"tools\"]:\n for path in sorted((builddir / folder).rglob(\"*\")):\n if path.stem.startswith(\"xnvme_single\"):\n continue\n if path.stem.startswith(\"xnvme_dev\"):\n continue\n if path.stem.startswith(\"xnvme_enum\"):\n continue\n if path.is_file() and path.stat().st_mode & os.X_OK:\n bins.append(path.name)\n\n return bins",
"def get_bases():\n\tbss = []\n\tfor es in MV.index:\n\t\tbs = []\n\t\tif es == ():\n\t\t\tbs.append(_1)\n\t\telse:\n\t\t\tfor js in es:\n\t\t\t\tbmv = reduce(operator.mul, map(lambda j: e[j], js))\n\t\t\t\tbs.append(bmv)\n\t\t\t\t\n\t\tbss.append(bs)\n\t\n\treturn bss",
"def tx_as_binary(self, tx: pd.Series) -> np.array:\n tx_items_indexes = [\n self.__item_map[(attribute, tx[attribute])]\n for attribute in self.__attribute_columns]\n\n tx_array = np.zeros(len(self.__item_map), dtype=int)\n tx_array.put(tx_items_indexes, 1)\n return np.packbits(tx_array)",
"def _bin_backport(x):\n chars = []\n for n in range(7, -1, -1):\n y = x - 2**n\n if y >= 0:\n chars.append('1')\n x = y\n else:\n chars.append('0')\n return ''.join(chars)",
"def bytify(binary):\n\tbytes = [0,0,0,0]\n\ti = 3\n\twhile binary:\n\n\t\tbytes[i] = binary&255\n\t\tbinary >>= 8\n\t\ti -= 1 \n\treturn bytes",
"def symbolize(X, m):\n \n X = np.array(X)\n\n if m >= len(X):\n raise ValueError(\"Length of the series must be greater than m\")\n \n dummy = []\n for i in range(m):\n l = np.roll(X,-i)\n dummy.append(l[:-(m-1)])\n \n dummy = np.array(dummy)\n \n symX = []\n \n for mset in dummy.T:\n rank = stats.rankdata(mset, method=\"min\")\n symbol = np.array2string(rank, separator=\"\")\n symbol = symbol[1:-1]\n symX.append(symbol)\n \n return symX",
"def build_versioned_syms_gtirb() -> gtirb.IR:\n (ir, module) = gth.create_test_module(\n gtirb.Module.FileFormat.ELF,\n gtirb.Module.ISA.X64,\n )\n (text_section, text_bi) = gth.add_text_section(module)\n\n proxy_a = gth.add_proxy_block(module)\n symbol_a = gth.add_symbol(module, \"a\", proxy_a)\n se_a = gtirb.SymAddrConst(\n 0, symbol_a, {gtirb.SymbolicExpression.Attribute.PLT}\n )\n proxy_a2 = gth.add_proxy_block(module)\n symbol_a2 = gth.add_symbol(module, \"a\", proxy_a2)\n se_a2 = gtirb.SymAddrConst(\n 0, symbol_a2, {gtirb.SymbolicExpression.Attribute.PLT}\n )\n\n module.aux_data[\"elfSymbolVersions\"] = gtirb.AuxData(\n type_name=(\n \"tuple<mapping<uint16_t,tuple<sequence<string>,uint16_t>>,\"\n \"mapping<string,mapping<uint16_t,string>>,\"\n \"mapping<UUID,tuple<uint16_t,bool>>>\"\n ),\n data=(\n # ElfSymVerDefs\n {},\n # ElfSymVerNeeded\n {\"libmya.so\": {1: \"LIBA_1.0\", 2: \"LIBA_2.0\"}},\n # ElfSymbolVersionsEntries\n {symbol_a.uuid: (1, False), symbol_a2.uuid: (2, False)},\n ),\n )\n\n # For the following code:\n # e8 00 00 00 00 callq a@LIBA_1.0@plt\n # e8 00 00 00 00 callq a@LIBA_2.0@plt\n # 48 31 c0 xor %rax,%rax\n # 48 c7 c0 3c 00 00 00 mov $0x3c,%rax\n # 48 31 ff xor %rdi,%rdi\n # 0f 05 syscall\n cb = gth.add_code_block(\n text_bi,\n b\"\\xe8\\x00\\x00\\x00\\x00\"\n b\"\\xe8\\x00\\x00\\x00\\x00\"\n b\"\\x48\\x31\\xc0\"\n b\"\\x48\\xc7\\xc0\\x3c\\x00\\x00\\x00\"\n b\"\\x48\\x31\\xff\"\n b\"\\x0f\\x05\",\n {1: se_a, 6: se_a2},\n )\n symbol_start = gth.add_symbol(module, \"_start\", cb)\n\n module.aux_data[\"libraries\"].data.extend([\"libmya.so\"])\n\n for sym in (symbol_start, symbol_a, symbol_a2):\n module.aux_data[\"elfSymbolInfo\"].data[sym.uuid] = (\n 0,\n \"FUNC\",\n \"GLOBAL\",\n \"DEFAULT\",\n 0,\n )\n\n return ir"
] | [
"0.61693114",
"0.61270034",
"0.5790367",
"0.57455647",
"0.56719047",
"0.56250364",
"0.5542743",
"0.536507",
"0.53464913",
"0.53261465",
"0.5300343",
"0.52819985",
"0.5180829",
"0.5157225",
"0.5139798",
"0.5120473",
"0.5091495",
"0.5066631",
"0.5062331",
"0.5053902",
"0.50444293",
"0.50363517",
"0.501989",
"0.4995316",
"0.49584883",
"0.49186423",
"0.49032223",
"0.49008393",
"0.48894197",
"0.48778164"
] | 0.75760055 | 0 |
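A minimal sketch of how a concrete subclass could satisfy the _binaries_to_symbolize() hook documented in the record above, assuming the contract is simply "return a list of paths to executable files"; the Symbolizer/BuildDirSymbolizer class names and the build-directory layout are illustrative assumptions, not part of the record.

import os


class Symbolizer:
    # Hypothetical base class mirroring the abstract hook above.
    def _binaries_to_symbolize(self):
        raise NotImplementedError()


class BuildDirSymbolizer(Symbolizer):
    # Illustrative subclass: report every executable regular file in a build directory.
    def __init__(self, build_dir):
        self.build_dir = build_dir

    def _binaries_to_symbolize(self):
        paths = (os.path.join(self.build_dir, name)
                 for name in sorted(os.listdir(self.build_dir)))
        return [p for p in paths
                if os.path.isfile(p) and os.access(p, os.X_OK)]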
Generator yielding the flight index, assigned bay and flight object of departing flights. | def departing_flights(self):
for i in range(self.flights.n_flights):
if self.flights.departing(i):
yield i, self.bay[i], self.flights.flight_schedule[i] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_return_flights(self):\n flights = []\n for date in self.return_dates:\n for src in self.src_airports:\n for dst in self.dst_airports:\n flights.append( Flight(dst, src, date) )\n\n return flights",
"def __iter__(self):\n\n return iter([self.id, self.id, self.flight_num, self.arrival_time,\n self.first_name, self.last_name, self.birthdate,\n self.nationality, self.enque_time, self.departure_time,\n self.service_time, self.connecting_flight, self.processed])",
"def generate_outgoing_flights(self):\n flights = []\n for date in self.leave_dates:\n for src in self.src_airports:\n for dst in self.dst_airports:\n flights.append( Flight(src, dst, date) )\n\n return flights",
"def __iter__(self) -> Iterable[\"AbstractLane\"]:\n for origin in self.graph:\n for destination in self.graph[origin]:\n for index, lane in self.graph[origin][destination].items():\n yield lane",
"def aligned_on_runway(\n self, airport: Union[str, \"Airport\"]\n ) -> Iterator[\"Flight\"]:\n\n from ..data import airports\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n _airport = airports[airport] if isinstance(airport, str) else airport\n if (\n _airport is None\n or _airport.runways is None\n or _airport.runways.shape.is_empty\n ):\n return None\n\n if isinstance(_airport.runways.shape, LineString):\n candidate_shapes = [\n LineString(list(self.xy_time)).intersection(\n _airport.runways.shape.buffer(5e-4)\n )\n ]\n else:\n candidate_shapes = [\n LineString(list(self.xy_time)).intersection(\n on_runway.buffer(5e-4)\n )\n for on_runway in _airport.runways.shape.geoms\n ]\n\n for intersection in candidate_shapes:\n if intersection.is_empty:\n continue\n if isinstance(intersection, LineString):\n (*_, start), *_, (*_, stop) = intersection.coords\n segment = self.between(start, stop, strict=False)\n if segment is not None:\n yield segment\n if isinstance(intersection, MultiLineString):\n (*_, start), *_, (*_, stop) = intersection.geoms[0].coords\n for chunk in intersection.geoms:\n (*_, start_bak), *_, (*_, stop) = chunk.coords\n if stop - start > 40: # crossing runways and back\n start = start_bak\n segment = self.between(start, stop, strict=False)\n if segment is not None:\n yield segment",
"def runway_change(\n self,\n airport: Union[str, \"Airport\", None] = None,\n dataset: Optional[\"Airports\"] = None,\n **kwargs: Any,\n ) -> Iterator[\"Flight\"]:\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n if airport is None:\n if dataset is None:\n airport = self.landing_airport()\n else:\n airport = self.landing_airport(dataset=dataset)\n\n if airport is None:\n return None\n\n aligned = iter(self.aligned_on_ils(airport, **kwargs))\n first = next(aligned, None)\n if first is None:\n return\n\n for second in aligned:\n candidate = self.between(first.start, second.stop)\n assert candidate is not None\n candidate = candidate.assign(ILS=None)\n if candidate.phases().query('phase == \"CLIMB\"') is None:\n candidate.data.loc[\n candidate.data.timestamp <= first.stop, \"ILS\"\n ] = first.max(\"ILS\")\n candidate.data.loc[\n candidate.data.timestamp >= second.start, \"ILS\"\n ] = second.max(\"ILS\")\n\n yield candidate.assign(\n airport=airport\n if isinstance(airport, str)\n else airport.icao\n )\n\n first = second",
"def get_ARNA_flights_as_dfs():\n flight_nums = [216, 217, 218, 219, 220, 221, 222, 223, 224, 225]\n flight_IDs = ['C{}'.format(i) for i in flight_nums]\n dfs = {}\n for flight_ID in flight_IDs:\n print(flight_ID)\n try:\n df = AC.get_FAAM_locations_as_df(flight_ID=flight_ID)\n dfs[flight_ID] = df\n except:\n print('WARNING: failed for {}'.format(flight_ID))\n return dfs",
"def landing_attempts(\n self, dataset: Optional[\"Airports\"] = None, **kwargs: Any\n ) -> Iterator[\"Flight\"]:\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n candidate = self.query(\"altitude < 8000\")\n if candidate is not None:\n for chunk in candidate.split(\"10T\"):\n point = chunk.query(\"altitude == altitude.min()\")\n if point is None:\n return\n if dataset is None:\n cd = point.landing_airport()\n else:\n cd = point.landing_airport(dataset=dataset)\n if cd.runways is not None:\n yield from chunk.assign(airport=cd.icao).aligned_on_ils(\n cd, **kwargs\n )",
"def __iter__(self):\n for (_,_,path) in self.frontierpq:\n yield path",
"def __iter__(self):\n with open(self.fn + \".fai\") as fai:\n for toks in (l.rstrip(\"\\r\\n\").split(\"\\t\") for l in fai):\n yield toks[0], int(toks[1])",
"def __iter__(self):\n with open(self.inputfile) as fin:\n for line in fin:\n arr = line.strip().split()\n self.path = arr[0]\n self.y = int(arr[1])\n if len(arr) > 2:\n self.feats = map(float, arr[2:])\n yield self.path, self.y, self.feats\n else:\n yield self.path, self.y",
"def _create_record_iterator(self) -> Iterator[RDSModel]:\n for downstream_key in self.downstream_deps:\n record = RDSTableLineage(\n table_source_rk=self.table_key,\n table_target_rk=downstream_key\n )\n yield record",
"def __next__(self):\n y,x = self.pos\n\n if self.fuel > 0:\n\n if self.facing == \"N\":\n if y > 0:\n y -= 1\n else:\n raise StopIteration\n\n elif self.facing == \"S\":\n if y < self.thefarm.h - 1:\n y += 1\n else:\n raise StopIteration\n\n elif self.facing == \"W\":\n if x > 0:\n x -= 1\n else:\n raise StopIteration\n\n elif self.facing == \"E\":\n if x < self.thefarm.w - 1:\n x += 1\n else:\n raise StopIteration\n\n self.fuel -= 1\n self.pos = (y,x)\n\n else: # outta gas\n raise StopIteration\n\n return self.thefarm.field[y][x]",
"def _create_record_iterator(self) -> Iterator[RDSModel]:\n for downstream_key in self.downstream_deps:\n record = RDSColumnLineage(\n column_source_rk=self.column_key,\n column_target_rk=downstream_key\n )\n yield record",
"def go_around(\n self,\n airport: None | str | \"Airport\" = None,\n dataset: None | \"Airports\" = None,\n **kwargs: Any,\n ) -> Iterator[\"Flight\"]:\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n if airport is None:\n if dataset is None:\n airport = self.landing_airport()\n else:\n airport = self.landing_airport(dataset=dataset)\n\n if airport is None:\n return None\n\n attempts = self.aligned_on_ils(airport, **kwargs)\n # you need to be aligned at least twice on a rway to have a GA:\n if len(attempts) < 2:\n return\n\n first_attempt = next(attempts, None)\n\n while first_attempt is not None:\n after_first_attempt = self.after(first_attempt.start)\n assert after_first_attempt is not None\n\n climb = after_first_attempt.phases().query('phase == \"CLIMB\"')\n if climb is None:\n return\n\n after_climb = self.after(next(climb.split(\"10T\")).stop)\n if after_climb is None:\n return\n\n next_attempt = next(\n after_climb.aligned_on_ils(airport, **kwargs), None\n )\n\n if next_attempt is not None:\n goaround = self.between(first_attempt.start, next_attempt.stop)\n assert goaround is not None\n\n goaround = goaround.assign(\n ILS=None,\n airport=airport\n if isinstance(airport, str)\n else airport.icao,\n )\n goaround.data.loc[\n goaround.data.timestamp <= first_attempt.stop, \"ILS\"\n ] = first_attempt.max(\"ILS\")\n goaround.data.loc[\n goaround.data.timestamp >= next_attempt.start, \"ILS\"\n ] = next_attempt.max(\"ILS\")\n yield goaround\n\n first_attempt = next_attempt",
"def thermals(self) -> Iterator[\"Flight\"]:\n self = cast(\"Flight\", self)\n all_segments = (\n self.unwrap()\n .diff(\"track_unwrapped\")\n .agg_time(\"1T\", vertical_rate=\"max\", track_unwrapped_diff=\"median\")\n .abs(track_unwrapped_diff_median=\"track_unwrapped_diff_median\")\n .query(\"vertical_rate_max > 2 and track_unwrapped_diff_median > 5\")\n )\n if all_segments is not None:\n yield from all_segments.split(\"1T\")",
"def iter_atoms(self):\n for frag in self.fragment_list:\n for atm in frag.atom_list:\n yield atm",
"def flightLegs(Alist,start,dest):\n Alen = len(Alist) #length of Alist\n index = list(range(Alen)) #list of airport indexes\n Adict = dict(zip(index,Alist)) #zip into dictionary\n #Adict = dict.fromkeys(range(Alen),large value) possibly qiucker?\n \n Flights = [] #output\n\n checked = [] #empty list\n tocheck = deque() #empty deque for a queue of paths to be checked\n tocheck.append([start])\n \n if start == dest: #trivial case\n return \"trivial case explored\"\n \n while tocheck: #while there are elements in tocheck\n path = tocheck.popleft() #pop out a path to be checked\n node = path[-1] #extract last element of path, this node represents an airport\n if node not in checked: #check if this path terminates in a new node/airport\n adjacents = Adict[node] #look up in dictionary airport index and return list of adjacent airports\n for n in adjacents: #extracts nodes inside list of adjacents\n if n not in checked:\n new_path = path +[n] #add neighbour to this current path\n tocheck.append(new_path) #put this new path into the queue to be checked\n if n == dest: #if this neighbour is the final destination, then the new path is the full path\n if len(Flights)==0:\n Flights.append(new_path)\n elif len(new_path)==len(Flights[-1]):\n Flights.append(new_path)\n else:\n return (len(Flights[0]),len(Flights))\n \n checked.append(node)\n\n return Flights #finds all paths need to modify to stop after a path of non shortest length is found",
"def pairing(self):\n if len(self._paths) == 0:\n second_values = self.data\n get_flight = lambda x: x\n first = True\n else:\n second_values = self._paths\n get_flight = lambda x: x.get_last_flight()\n first = False\n\n for value in second_values:\n f1 = get_flight(value)\n for f2 in self.data:\n if f1.connects_to(f2):\n if first:\n self._paths.append(FlightPath(f1, f2))\n else:\n path_copy = copy.copy(value)\n added = path_copy.try_add(f2)\n if added:\n self._paths.append(path_copy)",
"def __next__(self):\n self.index += 1\n if self.index >= len(self.zone_list):\n raise StopIteration\n return self.zone_list[self.index]",
"def __next__(self):\n self.index += 1\n if self.index >= len(self.zone_list):\n raise StopIteration\n return self.zone_list[self.index]",
"def process_airlines_list(airline_list):\r\n for airline in airline_list:\r\n n, r = divmod(airline.frequency, 10)\r\n for i in range(n):\r\n yield airline.definition(10)\r\n if r:\r\n yield airline.definition(r)",
"def iter_atoms(self):\n for chain in self.chain_list:\n for frag in chain.fragment_list:\n for atm in frag.atom_list:\n yield atm",
"def parse_flights(flights):\n if not flights:\n return None\n result = list()\n for flight in flights:\n result.append({\n 'carrier': flight.findtext('./Carrier').strip(),\n 'flight_number': flight.findtext('./FlightNumber').strip(),\n 'departure_ts': flight.findtext('./DepartureTimeStamp').strip(),\n 'arrival_ts': flight.findtext('./ArrivalTimeStamp').strip(),\n 'class': flight.findtext('./Class').strip(),\n 'fare_basis': flight.findtext('./FareBasis').strip(),\n 'ticket_type': flight.findtext('./TicketType').strip(),\n })\n return result",
"def __iter__(self):\n for arc in self.agenda:\n yield arc",
"def get_next_item(self):\n excel_df = pandas.read_excel(self.path)\n for row in excel_df.iterrows():\n row_dic = row[1].to_dict()\n yield Movie(**row_dic)",
"def __iter__(self):\n for feature in itertools.izip(self.shapes, self.records):\n yield feature",
"def preparing(fasta_list, pdb_dict):\n for item1 in fasta_list:\n matchObj = re.search( '^(.*)_([a-zA-Z0-9])$', item1[0])\n fasta1= item1[1]\n if matchObj:\n original_name1= matchObj.group(1)\n original_structure1=pdb_dict[original_name1]\n chain_1= matchObj.group(2) \n yield fasta1, [original_structure1, chain_1]",
"def get_next_item(self) -> Generator[LibraryItem, None, None]:\n df = pandas.read_excel(self.path)\n for row in df.iterrows():\n row_no = row[0] + 2\n row_data = row[1]\n\n if row_data.isnull().any():\n print(f\"Missing data at line {row_no}\")\n continue\n\n try:\n movie = Movie(**row_data)\n except ValueError as e:\n print(f\"Invalid data at line {row_no}:\", e)\n else:\n yield movie",
"def iter_fragments(self):\n for chain in self.chain_list:\n for frag in chain.fragment_list:\n yield frag"
] | [
"0.6066153",
"0.60416305",
"0.5919573",
"0.5495734",
"0.5476187",
"0.54701424",
"0.53442407",
"0.5342146",
"0.5320868",
"0.53155226",
"0.5294439",
"0.5286481",
"0.528069",
"0.5221552",
"0.51874614",
"0.5186745",
"0.5112128",
"0.5110774",
"0.5096295",
"0.50627136",
"0.50627136",
"0.5032011",
"0.49924785",
"0.49868813",
"0.4952123",
"0.49284205",
"0.49263608",
"0.49017698",
"0.49012968",
"0.48763356"
] | 0.79937047 | 0 |
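A self-contained toy that reproduces the yield shape of departing_flights() above, (flight index, assigned bay, flight object), so the consuming loop pattern is visible; the MiniSchedule container and its field names are assumptions made for illustration only.

from collections import namedtuple

Flight = namedtuple("Flight", ["callsign", "etd"])  # stand-in for the real flight object


class MiniSchedule:
    def __init__(self, flights, bays, departing_flags):
        self.flights = flights            # one Flight per index
        self.bay = bays                   # bay assigned to each flight index
        self.departing_flags = departing_flags

    def departing_flights(self):
        # Same contract as the record above: yield (index, assigned bay, flight).
        for i, flight in enumerate(self.flights):
            if self.departing_flags[i]:
                yield i, self.bay[i], flight


schedule = MiniSchedule(
    flights=[Flight("KQ100", "08:15"), Flight("KQ200", "19:40")],
    bays=[3, 7],
    departing_flags=[True, False],
)
for i, bay, flight in schedule.departing_flights():
    print(i, bay, flight.callsign)   # -> 0 3 KQ100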
Checks whether a certain flight gate combination is feasible. | def is_feasible(self, i, l):
# Get the bay assigned to the flight
k = self.bay[i]
if self.flights.domestic(i, departing=True):
# If it's a domestic flight, then only the domestic gates are feasible. Also check if the bay gate
# combination is feasible.
return (l in self.airport.domestic_gates) and (self.airport.bay_gate_distance[k][l] is not None)
else:
# Domestic gates are unfeasible for non-domestic flights. Check bay gate combination as well.
if (l not in self.airport.domestic_gates) and (self.airport.bay_gate_distance[k][l] is not None):
                # KQ flights after 6PM are only allowed in terminal A
time_6pm = datetime.combine(self.flights.config['date'], time(hour=18))
                # Check if it's a KQ flight after 6pm
if (self.flights.flight_schedule[i].etd >= time_6pm) and (self.flights.airline(i) == "KQ"):
# If it is, check whether the gate is a terminal a gate.
return l in self.terminal_a_gates
else:
# It's not, so it's feasible
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def valid_trip(self):\n if self.pickupcoords is None or self.dropoffcoords is None:\n return False\n valid = lambda x, y: 41 < x < 43.5 and -72.5 < y < - 70.5\n return valid(self.pickupcoords[0], self.pickupcoords[1]) and valid(self.dropoffcoords[0], self.dropoffcoords[1])",
"def is_valid(self,):\r\n return self.g > 0 and self.l > 0 and self.m1 > 0 and self.m2 > 0 and self.m3 > 0 and self.r1 > 0 and self.r2 > 0 and self.tau > 0 and self.theta1 > 0 and self.theta2 > 0 and self.theta3 > 0",
"def is_valid_flight(self) -> bool:\n flight_snapshot = self.flight()\n orbit_snapshot = self.orbit()\n direction_snapshot = np.array(self.direction())\n\n # zero altitude after x time condition\n if self.vessel.met > 10 and flight_snapshot.speed == 0:\n print('Rocket never left')\n return False\n\n # vessel not in ocean condition\n if self.vessel.met > 10 and (self.situation() == self.situation().docked\n or self.situation() == self.situation().landed\n or self.situation() == self.situation().splashed):\n print('Rocket not flying anymore')\n return False\n\n # zero fuel condition\n if min(self.liquid_fuel(), self.oxidizer()) == 0:\n print('Rocket out of fuel')\n return False\n\n # If rocket is ballistic. As in flying towards the ground\n horizontal_direction = np.array((0, direction_snapshot[1], direction_snapshot[2]))\n pitch = self.angle_between_vectors(direction_snapshot, horizontal_direction)\n if direction_snapshot[0] < 0:\n pitch = -pitch\n\n if pitch < -3 and flight_snapshot.mean_altitude < 70000:\n print(f'Went Ballistic with pitch{pitch} at altitude {flight_snapshot.mean_altitude}')\n return False\n\n return True",
"def feasible(self, c):\n\t\tfor played_combination in self.combinations:\n\t\t\tif not self.consistent(c, played_combination):\n\t\t\t\treturn False\n\t\treturn True",
"def _is_valid(self):\n\n if (\n self.poly.weight_0 != 0\n or len(self.poly.weight_1) != self.num_qubits\n or len(self.poly.weight_2) != int(self.num_qubits * (self.num_qubits - 1) / 2)\n or len(self.poly.weight_3)\n != int(self.num_qubits * (self.num_qubits - 1) * (self.num_qubits - 2) / 6)\n ):\n return False\n if (\n (self.linear).shape != (self.num_qubits, self.num_qubits)\n or len(self.shift) != self.num_qubits\n or not np.allclose((np.linalg.det(self.linear) % 2), 1)\n ):\n return False\n if (\n not (set(self.poly.weight_1.flatten())).issubset({0, 1, 2, 3, 4, 5, 6, 7})\n or not (set(self.poly.weight_2.flatten())).issubset({0, 2, 4, 6})\n or not (set(self.poly.weight_3.flatten())).issubset({0, 4})\n ):\n return False\n if not (set(self.shift.flatten())).issubset({0, 1}) or not (\n set(self.linear.flatten())\n ).issubset({0, 1}):\n return False\n return True",
"def is_valid(self):\n posit1 = (self.mean_v > 0) & (self.kappa_y > 0) & (self.eta_y > 0)\n posit2 = (self.kappa_s > 0) & (self.eta_s > 0)\n return posit1 & posit2 & self.feller()",
"def is_valid(self):\r\n return self.circuit.is_valid",
"def is_winning(self, curr_state):\n winning_combinations = [(0,1,2),(3,4,5),(6,7,8),(0,3,6),(1,4,7),(2,5,8),(0,4,8),(2,4,6)]\n # We will check only for the above 8 combinations to see any of them sums up to 15 which implies winning\n for combination in winning_combinations:\n #print('Combination:',combination)\n if not np.isnan(curr_state[combination[0]]) and not np.isnan(curr_state[combination[1]]) and not np.isnan(curr_state[combination[2]]) :\n if curr_state[combination[0]] + curr_state[combination[1]] + curr_state[combination[2]] == 15 :\n return True\n \n #If none of the above condition is True return False \n return False",
"def valid(a,b,x,y):\n\t# Size of the square grid that encases rectagle x,y\n\tsquare = x + y - 2\n\t# Taxi cab distance (no diagonals) from (p_a, p_b) to (a,b)\n\tsteps = lambda p_a, p_b: abs(p_a - a) + abs(p_b - b)\n\t# Top/Bottom/Left/Right bound\n\tif min(a,b) < 0 or max(a,b) >= square: return False\n\t# Upper left/Lower right corner check\n\tif steps(0,0) < (x - 2) or steps(square - 1, square - 1) < (x - 2): return False \n\t# Lower left/Upper right corner check\n\telif steps(square - 1, 0) < (y - 2) or steps( 0, square - 1) < (y - 2): return False\n\treturn True",
"def ok(self, solution):\n if self.constraints is not None:\n for constraint in self.constraints:\n if not constraint(solution):\n return False\n return True",
"def check_flag(self):\r\n # Loop over each checking result to see if it is feasible or not\r\n self.flag = True\r\n for key in self.is_feasible.keys():\r\n if self.is_feasible[key] == False:\r\n self.flag = False\r\n return self.flag",
"def check_me(triplet, list_of_coords):\n c = True\n for element in list_of_coords:\n if (float(triplet[0])*0.99 <= float(element[0]) <= float(triplet[0])*1.01):\n if (float(triplet[1])*0.99 <= float(element[1]) <= float(triplet[1])*1.01):\n if (float(triplet[2])*0.99 <= float(element[2]) <= float(triplet[2])*1.01):\n c = False\n return c",
"def is_solvable(self):\n for row, col in np.ndindex(9, 9):\n if len(self.possible_values[row][col]) < 1 and self.final_values[row][col] == 0:\n return False\n return True",
"def goal_test(self, state):\r\n assignment = dict(state)\r\n return (len(assignment) == len(self.variables)\r\n and all(self.nconflicts(variables, assignment[variables], assignment) == 0\r\n for variables in self.variables))",
"def valid(self):\n\t\tfor k, v in self.rules.items():\n\t\t\tfor i in v:\n\t\t\t\tif any([self.valid_rule_1(i), self.valid_rule_2(i), self.valid_rule_3(k, i)]):\n\t\t\t\t\t# print(\"Got a pass\")\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\t# print(\"Got a fail\")\n\t\t\t\t\treturn False\n\t\t# print(\"CORRECT CFG\")\n\t\treturn True",
"def is_valid(self):\n return (4 * (self.a ** 3) + 27 * (self.b ** 2)) % self.fp != 0",
"def constraint_single_gate_per_flight(self):\n print(\" - Constraint: Single gate per flight constraint.\")\n c = \"\\\\ Single gate per flight constraint.\\n \"\n\n # Loop through each flight.\n for i, k, flight in self.departing_flights():\n # Start writing the constraint for this flight.\n c += \"sg_{}:\\n \".format(i)\n\n # Loop through each bay and check whether it's compliant with the aircraft used in the flight.\n for l in range(self.airport.n_gates):\n # Check whether the bay gate combination is feasible.\n if self.is_feasible(i, l):\n\n # If compliant add the term to the constraint's sum.\n c += \" + {:10s}\".format(self.x(i, l))\n\n # Add new line if necessary\n if len(c.split(\"\\n\")[-1]) > self.line_width_limit:\n c += \"\\n \"\n\n # Finish the constraint, and loop back again fo the next flight.\n c += \" = 1\\n \"\n return c",
"def isValid(self):\n return self.isOutOfDate() == False \\\n and self.isQCValid() == True \\\n and self.getDisposeUntilNextCalibrationTest() == False \\\n and self.isValidationInProgress() == False \\\n and self.isCalibrationInProgress() == False",
"def _isvalid(self, x):\n return (x <= self.n) & (x > 0)",
"def check_fleet(self):\n if len(self.ships) > 0:\n response = False\n for ship in self.ships:\n if ship.afloat == True:\n response = True\n return response",
"def is_valid(self) -> bool:\n return \\\n (self.spatial is None or all([v(self.spatial)\n for v, _ in self.spatial_validations])) \\\n and \\\n (self.temporal is None or all([v(self.temporal)\n for v, _ in self.temporal_validations]))",
"def is_valid(data):\n check = [0 for i in range(4)]\n # calculate how many ships are with different lengths\n for i in range(10):\n for j in range(10):\n if type(data[i][j]) == Ship:\n check[data[i][j]._Ship__length - 1] += 1\n # check ships\n for i in range(4):\n if check[i] != (i + 1) * (4 - i):\n return False\n # check corners\n for i in range(1, 10):\n for j in range(10):\n try:\n if type(data[i - 1][j + 1]) == Ship and \\\n type(data[i][j]) == Ship:\n return False\n except:\n pass\n try:\n if type(data[i - 1][j - 1]) == Ship and \\\n type(data[i][j]) == Ship:\n return False\n except:\n pass\n return True",
"def _is_action_legal(self, action):\n loading_position = self.end_of_lanes[self.current_Lane]\n length_of_vehicle = self.vehicle_data[4][action]\n\n # Check if the corresponding lane has sufficient capacity for cargo\n if loading_position + length_of_vehicle <= self.rows:\n # Check if still vehicle are due to be loaded or infinite vehicle are in harbour yard to load\n if self.number_of_vehicles_loaded[action] < self.vehicle_data[1][action] or \\\n self.vehicle_data[1][action] == -1:\n # Check if cargo type is a reefer that it can be placed in chosen position\n if self.vehicle_data[5][action] == 1:\n designated_loading_area = self.grid_reefer.T[self.current_Lane][\n loading_position:(loading_position + length_of_vehicle)]\n return np.all(designated_loading_area == 1)\n else:\n return True\n else:\n return False\n else:\n return False",
"def valid(self):\n try:\n if self.getPret() > 0 and self.getAn() > 0 and self.validProgram(self.getProgram()):\n return True\n except:\n return False\n return False",
"def valid_transportation(self, transportation_flag):\n return bool(transportation_flag & self._allowed_transport) \\\n or (transportation_flag == self._allowed_transport)",
"def check_all_constraints(Instance: dict):\r\n\r\n print(\"Checking constraints...\")\r\n # Schedule constraints\r\n check_schedule(Instance)\r\n # Resources constraints\r\n check_resources(Instance)\r\n # Exclusions constraints\r\n check_exclusions(Instance)\r\n if (\r\n check_exclusions(Instance)\r\n and check_resources(Instance)\r\n and check_schedule(Instance)\r\n ):\r\n print(\"Done\")\r\n return True\r\n else:\r\n return False",
"def has_solution(self) -> bool:\n if self in [self.SATISFIED, self.ALL_SOLUTIONS, self.OPTIMAL_SOLUTION]:\n return True\n return False",
"def is_valid(field):\n requirement = {4: 1, 3: 2, 2: 3, 1: 4}\n ships = {4: 0, 3: 0, 2: 0, 1: 0}\n used = []\n for row in range(len(field)):\n for column in range(len(field[row])):\n if row < 10 or column < 10:\n coord = change((column, row))\n ship = has_ship(coord, field)\n if ship:\n ship = ship_size(coord, field)\n if ship and ship[0] > 0 and ship[1][0] not in used:\n try:\n ships[ship[0]] += 1\n used.extend(ship[1])\n except KeyError:\n return False\n else:\n return False\n return requirement == ships",
"def is_legal(self, start, end) -> bool:\n return self.board(end) == 0 \\\n and self.board(start) > 0 \\\n and self._check_zone_locks(start, end) \\\n and self.exists_path(start, end)",
"def check_completion(wfc_parameters, wfc_state):\n if np.all(np.count_nonzero(wfc_state.wave_table, axis=2) == 1):\n # Require that every pattern be use at least once?\n pattern_set = set(np.argmax(wfc_state.wave_table, axis=2).flatten())\n # Force a test to encourage backtracking - temporary addition\n if (\n len(pattern_set) != wfc_state.number_of_patterns\n ) and wfc_parameters.wfc_ns.force_use_all_patterns:\n WFC_LOGGER.info(\"Some patterns were not used\")\n return WFC_FAILURE\n WFC_LOGGER.info(\"Check complete: Solver FINISHED\")\n return WFC_FINISHED\n if np.any(np.count_nonzero(wfc_state.wave_table, axis=2) < 1):\n return WFC_FAILURE\n return None"
] | [
"0.6605091",
"0.6594661",
"0.6543195",
"0.63051647",
"0.6281908",
"0.62644327",
"0.6178001",
"0.6077229",
"0.6027049",
"0.60206586",
"0.59558594",
"0.5950025",
"0.5949242",
"0.59165204",
"0.5897148",
"0.58894616",
"0.5884269",
"0.5869693",
"0.5857131",
"0.5834676",
"0.58020765",
"0.57991153",
"0.5792238",
"0.5788767",
"0.5787161",
"0.5786936",
"0.5779706",
"0.57634354",
"0.5759068",
"0.57444966"
] | 0.71497816 | 0 |
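The feasibility rules in the record above boil down to a few set-membership and time checks; below is a stripped-down, stand-alone restatement that swaps the airport and flight-schedule objects for plain dicts and sets (every container name and the sample data are illustrative assumptions).

from datetime import datetime, time

DOMESTIC_GATES = {"D1", "D2"}
TERMINAL_A_GATES = {"A1", "A2", "A3"}
BAY_GATE_DISTANCE = {("B4", "A1"): 120, ("B4", "D1"): 90, ("B7", "A2"): 150}


def is_feasible(gate, bay, domestic, airline, etd, date):
    # Unknown bay-gate distance means the pairing is not usable at all.
    if (bay, gate) not in BAY_GATE_DISTANCE:
        return False
    # Domestic flights may only use domestic gates, and vice versa.
    if domestic:
        return gate in DOMESTIC_GATES
    if gate in DOMESTIC_GATES:
        return False
    # KQ departures after 18:00 are restricted to terminal A gates.
    if airline == "KQ" and etd >= datetime.combine(date, time(hour=18)):
        return gate in TERMINAL_A_GATES
    return True


print(is_feasible("A1", "B4", domestic=False, airline="KQ",
                  etd=datetime(2015, 6, 1, 19, 30), date=datetime(2015, 6, 1).date()))
# True: known bay-gate distance, non-domestic gate, terminal A gate after 18:00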
This constraint makes sure that one gate is assigned to each departing flight. | def constraint_single_gate_per_flight(self):
print(" - Constraint: Single gate per flight constraint.")
c = "\\ Single gate per flight constraint.\n "
# Loop through each flight.
for i, k, flight in self.departing_flights():
# Start writing the constraint for this flight.
c += "sg_{}:\n ".format(i)
# Loop through each bay and check whether it's compliant with the aircraft used in the flight.
for l in range(self.airport.n_gates):
# Check whether the bay gate combination is feasible.
if self.is_feasible(i, l):
# If compliant add the term to the constraint's sum.
c += " + {:10s}".format(self.x(i, l))
# Add new line if necessary
if len(c.split("\n")[-1]) > self.line_width_limit:
c += "\n "
            # Finish the constraint, and loop back again for the next flight.
c += " = 1\n "
return c | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _constraints_task_valid(self):\n def rule(model):\n \"\"\"\n Bind the tail entries to zero\n \"\"\"\n num = self.num_timeslots\n ind_j = model.tasks\n total = sum(model.A2[num-1, j] for j in ind_j)\n total += sum(model.A3[num-1, j] for j in ind_j)\n total += sum(model.A4[num-1, j] for j in ind_j)\n total += sum(model.A3[num-2, j] for j in ind_j)\n total += sum(model.A4[num-2, j] for j in ind_j)\n total += sum(model.A4[num-3, j] for j in ind_j)\n return None, total, 0\n\n self.model.constrain_tail = Constraint(rule=rule)\n\n def rule(model):\n \"\"\"\n Only permit \"valid\" allocation on A, A2, A3, etc.\n \"\"\"\n ind_i = model.timeslots\n ind_j = model.tasks\n total = sum(model.A[i, j] * (1-self.valid[i, j]) for i in ind_i\n for j in ind_j)\n total += sum(model.A2[i, j] * (1 - self.valid[i, j]) for i in\n ind_i for j in ind_j)\n total += sum(model.A3[i, j] * (1 - self.valid[i, j]) for i in\n ind_i for j in ind_j)\n\n return None, total, 0\n\n self.model.constrain_valid0 = Constraint(rule=rule)\n\n def rule(model):\n \"\"\"\n Only permit \"valid\" allocation on A, A2, A3, etc.\n \"\"\"\n ind_i = model.timeslots2\n ind_j = model.tasks\n inv = 1-self.valid\n total = sum(\n model.A2[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A3[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A4[i, j] * inv[i + 1, j] for i in ind_i for j in ind_j)\n\n ind_i = model.timeslots3\n ind_j = model.tasks\n total += sum(\n model.A3[i, j] * inv[i + 2, j] for i in ind_i for j in ind_j)\n total += sum(\n model.A4[i, j] * inv[i + 2, j] for i in ind_i for j in ind_j)\n\n ind_i = model.timeslots4\n ind_j = model.tasks\n total += sum(\n model.A4[i, j] * inv[i + 3, j] for i in ind_i for j in ind_j)\n\n return None, total, 0\n\n self.model.constrain_valid1 = Constraint(rule=rule)",
"def is_feasible(self, i, l):\n # Get the bay assigned to the flight\n k = self.bay[i]\n if self.flights.domestic(i, departing=True):\n # If it's a domestic flight, then only the domestic gates are feasible. Also check if the bay gate\n # combination is feasible.\n return (l in self.airport.domestic_gates) and (self.airport.bay_gate_distance[k][l] is not None)\n else:\n # Domestic gates are unfeasible for non-domestic flights. Check bay gate combination as well.\n if (l not in self.airport.domestic_gates) and (self.airport.bay_gate_distance[k][l] is not None):\n # KQ flight after 6PM are only allowed in terminal A\n time_6pm = datetime.combine(self.flights.config['date'], time(hour=18))\n\n # Check if its a KQ flight after 6pm\n if (self.flights.flight_schedule[i].etd >= time_6pm) and (self.flights.airline(i) == \"KQ\"):\n # If it is, check whether the gate is a terminal a gate.\n return l in self.terminal_a_gates\n else:\n # It's not, so it's feasible\n return True\n else:\n return False",
"def __init__(self, input0, input1, output) :\n Gate.__init__(self, [input0,input1], output)",
"def constraints(self, x):\n pass",
"def check_sporadic(self):\n par = self._get_parameters()\n if par is None:\n return\n if par in sporadic:\n raise InfeasibleError(refs=sporadic[par])",
"def _valid_bond_pair(self, set):\n (sbu1, cp1), (sbu2, cp2) = set\n if all([i is None for i in [cp1.special, cp2.special, cp1.constraint, cp2.constraint]]):\n return sbu1.is_metal != sbu2.is_metal\n\n return (cp1.special == cp2.constraint) and (cp2.special == cp1.constraint)",
"def __addValueConstraints(self):\n for x in range(self.width):\n for y in range(self.height):\n g = self.grid[(x, y)]\n self.solver.add(\n Or([g == Magnets.EMPTY, g == Magnets.PLUS, g == Magnets.MINUS]))\n if x > 0:\n left = self.grid[(x-1, y)]\n self.solver.add(Or([g != left, g == Magnets.EMPTY]))\n if y > 0:\n up = self.grid[(x, y-1)]\n self.solver.add(Or([g != up, g == Magnets.EMPTY]))",
"def _constraints_nonoverlapping_tasks(self):\n\n def rule(model, i):\n total = sum(model.A[i, j] for j in model.tasks)\n total += sum(model.A2[i, j] for j in model.tasks)\n total += sum(model.A3[i, j] for j in model.tasks)\n total += sum(model.A4[i, j] for j in model.tasks)\n if i > 0:\n total += sum(model.A2[i - 1, j] for j in model.tasks)\n total += sum(model.A3[i - 1, j] for j in model.tasks)\n total += sum(model.A4[i - 1, j] for j in model.tasks)\n if i > 1:\n total += sum(model.A3[i - 2, j] for j in model.tasks)\n total += sum(model.A4[i - 2, j] for j in model.tasks)\n if i > 2:\n total += sum(model.A4[i - 3, j] for j in model.tasks)\n return 0, total, 1\n\n self.model.constrain_nonoverlapping = Constraint(self.model.timeslots,\n rule=rule)",
"def constraint_not_adjacent(m, n) :\n return not constraint_adjacent(m,n)",
"def valid_trip(self):\n if self.pickupcoords is None or self.dropoffcoords is None:\n return False\n valid = lambda x, y: 41 < x < 43.5 and -72.5 < y < - 70.5\n return valid(self.pickupcoords[0], self.pickupcoords[1]) and valid(self.dropoffcoords[0], self.dropoffcoords[1])",
"def __init__(self, input, output) :\n Gate.__init__(self, [input], output)",
"def gate(self):\n locked = self.is_locked()\n if locked:\n self.PAUSED() # pause at locked gate\n self.fsm_gate.wait() # wait for gate to unlock\n self.CONTINUE() # continue through open gate",
"def _constraints_other(self):\n pass",
"def allow_gateway_transit(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_gateway_transit\")",
"def allow_gateway_transit(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_gateway_transit\")",
"def constraint_adjacent(m, n) :\n return abs(m-n)==1",
"def route_fiber_single(\n component: Component,\n optical_io_spacing: int = 50,\n grating_coupler: Component = grating_coupler_te,\n min_input2output_spacing: int = 230,\n optical_routing_type: int = 1,\n optical_port_labels: None = None,\n excluded_ports: List[Any] = [],\n **kwargs\n) -> Union[\n Tuple[List[Union[ComponentReference, Label]], List[List[ComponentReference]], None],\n Tuple[\n List[Union[ComponentReference, Label]],\n List[Union[List[ComponentReference], ComponentReference]],\n None,\n ],\n]:\n component = component.copy()\n component_copy = component.copy()\n\n if optical_port_labels is None:\n optical_ports = component.get_ports_list(port_type=\"optical\")\n else:\n optical_ports = [component.ports[lbl] for lbl in optical_port_labels]\n optical_ports = [p for p in optical_ports if p.name not in excluded_ports]\n N = len(optical_ports)\n\n if isinstance(grating_coupler, list):\n grating_couplers = [pp.call_if_func(g) for g in grating_coupler]\n grating_coupler = grating_couplers[0]\n else:\n grating_coupler = pp.call_if_func(grating_coupler)\n grating_couplers = [grating_coupler] * N\n\n gc_port2center = getattr(grating_coupler, \"port2center\", grating_coupler.xsize / 2)\n if component.xsize + 2 * gc_port2center < min_input2output_spacing:\n fanout_length = (\n pp.drc.snap_to_grid(\n min_input2output_spacing - component.xsize - 2 * gc_port2center, 10\n )\n / 2\n )\n else:\n fanout_length = None\n\n \"\"\"\n _________\n | |_E1\n W0_| |\n | |_E0\n |_________|\n\n rotate +90 deg and route West ports to South\n\n E1 E0\n _|___|_\n | |\n | |\n | |\n | |\n | |\n | |\n |_______|\n |\n W0\n\n \"\"\"\n # route west ports to south\n component = component.rotate(90)\n west_ports = component.get_ports_dict(prefix=\"W\")\n north_ports = {\n p.name: p for p in component.ports.values() if not p.name.startswith(\"W\")\n }\n component.ports = west_ports\n\n elements_south, gratings_south, _ = route_fiber_array(\n component=component,\n with_align_ports=False,\n optical_io_spacing=optical_io_spacing,\n fanout_length=fanout_length,\n grating_coupler=grating_couplers[0],\n optical_routing_type=optical_routing_type,\n **kwargs\n )\n\n # route north ports\n component = component_copy.rotate(-90)\n north_ports = {\n p.name: p for p in component.ports.values() if not p.name.startswith(\"W\")\n }\n component.ports = north_ports\n\n elements_north, gratings_north, _ = route_fiber_array(\n component=component,\n with_align_ports=False,\n optical_io_spacing=optical_io_spacing,\n fanout_length=fanout_length,\n grating_coupler=grating_couplers[1:],\n **kwargs\n )\n for e in elements_north:\n elements_south.append(e.rotate(180))\n\n if len(gratings_north) > 0:\n for io in gratings_north[0]:\n gratings_south.append(io.rotate(180))\n\n return elements_south, gratings_south, None",
"def remaining_constraints(self):\r\n \r\n def iec1(state,decision,nodes):\r\n return decision['E:L']+decision['E:R_1']<=nodes['E'].get_preds_value(state)\r\n def iec2(state,decision,nodes):\r\n return decision['R_1:L']<=nodes['R_1'].get_preds_value(state)\r\n def iec3(state,decision,nodes):\r\n return decision['G:R_1']>=-(nodes['R_1'].get_preds_value(state)) \r\n def iec4(state,decision,nodes):\r\n return decision['G:L']>=0.0\r\n def iec5(state,decision,nodes):\r\n return decision['E:L']>=0.0\r\n def iec6(state,decision,nodes):\r\n return decision['E:R_1']>=0.0\r\n def iec7(state,decision,nodes):\r\n return decision['R_1:L']>=0.0\r\n\r\n Inequality_Constraints=[iec1,iec2,iec3,iec4,iec5,iec6,iec7]\r\n \r\n return Inequality_Constraints",
"def validate_gatetype(self, gt):\r\n if gt not in self.VALID_GATES:\r\n return False\r\n return True",
"def constraint_adjacent(m, n) :\n if abs(m-n)==1:\n return True\n return False",
"def z3_dynamics_constraints(self, T):\n\n def z3max(assignment, array):\n assert (isinstance(array,types.TupleType) or isinstance(array,types.ListType))\n\n constraints = [assignment >= array[i] for i in range(len(array))]\n equal_to_constraint = z3.Or([assignment == array[i] for i in range(len(array))])\n constraints.append(equal_to_constraint)\n\n return constraints\n\n def z3min(assignment, array):\n assert (isinstance(array,types.TupleType) or isinstance(array,types.ListType))\n\n constraints = [assignment <= array[i] for i in range(len(array))]\n equal_to_constraint = z3.Or([assignment == array[i] for i in range(len(array))])\n constraints.append(equal_to_constraint)\n\n return constraints\n\n x = np.array([[z3.Real(\"x%i\" % i)] for i in range(T+1)])\n u = np.array([[z3.Real(\"u%i\" % i)] for i in range(T)])\n\n assert(self.state_dim == x.shape[1]), \"z3 state variable dimension inconsistent\"\n assert(self.input_dim == u.shape[1]), \"z3 input variable dimension inconsistent\"\n\n constraints = [x[i,0] >= 0 for i in range(T+1)]\n\n for i in range(T):\n dynamics_constraint = z3max(x[i+1,0], (0, self.a * x[i,0] + u[i,0]))\n constraints.extend(dynamics_constraint)\n\n if self.x0 is not None:\n for i in range(self.state_dim):\n initial_constraint = [x[0,i] == self.x0[i]]\n constraints.extend(initial_constraint)\n\n return constraints, x, u",
"def can_relax_constraints(self):\n if len(self.mand_classroom_constraints) == 0:\n if len(self.high_classroom_constraints) > 0:\n return True\n else:\n for cc in self.low_classroom_constraints:\n if cc.can_relax_constraints():\n return True\n\n if len(self.mand_timeblock_ids) == 0:\n if len(self.high_timeblock_ids) > 0:\n return True\n\n return False",
"def _constraints_task_contiguity(self):\n # CONT_STRIDE=1 would give original implementation\n triu = util.triu(self.num_timeslots, incr=self.cont_incr)\n tril = util.tril(self.num_timeslots, incr=self.cont_incr)\n\n def rule(model, i, j):\n \"\"\"\n This rule is used to encourage early completion (in terms of\n allocation) of a task.\n\n More precisely:\n CTu[i,j] = whether task j is UNASSIGNED between slot i and the end\n\n Maximizing sum_i CTu[i,j] encourages early task completion.\n Maximizing sum_i CTu[i,j]+CTl[i,j] encourages contiguous scheduling.\n \"\"\"\n active = 1-self.task_spread[j]\n den = sum(triu[i, :])\n ind = model.timeslots\n # FIXME(cathywu) can/should be more precise with A,A2,A3 offsets\n total = sum(triu[i, k] * (\n 1 - model.A[k, j] - model.A2[k, j] - model.A3[k, j] - model.A4[\n k, j]) for k in ind)\n total /= den\n total *= active\n # CTu[i,j] = floor(total)\n return -1 + EPS, model.CTu[i, j] - total, EPS + self.slack_cont\n\n self.model.constrain_contiguity_u = Constraint(self.model.contslots,\n self.model.tasks,\n rule=rule)\n\n def rule(model, i, j):\n \"\"\"\n This rule is used to encourage late start (in terms of\n allocation) of a task.\n\n More precisely:\n CTl[i,j] = whether task j is UNASSIGNED between slot 0 and slot i\n\n Maximizing sum_i CTl[i,j] encourages late starting.\n Maximizing sum_i CTu[i,j]+CTl[i,j] encourages contiguous scheduling.\n \"\"\"\n active = 1-self.task_spread[j]\n den = sum(tril[i, :])\n ind = model.timeslots\n total = sum(tril[i, k] * (\n 1 - model.A[k, j] - model.A2[k, j] - model.A3[k, j] - model.A4[\n k, j]) for k in ind)\n total /= den\n total *= active\n return -1 + EPS, model.CTl[i, j] - total, EPS + self.slack_cont\n\n self.model.constrain_contiguity_l = Constraint(self.model.contslots,\n self.model.tasks,\n rule=rule)\n\n def rule(model):\n den = self.num_tasks * self.cont_slots * (self.slack_cont + 1)\n num = 0.25\n total = summation(model.CTu) / den * num\n return model.CTu_total == total\n\n self.model.constrain_contiguity_ut = Constraint(rule=rule)\n\n def rule(model):\n den = self.num_tasks * self.cont_slots * (self.slack_cont + 1)\n num = 0.25\n total = summation(model.CTl) / den * num\n return model.CTl_total == total\n\n self.model.constrain_contiguity_lt = Constraint(rule=rule)",
"def isFeasible(self, A):\n\t\treturn False",
"def G(self, (k,t), (j,x), **params):\n d = len(x)/2\n q,dq = x[:d],x[d:]\n J = (j == True)\n _J = np.logical_not(J)\n # number of constraints\n n = len(J) \n # number of active constraints\n m = np.sum(J) # = n - len(a)\n a = self.a( (k,t), (_J,q), **params)\n lambda_ = self.lambda_( (k,t), (J,q,dq), **params)\n # unilateral constraint forces\n lambda_ = lambda_[:m] \n g = np.nan*np.zeros(n)\n g[_J] = a\n g[J] = lambda_\n return g",
"def test_has_gate(self):\n sched = Schedule()\n inst_map = InstructionScheduleMap()\n\n inst_map.add(U1Gate(0), (0,), sched)\n inst_map.add(CXGate(), [0, 1], sched)\n\n self.assertTrue(inst_map.has(U1Gate(0), [0]))\n self.assertTrue(inst_map.has(CXGate(), (0, 1)))\n with self.assertRaises(PulseError):\n inst_map.assert_has(\"dne\", [0])\n with self.assertRaises(PulseError):\n inst_map.assert_has(CXGate(), 100)",
"def constraint_not_adjacent(m, n) :\n if abs(m-n)==1:\n return False\n return True",
"def constraints(self):\n ...",
"def constrain(*args, barrier: bool=True, damping: Union[float, bool]=0.0, directionalHinge:\n bool=True, hinge: bool=True, interpenetrate: bool=True, nail: bool=True, name:\n Union[AnyStr, bool]=\"\", orientation: Union[List[float, float, float], bool]=None,\n pinConstraint: bool=True, position: Union[List[float, float, float], bool]=None,\n restLength: Union[float, bool]=0.0, spring: bool=True, stiffness: Union[float,\n bool]=0.0, q=True, query=True, e=True, edit=True, **kwargs)->Union[None, Any]:\n pass",
"def add_constraints_based_on_task(self):\n # Note this method is only called when a task is found\n for counter, agent in enumerate(self.agents):\n if len(agent.task_list) > 0: # task has been chosen\n last_element = agent.task_list[-1]\n self.graph.add_movement_constraint_by_name(self.tasks[last_element].getName(), weight=self.t)"
] | [
"0.58306277",
"0.5543608",
"0.51670986",
"0.51483375",
"0.514472",
"0.504169",
"0.5010115",
"0.5004392",
"0.50034606",
"0.49957335",
"0.49898705",
"0.49694562",
"0.4947864",
"0.49331027",
"0.49331027",
"0.4885146",
"0.48799786",
"0.4871206",
"0.48651466",
"0.4862099",
"0.48545948",
"0.48509073",
"0.48506072",
"0.48280504",
"0.4824886",
"0.4817204",
"0.48142397",
"0.47987783",
"0.4787681",
"0.47753277"
] | 0.69282305 | 0 |
Gets the _5qi of this RequestedQos. | def _5qi(self):
return self.__5qi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _5qi(self, _5qi):\n if _5qi is None:\n raise ValueError(\"Invalid value for `_5qi`, must not be `None`\") # noqa: E501\n\n self.__5qi = _5qi",
"def __init__(self, _5qi=None, gbr_ul=None, gbr_dl=None): # noqa: E501\n self.swagger_types = {\n '_5qi': Model5Qi,\n 'gbr_ul': BitRate,\n 'gbr_dl': BitRate\n }\n\n self.attribute_map = {\n '_5qi': '5qi',\n 'gbr_ul': 'gbrUl',\n 'gbr_dl': 'gbrDl'\n }\n self.__5qi = _5qi\n self._gbr_ul = gbr_ul\n self._gbr_dl = gbr_dl",
"def _get_qos(self):\n return self.__qos",
"def _get_qos(self):\n return self.__qos",
"def _get_qos(self):\n return self.__qos",
"def q0(self):\n charge = self.get('q0')\n if charge is not None:\n charge = charge[0]\n return charge",
"def get_qos(self):\n return self.qos",
"def get_fastq(self):\n\t\tif self.have_fastqs is False:\n\t\t\tself._extract_fastqs_from_fast5()\n\t\t\tself.have_fastqs = True\n\n\t\tif not self.fastqs:\n\t\t\treturn None\n\t\telif self.fastqs.get('twodirections') is not None:\n\t\t\treturn self.fastqs.get('twodirections')\n\t\telif self.fastqs.get('template') is not None:\n\t\t\treturn self.fastqs.get('template')\n\t\telif self.fastqs.get('complement') is not None:\n\t\t\treturn self.fastqs.get('complement')",
"def qfi(self):\n return self._qfi",
"def qos(self) -> int:\n return self._qos",
"def get_qos(self, qos_id):\n aname = \"cinder_v%s.get_qos\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().qos_specs.get(qos_id)",
"def cached_WikidataItem_Q5():\n return m.WikidataItem(\"Q5\")",
"def c5(self):\n return self._c5",
"def qd(self):\n return self._qd",
"def q(self):\n return self._q",
"def qos(self):\n if self == SubscribeResult.qos0:\n rv = 0\n elif self == SubscribeResult.qos1:\n rv = 1\n elif self == SubscribeResult.qos2:\n rv = 2\n else:\n raise TypeError()\n\n return rv",
"def snmpqosqos_bytes_rx(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_bytes_rx\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_qos(self, qos_id):\n return self._unify_qos(self._impl.get_qos(qos_id))",
"def get_queue(self):\n if self.queue is not None:\n return self.queue\n state = self.get_state()\n self.queue = state.get_queue()\n # print(\"IQ\", self.queue)\n return self.queue",
"def snmpqosqos_packets_filteredrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_packets_filteredrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def show_qos_queue(self, queue, **_params):\r\n return self.get(self.qos_queue_path % (queue),\r\n params=_params)",
"def quantum(self):\n return self._quantum",
"def __str__(self):\n return self.qseqid",
"def get_port_qos_rxrate(self, iface, qos):\n pytest.skip(\"Method is not supported by Iperf TG\")",
"def snmpqosqos_packets_received(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_packets_received\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_queue_num(self, qos_id, queue_id):\n\n q_num = None\n queues = self.qos_dict[qos_id][\"ovsdb:qos-entries\"][0][\"queue-list\"]\n\n # Go through all queues\n for queue in queues:\n cur_queue_id = queue[\"queue-ref\"].split(\"'\")[-2]\n # If we have a match, get the q_num and break\n if cur_queue_id == queue_id:\n q_num = queue[\"queue-number\"]\n break\n\n # queue_id is not found in the qos\n if q_num is None:\n #print(json.dumps(self.qos_dict[qos_id], indent=3))\n raise KeyError\n\n return q_num",
"def snmpqosqos_lazy_bytes(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_lazy_bytes\n\t\texcept Exception as e:\n\t\t\traise e",
"def snmpqosqos_real_bytes(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_real_bytes\n\t\texcept Exception as e:\n\t\t\traise e",
"def snmpqosqos_bytes_tx(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_bytes_tx\n\t\texcept Exception as e:\n\t\t\traise e",
"def iq_gain_trim(self):\n return self._read(0x11, 2, 0xFC)"
] | [
"0.6760163",
"0.60865784",
"0.5826613",
"0.5826613",
"0.5826613",
"0.57201535",
"0.5680535",
"0.5635592",
"0.5635423",
"0.55357844",
"0.53329676",
"0.5326896",
"0.52858484",
"0.52672887",
"0.52363986",
"0.5208113",
"0.52069074",
"0.52045333",
"0.5195297",
"0.5194402",
"0.5188192",
"0.51706266",
"0.5113547",
"0.5095658",
"0.50730795",
"0.5064182",
"0.5006813",
"0.5005056",
"0.500199",
"0.50002736"
] | 0.8261977 | 0 |
Sets the _5qi of this RequestedQos. | def _5qi(self, _5qi):
if _5qi is None:
raise ValueError("Invalid value for `_5qi`, must not be `None`") # noqa: E501
self.__5qi = _5qi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _5qi(self):\n return self.__5qi",
"def __init__(self, _5qi=None, gbr_ul=None, gbr_dl=None): # noqa: E501\n self.swagger_types = {\n '_5qi': Model5Qi,\n 'gbr_ul': BitRate,\n 'gbr_dl': BitRate\n }\n\n self.attribute_map = {\n '_5qi': '5qi',\n 'gbr_ul': 'gbrUl',\n 'gbr_dl': 'gbrDl'\n }\n self.__5qi = _5qi\n self._gbr_ul = gbr_ul\n self._gbr_dl = gbr_dl",
"def setS5(self, num):\n self.space5 = num",
"def setQ(self,Q):\n self.Q = Q",
"def set_qty(self, qty):\n self.__qty = qty",
"def set_qxqz(self):\n self.qx = self._q_x()\n self.qz = self._q_z()",
"def set_qos(self, qos, set_specs_args):\n self._impl.set_qos(qos.id, set_specs_args)\n return self._unify_qos(qos)",
"def Q(self, value):\n assert value > 0, \"Q needs to be positive and above zero (we divide by Q)\"\n self._Q = value\n self._update()",
"def set_RQ(self): \n # Convenience abbreviations.\n ion_atms = self.ion_atms # Atoms to average dictionary\n ion_res = self.ion_res # Ionizable residues\n RQ = self.RQ # list of q_i coordinates\n for res_id in self.res_ids:\n if res_id[0] in ion_res:\n # Atoms to average. Omitting the residue id at the 0 position in the\n # 'res'-list, therefore 'res_id[3:]'.\n # 'atm.split()[0]' returns the atom type.\n av_atms = []\n for atm in res_id[3:]:\n if atm.split()[0] in ion_atms[res_id[0]]:\n av_atms.append(\" \".join(res_id[:3]) + \" \" + atm.strip())\n RQ.append(av_atms) \n self.RQ = RQ",
"def initial_Q(self, negative):\n \n ##get each values in the Q, and change their content to given number, plan to use in Q5\n for key in self.Q.iterkeys():\n self.Q[key] = float(negative)",
"def set_qos(self, on_ok):\n self._channel.basic_qos(\n prefetch_count=self._prefetch_count, callback=on_ok)",
"def set_qos(self, qos_id, set_specs_args):\n aname = \"cinder_v%s.set_qos\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().qos_specs.set_keys(qos_id,\n set_specs_args)",
"def rule_axiom_5(self, idx: int, line: Statement) -> None:\n self.at_most_refs(line, 0, 'axiom 5')\n if line.formula != self.AXIOMS[5].formula:\n raise InvalidRule('not axiom 5')",
"def tweak_q(self, q):\n self._q = q\n self.reset()",
"def quantity(self, quantity):\n\n self._quantity = quantity",
"def quantity(self, quantity):\n\n self._quantity = quantity",
"def quantity(self, quantity):\n\n self._quantity = quantity",
"def quantity(self, quantity):\n\n self._quantity = quantity",
"def quantity(self, quantity: int):\n\n self._quantity = quantity",
"def embed_fastqs_wrapper(fast5_path, fastq_string):\n f5_handle = Fast5(fast5_path, 'r+')\n f5_handle.set_fastq(\"Analyses/Basecall_1D_000\", fastq_string,\n overwrite=True)\n f5_handle.close()\n return True",
"def quality(self, value: int):\n # TODO - Ensure that this is valid\n self._quality = value",
"def queue_size(self, queue_size: ConfigNodePropertyInteger):\n\n self._queue_size = queue_size",
"def set_quality(self):\n p = self.suitability + 1.15 * self.fono\n self.quality = np.exp(p) / (1 + np.exp(p))",
"def qty(self, qty):\n\n self._qty = qty",
"def host_alias5(self, host_alias5):\n\n self._host_alias5 = host_alias5",
"def set_qname(self, qname):\n self._qname = qname",
"def on_VI_518p_set_clicked(self):\n # TODO: not implemented yet\n disp_518P()\n self.up_slot = qmdz_const.up_slot\n self.down_slot = qmdz_const.down_slot",
"def set_qword(ea, value):\n if not ida_bytes.patch_qword(ea, value):\n raise RuntimeError(\"Unable to set value {} at {}\".format(ea, value))",
"def __init__(self, queue_id):\n self.queue_id = queue_id\n self.action_type = 'set_queue'",
"def q(self, q: ComType):\n if isinstance(q, complex):\n self._pwr = q\n else:\n self._pwr = complex(0, q)"
] | [
"0.7202126",
"0.6528238",
"0.5769253",
"0.55811924",
"0.5257013",
"0.5193161",
"0.51654893",
"0.5160565",
"0.51235825",
"0.51198363",
"0.5082881",
"0.5069115",
"0.5051344",
"0.5051314",
"0.50429034",
"0.50429034",
"0.50429034",
"0.50429034",
"0.50224656",
"0.5010282",
"0.4956324",
"0.49102014",
"0.490984",
"0.4908227",
"0.49049306",
"0.48458958",
"0.48190466",
"0.48151192",
"0.48010576",
"0.47770745"
] | 0.81541806 | 0 |
Gets the gbr_ul of this RequestedQos. | def gbr_ul(self):
return self._gbr_ul | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gbr_dl(self):\n return self._gbr_dl",
"def getBL(self):\r\n return self.bL;",
"def gbr_ul(self, gbr_ul):\n\n self._gbr_ul = gbr_ul",
"def get_bribe(self):\r\n return self.bribe",
"def _get_qos(self):\n return self.__qos",
"def _get_qos(self):\n return self.__qos",
"def _get_qos(self):\n return self.__qos",
"def buGroup(self):\n return self.xsID[1]",
"def getLRUPPN(self):\n return min(self.memory, key=lambda frame: frame[1])[0]",
"def getGbl(self, key):\n if hasattr(key, \"encode\"):\n key = key.encode(\"utf-8\") # convert str to bytes\n return self.getVal(self.gbls, key)",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def _get_queue(self):\n return self.__queue",
"def gb(self):\n return self.data.gb",
"def g_lb(self):\n pass",
"def get_cumpnl(self):\n\n return self._cumpnl",
"def get_queue(self):\n if self.queue is not None:\n return self.queue\n state = self.get_state()\n self.queue = state.get_queue()\n # print(\"IQ\", self.queue)\n return self.queue",
"def get_unlock_comb(self, comb_no):\n rreq_ulg = bytearray()\n rreq_ulg.append(comb_no)\n rreq_ulg.extend([0x00]*8)\n self.send_command(cmd=DEFS.CMD_ULG_RRQ, data=rreq_ulg)\n self.recv_reply()\n ulg_comb = []\n for n in range(struct.unpack('<H', self.last_payload_data[6:8])[0]):\n ulg_comb += [self.last_payload_data[1+n]]\n return ulg_comb"
] | [
"0.6624793",
"0.6183227",
"0.5677202",
"0.56101704",
"0.55133814",
"0.55133814",
"0.55133814",
"0.5454623",
"0.5451101",
"0.5410858",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.54033214",
"0.5358289",
"0.5357806",
"0.5316637",
"0.5288969",
"0.5248644"
] | 0.7476282 | 0 |
Sets the gbr_ul of this RequestedQos. | def gbr_ul(self, gbr_ul):
self._gbr_ul = gbr_ul | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gbr_dl(self, gbr_dl):\n\n self._gbr_dl = gbr_dl",
"def gbr_ul(self):\n return self._gbr_ul",
"def set_unlock_comb(self, comb_no, ulg_comb):\n wreq_ulg = bytearray([0x00]*8)\n wreq_ulg[0] = comb_no\n wreq_ulg[6:8] = struct.pack('<H', len(ulg_comb))\n\n for n in range(len(ulg_comb)):\n wreq_ulg[1+n] = ulg_comb[n]\n\n self.send_command(cmd=DEFS.CMD_ULG_WRQ, data=wreq_ulg)\n self.recv_reply()\n self.refresh_data()",
"def g_lb(self):\n pass",
"def allocSetBillable(alloc, is_billable):\n return Cuebot.getStub('allocation').SetBillable(\n facility_pb2.AllocSetBillableRequest(allocation=alloc, value=is_billable),\n timeout=Cuebot.Timeout)",
"def gallbladder(self, gallbladder):\n\n self.logger.debug(\"In 'gallbladder' setter.\")\n\n self._gallbladder = gallbladder",
"def _set_lb(o, d):\n o.setlb(d)",
"def gbr_dl(self):\n return self._gbr_dl",
"def set_bribe(self, bribe_amount):\r\n self.bribe = bribe_amount",
"def set_bpq_val(self, bpq_val):\n if not (bpq_val and (len(bpq_val) > 0)):\n raise ValueError\n \n self.bpq_val = bpq_val\n self.bpq_val_len = len(bpq_val)\n \n return",
"def update_lr(self, g_lr):\r\n for param_group in self.g_optimizer.param_groups:\r\n param_group['lr'] = g_lr",
"def ugc_limit(self, ugc_limit):\n\n self._ugc_limit = ugc_limit",
"def bairro(self, bairro):\n self._bairro = bairro",
"def rbiRefRate(self, rbiRefRate):\n\n self._rbiRefRate = rbiRefRate",
"def xbrl_axis(self, xbrl_axis):\n\n self._xbrl_axis = xbrl_axis",
"def set_rx_lpf_bandwith(self, rx_lpf_bandwith):\n\t\tif (rx_lpf_bandwith in range(17)):\n\t\t\tself._rx_lpf_bandwith = rx_lpf_bandwith\n\t\telse:\n\t\t\tsys.stderr.write(\"\\nERROR : %s rx_lpf_bandwith must be an integer between 0 and 16 so it can't be %s !\\n\" % (self._target_id, rx_lpf_bandwith))\n\t\t\tsys.exit(1)",
"def setPTBR(self, PTBR_addr):\n self.PTBR = PTBR_addr",
"def getBL(self):\r\n return self.bL;",
"def rpn(self, rpn):\n\n self._rpn = rpn",
"def rpn(self, rpn):\n\n self._rpn = rpn",
"def setB(self, b):\n\t\tself.b = int(b)",
"def set_lr(self, lr: Union[float, List[float]]):\n self._check_train_ready()\n param_groups = self.optimizer.param_groups\n if isinstance(lr, (list, tuple)):\n lrs = list(lr)\n if len(lrs) != len(param_groups):\n raise ValueError(f\"Expected lrs length {len(param_groups)}, \"\n f\"got {len(lrs)}\")\n elif isinstance(lr, numbers.Number):\n lrs = [lr] * len(param_groups)\n else:\n raise ValueError(f\"Expected lr type list, tuple or number, \"\n f\"got {type(lr)}\")\n for group_lr, param_group in zip(lrs, param_groups):\n param_group['lr'] = group_lr",
"def set_bunit(self, bunit):\n self.bunit = bunit",
"def b(self, b):\n\n self._b = b",
"def setBarGroups(ngroups, gap):\n dislin.bargrp(ngroups, gap)",
"def setGbl(self, key, val):\n if hasattr(key, \"encode\"):\n key = key.encode(\"utf-8\") # convert str to bytes\n if hasattr(val, \"encode\"):\n val = val.encode(\"utf-8\") # convert str to bytes\n return self.setVal(self.gbls, key, val)",
"def __post_init__(self) -> None:\n self.gtex += [None]\n self.bm += [None]\n self._q: queue.Queue = queue.Queue(maxsize=self.maxsize)",
"def add_flag_band(self, fb: FlagBand) -> None:\n self.flag_bands[fb.pq_band] = fb\n self.bands.add(fb.pq_band)\n if fb.pq_manual_merge:\n fb.pq_manual_merge = True\n if fb.pq_fuse_func and self.fuse_func and fb.pq_fuse_func != self.fuse_func:\n raise ConfigException(f\"Fuse functions for flag bands in product set {self.product_names} do not match\")\n if fb.pq_ignore_time != self.ignore_time:\n raise ConfigException(f\"ignore_time option for flag bands in product set {self.product_names} do not match\")\n elif fb.pq_fuse_func and not self.fuse_func:\n self.fuse_func = fb.pq_fuse_func\n self.declare_unready(\"products\")\n self.declare_unready(\"low_res_products\")",
"def set_linked_lmax(\n self,\n val=None\n ):\n if val != None:\n self.linked_lmax = val",
"def __init__(self, _5qi=None, gbr_ul=None, gbr_dl=None): # noqa: E501\n self.swagger_types = {\n '_5qi': Model5Qi,\n 'gbr_ul': BitRate,\n 'gbr_dl': BitRate\n }\n\n self.attribute_map = {\n '_5qi': '5qi',\n 'gbr_ul': 'gbrUl',\n 'gbr_dl': 'gbrDl'\n }\n self.__5qi = _5qi\n self._gbr_ul = gbr_ul\n self._gbr_dl = gbr_dl"
] | [
"0.61691666",
"0.594303",
"0.5098859",
"0.5023717",
"0.49849656",
"0.49809927",
"0.49650007",
"0.48411912",
"0.4841159",
"0.4805827",
"0.4739387",
"0.47129312",
"0.4658429",
"0.46445525",
"0.4636409",
"0.461732",
"0.45834967",
"0.45813844",
"0.45802906",
"0.45802906",
"0.45708507",
"0.45651",
"0.45632327",
"0.456033",
"0.45489484",
"0.45262724",
"0.4515602",
"0.44962648",
"0.4491606",
"0.44807476"
] | 0.760756 | 0 |
Gets the gbr_dl of this RequestedQos. | def gbr_dl(self):
return self._gbr_dl | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gbr_ul(self):\n return self._gbr_ul",
"def getBL(self):\r\n return self.bL;",
"def snmpqosqos_packets_droppedrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_packets_droppedrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def _get_ldp_sync_hold_down(self):\n return self.__ldp_sync_hold_down",
"def _get_lsp_config_frr_bandwidth(self):\n return self.__lsp_config_frr_bandwidth",
"def gbr_dl(self, gbr_dl):\n\n self._gbr_dl = gbr_dl",
"def snmpqosqos_lazy_bytesrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_lazy_bytesrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def budget(self):\n return self._budget",
"def _get_qos(self):\n return self.__qos",
"def _get_qos(self):\n return self.__qos",
"def _get_qos(self):\n return self.__qos",
"def get_lr(self):\n\n if self.opt is None:\n raise ValueError('No learning rate schedulers initialized')\n else:\n for pg in self.opt.param_groups:\n return pg['lr']",
"def get_lr(self):\n return self.call_async(0, '_async_get_lr').gen()",
"def get_dcmgnd(self):\n return self.dcmgnd",
"def qdd(self):\n return self._qdd",
"def getGbl(self, key):\n if hasattr(key, \"encode\"):\n key = key.encode(\"utf-8\") # convert str to bytes\n return self.getVal(self.gbls, key)",
"def getDM(self):\n return self.subintheader['DM']\n #if self.params is None:\n # return\n #return self.params.getDM()",
"def snmpqosqos_real_bytesrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_real_bytesrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def __get_deadline(self):\n return self.__deadline",
"def snmpqosqos_packets_receivedrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_packets_receivedrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def _get_lsp_config_frr_bandwidth_configured(self):\n return self.__lsp_config_frr_bandwidth_configured",
"def snmpqosqos_bytes_rxrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_bytes_rxrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def qd(self):\n return self._qd",
"def snmpqosqos_flow_recyclesrate(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_flow_recyclesrate\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_bribe(self):\r\n return self.bribe",
"def lbda(self):\n return self._properties['lbda']",
"def getSignalQualityInDBM(self):\n return (float(self.wlanSignalQuality) / 2.0) - 100.0",
"def gb(self):\n return self.data.gb",
"def AdvertiseSRLB(self):\r\n\t\treturn self._get_attribute('advertiseSRLB')",
"def comphttpbandwidthsaving(self) :\n\t\ttry :\n\t\t\treturn self._comphttpbandwidthsaving\n\t\texcept Exception as e:\n\t\t\traise e"
] | [
"0.6006991",
"0.5707925",
"0.5370055",
"0.53580135",
"0.5345465",
"0.5287211",
"0.52737546",
"0.5232863",
"0.52164197",
"0.52164197",
"0.52164197",
"0.51493675",
"0.51317436",
"0.51186025",
"0.51073915",
"0.5064704",
"0.505674",
"0.50535154",
"0.5048442",
"0.50189704",
"0.50181186",
"0.50173265",
"0.49907443",
"0.49733388",
"0.49677485",
"0.495792",
"0.49204263",
"0.49028498",
"0.4871768",
"0.4866435"
] | 0.7665163 | 0 |
Sets the gbr_dl of this RequestedQos. | def gbr_dl(self, gbr_dl):
self._gbr_dl = gbr_dl | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gbr_ul(self, gbr_ul):\n\n self._gbr_ul = gbr_ul",
"def gbr_dl(self):\n return self._gbr_dl",
"def gallbladder(self, gallbladder):\n\n self.logger.debug(\"In 'gallbladder' setter.\")\n\n self._gallbladder = gallbladder",
"def dnb(self, dnb):\n\n self._dnb = dnb",
"def _set_lb(o, d):\n o.setlb(d)",
"def update_lr(self, g_lr, d_lr):\n for param_group in self.g_optimizer.param_groups:\n param_group['lr'] = g_lr\n for param_group in self.d_optimizer.param_groups:\n param_group['lr'] = d_lr",
"def update_lr(self, g_lr, d_lr):\n for param_group in self.g_optimizer.param_groups:\n param_group['lr'] = g_lr\n for param_group in self.d_optimizer.param_groups:\n param_group['lr'] = d_lr",
"def set_dpg_demand(self, dpg_list):\n if self.dpg_demand_id:\n self.dpg_demand = dpg_list.by_id[self.dpg_demand_id]\n try:\n self.dpg_demand.add_bs_or_gaes_supply(self)\n except AttributeError:\n print('dpg %s has no corresponding dpg demand %i' % (self.code, self.dpg_demand_id))",
"def update_lr(self, g_lr):\r\n for param_group in self.g_optimizer.param_groups:\r\n param_group['lr'] = g_lr",
"def decay_lr(self):\n self.scheduler.step()\n for i, param_group in enumerate(self.optimizer.param_groups):\n self.lrs[i] = param_group['lr']",
"def BGD(self, tr_d, va_d, epochs, lr, relz=None, lmbda=0.0):\n print \"Training the network with BGD......\"\n trlen = len(tr_d)\n for j in xrange(epochs):\n random.shuffle(tr_d)\n self.update_network(tr_d, lr)\n #for i in xrange(trlen):\n # self.update_network([tr_d[i]],lr)\n if(va_d):\n print \"Epoch {0}: {1}/{2}\".format(j, self.Evaluate(va_d),len(va_d))\n else:\n print \"Epoch {0}:\".format(j)",
"def set_bpq_val(self, bpq_val):\n if not (bpq_val and (len(bpq_val) > 0)):\n raise ValueError\n \n self.bpq_val = bpq_val\n self.bpq_val_len = len(bpq_val)\n \n return",
"def set_drhold(self, value):\n self._connector.set_drhold()",
"def set_dcmgnd(self, gnd):\n self.dcmgnd = gnd",
"def set_dh_params(self, dh_params, flag=0):\r\n return self._arm.set_dh_params(dh_params, flag)",
"def setup_dd(self, company):\n self.company = company\n self.apr = .03\n self.minimum_balance = 1000.0",
"def setPTBR(self, PTBR_addr):\n self.PTBR = PTBR_addr",
"def d_rate(self, d_rate):\n\n self._d_rate = d_rate",
"def b(self, b):\n\n self._b = b",
"def lr_setter(optimizer, epoch, args, bl=False):\n\n lr = args.lr\n if bl:\n lr = args.lrbl * (0.1 ** (epoch // (args.epochb * 0.5)))\n else:\n if args.cos:\n lr *= ((0.01 + math.cos(0.5 * (math.pi * epoch / args.epochs))) / 1.01)\n else:\n if epoch >= args.epochs_decay[0]:\n lr *= 0.1\n if epoch >= args.epochs_decay[1]:\n lr *= 0.1\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr",
"def lmp(self, lmp):\n\n self.logger.debug(\"In 'lmp' setter.\")\n\n self._lmp = lmp",
"def set_rx_lpf_bandwith(self, rx_lpf_bandwith):\n\t\tif (rx_lpf_bandwith in range(17)):\n\t\t\tself._rx_lpf_bandwith = rx_lpf_bandwith\n\t\telse:\n\t\t\tsys.stderr.write(\"\\nERROR : %s rx_lpf_bandwith must be an integer between 0 and 16 so it can't be %s !\\n\" % (self._target_id, rx_lpf_bandwith))\n\t\t\tsys.exit(1)",
"def set_linked_lmax(\n self,\n val=None\n ):\n if val != None:\n self.linked_lmax = val",
"def __setstate__(self, d):\n self.__dict__.update(d)\n self.__queueLock = threading.RLock()",
"def xbrl_axis(self, xbrl_axis):\n\n self._xbrl_axis = xbrl_axis",
"def _set_lsp_config_frr_bandwidth(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-config-frr-bandwidth\", rest_name=\"lsp-config-frr-bandwidth\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"lsp_config_frr_bandwidth must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"lsp-config-frr-bandwidth\", rest_name=\"lsp-config-frr-bandwidth\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__lsp_config_frr_bandwidth = t\n if hasattr(self, '_set'):\n self._set()",
"def setDlPos(beamNumber, pos):\n simConfig[\"POS.DL\"][beamNumber] = pos",
"def __init__(self, reason, install_pbr=True):\n self._reason = reason\n self._install_pbr = install_pbr",
"def set_lbda(self, lbda, derive_mag=True, negative_fluxmag=None):\n self._properties[\"lbda\"] = lbda\n if derive_mag:\n self.derive_magsamples(negative_fluxmag=negative_fluxmag)",
"def set_bid(self, bid):\n self.__bid = bid"
] | [
"0.6014968",
"0.569527",
"0.52426",
"0.52092737",
"0.51112777",
"0.49458075",
"0.49458075",
"0.4866286",
"0.48009413",
"0.47358114",
"0.4625417",
"0.46135667",
"0.46096244",
"0.45907134",
"0.45766228",
"0.45682302",
"0.4564721",
"0.45394444",
"0.45390162",
"0.45298594",
"0.4524398",
"0.4523508",
"0.4501404",
"0.44942302",
"0.4491649",
"0.44911683",
"0.4468794",
"0.44670543",
"0.44576833",
"0.44445544"
] | 0.7670086 | 0 |
Keypoint evaluation using COCOAPI. | def _do_python_keypoint_eval(self, res_file):
coco_det = self.coco.loadRes(res_file)
coco_eval = COCOeval(
self.coco, coco_det, 'keypoints', self.sigmas, use_area=False)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
stats_names = [
'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5',
'AR .75', 'AR (M)', 'AR (L)'
]
info_str = list(zip(stats_names, coco_eval.stats))
return info_str | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _evaluate_predictions_on_coco_ps(coco_gt, coco_results, iou_type, ps_mode, kpt_oks_sigmas=None):\n assert len(coco_results) > 0\n\n if iou_type == \"segm\":\n coco_results = copy.deepcopy(coco_results)\n # When evaluating mask AP, if the results contain bbox, cocoapi will\n # use the box area as the area of the instance, instead of the mask area.\n # This leads to a different definition of small/medium/large.\n # We remove the bbox field to let mask AP use mask area.\n for c in coco_results:\n c.pop(\"bbox\", None)\n\n coco_dt = coco_gt.loadRes(coco_results)\n coco_eval = COCOeval(coco_gt, coco_dt, iou_type)\n if ps_mode == 'voc':\n coco_eval.params.catIds = VOC_IDS\n elif ps_mode == 'nvoc':\n coco_eval.params.catIds = NON_VOC_IDS\n\n # Use the COCO default keypoint OKS sigmas unless overrides are specified\n if kpt_oks_sigmas:\n coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)\n\n if iou_type == \"keypoints\":\n num_keypoints = len(coco_results[0][\"keypoints\"]) // 3\n assert len(coco_eval.params.kpt_oks_sigmas) == num_keypoints, (\n \"[COCOEvaluator] The length of cfg.TEST.KEYPOINT_OKS_SIGMAS (default: 17) \"\n \"must be equal to the number of keypoints. However the prediction has {} \"\n \"keypoints! For more information please refer to \"\n \"http://cocodataset.org/#keypoints-eval.\".format(num_keypoints)\n )\n\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n\n return coco_eval",
"def run_eval(gt_anno, pred_anno):\n coco = COCO(gt_anno)\n coco_dets = coco.loadRes(pred_anno)\n coco_eval = COCOeval(coco, coco_dets, \"keypoints\")\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n coco_eval = COCOeval(coco, coco_dets, \"bbox\")\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()",
"def evaluate(self, edict):\n pass",
"def evaluate(self):\n pass",
"def evaluate(self):\n pass",
"def evaluate(self) :\n pass",
"def main(cfg: DictConfig):\n result, evaluation_metric = evaluate(cfg)\n print(result)\n print(f\"Validation dice coefficient: {result[evaluation_metric]}\")",
"def evaluate_c(self, x, out=None, **kwargs):\n return self._base_nlp.evaluate_c(x, out=out, **kwargs)",
"def RunPersistentOp(key, anchor, inputs, outputs):\n RunPersistentOpCC(key, anchor, inputs, outputs)",
"def evaluate():\n click.echo(\"Not implemented yet. In the future, this command will be used for evaluation.\")\n sys.exit(-2)",
"def evaluate(self, algo):\n raise NotImplementedError()",
"def test_keypoint_detection(init_env, config_file):\n run_all_steps(init_env, config_file)",
"def test(self):\n self.eval()",
"def get_keypoints():\n # Keypoints are not available in the COCO json for the test split, so we\n # provide them here.\n keypoints = [\n 'nose',\n 'neck',\n 'right_shoulder',\n 'right_elbow',\n 'right_wrist', \n 'left_shoulder',\n 'left_elbow',\n 'left_wrist',\n 'right_hip',\n 'right_knee',\n 'right_ankle',\n 'left_hip',\n 'left_knee',\n 'left_ankle',\n 'right_eye', \n 'left_eye',\n 'right_ear',\n 'left_ear']\n\n return keypoints",
"def xkcd():",
"def c_test_eval_inp(self, population, run_locals):\r\n return 1",
"def evaluate(self) -> int:",
"def eval(self):\n pass",
"def eval(self):\n pass",
"def eval(self):\n pass",
"def testOperation(self):\n gen = self.gen\n prof = self.profile\n\n # Try the direct evaluation\n gen.operation()\n self.assertTrue(array_equal(prof.x, prof.ycalc))\n\n # Try evaluation through __call__\n gen(prof.x)\n self.assertTrue(array_equal(prof.x, prof.ycalc))\n return",
"def sample_event_key_evaluator(response, payload, value):\n try:\n if value != \"\":\n exec (\"global value1; value1 = \" + value)\n value = value1\n return value\n except Exception as _:\n return value",
"def _evalContext(self):\n def xor(*args):\n return sum(args) == 1\n def neg(result):\n return not result\n context = {\n 'xor': xor,\n 'neg': neg\n }\n return context",
"def evaluate(self):\n raise Exception(\"Not implemented.\")",
"def evaluate(self):\r\n raise Exception(\"Not implemented.\")",
"def next_point(self):\n if self.verbose:\n print(\"Computing acquisition function...\")\n if self.acquisition_function == 'cb':\n acq, pred = acqfunc.confidence_bound(\n self.surrogate_model, self.X_full,\n alpha=self.alpha, beta=self.beta)\n elif self.acquisition_function == 'ei':\n acq, pred = acqfunc.expected_improvement(\n self.surrogate_model, self.X_full,\n self.X_sparse, xi=self.xi)\n elif self.acquisition_function == 'poi':\n acq, pred = acqfunc.probability_of_improvement(\n self.surrogate_model, self.X_full,\n self.X_sparse, xi=self.xi)\n elif isinstance(self.acquisition_function, types.FunctionType):\n acq, pred = self.acquisition_function(\n self.surrogate_model, self.X_full, self.X_sparse)\n else:\n raise NotImplementedError(\n \"Choose between 'cb', 'ei', and 'poi' acquisition functions or define your own\")\n self.gp_predictions.append(pred)\n if self.mask is None:\n indices_list = np.unravel_index(np.argsort(acq.ravel()), acq.shape)\n vals_list = acq[indices_list][::-1][:self.batch_size].tolist()\n indices_list = np.dstack(indices_list)[0][::-1][:self.batch_size].tolist()\n else:\n acq = self.mask*acq\n indices_list = np.unravel_index(np.argsort(acq.ravel()), acq.shape)\n vals_list = acq[indices_list]\n vals_list = vals_list[~np.isnan(vals_list)][::-1]\n indices_list = np.dstack(indices_list)[0]\n indices_list = indices_list[:len(vals_list)][::-1]\n vals_list = vals_list[:self.batch_size].tolist()\n indices_list = indices_list[:self.batch_size].tolist()\n if not self.batch_update:\n return vals_list, indices_list\n if self.batch_dscale is None:\n batch_dscale_ = self.surrogate_model.model.kernel.lengthscale.mean().item()\n else:\n batch_dscale_ = self.batch_dscale\n vals_list, indices_list = self.update_points(\n vals_list, indices_list, batch_dscale_)\n return vals_list, indices_list",
"def evaluate(self):\n raise NotImplementedError()",
"def evaluate(self, X):\n\n\t\tpass",
"def test_circuit(self):\n num_qubits = 3\n strike_price = 0.5\n bounds = (0, 2)\n ecd = EuropeanCallDeltaObjective(\n num_state_qubits=num_qubits, strike_price=strike_price, bounds=bounds\n )\n\n # map strike_price to a basis state\n x = (strike_price - bounds[0]) / (bounds[1] - bounds[0]) * (2 ** num_qubits - 1)\n comparator = IntegerComparator(num_qubits, x)\n\n self.assertTrue(Operator(ecd).equiv(comparator))",
"def main(codelabel):\n try:\n code = Code.get_from_string(codelabel)\n except NotExistent:\n print(\"The code '{}' does not exist\".format(codelabel))\n sys.exit(1)\n\n print(\"Testing CP2K ENERGY on H2 (DFT) without StructureData...\")\n\n # parameters\n parameters = Dict(\n dict={\n 'FORCE_EVAL': {\n 'METHOD': 'Quickstep',\n 'DFT': {\n 'BASIS_SET_FILE_NAME': 'BASIS_MOLOPT',\n 'QS': {\n 'EPS_DEFAULT': 1.0e-12,\n 'WF_INTERPOLATION': 'ps',\n 'EXTRAPOLATION_ORDER': 3,\n },\n 'MGRID': {\n 'NGRIDS': 4,\n 'CUTOFF': 280,\n 'REL_CUTOFF': 30,\n },\n 'XC': {\n 'XC_FUNCTIONAL': {\n '_': 'LDA',\n },\n },\n 'POISSON': {\n 'PERIODIC': 'none',\n 'PSOLVER': 'MT',\n },\n },\n 'SUBSYS': {\n # structure directly included in parameters\n 'CELL': {\n 'ABC': '4.0 4.0 4.75'\n },\n 'COORD': {\n ' ': ['H 2.0 2.0 2.737166', 'H 2.0 2.0 2.000000']\n },\n 'KIND': [\n {\n '_': 'O',\n 'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',\n 'POTENTIAL': 'GTH-LDA-q6'\n },\n {\n '_': 'H',\n 'BASIS_SET': 'DZVP-MOLOPT-SR-GTH',\n 'POTENTIAL': 'GTH-LDA-q1'\n },\n ],\n },\n }\n })\n\n # resources\n options = {\n \"resources\": {\n \"num_machines\": 1,\n \"num_mpiprocs_per_machine\": 1,\n },\n \"max_wallclock_seconds\": 1 * 3 * 60,\n }\n\n inputs = {'parameters': parameters, 'code': code, 'metadata': {'options': options,}}\n\n print(\"submitted calculation...\")\n calc = run(Cp2kCalculation, **inputs)\n\n # check energy\n expected_energy = -1.14005678487\n if abs(calc['output_parameters'].dict.energy - expected_energy) < 1e-10:\n print(\"OK, energy has the expected value\")\n else:\n print(\"ERROR!\")\n print(\"Expected energy value: {}\".format(expected_energy))\n print(\"Actual energy value: {}\".format(calc['output_parameters'].dict.energy))\n sys.exit(3)\n\n sys.exit(0)"
] | [
"0.62909716",
"0.60523283",
"0.5970072",
"0.5801929",
"0.5801929",
"0.57388675",
"0.571071",
"0.55617356",
"0.5529449",
"0.5511643",
"0.5455271",
"0.5396832",
"0.5377042",
"0.53401375",
"0.5296298",
"0.5288935",
"0.5284459",
"0.52335393",
"0.52335393",
"0.52335393",
"0.5183442",
"0.51712227",
"0.5161918",
"0.5147395",
"0.51457024",
"0.51336366",
"0.51288056",
"0.51163167",
"0.5045067",
"0.5015808"
] | 0.61717105 | 1 |
Extend the given Frame. | def extend(self, frame):
self.static_link = StaticLink(self.canvas, self, frame)
self.update() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def raiseframe_extra(self, name):\n\n self.extraframe.forget()\n frame = self.frames[name]\n frame.pack(expand=True, fill='both', padx=10)\n frame.update()\n frame.event_generate('<<ShowFrame>>')",
"def FrameAddBlendFrame(builder, blendFrame):\n return AddBlendFrame(builder, blendFrame)",
"def FrameAddPointFrame(builder, pointFrame):\n return AddPointFrame(builder, pointFrame)",
"def FrameAddEventFrame(builder, eventFrame):\n return AddEventFrame(builder, eventFrame)",
"def extend(self, *args, **kwargs): # real signature unknown\n pass",
"def extend(self, other):\n # YOUR CODE HERE\n raise NotImplementedError()",
"def add_frame(*args):\n return _ida_frame.add_frame(*args)",
"def extend(self, x) -> None:\n pass",
"def add_frame(self, frame: Frame):\n self.add(frame.timestep, frame.position, frame.orientation)",
"def append(self, other, ignore_index=False, verify_integrity=False):\n new_frame = self.frame.append(other.frame, ignore_index=ignore_index,\n verify_integrity=verify_integrity)\n return self.__class__(new_frame)",
"def FrameAddIntFrame(builder, intFrame):\n return AddIntFrame(builder, intFrame)",
"def extra_frame(self):\n\n self.extraframe = tk.Frame(self.extra_notebook, bg='white')\n self.extraframe.pack(anchor='center', expand=True, fill='y')\n # RoHS checker\n self.rohsframe = tk.Frame(self.extraframe, bg='#7093db')\n self.rohsframe.pack(pady=10, fill='x', expand=True)\n rohs = DoubleTextButton(self.rohsframe,\n text_main='RoHS Bill of Materials Comparison',\n text_sub='Output a delta report between two BOMS',\n command=lambda: self.raiseframe_extra(ROHSCompare))\n rohs.pack(fill='x', expand=True, side='right', padx=(4, 0))\n # Format Checker\n self.filterframe = tk.Frame(self.extraframe, bg='#7093db')\n self.filterframe.pack(pady=10, fill='x', expand=True)\n filtercheck = DoubleTextButton(self.filterframe,\n text_main='Format Checker',\n text_sub='Will output filtered CCL to check CCL format',\n command=lambda: self.raiseframe_extra(FilterCompare))\n filtercheck.pack(fill='x', expand=True, side='right', padx=(4, 0))\n # Illustration tool\n self.illtoolframe = tk.Frame(self.extraframe, bg='#7093db')\n self.illtoolframe.pack(pady=10, fill='x', expand=True)\n illustration_tool = DoubleTextButton(self.illtoolframe,\n text_main='Illustration Tool',\n text_sub='Used to insert and delete illustrations',\n command=lambda: self.raiseframe_extra(InsertDelIllustration))\n illustration_tool.pack(fill='x', expand=True, side='right', padx=(4, 0))",
"def FrameAddScaleFrame(builder, scaleFrame):\n return AddScaleFrame(builder, scaleFrame)",
"def extended_frame_annotation(self, original_frame):\n self.frame = self.annotated_frame(original_frame)\n text = \"\"\n if self.is_right():\n text = \"Looking right\"\n elif self.is_left():\n text = \"Looking left\"\n elif self.is_center():\n text = \"Looking center\"\n\n h_ratio = \"HR: \" + str(self.horizontal_ratio())[:4]\n v_ratio = \"VR: \" + str(self.vertical_ratio())[:4]\n\n width = int(0.9 * self.frame.shape[1])\n height = int(0.9 * self.frame.shape[0])\n\n # cv2.putText(self.frame, text, (60, 60), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2)\n # cv2.putText(self.frame, h_ratio, (60, height), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2)\n # cv2.putText(self.frame, v_ratio, (int(0.8 * width), height), cv2.FONT_HERSHEY_DUPLEX, 2, (255, 0, 0), 2)\n return self.frame",
"def change_frame(self, frame):\r\n pass",
"def add_frame(self, frame, player_box):\n # ROI is a small box around the player\n box_center = center_of_box(player_box)\n patch = frame[int(box_center[1] - self.box_margin): int(box_center[1] + self.box_margin),\n int(box_center[0] - self.box_margin): int(box_center[0] + self.box_margin)].copy()\n patch = imutils.resize(patch, 299)\n frame_t = patch.transpose((2, 0, 1)) / 255\n frame_tensor = torch.from_numpy(frame_t).type(self.dtype)\n frame_tensor = self.normalize(frame_tensor).unsqueeze(0)\n with torch.no_grad():\n # forward pass\n features = self.feature_extractor(frame_tensor)\n features = features.unsqueeze(1)\n # Concatenate the features to previous features\n if self.frames_features_seq is None:\n self.frames_features_seq = features\n else:\n self.frames_features_seq = torch.cat([self.frames_features_seq, features], dim=1)",
"def add_frame(\n self : \"animation\",\n frame : \"matplotlib.figure.Figure\",\n facecolor : \"str\" = 'white'\n ):\n self._make_animation_from_raw_list([frame], facecolor=facecolor)",
"def FrameAddColorFrame(builder, colorFrame):\n return AddColorFrame(builder, colorFrame)",
"def addBeforeFrameRender(call, args=(), kwargs={}, nodeClass='Write'):",
"def addFramePoint(self,**kwargs):\n try:\n side = kwargs['side']\n except Exception,e:\n rospy.logerr(\"%s\",str(e))\n self.mm.neglect()\n return\n step = int(self.mm.modes[self.mm.cur_mode][6:7])\n rospy.loginfo(\"step %d\"%step)\n point = self.baxter.frame.addPoint(side,step) \n self.mm.confirm()\n #self.mm.loadMenu(self.mm.cur_page)",
"def process_frame(self, frame):\n\t\treturn frame",
"def __init__(self, frame):\n self.frame = frame",
"def augment_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:",
"def extend_spec(self, extend_spec):\n self._extend_spec = extend_spec",
"def FrameAddTextureFrame(builder, textureFrame):\n return AddTextureFrame(builder, textureFrame)",
"def __init__(self, frame):\n self.frame = frame\n self._configure()",
"def half_frame(self) -> None:\n pass",
"def half_frame(self) -> None:\n pass",
"def dispatch_frame(self, frame):",
"def extendleft(self, *args, **kwargs): # real signature unknown\n pass"
] | [
"0.62277865",
"0.6135089",
"0.6081007",
"0.6039482",
"0.60271853",
"0.5970307",
"0.59035146",
"0.5898341",
"0.58268374",
"0.58001316",
"0.5760515",
"0.57385683",
"0.5728255",
"0.56615496",
"0.5592534",
"0.5527621",
"0.5516896",
"0.5500574",
"0.54418653",
"0.5379401",
"0.5343444",
"0.5328016",
"0.530097",
"0.52994704",
"0.5269056",
"0.52573675",
"0.5255499",
"0.5255499",
"0.5251225",
"0.5243438"
] | 0.6509338 | 0 |
The frame that encloses this frame (what is pointed to by the static link). | def enclosing_frame(self):
return self.static_link.frame | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main_frame(self):\n return self.frame",
"def get_frame(self):\n return self.frame",
"def frame(self):\n return self._frame",
"def frame(self):\n return self._frame",
"def frame(self):\n return self._frame",
"def frame(self):\n return self.head",
"def get_frame(self):\n return self.frames.get()",
"def get_frame(self):\n return self.get_frame_at_index(self.current_frame)",
"def curframe(self):\n return self._stack[self._curframe_index][0]",
"def current_frame(self):\n return self._current_frame",
"def frame(self):",
"def sub_frame(self):\n return self._sub_frame",
"def get_frame(self, frame):\n return self.frames[frame]",
"def get_frame(self):\n\t\tframe = None\n\t\twhile not frame:",
"def frame(self):\n with self.frame_lock:\n self._new_frame = False\n return self._frame",
"def current_frame(self) -> str:\n return self.frames[self.frame]",
"def get_frame(self):\n return self.last_frame",
"def currentframe():\n return sys._getframe(3)",
"def sup_frame(self):\n return self._sup_frame",
"def bg_frame(self):\n\n return self._bg_frame",
"def read(self):\n return self.frame",
"def main_frame(self):\n debug.virtual('wxMediator.main_frame')",
"def _get_frame(self, key):\n pass",
"def index(self):\n return self.frame.index",
"def get_frame(self, ind):\n pass",
"def get_frame_data(self):\n # FrameObject is a dictionary of slot names and values.\n frameObject = self.pgdb.sendPgdbFnCall('get-frame-object', self.frameid)\n if not frameObject:\n raise PythonCycError(\"Could not retrieve frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n else:\n self._gotframe = True\n # Modify slot names to allow Python's syntax (e.g., '_' instead of '-').\n for slot in frameObject:\n self.__dict__[convertLispIdtoPythonId(slot)] = frameObject[slot]\n return self",
"def frame_index(self):\n return self._findex",
"def output_frame(self):\n if self._pipeline:\n frame = self._pipeline[-1].frame\n if not isinstance(frame, str):\n frame = frame.name\n return getattr(self, frame)\n else:\n return None",
"def EventFrame (self):\n pass",
"def process_frame(self, frame):\n\t\treturn frame"
] | [
"0.767556",
"0.74161756",
"0.7345391",
"0.7345391",
"0.7345391",
"0.71622056",
"0.6995642",
"0.6989063",
"0.6887034",
"0.68754154",
"0.6817774",
"0.6768423",
"0.6639166",
"0.6615822",
"0.6602196",
"0.6492776",
"0.6487509",
"0.6400091",
"0.6300003",
"0.62185115",
"0.6148845",
"0.6133849",
"0.60954547",
"0.6084181",
"0.6082169",
"0.6040915",
"0.6012404",
"0.59931046",
"0.5988647",
"0.59424895"
] | 0.8402784 | 0 |
Adds a Binding to this Frame. | def add_binding(self, variable, value):
# If there's already a binding, update it rather than add a new one.
for binding in self.bindings:
if binding.variable.name == variable:
return self.update_binding(variable, value)
variable = Variable(self.canvas, self, variable)
binding = Binding(self.canvas, variable, value)
self.bindings.append(binding)
x, y = self.pos
variable.set_pos(x + 10, y + len(self.bindings) * 20)
if value.moves_with_binding:
value.set_pos(x + 140, y + len(self.bindings) * 20)
self.update() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def addBindingToFrame(var, val, frame):\n set_car(frame, cons(var, frame_variables(frame)))\n set_cdr(frame, cons(val, frame_values(frame)))\n return",
"def bind(self, sequence=None, func=None, add=None):\n return self._widget_bind(sequence, func, add, internal=False)",
"def add_binding(ctx, binding_name, pool_name, acl_name, nat_type, twice_nat_id):\n\n entryFound = False\n table = 'NAT_BINDINGS'\n key = binding_name\n dataKey1 = 'access_list'\n dataKey2 = 'nat_pool'\n dataKey3 = 'nat_type'\n dataKey4 = 'twice_nat_id'\n\n if acl_name is None:\n acl_name = \"\"\n\n if len(binding_name) > 32:\n ctx.fail(\"Invalid binding name. Maximum allowed binding name is 32 characters !!\")\n\n config_db = ConfigDBConnector()\n config_db.connect()\n\n data = config_db.get_entry(table, key)\n if data:\n if data[dataKey1] == acl_name and data[dataKey2] == pool_name:\n click.echo(\"Trying to add binding, which is already present.\")\n entryFound = True\n\n binding_dict = config_db.get_table(table)\n if len(binding_dict) == 16:\n click.echo(\"Failed to add binding, as already reached maximum binding limit 16.\")\n entryFound = True\n\n if nat_type is not None:\n if nat_type == \"dnat\":\n click.echo(\"Ignored, DNAT is not yet suported for Binding \")\n entryFound = True\n else:\n nat_type = \"snat\"\n\n if twice_nat_id is None:\n twice_nat_id = \"NULL\"\n\n if entryFound is False:\n count = 0\n if twice_nat_id is not None:\n count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, 'STATIC_NAT', count)\n count = getTwiceNatIdCountWithStaticEntries(twice_nat_id, 'STATIC_NAPT', count)\n count = getTwiceNatIdCountWithDynamicBinding(twice_nat_id, count, key)\n if count > 1:\n ctx.fail(\"Same Twice nat id is not allowed for more than 2 entries!!\")\n\n config_db.set_entry(table, key, {dataKey1: acl_name, dataKey2: pool_name, dataKey3: nat_type, dataKey4: twice_nat_id})",
"def AddIamPolicyBinding(asset_ref, member, role):\n policy = GetIamPolicy(asset_ref)\n iam_util.AddBindingToIamPolicy(\n dataplex_api.GetMessageModule().GoogleIamV1Binding, policy, member, role)\n return SetIamPolicy(asset_ref, policy)",
"def addBoundConnection(self, connection):\r\n system_id = connection.system_id\r\n self.log.debug('Adding SMPP binding for %s' % system_id)\r\n if not system_id in self.bound_connections:\r\n self.bound_connections[system_id] = SMPPBindManager(system_id)\r\n self.bound_connections[system_id].addBinding(connection)\r\n bind_type = connection.bind_type\r\n self.log.info(\"Added %s bind for '%s'. Active binds: %s. Max binds: %s\" % (bind_type, system_id, self.getBoundConnectionCountsStr(system_id), self.config.systems[system_id]['max_bindings']))",
"def visit_AttributeBinding(self, node):\n obj = self.stack[-1]\n py_ast = node.binding.expr.py_ast\n op = node.binding.op\n op_compiler = COMPILE_OP_MAP[op]\n code = op_compiler(py_ast, self.filename)\n binding = {\n 'operator': op,\n 'code': code,\n 'name': node.name,\n 'lineno': node.binding.lineno,\n 'filename': self.filename,\n 'block': self.block,\n }\n obj['bindings'].append(binding)",
"def add_bindings(self, configuration, bind_to, typ, bindings):\n wanted = list(bindings.wanted(configuration[typ].values()))\n if not self.get_current(bind_to)[0]:\n log.info(\"Would bind <%s>(%s) to %s\", typ, ', '.join(wanted), bind_to.long_name)\n return\n\n for thing in wanted:\n bound = self.is_bound(typ, thing, bind_to.typ, bind_to.name)\n\n if not bound:\n log.info(\"Binding <%s>(%s) to %s\", typ, thing, bind_to.long_name)\n combined_typ, binding_name_str, name_str = self.combined_typ(bind_to.typ, typ)\n payload = {binding_name_str: bind_to.name, name_str: thing}\n payload.update(configuration[typ][thing].binding_options)\n self.post(combined_typ, {combined_typ: payload, \"params\": {\"action\": \"bind\"}}, content_type=self.content_type(combined_typ))\n else:\n log.debug(\"<%s(%s) already bound to %s\", typ, thing, bind_to.long_name)",
"def bind(self, state_id, binding_id, event_props, event_handler):\n\n self.event_binder.bind(binding_id, event_props, event_handler)\n self._state_bindings[state_id].append(binding_id)",
"def AddIamPolicyBinding(task_ref, member, role):\n policy = GetIamPolicy(task_ref)\n iam_util.AddBindingToIamPolicy(\n dataplex_api.GetMessageModule().GoogleIamV1Binding, policy, member, role\n )\n return SetIamPolicy(task_ref, policy)",
"def define(self, label, obj):\n self.bindings[label] = obj",
"def _bind(self):\n\n pass",
"def bind(self, binding_id, event_props, handler, arg_list=None, once=False):\n\n args = (arg_list,) if arg_list else ()\n self._bindings[binding_id] = (once, event_props, handler) + args",
"def connect(self, binding):\n\n # Check whether the binding setting is correct or not.\n if self.io_owner == binding.io_owner:\n raise RuntimeError(\"Can not bind itself.\")\n\n if self.io_type == \"param\" and not self.is_pipeline_executor_interface():\n raise RuntimeError(\n 'The \"param\" binding can only be used by a pipeline executor interface!'\n )\n\n if not self.is_pipeline_executor_interface() and self.io_type == \"input\":\n raise RuntimeError(\"Module can only bind from output interface!\")\n\n if self.io_type == \"param\" and binding.io_type != \"param\":\n raise RuntimeError(\n 'A global \"param\" interface can only be bind with a module \"param\" interface!'\n )\n\n if (\n not self.is_pipeline_executor_interface()\n and not binding.is_pipeline_executor_interface()\n and binding.io_type == \"output\"\n ):\n raise RuntimeError(\"Can not bind module output with another module output!\")\n\n if (\n not self.is_pipeline_executor_interface()\n and binding.is_pipeline_executor_interface()\n and binding.io_type == \"input\"\n ):\n raise RuntimeError(\"Can not bind module output with pipeline input!\")\n\n if self.is_pipeline_executor_interface() and self.io_type == \"output\":\n raise RuntimeError(\"Global output can not be used as binding start point.\")\n\n if (\n self.is_pipeline_executor_interface()\n and self.io_type == \"input\"\n and binding.io_type != \"input\"\n ):\n raise RuntimeError(\"Global input can only bind with module input.\")\n\n self.bindings.append(binding)\n if not self.is_pipeline_executor_interface():\n # Check whether the data types of the source and destination are the same.\n if (\n isinstance(binding.io_owner, PipelineConfig.ModuleWrapper)\n and self.data_type != binding.data_type\n ):\n raise RuntimeError(\n f\"Illegal type (%s vs. %s): binding type is not same!\"\n % (self.data_type, binding.data_type)\n )\n\n binding.parents.append(self)\n\n # Do acyclic check after increasing the in-degree of child node by setting\n # current interface as a parent of the child node.\n\n if not self.check_dag_acyclic(\n binding.io_owner, self.io_owner.input_bindings.bindings\n ):\n raise RuntimeError(\"Illegal connection: Cause a cycle!\")",
"def add_binding_sites(self, binding_sites, final_conformation: str = None):\n\n if final_conformation is None:\n self.binding_sites[self.name] += binding_sites\n elif final_conformation in self.binding_sites:\n self.binding_sites[final_conformation] += binding_sites\n else:\n self.binding_sites[final_conformation] = binding_sites\n\n # also reverse_link the binding sites to this TF object\n for binding_site in binding_sites:\n binding_site.add_transcription_factor(self, final_conformation)",
"def add(self, widget: Component) -> None:\n self._add(widget)",
"def __init__(self, binding, parent=None):\n self.binding = binding\n self.parent = parent",
"def binding_site(self, binding_site):\n self._binding_site = binding_site",
"def add_child(self, blueprint: 'Blueprint'):\n self._children.append(blueprint)\n blueprint._parent = self\n return blueprint",
"def add(self, widget: Component) -> None:\n self._root.add(widget)",
"def bind_key(self, key):\n self.key_bindings.append(key)",
"def _widget_bind(self, sequence=None, func=None, add=None, **kw):\n debug_b = kw.get(\"debug\", self.default_debug)\n if debug_b:\n self._print(\n \"_WIDGET_BIND({self}, {sequence}, {func}, {add}, **{kw})\"\n \" Called!\".format(\n self=self, sequence=sequence, func=func, add=add, kw=kw\n )\n )\n if sequence == func is None:\n return super().bind() if self.emulation_b else self.widget.bind()\n if func is None:\n return (\n super().bind(sequence)\n if self.emulation_b\n else self.widget.bind(sequence)\n )\n internal_b = kw.get(\"internal\", True)\n kids_d = kw.get(\"kids\", self._kids)\n kidsonly = kw.get(\"kidsonly\", None)\n recurse_b = kw.get(\"recurse\", False)\n release_b = kw.get(\"release\", internal_b or not add)\n if not hasattr(self, \"_funcids_d\"):\n setattr(self, \"_funcids_d\", {})\n if sequence not in self._funcids_d:\n self._funcids_d[sequence] = {}\n if release_b:\n self._widget_unbind(sequence, True, func=func, add=add, **kw)\n if kidsonly:\n func_id = kidsonly # from dict\n func_ids = [\n func_id,\n ]\n else:\n if self.emulation_b:\n func_id = super().bind(sequence, func, \"+\")\n else:\n func_id = self.widget.bind(sequence, func, \"+\")\n self._funcids_d[sequence][func_id] = dict(\n sequence=sequence,\n func=func,\n add=add,\n internal=internal_b,\n self=func_id,\n kids={},\n frames={},\n )\n func_ids = [\n func_id,\n ]\n for frame in [\n getattr(self, \"_compoundframe\", None),\n getattr(self, \"_textframe\", None),\n ] + getattr(self, \"_subframes\", []):\n if frame and frame.winfo_exists():\n fid = frame.bind(sequence, func, \"+\")\n frame_str = str(frame)\n self._funcids_d[sequence][func_id][\"frames\"][frame_str] = fid\n func_ids.append(fid)\n matching_recursion_events = (\n e\n for e in self.event_types_requiring_recursion\n if e.startswith(sequence[1:])\n )\n recursion_b = (\n recurse_b or matching_recursion_events or not internal_b and False\n )\n if recursion_b or kidsonly:\n for child_str, vals in kids_d.items():\n child = vals[\"label\"]\n if debug_b:\n self._print(\n \"BINDING {sequence} to Child={child}: Child.bind(\"\n \"{sequence}, {func}, {add}, **{kw})\".format(\n child=child,\n sequence=sequence,\n func=func,\n add=add,\n kw=kw,\n )\n )\n # try:\n if kidsonly:\n cfid = self._funcids_d[sequence][func_id][\"kids\"].get(\n child_str\n )\n if cfid:\n child.unbind(sequence, cfid)\n child_func_id = child.bind(sequence, func, \"+\")\n self._funcids_d[sequence][func_id][\"kids\"][\n child_str\n ] = child_func_id\n func_ids.append(child_func_id)\n # except: # else: #\n # self._print(\n # \"EXCEPTION while Binding Child({c}, {s}, {f}, {ad})\"\n # \"\".format(\n # c=child, s=sequence, f=func, ad=add\n # )\n # )\n result = func_ids if internal_b else func_ids[0]\n if debug_b:\n self._print(\n \"FUNC_IDS are \", func_ids, \" internal_b is \", internal_b\n )\n return result",
"def add(self, *args, **kwargs):\n widget = self.widget_cls(self, *args, **kwargs)\n self.widgets.append(widget)\n widget.pack(**self.pack_opts)\n return widget",
"def add_relationship(self, rel: ResourceRelationshipDescriptor) -> None:\n self._relationships[assert_not_none(rel.name)] = rel.bind(self)",
"def add_frame(self, frame: Frame):\n self.add(frame.timestep, frame.position, frame.orientation)",
"def extend(self, frame):\n self.static_link = StaticLink(self.canvas, self, frame)\n self.update()",
"def set_bindpoint(self, bindpoint):\n self.options['bindpoint'] = bindpoint",
"def add_link(self, src, dst, src_port, dst_port, weight = 1):\n\t\tif src not in self.switches_adj:\n\t\t\tself.switches_adj[src] = []\n\t\tself.switches_adj[src].append(dst)\t\n\n\n\t\t#add link and it's attributes\n\t\tif src not in self.links:\n\t\t\tself.links[src] = {}\n\t\tself.links[src][dst] = {}\n\t\tself.links[src][dst]['src_port'] = src_port\n\t\tself.links[src][dst]['dst_port'] = dst_port\n\t\tself.links[src][dst]['weight'] = weight",
"def _add_varbind(self, oid, value):\n \n self._encoded_oids.append(oid)\n self._encoded_vals.append(value)",
"def FrameAddBlendFrame(builder, blendFrame):\n return AddBlendFrame(builder, blendFrame)",
"def bind(self, *args):\r\n return self._fd.bind(*args)"
] | [
"0.66476285",
"0.61366373",
"0.6026425",
"0.60156393",
"0.59045696",
"0.58596855",
"0.57560503",
"0.5732177",
"0.5641009",
"0.54628783",
"0.5456201",
"0.5436792",
"0.54301035",
"0.53278595",
"0.532564",
"0.5314",
"0.52351445",
"0.5233988",
"0.52169967",
"0.51750636",
"0.51471865",
"0.5112431",
"0.51045245",
"0.5099985",
"0.50945973",
"0.5080686",
"0.5076656",
"0.506861",
"0.5067704",
"0.5061122"
] | 0.73459095 | 0 |
Updates a preexisting Binding in this or some enclosing Frame. | def update_binding(self, variable, value):
binding = self.lookup(variable)
if binding is None:
    raise BaseException(
        "Tried to update a variable that's not in scope!")
old_value = binding.value
var_x, var_y = binding.variable.pos
binding.value = value
if old_value.moves_with_binding:
old_value.set_pos(0, 0) # Or better yet, somehow remove it
if value.moves_with_binding:
value.set_pos(var_x + 130, var_y)
self.update() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_binding(self, variable, value):\n # If there's already a binding, update it rather than add a new one.\n for binding in self.bindings:\n if binding.variable.name == variable:\n return self.update_binding(variable, value)\n variable = Variable(self.canvas, self, variable)\n binding = Binding(self.canvas, variable, value)\n self.bindings.append(binding)\n x, y = self.pos\n variable.set_pos(x + 10, y + len(self.bindings) * 20)\n if value.moves_with_binding:\n value.set_pos(x + 140, y + len(self.bindings) * 20)\n self.update()",
"def _update(self, binding, data):\n self._validate_data(data)\n if not data.get('name',False):\n data['name'] = data.get('frontend_label',False) or 'No Label'\n if not data.get('create_variant',False):\n data['create_variant'] = data.get('is_configurable',False)\n binding.write(data)\n self._create_attribute_option(binding, data)\n _logger.debug('%d updated from magento %s', binding.id, self.magento_id)\n return",
"def update_content_binding(sender, **kwargs):\n if not kwargs['created']:\n return\n\n cbas = ContentBindingAndSubtype(content_binding=kwargs['instance'], subtype=None)\n cbas.save()",
"def addBindingToFrame(var, val, frame):\n set_car(frame, cons(var, frame_variables(frame)))\n set_cdr(frame, cons(val, frame_values(frame)))\n return",
"def reifyBinding(slot):\n return Binding(slot)",
"def _bind(self):\n\n pass",
"def binding_site(self, binding_site):\n self._binding_site = binding_site",
"def updateBindPose():\n\n dag = pmc.dagPose(q=True, bindPose=True)\n objects = pmc.dagPose(dag, q=True, members=True)\n for obj in objects:\n pmc.dagPose(obj, reset=True, name=dag[0])",
"def binding(self, value):\n old_value = self._value\n self._set(value)\n yield\n self._set(old_value)",
"def _reset_bind(self):\n self.binded = False\n self._exec_group = None\n self._data_shapes = None\n self._label_shapes = None",
"def _update_binding_after_export(self, map_record, sync_data=None, compare_data=None):\n self.binder.bind(self.getresponse_id, self.binding_id,\n sync_data=sync_data, compare_data=compare_data)",
"def setBindingStatus(self, *args):\n return _libsbml.OutwardBindingSite_setBindingStatus(self, *args)",
"def get_binding(self, *bases, replace_existing=False, **attrs):\n if self.binding is None or replace_existing:\n if not bases:\n bases = (dataobjects.DataElement,)\n attrs['xsd_element'] = self\n class_name = '{}Binding'.format(self.local_name.title().replace('_', ''))\n self.binding = dataobjects.DataBindingMeta(class_name, bases, attrs)\n\n return self.binding",
"def UpdateAccessBindings(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _widget_bind(self, sequence=None, func=None, add=None, **kw):\n debug_b = kw.get(\"debug\", self.default_debug)\n if debug_b:\n self._print(\n \"_WIDGET_BIND({self}, {sequence}, {func}, {add}, **{kw})\"\n \" Called!\".format(\n self=self, sequence=sequence, func=func, add=add, kw=kw\n )\n )\n if sequence == func is None:\n return super().bind() if self.emulation_b else self.widget.bind()\n if func is None:\n return (\n super().bind(sequence)\n if self.emulation_b\n else self.widget.bind(sequence)\n )\n internal_b = kw.get(\"internal\", True)\n kids_d = kw.get(\"kids\", self._kids)\n kidsonly = kw.get(\"kidsonly\", None)\n recurse_b = kw.get(\"recurse\", False)\n release_b = kw.get(\"release\", internal_b or not add)\n if not hasattr(self, \"_funcids_d\"):\n setattr(self, \"_funcids_d\", {})\n if sequence not in self._funcids_d:\n self._funcids_d[sequence] = {}\n if release_b:\n self._widget_unbind(sequence, True, func=func, add=add, **kw)\n if kidsonly:\n func_id = kidsonly # from dict\n func_ids = [\n func_id,\n ]\n else:\n if self.emulation_b:\n func_id = super().bind(sequence, func, \"+\")\n else:\n func_id = self.widget.bind(sequence, func, \"+\")\n self._funcids_d[sequence][func_id] = dict(\n sequence=sequence,\n func=func,\n add=add,\n internal=internal_b,\n self=func_id,\n kids={},\n frames={},\n )\n func_ids = [\n func_id,\n ]\n for frame in [\n getattr(self, \"_compoundframe\", None),\n getattr(self, \"_textframe\", None),\n ] + getattr(self, \"_subframes\", []):\n if frame and frame.winfo_exists():\n fid = frame.bind(sequence, func, \"+\")\n frame_str = str(frame)\n self._funcids_d[sequence][func_id][\"frames\"][frame_str] = fid\n func_ids.append(fid)\n matching_recursion_events = (\n e\n for e in self.event_types_requiring_recursion\n if e.startswith(sequence[1:])\n )\n recursion_b = (\n recurse_b or matching_recursion_events or not internal_b and False\n )\n if recursion_b or kidsonly:\n for child_str, vals in kids_d.items():\n child = vals[\"label\"]\n if debug_b:\n self._print(\n \"BINDING {sequence} to Child={child}: Child.bind(\"\n \"{sequence}, {func}, {add}, **{kw})\".format(\n child=child,\n sequence=sequence,\n func=func,\n add=add,\n kw=kw,\n )\n )\n # try:\n if kidsonly:\n cfid = self._funcids_d[sequence][func_id][\"kids\"].get(\n child_str\n )\n if cfid:\n child.unbind(sequence, cfid)\n child_func_id = child.bind(sequence, func, \"+\")\n self._funcids_d[sequence][func_id][\"kids\"][\n child_str\n ] = child_func_id\n func_ids.append(child_func_id)\n # except: # else: #\n # self._print(\n # \"EXCEPTION while Binding Child({c}, {s}, {f}, {ad})\"\n # \"\".format(\n # c=child, s=sequence, f=func, ad=add\n # )\n # )\n result = func_ids if internal_b else func_ids[0]\n if debug_b:\n self._print(\n \"FUNC_IDS are \", func_ids, \" internal_b is \", internal_b\n )\n return result",
"def bind(self):\n super(QtBaseWidgetComponent, self).bind()",
"def get_binding(self, orgname):\n pass",
"def updateWidget(self):\n pass",
"def update(self, bsd):\n raise NotImplementedError()",
"def update(self):\n self.backbone_module.update()\n mx.nd.waitall()",
"def __init__(self, binding, parent=None):\n self.binding = binding\n self.parent = parent",
"def _maintain_backwards_compatibility(binding):\n\n for member in (\"__binding__\",\n \"__binding_version__\",\n \"__qt_version__\",\n \"__added__\",\n \"__remapped__\",\n \"__modified__\",\n \"convert\",\n \"load_ui\",\n \"translate\"):\n setattr(binding, member, getattr(self, member))\n self.__added__.append(member)\n\n setattr(binding, \"__wrapper_version__\", self.__version__)\n self.__added__.append(\"__wrapper_version__\")",
"def update_port_precommit(self, port_context):\n # TODO(ijw): optimisation: the update port may leave the\n # binding state the same as before if someone updated\n # something other than the binding on the port, but this\n # way we always send it out and it's the far end's job to\n # ignore it. Doing less work is nevertheless good, so we\n # should in future avoid the send.\n\n # unbind port from old host, if already bound\n if port_context.original_binding_levels is not None:\n prev_bind = port_context.original_binding_levels[-1]\n\n if (prev_bind is not None and\n prev_bind.get(api.BOUND_DRIVER) == self.MECH_NAME and\n port_context.host != port_context.original_host):\n\n # Note that we skip this step if the change happens while\n # 'unbinding' and rebinding to the same host - it's probably\n # an update of extraneous detail and not really a request\n # that requires binding.\n\n self.communicator.unbind(port_context._plugin_context.session,\n port_context.original,\n port_context.original_host,\n prev_bind[api.BOUND_SEGMENT]\n )\n\n # (Re)bind port to the new host, if it needs to be bound\n if port_context.binding_levels is not None:\n current_bind = port_context.binding_levels[-1]\n\n if (current_bind is not None and\n current_bind.get(api.BOUND_DRIVER) == self.MECH_NAME):\n\n binding_type = self.get_vif_type(port_context)\n # Remove port membership from any previously associated\n # security groups for updating remote_security_group_id ACLs\n self.communicator.unbind_port_from_remote_groups(\n port_context._plugin_context.session,\n port_context.original,\n port_context.current)\n\n self.communicator.bind(port_context._plugin_context.session,\n port_context.current,\n current_bind[api.BOUND_SEGMENT],\n port_context.host,\n binding_type)\n\n # TODO(ijW): The agent driver checks for a change of\n # host, but we're oddly seeing that the orig_host is\n # always set. Should confirm if this is a problem or\n # not.\n self._insert_provisioning_block(port_context)",
"def _bbox_updated(self):\n self.updated = True",
"def update(self):\r\n self._state = self._dev.state",
"def bind(self, *args, **kwargs):\n self._canvas.bind(*args, **kwargs)",
"def update_port_postcommit(self, port_context):\n # TODO(ijw): optimisation: the update port may leave the\n # binding state the same as before if someone updated\n # something other than the binding on the port, but this\n # way we always send it out and it's the far end's job to\n # ignore it. Doing less work is nevertheless good, so we\n # should in future avoid the send.\n\n if port_context.binding_levels is not None:\n current_bind = port_context.binding_levels[-1]\n if port_context.original_binding_levels is None:\n prev_bind = None\n else:\n prev_bind = port_context.original_binding_levels[-1]\n\n if (current_bind is not None and\n current_bind.get(api.BOUND_DRIVER) == self.MECH_NAME):\n self.communicator.kick()\n elif (prev_bind is not None and\n prev_bind.get(api.BOUND_DRIVER) == self.MECH_NAME):\n self.communicator.kick()",
"def update(self, *args, **kwargs) -> None:\n self.update_state(args[0])\n super().update(*args, **kwargs)",
"def _getForBinding (self):\n return self.__forBinding",
"def update(cls) -> None:\n raise NotImplementedError"
] | [
"0.5982152",
"0.5768461",
"0.5693645",
"0.5657816",
"0.5610024",
"0.5507929",
"0.54648364",
"0.54608685",
"0.54569656",
"0.5437134",
"0.5436132",
"0.5410777",
"0.5389101",
"0.5331553",
"0.532694",
"0.52867365",
"0.52445334",
"0.522141",
"0.5190998",
"0.51589876",
"0.5157782",
"0.5135629",
"0.51258",
"0.5101367",
"0.501839",
"0.50090396",
"0.5007011",
"0.49951553",
"0.49892426",
"0.4958264"
] | 0.68966717 | 0 |
Get the binding associated with this variable. | def lookup(self, variable):
for binding in self.bindings:
if binding.variable.name == variable:
return binding
if self.static_link is not None:
return self.enclosing_frame.lookup(variable)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_binding(self, v: str) -> Optional[str]:\n assert is_var(v)\n t = self\n ret = t.binding.get(v)\n while not ret and t.parent:\n t = t.parent\n ret = t.binding.get(v)\n return ret",
"def _get_binding_record(self):\n binding = self.binder.get_bindings(domain=[('id', '=', self.binding_id)])\n assert len(binding) <= 1, \"More than one binding record returned!\"\n if binding:\n assert binding.id == self.binding_id, \"Id of returned binding does not match self.binding_id!\"\n assert binding._name == self.model._name, \"Model of binding record does not match self.model\"\n return binding",
"def _getForBinding (self):\n return self.__forBinding",
"def reg_binding(self, gate_reg_name):\n return self.reg_bind.get(gate_reg_name)",
"def param_binding(self, gate_param_name):\n return self.param_bind.get(gate_param_name)",
"def binding(model, binding):\n var = model.binding(binding)\n if var is None:\n raise myokit.IncompatibleModelError(\n model.name(),\n 'No variable found with binding \"' + str(binding) + '\".')\n return var",
"def resolve(self, binding_key: Hashable) -> Any:\n binding = self._bindings[binding_key](self)\n return binding",
"def binding_selector(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"binding_selector\")",
"def lookup_var(self, var):\n if var in self.binding:\n return self.binding[var]\n elif self.parent is not None:\n return self.parent.lookup_var(var)\n else:\n raise Environment.Unbound('unbound variable \"%s\"' % var)",
"def get_binding(self, orgname):\n pass",
"def value(self):\n if self.is_bound():\n return self._value\n else:\n raise UninitializedBinding('{} is unbound.'.format(self))",
"def binding_information(self):\n return self._binding_data",
"def get_bindpoint(self):\n return self.options['bindpoint']",
"def bind_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bind_name\")",
"def bind_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"bind_name\")",
"def binding_site(self):\n return self._binding_site",
"def bindings(self):\n return self.__bindings",
"def bind_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bind_name\")",
"def getBinding(o, name):\n raise RuntimeError()",
"def _get_reference_by_variable(self, var):\n if not var[0] == consts.VARIABLE:\n raise Exception('Internal error: Expected a variable, got: \"%r\"' % var)\n res = self._bindings.get(var, var)\n if res == consts.TOPIC_IN_FOCUS:\n res = self.focus\n while res[0] == consts.VARIABLE and self.parent:\n res = self.parent._get_reference_by_variable(res) #pylint: disable-msg=W0212\n if res == consts.TOPIC_IN_FOCUS:\n res = self.focus\n return res",
"def getvarbound(self,i_): # 3\n res,resargs = self.__obj.getvarbound(i_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value,_bl_return_value,_bu_return_value = resargs\n _bk_return_value = boundkey(_bk_return_value)\n return _bk_return_value,_bl_return_value,_bu_return_value",
"def lookup(self, label):\n if label in self.bindings:\n return self.bindings[label]\n else:\n if self.parent:\n return self.parent.lookup(label)\n else:\n raise SnekNameError(\"name '{}' is not defined\".format(label))",
"def getBindings(self):\n return self.getBindingManager().getBindings()",
"def lookup(self, var):\n \n namespace = self.first_namespace_that_binds_the_var(var)\n if namespace is None:\n raise LookupError(f'the variable \"{var}\" is not bound in this environment')\n return namespace[var]",
"def getNextBindingForDelivery(self):\r\n binding = None\r\n # If we now have more trx/rx bindings than have been used\r\n # then iterate through our trx/rx binds until we find one\r\n # that hasn't yet been used\r\n if len(self._delivery_binding_history) < self.getBindingCountForType(pdu_types.CommandId.bind_receiver):\r\n for binding in self._binds[pdu_types.CommandId.bind_receiver] + self._binds[pdu_types.CommandId.bind_transceiver]:\r\n if not binding in self._delivery_binding_history:\r\n break\r\n else:\r\n binding = None\r\n \r\n # Otherwise send on the last trx/rx binding delivered on, as\r\n # long as it is still bound\r\n while binding is None and self._delivery_binding_history:\r\n # get last binding used\r\n _binding = self._delivery_binding_history.popleft()\r\n # check it is still bound\r\n if _binding in self._binds[_binding.bind_type]:\r\n # If so then use it\r\n binding = _binding\r\n \r\n if binding is not None:\r\n self._delivery_binding_history.append(binding)\r\n return binding",
"def getvarbound(self,i_):\n bk_ = ctypes.c_int32()\n bl_ = ctypes.c_double()\n bu_ = ctypes.c_double()\n res = __library__.MSK_XX_getvarbound(self.__nativep,i_,ctypes.byref(bk_),ctypes.byref(bl_),ctypes.byref(bu_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _bk_return_value = boundkey(bk_.value)\n bl_ = bl_.value\n _bl_return_value = bl_\n bu_ = bu_.value\n _bu_return_value = bu_\n return (_bk_return_value,_bl_return_value,_bu_return_value)",
"def find_bindings(self, bindName):\n try:\n return self.bind2index[bindName]\n except:\n raise KeyError(\n f\"The binding {bindName} is not in the general list... check your input file!\")",
"def bound_for(self, name):\n if '.' in name:\n module, name = name.split('.', 1)\n if module in self._modules:\n return self.__getattr__(module).bound_for(name)\n else:\n raise AttributeError('Invalid bound name %s. '\n '%s has no module %s' % (name, type(self).__name__, module))\n else:\n if name in self._parameters:\n return self._bounds[name]\n else:\n raise AttributeError('Invalid bound name %s. '\n '%s has no parameter %s' % (name, type(self).__name__, module))",
"def getVariable(self):\n return _libsbml.Rule_getVariable(self)",
"def bind_type(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"bind_type\")"
] | [
"0.74545455",
"0.6984103",
"0.6952437",
"0.6926125",
"0.6841516",
"0.68226844",
"0.67925",
"0.6688647",
"0.6679474",
"0.6650369",
"0.6598982",
"0.65434706",
"0.64257133",
"0.6307767",
"0.630486",
"0.62405646",
"0.62294364",
"0.6209628",
"0.618126",
"0.59600055",
"0.5916183",
"0.5909439",
"0.58792263",
"0.5846009",
"0.5830707",
"0.5818714",
"0.57811064",
"0.5780117",
"0.573873",
"0.5724536"
] | 0.71176594 | 1 |
The frame pointed to by a StaticLink is the head. | def frame(self):
return self.head | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enclosing_frame(self):\n return self.static_link.frame",
"def head(self):\n return self._head",
"def head(self):\n return self._head",
"def do_HEAD(self):\n self.log.debug('do_HEAD called')\n self.HeadGet('HEAD')",
"def get_head_vertex(self):\n return self.graph.vertices[self.head_vertex.vertex_number]",
"def handle_magic_head(self, tag):\n if not self._skip_video_tag and tag.type == VIDEO_TAG:\n self._first_video_tag = tag\n self._head_dead_line = self.position()\n self._skip_video_tag = True\n if not self._skip_first_audio_tag and tag.type == AUDIO_TAG:\n self._first_audio_tag = tag\n self._head_dead_line = self.position()\n self._skip_first_audio_tag = True\n return (not(self._skip_video_tag and self._skip_first_audio_tag))",
"def atHead(self):\n return self.cursor == self.head",
"def getHostHead(self):\n return self.host_head",
"def head(self, head):\n\n self._head = head",
"def link_hopping(self):\n\n if self.is_empty():\n raise Empty(\"List is empty\")\n\n fast = slow = self._header\n\n while fast is not None:\n fast = fast._next\n if fast is not None and fast != self._trailer:\n fast = fast._next\n slow = slow._next\n return slow._element",
"def link(self):\n return self.container['link']",
"def selflinking(self):\n return self._selflinking",
"def getDomainHead(self):\n return self.domain_head",
"def __init__(self, head=None):\r\n self.head = head",
"def get_head_node_id() -> str:\n head_node_id = None\n for node in ray.nodes():\n if HEAD_NODE_RESOURCE_NAME in node[\"Resources\"] and node[\"Alive\"]:\n head_node_id = node[\"NodeID\"]\n break\n assert head_node_id is not None, \"Cannot find alive head node.\"\n\n return head_node_id",
"def __init__(self):\r\n self.head = None",
"def assert_has_valid_head(self, response, expected):\r\n assert 'head' in response\r\n head = response['head']\r\n assert isinstance(head, str)\r\n assert head == expected",
"def __init__(self, head):\n self.head = head",
"def __init__(self, head):\n self.head = head",
"def __init__(self, head):\n self.head = head",
"def __init__(self, head):\n self.head = head",
"def first(self):\r\n return self.__head",
"def do_HEAD(self):\r\n f = self.send_head()\r\n if f:\r\n f.close()",
"def __init__(self):\n self.head = None",
"def __init__(self):\n self.head = None",
"def __init__(self):\n self.head = None",
"def __init__(self):\n self.head = None",
"def __init__(self):\n self.head = None",
"def __init__(self):\n self.head = None",
"def __init__(self):\n self.head = None"
] | [
"0.57880884",
"0.5785145",
"0.5785145",
"0.550507",
"0.55021834",
"0.54791784",
"0.53977954",
"0.53869563",
"0.53779864",
"0.53493875",
"0.5296728",
"0.528315",
"0.52398014",
"0.52345103",
"0.52054757",
"0.52017754",
"0.51980126",
"0.5196535",
"0.5196535",
"0.5196535",
"0.5196535",
"0.5150175",
"0.5140671",
"0.5140356",
"0.5140356",
"0.5140356",
"0.5140356",
"0.5140356",
"0.5140356",
"0.5140356"
] | 0.6079391 | 0 |
The base of a StaticLink is the tail. | def base(self):
return self.tail | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rel_base(self, p_addr = 0):\n\t\trel_pos = self.get_address(p_addr, 1)\n\t\trel_val = self.get_data(rel_pos)\n\t\tself.rel_pos += rel_val\n\t\tself.pos += 2",
"def base(self):\n return self if self._base is None else self._base",
"def base_path(self):\n return self._base_path",
"def base_only(self):\n return self.base",
"def base_url_path(self):\n path = urlsplit(self.base_url())[2]\n if path.endswith(\"/\"):\n path = path[:-1]\n return path",
"def GetBaseURL(self):\n base_url = self.server_base_url\n if base_url is None:\n base_url = 'http://%s:%s' % self.server_address[:2]\n\n return base_url",
"def get_short_url_base():",
"def get_sdram_base_address_for(self, vertex):",
"def base(self) -> str:\n return self._base",
"def base_url(self) -> str | None:\n return self._base_url",
"def base(self):\n if self._base is None:\n return SE3()\n else:\n return self._base",
"def _get_base_url(self):\n return '/{}/'.format(self.name.replace('__', '/'))",
"def duplicate_static_links (self):\n # Create backward links\n backwards = [EdgeLink(src=link.dst, dst=link.src, id=str(link.id) + \"-back\",\n backward=True, delay=link.delay,\n bandwidth=link.bandwidth) for u, v, link in\n self.network.edges_iter(data=True) if link.type == Link.STATIC]\n # Add backward links to the NetworkX structure in a separate step to\n # avoid the link reduplication caused by the iterator based for loop\n for link in backwards:\n self.add_edge(src=link.src, dst=link.dst, link=link)\n return self",
"def base_url(self):\n return self._get_base_url()",
"def base(self):\n return os.path.basename(self.path)",
"def get_base_url(self):\n return self.base_url",
"def link(self):\n return self.container['link']",
"def get_base_url(self):\n return urlparse.urljoin(self.domain, self.root_path)",
"def base_url(self):\n return self._base_url",
"def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()",
"def get_base_url(self):\n return getattr(self.instance, 'base_url')",
"def endpoint(self, link):\n\n return join(self.baseurl, link)",
"def getSuffixLink(self):\n return self.suffix_link",
"def getBase(self):\n return self.base",
"def getBase(self):\n return self.base",
"def getBase(self):\n return self.base",
"def get_base_path(self) -> str:\n raise NotImplementedError()",
"def base_url(self) -> str:\n return self._base_url",
"def base_ring(self):\n return self.domain().base_ring()",
"def __init__(self):\n\n self.head = linkNode()\n self.tail = None\n # print(self.head.val)"
] | [
"0.56164706",
"0.5547956",
"0.5547714",
"0.5442039",
"0.5399766",
"0.53787285",
"0.5375355",
"0.53430074",
"0.5331589",
"0.5316092",
"0.5314165",
"0.53085375",
"0.5287327",
"0.5280603",
"0.5275888",
"0.52740526",
"0.52636313",
"0.52579504",
"0.52409434",
"0.52350146",
"0.5231127",
"0.52282053",
"0.52233094",
"0.5210127",
"0.5210127",
"0.5210127",
"0.52068603",
"0.5205854",
"0.51985586",
"0.5185949"
] | 0.67270696 | 0 |
The variable is at the tail of the Binding Connector. | def variable(self):
return self.tail | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def LocalEndPoint(self) -> _n_5_t_1:",
"def _bind(self):\n\n pass",
"def add_binding(self, variable, value):\n # If there's already a binding, update it rather than add a new one.\n for binding in self.bindings:\n if binding.variable.name == variable:\n return self.update_binding(variable, value)\n variable = Variable(self.canvas, self, variable)\n binding = Binding(self.canvas, variable, value)\n self.bindings.append(binding)\n x, y = self.pos\n variable.set_pos(x + 10, y + len(self.bindings) * 20)\n if value.moves_with_binding:\n value.set_pos(x + 140, y + len(self.bindings) * 20)\n self.update()",
"def variable(self):\n return _coconut_tail_call(Var, self.name)",
"def update_binding(self, variable, value):\n old_value = self.lookup(variable).value\n if old_value is None:\n raise BaseException(\n \"Tried to update a variable that's not in scope!\")\n var_x, var_y = self.lookup(variable).variable.pos\n self.lookup(variable).value = value\n if old_value.moves_with_binding:\n old_value.set_pos(0, 0) # Or better yet, somehow remove it\n if value.moves_with_binding:\n value.set_pos(var_x + 130, var_y)\n self.update()",
"def variable(self):",
"def get_binding(self, orgname):\n pass",
"def after_bind(self, node, kw):\n pass",
"def after_bind(self, node, kw):\n pass",
"def test_bind(self):\n x = t.Exactly(\"x\")\n b = t.Bind(\"var\", x)\n self.assertEqual(writePython(b),\n dd(\"\"\"\n _G_exactly_1, lastError = self.exactly('x')\n self.considerError(lastError, None)\n _locals['var'] = _G_exactly_1\n _locals['var']\n \"\"\"))",
"def binding(model, binding):\n var = model.binding(binding)\n if var is None:\n raise myokit.IncompatibleModelError(\n model.name(),\n 'No variable found with binding \"' + str(binding) + '\".')\n return var",
"def __init__(self, canvas, variable, value):\n Connector.__init__(self, canvas, value, variable)",
"def end_address(self):\n return self.address + len(self.data)",
"def get_bindpoint(self):\n return self.options['bindpoint']",
"def _getForBinding (self):\n return self.__forBinding",
"def _origin(self):\n return 1",
"def __init__(self):\n super().__init__()\n self.port_end = PortTerminator()",
"def Port(self) -> int:",
"def binding_information(self):\n return self._binding_data",
"def protocol(self):\n ...",
"def RemoteEndPoint(self) -> _n_5_t_1:",
"def RemoteEndPoint(self) -> _n_5_t_1:",
"def address(self):\n ...",
"def incBound(self):\n # increment the bound for the local variables.\n self.incVariableList()\n super().incBound()\n\n # get the last variable.\n idx = len(self.stateNormalPath) - 1\n assert(idx > 0)\n\n # we add the constraints that specify the id of the transition\n self.addConstraintOnIdTransition(idx)",
"def bound(name):",
"def RemoteEndPoint(self) -> _n_5_t_2:",
"def variable(self) -> Variable:\n ...",
"def binding_site(self, binding_site):\n self._binding_site = binding_site",
"def bottom(self, value):\n\n pass",
"def variable(self, val):"
] | [
"0.5865614",
"0.58038676",
"0.5525564",
"0.55219877",
"0.54474205",
"0.5439413",
"0.53656787",
"0.5347091",
"0.5347091",
"0.5289647",
"0.5277234",
"0.5262203",
"0.52546877",
"0.5248067",
"0.5224067",
"0.520301",
"0.51902634",
"0.51829165",
"0.51748765",
"0.5161431",
"0.51585275",
"0.51585275",
"0.515703",
"0.5156646",
"0.5145834",
"0.51454264",
"0.5126998",
"0.5100683",
"0.50689334",
"0.5046816"
] | 0.60987073 | 0 |
The value is at the tail of the Binding Connector. | def value(self, new_val):
# TODO: This is a hack, we should have the head/tail be properties with
# appropriate setter methods in Connector.
self.head = new_val
self.head.add_connector(self)
self.update() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def bottom(self, value):\n\n pass",
"def value(self):\n if self.is_bound():\n return self._value\n else:\n raise UninitializedBinding('{} is unbound.'.format(self))",
"def binding_information(self):\n return self._binding_data",
"def cdr(self, value):\n self.pair.cdr = value",
"def end_address(self):\n return self.address + len(self.data)",
"def binding(self, value):\n old_value = self._value\n self._set(value)\n yield\n self._set(old_value)",
"def get_bindpoint(self):\n return self.options['bindpoint']",
"def variable(self):\n return self.tail",
"def last_value(self):\n return self._value",
"def LocalEndPoint(self) -> _n_5_t_1:",
"def complete_value(self, value):\n pass",
"def end(self):\n return self._values.head",
"def tail(self):\n return self._tail",
"def tail(self):\n return self._tail",
"def at(self):\n return self.data[self.end]",
"def cdr(self):\n return self.pair.cdr",
"def _getForBinding (self):\n return self.__forBinding",
"def bot(self):\n if self.is_empty():\n return None\n return self._tail.value",
"def get_tail(self):\n return self._readahead.getvalue()",
"def set(self, value):\n self._proxy.delete(0, tkinter.END)\n self._proxy.insert(0, value)",
"def last_value(self):\n return 0",
"def bottom(self):\n return self.__b",
"def _update_value(self, value):\n super(OutputPlug, self)._update_value(value)\n for plug in self.connections:\n plug.value = value",
"def __init__(self, canvas, variable, value):\n Connector.__init__(self, canvas, value, variable)",
"def value(self, value):\n self._value = value\n self.is_dirty = True\n for plug in self.connections:\n plug.value = value",
"def _bind(self):\n\n pass",
"def end(self):\n return self.__end_line",
"def last_value(self):\n return self._last_value",
"def get_binding(self, orgname):\n pass",
"def base(self):\n return self.tail"
] | [
"0.60678184",
"0.5622013",
"0.56060386",
"0.5578984",
"0.55146396",
"0.54757786",
"0.5419785",
"0.5415978",
"0.5368446",
"0.53399867",
"0.5329448",
"0.5305098",
"0.5272067",
"0.5272067",
"0.52655005",
"0.5237537",
"0.5227217",
"0.5215954",
"0.52116716",
"0.5199955",
"0.5177223",
"0.51715815",
"0.5133266",
"0.5127165",
"0.51136863",
"0.5107595",
"0.5103758",
"0.5100104",
"0.5091982",
"0.5075233"
] | 0.6146018 | 0 |
Move the Binding. Moves the variable by the given amounts. If value.moves_with_binding is True, it is also moved by the given amounts. Updates the Connector after the movement has been done. | def move(self, dx, dy):
self.variable.move(dx, dy)
if self.value.moves_with_binding:
self.value.move(dx, dy)
self.update() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_binding(self, variable, value):\n old_value = self.lookup(variable).value\n if old_value is None:\n raise BaseException(\n \"Tried to update a variable that's not in scope!\")\n var_x, var_y = self.lookup(variable).variable.pos\n self.lookup(variable).value = value\n if old_value.moves_with_binding:\n old_value.set_pos(0, 0) # Or better yet, somehow remove it\n if value.moves_with_binding:\n value.set_pos(var_x + 130, var_y)\n self.update()",
"def add_binding(self, variable, value):\n # If there's already a binding, update it rather than add a new one.\n for binding in self.bindings:\n if binding.variable.name == variable:\n return self.update_binding(variable, value)\n variable = Variable(self.canvas, self, variable)\n binding = Binding(self.canvas, variable, value)\n self.bindings.append(binding)\n x, y = self.pos\n variable.set_pos(x + 10, y + len(self.bindings) * 20)\n if value.moves_with_binding:\n value.set_pos(x + 140, y + len(self.bindings) * 20)\n self.update()",
"def move_by(cls, value):\n cls.set_position(cls._position + value)",
"def move(self, direction, cycles):\n\t\tpass",
"def move(self,amount):\n self.positionx=self.positionx+self.amount\n return self.positionx",
"def move(self, direction):\n pass",
"def move(self,amount):\n angle=self.dirction/180*math.pi\n self.postionx += amount*math.cos(angle)\n self.postiony += amount*math.sin(angle)",
"def move(self, direction):\n\n if direction == \"north\":\n self.go_and_update(-1, 0)\n\n elif direction == \"south\":\n self.go_and_update(1, 0)\n\n elif direction == \"east\":\n self.go_and_update(0, 1)\n\n elif direction == \"west\":\n self.go_and_update(0, -1)",
"def table_move_update():\n pos = self.variables.table.get_current_position()\n self.table_move_ui.x_move.setProperty(\"value\", int(pos[0]))\n self.table_move_ui.y_move.setProperty(\"value\", int(pos[1]))\n self.table_move_ui.z_move.setProperty(\"value\", int(pos[2]))",
"def _move(self):\n self.pos += self.direction # add direction vector\n self.direction += self.gravity # add gravity to direction\n self.direction = self.direction.elementwise() * self.drag # apply drag to direction",
"def move_cursor(self, direction):\n movement = 1\n last_input = \"\"\n if direction is Direction.U:\n movement = -1\n last_input = \"w\"\n elif direction is Direction.L:\n movement = -1\n last_input = \"a\"\n elif direction is Direction.D:\n last_input = \"s\"\n elif direction is Direction.R:\n last_input = \"d\"\n\n uielements = self.buttons + self.numberfields\n\n # Button selection rules:\n # Hard <- Custom <- Easy -> Medium -> Hard -> Custom -> NumberField\n # Button | NumberField <- NumberField -> NumberField\n if movement == 1:\n if self.selected is uielements[-1]:\n next_selected = self.numberfields[0]\n else:\n next_selected = uielements[uielements.index(self.selected)+1]\n else:\n if self.selected is uielements[0]:\n next_selected = self.buttons[-1]\n else:\n next_selected = uielements[uielements.index(self.selected)-1]\n\n # Update UIElement hovering.\n self.selected.set_hovered(False)\n next_selected.set_hovered(True)\n\n # Update changed settings.\n if self.selected.get_type() is UIType.NumberField:\n self.selected.fix_bounds()\n values = [numberfield.value for numberfield in self.numberfields]\n self.controller.set_custom_field_options(Option(*values))\n\n # Update NumberField focus.\n condition = next_selected.get_type() is UIType.NumberField\n next_focus = next_selected if condition else None\n self.set_focused_ui(next_focus)\n\n self.selected = next_selected\n self.update_information_box_text()\n\n self.controller.set_last_inp(last_input)",
"def move(self, dx, dy):\n self.x += dx\n self.y += dy",
"def move(self, coordinates, direction):\n pass",
"def move(self, direction):\r\n dx = direction[0]\r\n dy = direction[1]\r\n\r\n self.head[0] += dx * 10\r\n self.head[1] += dy * 10\r\n\r\n for i in range(self.length - 1):\r\n self.body[i] = self.body[i + 1]\r\n self.body[-1] = copy.copy(self.head)",
"def move(self, direction):\r\n self.stored_direction = direction",
"def move(self, x, y):\n self.x = x\n self.y = y\n self.call('move', x, y)",
"def move_ball():\n print(\"Current position: ({},{}). \"\n \"Direction: ({},{}). Value: {}\".format(shared.ball_yy, shared.ball_xx,\n shared.direction[0], shared.direction[1],\n map_data[shared.ball_yy][shared.ball_xx]))\n if does_apply_direction():\n shared.ball_yy += shared.direction[0]\n shared.ball_xx += shared.direction[1]\n else:\n pass\n # shared.ball_yy = shared.ball_yy + shared.direction[0] \\\n # if default_positions.get(collision)[0] == None else default_positions.get(collision)[0]\n # shared.ball_xx = shared.ball_xx + shared.direction[1] \\\n # if default_positions.get(collision)[1] == None else default_positions.get(collision)[1]",
"def binding(self, value):\n old_value = self._value\n self._set(value)\n yield\n self._set(old_value)",
"def move(self, distance):\n self._go(distance)",
"def move(self, p):\r\n self.position.setvalue(p)",
"def displace(self,dirk,val):\n idirk = dir2ind[dirk]\n print(\">>> WARNING: displacing nodes along %s, limits = %1.5f-->%1.5f by %1.4f\"%(\n dirk, self.box_lim[0][idirk],self.box_lim[1][idirk], val))\n \n for i in range(len(self.nodes)): self.nodes[i].x[idirk] += val\n self.wrap_nodes([idirk]) # wrap nodes",
"def move(self, *step):\n self.x += step[0]\n self.y += step[1]",
"def move(self, direction):\n # replace with your code\n pass",
"def move(self, direction):\n # replace with your code\n pass",
"def make_combobox_movements(self):\n self.gui.comboBox_kindOfMove.setCurrentText(self.current_move)\n self.gui.comboBox_kindOfMove.currentTextChanged.connect(self.get_move)\n\n self.gui.comboBox_unit.setCurrentText('um')\n self.gui.doubleSpinBox_distance.setValue(self.distance.m_as('um'))\n self.gui.doubleSpinBox_distance.valueChanged.connect(self.set_distance)\n self.gui.comboBox_unit.currentTextChanged.connect(self.set_distance)\n\n # self.gui.pushButton_left.setCheckable(True)\n # self.gui.pushButton_left.toggle()\n self.gui.pushButton_left.clicked.connect(lambda: self.move('left'))\n self.gui.pushButton_right.clicked.connect(lambda: self.move('right'))\n self.gui.pushButton_up.clicked.connect(lambda: self.move('up'))\n self.gui.pushButton_down.clicked.connect(lambda: self.move('down'))",
"def update_direction(self, move : np.ndarray, direction: np.ndarray):\r\n pos = move.copy()\r\n \r\n\r\n pos += direction\r\n while(self.in_board(pos)):\r\n if self.board[pos[0],pos[1]] == self.turn:\r\n pos -= direction\r\n while((pos != move).any()):\r\n self.board[pos[0], pos[1]] = self.turn\r\n self.count += 1\r\n pos -= direction\r\n break\r\n\r\n elif self.board[pos[0],pos[1]] == 0:\r\n\r\n break\r\n else:\r\n pos += direction",
"def move(self, delta):\n newPos = (self._pos + delta) % self._board.size\n # check for Pass GO condition\n if delta > 0 and newPos < self._pos:\n self._cash += 200\n self._board.acceptNotification(notification.PNPassGo(self))\n\n self._pos = newPos\n self._board.acceptNotification(notification.PNPlayerMove(self))",
"def move(self, x, y):\n self.x+=x\n self.y+=y",
"def update(self):\n if not self._move:\n self.get_next_move()\n if self._move:\n self._move.update()",
"def moving(self, moving):\n\n self._moving = moving"
] | [
"0.60930204",
"0.5636774",
"0.56021535",
"0.5309398",
"0.5231337",
"0.52088094",
"0.5198945",
"0.51483846",
"0.51353896",
"0.50959015",
"0.50775903",
"0.5074223",
"0.50660914",
"0.50594985",
"0.50539726",
"0.50329864",
"0.5032517",
"0.5019829",
"0.50132936",
"0.5010506",
"0.49744585",
"0.4953096",
"0.49459773",
"0.49459773",
"0.49157795",
"0.49150562",
"0.48877728",
"0.48754475",
"0.48661548",
"0.48655695"
] | 0.6740303 | 0 |
Location of the center of gravity of the compound stabilizer w.r.t. the origin. This is calculated as a weighted average. | def center_of_gravity(self):
weights = [self.stabilizer_h.weight, self.stabilizer_vright.weight, self.stabilizer_vleft.weight]
cgs = [self.stabilizer_h.center_of_gravity, self.stabilizer_vright.center_of_gravity,
self.stabilizer_vleft.center_of_gravity]
total_weight = sum(weights)
cg_x = sum([weights[i] * cgs[i].x for i in range(0, len(weights))]) / total_weight
cg_y = sum([weights[i] * cgs[i].y for i in range(0, len(weights))]) / total_weight
cg_z = sum([weights[i] * cgs[i].z for i in range(0, len(weights))]) / total_weight
return Point(cg_x, cg_y, cg_z) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _center(self, forces):\n\t\t\n\t\tzipped = zip(self.grid.corners(), forces)\n\t\treturn self._weightedAverage(zipped)",
"def center(self):\n return self._lower + 0.5 * (self._upper - self._lower)",
"def center(self):\n try: \n return self._center\n except AttributeError:\n self._center = vector(ZZ, [0]*self.ambient_dim())\n for v in self.vertex_generator(): self._center += v.vector()\n self._center /= self.n_vertices()\n return self._center",
"def center(self):\n return (self.upper_right + self.lower_left) * 0.5",
"def get_weighted_rf_center(self):\n return self.get_weighted_center_in_coordinate(self.altPos, self.aziPos)",
"def distance_to_galactic_center(self):\n l, b = self.galactic_coords\n h_star_gcp = self.distance * np.sin(b)\n d_star_sun = self.distance * np.cos(b)\n d_star_gc = np.sqrt(d_star_sun**2 + d_sun_GC**2 - 2*d_star_sun*d_sun_GC*np.cos(l))\n return d_star_gc",
"def getCenter(self):\n return Point.average(self.points)",
"def center(self):\n return np.array([0,0,1/self.C+self.pos()])",
"def center(self):\n return self.map_.geom.center_skydir",
"def calculateCenter(self):\n y_avg = int(sum(self.points[:,0])/float(len(self.points)))\n x_avg = int(sum(self.points[:,1])/float(len(self.points)))\n self.center = (x_avg, y_avg)\n return(x_avg,y_avg)",
"def center(self):\n return self.centralizer(self)",
"def center(self):\n # minz to offset the heights to 0\n mz = (self.maxz-self.minz)/2\n #mz = self.minz\n return (self.minx + self.width / 2, self.miny + self.height / 2, mz)",
"def center(self):\n return np.sum(self.bbox, 0) / 2",
"def get_center(self):\n\n x = np.array(self.x)\n y = np.array(self.y)\n return np.mean(x), np.mean(y)",
"def test_one_center(self):\n sv=system_vars_c().init_xyzlike([ [8, [0.0, 0.0, 0.0]]])\n atom2rcut=np.array([5.0])\n g = dft.gen_grid.Grids(sv)\n g.level = 1 # precision as implemented in pyscf\n g.radi_method=leggauss_ab\n g.build(atom2rcut=atom2rcut)\n\n #print( max( np.linalg.norm(g.coords, axis=1) ) )\n #print( g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0 )\n self.assertAlmostEqual(max( np.linalg.norm(g.coords, axis=1) ), 4.9955942742763986)\n self.assertAlmostEqual(g.weights.sum(), 4.0 *np.pi*5.0**3 / 3.0)\n self.assertEqual(len(g.weights), 6248)",
"def calculate_center(self):\n return [(self.startX + self.endX) / 2., (self.startY + self.endY) / 2.]",
"def center(self):\n if not hasattr(self, '_center'):\n self._center = np.unique(self.points, axis=0).mean(axis=0)\n return self._center",
"def center(self):\n return self.pos + self.axis / 2.0",
"def center(self):\n points = set()\n for face in self._points:\n points.update(face)\n x_points = [point[0] for point in points]\n y_points = [point[1] for point in points]\n z_points = [point[2] for point in points]\n return \\\n (np.average(x_points), np.average(y_points), np.average(z_points))",
"def getCenter(self):\n return [self.tx/self.tw, self.ty/self.tw]",
"def center(self) -> Tuple[float, float]:\n return self.x + self.width / 2, self.y + self.height / 2",
"def find_center(self) -> tuple:\r\n \r\n # Add up all the x values of pixels in the plant\r\n # Then divide by total pixels in the plant\r\n avg_x = sum([i[0] for i in self.cluster]) / len(self.cluster)\r\n\r\n # Add up all the y values of pixels in the plant\r\n # Then divide by total pixels in the plant\r\n avg_y = sum([i[1] for i in self.cluster]) / len(self.cluster)\r\n\r\n self.center = (int(round(avg_x)), int(round(avg_y)))\r\n \r\n # return the results in a tuple of integers\r\n return self.center",
"def get_center_of_mass_allies(self,obs):",
"def get_min_mag_center(self):\r\n\t\treturn self.min_mag + self.bin_width / 2",
"def center(self):\n bounds = self.bounds\n x = (bounds[1] + bounds[0]) / 2\n y = (bounds[3] + bounds[2]) / 2\n z = (bounds[5] + bounds[4]) / 2\n return [x, y, z]",
"def get_center(self):\n return center_points(np.expand_dims(self.best_box, axis=0))[0]",
"def get_center(self) -> Tuple[int, int]:\n raise NotImplementedError()",
"def getcenter(self):\n return self.centro.cartesianas()",
"def estimate_centroid(self):\r\n\t\tstrain = self.strain_distribution_compr(self.max_pure_compresive_strain,\\\r\n\t\t\tself.max_pure_compresive_strain)\r\n\t\tself.geometric_centrod = (self.depth/2) \r\n\t\tself.plastic_centroid = (self.depth/2)+\\\r\n\t\t\t(self.sectional_moment(strain, self.depth/2)/\\\r\n\t\t\tself.sectional_force(strain))",
"def compute_centrifugal(self):\r\n # update the coordinates\r\n self.get_coords()\r\n\r\n # compute the centrifugal force\r\n self.centrifugal.assign(project(\r\n -1*self.rho*cross(self.omega, cross(self.omega, self.r)), self.V))"
] | [
"0.68209493",
"0.6734817",
"0.67244107",
"0.67140454",
"0.66436017",
"0.6559522",
"0.64326143",
"0.6418216",
"0.6416276",
"0.63852113",
"0.635442",
"0.63412035",
"0.63249606",
"0.6314047",
"0.6261982",
"0.6245681",
"0.6224372",
"0.61856043",
"0.61612463",
"0.6155308",
"0.60794574",
"0.60650516",
"0.60447687",
"0.6040608",
"0.60286504",
"0.6013966",
"0.6013621",
"0.60058653",
"0.597444",
"0.5962821"
] | 0.798243 | 0 |
This joins the tails and connector shafts together through a series of Fuse operations so that a single `external_shape` can be presented for the .step file output. | def tail_joiner(self):
# Fusing Right Horizontal Tail:
shape_in_r = Fused(shape_in=self.stabilizer_h.solid, tool=self.stabilizer_vright.solid)
shape_out_r = Fused(shape_in=shape_in_r, tool=self.connector_right)
# Fusing Left Horizontal Tail:
shape_in_l = Fused(shape_in=self.stabilizer_h.ht_mirror, tool=self.stabilizer_vleft.solid)
shape_out_l = Fused(shape_in=shape_in_l, tool=self.connector_left)
shape_out = Fused(shape_in=shape_out_r, tool=shape_out_l)
return shape_out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mainFunction(f):\n\n #############################################################################\n \n \n # biomass hexagon\n predF = '/vol/v3/lt_stem_v3.1/models/biomassfiaald_20180708_0859/2000/biomassfiaald_20180708_0859_2000_mean.tif'\n trainF = '/vol/v2/datasets/biomass/nbcd/fia_ald/nbcd_fia_ald_biomass_clipped_to_conus.tif'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = -32768\n predND = -9999\n trgField = 'id'\n descrField = 'id'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/biomassfiaald_20180708_0859/hexagon_correlation'\n xyLim = (500, 500)\n xLab = 'Reference (tons/ha)'\n yLab = 'Prediction (tons/ha)'\n annoXY = (15,420)\n \n \n \"\"\"\n # cc\n predF = '/vol/v3/lt_stem_v3.1/models/canopy_20180915_1631/2001/canopy_20180915_1631_2001_mean.tif'\n trainF = '/vol/v2/stem/conus/reference_rasters/nlcd_2001_canopy_clipped_to_conus_train.tif'\n #shpF = '/vol/v2/datasets/Eco_Level_III_US/us_eco_l3_no_states_multipart.shp'\n shpF = '/vol/v1/general_files/datasets/spatial_data/hexagons/hexagons_conus_albers_30km_with_id.shp'\n trainND = 255\n predND = 255\n trgField = 'id'\n descrField = 'id'\n #trgField = 'US_L3CODE'\n #descrField = 'US_L3NAME'\n #outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/ecoregion_correlation'\n outDir = '/vol/v3/lt_stem_v3.1/evaluation/canopy_20180915_1631/hexagon_correlation'\n xyLim = (100, 100)\n xLab = 'Reference (%)'\n yLab = 'Prediction (%)'\n annoXY = (5,82)\n \"\"\"\n #############################################################################\n\n\n # get color setup\n norm = colors.Normalize(vmin=0, vmax=1)\n f2rgb = cm.ScalarMappable(norm=norm, cmap=cm.get_cmap('YlGnBu_r'))\n \n # open the shapefile\t\n vDriver = ogr.GetDriverByName(\"ESRI Shapefile\")\n vSrc = vDriver.Open(shpF, 0)\n vLayer = vSrc.GetLayer()\n \n commonBox = get_intersec([predF, trainF])\n\n#for f in range(vLayer.GetFeatureCount()):\n feature = vLayer[f]\n name = feature.GetField(trgField)\n print('f: '+str(f))\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n if os.path.exists(outFig):\n #break\n return\n \n descr = feature.GetField(descrField)\n \n predP, coords = get_zone_pixels(feature, shpF, predF, 1, [commonBox[0], commonBox[2], commonBox[3], commonBox[1]])#.compressed() [commonBox[0], commonBox[2], commonBox[3], commonBox[1]]\n trainP, coords = get_zone_pixels(feature, shpF, trainF, 1, [coords[0], coords[1], coords[2], coords[3]])#.compressed()\n \n predP = ma.masked_equal(predP, predND)\n trainP = ma.masked_equal(trainP, trainND)\n trainP = ma.masked_equal(trainP, 0)\n\n combMask = np.logical_not(np.logical_not(predP.mask) * np.logical_not(trainP.mask))\n predP[combMask] = ma.masked\n trainP[combMask] = ma.masked\n predP = predP.compressed()\n trainP = trainP.compressed()\n if (predP.shape[0] == 0) | (trainP.shape[0] == 0) | (predP==0).all() | (trainP==0).all():\n predP = np.array([0,0,1,1], dtype='float64')\n trainP = np.array([0,0,1,1], dtype='float64')\n mae = round(np.mean(np.absolute(np.subtract(predP, trainP))),1)\n rmse = round(np.sqrt(np.mean((predP-trainP)**2)),1)\n \n\n totPixs = trainP.shape[0]\n sampSize = round(totPixs*1)\n pickFrom = range(sampSize)\n #sampIndex = np.random.choice(pickFrom, size=sampSize)\n sampIndex = pickFrom\n\n r = round(np.corrcoef(trainP[sampIndex], predP[sampIndex])[0][1], 2)\n if (mae == 0) & (r == 1):\n r = 0.0\n rColor = f2hex(f2rgb, r)\n p = sns.jointplot(trainP[sampIndex], predP[sampIndex], kind=\"hex\", 
color='blue', xlim=(0,xyLim[0]), ylim=(0,xyLim[1]), size=5)\n p.ax_joint.set_xlabel(xLab)\n p.ax_joint.set_ylabel(yLab)\n p.ax_joint.annotate('r: '+str(r)+'\\nrmse: '+str(rmse)+'\\nmae: '+str(mae), annoXY)\n plt.tight_layout()\n outFig = os.path.join(outDir, (trgField.replace(' ','_').lower()+'_'+str(name)+'.png'))\n p.savefig(outFig)\n \n df = pd.DataFrame({'id':name, 'descr':descr, 'r':r, 'rmse':rmse, 'mae':mae, 'color':rColor, 'img':os.path.basename(outFig)}, index=[0])\n outCSV = outFig.replace('.png','.csv')\n df.to_csv(outCSV, ',', index=False)",
"def write_shapefile_combined(self, shpname):\r\n self.read_traveltime()\r\n \r\n westlats1 = []\r\n westlons1 = []\r\n eastlats1 = []\r\n eastlons1 = [] \r\n lines1 = []\r\n #### points ####\r\n lats1 = []\r\n lons1 = []\r\n for i in range(len(self.westPnts1)):\r\n westlat1, westlon1 = utm.to_latlon(self.westPnts1[i,0], self.westPnts1[i,1], 14, 'U')\r\n eastlat1, eastlon1 = utm.to_latlon(self.eastPnts1[i,0], self.eastPnts1[i,1], 14, 'U')\r\n lines1.append([[westlon1, westlat1], [eastlon1, eastlat1]])\r\n westlats1.append(westlat1)\r\n westlons1.append(westlon1)\r\n eastlats1.append(eastlat1)\r\n eastlons1.append(eastlon1)\r\n #### points ####\r\n lats1.append((westlat1+eastlat1)/2.)\r\n lons1.append((westlon1+eastlon1)/2.)\r\n \r\n \r\n \r\n \r\n \r\n westlats5 = []\r\n westlons5 = []\r\n eastlats5 = []\r\n eastlons5 = [] \r\n lines5 = []\r\n #### points ####\r\n lats5 = []\r\n lons5 = []\r\n for i in range(len(self.westPnts5)):\r\n westlat5, westlon5 = utm.to_latlon(self.westPnts5[i,0], self.westPnts5[i,1], 14, 'U')\r\n eastlat5, eastlon5 = utm.to_latlon(self.eastPnts5[i,0], self.eastPnts5[i,1], 14, 'U')\r\n lines5.append([[westlon5, westlat5], [eastlon5, eastlat5]])\r\n westlats5.append(westlat5)\r\n westlons5.append(westlon5)\r\n eastlats5.append(eastlat5)\r\n eastlons5.append(eastlon5)\r\n #### points ####\r\n lats5.append((westlat5+eastlat5)/2.)\r\n lons5.append((westlon5+eastlon5)/2.)\r\n \r\n \r\n Narray_branch1 = len(self.inarrays_branch1)\r\n Narray_branch5 = len(self.inarrays_branch5)\r\n \r\n #### travel time for branch 1\r\n Ttime = self.inarrays_branch1[0][:,2]\r\n ind0 = np.nonzero(Ttime)[0][0]\r\n ind = np.arange(ind0, Ttime.shape[0])\r\n \r\n\r\n branchIDs_branch1 = []\r\n SegIDs_branch1 = []\r\n lines_branch1 = []\r\n westlats_branch1 = []\r\n westlons_branch1 = []\r\n eastlats_branch1 = []\r\n eastlons_branch1 = []\r\n lats_branch1 = []\r\n lons_branch1 = []\r\n Ttimes_branch1 = []\r\n Density_branch1 = []\r\n Initial_loc_branch1 = []\r\n solubility_branch1 = []\r\n flow_condition_branch1 = []\r\n concentration_branch1 = []\r\n water_level_branch1 = []\r\n dist_branch1 = []\r\n \r\n \r\n \r\n for iarray in range(Narray_branch1):\r\n \r\n #### find indexes which segment has travel time\r\n Ttime_tem = self.inarrays_branch1[iarray][:,2]\r\n ind0 = np.nonzero(Ttime_tem)[0][0]\r\n ind = np.arange(ind0, Ttime_tem.shape[0])\r\n \r\n for i in range(self.inarrays_branch1[0].shape[0]):\r\n \r\n if i in ind:\r\n branchIDs_branch1.append(self.inarrays_branch1[iarray][i,0])\r\n SegIDs_branch1.append(self.inarrays_branch1[iarray][i,1])\r\n lines_branch1.append(lines1[i])\r\n westlats_branch1.append(westlats1[i])\r\n westlons_branch1.append(westlons1[i])\r\n eastlats_branch1.append(eastlats1[i])\r\n eastlons_branch1.append(eastlons1[i])\r\n lats_branch1.append(lats1[i])\r\n lons_branch1.append(lons1[i])\r\n \r\n Ttimes_branch1.append(self.inarrays_branch1[iarray][i,2])\r\n if self.inarrays_branch1[iarray][i,3] == 0:\r\n Density_branch1.append('Light')\r\n elif self.inarrays_branch1[iarray][i,3] == 1:\r\n Density_branch1.append('Heavy')\r\n elif self.inarrays_branch1[iarray][i,3] == 9:\r\n Density_branch1.append('None')\r\n \r\n Initial_loc_branch1.append('East')\r\n \r\n if self.inarrays_branch1[iarray][i,5] == 0:\r\n solubility_branch1.append('Insoluble')\r\n elif self.inarrays_branch1[iarray][i,5] == 1:\r\n solubility_branch1.append('Soluble')\r\n \r\n if self.inarrays_branch1[iarray][i,6] == 3:\r\n #flow_condition_branch1.append('High')\r\n flow_condition_branch1.append('> 945 
cfs')\r\n elif self.inarrays_branch1[iarray][i,6] == 2:\r\n #flow_condition_branch1.append('Medium')\r\n flow_condition_branch1.append('110 ~ 945 cfs')\r\n elif self.inarrays_branch1[iarray][i,6] == 1:\r\n #flow_condition_branch1.append('Low')\r\n flow_condition_branch1.append('< 110 cfs')\r\n \r\n if self.inarrays_branch1[iarray][i,7] != 0:\r\n concentration_branch1.append(\"{:.3E}\".format(Decimal(self.inarrays_branch1[iarray][i,7])))\r\n else:\r\n concentration_branch1.append(str(self.inarrays_branch1[iarray][i,7]))\r\n \r\n water_level_branch1.append(self.inarrays_branch1[iarray][i,8])\r\n dist_branch1.append(self.inarrays_branch1[iarray][i,9])\r\n \r\n #### travel time for branch 5\r\n #Ttime = self.inarrays_particle_branch5[0][:,2]\r\n #ind1 = np.arange(43, 45) -1 #### hard coded, for release in branch 5\r\n #ind5 = np.nonzero(Ttime)[0]\r\n \r\n \r\n branchIDs_branch5 = []\r\n SegIDs_branch5 = []\r\n lines_branch5 = []\r\n westlats_branch5 = []\r\n westlons_branch5 = []\r\n eastlats_branch5 = []\r\n eastlons_branch5 = []\r\n lats_branch5 = []\r\n lons_branch5 = []\r\n Ttimes_branch5 = []\r\n Density_branch5 = []\r\n Initial_loc_branch5 = []\r\n solubility_branch5 = []\r\n flow_condition_branch5 = []\r\n concentration_branch5 = []\r\n water_level_branch5 = []\r\n dist_branch5 = []\r\n \r\n \r\n ## loop over all travel time for each array, find which is in branch 1 and which is in branch 5\r\n for iarray in range(Narray_branch5): \r\n \r\n #### find indexes which segment has travel time\r\n Ttime_tem = self.inarrays_branch5[iarray][:,2]\r\n \r\n nbr5 = len(lines5) ## number of elements in branch 5\r\n ind1 = np.arange(43, 45) -1 + nbr5 #### hard coded, for release in branch 5 len(branch5)+ [43,44] - 1\r\n ind5 = np.nonzero(Ttime_tem)[0]\r\n \r\n for i in range(self.inarrays_branch5[0].shape[0]):\r\n #if iarray==6 and i == 44:\r\n # pdb.set_trace()\r\n if self.inarrays_branch5[iarray][i,0] == 5: ## at branch 5\r\n \r\n if i in ind5:\r\n branchIDs_branch5.append(self.inarrays_branch5[iarray][i,0])\r\n SegIDs_branch5.append(self.inarrays_branch5[iarray][i,1])\r\n lines_branch5.append(lines5[i])\r\n westlats_branch5.append(westlats5[i])\r\n westlons_branch5.append(westlons5[i])\r\n eastlats_branch5.append(eastlats5[i])\r\n eastlons_branch5.append(eastlons5[i])\r\n lats_branch5.append(lats5[i])\r\n lons_branch5.append(lons5[i])\r\n \r\n Ttimes_branch5.append(self.inarrays_branch5[iarray][i,2])\r\n if self.inarrays_branch5[iarray][i,3] == 0:\r\n Density_branch5.append('Light')\r\n elif self.inarrays_branch5[iarray][i,3] == 1:\r\n Density_branch5.append('Heavy')\r\n elif self.inarrays_branch5[iarray][i,3] == 9:\r\n Density_branch5.append('None')\r\n \r\n if self.inarrays_branch5[iarray][i,4] == 1:\r\n Initial_loc_branch5.append('East')\r\n elif self.inarrays_branch5[iarray][i,4] == 5:\r\n Initial_loc_branch5.append('West')\r\n \r\n if self.inarrays_branch5[iarray][i,5] == 0:\r\n solubility_branch5.append('Insoluble')\r\n elif self.inarrays_branch5[iarray][i,5] == 1:\r\n solubility_branch5.append('Soluble')\r\n \r\n if self.inarrays_branch5[iarray][i,6] == 3:\r\n flow_condition_branch5.append('> 945 cfs')\r\n elif self.inarrays_branch5[iarray][i,6] == 2:\r\n flow_condition_branch5.append('110 ~ 945 cfs')\r\n elif self.inarrays_branch5[iarray][i,6] == 1:\r\n flow_condition_branch5.append('< 110 cfs')\r\n \r\n if self.inarrays_branch5[iarray][i,7] != 0:\r\n concentration_branch5.append(\"{:.3E}\".format(Decimal(self.inarrays_branch5[iarray][i,7])))\r\n else: \r\n 
concentration_branch5.append(str(self.inarrays_branch5[iarray][i,7]))\r\n \r\n water_level_branch5.append(self.inarrays_branch5[iarray][i,8])\r\n dist_branch5.append(self.inarrays_branch5[iarray][i,9])\r\n \r\n \r\n elif self.inarrays_branch5[iarray][i,0] == 1: ## at branch 1\r\n \r\n if i in ind1:\r\n \r\n branchIDs_branch5.append(self.inarrays_branch5[iarray][i,0])\r\n SegIDs_branch5.append(self.inarrays_branch5[iarray][i,1])\r\n lines_branch5.append(lines1[i-nbr5])\r\n westlats_branch5.append(westlats1[i-nbr5])\r\n westlons_branch5.append(westlons1[i-nbr5])\r\n eastlats_branch5.append(eastlats1[i-nbr5])\r\n eastlons_branch5.append(eastlons1[i-nbr5])\r\n lats_branch5.append(lats1[i-nbr5])\r\n lons_branch5.append(lons1[i-nbr5])\r\n \r\n Ttimes_branch5.append(self.inarrays_branch5[iarray][i,2])\r\n if self.inarrays_branch5[iarray][i,3] == 0:\r\n Density_branch5.append('Light')\r\n elif self.inarrays_branch5[iarray][i,3] == 1:\r\n Density_branch5.append('Heavy')\r\n elif self.inarrays_branch5[iarray][i,3] == 9:\r\n Density_branch5.append('None')\r\n \r\n if self.inarrays_branch5[iarray][i,4] == 1:\r\n Initial_loc_branch5.append('East')\r\n elif self.inarrays_branch5[iarray][i,4] == 5:\r\n Initial_loc_branch5.append('West')\r\n \r\n if self.inarrays_branch5[iarray][i,5] == 0:\r\n solubility_branch5.append('Insoluble')\r\n elif self.inarrays_branch5[iarray][i,5] == 1:\r\n solubility_branch5.append('Soluble')\r\n \r\n if self.inarrays_branch5[iarray][i,6] == 3:\r\n flow_condition_branch5.append('> 945 cfs')\r\n elif self.inarrays_branch5[iarray][i,6] == 2:\r\n flow_condition_branch5.append('110 ~ 945 cfs')\r\n elif self.inarrays_branch5[iarray][i,6] == 1:\r\n flow_condition_branch5.append('< 110 cfs')\r\n \r\n if self.inarrays_branch5[iarray][i,7] != 0:\r\n concentration_branch5.append(\"{:.3E}\".format(Decimal(self.inarrays_branch5[iarray][i,7])))\r\n else:\r\n concentration_branch5.append(str(self.inarrays_branch5[iarray][i,7]))\r\n \r\n water_level_branch5.append(self.inarrays_branch5[iarray][i,8])\r\n dist_branch5.append(self.inarrays_branch5[iarray][i,9])\r\n \r\n\r\n #### combine all data into one big array\r\n branchIDs_combined = branchIDs_branch1 + branchIDs_branch5\r\n SegIDs_combined = SegIDs_branch1 + SegIDs_branch5 \r\n lines_combined = lines_branch1 + lines_branch5\r\n# westlats_combined = westlats_branch1 + westlats_branch5\r\n# westlons_combined = westlons_branch1 + westlons_branch5\r\n# eastlats_combined = eastlats_branch1 + eastlats_branch5\r\n# eastlons_combined = eastlons_branch1 + eastlons_branch5\r\n lats_combined = lats_branch1 + lats_branch5\r\n lons_combined = lons_branch1 + lons_branch5\r\n \r\n Ttimes_combined = Ttimes_branch1 + Ttimes_branch5\r\n Density_combined = Density_branch1 + Density_branch5\r\n Initial_loc_combined = Initial_loc_branch1 + Initial_loc_branch5\r\n solubility_combined = solubility_branch1 + solubility_branch5\r\n flow_combined = flow_condition_branch1 + flow_condition_branch5\r\n concentration_combined = concentration_branch1 + concentration_branch5 \r\n water_level_combined = water_level_branch1 + water_level_branch5 \r\n dist_combined = dist_branch1 + dist_branch5\r\n \r\n #### Create the shapefile\r\n # Create the projection\r\n spatialReference = osgeo.osr.SpatialReference()\r\n spatialReference.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')\r\n \r\n # Create the shape file\r\n outfile = r'ArcGIS_online\\%s'%shpname\r\n driver = osgeo.ogr.GetDriverByName('ESRI Shapefile')\r\n shapeData = 
driver.CreateDataSource(outfile)\r\n \r\n # Create the layer\r\n layer = shapeData.CreateLayer('Contour', spatialReference, osgeo.ogr.wkbPoint)\r\n layerDefinition = layer.GetLayerDefn()\r\n \r\n # Create fields containing segment infos\r\n field_def = osgeo.ogr.FieldDefn('BranchID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('SegID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n #field_def = osgeo.ogr.FieldDefn('Lon_east', osgeo.ogr.OFTReal)\r\n #layer.CreateField(field_def)\r\n \r\n #field_def = osgeo.ogr.FieldDefn('Lat_east', osgeo.ogr.OFTReal)\r\n #layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('T (day)', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n ## density - type: string, option: light-0, heavey-1 \r\n field_def = osgeo.ogr.FieldDefn('Density', osgeo.ogr.OFTString)\r\n layer.CreateField(field_def)\r\n \r\n ## initial release location - type: string, option: East-1, West-5\r\n field_def = osgeo.ogr.FieldDefn('Initial', osgeo.ogr.OFTString)\r\n layer.CreateField(field_def)\r\n \r\n ## solubility\r\n field_def = osgeo.ogr.FieldDefn('Solubility', osgeo.ogr.OFTString)\r\n layer.CreateField(field_def)\r\n \r\n ## flow condition\r\n field_def = osgeo.ogr.FieldDefn('Flow', osgeo.ogr.OFTString)\r\n layer.CreateField(field_def)\r\n \r\n ## concentration\r\n field_def = osgeo.ogr.FieldDefn('C (mg/L)', osgeo.ogr.OFTString)\r\n layer.CreateField(field_def)\r\n \r\n ## water surface elevation\r\n field_def = osgeo.ogr.FieldDefn('WSE (ft)', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n ## distance to WTP gate\r\n field_def = osgeo.ogr.FieldDefn('D (ft)', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n \r\n def add_feature(layer, branchID, segs, lines, lon, lat, Ttime, density, Initial_loc, solubility, flows, concentration, water_level, dist):\r\n \"\"\"\r\n function that adds feature to layer\r\n \"\"\" \r\n ctr=0\r\n for i in range(len(lines)):\r\n ctr+=1\r\n point = osgeo.ogr.Geometry(osgeo.ogr.wkbPoint)\r\n # Add points individually to the line\r\n #xy = lines[i]\r\n \r\n #line.AddPoint_2D(xy[0][0],xy[0][1])\r\n #line.AddPoint_2D(xy[1][0],xy[1][1])\r\n point.AddPoint(lon[i], lat[i])\r\n # Update the feature with the line data\r\n featureIndex = ctr\r\n feature = osgeo.ogr.Feature(layerDefinition)\r\n #feature.SetStyleString(\"PEN(c:r,w:5px)\") \r\n feature.SetGeometry(point)\r\n feature.SetFID(featureIndex)\r\n feature.SetGeometryDirectly(point)\r\n \r\n # Set the attribute table\r\n feature.SetField('BranchID', int(branchID[i])) \r\n feature.SetField('SegID', int(segs[i])) # convert to int() is necessary, osgeo cannot recognize numpy int32 type\r\n feature.SetField('Lon', \"{:.3f}\".format(lon[i]))\r\n feature.SetField('Lat', \"{:.3f}\".format(lat[i]))\r\n #feature.SetField('Lon_east', \"{:.3f}\".format(eastlon[i]))\r\n #feature.SetField('Lat_east', \"{:.3f}\".format(eastlat[i]))\r\n feature.SetField('T (day)', int(Ttime[i]))\r\n feature.SetField('Density', density[i])\r\n feature.SetField('Initial', Initial_loc[i])\r\n feature.SetField('Solubility', solubility[i])\r\n feature.SetField('Flow', flows[i])\r\n feature.SetField('C (mg/L)', concentration[i])\r\n feature.SetField('WSE (ft)', water_level[i])\r\n feature.SetField('D (ft)', dist[i])\r\n \r\n 
layer.CreateFeature(feature)\r\n \r\n \r\n add_feature(layer, branchIDs_combined, SegIDs_combined, lines_combined, \\\r\n lons_combined, lats_combined,\\\r\n Ttimes_combined, Density_combined, Initial_loc_combined, solubility_combined, \\\r\n flow_combined, concentration_combined, water_level_combined, dist_combined)",
"def _insertAllSteps(self): \n self.uMics = self.inputCoordinatesTiltedPairs.get().getUntilted().getMicrographs()\n self.tMics = self.inputCoordinatesTiltedPairs.get().getTilted().getMicrographs()\n\n self.inputMics = self._createSetOfParticles('auxMics')\n self.inputMics.copyInfo(self.uMics)\n self.inputMics.setStore(False)\n \n for micU, micT in izip(self.uMics, self.tMics):\n micU.cleanObjId()\n micT.cleanObjId()\n self.inputMics.append(micU)\n self.inputMics.append(micT)\n\n self.samplingInput = self.uMics.getSamplingRate()\n \n\n if self.downsampleType.get() != OTHER:\n # If 'same as picking' or 'original' get sampling rate from input micrographs\n #TODO: Review this when downsampling before picking is possible\n self.samplingFinal = self.samplingInput\n else:\n # If 'other' multiply the input sampling rate by the factor provided\n self.samplingFinal = self.samplingInput*self.downFactor.get()\n \n # Write pos files for each micrograph\n firstStepId = self._insertFunctionStep('writePosFilesStep')\n \n # For each micrograph insert the steps\n #run in parallel\n \n deps = []\n for mic in self.inputMics:\n localDeps = [firstStepId]\n micrographToExtract = mic.getFileName()\n micName = removeBaseExt(mic.getFileName())\n micId = mic.getObjId()\n\n # If downsample type is 'other' perform a downsample\n if self.downsampleType == OTHER:\n fnDownsampled = self._getTmpPath(micName+\"_downsampled.xmp\")\n downFactor = self.downFactor.get()\n args = \"-i %(micrographToExtract)s -o %(fnDownsampled)s --step %(downFactor)f --method fourier\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_downsample\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnDownsampled\n \n # If remove dust \n if self.doRemoveDust:\n fnNoDust = self._getTmpPath(micName+\"_noDust.xmp\")\n \n thresholdDust = self.thresholdDust.get() #TODO: remove this extra variable\n args=\" -i %(micrographToExtract)s -o %(fnNoDust)s --bad_pixels outliers %(thresholdDust)f\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_filter\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnNoDust\n \n #self._insertFunctionStep('getCTF', micId, micName, micrographToExtract)\n micName = removeBaseExt(mic.getFileName())\n \n # Actually extract\n deps.append(self._insertFunctionStep('extractParticlesStep', micId, micName, \n None, micrographToExtract, prerequisites=localDeps))\n # TODO: Delete temporary files\n \n # Insert step to create output objects \n self._insertFunctionStep('createOutputStep', prerequisites=deps)",
"def write_shapefile_branch1(self, shpname):\r\n inarrays = self.read_traveltime()\r\n \r\n Narrays = len(inarrays) \r\n \r\n \r\n westlats = []\r\n westlons = []\r\n eastlats = []\r\n eastlons = [] \r\n lines1 = []\r\n for i in range(len(self.westPnts1)):\r\n westlat, westlon = utm.to_latlon(self.westPnts1[i,0], self.westPnts1[i,1], 14, 'U')\r\n eastlat, eastlon = utm.to_latlon(self.eastPnts1[i,0], self.eastPnts1[i,1], 14, 'U')\r\n lines1.append([[westlon, westlat], [eastlon, eastlat]])\r\n westlats.append(westlat)\r\n westlons.append(westlon)\r\n eastlats.append(eastlat)\r\n eastlons.append(eastlon)\r\n \r\n # Create the projection\r\n spatialReference = osgeo.osr.SpatialReference()\r\n spatialReference.ImportFromProj4('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')\r\n \r\n # Create the shape file\r\n outfile = r'ArcGIS_online\\%s'%shpname\r\n driver = osgeo.ogr.GetDriverByName('ESRI Shapefile')\r\n shapeData = driver.CreateDataSource(outfile)\r\n \r\n # Create the layer\r\n layer = shapeData.CreateLayer('Contour', spatialReference, osgeo.ogr.wkbLineString)\r\n layerDefinition = layer.GetLayerDefn()\r\n \r\n # Create fields containing segment infos\r\n field_def = osgeo.ogr.FieldDefn('BranchID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Density', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('SegID', osgeo.ogr.OFTInteger)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon_west', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat_west', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lon_east', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Lat_east', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n field_def = osgeo.ogr.FieldDefn('Travel_T', osgeo.ogr.OFTReal)\r\n layer.CreateField(field_def)\r\n \r\n \r\n def add_feature(layer, branchID, density, lines, segs, westlon, westlat, eastlon, eastlat, Ttime):\r\n \"\"\"\r\n function that adds feature to layer\r\n \"\"\" \r\n ctr=0\r\n for i in range(len(lines)):\r\n ctr+=1\r\n line = osgeo.ogr.Geometry(osgeo.ogr.wkbLineString)\r\n # Add points individually to the line\r\n xy = lines[i]\r\n \r\n line.AddPoint_2D(xy[0][0],xy[0][1])\r\n line.AddPoint_2D(xy[1][0],xy[1][1])\r\n # Update the feature with the line data\r\n featureIndex = ctr\r\n feature = osgeo.ogr.Feature(layerDefinition)\r\n #feature.SetStyleString(\"PEN(c:r,w:5px)\") \r\n feature.SetGeometry(line)\r\n feature.SetFID(featureIndex)\r\n feature.SetGeometryDirectly(line)\r\n \r\n # Set the attribute table\r\n feature.SetField('BranchID', int(branchID)) \r\n feature.SetField('Density', int(density[i]))\r\n feature.SetField('SegID', int(segs[i])) # convert to int() is necessary, osgeo cannot recognize numpy int32 type\r\n feature.SetField('Travel_T', \"{:.1f}\".format(Ttime[i]))\r\n feature.SetField('Lon_west', \"{:.3f}\".format(westlon[i]))\r\n feature.SetField('Lat_west', \"{:.3f}\".format(westlat[i]))\r\n feature.SetField('Lon_east', \"{:.3f}\".format(eastlon[i]))\r\n feature.SetField('Lat_east', \"{:.3f}\".format(eastlat[i]))\r\n \r\n layer.CreateFeature(feature)\r\n \r\n \r\n Ttime = inarrays[0][:,2]\r\n ind0 = np.nonzero(Ttime)[0][0]\r\n ind = np.arange(ind0, Ttime.shape[0])\r\n \r\n lines1 = [lines1[i] for i in ind]*Narrays\r\n westlats = [westlats[i] for i in ind]*Narrays\r\n westlons = 
[westlons[i] for i in ind]*Narrays\r\n eastlats = [eastlats[i] for i in ind]*Narrays\r\n eastlons = [eastlons[i] for i in ind]*Narrays\r\n \r\n inarrays_new = [inarrays[i][ind,:] for i in range(Narrays)]\r\n inarrays_stack = np.vstack(inarrays_new)\r\n \r\n add_feature(layer, 1, inarrays_stack[:,3], np.asarray(lines1), inarrays_stack[:,1], \r\n np.asarray(westlons), np.asarray(westlats), \r\n np.asarray(eastlats), np.asarray(eastlons), inarrays_stack[:,2])",
"def main():\n # files\n summary_file = sys.argv[1]\n pwms_to_tfs_file = sys.argv[2]\n expressed_tfs_file = sys.argv[3] # TODO\n\n # TODO pull in num regions to resize things? but complicated with overlaps etc\n # TODO edit edges with type of interaction\n # TODO may want to color by trajectory, to demonstrate waves of trajectory\n \n # read in data\n summary = pd.read_csv(summary_file, sep=\"\\t\")\n pwms_to_tfs = pd.read_csv(pwms_to_tfs_file, sep=\"\\t\")\n pwms_to_tfs = pwms_to_tfs[pwms_to_tfs[\"expressed\"].notna()]\n pwms_to_filt_tfs = {} # dict: key - pwm names, vals - dict of ensembl ids to hgnc ids\n for line_idx in range(pwms_to_tfs.shape[0]):\n pwm_info = pwms_to_tfs.iloc[line_idx,:]\n pwm_name = pwm_info[\"hclust_model_name\"]\n pwm_to_tf = dict(zip(pwm_info[\"expressed\"].split(\";\"), pwm_info[\"expressed_hgnc\"].split(\";\")))\n pwms_to_filt_tfs[pwm_name] = pwm_to_tf\n\n \n # filter expressed hgncs for dynamic ones only\n tfs_filt = pd.read_csv(expressed_tfs_file, sep=\"\\t\", index_col=0)\n for pwm_name in pwms_to_filt_tfs.keys():\n tfs_tmp = pwms_to_filt_tfs[pwm_name]\n for ensembl_tf in tfs_tmp.keys():\n if ensembl_tf not in tfs_filt.index:\n del tfs_tmp[ensembl_tf]\n if len(tfs_tmp.keys()) == 0:\n del pwms_to_filt_tfs[pwm_name]\n pwms_to_filt_tfs[pwm_name] = tfs_tmp\n\n # add in tfs column\n tf1 = []\n for pwm in summary[\"pwm1\"]:\n tf_str = []\n for ensembl_id in pwms_to_filt_tfs[pwm]:\n tf_str.append(pwms_to_filt_tfs[pwm][ensembl_id])\n # TODO try add in max point\n expression = tfs_filt.loc[ensembl_id,:]\n max_idx = np.argmax(expression.values)\n tf_str.append(str(max_idx))\n tf_str = (\";\").join(tf_str)\n tf1.append(tf_str)\n summary[\"tf1\"] = tf1\n\n tf2 = []\n for pwm in summary[\"pwm2\"]:\n tf_str = []\n for ensembl_id in pwms_to_filt_tfs[pwm]:\n tf_str.append(pwms_to_filt_tfs[pwm][ensembl_id])\n expression = tfs_filt.loc[ensembl_id,:]\n max_idx = np.argmax(expression.values)\n tf_str.append(str(max_idx))\n tf_str = (\";\").join(tf_str)\n tf2.append(tf_str)\n summary[\"tf2\"] = tf2\n \n # remove failed rules\n summary = summary[~summary[\"interaction\"].str.contains(\"FAILED\")]\n \n # make graph\n graph = nx.from_pandas_edgelist(summary, \"tf1\", \"tf2\")\n\n # set up positions\n #pos = graphviz_layout(graph, prog=\"dot\")\n pos = graphviz_layout(graph, prog=\"neato\")\n scale_factor = 3\n for key in pos.keys():\n coords = pos[key]\n pos[key] = {\"x\": scale_factor*coords[0], \"y\": -scale_factor*coords[1]}\n nx.set_node_attributes(graph, pos, \"graphics\") # note this is diff from v1 to v2 in networkx\n \n # add graphics\n add_graphics_theme_to_nx_graph(graph)\n\n # write gml\n out_file = \"summary.gml\"\n nx.write_gml(stringize_nx_graph(graph), out_file, stringizer=str)\n\n # tfs: for each tf, get gene column\n \n \n return",
"def _insertAllSteps(self):\n \n # Get pointer to input micrographs \n self.particlePickingRun = self.xmippParticlePicking.get()\n \n copyId = self._insertFunctionStep('copyInputFilesStep')\n # Get micrographs to pick\n #self.inputMicrographs.set(self.getInputMicrographs())\n \n deps = []\n for mic in self.getInputMicrographs():\n stepId = self._insertFunctionStep('autopickMicrographStep', mic.getFileName(), prerequisites=[copyId])\n deps.append(stepId)\n \n self._insertFunctionStep('_createOutput',self._getExtraPath(), prerequisites=deps)",
"def build_wmt_ft_half(self):\n train_files = [self.data_dir + '/' + wmt_train_small]\n eval_files = [self.data_dir + '/' + wmt_test_large]\n\n train_data = tf.data.experimental.CsvDataset(\n train_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n eval_data = tf.data.experimental.CsvDataset(\n eval_files,\n record_defaults=[tf.string, tf.string],\n field_delim='\\t',\n use_quote_delim=False)\n\n eval_data = eval_data.cache()\n train_data = train_data.cache() # only read once\n\n def to_features_dict(eng, rus):\n return {'inputs': eng, 'targets': rus}\n\n train_data = train_data.map(to_features_dict)\n eval_data = eval_data.map(to_features_dict)\n\n self.default_builder_obj = None\n return train_data, eval_data",
"def runLNFL(self):\n\n if 'allT5' not in dir(self):\n self.allT5 = sorted(glob.glob('%s/TAPE5_*' % self.dirT5))\n\n # set up the input directory\n self.makeLinks(self.pathLNFL, 'lnfl')\n tapeStrList = ['TAPE1', 'TAPE5']\n self.cleanUp()\n\n # loop through each HITRAN molecule and create an associated TAPE5\n for iMol, mol in enumerate(self.mols):\n base = os.path.basename(mol)\n print(base)\n tape5 = self.allT5[iMol]\n\n if self.isoH2O:\n # there are multiple line files to consider for H2O\n isoStr = ['01_h2o_161_only', '01_h2o_162_excl', \\\n '01_h2o_162_only', '01_h2o_171_only', '01_h2o_172_only', \\\n '01_h2o_181_only', '01_h2o_182_only']\n tape1List = ['%s/%s' % (mol, iso) for iso in isoStr]\n else:\n tape1List = ['%s/%s' % (mol, base)]\n # endif WV\n\n # loop really only exists for H2O\n for tape1 in tape1List:\n tapeList = [tape1, tape5]\n\n # grab the line coupling file if necessary\n if base in ['02_CO2', '06_CH4', '07_O2']:\n tape2 = '%s/lncpl_lines' % mol\n tapeList.append(tape2)\n tapeStrList.append('TAPE2')\n # endif line coupling\n\n # stage the files necessary for an LNFL run\n for source, target in zip(tapeList, tapeStrList):\n self.makeLinks(source, target)\n\n # call LNFL and save TAPE3 to unique name\n sub.call(['lnfl'])\n if self.isoH2O:\n tape3 = '%s/TAPE3_%s' % (mol, os.path.basename(tape1))\n else:\n tape3 = '%s/TAPE3_%s' % (mol, base)\n # endif wv\n if os.path.exists(tape3):\n print('WARNING: overwriting %s' % tape3)\n os.rename('TAPE3', tape3)\n\n # clean up\n self.cleanUp()\n # end TAPE1 loop\n\n #self.cleanUp()\n # if we're doing WV isotopologues, *only* do them\n if self.isoH2O: return\n # end molecule loop\n\n return",
"def make_flats(side='blue',overwrite=False):\r\n\r\n iraf.unlearn('flatcombine')\r\n iraf.flatcombine.ccdtype = \"\"\r\n iraf.flatcombine.process = \"no\"\r\n iraf.flatcombine.subsets = \"no\"\r\n iraf.flatcombine.rdnoise = \"RON\"\r\n iraf.flatcombine.gain = \"GAIN\"\r\n for aperture in ['0.5', '1.0', '1.5', '2.0']:\r\n flats = find_flats(aperture, side=side)\r\n if len(flats) > 0:\r\n if overwrite:\r\n iraf.delete('flat_%s_%s.fits' % (side, aperture), verify='no')\r\n iraf.delete('temp.fits' , verify='no')\r\n iraf.delete('tempsmooth.fits', verify='no')\r\n iraf.delete('norm_temp.fits', verify='no')\r\n # normalize the flat\r\n if side == 'blue': \r\n if len(flats) < 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='pclip')\r\n if len(flats) >= 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='avsigclip') \r\n iraf.twodspec.longslit.dispaxis = 2\r\n # iraf.unlearn('response')\r\n # iraf.response.function = 'legendre'\r\n iraf.response.order = 100\r\n # iraf.response.high_rej = 5\r\n # iraf.response.low_rej = 2\r\n # iraf.response.niterate = 10\r\n # iraf.response('temp[0]', 'temp[0]',\r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # os.rename('temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n iraf.imfilter.boxcar('temp', 'tempsmooth', xwindow='1', ywindow='500')\r\n iraf.imarith('temp', '/', 'tempsmooth', 'norm_temp.fits')\r\n iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n os.rename('norm_temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n else:\r\n if len(flats) < 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='pclip')\r\n if len(flats) >= 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='avsigclip') \r\n iraf.twodspec.longslit.dispaxis = 1\r\n iraf.unlearn('response')\r\n iraf.response.function = \"spline3\" \r\n iraf.response.order = 100\r\n iraf.response.high_rej = 3\r\n iraf.response.low_rej = 3\r\n iraf.response.niterate = 3\r\n iraf.response('temp[0]', 'temp[0]',\r\n 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n os.rename('temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n # iraf.unlearn('response')\r\n # iraf.response.function = \"spline3\"\r\n # iraf.response.order = 100\r\n # iraf.response.niterate = 3\r\n # iraf.response.low_rej = 3\r\n # iraf.response.high_rej = 3\r\n # if side == 'blue':\r\n # iraf.twodspec.longslit.dispaxis = 2\r\n # else:\r\n # iraf.twodspec.longslit.dispaxis = 1\r\n \r\n\r\n # measure flat-field error from sigma images\r\n iraf.unlearn('imcombine')\r\n iraf.imcombine.reject = 'avsigclip'\r\n iraf.imcombine(','.join(flats), output='flat', sigma='sigma', scale='mode')\r\n iraf.imarith('sigma', '/', 'flat', 'frac')\r\n s = iraf.imstat('frac.fits', fields=\"mean\", nclip=20, Stdout=1, format=\"no\")\r\n print 'Flat field error: ', np.float(s[0])\r\n iraf.delete('flat.fits', verify=\"no\")\r\n iraf.delete('sigma.fits', verify=\"no\")\r\n iraf.delete('frac.fits', verify=\"no\")\r\n else:\r\n print \"No dome or internal flats for the %s arcsec slit.\" % aperture",
"def merge_light_catalogue():\n output_filename = os.path.join(constants.DESTINATION,\n 'concatenated',\n 'iphas-dr2-light.fits')\n\n instring = ''\n for lon in np.arange(25, 215+1, constants.STRIPWIDTH):\n for part in ['a', 'b']:\n path = os.path.join(constants.DESTINATION,\n 'concatenated',\n 'light',\n 'iphas-dr2-{0:03d}{1}-light.fits'.format(\n lon, part))\n instring += 'in={0} '.format(path)\n\n # Warning: a bug in stilts causes long fieldIDs to be truncated if -utype S15 is not set\n param = {'stilts': constants.STILTS,\n 'in': instring,\n 'out': output_filename}\n\n cmd = '{stilts} tcat {in} countrows=true lazy=true ofmt=colfits-basic out={out}'\n mycmd = cmd.format(**param)\n log.debug(mycmd)\n status = os.system(mycmd)\n log.info('concat: '+str(status))\n\n return status",
"async def infer_shape_env_add(track, env1, env2):\n return NOSHAPE",
"def combine_catchments(catchmentfile, flowfile, elevationfile, comid, \n output = None, overwrite = False, verbose = True):\n\n t0 = time.time()\n numpy.seterr(all = 'raise')\n\n if output is None: output = os.getcwd() + r'\\combined'\n\n if os.path.isfile(output + '.shp') and not overwrite:\n if verbose: print('combined catchment shapefile %s exists' % output)\n return\n \n if verbose: print('combining catchments from %s\\n' % catchmentfile)\n\n # start by copying the projection files\n\n shutil.copy(catchmentfile + '.prj', output + '.prj')\n\n # load the catchment and flowline shapefiles\n\n c = Reader(catchmentfile, shapeType = 5)\n f = Reader(flowfile, shapeType = 3)\n\n # make lists of the comids and featureids\n\n featureid_index = c.fields.index(['FEATUREID', 'N', 9, 0]) - 1\n comid_index = f.fields.index(['COMID', 'N', 9, 0]) - 1\n\n featureids = [r[featureid_index] for r in c.records()]\n comids = [r[comid_index] for r in f.records()]\n\n # check that shapes are traceable--don't have multiple points and start\n # and end at the same place--then make an appropriate list of shapes\n # and records--note it's more memory efficient to read one at a time\n\n n = len(c.records())\n shapes = []\n records = [] \n bboxes = []\n\n try: \n for i in range(n):\n catchment = c.shape(i)\n record = c.record(i)\n\n shape_list = format_shape(catchment.points)\n for s in shape_list:\n shapes.append(s)\n records.append(record)\n bboxes.append(catchment.bbox)\n\n try: combined = combine_shapes(shapes, bboxes, verbose = verbose)\n except: combined = combine_shapes(shapes, bboxes, skip = True, \n verbose = verbose)\n\n except: \n shapes = []\n records = [] \n bboxes = []\n for i in range(n):\n catchment = c.shape(i)\n record = c.record(i)\n\n shape_list = format_shape(catchment.points, omit = True)\n for s in shape_list:\n shapes.append(s)\n records.append(record)\n bboxes.append(catchment.bbox)\n\n try: combined = combine_shapes(shapes, bboxes, verbose = verbose)\n except: combined = combine_shapes(shapes, bboxes, skip = True,\n verbose = verbose)\n\n # iterate through the catchments and get the elevation data from NED\n # then estimate the value of the overland flow plane length and slope\n\n lengths = numpy.empty((n), dtype = 'float')\n slopes = numpy.empty((n), dtype = 'float')\n\n for i in range(n):\n catchment = c.shape(i)\n flowline = f.shape(comids.index(featureids[i]))\n\n catchpoints = get_raster_on_poly(elevationfile, catchment.points,\n verbose = verbose)\n catchpoints = numpy.array([p for p in catchpoints])\n\n zs = get_raster(elevationfile, flowline.points)\n\n flowpoints = numpy.array([[p[0], p[1], z] \n for p, z in zip(flowline.points, zs)])\n\n # iterate through the raster values and find the closest flow point\n\n closest = numpy.empty((len(catchpoints), 3), dtype = 'float')\n\n for point, j in zip(catchpoints, range(len(catchpoints))):\n closest[j] = flowpoints[numpy.dot(flowpoints[:, :2], \n point[:2]).argmin()]\n\n # estimate the slope and overland flow plane length\n\n length, slope = get_overland_vector(catchpoints, closest)\n\n if verbose: print('avg slope and length =', slope.mean(), length.mean())\n\n lengths[i], slopes[i] = length.mean(), slope.mean()\n\n if verbose: print('\\nfinished overland flow plane calculations\\n')\n\n # get area of the subbasin from the catchment metadata\n\n areasq_index = c.fields.index(['AreaSqKM', 'N', 19, 6]) - 1\n areas = numpy.array([r[areasq_index] for r in c.records()])\n\n # take the area weighted average of the slopes and flow lengths\n\n 
tot_area = round(areas.sum(), 2)\n avg_length = round(1000 * numpy.sum(areas * lengths) / tot_area, 1)\n avg_slope = round(numpy.sum(areas * slopes) / tot_area, 4)\n\n # get the centroid and the average elevation\n\n combined = [[float(x), float(y)] for x, y in combined]\n centroid = get_centroid(numpy.array(combined))\n\n Cx, Cy = round(centroid[0], 4), round(centroid[1], 4)\n\n elev_matrix, origin = get_raster_in_poly(elevationfile, combined, \n verbose = verbose)\n\n elev_matrix = elev_matrix.flatten()\n elev_matrix = elev_matrix[elev_matrix.nonzero()]\n \n avg_elev = round(elev_matrix.mean() / 100., 2)\n\n # write the data to the shapefile\n\n w = Writer(shapeType = 5)\n\n fields = [['ComID', 'N', 9, 0],\n ['PlaneLenM', 'N', 8, 2],\n ['PlaneSlope', 'N', 9, 6],\n ['AreaSqKm', 'N', 10, 2],\n ['CenX', 'N', 12, 6],\n ['CenY', 'N', 12, 6],\n ['AvgElevM', 'N', 8, 2]]\n\n record = [comid, avg_length, avg_slope, tot_area, Cx, Cy, avg_elev]\n\n for field in fields: w.field(*field)\n \n w.record(*record)\n \n w.poly(shapeType = 5, parts = [combined])\n\n w.save(output)\n\n if verbose: print('\\ncompleted catchment combination in %.1f seconds\\n' % \n (time.time() - t0))",
"def phase_two_data():\n from pathlib import Path\n try:\n import cPickle as pickle\n except ImportError:\n import pickle\n \n from annotation import parse_fulltext\n from features import ALL_FEATURES\n \n from feature_template import apply_templates\n from feature_selection import filter_by_frequency\n from feature_encoding import encode\n\n # Feature templates considered if heading by 1:\n # ----------------------------\n # Position + Voice\n # Path length + Clause layer\n # 1 Predicate + Path\n # Path + Position + Voice\n # Path + Position + Voice + Predicate\n # 1 Head word stem + Predicate\n # 1 Head word stem + Predicate + Path\n # 1 Head word stem + Phrase\n # Clause layer + Position + Predicate\n templates = [tuple([f.name]) for f in ALL_FEATURES] + \\\n [('path_to_frame', 'frame'), ('head_stem', 'frame'), ('head_stem', 'frame', 'path_to_frame'), ('head_stem', 'phrase_type')]\n \n size = 40\n instances = []\n for i, p in enumerate(Path(\"/cs/fs2/home/hxiao/Downloads/fndata-1.5/fulltext/\").glob(\"*.xml\")):\n if i == size:\n break\n sys.stderr.write(\"Processing file: '%s'\\n\" %p.absolute())\n annotations = parse_fulltext(str(p.absolute()))\n instances += make_training_data(ALL_FEATURES, annotations)\n\n sys.stderr.write(\"Feature selection...\\n\")\n x, y = zip(*instances)\n x = apply_templates(x, templates)\n features = filter_by_frequency(x, 5)\n sys.stderr.write(\"Feature encoding...\\n\")\n x, feature_map = encode(x, features)\n \n sys.stderr.write(\"Dumping data...\\n\") \n pickle.dump((x, y, ALL_FEATURES, templates, feature_map), open('dump/test_data.pkl', 'w'))\n import pdb\n pdb.set_trace()\n print len(instances)",
"def main(ft_setups, ft_strategies):\n\n num_procs = 16\n\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1e-09\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize space transfer parameters\n space_transfer_params = dict()\n space_transfer_params['finter'] = True\n space_transfer_params['rorder'] = 2\n space_transfer_params['iorder'] = 6\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = [3]\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 30\n\n for setup in ft_setups:\n if setup == 'HEAT':\n # initialize problem parameters\n problem_params = dict()\n problem_params['nu'] = 0.5\n problem_params['freq'] = 1\n problem_params['nvars'] = [255, 127]\n problem_params['bc'] = 'dirichlet-zero'\n\n level_params['dt'] = 0.5\n\n space_transfer_params['periodic'] = False\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = heatNd_forced # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 8.0\n\n elif setup == 'ADVECTION':\n # initialize problem parameters\n problem_params = dict()\n problem_params['c'] = 1.0\n problem_params['nvars'] = [256, 128]\n problem_params['freq'] = 2\n problem_params['order'] = 2\n problem_params['bc'] = 'periodic' # boundary conditions\n\n level_params['dt'] = 0.125\n\n space_transfer_params['periodic'] = True\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = advectionNd # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = generic_implicit # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 2.0\n\n else:\n raise NotImplementedError('setup not implemented')\n\n # do a reference run without any faults to see how things would look like (and to get maxiter/ref_niter)\n ft.strategy = 'NOFAULT'\n\n controller = controller_nonMPI_hard_faults(\n num_procs=num_procs, controller_params=controller_params, description=description\n )\n\n # get initial values on finest level\n P = controller.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', 
sortby='process')\n ref_niter = max([item[1] for item in sortedlist_stats])\n\n print('Will sweep over %i steps and %i iterations now...' % (num_procs, ref_niter))\n\n # loop over all strategies\n for strategy in ft_strategies:\n ft_iter = range(1, ref_niter + 1)\n ft_step = range(0, num_procs)\n\n print('------------------------------------------ working on strategy ', strategy)\n\n iter_count = np.zeros((len(ft_step), len(ft_iter)))\n\n # loop over all steps\n xcnt = -1\n for step in ft_step:\n xcnt += 1\n\n # loop over all iterations\n ycnt = -1\n for iter in ft_iter:\n ycnt += 1\n\n ft.hard_step = step\n ft.hard_iter = iter\n ft.strategy = strategy\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n niter = max([item[1] for item in sortedlist_stats])\n iter_count[xcnt, ycnt] = niter\n\n print(iter_count)\n\n np.savez(\n 'data/' + setup + '_results_hf_' + strategy,\n iter_count=iter_count,\n description=description,\n ft_step=ft_step,\n ft_iter=ft_iter,\n )",
"def mk_planeflight_files4sites(testing_mode=False):\n # Location of flight data\n TYPE = ['CVO{}'.format(i) for i in range(1, 8)]\n #\n sdate = datetime.datetime(2015, 1, 1,)\n edate = datetime.datetime(2021, 1, 1,)\n dates = pd.date_range(sdate, edate, freq='T')\n # Get list of species\n num_tracers = 203\n slist = get_planeflight_slist2output(num_tracers=num_tracers)\n\n # for each location make a DataFrame, then conbime\n dfs = []\n for n, type_ in enumerate(TYPE):\n # Get locations\n LON, LAT, ALT = AC.get_loc(type_)\n PRESS = 1013.25 # AC.hPa_to_Km([ALT/1E3], reverse=True, )\n print(n, type_, LON, LAT, ALT)\n # dictionary of data\n nvar = len(dates)\n d = {\n 'datetime': dates, 'LAT': [LAT]*nvar, 'LON': [LON]*nvar,\n 'TYPE': [type_]*nvar, 'PRESS': [PRESS]*nvar}\n dfs += [pd.DataFrame(d, index=np.arange(nvar)+(n*1E6))]\n # combine all TYPE (sites) and sort by date\n df = pd.concat(dfs).sort_values('datetime', ascending=True)\n\n # Now print as files\n AC.prt_PlaneFlight_files_v12_plus(df=df, slist=slist,\n Extra_spacings=Extra_spacings)",
"def concatenate_sft_files(self):\n\n SFTFilename = (\n f\"{self.detectors[0]}-{self.nsfts}_{self.detectors}_{self.Tsft}SFT_mfdv4\"\n )\n # We don't try to reproduce the NB filename convention exactly,\n # as there could be always rounding offsets with the number of bins,\n # instead we use wildcards there.\n outfreq = int(np.floor(self.fmin))\n outwidth = int(np.floor(self.Band))\n SFTFilename += f\"_NBF{outfreq:04d}Hz*W{outwidth:04d}Hz*\"\n SFTFilename += f\"-{self.tstart}-{self.duration}.sft\"\n SFTFile_fullpath = os.path.join(self.outdir, SFTFilename)\n if os.path.isfile(SFTFile_fullpath):\n logger.info(\n f\"Removing previous file(s) {SFTFile_fullpath} (no caching implemented).\"\n )\n os.remove(SFTFile_fullpath)\n\n inpattern = os.path.join(self.tmp_outdir, \"*sft\")\n cl_splitSFTS = \"lalpulsar_splitSFTs\"\n cl_splitSFTS += \" -fs {} -fb {} -fe {} -n {} -- {}\".format(\n self.fmin, self.Band, self.fmin + self.Band, self.outdir, inpattern\n )\n utils.run_commandline(cl_splitSFTS)\n utils.run_commandline(f\"rm -r {self.tmp_outdir}\")\n outglob = glob.glob(SFTFile_fullpath)\n if len(outglob) != 1:\n raise IOError(\n \"Expected to produce exactly 1 merged file\"\n f\" matching pattern '{SFTFile_fullpath}',\"\n f\" but got {len(outglob)} matches: {outglob}\"\n \" Something went wrong!\"\n )\n self.sftfilepath = outglob[0]\n logger.info(f\"Successfully wrote SFTs to: {self.sftfilepath}\")",
"def _add_fuses(self):\r\n fuse_list = self.model.get_all_fuses()\r\n\r\n for fuse in fuse_list:\r\n self._add_fuse(fuse)",
"def infer(self):\r\n for i in range(6):\r\n count_before = len(self.graph.nodes)\r\n\r\n self.graph.cleanup().toposort()\r\n try:\r\n for node in self.graph.nodes:\r\n for o in node.outputs:\r\n o.shape = None\r\n model = gs.export_onnx(self.graph)\r\n model = shape_inference.infer_shapes(model)\r\n self.graph = gs.import_onnx(model)\r\n except Exception as e:\r\n log.info(\"Shape inference could not be performed at this time:\\n{}\".format(e))\r\n try:\r\n self.graph.fold_constants(fold_shapes=True)\r\n except TypeError as e:\r\n log.error(\"This version of ONNX GraphSurgeon does not support folding shapes, please upgrade your \"\r\n \"onnx_graphsurgeon module. Error:\\n{}\".format(e))\r\n raise\r\n\r\n count_after = len(self.graph.nodes)\r\n if count_before == count_after:\r\n # No new folding occurred in this iteration, so we can stop for now.\r\n break",
"def explode_shapes(self, shp, out_folder):\n gdf = gpd.read_file(str(shp))\n gdf = gdf[[f'ADM{self.level}', f'ADM{self.level}_id', 'geometry']]\n for row in gdf.iterrows():\n adm_df = gpd.GeoDataFrame({f'ADM{self.level}': [row[1][0]], f'ADM{self.level}_id': [row[1][1]], 'geometry': [row[1][2]]})\n if self.level == '1':\n name = row[1][0]\n print(name)\n else:\n name = row[1][0]\n print(name)\n if '/' in name:\n name = name.replace('/', '')\n #out_name = (out_folder.joinpath(\"{0}.shp\".format(row[1][f\"ADM{self.level}\"])))\n adm_id = str(row[1][1])\n out_name = out_folder.joinpath(f'{name}_{adm_id}.shp') \n adm_df.to_file(out_name)",
"def _text_write_preprocess(self):\n self.check()\n\n max_name_len = np.max([len(name) for name in self.name])\n fieldtypes = [\"U\" + str(max_name_len), \"f8\", \"f8\"]\n comp_names = self._get_lon_lat_component_names()\n frame_obj = self._get_frame_obj()\n frame_desc_str = _get_frame_desc_str(frame_obj)\n\n component_fieldnames = []\n for comp_name in comp_names:\n # This will add e.g. ra_J2000 and dec_J2000 for FK5\n component_fieldnames.append(comp_name + \"_\" + frame_desc_str)\n fieldnames = [\"source_id\"] + component_fieldnames\n stokes_names = [\"I\", \"Q\", \"U\", \"V\"]\n fieldshapes = [()] * 3\n\n if self.stokes_error is not None:\n stokes_error_names = [(f\"{k}_error\") for k in [\"I\", \"Q\", \"U\", \"V\"]]\n\n n_stokes = 0\n stokes_keep = []\n for si, total in enumerate(np.nansum(self.stokes.to(\"Jy\"), axis=(1, 2))):\n if total > 0:\n fieldnames.append(stokes_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n if self.stokes_error is not None:\n fieldnames.append(stokes_error_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n n_stokes += 1\n stokes_keep.append(total > 0)\n\n assert n_stokes >= 1, \"No components with nonzero flux.\"\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n fieldnames.append(\"subband_frequency\")\n else:\n fieldnames.append(\"frequency\")\n fieldtypes.append(\"f8\")\n fieldshapes.extend([(self.Nfreqs,)])\n elif self.reference_frequency is not None:\n fieldnames.extend([(\"reference_frequency\")])\n fieldtypes.extend([\"f8\"])\n fieldshapes.extend([()] * n_stokes + [()])\n if self.spectral_index is not None:\n fieldnames.append(\"spectral_index\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_rise_lst\"):\n fieldnames.append(\"rise_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_set_lst\"):\n fieldnames.append(\"set_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n dt = np.dtype(list(zip(fieldnames, fieldtypes, fieldshapes)))\n\n arr = np.empty(self.Ncomponents, dtype=dt)\n arr[\"source_id\"] = self.name\n\n for comp_ind, comp in enumerate(comp_names):\n arr[component_fieldnames[comp_ind]] = getattr(self.skycoord, comp).deg\n\n for ii in range(4):\n if stokes_keep[ii]:\n arr[stokes_names[ii]] = self.stokes[ii].T.to(\"Jy\").value\n if self.stokes_error is not None:\n arr[stokes_error_names[ii]] = self.stokes_error[ii].T.to(\"Jy\").value\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n arr[\"subband_frequency\"] = self.freq_array.to(\"Hz\").value\n else:\n arr[\"frequency\"] = self.freq_array.to(\"Hz\").value\n elif self.reference_frequency is not None:\n arr[\"reference_frequency\"] = self.reference_frequency.to(\"Hz\").value\n if self.spectral_index is not None:\n arr[\"spectral_index\"] = self.spectral_index\n\n if hasattr(self, \"_rise_lst\"):\n arr[\"rise_lst\"] = self._rise_lst\n if hasattr(self, \"_set_lst\"):\n arr[\"set_lst\"] = self._set_lst\n\n return arr",
"def features_combine():\n\n\n\t# PROCESSING AUDIO",
"def main(name, line1, line2, orbital_filename):\n #name = \"TERRA\"\n #line1 = \"1 25994U 99068A 16048.43680378 .00000258 00000-0 67198-4 0 9999\"\n #line2 = \"2 25994 98.1982 124.4247 0001352 105.3907 254.7441 14.57126067859938\"\n satellite = ephem.readtle(name, line1, line2)\n \n\n # Landsat 8\n #name = \"Landsat8\"\n #line1=\"1 39084U 13008A 16051.82349873 .00000188 00000-0 51829-4 0 9999\"\n #line2=\"2 39084 98.1988 123.2603 0001265 89.4360 270.6984 14.57110027160810\"\n #LD8 = ephem.readtle(name, line1, line2)\n \n\n sun = ephem.Sun()\n fov = np.radians(68.6)\n\n \"\"\"\n Make pandas dataframe to store swath information\n \"\"\"\n import pandas as pd\n data = {\"DateTime\": [],\"DOY\":[],\"Month\": [],\n \"orbit_id\":[], \"ground_lat\": [], \n \"ground_lon\": [], \"swath_width\": []}\n swaths = pd.DataFrame(data)\n swaths.set_index(keys=\"DateTime\")\n # generate shapefile\n\n orbit_id = 0\n # need to do splitted by hemisphere unfortunately..\n for orbit in make_an_orbit(satellite):\n #import pdb; pdb.set_trace()\n if len(orbit) > 1:\n \"\"\"\n So worth doing processing on orbit...\n\n \"\"\"\n sun = ephem.Sun()\n\n print(orbit[0].datetime)\n\n for overpass in orbit:\n overpass.only_daytime_overpasses(sun)\n overpass.derive_swath_width(fov)\n \"\"\"\n Create a tempoary dataframe for this orbit\n \"\"\"\n epoch = datetime.datetime(1970, 1, 1)\n #import pdb; pdb.set_trace()\n tmp_d = {\"DateTime\": [(o.datetime - epoch).total_seconds() for o in orbit],\n \"DOY\":[int(o.datetime.strftime('%j')) for o in orbit],\n \"Month\": [o.datetime.month for o in orbit],\n \"orbit_id\": orbit_id * np.ones(len(orbit)),\n \"ground_lat\": [o.lat for o in orbit],\n \"ground_lon\": [o.long for o in orbit],\n \"swath_width\": [o.swath_width for o in orbit]}\n tmp = pd.DataFrame(tmp_d)\n tmp.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n orbit_id +=1 \n \"\"\"\n Append to main dataframe\n \"\"\"\n swaths = swaths.append(tmp)\n #swaths.set_index(keys=\"DateTime\")\n\n \"\"\"\n Save the DataFrame to a file\n \"\"\"\n swaths = swaths.set_index(keys=\"DateTime\")\n #swaths.set_index(keys=\"DateTime\")\n #import pdb; pdb.set_trace()\n swaths.to_csv(orbital_filename, header=True)",
"def generate(pts):\n cmds.polyCreateFacet(name=\"shirt\", p=points)\n cmds.polyTriangulate()\n cmds.polySubdivideFacet(dv=SUBDIVISIONS)\n cmds.polyTriangulate()",
"def ecmwf2srf(tfrom, tstop, blcorner, trcorner, dataset, stream, root_name):\n\n # Arbitrary 6-day period\n period = build_period(tfrom, tstop)\n\n # arbitrary box (small pieve of the atlantic side of Mexico)\n modelbox = build_box(blcorner, trcorner)\n\n\n req, file_type, download = build_data_request(modelbox, period,\n dataset,\n stream,\n root_name)\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n write_file(req, file_type, download, dataset, stream,\n period, root_name)",
"def main(FLAGS):\n if FLAGS.format == 'tfrecords':\n raise NotImplementedError\n else:\n # get the names of the train image files\n train_files = txt2list(FLAGS.train_file_names)\n train_limit = floor(FLAGS.train_fraction * FLAGS.n_train)\n train_count = 0\n train_full = False\n\n # get the names of the validation image files\n valid_files = txt2list(FLAGS.valid_file_names)\n valid_limit = floor(FLAGS.valid_fraction * FLAGS.n_valid)\n valid_count = 0\n valid_full = False\n\n # get the names of the test image files\n test_files = txt2list(FLAGS.test_file_names)\n test_limit = floor(FLAGS.test_fraction * FLAGS.n_test)\n test_count = 0\n test_full = False\n\n # accumulators for the image and annotation pairs\n train_windows_with = []\n valid_windows_with = []\n test_windows_with = []\n train_windows_without = []\n valid_windows_without = []\n test_windows_without = []\n train_locations = []\n valid_locations = []\n test_locations = []\n\n # directories of sensor data and annotations\n sub_dirs = glob(os.path.join(FLAGS.satnet_data_dir, '*'))\n\n # go through each sensor collection from each site and prepare\n # the training, validation, and testing sub-windows\n for dir in sub_dirs:\n if train_full and valid_full and test_full:\n pass\n else:\n img_files = glob(os.path.join(dir, 'ImageFiles', '*.fits'))\n json_files = glob(os.path.join(dir, 'Annotations', '*.json'))\n\n # get only the name of the .json file w/o extension\n json_names = [file.split(\"\\\\\")[-1] for file in json_files]\n json_names = [name.split(\".json\")[0] for name in json_names]\n\n # get only the name of the .fits file w/o extension\n img_names = [file.split(\"\\\\\")[-1] for file in img_files]\n img_names = [name.split(\".fits\")[0] for name in img_names]\n\n # in case some annotations/images aren't paired, find the\n # common .json and .fits files names\n similar_files = set(img_names).intersection(json_names)\n\n # prepare the new images and annotations via the sliding-window\n # algorithm\n for file in similar_files:\n if train_full and valid_full and test_full:\n pass\n else:\n # load SatNet image and its corresponding annotations\n img_path = os.path.join(dir, 'ImageFiles', file + '.fits')\n anno_path = os.path.join(dir, 'Annotations', file + '.json')\n image = SatelliteImage(img_path)\n anno = ImageAnnotations(anno_path)\n\n # find the data partition this example belongs to and add\n # that data to the accumulators\n comp_name = '_'.join([anno.directory, anno.name])\n\n # pull all object centroids in the image and store in a list\n centroids = []\n [centroids.append([obj.y_c, obj.x_c]) for obj in anno.objects]\n\n # run sliding window algorithm across the image\n sw = SatNetSubWindows(img=image.image,\n centroids=centroids,\n window_size=FLAGS.window_size,\n stride=FLAGS.stride,\n padding=FLAGS.padding,\n img_width=FLAGS.width,\n img_height=FLAGS.height)\n sw.get_obj_windows()\n\n # find how many background windows to include from the image\n # and generate that many number of random indices to pull\n # them\n if sw.windows_with is not None:\n n_with = sw.windows_with.shape[0]\n n_without = int(FLAGS.bg2sat_ratio * n_with)\n else:\n n_without = int(FLAGS.bg2sat_ratio)\n inds = np.random.permutation(sw.windows_without.shape[0])\n inds = inds[:n_without]\n\n # determine the status of the accumulators\n if train_count >= train_limit:\n train_full = True\n if valid_count >= valid_limit:\n valid_full = True\n if test_count >= test_limit:\n test_full = True\n\n # accumulate sub-windows into the three data\n # 
partitions\n if comp_name in train_files and not train_full:\n if sw.windows_with is not None:\n train_windows_with.append(sw.windows_with)\n train_locations.append(sw.object_location_with)\n train_windows_without.append(sw.windows_without[inds, :, :])\n train_count += 1\n elif comp_name in valid_files and not valid_full:\n if sw.windows_with is not None:\n valid_windows_with.append(sw.windows_with)\n valid_locations.append(sw.object_location_with)\n valid_windows_without.append(sw.windows_without[inds, :, :])\n valid_count += 1\n elif comp_name in test_files and not test_full and FLAGS.save_test:\n if sw.windows_with is not None:\n test_windows_with.append(sw.windows_with)\n test_locations.append(sw.object_location_with)\n test_windows_without.append(sw.windows_without[inds, :, :])\n test_count += 1\n else:\n print('Windows belong to a filled accumulator... skipped them.')\n pass\n print('Accumulators: train - {}% , valid - {}% , test - {}%'.format(\n int(train_count / train_limit * 100),\n int(valid_count / valid_limit * 100),\n int(test_count / test_limit * 100)))\n\n # combine all of the sub-windows and annotations for each data\n # partition\n train_windows_with = np.concatenate(train_windows_with)\n train_windows_without = np.concatenate(train_windows_without)\n train_locations = np.concatenate(train_locations)\n train_annos_with = np.ones(train_windows_with.shape[0])\n train_annos_without = np.zeros(train_windows_without.shape[0])\n valid_windows_with = np.concatenate(valid_windows_with)\n valid_windows_without = np.concatenate(valid_windows_without)\n valid_locations = np.concatenate(valid_locations)\n valid_annos_with = np.ones(valid_windows_with.shape[0])\n valid_annos_without = np.zeros(valid_windows_without.shape[0])\n\n if FLAGS.save_test:\n test_windows_with = np.concatenate(test_windows_with)\n test_windows_without = np.concatenate(test_windows_without)\n test_locations = np.concatenate(test_locations)\n test_annos_with = np.ones(test_windows_with.shape[0])\n test_annos_without = np.zeros(test_windows_without.shape[0])\n\n train_windows = np.concatenate((train_windows_with, train_windows_without))\n train_annos = np.concatenate((train_annos_with, train_annos_without))\n valid_windows = np.concatenate((valid_windows_with, valid_windows_without))\n valid_annos = np.concatenate((valid_annos_with, valid_annos_without))\n\n if FLAGS.save_test:\n test_windows = np.concatenate((test_windows_with, test_windows_without))\n test_annos = np.concatenate((test_annos_with, test_annos_without))\n\n path_append = '_seedNet2satNet_windowsize_{}_stride_{}_padding_{}_ratio_{}_trainfraction_{}.h5'.format(FLAGS.window_size, FLAGS.stride, FLAGS.padding, FLAGS.bg2sat_ratio, FLAGS.train_fraction)\n train_c_windows_path = os.path.join(FLAGS.save_data_dir, 'train_classification_windows' + path_append)\n train_c_labels_path = os.path.join(FLAGS.save_data_dir, 'train_classification_labels' + path_append)\n train_l_windows_path = os.path.join(FLAGS.save_data_dir, 'train_localization_windows' + path_append)\n train_l_labels_path = os.path.join(FLAGS.save_data_dir, 'train_localization_labels' + path_append)\n valid_c_windows_path = os.path.join(FLAGS.save_data_dir, 'valid_classification_windows' + path_append)\n valid_c_labels_path = os.path.join(FLAGS.save_data_dir, 'valid_classification_labels' + path_append)\n valid_l_windows_path = os.path.join(FLAGS.save_data_dir, 'valid_localization_windows' + path_append)\n valid_l_labels_path = os.path.join(FLAGS.save_data_dir, 'valid_localization_labels' + 
path_append)\n\n if FLAGS.save_test:\n test_c_windows_path = os.path.join(FLAGS.save_data_dir, 'test_classification_windows' + path_append)\n test_c_labels_path = os.path.join(FLAGS.save_data_dir, 'test_classification_labels' + path_append)\n test_l_windows_path = os.path.join(FLAGS.save_data_dir, 'test_localization_windows' + path_append)\n test_l_labels_path = os.path.join(FLAGS.save_data_dir, 'test_localization_labels' + path_append)\n\n write_hdf5(train_c_windows_path, train_windows)\n write_hdf5(train_c_labels_path, train_annos)\n write_hdf5(train_l_windows_path, train_windows_with)\n write_hdf5(train_l_labels_path, train_locations)\n write_hdf5(valid_c_windows_path, valid_windows)\n write_hdf5(valid_c_labels_path, valid_annos)\n write_hdf5(valid_l_windows_path, valid_windows_with)\n write_hdf5(valid_l_labels_path, valid_locations)\n\n if FLAGS.save_test:\n write_hdf5(test_c_windows_path, test_windows)\n write_hdf5(test_c_labels_path, test_annos)\n write_hdf5(test_l_windows_path, test_windows_with)\n write_hdf5(test_l_labels_path, test_locations)",
"def _create_area_source_tgrmfd_shapefile(shapefile_path, max_np, max_hd,\n rootname):\n\n spatialReference = osr.SpatialReference()\n spatialReference.SetWellKnownGeogCS('WGS84')\n\n driverName = \"ESRI Shapefile\"\n drv = ogr.GetDriverByName(driverName)\n if drv is None:\n print \"%s driver not available.\\n\" % driverName\n sys.exit(1)\n\n layer_name = rootname+\"_trgr\"\n ds = drv.CreateDataSource(shapefile_path+layer_name+\".shp\")\n if ds is None:\n print \"Creation of output file failed.\\n\"\n sys.exit(1)\n\n # Create the layer\n lyr = ds.CreateLayer(layer_name,\n spatialReference,\n ogr.wkbPolygon)\n if lyr is None:\n print \"Layer creation failed.\\n\"\n sys.exit(1)\n\n # Add attributes definition to this layer\n attributes = _get_area_tgrmfd_attr(max_np, max_hd)\n lyr = shpt.add_attributes(lyr, attributes)\n del attributes\n\n return ds",
"def generate_solid(self):\n ext = os.path.splitext(self.filename)[1][1:]\n if ext == 'stl':\n shaft_compound = read_stl_file(self.filename)\n elif ext == 'iges':\n iges_reader = IGESControl_Reader()\n iges_reader.ReadFile(self.filename)\n iges_reader.TransferRoots()\n shaft_compound = iges_reader.Shape()\n else:\n raise Exception('The shaft file is not in iges/stl formats')\n sewer = BRepBuilderAPI_Sewing(1e-2)\n sewer.Add(shaft_compound)\n sewer.Perform()\n result_sewed_shaft = sewer.SewedShape()\n shaft_solid_maker = BRepBuilderAPI_MakeSolid()\n shaft_solid_maker.Add(OCC.Core.TopoDS.topods_Shell(result_sewed_shaft))\n if not shaft_solid_maker.IsDone():\n raise RuntimeError('Unsuccessful assembling of solid shaft')\n shaft_solid = shaft_solid_maker.Solid()\n return shaft_solid",
"def set_udfs(self):\n\n flowcell_type = self.process.all_inputs()[0].udf.get('Flowcell Type')\n\n for key, val in self.process_settings[flowcell_type].items():\n self.process.udf[key] = val\n self.process.put()\n\n for art in self.artifacts:\n for key, val in self.artifact_settings[flowcell_type].items():\n art.udf[key] = val\n art.put()",
"def toENDF6(self, endfMFList, flags, targetInfo, verbosityIndent=''):\n def swaprows( matrix, i1, i2, nrows ):\n # may need to rearrange parameters: ENDF often sorts first by L rather than by energy\n rows = matrix[i1:i1+nrows].copy()\n matrix[i1:i1+nrows] = matrix[i2:i2+nrows]; matrix[i2:i2+nrows] = rows\n cols = matrix[:,i1:i1+nrows].copy()\n matrix[:,i1:i1+nrows] = matrix[:,i2:i2+nrows]; matrix[:,i2:i2+nrows] = cols\n\n # need the resonance parameters as well as covariance matrix:\n res = targetInfo['reactionSuite'].resonances\n RPs = res.resolved.evaluated.resonanceParameters.table\n NRes = self.inputParameters[-1].nResonances\n\n # MF32 header information:\n ZAM, AWT = targetInfo['ZA'], targetInfo['mass']\n NIS, ABN, ZAI = 1, 1.0, ZAM # assuming only one isotope per file\n endf = [endfFormats.endfHeadLine( ZAM, AWT, 0, 0, NIS, 0 )]\n LFW = RPs.getColumn('fissionWidthA') is not None; NER=1\n endf.append( endfFormats.endfHeadLine( ZAI,ABN,0,LFW,NER,0 ) )\n EL,EH = res.resolved.lowerBound.getValueAs('eV'), res.resolved.upperBound.getValueAs('eV')\n LRU,NRO =1,0\n LRF = {'SingleLevel_BreitWigner':1, 'MultiLevel_BreitWigner':2, 'Reich_Moore':3}[\n res.resolved.evaluated.moniker ]\n NAPS = not res.resolved.evaluated.calculateChannelRadius\n endf.append( endfFormats.endfHeadLine( EL,EH,LRU,LRF,NRO,NAPS ) )\n SPI = targetInfo['spin']\n AP = res.resolved.evaluated.scatteringRadius.getValueAs('10*fm')\n LCOMP=1\n if 'LCOMP=0' in self.attributes.get('endfConversionFlags',''): LCOMP=0\n elif 'LCOMP=2' in self.attributes.get('endfConversionFlags',''): LCOMP=2\n\n sortByL = (\"sortByL\" in self.attributes.get('endfConversionFlags',''))\n Ls = RPs.getColumn('L')\n NLS = len(set(Ls))\n if LCOMP==2 or not sortByL: NLS = 0\n ISR = int( isinstance(self.inputParameters[0], inputParameter) and\n ('scatteringRadius' in self.inputParameters[0].name) )\n endf.append( endfFormats.endfHeadLine( SPI,AP,0,LCOMP,NLS,ISR ) )\n MLS = 0\n if ISR:\n MLS = 1 # currently don't handle energy-dependent DAP\n DAP = PQU.PQU( self.matrix.data[0][0], self.inputParameters[0].unit ).getValueAs('10*fm')\n if LRF in (1,2):\n endf.append( endfFormats.endfDataLine( [0,DAP] ) )\n elif LRF==3:\n endf.append( endfFormats.endfHeadLine( 0,0,0,0,MLS,1 ) )\n endf.append( endfFormats.endfDataLine( [DAP] ) )\n else:\n raise Exception(\"ISR>0 not yet supported for LRF=%i!\" % LRF)\n\n # MF32 repeats the resonance parameter information.\n # Extract that info from reactionSuite.resonances:\n table = [RPs.getColumn('L'), RPs.getColumn('energy',units='eV'), RPs.getColumn('J'),\n RPs.getColumn('totalWidth',units='eV') or [0]*NRes,\n RPs.getColumn('neutronWidth',units='eV'), RPs.getColumn('captureWidth',units='eV'),\n RPs.getColumn('fissionWidthA') or [0]*NRes,\n RPs.getColumn('fissionWidthB') or [0]*NRes]\n CS = RPs.getColumn('channelSpin')\n if CS is not None: # ENDF hack: J<0 -> use lower available channel spin\n CS = [2*(cs-SPI) for cs in CS]\n Js = [v[0]*v[1] for v in zip(table[2],CS)]\n table[2] = Js\n table = zip(*table)\n matrix = self.matrix.data[MLS:,MLS:].copy()\n MPAR = len(matrix) / len(table)\n\n if sortByL:\n # reorder resonances, sorting first by L and second by energy:\n table.sort()\n\n elist1 = [(lis[1],lis[4],lis[5]) for lis in table]\n elist2 = zip( RPs.getColumn('energy',units='eV'),\n RPs.getColumn('neutronWidth',units='eV'),\n RPs.getColumn('captureWidth',units='eV') )\n\n for i in range(len(elist1)):\n i2 = elist2.index( elist1[i] )\n if i2!=i:\n swaprows( matrix, MPAR*i, MPAR*elist2.index( elist1[i] ), MPAR )\n val = 
elist2[i]\n elist2[i] = elist2[i2]; elist2[i2] = val\n\n if LCOMP==0:\n tableIndex = 0\n for L in set( Ls ):\n NRS = Ls.count(L)\n endf.append( endfFormats.endfHeadLine( AWT, 0, L, 0, 18*NRS, NRS ) )\n for i in range(tableIndex, len(table)):\n if table[i][0]!=L: break\n endf.append( endfFormats.endfDataLine( table[i][1:7] ) )\n block = matrix[MPAR*i:MPAR*(i+1), MPAR*i:MPAR*(i+1)]\n lis = [block[0,0], block[1,1], block[2,1], block[2,2]]\n if MPAR==4:\n lis += [block[3,1],block[3,2],block[3,3],0,0,0,0,0]\n else:\n lis += [0,0,0,0,0,0,0,0]\n endf += endfFormats.endfDataList( lis )\n tableIndex += NRS\n\n\n if LCOMP==1:\n NSRS, NLRS = 1,0 # short-range correlations only\n endf.append( endfFormats.endfHeadLine( AWT, 0, 0, 0, NSRS, NLRS ) )\n MPAR = len( self.inputParameters[0].parametersPerResonance.split(',') )\n NRB = NRes\n NVS = (NRB*MPAR)*(NRB*MPAR+1)/2 # length of the upper diagonal matrix\n endf.append( endfFormats.endfHeadLine( 0,0, MPAR, 0, NVS+6*NRB, NRB ) )\n\n for res in table:\n if LRF in (1,2):\n endf.append( endfFormats.endfDataLine( res[1:7] ) )\n elif LRF==3:\n endf.append( endfFormats.endfDataLine( res[1:3] + res[4:8] ) )\n\n dataList = []\n for i in range(len(matrix)): dataList.extend( list( matrix[i][i:] ) )\n endf += endfFormats.endfDataList( dataList )\n\n elif LCOMP==2:\n import numpy\n QX, LRX = 0, 0 # haven't encountered any competitive widths yet\n endf.append( endfFormats.endfHeadLine( AWT,QX,0,LRX, 12*NRes, NRes ) )\n dat = matrix.diagonal()\n for i in range(len(table)):\n if LRF in (1,2):\n params = table[i][1:7]\n uncerts = [dat[MPAR*i],0,0,dat[MPAR*i+1],dat[MPAR*i+2],0]\n if MPAR==4: uncerts[-1] = dat[MPAR*i+3]\n elif LRF==3:\n params = table[i][1:3] + table[i][4:8]\n uncerts = [dat[MPAR*i],0,dat[MPAR*i+1],dat[MPAR*i+2],0,0]\n if MPAR==5: uncerts[-2:] = [dat[MPAR*i+3], dat[MPAR*i+4]]\n endf += endfFormats.endfDataList( params )\n endf += endfFormats.endfDataList( uncerts )\n\n # correlation matrix:\n NDIGIT = [a for a in self.attributes['endfConversionFlags'].split(',') if a.startswith('NDIGIT')]\n NDIGIT = int( NDIGIT[0][-1] )\n nints = 56 // (NDIGIT+1) # how many numbers fit on each line?\n if NDIGIT==3: nints = 13 # special case\n rsd = numpy.sqrt( matrix.diagonal() )\n rsd[ rsd==0 ] = 1\n corr_mat = matrix / numpy.outer( rsd,rsd )\n corr_mat = numpy.rint( corr_mat * 10**NDIGIT ) # rint: round to nearest int\n # write lower-diagonal as sparse matrix using INTG format:\n endfCorrMat = []\n for i in range(len(corr_mat)):\n vals = corr_mat[i,:i]\n j = 0\n while j < i:\n if vals[j]!=0:\n endfCorrMat.append( endfFormats.writeEndfINTG(\n i+1, j+1, list(vals[j:j+nints]), NDIGIT ) )\n j += nints\n else: j+=1\n NNN = NRes * MPAR\n NM = len(endfCorrMat)\n endf.append( endfFormats.endfHeadLine( 0,0, NDIGIT, NNN, NM, 0 ) )\n endf += endfCorrMat\n endf.append( endfFormats.endfSENDLineNumber() )\n endfMFList[32][151] = endf",
"def TransformerFlops(inputs, num_heads, ff_dim, atten_dim, model_dim):\n f = tf.cast(ff_dim, tf.int64)\n a = tf.cast(atten_dim, tf.int64)\n n = tf.cast(num_heads, tf.int64)\n d = tf.cast(model_dim, tf.int64)\n h = tf.cast(a / n, tf.int64) # dim per head\n inputs = tf.cast(inputs, tf.int64)\n b, t = inputs[0], inputs[1]\n multi_head_atten_flops = (6 * a * d + n * t * (2 * h - 1) + a * (2 * t - 1) +\n 5 * n * d + d * (2 * h - 1) * (2 * n - 1))\n residual_flops = 2 * d\n ff_flops = 4 * f * d\n return (multi_head_atten_flops + residual_flops + ff_flops) * b * t"
] | [
"0.5428593",
"0.53293943",
"0.5111582",
"0.50676537",
"0.50566024",
"0.5017332",
"0.49942586",
"0.4984784",
"0.4978153",
"0.4896247",
"0.48875964",
"0.48859593",
"0.4797835",
"0.47945777",
"0.4774569",
"0.47722545",
"0.47544155",
"0.47473112",
"0.47439614",
"0.4709717",
"0.47088903",
"0.4708684",
"0.4702732",
"0.4672353",
"0.46710226",
"0.46705735",
"0.46685153",
"0.46662942",
"0.46660125",
"0.46567357"
] | 0.5575888 | 0 |
This attribute finds the larger of the HT and VT stabilizer airfoil thicknesses so that the tail boom shaft can be constructed with a radius equal to this larger thickness. | def critical_thickness(self):
horizontal_tail_thickness = sorted(self.stabilizer_h.solid.faces, key=lambda f: f.cog.y)[-1].bbox.height
vertical_tail_thickness = sorted(self.stabilizer_vright.solid.faces, key=lambda f: f.cog.z)[0].bbox.length
if horizontal_tail_thickness >= vertical_tail_thickness:
critical_thickness = horizontal_tail_thickness
else:
critical_thickness = vertical_tail_thickness
return critical_thickness | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_optical_thickness(self, atmosphere, t_surface):\n self.set_atmospheric_state(atmosphere, t_surface)\n\n self.ws.propmat_clearsky_fieldCalc()\n\n tau = np.trapz(\n y=self.ws.propmat_clearsky_field.value[:, :, 0, 0, :, 0, 0],\n x=self.ws.z_field.value[:, 0, 0],\n axis=-1,\n )\n\n return self.ws.f_grid.value.copy(), tau",
"def test_optimal_thickness():\n structure = Material(input)\n assert (structure.calc_optimal_thickness() == 1.9552936422413782)",
"def thickness(self):\n return self._thickness",
"def adjust_for_speed_of_light_in_water(df, tide_level):\n speed_of_light_air = 300000\n speed_of_light_water = 225000\n coef = speed_of_light_water / speed_of_light_air\n df['Height'] = (df['Height']- tide_level) * coef\n return df",
"def ideal_thickness(self, opt_freq=160e9):\n return (1/np.sqrt(self.dielectric)*3e8/(4*opt_freq))",
"def calc_optimal_spacing(sun_properties, tilt_angle, module_length):\n h = module_length * sin(tilt_angle)\n D1 = h / tan(radians(sun_properties.worst_sh))\n D = max(D1 * cos(radians(180 - sun_properties.worst_Az)), D1 * cos(radians(sun_properties.worst_Az - 180)))\n return D",
"def calc_thickness(self):\n s = \"::: calculating z-varying thickness :::\"\n print_text(s, cls=self)\n #H = project(self.S - self.x[2], self.Q, annotate=False)\n H = self.vert_integrate(Constant(1.0), d='down')\n Hv = H.vector()\n Hv[Hv < 0] = 0.0\n print_min_max(H, 'H', cls=self)\n return H",
"def coating_weight(self, thickness, weight=2400):\n return self.__length * self.__width * thickness * weight / 100",
"def test_thickness_hydrostatic_subset():\n pressure = np.array([959., 779.2, 751.3, 724.3, 700., 269.]) * units.hPa\n temperature = np.array([22.2, 14.6, 12., 9.4, 7., -38.]) * units.degC\n mixing = np.array([0.01458, 0.00209, 0.00224, 0.00240, 0.00256, 0.00010])\n thickness = thickness_hydrostatic(pressure, temperature, mixing_ratio=mixing,\n bottom=850 * units.hPa, depth=150 * units.hPa)\n assert_almost_equal(thickness, 1630.752 * units.m, 2)",
"def convert2thick(stairsThickness,stepCount,stepWidth,box_height):\n slope_cos = ((stepCount-1)*stepWidth)/math.sqrt((box_height*0.5)**2+((stepCount-1)*stepWidth)**2)#///坡度的余弦值\n thick = stairsThickness/slope_cos\n return thick",
"def test_thickness_hydrostatic_isothermal_subset():\n pressure = np.arange(1000, 500 - 1e-10, -10) * units.hPa\n temperature = np.zeros_like(pressure) * units.degC\n thickness = thickness_hydrostatic(pressure, temperature, bottom=850 * units.hPa,\n depth=350 * units.hPa)\n assert_almost_equal(thickness, 4242.527 * units.m, 2)",
"def compute_thick_lens_approximation(self):\r\n\r\n x = self.film_diagnal * 0.001\r\n so = ti.Vector([x, 0.0, self.front_z() + 1.0])\r\n sd = ti.Vector([0.0, 0.0, -1.0])\r\n fo = ti.Vector([x, 0.0, self.rear_z() - 1.0])\r\n fd = ti.Vector([0.0, 0.0, 1.0])\r\n ok1, o1, d1 = self.gen_ray_from_scene(so, sd)\r\n ok2, o2, d2 = self.gen_ray_from_film(fo, fd)\r\n assert ok1 == True and ok2 == True\r\n fz, pz = self.compute_cardinal_points(so, o1, d1)\r\n fz1, pz1 = self.compute_cardinal_points(fo, o2, d2)\r\n assert fz1 < pz1 and pz < fz\r\n return fz, pz, fz1, pz1",
"def find_shower_max_height(self, energy, h_first_int, gamma_alt):\n\n # offset of the shower-maximum in radiation lengths\n c = 0.97 * log(energy / (83 * u.MeV)) - 1.32\n # radiation length in dry air at 1 atm = 36,62 g / cm**2 [PDG]\n c *= 36.62 * u.g * u.cm ** -2\n # showers with a more horizontal direction spend more path\n # length in each atm. layer the \"effective transverse\n # thickness\" they have to pass is reduced\n c *= np.sin(gamma_alt)\n\n # find the thickness at the height of the first interaction\n t_first_int = self.thickness_profile(h_first_int)\n\n # total thickness at shower maximum = thickness at first\n # interaction + thickness traversed to shower maximum\n t_shower_max = t_first_int + c\n\n # now find the height with the wanted thickness by solving for the\n # desired thickness\n return self.altitude_profile(t_shower_max)",
"def tail_shaft_circle(self):\n _profile = Circle(position=self.stabilizer_vright.position, radius=(self.critical_thickness / 2.0) * 1.5)\n _extrude = ExtrudedSolid(island=_profile, distance=self.stabilizer_h.root_chord)\n return _profile, _extrude",
"def heat_capacity_of_air(self) -> float:\n\n return 1002.5 + 275 * (10 ** (-6)) * (self.ambient_temperature - 200) ** 2",
"def get_t_half_length(self, alpha):\n\n n = len(self._x)\n m = len(self._y)\n sig_x = numpy.std(self._x)\n sig_y = numpy.std(self._y)\n\n alpha = alpha / 100.0\n\n # calculate CI using formula: Welch's t-interval\n df_n = (sig_x ** 2.0 / n + sig_y ** 2.0 / m) ** 2.0\n df_d = (sig_x ** 2.0 / n) ** 2 / (n - 1) + (sig_y ** 2.0 / m) ** 2 / (m - 1)\n df = round(df_n / df_d, 0)\n\n # t distribution quantile\n t_q = stat.t.ppf(1 - (alpha / 2), df)\n st_dev = (sig_x ** 2.0 / n + sig_y ** 2.0 / m) ** 0.5\n\n return t_q*st_dev",
"def local_thickness(im):\n from skimage.morphology import cube\n if im.ndim == 2:\n from skimage.morphology import square as cube\n dt = spim.distance_transform_edt(im)\n sizes = sp.unique(sp.around(dt, decimals=0))\n im_new = sp.zeros_like(im, dtype=float)\n for r in tqdm(sizes):\n im_temp = dt >= r\n im_temp = spim.distance_transform_edt(~im_temp) <= r\n im_new[im_temp] = r\n # Trim outer edge of features to remove noise\n im_new = spim.binary_erosion(input=im, structure=cube(1))*im_new\n return im_new",
"def maTail(self):\n return self.maCruise * sqrt(self.speedRatio)",
"def well_diameter_at_top(self):\n return self.radius_from_liquid_depth(self.well_depth) * 2 # a generic impl; subclasses can optimize",
"def upper_hook_length(self, i, j, parameter):\n leg = self.circle_star().leg_length(i, j)\n arm = self.star().arm_length(i, j)\n return leg + parameter*(arm + 1)",
"def thermal_conductivity_of_air(self) -> float:\n\n # This more accurate equation is not used by the paper.\n # return (0.02646 * self.ambient_temperature ** 1.5) / (\n # self.ambient_temperature + 254.4 * (10 ** (-12 / self.ambient_temperature))\n # )\n\n # The reference suggests this equation is accurate to 1%.\n return 0.02646 * (self.ambient_temperature / 300) ** 0.8646",
"def get_specific_heat() -> float:\n return 1006.0",
"def width_v_eta(model: SingleRhNeutrinoModel):\n mh = parameters.eta_mass\n fh = parameters.feta\n return _width_v_hp(model, mh, fh)",
"def compute_thickness(self):\n com = vtk.vtkCenterOfMass()\n com.SetInputData(self.inner_rim_poly)\n center = np.asarray(com.GetCenter()) # take center from inner points (not outer)\n\n irp_numpy = numpy_support.vtk_to_numpy(self.inner_rim_poly.GetPoints().GetData())\n orp_numpy = numpy_support.vtk_to_numpy(self.outer_rim_poly.GetPoints().GetData())\n\n # compute average radius ..\n rs_inner = np.linalg.norm(irp_numpy - np.tile(center, (irp_numpy.shape[0], 1)), axis = 1)\n rs_outer = np.linalg.norm(orp_numpy - np.tile(center, (orp_numpy.shape[0], 1)), axis = 1)\n\n # average out\n r_inner = np.mean(rs_inner)\n r_outer = np.mean(rs_outer)\n\n # compute distance\n d = r_outer - r_inner\n self.thickness = d\n\n return d",
"def test_thickness_hydrostatic():\n pressure = np.array([959., 779.2, 751.3, 724.3, 700., 269.]) * units.hPa\n temperature = np.array([22.2, 14.6, 12., 9.4, 7., -38.]) * units.degC\n mixing = np.array([0.01458, 0.00209, 0.00224, 0.00240, 0.00256, 0.00010])\n thickness = thickness_hydrostatic(pressure, temperature, mixing_ratio=mixing)\n assert_almost_equal(thickness, 9891.706 * units.m, 2)",
"def _tooth_thickness(self, d_y):\n\n # necessary due to numerical rounding errors\n if self.data.get('d') / d_y * cos(radians(self.data.get('alpha_t'))) > 1.0:\n alpha_yt = 0.0\n else:\n alpha_yt = degrees(acos(self.data.get('d') / d_y * cos(radians(self.data.get('alpha_t')))))\n s_yt = d_y * (\n (pi + 4 * self.data.get('x_E') * tan(radians(self.data.get('alpha_n')))) / 2 / self.data.get(\n 'z') + inv(self.data.get('alpha_t')) - inv(alpha_yt))\n s_y = d_y * (sin(s_yt / d_y)) # tooth thickness (chord-length)\n d_yc = d_y * (cos(s_yt / d_y)) # diameter at center of tooth (cut with chord)\n\n return s_y, d_yc",
"def molar_mass_dry_air():\n return 28.9647",
"def diffusion_width(conversion_depth): #Return value in PIXELS!!!\n return sqrt((drift_time(maximum(conversion_depth, undepleted_thickness)) *\n 2 * k * temp * low_field_mobility / e) + #depleted\n where(conversion_depth < undepleted_thickness,\n square(undepleted_thickness), 0)) / pixel_width #undepleted",
"def _get_thickness(self,filename, maxLen=3):\n filename = os.path.splitext(filename)[0] \n filename = os.path.split(filename)[1] \n filename = filename.split(\"_t\")[-1] \n filename = filename.split(\"_\")[0]\n if \"v\" in filename:\n part1=filename.split(\"v\",1)[0]\n part2=filename.split(\"v\",1)[1]\n filename = ''.join((filename.split(\"v\",1)[0],'.',filename.split(\"v\",1)[1]))#['%s.%s' %(filename.split(\"v\",1)[0],filename.split(\"v\",1)[1])]\n print(filename)\n thickness = filename\n return thickness",
"def athlete_height(a):\r\n if pd.isnull(a):\r\n return np.nan\r\n if \"cm\" in a:\r\n temp = round(int(a.replace(\" cm\",\"\")),0)\r\n if \"'\" in a:\r\n temp = a.split('\"')[0]\r\n feet = int(temp.split(\"'\")[0])\r\n inches = int(temp.split(\"'\")[1])\r\n cm = round(feet*30.48 + 2.54*inches,0)\r\n temp = cm\r\n if \"in\" in a:\r\n inches = round(int(a.replace(\" in\",\"\")), 0)\r\n kg = round(2.54 * inches,0)\r\n temp = kg \r\n \r\n if temp < 120 or temp > 225:\r\n return np.nan\r\n else:\r\n return temp"
] | [
"0.58767277",
"0.577812",
"0.57439417",
"0.57420033",
"0.57348126",
"0.5701691",
"0.56614035",
"0.5627972",
"0.5577274",
"0.5555835",
"0.55312794",
"0.5513159",
"0.55019605",
"0.5464188",
"0.5458677",
"0.5456074",
"0.5408051",
"0.5388599",
"0.53871053",
"0.53454113",
"0.53291845",
"0.5314386",
"0.5310814",
"0.52998626",
"0.5291957",
"0.52625114",
"0.5234342",
"0.522499",
"0.5217491",
"0.5205492"
] | 0.6089675 | 0 |
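The row above pairs the `critical_thickness` docstring with a CAD-framework implementation that measures bounding boxes of the tail surfaces. As a framework-free illustration of the same selection logic, here is a minimal Python sketch; the two thickness arguments and the sample values are hypothetical stand-ins for those bounding-box measurements, not part of the original code.

```python
# Minimal sketch of the comparison done by critical_thickness above.
# The inputs are hypothetical; in the real attribute they come from measuring
# the horizontal- and vertical-tail airfoil thicknesses.
def critical_thickness(horizontal_tail_thickness: float,
                       vertical_tail_thickness: float) -> float:
    """Return the larger of the two stabilizer airfoil thicknesses."""
    return max(horizontal_tail_thickness, vertical_tail_thickness)


if __name__ == "__main__":
    # e.g. HT airfoil is 0.012 m thick, VT airfoil is 0.015 m thick,
    # so the boom shaft radius would be sized from 0.015 m.
    print(critical_thickness(0.012, 0.015))  # -> 0.015
```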
This is the internal shape of the compound stabilizer. It is None because the current app uses a boom tail structure instead of a single fuselage, and thus there is no shape to present for the fuselage builder. | def internal_shape(self):
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shape(self):\n return None",
"def shape(self):",
"def shape(self):",
"def shape(self) -> Shape:",
"def shape(self) -> str:\n return \"box\"",
"def surface(self):\n return BRep_Tool_Surface(self.topods_shape())",
"def shape(self):\n return self.active.shape",
"def external_shape(self):\n return ScaledShape(shape_in=self.tail_joiner, reference_point=Point(0, 0, 0), factor=1, hidden=True)",
"def shape(self):\n return self._shape",
"def shape_type(self):\n return \"rectangle\"",
"def shape(self):\n if self.color_buffer is not None:\n return self.color_buffer.shape[:2] # in case its a texture\n if self.depth_buffer is not None:\n return self.depth_buffer.shape[:2]\n if self.stencil_buffer is not None:\n return self.stencil_buffer.shape[:2]\n raise RuntimeError('FrameBuffer without buffers has undefined shape')",
"def shape(self):\r\n return self._shape",
"def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)",
"def empty_fuselage():\n fus = Fuselage(construct_geometry=False)\n return fus",
"def shape_type(self):\n return \"square\"",
"def shape(self):\n return self.child_mode.shape()",
"def shape(self) -> Shape:\n raise NotImplementedError()",
"def shape(self):\n for component in ('x', 'y', 'z', 'r', 't'):\n arr = getattr(self, component)\n if arr is not None:\n return arr.shape\n return ()",
"def tail_joiner(self):\n\n # Fusing Right Horizontal Tail:\n shape_in_r = Fused(shape_in=self.stabilizer_h.solid, tool=self.stabilizer_vright.solid)\n shape_out_r = Fused(shape_in=shape_in_r, tool=self.connector_right)\n\n # Fusing Left Horizontal Tail:\n shape_in_l = Fused(shape_in=self.stabilizer_h.ht_mirror, tool=self.stabilizer_vleft.solid)\n shape_out_l = Fused(shape_in=shape_in_l, tool=self.connector_left)\n\n shape_out = Fused(shape_in=shape_out_r, tool=shape_out_l)\n\n return shape_out",
"def frame_shape(self):\n pass",
"def shape(self):\n return self._shape",
"def shape(self):\n return self._shape",
"def get_shape(self):\n if not self.channels_first:\n return [None] + self.w + [self.numoffeatures]\n else:\n return [None] + [self.numoffeatures] + self.w",
"def getShape(transform):\n\n pass",
"def shape(self) -> Optional[tuple]:\n return self._shape",
"def rack_shape(self):\n return self.reservoir_specs.rack_shape",
"def shape_type(self):\n return \"circle\"",
"def shape(self):\n if self.volumes:\n return 4 * self.bars_count + 1 + 1,\n else:\n return 3 * self.bars_count + 1 + 1,",
"def calculate_shape(self):\n\n # error handling\n if self.radius <= 0: raise ValueError(\"Radius must be positive.\")\n if self.inner_radius < 0: raise ValueError(\"Inner radius must not be negative\")\n if self.inner_radius > self.radius: raise ValueError(\"Inner radius must be smaller than radius\")\n if self.thickness <= 0: raise ValueError(\"Thickness must be positive\")\n\n self.area = pi * self.radius ** 2\n self.area -= pi * self.inner_radius ** 2\n\n self.volume = self.area * self.thickness",
"def get_shape_type(self):\n import GEOM\n sgeom = self.get_sgeom()\n shape_type = sgeom.GetShapeType()\n if shape_type is GEOM.COMPOUND:\n node = self.node\n geom_eng = node.get_sobj().GetFatherComponent().GetObject()\n ops = geom_eng.GetIGroupOperations(node.get_std()._get_StudyId())\n sidx = ops.GetType(sgeom)\n shape_type = self._compound_types.get(sidx, shape_type)\n return shape_type"
] | [
"0.6660259",
"0.6377907",
"0.6377907",
"0.6350067",
"0.61458004",
"0.6072909",
"0.59808654",
"0.5950499",
"0.58973956",
"0.5789104",
"0.57881624",
"0.57205176",
"0.567865",
"0.56527114",
"0.56421506",
"0.5631697",
"0.5566981",
"0.55606943",
"0.55593157",
"0.55571765",
"0.5546411",
"0.5546411",
"0.55190057",
"0.5518635",
"0.547383",
"0.5447111",
"0.54302114",
"0.5416207",
"0.54156303",
"0.54136586"
] | 0.65808225 | 1 |
This rotates the extruded right boom shaft connector to point in the X direction. | def connector_right(self):
return RotatedShape(shape_in=self.tail_shaft_circle[1],
rotation_point=self.tail_shaft_circle[0].center,
vector=Vector(0, 1, 0),
angle=radians(90)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def right(self, angle):\r\n self.rotation += angle",
"def rotateRight(self):\n self.faceHeading+=-1*shipRotationSpeed\n self.reDraw()",
"def move_right(self):\n\t\tself.set_x_vector(constants.DONKEY_SPEED)",
"def read_rotor(self):\n self.sig_a = self.pin_a.read_digital()\n self.sig_b = self.pin_b.read_digital()\n # catch the rising edge of A\n if self.sig_a and not self.old_sig_a:\n # if b is also high, clockwise\n if self.sig_b:\n self.x += 1\n\n else:\n self.x -= 1\n if self.x > 9:\n self.x = 0\n elif self.x < 0:\n self.x = 9\n self.old_sig_a = self.sig_a\n return self.x",
"def rotate_right_left(self):\n\t\treturn",
"def right(self, angle):\r\n self.dir += math.radians(angle)",
"def rotate_right(self):\n current = compass.index(self.heading)\n return replace(self, heading=compass[(current + 1) % 4])",
"def rotate_left_right(self):\n\t\treturn",
"def turn_right(self):\n self.facing_direction += self.config\n if self.facing_direction > 7:\n self.facing_direction -= 8\n self.x, self.y = self.compute_positions()",
"def rot_x_rad(self):\n return self._rot_x_rad",
"def go_right(self):\n self.change_x = 6\n self.direction = \"R\"",
"def rotate(X):\n return X",
"def right(self, angle):\n self._rotate(-angle)",
"def turn_ship_right(self):\n self.degrees -= movement",
"def rotate90(self):",
"def go_right(self):\n self.change_x = 6",
"def go_right(self):\n self.change_x = 6",
"def steerright(self):\n self.direction = self.direction-self.steering\n if self.direction < 0:\n self.direction = 360-90\n self.image, self.rect = rot_center(self.image_orig,self.rect,self.direction)",
"def turn_right(self):\n self.direction_mod_offset += 1\n self.calculate_offset_mapping()\n direction_num = self.direction_mod_offset % len(self.direction_arr)\n client.rotateToYawAsync(direction_num * 90).join()",
"def rotate(self,X):\n alpha = random.rand() * 2*pi\n beta = self.beta_sample()\n R = Rotator.rotation_matrix(alpha,beta,0.0)\n X = np.dot(R, X)\n if self.random_flip and (random.rand() > 0.5):\n X[2,:] = -X[2,:]\n X[1,:] = -X[1,:]\n return X",
"def rotateLeft(self):\n self.faceHeading+=shipRotationSpeed\n self.reDraw()",
"def turnRight(ev3):\n ev3.set_angle(\"A\", \"30\", \"90\")\n ev3.set_angle(\"B\", \"-30\", \"-90\")\n ev3.set_angle(\"C\", \"30\", \"90\")",
"def rotate_x(self,rad):\n DetElement.rotate_x(self,rad) #python2\n #super().rotate_x(rad) #python3\n for tube in self._list_of_tubes:\n self.__update__update_tube_pos_after_rotation(tube)\n tube.rotate_x(rad)",
"def rotate_right(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.anticlockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.anticlockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.anticlockwise_rotate(speed + 1 + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.anticlockwise_rotate(speed)",
"def mate_bolt_right(self):\n return Mate(\n self,\n CoordSystem(\n origin=(10, self.y_bolt_offset + 1.0, self.thickness / 2.0),\n # xDir=(1, 0, 0), normal=(0, -1, 0),\n ),\n )",
"def right(self, angle):\n self.matrix = matrixMultiply(yawMatrix(angle), self.matrix)\n self.directionOut()\n self.delay()",
"def _move_right(self):\n self.x += self.settings.mario_speed\n if self.settings.direction == -1:\n self.image = pygame.transform.flip(self.image, True, False)\n self.settings.direction = 1",
"def rotateX(self, angleInRadians) -> None:\n ...",
"def move_right(self):\n\n if self.xcor() > 230:\n self.setx(250)\n else:\n new_x = self.xcor() + 40\n self.setx(new_x)",
"def x(self):\n return self._turtle.xcor()"
] | [
"0.58585054",
"0.58475804",
"0.5752721",
"0.56557125",
"0.565259",
"0.5642582",
"0.55889606",
"0.55838734",
"0.55481523",
"0.5542192",
"0.55371916",
"0.553619",
"0.5532067",
"0.5516594",
"0.54965496",
"0.54788023",
"0.54788023",
"0.5476728",
"0.5452291",
"0.5441863",
"0.54217017",
"0.5420884",
"0.53897387",
"0.53813934",
"0.5366761",
"0.5365397",
"0.5363091",
"0.5360367",
"0.5330813",
"0.5327575"
] | 0.6050355 | 0 |
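The `connector_right` row above rotates an extruded circular connector 90 degrees about the Y axis so that its axis points along X. The sketch below applies the same rotation to a plain 3-D vector instead of a CAD solid, to make the geometry explicit; the vector and function names are illustrative, not the framework's API.

```python
# Rotating a 3-D vector 90 degrees about the Y axis, mirroring the
# RotatedShape(..., vector=Vector(0, 1, 0), angle=radians(90)) call above.
from math import cos, radians, sin


def rotate_about_y(v, angle_rad):
    """Rotate vector (x, y, z) about the Y axis by angle_rad (right-handed)."""
    x, y, z = v
    c, s = cos(angle_rad), sin(angle_rad)
    return (c * x + s * z, y, -s * x + c * z)


if __name__ == "__main__":
    extrusion_axis = (0.0, 0.0, 1.0)                    # originally along +Z
    print(rotate_about_y(extrusion_axis, radians(90)))  # ~(1.0, 0.0, 0.0): now along +X
```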
This attribute names the component 'ct' for compound stabilizer. | def component_type(self):
return 'ct' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ct(self):\n return self._ct",
"def n_cs(self):\n pass",
"def delta_c(self):\n if self._deltac is None:\n raise AttributeError('Attribute has not yet been initialized.')\n else:\n return self._deltac",
"def get_bertzCTs(self):\n\n self.r_max_bCT = 0\n self.p_max_bCT = 0\n self.delta_bCT = 0\n for m in self.components:\n prop_dict = m.read_prop_file()\n\n if m.role == 'reactant':\n self.r_max_bCT = max([\n self.r_max_bCT,\n prop_dict['bertzCT']\n ])\n elif m.role == 'product':\n self.p_max_bCT = max([\n self.p_max_bCT,\n prop_dict['bertzCT']\n ])\n\n self.delta_bCT = self.p_max_bCT - self.r_max_bCT",
"def get_ctf(ima):\n\tfrom EMAN2 import EMAN2Ctf\n\tctf_params = ima.get_attr(\"ctf\")\t\n\treturn ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang",
"def getComp( self, c, name ):\n self.validateChannel( name )\n value = self.d[name].value\n return value",
"def parse_C(self, line: str):\n node1, node2, value, v0, i0, name = self.parse_C_or_L(line, \"f\")\n return IComponent.C(node1, node2, value, v0, i0, name)",
"def c(self):\n return self._c",
"def c(self):\r\n return self.__c",
"def get_collision_attrib_template():\n return {\"group\": \"0\", \"rgba\": array_to_string(OBJECT_COLLISION_COLOR)}",
"def getColorTransferFunction(self):\n\t\treturn self.ctf",
"def c(self):\n pass",
"def c(self):\n pass",
"def name(self):\n return \"cnotdihedral\"",
"def c1(self):\n return self.__c1",
"def ct(self, ct):\n\n self._ct = ct",
"def getC(self):\n\t\treturn self.c",
"def C ( self ) :\n lst = [ i for i in self.__nums_components ]\n if not lst : return () ## extended fit? no other components?\n elif 1 == len(lst) : return lst[0] ## single component?\n return tuple ( lst )",
"def C ( self ) :\n lst = [ i for i in self.__nums_components ]\n if not lst : return () ## extended fit? no other components?\n elif 1 == len(lst) : return lst[0] ## single component?\n return tuple ( lst )",
"def cs(self):\n return self._cs",
"def __init__(self, name, c, att):\n\n\t\tself.name = name\n\t\tself.c = c\n\t\tself.att = att",
"def uc(p) :\r\n return Components(p, Scale=2)",
"def ctc(target):\n network = target.project.network\n throats = network.throats(target.name)\n cn = network['throat.conns'][throats]\n C1 = network['pore.coords'][cn[:, 0]]\n C2 = network['pore.coords'][cn[:, 1]]\n value = _norm(C1 - C2, axis=1)\n return value",
"def t2c(x):\n dx = to_dlpack(x)\n return cp.fromDlpack(dx)",
"def __repr__(self):\n\t\treturn \"<Compound %r>\"%self.name",
"def name(self):\n return '{:.2f}_{:d}_{:s}_{:d}'.format(self.A, self.Z,\n self.species, self.C)",
"def cole_coeff(self):\n return self.diseq_coeff(standardize=True)",
"def add_comp(self, name, ctype):\n\n name = self.name + '.' + name\n\n assert name not in self.components, 'A component named \\'{}\\' already exists for node \\'{}\\''.format(\n name, self.name)\n\n try:\n cls = co.str_to_comp(ctype)\n except AttributeError:\n try:\n cls = rc.str_to_comp(ctype)\n except AttributeError:\n cls = None\n\n if cls:\n obj = cls(name=name,\n temperature_driven=self.temperature_driven,\n repr_days=self.repr_days)\n else:\n raise ValueError(\n \"%s is not a valid class name! (component is %s, in node %s)\" % (\n ctype, name, self.name))\n\n self.logger.info('Component {} added to {}'.format(name, self.name))\n\n self.components[name] = obj",
"def testIsCCC(self):\n self.assertTrue(\n self.node.is_ccc\n )\n\n self.node._type = 'cdl'\n\n self.assertFalse(\n self.node.is_ccc\n )",
"def tactic_comps(cls) -> Set[str]:\n return set([\"mmic_autodock_vina\"])"
] | [
"0.6306071",
"0.5830233",
"0.55300504",
"0.54758626",
"0.5464467",
"0.54112226",
"0.5399572",
"0.53634334",
"0.53443295",
"0.5325333",
"0.5307353",
"0.526817",
"0.526817",
"0.5239267",
"0.5178399",
"0.5164254",
"0.51474506",
"0.51014316",
"0.51014316",
"0.50527024",
"0.50240517",
"0.5012217",
"0.5009809",
"0.49787772",
"0.49758554",
"0.4959605",
"0.4950169",
"0.49273026",
"0.4922442",
"0.49215105"
] | 0.7008896 | 0 |
This defines the external shape for the ExternalBody class in definitions. | def external_shape(self):
return ScaledShape(shape_in=self.tail_joiner, reference_point=Point(0, 0, 0), factor=1, hidden=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def internal_shape(self):\n return None",
"def shape(self) -> Shape:",
"def createExternalModelDefinition(self):\n return _libsbml.CompSBMLDocumentPlugin_createExternalModelDefinition(self)",
"def nativeObject(self):\n return ParaMeshBody()",
"def shape(self):\n path = super(Arrow, self).shape()\n path.addPolygon(self.arrowHead)\n return path",
"def physicalShape(self):\n return PhysicalField(value = (self.nx * self.dx * self.scale,\n self.ny * self.dy * self.scale))",
"def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)",
"def homogeneous_external_field(self):\n return self._H",
"def shape(self):",
"def shape(self):",
"def shape(self) -> Shape:\n raise NotImplementedError()",
"def makePhysicsBody(self):\n space = self.environment.space\n geom = GeomBox(space, self.dim)\n geom.setPosition(self.centerPos)\n geom.setCategoryBits(2)\n geom.setCollideBits(1)\n self.geomList = [geom]",
"def interior(self):\n return Shape(self - self.edge('inner'))",
"def buildSubType(self):\n \n if self.subType == \"wallType\":\n \"\"\"Build a wall\"\"\"\n \n if \"col\" in self.name:\n \"\"\"Build the collision body for this wall\"\"\"\n self.bulletBody = self.factory.basePhysics.buildTriangleMesh(\n self.object, self.levelEgg, 0, self.isDynamic)\n \n else:\n self.object.reparentTo(self.renderObjectsLevel)\n \n elif self.subType == \"groundType\":\n \"\"\"Build the ground with either custom Mesh or use the plane\"\"\"\n if self.useBulletPlane:\n self.factory.basePhysics.buildGroundPlane()\n \n self.object.reparentTo(self.renderObjectsLevel)\n self.object.setPos(self.position)\n self.object.setHpr(self.hpr)\n \n else:\n \n if \"col\" in self.name:\n self.bulletBody = self.factory.basePhysics.buildTriangleMesh(\n self.object, self.levelEgg, 0, self.isDynamic)\n \n else:\n self.object.reparentTo(self.renderObjectsLevel)\n self.object.setPos(self.position)\n self.object.setHpr(self.hpr)",
"def nodeInitializer(cls):\n\n inAttributes = []\n outAttributes = []\n\n # =======================================\n # Input Attribute\n # =======================================\n\n # Shape Type\n shapeTypeAttr = OpenMaya.MFnEnumAttribute()\n cls.iShapeType = shapeTypeAttr.create(\"shapeType\", \"st\", 0);\n cls.setMFnAttribute(shapeTypeAttr)\n\n for idx, shape_name in enumerate(SHAPE_NAMES):\n shapeTypeAttr.addField(shape_name, idx);\n\n inAttributes.append(cls.iShapeType)\n\n # Drawing type\n drawTypeAttr = OpenMaya.MFnEnumAttribute()\n cls.iDrawingType = drawTypeAttr.create(\"drawType\", \"dt\", 2);\n cls.setMFnAttribute(drawTypeAttr)\n\n for idx, draw_type in enumerate([\"Wireframe\", \"Shaded\", \"Both\"]):\n drawTypeAttr.addField(draw_type, idx);\n\n inAttributes.append(cls.iDrawingType)\n\n # Up Axis\n upAxisAttr = OpenMaya.MFnEnumAttribute()\n cls.iUpAxis = upAxisAttr.create(\"upAxis\", \"ua\", 1);\n cls.setMFnAttribute(upAxisAttr)\n\n for idx, shape_name in enumerate([\"X\", \"Y\", \"Z\"]):\n upAxisAttr.addField(shape_name, idx);\n\n inAttributes.append(cls.iUpAxis)\n\n # XRay\n xRayAttr = OpenMaya.MFnNumericAttribute()\n cls.iXRay = xRayAttr.create(\"xRay\", \"xr\", OpenMaya.MFnNumericData.kBoolean, False)\n cls.setMFnAttribute(xRayAttr)\n\n inAttributes.append(cls.iXRay)\n\n # BillBoard\n biilBoardAttr = OpenMaya.MFnNumericAttribute()\n cls.iBillBoard = biilBoardAttr.create(\"biilBoard\", \"bbd\", OpenMaya.MFnNumericData.kBoolean, False)\n cls.setMFnAttribute(biilBoardAttr)\n\n inAttributes.append(cls.iBillBoard)\n\n # Force Refresh\n forceRefreshAttr = OpenMaya.MFnNumericAttribute()\n cls.iForceRefresh = forceRefreshAttr.create(\"forceRefresh\", \"fr\", OpenMaya.MFnNumericData.kBoolean, False)\n cls.setMFnAttribute(forceRefreshAttr)\n\n inAttributes.append(cls.iForceRefresh)\n\n # Edge Color\n edgeColorAttr = OpenMaya.MFnNumericAttribute()\n cls.iEdgeColor = edgeColorAttr.createPoint(\"edgeColor\", \"ec\")\n cls.setMFnAttribute(edgeColorAttr)\n\n inAttributes.append(cls.iEdgeColor)\n\n # Edge Opacity\n edgeOpacityAttr = OpenMaya.MFnNumericAttribute()\n cls.iEdgeOpacity = edgeOpacityAttr.create(\"edgeOpacity\", \"ep\", OpenMaya.MFnNumericData.kFloat, 1.0)\n cls.setMFnAttribute(edgeOpacityAttr)\n\n inAttributes.append(cls.iEdgeOpacity)\n\n # Polygon Color\n polygonColorAttr = OpenMaya.MFnNumericAttribute()\n cls.iPolygonColor = polygonColorAttr.createPoint(\"polygonColor\", \"pc\")\n cls.setMFnAttribute(polygonColorAttr)\n\n inAttributes.append(cls.iPolygonColor)\n\n # Polygon Opacity\n polygonOpacityAttr = OpenMaya.MFnNumericAttribute()\n cls.iPolygonOpacity = polygonOpacityAttr.create(\"polygonOpacity\", \"pp\", OpenMaya.MFnNumericData.kFloat, .3)\n cls.setMFnAttribute(polygonOpacityAttr)\n\n inAttributes.append(cls.iPolygonOpacity)\n\n # Shape Size\n shapeSizeAttr = OpenMaya.MFnNumericAttribute()\n cls.iShapeSize = shapeSizeAttr.create(\"shapeSize\", \"ss\", OpenMaya.MFnNumericData.kFloat, 1.0)\n cls.setMFnAttribute(shapeSizeAttr)\n\n inAttributes.append(cls.iShapeSize)\n\n # Edge Size\n edgeSizeAttr = OpenMaya.MFnNumericAttribute()\n cls.iEdgeSize = edgeSizeAttr.create(\"edgeSize\", \"es\", OpenMaya.MFnNumericData.kFloat, 1.0)\n cls.setMFnAttribute(edgeSizeAttr)\n\n inAttributes.append(cls.iEdgeSize)\n\n # Position Offset\n positionOffsetAttr = OpenMaya.MFnNumericAttribute()\n cls.iPositionOffset = positionOffsetAttr.createPoint(\"positionOffset\", \"po\")\n cls.setMFnAttribute(positionOffsetAttr)\n\n inAttributes.append(cls.iPositionOffset)\n\n # Rotation Offset\n 
rotationOffsetAttr = OpenMaya.MFnNumericAttribute()\n cls.iRotationOffset = rotationOffsetAttr.createPoint(\"rotationOffset\", \"ro\")\n cls.setMFnAttribute(rotationOffsetAttr)\n\n inAttributes.append(cls.iRotationOffset)\n\n # =======================================\n # Output Attribute\n # =======================================\n\n # =======================================\n # Add Attribute\n # =======================================\n for attribute in inAttributes + outAttributes:\n cls.addAttribute(attribute)\n\n # =======================================\n # Attribute dependencies\n # =======================================\n for outAttr in outAttributes:\n for inAttr in inAttributes:\n cls.attributeAffects(inAttr, outAttr)",
"def __init__(self, shape):\n\n self.shape = shape",
"def shape(self):\n return None",
"def _constraints_external(self):\n pass",
"def define(self, scope=None):\n declaration = self._declaration.define_with_args(self._name, typedef='complex_and_params', scope=scope)\n lines = [\n '/* EMG_WRAPPER {} */\\n'.format(self._name),\n declaration + \" {\\n\"\n ]\n lines.extend(['\\t{}\\n'.format(stm) for stm in self.body])\n lines.append(\"}\\n\")\n return lines",
"def __init__(self, *args):\n _ShapeBuild.ShapeBuild_ReShape_swiginit(self,_ShapeBuild.new_ShapeBuild_ReShape(*args))",
"def complex_type_factory(name, definition, schema):\n d = dict()\n basecls = None\n basedef = definition.basedef\n if basedef and basedef != ITSELF:\n basecls = complex_type_factory(basedef.name, basedef, schema)\n if definition.content_type.is_element_only():\n model = definition.content_type.partical.term\n complex_model(model, d, schema)\n complex_attributes(definition.attributes, d, schema)\n cls = type(name, (basecls or ComplexImp,), d)\n cls.definition = definition\n return cls",
"def model_definition(self):\n pass",
"def type_shapes(self):\n return self._type_shapes",
"def shape(self) -> str:\n return \"box\"",
"def addExternalModelDefinition(self, *args):\n return _libsbml.CompSBMLDocumentPlugin_addExternalModelDefinition(self, *args)",
"def boundary_lib(self) -> New_Style_Boundary:\n return self._boundary_lib",
"def __init__(self, polyhedron_parent):\n self._polyhedron_parent = polyhedron_parent\n self._base_ring = polyhedron_parent.base_ring()\n self._vector = polyhedron_parent.Hrepresentation_space()(0)\n self._A = polyhedron_parent.ambient_space()(0)\n self._b = polyhedron_parent.base_ring()(0)\n self._index = 0",
"def __init__(self, shape_id=None, shape_type=None, top=None, left=None, right=None, bottom=None, transliteration=None, style=None): # noqa: E501\n self.openapi_types = {\n 'shape_id': str,\n 'shape_type': str,\n 'top': int,\n 'left': int,\n 'right': int,\n 'bottom': int,\n 'transliteration': Transliteration,\n 'style': Style\n }\n\n self.attribute_map = {\n 'shape_id': 'shapeId',\n 'shape_type': 'shapeType',\n 'top': 'top',\n 'left': 'left',\n 'right': 'right',\n 'bottom': 'bottom',\n 'transliteration': 'transliteration',\n 'style': 'style'\n }\n\n self._shape_id = shape_id\n self._shape_type = shape_type\n self._top = top\n self._left = left\n self._right = right\n self._bottom = bottom\n self._transliteration = transliteration\n self._style = style",
"def clone(self):\n return _libsbml.ExternalModelDefinition_clone(self)",
"def test_loading_a_shape_linetype(self, sdoc):\n tdoc = ezdxf.new()\n # handles shouldn't be synchronized to the source document!\n forward_handles(tdoc, 7)\n assert (\n sdoc.styles.find_shx(\"ltypeshp.shx\").dxf.font == \"ltypeshp.shx\"\n ), \"expected ltypeshp.shx entry to exist in the source document\"\n\n loader = xref.Loader(sdoc, tdoc)\n loader.load_linetypes([\"square\"])\n loader.execute()\n assert document_has_no_errors(tdoc) is True\n\n ltype = tdoc.linetypes.get(\"square\")\n assert ltype.dxf.name == \"SQUARE\"\n # do not repeat more tests from test_loading_a_simple_layer()\n\n style = tdoc.styles.find_shx(\"ltypeshp.shx\")\n assert style.dxf.font == \"ltypeshp.shx\"\n pattern_style_handle = ltype.pattern_tags.get_style_handle()\n assert pattern_style_handle != \"0\"\n assert (\n pattern_style_handle == style.dxf.handle\n ), \"expected handle of shape-file 'ltypeshp.shx' as pattern style handle\""
] | [
"0.59475553",
"0.55709356",
"0.551432",
"0.5420617",
"0.53448313",
"0.5337472",
"0.53151894",
"0.5227796",
"0.52101886",
"0.52101886",
"0.520175",
"0.5200861",
"0.5182683",
"0.5145877",
"0.5120419",
"0.5115572",
"0.50709313",
"0.5045184",
"0.495258",
"0.49308845",
"0.49179575",
"0.4917056",
"0.4872561",
"0.4861996",
"0.484675",
"0.48394138",
"0.48375437",
"0.48347238",
"0.48154438",
"0.48047438"
] | 0.6451578 | 0 |
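The `external_shape` row above returns the fused tail geometry wrapped in a factor-1 (identity) scaled copy so that it satisfies the ExternalBody interface. A minimal sketch of that interface pattern in plain Python follows; the class and attribute names are illustrative and do not reproduce the framework's real API.

```python
# Illustrative interface pattern: a base class demands an external shape,
# and the compound tail fulfils it by exposing its joined geometry unchanged.
from abc import ABC, abstractmethod


class ExternalBody(ABC):
    @property
    @abstractmethod
    def external_shape(self):
        """Geometry this body exposes to the rest of the aircraft model."""


class CompoundTail(ExternalBody):
    def __init__(self, tail_joiner):
        self._tail_joiner = tail_joiner

    @property
    def external_shape(self):
        # A scale factor of 1 leaves the geometry unchanged; the wrapper only
        # re-labels the fused tail as this body's external surface.
        return self._tail_joiner


if __name__ == "__main__":
    print(CompoundTail("fused tail solids").external_shape)  # -> fused tail solids
```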
Find a cycle in a graph. s is a solver object, graph is an igraph object. Iterate over the graph, convert the keys and values to symbolic terms, and check for a cycle. Returns the unsat core as a list of strings, e.g. [1>2, 2>3, 3>4, 4>1]. I'm leaving this here just to try to get the abstract Hamiltonian cycle constraint working. | def find_all_cycles(s,graph):
grph = u.edge_to_list_dict(graph)
node_cnt = len(grph)
k = z.Int("k")
syms = [z.Int('node%s'%i) for i in range(node_cnt)]
# s.add(syms[0] == 0) # start node is a 0
s.add(k < node_cnt)
s.add(k > 1)
o = z.Optimize()
# for source, sinks in sgraph.s_adj_list():
for i in range(node_cnt):
s.add(syms[i] >= 0)
s.add(syms[i] <= k)
s.add(z.Or([syms[j] == ((syms[i] + 1) % k) for j in grph[i]]) == (syms[i] == 0))
r = []
m = []
# o.minimize(z.Sum([syms[i] for i in range(node_cnt)]))
s.add(z.Product([syms[i] for i in range(node_cnt)]) == 0)
done = False
while not done:
if s.check() == z.sat:
m = s.model()
r.append(m)
s.add(k != m[k])
else:
done = True
return r | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_eulerian_cycle(adj_dict, edges):\n if not adj_dict:\n return []\n\n checked = [False] * len(edges)\n list_keys = list(adj_dict.keys())\n for i in list_keys: # the first time will return true anyway\n cycle = []\n if dfs(i, adj_dict, edges, checked, cycle, i):\n return cycle\n return cycle",
"def find_already_eulerian_cycle(adj_dict, edges):\n if not adj_dict:\n return []\n\n checked = [False] * len(edges)\n list_keys = list(adj_dict.keys())\n for i in list_keys: # the first time will return true anyway\n cycle = [i]\n if dfs_eulerian(i, adj_dict, edges, checked, cycle, i):\n return cycle\n return cycle",
"def simple_cycles(g: Graph) -> List[List[Branch]]:\n # Make copy because the graph gets altered during the algorithm\n graph_copy = g.copy()\n branch_map = {}\n copy_result = list()\n\n # Create map to allow returning original branches\n for branch in g.branches:\n branch_map[branch.id] = branch\n\n # Yield every elementary cycle in python graph G exactly once\n # Expects a dictionary mapping from vertices to iterables of vertices\n def _unblock(thisnode, blocked, B):\n stack = set([thisnode])\n while stack:\n node = stack.pop()\n if node in blocked:\n blocked.remove(node)\n stack.update(B[node])\n B[node].clear()\n sccs = [(graph_copy, scc) for scc in\n strongly_connected_components(graph_copy)]\n while sccs:\n current_graph, scc = sccs.pop()\n startnode = scc.pop()\n path = [startnode.id]\n pathBranches = []\n blocked = set()\n closed = set()\n blocked.add(startnode.id)\n B = defaultdict(set)\n stack = [(startnode, list(startnode.outgoing))]\n while stack:\n thisnode, nbrs = stack[-1]\n if nbrs:\n branch = nbrs.pop()\n nextnode = branch.end\n if nextnode.id == startnode.id:\n result = pathBranches[:]\n result.append(branch)\n copy_result.append(result)\n closed.update(path)\n elif nextnode.id not in blocked:\n path.append(nextnode.id)\n pathBranches.append(branch)\n stack.append((nextnode,\n list(nextnode.outgoing)))\n closed.discard(nextnode.id)\n blocked.add(nextnode.id)\n continue\n if not nbrs:\n if thisnode.id in closed:\n _unblock(thisnode.id, blocked, B)\n else:\n for nbr in map(lambda x: x.end,\n thisnode.outgoing):\n if thisnode.id not in B[nbr.id]:\n B[nbr.id].add(thisnode.id)\n stack.pop()\n path.pop()\n if (pathBranches):\n pathBranches.pop()\n startnode.remove()\n subgraph = current_graph.subgraph(set(scc))\n new_scc = strongly_connected_components(subgraph)\n sccs.extend([(subgraph, scc) for scc in new_scc])\n\n for loop in copy_result:\n yield list(map(lambda b: branch_map[b.id], loop))",
"def _cycled_detail_from_graph(graph, cycled_edge):\n\n messages = [\"Resolve paths starting from initial requests to cycle:\"]\n\n for init_request in _iter_init_request_nodes(graph):\n node = init_request\n visited = list()\n while True:\n visited.append(node)\n down = next((ne for ne in graph.node_neighbors[node]), None)\n if down in cycled_edge:\n visited.append(down)\n break\n if down is None:\n break\n\n node = down\n\n line = \" %s\" % _get_node_label(graph, visited[0]) # init request\n for node in visited[1:]:\n # should be more readable if opt-out requests\n if not _is_request_node(graph, node):\n line += \" --> %s\" % _get_node_label(graph, node)\n\n messages.append(line)\n\n return \"\\n\".join(messages)",
"def _contains_cycle(fgraph, orderings):\r\n\r\n # These are lists of Variable instances\r\n inputs = fgraph.inputs\r\n outputs = fgraph.outputs\r\n\r\n\r\n # this is hard-coded reimplementation of functions from graph.py\r\n # reason: go faster, prepare for port to C.\r\n # specifically, it could be replaced with a wrapper\r\n # around graph.io_toposort that returns True iff io_toposort raises\r\n # a ValueError containing the substring 'cycle'.\r\n # This implementation is optimized for the destroyhandler and runs\r\n # slightly faster than io_toposort.\r\n\r\n # this is performance-critical code. it is the largest single-function\r\n # bottleneck when compiling large graphs.\r\n\r\n assert isinstance(outputs, (tuple, list, deque))\r\n\r\n # TODO: For more speed - use a defaultdict for the orderings\r\n # (defaultdict runs faster than dict in the case where the key\r\n # is not in the dictionary, at least in CPython)\r\n\r\n iset = set(inputs)\r\n\r\n # IG: I tried converting parent_counts to use an id for the key,\r\n # so that the dict would do reference counting on its keys.\r\n # This caused a slowdown.\r\n # Separate benchmark tests showed that calling id is about\r\n # half as expensive as a dictionary access, and that the\r\n # dictionary also runs slower when storing ids than when\r\n # storing objects.\r\n\r\n\r\n # dict mapping an Apply or Variable instance to the number\r\n # of its parents (including parents imposed by orderings)\r\n # that haven't been visited yet\r\n parent_counts = {}\r\n # dict mapping an Apply or Variable instance to its children\r\n node_to_children = {}\r\n\r\n # visitable: A container holding all Variable and Apply instances\r\n # that can currently be visited according to the graph topology\r\n # (ie, whose parents have already been visited)\r\n # TODO: visitable is a fifo_queue. could this run faster if we\r\n # implement it as a stack rather than a deque?\r\n # TODO: visitable need not be a fifo_queue, any kind of container\r\n # that we can throw things into and take things out of quickly will\r\n # work. is there another kind of container that could run faster?\r\n # we don't care about the traversal order here as much as we do\r\n # in io_toposort because we aren't trying to generate an ordering\r\n # on the nodes\r\n visitable = deque()\r\n\r\n # IG: visitable could in principle be initialized to fgraph.inputs\r\n # + fgraph.orphans... 
if there were an fgraph.orphans structure.\r\n # I tried making one and maintaining it caused a huge slowdown.\r\n # This may be because I made it a list, so it would have a\r\n # deterministic iteration order, in hopes of using it to speed\r\n # up toposort as well.\r\n # I think since we need to scan through all variables and nodes\r\n # to make parent_counts anyway, it's cheap enough to always\r\n # detect orphans at cycle detection / toposort time\r\n\r\n\r\n # Pass through all the nodes to build visitable, parent_count, and\r\n # node_to_children\r\n for var in fgraph.variables:\r\n\r\n # this is faster than calling get_parents\r\n owner = var.owner\r\n if owner:\r\n parents = [ owner ]\r\n else:\r\n parents = []\r\n\r\n # variables don't appear in orderings, so we don't need to worry\r\n # about that here\r\n\r\n if parents:\r\n for parent in parents:\r\n # insert node in node_to_children[r]\r\n # (if r is not already in node_to_children,\r\n # intialize it to [])\r\n node_to_children.setdefault(parent, []).append(var)\r\n parent_counts[var] = len(parents)\r\n else:\r\n visitable.append(var)\r\n parent_counts[var] = 0\r\n\r\n for a_n in fgraph.apply_nodes:\r\n parents = list(a_n.inputs)\r\n # This is faster than conditionally extending\r\n # IG: I tried using a shared empty_list = [] constructed\r\n # outside of the for loop to avoid constructing multiple\r\n # lists, but this was not any faster.\r\n parents.extend(orderings.get(a_n, []))\r\n\r\n if parents:\r\n for parent in parents:\r\n # insert node in node_to_children[r]\r\n # (if r is not already in node_to_children,\r\n # intialize it to [])\r\n node_to_children.setdefault(parent, []).append(a_n)\r\n parent_counts[a_n] = len(parents)\r\n else:\r\n # an Apply with no inputs would be a weird case, but I'm\r\n # not sure we forbid it\r\n visitable.append(a_n)\r\n parent_counts[a_n] = 0\r\n\r\n # at this point,\r\n # parent_counts.keys() == fgraph.apply_nodes + fgraph.variables\r\n\r\n\r\n\r\n # Now we actually check for cycles\r\n # As long as there are nodes that can be visited while respecting\r\n # the topology, we keep visiting nodes\r\n # If we run out of visitable nodes and we haven't visited all nodes,\r\n # then there was a cycle. It blocked the traversal because some\r\n # node couldn't be visited until one of its descendants had been\r\n # visited too.\r\n # This is a standard cycle detection algorithm.\r\n\r\n visited = 0\r\n while visitable:\r\n # Since each node is inserted into the visitable queue exactly\r\n # once, it comes out of the queue exactly once\r\n # That means we can decrement its children's unvisited parent count\r\n # and increment the visited node count without double-counting\r\n node = visitable.popleft()\r\n visited += 1\r\n for client in node_to_children.get(node,[]):\r\n parent_counts[client] -= 1\r\n # If all of a node's parents have been visited,\r\n # it may now be visited too\r\n if not parent_counts[client]:\r\n visitable.append(client)\r\n\r\n\r\n return visited != len(parent_counts)",
"def is_cyclic_graph(graph):\n\n path_taken, visited_nodes, result = [], [], False\n nodes_in_graph = list(graph.keys())\n # check if the first node in list in visited\n result, path_taken, visited_nodes = node_is_visited(graph,\n nodes_in_graph[0],\n path_taken,\n visited_nodes)\n list_results = [result]\n graph_size, results_size = len(nodes_in_graph), len(list_results)\n\n # iterate over each node till all nodes are visited\n while not result and graph_size == results_size:\n for node in nodes_in_graph[1:]:\n result, path_taken, visited_nodes = \\\n node_is_visited(graph, node, path_taken, visited_nodes)\n list_results.append(result)\n results_size = len(list_results)\n return result",
"def scc(graph, reverse_graph, nodes):\n\n out = defaultdict(list)\n track = Track()\n dfs_loop(reverse_graph, nodes, track)\n sorted_nodes = sorted(track.finish_time,\n key=track.finish_time.get, reverse=True)\n track.current_time = 0\n track.current_source = None\n track.explored = set()\n dfs_loop(graph, sorted_nodes, track)\n for lead, vertex in groupby(sorted(track.leader, key=track.leader.get),\n key=track.leader.get):\n out[lead] = list(vertex)\n\n return out",
"def MFAS_set_cover(s,graph):\n\n ## initialization\n m = graph.ecount()\n cycle_matrix = u.mk_cycle_matrix(u.find_all_cycles(graph), m)\n n, c = graph.get_adjacency().shape\n num_cycles = len(cycle_matrix)\n edge_list = graph.get_edgelist()\n sym_to_edge_cache = {}\n edge_to_sym_cache = {}\n sum_var = 'y'\n\n\n def symbolize(i,j):\n \"given two indices, create a symbolic variable\"\n new = z.Int('{0}->{1}'.format(i,j))\n return new\n\n\n def constraint_1(i,s_edge):\n \"\"\" Multiply the edge by its corresponding value in the cycle matrix\n \"\"\"\n edge = sym_to_edge_cache[s_edge]\n value = 0\n if edge in cycle_matrix[i]:\n value = cycle_matrix[i][edge]\n\n return (value * s_edge)\n\n\n ## symbolize the edges\n for source,sink in edge_list:\n s_edge = symbolize(source, sink)\n ## an edge is either a 0 or a 1\n s.add(z.Or([s_edge == 0, s_edge == 1]))\n\n sym_to_edge_cache[s_edge] = (source,sink)\n edge_to_sym_cache[(source,sink)] = s_edge\n\n\n ## Perform constraint 1 and add it to the solver instance\n for i in range(num_cycles):\n s.add(z.Sum([constraint_1(i,s_edge)\n for s_edge in sym_to_edge_cache.keys()]) >= 1)\n\n\n ## we want the smallest y possible\n s.minimize(z.Sum([s_edge for s_edge in sym_to_edge_cache.keys()]))\n\n s.check()\n return s.model()",
"def cc_visited(ugraph):\n remain = set(ugraph.keys())\n conn_comp = []\n while remain:\n node = remain.pop()\n visited = bfs_visited(ugraph, node)\n conn_comp.append(visited)\n remain = remain.difference(visited)\n return conn_comp",
"def find_cycles(graph, from_node = None):\n cycle = []\n if not from_node:\n from_node = graph.keys()[0]\n \n keep_reading = True\n while keep_reading:\n to_nodes = graph[from_node]\n #pick first approach\n if len(to_nodes) >= 1:\n \n interaction = find_next(to_nodes, from_node)\n if interaction:\n if len(to_nodes) > 1:\n split.append(from_node)\n used_edges.append(interaction)\n cycle.append(interaction)\n \n from_node = interaction[1]\n \n else:\n keep_reading = False\n else:\n keep_reading = False\n\n\n if check_cycle(cycle):\n cycles.append(cycle)\n\n return split",
"def cc_visited(ugraph):\r\n\tremaining_node = ugraph.keys()\t\t#The keys are accessible directly.\r\n\t\r\n\tcon_com = [] #connected component\r\n\twhile len(remaining_node) != 0 :\r\n\t\tnode = random.choice(remaining_node)\r\n\t\tvisited = bfs_visited(ugraph,node)\r\n\t\tcon_com.append(visited)\r\n\t\tfor item in visited:\r\n\t\t\tremaining_node.remove(item)\r\n\treturn con_com",
"def process_graph(graph):\n init_globals()\n\n # If the graph contains integers this is triggered.\n # See the check_graph() function description for more information\n if type(graph.keys()[0]) == int:\n graph = check_graph(graph)\n\n \n balances = get_node_balances(graph)\n\n\n if is_eulerian(balances):\n splits = find_cycles(graph, sorted(graph.keys())[0])\n for node in splits:\n find_cycles(graph, node)\n\n print(\"graph is eulerian:\\n{}\\n\".format(\n \"->\".join(merge_cycles(cycles))))\n\n\n else:\n print(\"graph is not eulerian\")\n\n if has_eulerian_path(balances):\n print(\"graph has an eulerian path:\\n{}\\n\".format(\n \"->\".join(get_eulerian_path(graph, balances))))\n else:\n print(\"graph has no eulerian path\")",
"def dfs(\n graph: Mapping[T, Iterable[T]], u: T, v: T = None, raise_cycle: bool = False\n) -> Union[None, List[T]]:\n if v is None:\n raise_cycle = True\n if not raise_cycle and (u not in graph or v not in graph):\n return None\n is_active = {}\n path = []\n stack = [u]\n while stack:\n node = stack.pop()\n if isinstance(node, Node):\n is_active[node.value] = False\n if path[-1] != node.value:\n raise RuntimeError(stack=stack + [node], path=path)\n path.pop()\n continue\n path.append(node)\n if node == v:\n return path\n is_active[node] = True\n stack.append(Node(node))\n for child in graph.get(node, []):\n if is_active.get(child) is None:\n stack.append(child)\n elif not is_active[child]:\n continue\n else:\n cycle = [node]\n while stack and (len(cycle) < 2 or cycle[-1] != node):\n prev = stack.pop()\n if not isinstance(prev, Node):\n continue\n if cycle[-1] in graph.get(prev.value, []):\n cycle.append(prev.value)\n raise CycleDFSError(cycle=cycle[::-1])",
"def E_cycle(adj_dict, num_edges, num_lines):\n\n #current_node = 0 #arbitrarily choose node 0 to start\n\n #set current_node as key in adj_dict when using strings instead of node numbers\n keys = adj_dict.keys()\n current_node = keys[0]\n\n path = []\n seen_edges = 0\n seen_and_extra_edges = [] #for backtracking\n\n while seen_edges != num_edges:\n if len(adj_dict[current_node]) != 0:\n #if there is another outgoing edge\n path.append(current_node)\n next_node = adj_dict[current_node][0] #get the next unseen edge\n adj_dict[current_node].remove(next_node)\n #remove edge so that it won't be visited twice\n if len(adj_dict[current_node]) !=0:\n #if there is another outgoing edge, add it to backtracking list\n seen_and_extra_edges.append(current_node)\n seen_edges +=1\n current_node = next_node\n else:\n #made a bad choice, need to start a new sub-cycle\n #print(seen_and_extra_edges)\n #print(path)\n current_node = seen_and_extra_edges[0]\n seen_and_extra_edges.remove(current_node)\n\n #put the previous sub-cycle into the path\n temp_path = []\n new_start = path.index(current_node)\n temp_path = path[new_start:] #from the restart node to the end\n temp_path += path[:new_start] #from the beginning to the restart node\n path = temp_path\n\n #append the last elt\n source = path[0]\n path+=[source]\n return path",
"def cc_visited(ugraph):\n \n remaining = set(ugraph.keys())\n ccomp = []\n while len(remaining) > 0:\n node = remaining.pop()\n visited = bfs_visited(ugraph,node)\n ccomp.append(visited)\n remaining.difference_update(visited)\n \n return ccomp",
"def figurate_cycles(*s_vals):\n assert len(s_vals) > 1 #incomplete sanity check\n # Since a DFS has to start SOMEWHERE and we're looking for cycles, we\n # arbitrarily take the first list of figurates and use them as the\n # roots of our search.\n roots = figurate_list(s_vals[0])\n # Make a big list of all the rest of the figurate numbers\n candidates = []\n for s in s_vals[1:]:\n candidates.extend(figurate_list(s))\n answer = []\n # Perform a cycle-detecting DFS for every root in our list\n for root in roots:\n for cycle in find_all_cycles(candidates, root):\n answer.append(cycle)\n return answer",
"def stronglyConnectedComponents(graph):\n indexCounter = [0]\n stack = []\n lowLinks = {}\n index = {}\n result = []\n\n def strongConnect(node):\n index[node] = indexCounter[0]\n lowLinks[node] = indexCounter[0]\n indexCounter[0] += 1\n stack.append(node)\n\n try:\n successors = graph[node]\n except:\n successors = []\n for successor in successors:\n if successor not in lowLinks:\n # Successor has not yet been visited; recurse on it\n strongConnect(successor)\n lowLinks[node] = min(lowLinks[node], lowLinks[successor])\n elif successor in stack:\n # the successor is in the stack and hence in the current SCC\n lowLinks[node] = min(lowLinks[node], index[successor])\n\n # If `node` is a root node, pop the stack and generate an SCC\n if lowLinks[node] == index[node]:\n connectedComponent = []\n\n while True:\n successor = stack.pop()\n connectedComponent.append(successor)\n if successor == node:\n break\n component = tuple(connectedComponent)\n result.append(component)\n\n for node in graph:\n if node not in lowLinks:\n strongConnect(node)\n\n return result",
"def _check_cycles(self, graph):\n if list(nx.simple_cycles(graph)):\n raise AbstractGraphError('Cycles in graph')",
"def get_eulerian_path(graph, balances):\n init_globals()\n\n start_key = None\n to_key = None\n\n # Find the semi-balanced node keys\n for key,value in balances.items():\n if value == 1:\n start_key = key\n elif value == -1:\n to_key = key\n\n # If there are semi-balanced node keys, an edge is artificially added\n # between these two nodes.\n was_eulerian = True\n if to_key and start_key:\n was_eulerian = False\n graph[start_key].append(to_key)\n\n splits = find_cycles(graph, start_key)\n\n # This is probably not necessary.\n for node in splits:\n find_cycles(graph, node)\n\n\n raw_path = merge_cycles(cycles)\n \n if was_eulerian: # Then the path will be equal to the cycle\n return \"\".join(raw_path)\n else: # We have to remove the artificially added node \n path = \"\".join(raw_path)\n return \"\".join(path.split(start_key+to_key)[::-1])",
"def find_cycle(self):\n # from guido's blog :\n # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html\n worklist = set(self.successors)\n while worklist:\n stack = [worklist.pop()]\n while stack:\n top = stack[-1]\n for node in self.successors.get(top, ()):\n try:\n # raises ValueError if node is not in stack.\n cycle = stack[stack.index(node) :]\n succs = dict(\n (source, [cycle[(i + 1) % len(cycle)]])\n for i, source in enumerate(cycle)\n )\n return Digraph(succs, self.get_score, self.get_label)\n except ValueError:\n pass\n if node in worklist:\n stack.append(node)\n worklist.remove(node)\n break\n else:\n stack.pop()\n return None",
"def test_cycles():\n graph = Graph()\n for one, two in [(1, 2), (2, 3), (3, 1)]:\n graph.add_edge(one, two)\n cycles = list(graph.find_cycles())\n eq_(len(cycles), 1)\n eq_(cycles[0], [1, 2, 3])",
"def has_cycle(graph):\n ds = DisjointSet()\n\n # creates a set of all graph nodes\n node_set = set()\n for edge in graph:\n node_set.add(edge.node1)\n node_set.add(edge.node2)\n\n for item in node_set:\n ds.make_set(item)\n\n for edge in graph:\n same_set = ds.union(edge.node1, edge.node2)\n if same_set:\n return True\n\n return False",
"def strongly_connected_components(graph):\n \n result = [ ]\n stack = [ ]\n low = { }\n \n def visit(node):\n if node in low: return\n\t\n num = len(low)\n low[node] = num\n stack_pos = len(stack)\n stack.append(node)\n\t\n for successor in graph[node]:\n visit(successor)\n low[node] = min(low[node], low[successor])\n \n if num == low[node]:\n component = tuple(stack[stack_pos:])\n del stack[stack_pos:]\n result.append(component)\n for item in component:\n low[item] = len(graph)\n \n for node in graph:\n visit(node)\n \n return result",
"def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:\n\n # Initialize path with -1, indicating that we have not visited them yet\n path = [-1] * (len(graph) + 1)\n # initialize start and end of path with starting index\n path[0] = path[-1] = start_index\n # evaluate and if we find answer return path either return empty array\n return path if util_hamilton_cycle(graph, path, 1) else []",
"def path(g): #g: graph\n marked = set()\n nodes = set(g.nodes) \n output = list()\n def recursive(g):\n for i in nodes.copy():\n d = dependents(g,i)\n if (not d) or all(dd in marked for dd in d):\n output.append((i,g.nodes[i]['word']))\n marked.add(i)\n nodes.remove(i)\n if nodes==set([0]):\n break\n recursive(g)\n break\n recursive(g)\n return output",
"def decode_MST(energies, lengths, leading_symbolic=0, labeled=True):\n\n def find_cycle(par):\n added = np.zeros([length], np.bool)\n added[0] = True\n cycle = set()\n findcycle = False\n for i in range(1, length):\n if findcycle:\n break\n\n if added[i] or not curr_nodes[i]:\n continue\n\n # init cycle\n tmp_cycle = set()\n tmp_cycle.add(i)\n added[i] = True\n findcycle = True\n l = i\n\n while par[l] not in tmp_cycle:\n l = par[l]\n if added[l]:\n findcycle = False\n break\n added[l] = True\n tmp_cycle.add(l)\n\n if findcycle:\n lorg = l\n cycle.add(lorg)\n l = par[lorg]\n while l != lorg:\n cycle.add(l)\n l = par[l]\n break\n\n return findcycle, cycle\n\n def chuLiuEdmonds():\n par = np.zeros([length], dtype=np.int32)\n # create best graph\n par[0] = -1\n for i in range(1, length):\n # only interested at current nodes\n if curr_nodes[i]:\n max_score = score_matrix[0, i]\n par[i] = 0\n for j in range(1, length):\n if j == i or not curr_nodes[j]:\n continue\n\n new_score = score_matrix[j, i]\n if new_score > max_score:\n max_score = new_score\n par[i] = j\n\n # find a cycle\n findcycle, cycle = find_cycle(par)\n # no cycles, get all edges and return them.\n if not findcycle:\n final_edges[0] = -1\n for i in range(1, length):\n if not curr_nodes[i]:\n continue\n\n pr = oldI[par[i], i]\n ch = oldO[par[i], i]\n final_edges[ch] = pr\n return\n\n cyc_len = len(cycle)\n cyc_weight = 0.0\n cyc_nodes = np.zeros([cyc_len], dtype=np.int32)\n id = 0\n for cyc_node in cycle:\n cyc_nodes[id] = cyc_node\n id += 1\n cyc_weight += score_matrix[par[cyc_node], cyc_node]\n\n rep = cyc_nodes[0]\n for i in range(length):\n if not curr_nodes[i] or i in cycle:\n continue\n\n max1 = float(\"-inf\")\n wh1 = -1\n max2 = float(\"-inf\")\n wh2 = -1\n\n for j in range(cyc_len):\n j1 = cyc_nodes[j]\n if score_matrix[j1, i] > max1:\n max1 = score_matrix[j1, i]\n wh1 = j1\n\n scr = cyc_weight + score_matrix[i, j1] - score_matrix[par[j1], j1]\n\n if scr > max2:\n max2 = scr\n wh2 = j1\n\n score_matrix[rep, i] = max1\n oldI[rep, i] = oldI[wh1, i]\n oldO[rep, i] = oldO[wh1, i]\n score_matrix[i, rep] = max2\n oldO[i, rep] = oldO[i, wh2]\n oldI[i, rep] = oldI[i, wh2]\n\n rep_cons = []\n for i in range(cyc_len):\n rep_cons.append(set())\n cyc_node = cyc_nodes[i]\n for cc in reps[cyc_node]:\n rep_cons[i].add(cc)\n\n for i in range(1, cyc_len):\n cyc_node = cyc_nodes[i]\n curr_nodes[cyc_node] = False\n for cc in reps[cyc_node]:\n reps[rep].add(cc)\n\n chuLiuEdmonds()\n\n # check each node in cycle, if one of its representatives is a key in the final_edges, it is the one.\n found = False\n wh = -1\n for i in range(cyc_len):\n for repc in rep_cons[i]:\n if repc in final_edges:\n wh = cyc_nodes[i]\n found = True\n break\n if found:\n break\n\n l = par[wh]\n while l != wh:\n ch = oldO[par[l], l]\n pr = oldI[par[l], l]\n final_edges[ch] = pr\n l = par[l]\n\n if labeled:\n assert energies.ndim == 4, 'dimension of energies is not equal to 4'\n else:\n assert energies.ndim == 3, 'dimension of energies is not equal to 3'\n input_shape = energies.shape\n batch_size = input_shape[0]\n max_length = input_shape[2]\n\n pars = np.zeros([batch_size, max_length], dtype=np.int32)\n arc_tags = np.zeros([batch_size, max_length], dtype=np.int32) if labeled else None\n for i in range(batch_size):\n energy = energies[i]\n\n # calc the real length of this instance\n length = lengths[i]\n\n # calc real energy matrix shape = [length, length, num_labels - #symbolic] (remove the label for symbolic arcs).\n if labeled:\n energy = energy[leading_symbolic:, 
:length, :length]\n # get best label for each edge.\n label_id_matrix = energy.argmax(axis=0) + leading_symbolic\n energy = energy.max(axis=0)\n else:\n energy = energy[:length, :length]\n label_id_matrix = None\n # get original score matrix\n orig_score_matrix = energy\n # initialize score matrix to original score matrix\n score_matrix = np.array(orig_score_matrix, copy=True)\n\n oldI = np.zeros([length, length], dtype=np.int32)\n oldO = np.zeros([length, length], dtype=np.int32)\n curr_nodes = np.zeros([length], dtype=np.bool)\n reps = []\n\n for s in range(length):\n orig_score_matrix[s, s] = 0.0\n score_matrix[s, s] = 0.0\n curr_nodes[s] = True\n reps.append(set())\n reps[s].add(s)\n for t in range(s + 1, length):\n oldI[s, t] = s\n oldO[s, t] = t\n\n oldI[t, s] = t\n oldO[t, s] = s\n\n final_edges = dict()\n chuLiuEdmonds()\n par = np.zeros([max_length], np.int32)\n if labeled:\n arc_tag = np.ones([max_length], np.int32)\n arc_tag[0] = 0\n else:\n arc_tag = None\n\n for ch, pr in final_edges.items():\n par[ch] = pr\n if labeled and ch != 0:\n arc_tag[ch] = label_id_matrix[pr, ch]\n\n par[0] = 0\n pars[i] = par\n if labeled:\n arc_tags[i] = arc_tag\n\n return pars, arc_tags",
"def discreteComplexDecomposeGraph(self,graph):\n s=[complex(*graph[i]) for i in range(len(graph))]\n N=len(s)\n M=self.coefficients_number\n d=0\n c=[]\n for k in range(-M//2,M//2):\n d+=sum([s[n]*cmath.exp(2j*cmath.pi*k*n/N) for n in range(N)])/N\n c.append(d)\n return c",
"def graph_decomposition(graph = None):\n\tg = graph.copy()\n\ttag_break_points(graph = g)\n\tstems_list = stems(graph = g)\n\tremove_stems(graph = g)\n\n\tcomponent_dict = loops(graph = g)\n\tcomponent_dict['stem'] = stems_list\n\n\thairpinloops = len(component_dict['hairpinloop'])\n\tbulges = len(component_dict['bulge'])\n\tinternal_loops = len(component_dict['internal_loop'])\n\tmultiloop3s = len(component_dict['multiloop3'])\n\tmultiloop4s = len(component_dict['multiloop4'])\n\tmultiloop5s = len(component_dict['multiloop5'])\n\tdangling_ends = len(component_dict['dangling_end'])\n\tstem = len(component_dict['stem'])\n\tseq_length = nx.number_of_nodes(graph)\n\n\tcomponent_counter = {'hairpinloop':hairpinloops , 'bulge':bulges , 'internal_loop':internal_loops , \\\n\t\t\t\t\t\t'multiloop3':multiloop3s , 'multiloop4':multiloop4s , 'multiloop5':multiloop5s, 'dangling_end':dangling_ends , \\\n\t\t\t\t\t\t'stem':stem, 'size':seq_length}\n\treturn component_dict, component_counter",
"def strongly_connected_component_subgraphs(G):\n cc=strongly_connected_components(G)\n graph_list=[]\n for c in cc:\n graph_list.append(G.subgraph(c))\n return graph_list",
"def contract(self, cycle):\n # create a new id to represent the cycle in the resulting graph.\n new_id = Digraph.new_node_id\n Digraph.new_node_id += 1\n\n # we store links that cross into and out of the cycle in these maps. the\n # to_cycle map contains links reaching into the cycle, and is thus a map\n # from each target node in the cycle to a list of source nodes that\n # reach that target from outside the cycle. the from_cycle map contains\n # links going out from the cycle, and is thus a map from each source\n # node in the cycle to a list of target nodes outside the cycle.\n to_cycle = collections.defaultdict(list)\n from_cycle = collections.defaultdict(list)\n\n scores = {}\n labels = {}\n succs = collections.defaultdict(list)\n for source, target in self.iteredges():\n if source in cycle:\n if target not in cycle:\n from_cycle[target].append(source)\n elif target in cycle:\n # we know source is not in cycle from above.\n to_cycle[source].append(target)\n else:\n succs[source].append(target)\n succs[target]\n scores[source, target] = self.get_score(source, target)\n labels[source, target] = self.get_label(source, target)\n\n old_edges = collections.defaultdict(list)\n\n # for each target in our graph that's reachable from the cycle, add an\n # edge from our new node to that target, with an appropriate score.\n for target, sources in from_cycle.items():\n succs[new_id].append(target)\n max_score = -1e100\n max_source = None\n for s in sources:\n score = self.get_score(s, target)\n if score > max_score:\n max_score = score\n max_source = s\n old_edges[max_source].append(target)\n scores[new_id, target] = max_score\n labels[new_id, target] = self.get_label(max_source, target)\n\n # before we handle the to_cycle map, we need to build some convenience\n # information for the cycle -- total score, and predecessor edges.\n pred = {}\n cycle_score = 0\n for s, t in cycle.iteredges():\n pred[t] = s\n cycle_score += self.get_score(s, t)\n\n # for each source in our graph that reaches into the cycle, add an edge\n # from the source to our new node, with an appropriate edge score.\n for source, targets in to_cycle.items():\n succs[source].append(new_id)\n max_score = -1e100\n max_target = None\n for t in targets:\n score = self.get_score(source, t) - self.get_score(pred[t], t)\n if score > max_score:\n max_score = score\n max_target = t\n old_edges[source].append(max_target)\n scores[source, new_id] = cycle_score + max_score\n labels[source, new_id] = self.get_label(source, max_target)\n\n return (\n new_id,\n old_edges,\n Digraph(\n succs, lambda s, t: scores[s, t], lambda s, t: labels[s, t]\n ),\n )"
] | [
"0.6317372",
"0.611582",
"0.5957695",
"0.5797448",
"0.57344425",
"0.5620256",
"0.56129",
"0.5610811",
"0.5604997",
"0.5584068",
"0.5572033",
"0.5533192",
"0.5509296",
"0.550141",
"0.5495953",
"0.5464865",
"0.5461448",
"0.54586077",
"0.5360699",
"0.5358017",
"0.5301723",
"0.5261275",
"0.5236765",
"0.5231112",
"0.5219735",
"0.52143747",
"0.52125645",
"0.5188548",
"0.5180368",
"0.51779664"
] | 0.7576485 | 0 |
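The highest-scoring negatives above are hand-rolled variants of Tarjan's strongly connected components. When comparing such implementations, networkx (assumed installed; it already appears in several of the negatives) gives a convenient reference answer on a small digraph; the graph below is an illustrative choice, not part of the record:

import networkx as nx

G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])          # one 3-cycle plus a tail vertex
sccs = [frozenset(c) for c in nx.strongly_connected_components(G)]
assert set(sccs) == {frozenset({1, 2, 3}), frozenset({4})}
assert nx.is_directed_acyclic_graph(nx.condensation(G))   # contracting SCCs always yields a DAG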
Find the minimum feedback arc set by encoding it as a minimum set cover. The encoding requires a cycle matrix, which we compute externally to the SAT solver | def MFAS_set_cover(s,graph):
## initialization
m = graph.ecount()
cycle_matrix = u.mk_cycle_matrix(u.find_all_cycles(graph), m)
n, c = graph.get_adjacency().shape
num_cycles = len(cycle_matrix)
edge_list = graph.get_edgelist()
sym_to_edge_cache = {}
edge_to_sym_cache = {}
sum_var = 'y'
def symbolize(i,j):
"given two indices, create a symbolic variable"
new = z.Int('{0}->{1}'.format(i,j))
return new
def constraint_1(i,s_edge):
""" Multiply the edge by its corresponding value in the cycle matrix
"""
edge = sym_to_edge_cache[s_edge]
value = 0
if edge in cycle_matrix[i]:
value = cycle_matrix[i][edge]
return (value * s_edge)
## symbolize the edges
for source,sink in edge_list:
s_edge = symbolize(source, sink)
## an edge is either a 0 or a 1
s.add(z.Or([s_edge == 0, s_edge == 1]))
sym_to_edge_cache[s_edge] = (source,sink)
edge_to_sym_cache[(source,sink)] = s_edge
## Perform constraint 1 and add it to the solver instance
for i in range(num_cycles):
s.add(z.Sum([constraint_1(i,s_edge)
for s_edge in sym_to_edge_cache.keys()]) >= 1)
## we want the smallest y possible
s.minimize(z.Sum([s_edge for s_edge in sym_to_edge_cache.keys()]))
s.check()
return s.model() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dauxmin_cc_piece(x,k_ind,m_ind):\n g_cc = np.zeros_like(x)\n if cfg.min_line[k_ind,m_ind] == cfg.jk[k_ind]-1:\n g_cc[0:cfg.nfea-1]=cfg.a[m_ind,:cfg.nfea-1] \n g_cc[cfg.nfea-1] = 1.0\n \n return g_cc",
"def decode_MST(energies, lengths, leading_symbolic=0, labeled=True):\n\n def find_cycle(par):\n added = np.zeros([length], np.bool)\n added[0] = True\n cycle = set()\n findcycle = False\n for i in range(1, length):\n if findcycle:\n break\n\n if added[i] or not curr_nodes[i]:\n continue\n\n # init cycle\n tmp_cycle = set()\n tmp_cycle.add(i)\n added[i] = True\n findcycle = True\n l = i\n\n while par[l] not in tmp_cycle:\n l = par[l]\n if added[l]:\n findcycle = False\n break\n added[l] = True\n tmp_cycle.add(l)\n\n if findcycle:\n lorg = l\n cycle.add(lorg)\n l = par[lorg]\n while l != lorg:\n cycle.add(l)\n l = par[l]\n break\n\n return findcycle, cycle\n\n def chuLiuEdmonds():\n par = np.zeros([length], dtype=np.int32)\n # create best graph\n par[0] = -1\n for i in range(1, length):\n # only interested at current nodes\n if curr_nodes[i]:\n max_score = score_matrix[0, i]\n par[i] = 0\n for j in range(1, length):\n if j == i or not curr_nodes[j]:\n continue\n\n new_score = score_matrix[j, i]\n if new_score > max_score:\n max_score = new_score\n par[i] = j\n\n # find a cycle\n findcycle, cycle = find_cycle(par)\n # no cycles, get all edges and return them.\n if not findcycle:\n final_edges[0] = -1\n for i in range(1, length):\n if not curr_nodes[i]:\n continue\n\n pr = oldI[par[i], i]\n ch = oldO[par[i], i]\n final_edges[ch] = pr\n return\n\n cyc_len = len(cycle)\n cyc_weight = 0.0\n cyc_nodes = np.zeros([cyc_len], dtype=np.int32)\n id = 0\n for cyc_node in cycle:\n cyc_nodes[id] = cyc_node\n id += 1\n cyc_weight += score_matrix[par[cyc_node], cyc_node]\n\n rep = cyc_nodes[0]\n for i in range(length):\n if not curr_nodes[i] or i in cycle:\n continue\n\n max1 = float(\"-inf\")\n wh1 = -1\n max2 = float(\"-inf\")\n wh2 = -1\n\n for j in range(cyc_len):\n j1 = cyc_nodes[j]\n if score_matrix[j1, i] > max1:\n max1 = score_matrix[j1, i]\n wh1 = j1\n\n scr = cyc_weight + score_matrix[i, j1] - score_matrix[par[j1], j1]\n\n if scr > max2:\n max2 = scr\n wh2 = j1\n\n score_matrix[rep, i] = max1\n oldI[rep, i] = oldI[wh1, i]\n oldO[rep, i] = oldO[wh1, i]\n score_matrix[i, rep] = max2\n oldO[i, rep] = oldO[i, wh2]\n oldI[i, rep] = oldI[i, wh2]\n\n rep_cons = []\n for i in range(cyc_len):\n rep_cons.append(set())\n cyc_node = cyc_nodes[i]\n for cc in reps[cyc_node]:\n rep_cons[i].add(cc)\n\n for i in range(1, cyc_len):\n cyc_node = cyc_nodes[i]\n curr_nodes[cyc_node] = False\n for cc in reps[cyc_node]:\n reps[rep].add(cc)\n\n chuLiuEdmonds()\n\n # check each node in cycle, if one of its representatives is a key in the final_edges, it is the one.\n found = False\n wh = -1\n for i in range(cyc_len):\n for repc in rep_cons[i]:\n if repc in final_edges:\n wh = cyc_nodes[i]\n found = True\n break\n if found:\n break\n\n l = par[wh]\n while l != wh:\n ch = oldO[par[l], l]\n pr = oldI[par[l], l]\n final_edges[ch] = pr\n l = par[l]\n\n if labeled:\n assert energies.ndim == 4, 'dimension of energies is not equal to 4'\n else:\n assert energies.ndim == 3, 'dimension of energies is not equal to 3'\n input_shape = energies.shape\n batch_size = input_shape[0]\n max_length = input_shape[2]\n\n pars = np.zeros([batch_size, max_length], dtype=np.int32)\n arc_tags = np.zeros([batch_size, max_length], dtype=np.int32) if labeled else None\n for i in range(batch_size):\n energy = energies[i]\n\n # calc the real length of this instance\n length = lengths[i]\n\n # calc real energy matrix shape = [length, length, num_labels - #symbolic] (remove the label for symbolic arcs).\n if labeled:\n energy = energy[leading_symbolic:, 
:length, :length]\n # get best label for each edge.\n label_id_matrix = energy.argmax(axis=0) + leading_symbolic\n energy = energy.max(axis=0)\n else:\n energy = energy[:length, :length]\n label_id_matrix = None\n # get original score matrix\n orig_score_matrix = energy\n # initialize score matrix to original score matrix\n score_matrix = np.array(orig_score_matrix, copy=True)\n\n oldI = np.zeros([length, length], dtype=np.int32)\n oldO = np.zeros([length, length], dtype=np.int32)\n curr_nodes = np.zeros([length], dtype=np.bool)\n reps = []\n\n for s in range(length):\n orig_score_matrix[s, s] = 0.0\n score_matrix[s, s] = 0.0\n curr_nodes[s] = True\n reps.append(set())\n reps[s].add(s)\n for t in range(s + 1, length):\n oldI[s, t] = s\n oldO[s, t] = t\n\n oldI[t, s] = t\n oldO[t, s] = s\n\n final_edges = dict()\n chuLiuEdmonds()\n par = np.zeros([max_length], np.int32)\n if labeled:\n arc_tag = np.ones([max_length], np.int32)\n arc_tag[0] = 0\n else:\n arc_tag = None\n\n for ch, pr in final_edges.items():\n par[ch] = pr\n if labeled and ch != 0:\n arc_tag[ch] = label_id_matrix[pr, ch]\n\n par[0] = 0\n pars[i] = par\n if labeled:\n arc_tags[i] = arc_tag\n\n return pars, arc_tags",
"def circle_of_least_confusion(self,start):\n def f(x):\n pl=self.project_onto_plane(x)\n return max(pl[:,1])-min(pl[:,1])\n\n # m=self.marginal_ray\n if hasattr(self, 'start'):\n start=self.start\n else:\n# start=(m.Q_p[-1,0,2]-m.Q_p[-2,0,2])/2\n start=start\n print(start)\n res=minimize(f,(start), method='Nelder-Mead')\n self.start=res.final_simplex[0][0,0]\n\n return res.final_simplex[0][0,0],res.final_simplex[1][0]",
"def make_get_minimum_set(gRNA_hits, manual_check = True, exclude_seqs = set(), targets = None,\n prioritise_nr = False, sc_algorithm = \"LAR\", num_sets = 1, tie_breaker = None,\n low_coverage_penalty = 0.5, suppress_warning = False,\n impossible_set_message = impossible_set_message_default):\n # ## filter by excluded sequences\n if exclude_seqs:\n gRNA_hits.set_seqs_check(\"exclude\", False, [str(s) for s in exclude_seqs])\n gRNA_hits = gRNA_hits.filter_seqs_all_checks_passed(quiet = suppress_warning)\n gRNA_hits = gRNA_hits.filter_hits_all_checks_passed(quiet = suppress_warning)\n if prioritise_nr:\n set_cover = make_set_cover_nr(gRNA_hits, num_sets = num_sets, target_ids = targets,\n num_lengths_to_track = None,\n low_coverage_penalty = low_coverage_penalty,\n suppress_warning = suppress_warning)\n else:\n ## tie breakers should return 2 values: <gRNASeq>, [<gRNAHits>]\n ## note: If antisense, tie break by minimum -end. Else, tie break by minimum start.\n ## note: tie-breaker uses AVERAGE distance of hits (to inferred N-terminus)\n if tie_breaker is None:\n tie_breaker = lambda *args: tuple(all_best_pos(*args).items())[0]\n set_cover = make_set_cover_pos(gRNA_hits, num_sets = num_sets, target_ids = targets,\n algorithm = sc_algorithm, id_key = lambda x:x.target_id,\n tie_breaker = tie_breaker,\n suppress_warning = suppress_warning)\n set_num = [0]\n def get_minimum_set():\n restore = []\n set_num[0] += 1\n while True:\n ## solve set_cover\n selected_grna = set_cover(restore = restore)\n restore = selected_grna\n ## if empty set, print message and break out of loop to exit and return the empty set\n if len(selected_grna) == 0:\n print(impossible_set_message)\n break\n ## if valid set AND manual check NOT requested, break out of loop to exit and return the valid set\n elif not manual_check: break\n ## if valid set AND manual check requested\n else:\n ## print gRNA sequences in selected_grna to screen for user to evaluate\n sorted_grna = sorted(selected_grna, key = lambda grna:grna.id)\n usr_input = manual_check_prompt(sorted_grna, set_num[0])\n if usr_input.upper() == 'X':\n break\n else:\n ## id_list and seq_list have same order as selected_grna\n id_list = tuple(grna.id for grna in selected_grna)\n seq_list = tuple(str(grna.seq).upper() for grna in selected_grna)\n ## remove gRNA from list of gRNAs to restore upon next set cover generation\n if usr_input in id_list:\n index = id_list.index(usr_input)\n restore.remove(selected_grna[index])\n elif usr_input.upper() in seq_list:\n index = seq_list.index(usr_input)\n restore.remove(selected_grna[index])\n else:\n print(\"Invalid input.\")\n return [str(grna.seq) for grna in selected_grna]\n return get_minimum_set",
"def concave_piece(x,k_ind,m_ind):\n line_start=cfg.nfea*sum(cfg.jk[i] for i in range(k_ind))\n f_cc=np.dot(x[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+x[line_start+cfg.nfea-1]\n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n \n # next lines\n line_start += cfg.nfea\n for j in range(1,cfg.jk[k_ind]): # jk is ok, range does not take the limit itself but jk-1.\n f_tmp = np.dot(x[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+x[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp < f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = j\n line_start += cfg.nfea\n \n return f_cc",
"def mc_problem(start=(3, 3, 1, 0, 0, 0), goal=None):\n if goal is None:\n goal = (0,0,0) + start[:3]\n if start == goal:\n return [start]\n explored = set() #set of states we have visited\n frontier = [[start]] #ordered list of paths we have blazed\n while frontier:\n path = frontier.pop(0)\n s = path[-1]\n for (state, action) in csuccessors(s).items():\n if state not in explored:\n explored.add(state)\n path2 = path + [action, state]\n if state == goal:\n return path2\n else:\n frontier.append(path2)\n return fail",
"def constraint_1(i,s_edge):\n edge = sym_to_edge_cache[s_edge]\n value = 0\n if edge in cycle_matrix[i]:\n value = cycle_matrix[i][edge]\n\n return (value * s_edge)",
"def minimum_spanning_arborescence(sol):",
"def _min_fill(self):\n adj = self.adj\n def num_fill_edges(mask,i):\n \"\"\"\n number of fill edges created by eliminating node i\n in the graph with 0-1 adjacency matrix mask\n \"\"\"\n n = mask.shape[0]\n nb = np.nonzero(mask[i, :])[0]\n clique_edges = nb.shape[0]*(nb.shape[0]-1)/2\n current_edges = mask[np.ix_(nb, nb)].sum()/2\n return clique_edges - current_edges\n assert np.all(adj == np.transpose(\n adj)), \"the adjacency matrix should be symmetric\"\n n = adj.shape[0]\n mask = (np.abs(adj) > 0).astype(float)\n np.fill_diagonal(mask, 0)\n order = []\n available = list(range(n))\n total_fill_edges = 0\n for _ in range(n):\n mask_iter = mask[np.ix_(available, available)]\n fills = [num_fill_edges(mask_iter, j)\n for j in range(len(available))]\n best_ind = np.argmin(fills)\n num_fills = fills[best_ind]\n total_fill_edges += num_fills\n best = available[best_ind]\n neighbors_ind = np.nonzero(mask_iter[best_ind, :])[0]\n neighbors = [available[i] for i in neighbors_ind]\n mask[np.ix_(neighbors, neighbors)] = 1\n mask[neighbors, neighbors] = 0\n available.pop(best_ind)\n order.append(best)\n return order, mask",
"def get_interval_from_minflow(self, wide=False):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.arc_info[arc][\"start\"],\n # self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.arc_info[arc][\"destin\"],\n # self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # x,\n # self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.source(),\n # x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.sink(),\n # x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # x,\n # self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".\n # format(s_prime, v, int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".\n # format(v, t_prime, int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n 
# add edge (s', x)\n start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n # s_prime,\n # x,\n # int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n # x,\n # t_prime,\n # int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i],\n capacities[i],\n unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n # print('Minimum cost:', min_cost_flow.OptimalCost())\n # print('')\n # print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n # cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n # print('%1s -> %1s %3s / %3s %3s' % (\n # min_cost_flow.Tail(i),\n # min_cost_flow.Head(i),\n # min_cost_flow.Flow(i),\n # min_cost_flow.Capacity(i),\n # cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n # print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n # print(\"Has become ({}, {}) with sup {}\".format(start,\n # destin,\n # sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in\n self.arc_info[arc].keys()):\n # print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n bound_1 = old_flow + sup_flow\n bound_2 = old_flow - sup_flow\n new_lb = max(0, int(min(bound_1, bound_2)))\n new_ub = int(max(bound_1, bound_2))\n if wide:\n if new_lb == new_ub:\n # print(\"We had a zero interval\")\n new_lb = int(new_lb*0.8)\n new_ub = int(new_ub*1.2)\n if new_lb == 0:\n # print(\"We got a zero lower bound\")\n new_ub = 5\n # print(\"But now we're doing {} {}\".\n # format(new_lb, new_ub))\n\n self.arc_info[arc][\"lower_bound\"] = new_lb\n self.arc_info[arc][\"upper_bound\"] = new_ub\n # print(\"Edge ({},{}) bounds are [{},{}]\".format(\n # start,\n # destin,\n # self.arc_info[arc][\"lower_bound\"],\n # self.arc_info[arc][\"upper_bound\"]))\n # print(self.arc_info[arc])\n else:\n print('There was an issue with the min cost flow input.')\n # self.check_conservation_of_flow() # check that solution is valid",
"def auxmin_cc_piece(x,k_ind,m_ind):\n \n # Adding new linear function as a last function:\n # The first line. If jk = 1 and k_ind = nomax, this is a new line, otherwise an old one.\n line_start=cfg.nfea*sum(cfg.jk[i] for i in range(k_ind))\n #print line_start,cfg.jk,k_ind,cfg.nomax-1,cfg.jk[k_ind], cfg.xprev,x\n if cfg.jk[k_ind]==1 and k_ind==cfg.nomax-1:\n #print \"hihu0\"\n f_cc=np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n return f_cc\n else:\n #print \"hihu1\",line_start,k_ind\n f_cc=np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n if cfg.jk[k_ind]==1:\n return f_cc\n \n # Next lines\n line_start += cfg.nfea\n for j in range(1,cfg.jk[k_ind]-1): # Everything but the first and last.\n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = j\n line_start += cfg.nfea\n \n # The last line.\n if k_ind==cfg.nomax-1:\n #print \"hihu3\"\n f_tmp = np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n else: \n \n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = cfg.jk[k_ind]-1 \n\n return f_cc",
"def get_weight_from_minflow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"start\"],\n self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"destin\"],\n self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.source(),\n x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.sink(),\n x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n v,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n v,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n # add edge (s', x)\n 
start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n x,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n x,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i], capacities[i], unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n print(\"Has become ({}, {}) with sup {}\".format(start,\n destin,\n sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in \\\n self.arc_info[arc].keys()):\n print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n new_flow = old_flow + sup_flow\n self.arc_info[arc][\"weight\"] = int(new_flow)\n print(\"Edge ({},{}) weight is changed from {} to {}\".format(\n start,\n destin,\n old_flow,\n new_flow))\n else:\n print('There was an issue with the min cost flow input.')\n #self.check_conservation_of_flow() # check that solution is valid",
"def mc_problem(start=(3,3,1,0,0,0),goal=None):\n if goal is None:\n def goal_fn(state): return state[:3] == (0,0,0) \n else:\n def goal_fn(state): return state == goal\n return shortest_path_search(start,csuccessors,goal_fn)",
"def minim(self):\n cycles = self.get_elementary_cycles()\n\n for cycle in cycles:\n # take back our clean graph (self._graph) and operate on it\n nodes_concerned = []\n for node in self._graph.node_list:\n possible_names = [check_node.getName() for check_node in cycle]\n if node.getName() in possible_names:\n nodes_concerned.append(node)\n possible_debts = []\n\n # inspect what the minimum debt is\n for node in nodes_concerned:\n next_node = node.getNext()\n while next_node is not None:\n if next_node.getCreditor() in nodes_concerned:\n if int(next_node.getAmount()) != 0:\n possible_debts.append(int(next_node.getAmount()))\n break # Only once\n next_node = next_node.getNext()\n\n # subtract that debt from all the debts in the cycle\n least_debt = min(possible_debts)\n for node in nodes_concerned:\n next_node = node.getNext()\n while next_node is not None:\n if next_node.getCreditor() in nodes_concerned:\n if int(next_node.getAmount()) != 0:\n next_node.setAmount(str(int(next_node.getAmount()) - int(least_debt)))\n break # Only once\n next_node = next_node.getNext()\n return self._graph",
"def circuitSat(C):",
"def limited_optimal_SC(U, S, size = 1, redundancy = 1):\n S_length = len(S)\n S_class = S.__class__\n S = sorted(S, key = len, reverse = True)\n max_redundancy = redundancy * len(U)\n print(S_length, max_redundancy)\n def recur(C, iS, d):\n \"\"\"\n C (SetOfSets): (partial) set cover solution\n iS (int): index of set in S from which to start adding (to avoid repeating combinations)\n d (int): current depth of recursion; if exceeds 'size', terminates.\n I'm hoping using a var is quicker than using len(C).\n \"\"\"\n C_elements = C.elements\n ## if max depth reached or set cover not possible, exit\n if ((d >= size)\n or (len(U - (C_elements.union(*S[iS:]))) != 0)):\n return []\n else:\n output = []\n ## set minimum set size to be <uncovered>/<remaining set cover size allowance>\n min_set_size = int((len(U - C_elements) / (size - d)) + 1)\n for i in range(iS, S_length):\n s = S[i]\n ## if set size is too small, stop searching\n ## (S is sorted by largest to shortest so lengths of all sets after\n ## will be <= len(s))\n if len(s) < min_set_size:\n break\n ## if s is not a subset of current partial cover solution, add it\n if not s < C_elements:\n C_branch = C.copy()\n C_branch.add(s)\n ## if exceeds redundancy threshold, skip\n if C_branch.redundancy > max_redundancy:\n continue\n else:\n ## if set cover, add to solutions\n if C_branch.elements == U:\n output.append(C_branch)\n else:\n output.extend(recur(C_branch, i+1, d+1))\n return output\n return recur(S_class(), 0, 0)",
"def original_solution():\n matrix = get_data()\n # Construct Graph\n G = nx.DiGraph()\n rows, cols = len(matrix), len(matrix[0])\n for r in xrange(rows):\n for c in xrange(cols):\n if 0 < c:\n G.add_edge(r*cols + c, r*cols + c - 1, weight=matrix[r][c-1])\n if c < cols-1:\n G.add_edge(r*cols + c, r*cols + c + 1, weight=matrix[r][c+1])\n if 0 < r:\n G.add_edge(r*cols + c, (r-1)*cols + c, weight=matrix[r-1][c])\n if r < rows-1:\n G.add_edge(r*cols + c, (r+1)*cols + c, weight=matrix[r+1][c])\n # Calculate shortest path\n path = nx.shortest_path(G, 0, rows*cols-1, weighted=True)\n \n # Get cost for path\n s = 0\n for p in path:\n c = p % cols\n r = (p - c) / rows\n s += matrix[r][c]\n return s",
"def lowest_cost_search(start, successors, is_goal, action_cost):\r\n # your code here\r\n explored = set()\r\n frontier = [ [start] ]\r\n while frontier:\r\n path = frontier.pop(0)\r\n state1 = final_state(path)\r\n if is_goal(state1):\r\n return path\r\n explored.add(state1)\r\n pcost = path_cost(path)\r\n for (state, action) in successors(state1).items():\r\n if state not in explored:\r\n total_cost = pcost + action_cost(action)\r\n path2 = path [(action, total_cost), state]\r\n add_to_frontier(frontier, path2)\r\n return Fail",
"def floyd_warshall(A):\n n = A.shape[0]\n \n for k in tqdm(range(1, n+1)):\n for i in range(n):\n for j in range(n):\n A[i,j,k] = min(A[i,j,k-1], A[i,k-1,k-1]+A[k-1,j,k-1])\n \n \n for i in range(n):\n if A[i,i,n] <0:\n min_path = 'Negative cycle'\n return min_path\n min_path = np.min(A[:,:,n])\n \n return min_path",
"def exact_min_vertex_cover(graph):\n for N in range(1,len(graph.nodes())+1):\n for graph_sub in it.combinations(sorted(graph.nodes(), reverse=True), N):\n graph_temp = graph.copy()\n graph_temp.remove_nodes_from(graph_sub)\n if len(graph_temp.edges()) == 0:\n return list(graph_sub)",
"def _zero_out_most_similar_conformer(self):\n n_confs = len(self._coor_set)\n\n # Make a square matrix for pairwise RMSDs, where\n # - the lower triangle (and diagonal) are np.inf\n # - the upper triangle contains the pairwise RMSDs (k=1 to exclude diagonal)\n pairwise_rmsd_matrix = np.zeros((n_confs,) * 2)\n pairwise_rmsd_matrix[np.tril_indices(n_confs)] = np.inf\n for i, j in zip(*np.triu_indices(n_confs, k=1)):\n pairwise_rmsd_matrix[i, j] = calc_rmsd(self._coor_set[i], self._coor_set[j])\n\n # Which coords have the lowest RMSD?\n # `idx_low_rmsd` will contain the coordinates of the lowest value in the pairwise matrix\n # a.k.a. the indices of the closest confs\n idx_low_rmsd = np.array(\n np.unravel_index(\n np.argmin(pairwise_rmsd_matrix), pairwise_rmsd_matrix.shape\n )\n )\n low_rmsd = pairwise_rmsd_matrix[tuple(idx_low_rmsd)]\n logger.debug(\n f\"Lowest RMSD between conformers {idx_low_rmsd.tolist()}: {low_rmsd:.06f} Å\"\n )\n\n # Of these, which has the lowest occupancy?\n occs_low_rmsd = self._occupancies[idx_low_rmsd]\n idx_to_zero, idx_to_keep = idx_low_rmsd[occs_low_rmsd.argsort()]\n\n # Assign conformer we want to remove with an occupancy of 0\n logger.debug(\n f\"Zeroing occupancy of conf {idx_to_zero} (of {n_confs}): \"\n f\"occ={self._occupancies[idx_to_zero]:.06f} vs {self._occupancies[idx_to_keep]:.06f}\"\n )\n if (\n self.options.write_intermediate_conformers\n ): # Output all conformations before we remove them\n self._write_intermediate_conformers(prefix=\"cplex_remove\")\n self._occupancies[idx_to_zero] = 0",
"def find_smallest(self):\n # add max value to covered rows and columns to ignore the covered cells\n maxval = self.C.max()\n C = self.C + self.row_cover[:, np.newaxis]*maxval\n C += self.col_cover*maxval\n # return the smallest value\n return C.min()",
"def connection(self, sampleseq, num):\n self.Adjmatrix = np.zeros((self.nodenum, self.nodenum), dtype = int)\n \n for i in range(self.supplynum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries[i], self.trandemandseries], sampleseq[self.supplyseries[i]]))\n self.Adjmatrix[self.supplyseries[i], self.trandemandseries[minindex]] = 1\n# self.Adjmatrix[minindex, self.supplyseries[i]] = 1\n \n for i in range(self.trannum):\n if(np.sum(self.Adjmatrix[self.supplyseries, self.transeries[i]]) == 0):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries, self.transeries[i]], num))\n self.Adjmatrix[minindex, self.transeries[i]] = 1\n# self.Adjmatrix[self.transeries[i], minindex] = 1\n \n \n# for i in range(self.supplynum):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries[i], self.supplyseries], num))\n# self.Adjmatrix[self.supplyseries[i], minindex] = 1\n# self.Adjmatrix[minindex, self.supplyseries[i]] = 1\n \n# for i in range(self.trannum):\n# if(np.sum(self.Adjmatrix[self.supplyseries, self.transeries[i]]) != 0):\n# continue\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.supplyseries, self.transeries[i]], num))\n# self.Adjmatrix[minindex, self.transeries[i]] = 1\n## self.Adjmatrix[self.transeries[i], minindex] = 1\n# \n for i in range(self.trannum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries[i], self.demandseries], min(sampleseq[self.transeries[i]], self.demandnum))) + self.supplynum + self.trannum\n self.Adjmatrix[self.transeries[i], minindex] = 1\n# self.Adjmatrix[minindex, self.transeries[i]] = 1\n \n# for i in range(self.demandnum):\n# if(np.sum(self.Adjmatrix[self.transeries, self.demandseries[i]]) == 0):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries, self.demandseries[i]], 1)) + self.supplynum\n# self.Adjmatrix[minindex, self.demandseries[i]] = 1\n \n# for i in range(self.trannum):\n# minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries[i], self.transeries], num)) + self.supplynum\n# self.Adjmatrix[self.transeries[i], minindex] = 1\n \n for i in range(self.demandnum):\n if(np.sum(self.Adjmatrix[self.transeries, self.demandseries[i]]) == 0):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.transeries, self.demandseries[i]], num)) + self.supplynum\n self.Adjmatrix[minindex, self.demandseries[i]] = 1\n# self.Adjmatrix[self.demandseries[i], minindex] = 1\n \n for i in range(self.demandnum):\n minindex = np.array(sf.minimumk(self.Dismatrix[self.demandseries[i], self.demandseries], min(sampleseq[self.demandseries[i]] + 1, self.demandnum))) + self.supplynum + self.trannum\n minindex = minindex[1:-1]\n for j in range(len(minindex)):\n if(self.Adjmatrix[self.demandseries[i], minindex[j]] == 1 or self.Adjmatrix[minindex[j], self.demandseries[i]] == 1):\n continue\n self.Adjmatrix[self.demandseries[i], minindex[j]] = 1",
"def reachable_set(graph, initial_nodes, out=None):\n\n if not initial_nodes:\n raise AttributeError('Set of initial nodes needs to be non-empty.')\n\n if out is None:\n visited = np.zeros((graph.number_of_nodes(),\n constants.action_count + 1),\n dtype=np.bool)\n else:\n visited = out\n\n # All nodes in the initial set are visited\n visited[initial_nodes, 0] = True\n\n # for _, next_node, data in graph.edges_iter(10*20+7, data=True):\n # if data['action']==4:\n # print(data)\n\n stack = list(initial_nodes)\n\n # TODO: rather than checking if things are safe, specify a safe subgraph?\n while stack:\n node = stack.pop(0)\n scary_actions = list()\n # examine all edges from node, see which actions are unsafe\n for _, next_node, data in graph.edges_iter(node, data=True):\n action = data['action']\n probability = data['probability']\n safe = data['safe']\n if not safe and probability:\n scary_actions.append(action)\n # if next_node == 6*20+7:\n # print('corner\\'s scary')\n # print(scary_actions)\n # if node == 7*20+7:\n # print('from\\'s scary')\n # print(scary_actions)\n for _, next_node, data in graph.edges_iter(node, data=True):\n action = data['action']\n safe = data['safe']\n if not visited[node, action] and safe and action not in scary_actions:\n # if next_node == 9 * 20 + 7:\n # print('node')\n # print(node)\n # print('action')\n # print(action)\n # print('safe')\n # print(safe)\n # print('all actions')\n # for _, next_node, data in graph.edges_iter(node, data=True):\n # print(next_node)\n # print(visited[next_node, :])\n # print(data['action'])\n # print(data['probability'])\n # print(data['safe'])\n # print('.')\n # print(visited[node, :])\n visited[node, action] = True\n if not visited[next_node, 0]:\n stack.append(next_node)\n visited[next_node, 0] = True\n\n # for action in range(4,5):\n # plt.figure(action)\n # plt.imshow(np.reshape(visited[:,action], constants.world_shape).T,\n # origin='lower', interpolation='nearest', vmin=0, vmax=1)\n # plt.title('action <-')\n # plt.show(block=False)\n # plt.pause(0.01)\n # plt.figure(6)\n # plt.imshow(np.reshape(visited[:, 0], constants.world_shape).T,\n # origin='lower', interpolation='nearest', vmin=0, vmax=1)\n # plt.title('reachable')\n # plt.show(block=False)\n # plt.pause(0.01)\n\n if out is None:\n return visited",
"def remove_cycle_recur(self):\n \n visited = set()\n path = []\n \n for node in self.node_set:\n if node not in visited:\n min_edge = self.remove_cycle_helper(node, visited, path)\n visited.clear()\n path.clear()\n if min_edge is not None:\n # if there is a cycle and the min weight is found\n if len(min_edge) == 2:\n return min_edge\n \n visited.clear()\n path.clear()\n return []",
"def astar_minimal_matrix(matrix):\n # We define a virtual start node\n start = (-1, -1)\n a_star = AStarMinimalMatrix(matrix)\n a_star.a_star(start)\n return sum(matrix[row][col] for row, col in a_star.path if row >= 0 and col >= 0)",
"def lowest_cost_search(start, successors, is_goal, action_cost):\n explored = set()\n frontier = [ [start] ]\n if is_goal(start):\n return frontier[0]\n while frontier:\n path = frontier.pop(0)\n state1 = final_state(path)\n if is_goal(state1):\n return path\n explored.add(state1)\n pcost = path_cost(path)\n for (state, action) in successors(state1).items():\n if state not in explored:\n total_cost = pcost +action_cost(action)\n path2 = path + [(action, total_cost), state]\n frontier.append(path2)\n add_to_frontier(frontier, path2)\n return Fail",
"def minimumCostPathOnArray(arr):\n arr_mask = np.ones(np.array(arr).shape)\n\n rows = len(arr)\n cols = len(arr[0])\n\n for i in range(1,rows):\n arr[i][0] = arr[i][0] + min(arr[i-1][0], arr[i-1][1])\n for j in range(1, cols-1):\n arr[i][j] = arr[i][j] + min(arr[i-1][j-1], arr[i-1][j], arr[i-1][j+1])\n arr[i][cols-1] = arr[i][cols-1] + min(arr[i-1][cols-2], arr[i-1][cols-1])\n\n min_index = [0]*rows\n min_cost = min(arr[-1])\n for k in range(1,cols-1):\n if arr[-1][k] == min_cost:\n min_index[-1] = k\n\n for i in range(rows-2, -1, -1):\n j = min_index[i+1]\n lower_bound = 0\n upper_bound = 1 # Bounds for the case j=1\n \n if j==cols-1:\n lower_bound = cols-2\n upper_bound = cols-1\n elif j>0:\n lower_bound = j-1\n upper_bound = j+1\n \n min_cost = min(arr[i][lower_bound:upper_bound+1])\n for k in range(lower_bound, upper_bound+1):\n if arr[i][k] == min_cost:\n min_index[i] = k\n\n\n path = []\n for i in range(0, rows):\n arr_mask[i,0:min_index[i]] = np.zeros(min_index[i])\n path.append((i+1, min_index[i]+1))\n # print(\"Minimum cost path is: \")\n # print(path)\n return arr_mask",
"def mincost(L,m,n):\n # find the length of the strings\n # declaring the array for storing the dp values\n tc = [[0]*(n + 1) for i in range(m + 1)]\n tc[0][0]=L[0][0]\n for i in range(1,m + 1):\n tc[i][0]=tc[i-1][0]+L[i][0]\n for j in range(1,n + 1):\n tc[0][j]=tc[0][j-1]+L[0][j]\n for i in range(1,m+1):\n for j in range(1,n+1):\n tc[i][j]=min(tc[i-1][j-1],tc[i-1][j],tc[i][j-1])+L[i][j]\n\n\n # L[m][n] contains the length of LCS of X[0..n-1] & Y[0..m-1]\n return tc[m][n]",
"def mtx_minimize(p, trans, all_Cis, photo):\n\n demand, __, __, __ = calc_photosynthesis(p, np.expand_dims(trans, axis=1),\n all_Cis, photo)\n supply = A_trans(p, np.expand_dims(trans, axis=1), all_Cis)\n\n # closest match to ~ 0. (i.e. supply ~ demand)\n idx = bn.nanargmin(abs(supply - demand), axis=1)\n\n # each Ci on the transpiration stream\n Ci = np.asarray([all_Cis[e, idx[e]] for e in range(len(trans))])\n Ci = np.ma.masked_where(idx == 0, Ci)\n\n return Ci"
] | [
"0.5864169",
"0.57378393",
"0.5687902",
"0.56660724",
"0.56399906",
"0.56240785",
"0.562271",
"0.56220156",
"0.561726",
"0.56002665",
"0.55332106",
"0.55105716",
"0.54956925",
"0.54948974",
"0.5421922",
"0.53512096",
"0.5348889",
"0.53431565",
"0.5338174",
"0.5308137",
"0.5237585",
"0.5234841",
"0.52340716",
"0.52311814",
"0.5225706",
"0.5225418",
"0.5219307",
"0.51988184",
"0.51584446",
"0.513713"
] | 0.6773402 | 0 |
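The MFAS_set_cover record above drives the whole encoding: one 0/1 variable per edge, one covering constraint per cycle, and a minimized sum over the indicators. Since the helper module `u` (find_all_cycles, mk_cycle_matrix) is not shown, the sketch below hand-enumerates the cycles of a three-node toy graph and rebuilds the same constraint shape directly with z3-solver; everything other than the z3 calls is an illustrative assumption, not the record's own setup.

import z3 as z

edges = [(0, 1), (1, 2), (2, 0), (1, 0)]                 # toy digraph with two directed cycles
cycles = [[(0, 1), (1, 2), (2, 0)], [(0, 1), (1, 0)]]    # enumerated by hand for this sketch

opt = z.Optimize()
x = {e: z.Int('{0}->{1}'.format(*e)) for e in edges}     # 0/1 removal indicator per edge
for v in x.values():
    opt.add(z.Or([v == 0, v == 1]))
for cyc in cycles:                                       # every cycle must lose at least one edge
    opt.add(z.Sum([x[e] for e in cyc]) >= 1)
opt.minimize(z.Sum(list(x.values())))                    # smallest feedback arc set

assert opt.check() == z.sat
model = opt.model()
removed = [e for e in edges if model[x[e]].as_long() == 1]
print(removed)                                           # [(0, 1)]: the only edge shared by both cycles

As an independent cross-check of the objective value, python-igraph's built-in feedback_arc_set() can be used; that is presumably what u.get_feedback_arc_set wraps in the Watts-Strogatz record further down.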
Multiply the edge by its corresponding value in the cycle matrix | def constraint_1(i,s_edge):
edge = sym_to_edge_cache[s_edge]
value = 0
if edge in cycle_matrix[i]:
value = cycle_matrix[i][edge]
return (value * s_edge) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def directed_cycle_score(A):\n\n # Implement your cycle score given Problem 4 Part 2\n temp_matrix = np.zeros(A.shape)\n alpha = 0.05\n k = 0\n summation_term = 999999\n num_terms = A.shape[0]\n # while change < 0.05:\n for i in range(num_terms):\n summation_term = (1 / np.math.factorial(k)) * expm(A)\n temp_matrix += summation_term\n\n cycle_score = np.trace(temp_matrix) - (A.shape[0] * num_terms)\n return cycle_score",
"def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]",
"def commute_matrix(A):\n R = resistance_matrix(A)\n E = A.sum() / 2 # number of edges in graph\n C = 2 * E * R\n return C",
"def scalar_mult(diagram, scalar):\n for node in diagram.nodes:\n if node.is_leaf():\n node.value *= scalar\n else:\n for oindex in node.offsets:\n node.offsets[oindex] *= scalar",
"def scalar_mult(diagram, scalar):\n for oindex in diagram.offsets:\n diagram.offsets[oindex] *= scalar",
"def scalar_mult(diagram, scalar):\n for oindex in diagram.offsets:\n diagram.offsets[oindex] *= scalar",
"def compute_edge_logits(self):",
"def p2(self, i):\n j = 0 if i == 1 else 1\n self.edges[i].m_v = exp(dot(self.edges[j].m_f, self.tp.F))",
"def __mul__(self,m):\n if type(m) != Matrix:\n raise TypeError('The second argument is not a matrix lol')\n if self.ncols != m.nrows:\n raise ValueError('matrix dot argument has incorrect number of rows')\n new = Matrix(self.nrows,m.ncols)\n columns = m.getCols()\n rowindex = 0\n colindex = 0 \n for row in self.matrix:\n colindex = 0 \n for col in columns:\n summ = 0\n for i,j in zip(row,col):\n summ+= i*j \n new.matrix[rowindex][colindex] = summ\n print new.matrix\n colindex += 1 \n rowindex+=1\n return new",
"def multRow(A,r,m):\n for col in range(len(A[r])):\n A[r][col] = (A[r][col])*m",
"def forward(self, node_attn, edge_attn):\n new_attn = torch.matmul(node_attn, edge_attn)\n return new_attn",
"def row_to_edge(row):\r\n return float(row[\"Dem\"]) - float(row[\"Rep\"])",
"def increaseFlow(self, edge, value):\r\n self.flow[edge] += value\r\n self.flow[edge[::-1]] -= value",
"def scalar_mult(diagram, scalar):\n for leaf in diagram.leaves:\n leaf.value *= scalar",
"def vcycle(v, b):\n if (len(v) - 1) & (len(v) - 2) != 0:\n raise ValueError(\"Lenth of v must be 2**n + 1.\")\n\n for i in range(3):\n jacobi23(v, b)\n\n if len(v) <= 3:\n return\n\n r = b - Amul(v)\n r2 = 4. * restrict(r)\n e2 = np.zeros_like(r2)\n vcycle(e2, r2)\n v += prolong(e2)\n\n for i in range(3):\n jacobi23(v, b)",
"def row_to_edge(row):\n return float(row[\"Dem\"]) - float(row[\"Rep\"])",
"def addEdge(self,x,y):\r\n self.matr[x][y] = True\r\n self.matr[y][x] = True",
"def MFAS_set_cover(s,graph):\n\n ## initialization\n m = graph.ecount()\n cycle_matrix = u.mk_cycle_matrix(u.find_all_cycles(graph), m)\n n, c = graph.get_adjacency().shape\n num_cycles = len(cycle_matrix)\n edge_list = graph.get_edgelist()\n sym_to_edge_cache = {}\n edge_to_sym_cache = {}\n sum_var = 'y'\n\n\n def symbolize(i,j):\n \"given two indices, create a symbolic variable\"\n new = z.Int('{0}->{1}'.format(i,j))\n return new\n\n\n def constraint_1(i,s_edge):\n \"\"\" Multiply the edge by its corresponding value in the cycle matrix\n \"\"\"\n edge = sym_to_edge_cache[s_edge]\n value = 0\n if edge in cycle_matrix[i]:\n value = cycle_matrix[i][edge]\n\n return (value * s_edge)\n\n\n ## symbolize the edges\n for source,sink in edge_list:\n s_edge = symbolize(source, sink)\n ## an edge is either a 0 or a 1\n s.add(z.Or([s_edge == 0, s_edge == 1]))\n\n sym_to_edge_cache[s_edge] = (source,sink)\n edge_to_sym_cache[(source,sink)] = s_edge\n\n\n ## Perform constraint 1 and add it to the solver instance\n for i in range(num_cycles):\n s.add(z.Sum([constraint_1(i,s_edge)\n for s_edge in sym_to_edge_cache.keys()]) >= 1)\n\n\n ## we want the smallest y possible\n s.minimize(z.Sum([s_edge for s_edge in sym_to_edge_cache.keys()]))\n\n s.check()\n return s.model()",
"def symmeterize(self):\n A = self.to_coo_matrix()\n symg = wgraph_from_adjacency((A + A.T) / 2)\n self.E = symg.E\n self.edges = symg.edges\n self.weights = symg.weights\n return self",
"def edge(self, v, d):\n # method here",
"def __mul__(self, other):\n #\n # TODO - your code here\n #\n final_matrix = []\n for i in range(self.h):\n temp_row = []\n for j in range(other.w):\n # take dot-product of row of\n # matrix in 1st arg with col of\n # matrix in 2nd arg\n temp_row.append(dot_product(get_row(self.g, i), get_col(other.g, j)))\n final_matrix.append(temp_row)\n return Matrix(final_matrix)\n # TODO - your code here",
"def current_update():\n # Compute the multiplier coefficient:\n ci = dt / (L * dx)\n for k in range(0, nx-1):\n I[k] = I[k] - (ci * (V[k + 1] - V[k]))",
"def matrix(self):\n return self.composition(self._parent.edge_to_matrix)",
"def mulConstantToConstant(line, cycles):\n if(re.match(\"^([A-z]+)\\s*=\\s*([0-9]+)\\s*[*]\\s*([0-9]+)$\", line)):\n outputText.config(state = NORMAL)\n instruction = re.match(\"^([A-z]+)\\s*=\\s*([0-9]+)\\s*[*]\\s*([0-9]+)$\", line)\n register = instruction.group(1)\n constant1 = instruction.group(2)\n constant2 = instruction.group(3)\n if(register not in registerList):\n cycles = 0\n return cycles\n outputText.insert(END, \"MOV R{}, {}\\n\".format(registerList.index(register), constant1))\n outputText.insert(END, \"MUL R{}, {}\\n\".format(registerList.index(register), constant2))\n cycles = cycles + 0\n outputText.config(state = DISABLED)\n return cycles",
"def add_edge(self, e):\n a, b = e\n self[a][b] = e\n self[b][a] = e",
"def _multiply_matrix(self, v):\n\n self.inputs.grad.data.zero_()\n\n with torch.no_grad():\n v_features = self.lpips_model.features(self.inputs.detach() +\n self.h * v)\n D_phi_v = (\n normalize_flatten_features(v_features) -\n self.input_features\n ) / self.h\n\n torch.sum(self.input_features * D_phi_v).backward(retain_graph=True)\n\n return self.inputs.grad.data.clone()",
"def mmultiply(self, matrix):\n try:\n result_matrix = [[0 for row in range(len(self.matrix))] for col in range(len(matrix[0]))]\n for i in range(len(self.matrix)):\n for j in range(len(matrix[0])):\n for k in range(len(matrix)):\n result_matrix[i][j] += self.matrix[i][k] * matrix[k][j]\n self.matrix = result_matrix\n except IndexError:\n pass\n pass",
"def mutateEdge(g, edges, directed, connected):\n if ((directed and g.e == g.n ** 2 - g.n)\n or (not directed and g.e == (g.n ** 2 - g.n) / 2)): # Complete graph\n return\n\n if (g.e > edges):\n while g.e != edges:\n removeEdge(g, directed)\n g.e -= 1\n elif (g.e < edges):\n while g.e != edges:\n addEdge(g, directed, connected)\n g.e += 1\n else: # Edge count is correct, just do an edge swap for the mutation\n removeEdge(g, directed)\n addEdge(g, directed, connected)",
"def vandermonde_matrix(x):\n m = size(x) \n n = m+1\n V = ones((m, n))\n for j in range(1, n):\n for i in range(0, m):\n V[i,j] = pow(x[i],j) \n return V",
"def edge_magnitude(edge_x, edge_y):\n c = copy.deepcopy(edge_x)\n #print(len(c), len(c[0]))\n for i in range(len(edge_x)):\n for j in range(len(edge_x[0])):\n \n c[i][j] = ((edge_x[i][j]**2 + edge_y[i][j]**2)**0.5)\n \n #print(max([max(i) for i in c]))\n #print(min([min(i) for i in c]))\n c = normalize(c)\n return c\n # TODO: implement this function.\n #raise NotImplementedError\n #return edge_mag"
] | [
"0.60190064",
"0.5874437",
"0.5793331",
"0.5536843",
"0.5514731",
"0.5514731",
"0.5465191",
"0.5450256",
"0.54244536",
"0.54083383",
"0.5379471",
"0.5352703",
"0.53356016",
"0.52899003",
"0.52723056",
"0.52532554",
"0.52459836",
"0.52313924",
"0.52127993",
"0.5145558",
"0.5140674",
"0.5132688",
"0.51227355",
"0.5073981",
"0.50734764",
"0.5065101",
"0.5056542",
"0.5055056",
"0.5049715",
"0.50392544"
] | 0.62255317 | 0 |
Given the dimension of the lattice, size of the lattice along all dimensions, the number of steps within which two vertices are connected (nei), and the probability p, find the minimum feedback arc set of a Watts-Strogatz graph | def runWattsStrogatz(dim, size, nei, p):
s = z.Optimize()
g = ig.Graph.Watts_Strogatz(dim, size, nei, p, loops=True, multiple=False)
while g.is_dag():
g = ig.Graph.Watts_Strogatz(dim, size, nei, p, loops=True, multiple=False)
return MFAS_set_cover(s,g), u.get_feedback_arc_set(g) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def probability(N_dr, L_opmin, L_opmax, L_min, L_max, L_d):\n opening_nomullignas = []\n opening_withmullignas = []\n sum_nomulligans = 0\n sum_withmulligans = 0\n mulligan_coeff = 0\n\n for i in range(L_opmin, min(L_opmax + 1, 8)): # first make a list of tuples of the form:\n # (number_of_lands_in_opening_hand, probability_of_drawing_such_a_hand)\n a = hypergeom(i, 7, 60, L_d)\n opening_nomullignas.append((i, a))\n mulligan_coeff = mulligan_coeff + a # this will be used later for calculating the probability of\n # taking the mulligan and is used as a coefficient before the mulligan sum\n for (x, y) in opening_nomullignas: # use the list of tuples to calculate the first part of equation 5\n partial_nomulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_nomulligans = partial_nomulligans + hypergeom(j, N_dr, 53, L_d - x)\n sum_nomulligans = sum_nomulligans + partial_nomulligans * y\n\n mulligan_coeff = 1 - mulligan_coeff # probability of mulliganing\n for i in range(L_opmin, min(L_opmax + 1, 7)): # doing the same thing as before, but drawing 6 instead of 7 cards\n a = hypergeom(i, 6, 60, L_d)\n opening_withmullignas.append((i, a))\n\n for (x, y) in opening_withmullignas:\n partial_withmulligans = 0\n for j in range(L_min - x, L_max - x + 1):\n partial_withmulligans = partial_withmulligans + hypergeom(j, N_dr, 54, L_d - x)\n sum_withmulligans = sum_withmulligans + partial_withmulligans * y\n total_withmulligans = mulligan_coeff * sum_withmulligans\n\n return total_withmulligans + sum_nomulligans",
"def solve():\n # the amount of lattice paths from (0, 0) to (n, k) is (n+k) over n (according to Wikipedia)\n return binomial_coefficient(20 + 20, 20)",
"def get_weight_from_minflow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"start\"],\n self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"destin\"],\n self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.source(),\n x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.sink(),\n x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n v,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n v,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n # add edge (s', x)\n 
start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n x,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n x,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i], capacities[i], unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n print(\"Has become ({}, {}) with sup {}\".format(start,\n destin,\n sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in \\\n self.arc_info[arc].keys()):\n print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n new_flow = old_flow + sup_flow\n self.arc_info[arc][\"weight\"] = int(new_flow)\n print(\"Edge ({},{}) weight is changed from {} to {}\".format(\n start,\n destin,\n old_flow,\n new_flow))\n else:\n print('There was an issue with the min cost flow input.')\n #self.check_conservation_of_flow() # check that solution is valid",
"def graph_estimate(S, lambdaL, p, maxdf, threshold=1e-4, max_iter=10000):\n nlambda = lambdaL.shape[0]\n x = np.zeros(p * maxdf * nlambda)\n col_cnz = np.zeros(p + 1).astype(int)\n row_idx = np.zeros(p * maxdf * nlambda).astype(int)\n idx_a = np.zeros(p).astype(int)\n w1 = np.zeros(p)\n\n cnz = 0\n for m in range(p):\n idx_i = np.ones(p).astype(int)\n idx_i[m] = 0\n w0 = np.zeros(p)\n size_a = 0\n\n for i in range(nlambda):\n ilambda = lambdaL[i]\n gap_ext = 1\n iter_ext = 0\n while gap_ext != 0 and iter_ext < max_iter:\n size_a_prev = size_a\n for j in range(p):\n if idx_i[j] == 1:\n r = S[m, j]\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[j, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[j] = r - ilambda\n else:\n w1[j] = r + ilambda\n idx_a[size_a] = j\n size_a += 1\n idx_i[j] = 0\n else:\n w1[j] = 0\n\n w0[j] = w1[j]\n\n gap_ext = size_a - size_a_prev\n\n gap_int = 1\n iter_int = 0\n while gap_int > threshold and iter_int < max_iter:\n tmp1 = 0\n tmp2 = 0\n for j in range(size_a):\n w_idx = idx_a[j]\n r = S[m, w_idx] + w0[w_idx]\n\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[w_idx, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[w_idx] = r - ilambda\n else:\n w1[w_idx] = r + ilambda\n tmp2 += abs(w1[w_idx])\n else:\n w1[w_idx] = 0\n\n tmp1 += abs(w1[w_idx] - w0[w_idx])\n w0[w_idx] = w1[w_idx]\n gap_int = tmp1 / tmp2\n iter_int += 1\n\n junk_a = 0\n for j in range(size_a):\n w_idx = idx_a[j]\n if w1[w_idx] == 0:\n junk_a += 1\n idx_i[w_idx] = 1\n else:\n idx_a[j - junk_a] = w_idx\n size_a -= junk_a\n iter_ext += 1\n\n for j in range(size_a):\n w_idx = idx_a[j]\n x[cnz] = w1[w_idx]\n row_idx[cnz] = i * p + w_idx\n cnz += 1\n col_cnz[m + 1] = cnz\n\n return col_cnz, row_idx, x",
"def circle_of_least_confusion(self,start):\n def f(x):\n pl=self.project_onto_plane(x)\n return max(pl[:,1])-min(pl[:,1])\n\n # m=self.marginal_ray\n if hasattr(self, 'start'):\n start=self.start\n else:\n# start=(m.Q_p[-1,0,2]-m.Q_p[-2,0,2])/2\n start=start\n print(start)\n res=minimize(f,(start), method='Nelder-Mead')\n self.start=res.final_simplex[0][0,0]\n\n return res.final_simplex[0][0,0],res.final_simplex[1][0]",
"def diameter_clustering_vs_prob_ws(num_nodes, k):\r\n xdata = []\r\n ydata = []\r\n zdata = []\r\n prob = 0.0005\r\n while prob < 1:\r\n xdata += [prob]\r\n diameters = []\r\n coeffs = []\r\n for i in range(k):\r\n graph = make_ws_graph(num_nodes, 8, prob)\r\n diameters += [diameter(graph)]\r\n coeffs += [clustering_coefficient(graph)]\r\n ydata += [sum(diameters) / k / 19.0] #divide by 19 as this diameter of circle lattice\r\n zdata += [sum(coeffs) / k / 0.7] #divide by 0.7 as this is clustering coefficient of circle lattice\r\n prob = 1.2*prob\r\n return xdata, ydata, zdata",
"def MinHks(N): \n return EntropyKS(nx.Graph([(i,i+1) for i in range(N-1)]))",
"def minimumTeachings(self, n: int, languages: List[List[int]], friendships: List[List[int]]) -> int:\n def dfs(p):\n if p == m + 1:\n return 0\n candi = set()\n for q in f_edges[p]:\n if q > p and not l_edges[p] & l_edges[q]:\n candi |= l_edges[q]\n if not candi:\n return dfs(p + 1)\n res = float('inf')\n for lang in candi:\n # l_edges[p].add(lang)\n res = min(res, 1 + dfs(p + 1))\n # l_edges[p].remove(lang)\n # print(p, candi, res, l_edges)\n return res\n\n f_edges = defaultdict(set)\n m = len(languages)\n for p, q in friendships:\n f_edges[p].add(q)\n f_edges[q].add(p)\n l_edges = defaultdict(set)\n\n for i, lst in enumerate(languages, 1):\n l_edges[i] = set(lst)\n print('f_edges', f_edges)\n print('l_edges', l_edges)\n return dfs(1)",
"def minimum_spanning_arborescence(sol):",
"def task2_extra():\n N = 0\n lam = 0\n Ls = numpy.array([2*L for L in range(1,23)])\n h = 0.01\n tau = 0.000099\n\n iterss = []\n\n for L in Ls:\n a = L // 2\n print(L)\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n # eps = int(0.1 * len(x))\n\n Vm = V1D(lam, x)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center\")\n plt.xlabel(\"$L$\")\n plt.ylabel(\"Time\")\n plt.plot(Ls, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel.pdf\", bbox_inches=\"tight\")",
"def NP_ATS(trajectory, min_gini):\r\n \r\n if len(trajectory) == 0:\r\n return -1\r\n \r\n \r\n \"\"\"\r\n Build partitions by avg velocity\r\n \"\"\"\r\n\r\n\r\n # epsilon_dict = [0.004592016 / 111, 0.01176842 / 111, 0.02649389 / 111, 0.05039507 / 111] #g=0.4\r\n epsilon_dict = [0.003461152 / 111, 0.02017883 / 111, 0.03125521 / 111, 0.08043219 / 111] #g=0.1, MAX Final Version\r\n # epsilon_dict = [0.00502254864192/111, 0.0255854290033/111, 0.040629531118/111, 0.112363957637/111] # g=0.3 max\r\n # epsilon_dict = [0.0129068727529/111,0.0336671793759/111,0.0587065427654/111,0.16344623299/111] # g=0.5 max\r\n # epsilon_dict = [0.0395070706597/111,0.0868617737273/111,0.220840329515/111,0.224195931453/111] # g=0.7 max\r\n # epsilon_dict = [0.0494846202161/111, 0.0942020880901/111, 0.252738388544/111, 0.347107672303/111] # g=0.9 max\r\n \r\n velocity_list = get_velocity(trajectory)\r\n \r\n epsilon_list = [epsilon_dict[label(v)] for v in velocity_list]\r\n \r\n # print [label(v) for v in velocity_list]\r\n # print epsilon_list\r\n \r\n S = EBT_Adaptive(trajectory, epsilon_list)\r\n\r\n return S",
"def find_min_hamiltonian_path(G,weights,probs_instead_of_weights=False):\n\n # Create a new model\n m = Model(\"hamiltonian_cycle\")\n \n # Create variables\n x_vars = {}\n u_vars = {}\n for var1 in permute(G.vertices()):\n for var2 in permute(G.vertices()):\n if var1 != var2:\n x_vars[(var1,var2)] = m.addVar(vtype='B', name=\"x_\"+str(var1)+'_'+str(var2))\n u_vars[var1] = m.addVar(vtype=GRB.INTEGER, name=\"u_\"+str(var1))\n m.update()\n \n for var in G.vertices():\n if var != START_NODE:\n cur_incoming = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[1] == var)])\n #print(cur_incoming)\n m.addConstr(cur_incoming,GRB.EQUAL,1.0)\n \n if var != END_NODE:\n cur_outgoing = LinExpr([(1.0,v) for k,v in x_vars.items() if (k[0] == var)])\n #print(cur_outgoing)\n m.addConstr(cur_outgoing,GRB.EQUAL,1.0)\n \n for var1 in G.vertices():\n for var2 in G.vertices():\n if var1 != var2:\n c = LinExpr([(1.0,u_vars[var1]),(-1.0,u_vars[var2]),(G.num_vertices(),x_vars[(var1,var2)])])\n #print(c)\n m.addConstr(c,GRB.LESS_EQUAL,G.num_vertices()-1)\n \n # Set objective\n #try:\n edge_weights = permute(G.get_edge_weights(weights))\n if probs_instead_of_weights:\n all_probs = []\n for v in G.vertices():\n if v != END_NODE:\n batch_scores = [(e,w) for e,w in edge_weights if e[0] == v]\n S = logsumexp([x[1] for x in batch_scores])\n batch_scores = [(e,np.exp(w-S)) for e,w in batch_scores]\n all_probs.extend(batch_scores)\n edge_weights = all_probs\n objective = LinExpr([(weight,x_vars[edge]) for edge,weight in edge_weights])\n #except TypeError:\n # return None\n \n m.setObjective(objective,GRB.MINIMIZE)\n m.update()\n code = m.optimize()\n \n try:\n return [k for k,v in x_vars.items() if v.x > 0.98]\n except GurobiError:\n return None",
"def screening_graph_estimate(S, lambdaL, p, maxdf, idx_scr, threshold=1e-4, max_iter=10000):\n nlambda = lambdaL.shape[0]\n nscr = idx_scr.shape[0]\n x = np.zeros(p * maxdf * nlambda)\n col_cnz = np.zeros(p + 1).astype(int)\n row_idx = np.zeros(p * maxdf * nlambda).astype(int)\n idx_a = np.zeros(nscr).astype(int)\n w1 = np.zeros(p)\n\n cnz = 0\n for m in range(p):\n idx_i = np.copy(idx_scr[:, m])\n w0 = np.zeros(p)\n size_a = 0\n\n for i in range(nlambda):\n ilambda = lambdaL[i]\n gap_ext = 1\n iter_ext = 0\n while gap_ext > 0 and iter_ext < max_iter:\n size_a_prev = size_a\n for j in range(nscr):\n w_idx = idx_i[j]\n if w_idx != -1:\n r = S[m, w_idx]\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[w_idx, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[w_idx] = r - ilambda\n else:\n w1[w_idx] = r + ilambda\n idx_a[size_a] = w_idx\n size_a += 1\n idx_i[j] = -1\n else:\n w1[w_idx] = 0\n w0[w_idx] = w1[w_idx]\n\n gap_ext = size_a - size_a_prev\n\n gap_int = 1\n iter_int = 0\n while gap_int > threshold and iter_int < max_iter:\n tmp1 = 0\n tmp2 = 1e-4\n for j in range(size_a):\n w_idx = idx_a[j]\n r = S[m, w_idx] + w0[w_idx]\n\n for k in range(size_a):\n rss_idx = idx_a[k]\n r -= S[w_idx, rss_idx] * w0[rss_idx]\n\n if abs(r) > ilambda:\n if r >= 0:\n w1[w_idx] = r - ilambda\n else:\n w1[w_idx] = r + ilambda\n tmp2 += abs(w1[w_idx])\n else:\n w1[w_idx] = 0\n tmp1 += abs(w1[w_idx] - w0[w_idx])\n w0[w_idx] = w1[w_idx]\n gap_int = tmp1 / tmp2\n iter_int += 1\n iter_ext += 1\n\n for j in range(size_a):\n w_idx = idx_a[j]\n x[cnz] = w1[w_idx]\n row_idx[cnz] = i * p + w_idx\n cnz += 1\n col_cnz[m + 1] = cnz\n\n return col_cnz, row_idx, x",
"def calc_configurations(self):\n n = self.n\n m = self.m\n p = self.p\n return ((n - p*m + 1) * (1 + ( (p-2)*(n - p*m) + 1))) / 2",
"def second_heuristic(self):\r\n directions = [[-1, -1], [-1, 1], [1, 1], [1, -1]]\r\n # aceasta matrice indica valoarea pe care o are mutarea unei piese pe o celula aleasa\r\n # se va aduna la media ponderilor adunate in lista weights\r\n\r\n # mijlocul tablei este punctul cel mai vulnerabil\r\n # in timp ce lateralele sunt sigure,iar linia bazei transforma piesa in rege\r\n\r\n points = [[0, 4, 0, 4, 0, 4, 0, 4],\r\n [4, 0, 3, 0, 3, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 1, 0, 2, 0, 4],\r\n [4, 0, 2, 0, 1, 0, 3, 0],\r\n [0, 3, 0, 2, 0, 2, 0, 4],\r\n [4, 0, 4, 0, 4, 0, 4, 0]]\r\n\r\n weights = [0 for i in range(4)]\r\n whites, blacks = 0, 0\r\n for i in range(8):\r\n for j in range(8):\r\n\r\n # numaram discurile de fiecare culoarea\r\n blacks += 1 if self.matrix[i][j] in ['N', 'n'] else 0\r\n whites += 1 if self.matrix[i][j] in ['A', 'a'] else 0\r\n\r\n if self.matrix[i][j] in [self.current_player, self.current_player.upper()]:\r\n\r\n # daca e piesa normala\r\n if self.matrix[i][j] == self.current_player:\r\n weights[0] += 4\r\n\r\n # cat de aproape este piesa de a deveni rege ( nr de linii din tabla - cate mai are pana ajunge pe ultima linie)\r\n\r\n # cu cat se apropie piesa mai multe de a deveni rege, scorul creste( negru - rege pentru i=0, alb -rege pentru i =7)\r\n if self.matrix[i][j] == 'n':\r\n weights[1] += (7 - i)\r\n elif self.matrix[i][j] == 'a':\r\n weights[1] += i\r\n else:\r\n # daca e piesa rege\r\n weights[0] += 8\r\n\r\n # cat de aproape este piesa rege de celelalte piese\r\n for d in directions:\r\n if self.matrix[i][j] == self.current_player.upper():\r\n # gaseste pe diagonala in directia d, o piesa adversara,daca exista\r\n x, y = self.find_piesa(i, j, d)\r\n if x and y:\r\n weights[2] += (x - i) * (x - i) + (y - j) * (y - j)\r\n vx = d[0] + i\r\n vy = d[1] + j\r\n back_x = i - d[0]\r\n back_y = j - d[1]\r\n next_x, next_y = vx + d[0], vy + d[1]\r\n # piesele pe care le poate captura jucatorul, daca e piesa rege are un scor mai mare\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(next_x, next_y) and self.matrix[next_x][next_y] == '.':\r\n if self.matrix[next_x][next_y] == self.opponent().upper():\r\n weights[3] += 7\r\n else:\r\n weights[3] += 4\r\n # piese care pot fi capturate; la fel daca este piesa rege atunci se scade mai mult scorul\r\n if self.bounded(vx, vy) and self.matrix[vx][vy] in [self.opponent(), self.opponent().upper()]:\r\n if self.bounded(back_x, back_y) and self.matrix[back_x][back_y] == '.':\r\n if self.matrix[vx][vy] == self.opponent().upper():\r\n weights[3] -= 6\r\n else:\r\n weights[3] -= 3\r\n # adunam piesa la media sumei date pentru a face AI-ul in caz de egalitate a scorului\r\n # sa imi aleaga piesa care ma pozitioneaza mai bine\r\n if self.move:\r\n return sum(weights) / 4 + points[self.move[0]][self.move[1]]\r\n return sum(weights) / 4\r\n\r\n def __str__(self):\r\n s = ' '\r\n for i in range(8):\r\n s += str(i) + ' '\r\n s += '\\n'\r\n for index, line in enumerate(self.matrix):\r\n s += str(chr(index + ord('a'))) + ' '\r\n for el in line:\r\n s += str(el) + ' '\r\n s += '\\n'\r\n\r\n return s",
"def prior_params_tree(self):\n id = {name:i for i, name in enumerate(list(self.tree.keys()))}\n n_nodes = len(id)\n dist_mx = np.zeros((n_nodes, n_nodes))\n\n for node1, edges in self.tree.items():\n for node2, dist in edges.dist:\n dist_mx[id[node1], id[node2]] = dist\n dist_mx[id[node2], id[node1]] = dist\n\n # while np.count_nonzero(dist_mx) < (n_nodes ** 2 - n_nodes):\n for _ in range(20):\n for i, j in combinations(range(n_nodes), 2):\n if dist_mx[i,j] > 0:\n continue\n row_i = dist_mx[i]\n row_j = dist_mx[j]\n value = (row_i + row_j) * (row_i > 0) * (row_j > 0)\n dist_mx[i, j] = dist_mx[j, i] = - max(np.unique(value))\n dist_mx = np.abs(dist_mx)\n\n evolve_rate = []\n for node1, node2 in combinations(self.m_cov.keys(), 2):\n mx_cov_dist = np.abs(self.m_cov[node1] - self.m_cov[node2])\n elements = mx_cov_dist[np.triu_indices(len(mx_cov_dist))]\n norm_elements = elements / dist_mx[id[node2], id[node1]]\n evolve_rate += list(norm_elements)\n\n\n\n df = np.mean([p.shape[0] for _, p in self.m_profiles.items()])\n p_theta_alpha = df/2\n # p_theta_alpha = 4\n p_theta_beta = np.percentile(evolve_rate, 75) * (p_theta_alpha - 1)\n # print(p_theta_alpha, p_theta_beta)\n return p_theta_alpha, p_theta_beta",
"def lp_acent(A,b,c,x_0):\n #Parameters\n b = b.flatten()\n c = c.flatten()\n ALPHA = 0.01\n BETA = 0.5\n EPSILON = 1e-6\n MAXITERS = 100\n if (np.min(x_0)<=0) and (np.linalg.norm>1e-3):\n print 'failed' \n return 0\n #m = len(b)\n #n = len(x_0)\n lambda_hist = []\n x = x_0\n for iter in range(MAXITERS):\n # H = np.diag(1/np.power(x,3))\n g = c-np.power(x,-1)\n #print g.shape\n #solving KKT system\n w = np.linalg.solve(np.dot(np.dot(A,np.diag(np.power(x,2))),A.T),\n np.dot(np.dot(-A,np.diag(np.power(x,2))),g))\n dx = np.dot(-np.diag(np.power(x,2)),np.dot(A.T,w)+g)\n lambdasqr = np.dot(-g.T,dx) #dx'*T*dx: newton incremental\n lambda_hist.append(lambdasqr/2)\n if lambdasqr/2 <= EPSILON:\n break\n # backtracking line search\n t = 1\n # brin the point inside the domain\n while np.min(x+t*dx)<=0:\n t =BETA*t\n while np.dot(c.T,np.dot(t,dx))-np.sum(np.log(x+t*dx))+np.sum(np.log(x))-ALPHA*t*np.dot(g.T,dx)>0:\n t = BETA*t\n x = x+t*dx\n if iter == MAXITERS:\n print 'ERROR: MAXITERS reached'\n else:\n #plt.figure()\n #plt.plot(range(len(lambda_hist)),lambda_hist,'b-',range(len(lambda_hist)),lambda_hist,'bo')\n return x,w,lambda_hist",
"def _iterative_cutting(g, p):\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res",
"def solve():\n\n s, g, e = make_lattice(21)\n stack = deque([[e]])\n vals = {s: 1}\n max_n = 0\n\n while stack:\n max_n = max(max_n, len(stack))\n n, *p = stack.pop()\n for c in g.get_connected(n):\n if c > n:\n continue\n if c in vals:\n propagate(c, [n] + p, vals)\n else:\n stack.append([c, n] + p)\n return vals[e]",
"def prob6(n = 5):\n domain = np.linspace(-1,1, 100)\n X = anp.array(list(domain), dtype = anp.float)\n dTn = auto.elementwise_grad(cheb_poly)\n primes = []\n \n for i in range(0, n):\n primes.append(dTn(X, i))\n \n #Plot graphs\n fig, graph = plt.subplots(3, 2)\n \n graph[0, 0].plot(X, primes[0], label = \"T0\")\n graph[0, 0].set_title(\"Chebyshev - T0\")\n \n graph[0, 1].plot(X, primes[1], label = \"T1\")\n graph[0, 1].set_title(\"Chebyshev - T1\")\n \n graph[1, 0].plot(X, primes[2], label = \"T2\")\n graph[1, 0].set_title(\"Chebyshev - T2\")\n \n graph[1, 1].plot(X, primes[3], label = \"T3\")\n graph[1, 1].set_title(\"Chebyshev - T3\")\n \n graph[2, 0].plot(X, primes[4], label = \"T4\")\n graph[2, 0].set_title(\"Chebyshev - T4\")\n \n plt.tight_layout()\n \n return",
"def annealing_objective(x: npt.NDArray[np.float64], *args: Any) -> float:\n blocks = [int(ind) for ind in x]\n\n approx_threshold = args[0]\n upper_limit = args[1]\n psol_configs = args[2]\n dists = args[3]\n utrys = args[4]\n n_cxs = args[5]\n p = args[6]\n w = args[7]\n\n if blocks in psol_configs:\n return 1.2\n\n approx_dist = sum(dists[i][b] for i, b in enumerate(blocks))\n\n if approx_dist > approx_threshold:\n return 1.1\n\n n_cx = w * (sum(n_cxs[i][b] for i, b in enumerate(blocks)) / upper_limit)\n\n if len(psol_configs) == 0:\n return n_cx\n\n distances = [0] * len(psol_configs)\n\n for index, config in enumerate(psol_configs):\n distances[index] = sum(\n utrys[i][config[i]].get_distance_from(utrys[i][b])\n < max(dists[i][config[i]], dists[i][b])\n for i, b in enumerate(blocks)\n ) / len(blocks)\n\n b_dv = (1 - w) * np.percentile(distances, p)\n\n return n_cx + b_dv",
"def circle_of_least_confusion(self):\n ff=beam_field() \n ff.rectangular_grid(1,2000,self.entrance_pupil)\n ff.propagate(self.surfaces)\n def f(x):\n pl=ff.project_onto_plane(x)\n return max(pl[:,1])\n \n # m=self.marginal_ray\n if hasattr(self, 'start'):\n start=self.start\n else:\n# start=(m.Q_p[-1,0,2]-m.Q_p[-2,0,2])/2\n start=(self.surfaces[-1].pos()-self.surfaces[-2].pos())/2\n #print(start)\n res=minimize(f,(start), method='Nelder-Mead')\n self.start=res.final_simplex[0][0,0]\n \n return res.final_simplex[0][0,0],res.final_simplex[1][0]",
"def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t \n\t\t # Hors Floyd / Ajout personnel\n if i != k and j != k: # Si i == k ou j == k, cela veut dire qu'on additionne un résultat supplémentaire à la case ij\n distance[i][j] = [] # Sinon ca signifie qu'on a trouvé un chemin plus court, du coup on supprime l'ancien chemin\n distance[i][j].extend( distance[i][k] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n distance[i][j].extend( distance[k][j] ) # Chemin d'autoroute parcouru en plus -> format: ['LA','AH']\n\n\t\t \n # On récupère simplement la liste des autoroutes parcourus\n idx_start = list_city.index( start )\n idx_destination = list_city.index( destination )\n distance_minimum = distance[ idx_start ][ idx_destination ]\n\n \n # Si on ne trouve aucune solution, on renvoie None\n if distance_minimum == [None]:\n distance_minimum = None\n \n return distance_minimum",
"def task2_extra2():\n N = 0\n lam = 0\n L = 10\n h = 0.001\n tau = 0.000099\n aa = numpy.array([0.25*a for a in range((L-1)*4)])\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n Vm = V1D(lam, x)\n # eps=int(0.1*len(x))\n\n iterss = []\n for a in aa:\n print(a)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center ($L={}$)\".format(L))\n plt.xlabel(\"$a$\")\n plt.ylabel(\"Time\")\n plt.plot(aa, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel_fixedL={}.pdf\".format(L), bbox_inches=\"tight\")",
"def solve(num_wizards, num_constraints, wizards, constraints):\n\n # print(num_wizards)\n # print(num_constraints)\n # print(wizards)\n # print(constraints)\n # node_set = set(wizards)\n \n\n\n def cost(sol,num_constraints,constraints):\n constraints_satisfied = 0\n constraints_failed = []\n output_ordering_map = {k: v for v, k in enumerate(sol)}\n for c in constraints:\n\n m = output_ordering_map # Creating an alias for easy reference\n\n wiz_a = m[c[0]]\n wiz_b = m[c[1]]\n wiz_mid = m[c[2]]\n\n if (wiz_a < wiz_mid < wiz_b) or (wiz_b < wiz_mid < wiz_a):\n constraints_failed.append(c)\n else:\n constraints_satisfied += 1\n return num_constraints - constraints_satisfied\n\n def neighbors(sol):\n wiz1 = random.randint(0,num_wizards-1)\n wiz2 = random.randint(0,num_wizards-1)\n\n new_sol = copy.copy(sol)\n temp = new_sol[wiz1]\n new_sol[wiz1] = new_sol[wiz2]\n new_sol[wiz2] = temp\n \n return new_sol\n\n def acceptance_probability(old_cost,new_cost,T):\n exponent = (old_cost - new_cost) / T\n \n try:\n ans = math.exp(exponent)\n except OverflowError:\n ans = float('inf')\n return ans\n\n\n def anneal(solution, num_constraints, constraints):\n old_cost = 0\n new_cost = 0\n old_cost = cost(solution,num_constraints,constraints)\n T = 1.0\n T_min = 0.000001\n alpha = 0.98\n while T > T_min:\n i = 1\n while i <= 1000:\n new_solution = neighbors(solution)\n new_cost = cost(new_solution,num_constraints,constraints)\n if new_cost == 0:\n return new_solution,new_cost\n ap = acceptance_probability(old_cost, new_cost, T)\n if ap > random.random():\n solution = new_solution\n old_cost = new_cost\n i += 1\n T = T*alpha\n return solution, old_cost\n\n s = copy.copy(wizards)\n random.shuffle(s)\n ret = anneal(s,num_constraints,constraints)\n \n for i in range(10):\n if ret[1] == 0:\n break\n random.shuffle(s)\n new_ret = anneal(s,num_constraints,constraints)\n print(i)\n if new_ret[1] < ret[1]:\n ret = new_ret\n print(\"constraints failed: {0}\".format(ret[1]))\n return ret[0]",
"def minNumberOfSemesters(self, n: int, dependencies: List[List[int]], k: int) -> int:\n\n @lru_cache(None)\n def dp(status, take, avaliable):\n if status == target: # all taken\n return 0\n bin_take = bin(take)[2:][::-1]\n for i,v in enumerate(bin_take):\n if v == '1':\n for j in edges[i]: # the indegree number changed during recursion\n indegree[j] -= 1\n if indegree[j] == 0:\n avaliable |= (1 << j)\n status |= (1 << i)\n # print('i, status', i, v, bin(status))\n # take -= (1 << i)\n\n lst = [i for i,v in enumerate(bin(avaliable)[2:][::-1]) if v == '1']\n # print(indegree)\n # print(lst)\n if not lst:\n res = 0\n # print('lst', lst, k)\n elif len(lst) <= k:\n res = dp(status, avaliable, 0)\n else:\n res = float('inf')\n for comb in combinations(lst, k):\n # print(comb)\n t, a = 0, avaliable\n for d in comb:\n t |= (1 << d)\n a -= (1 << d)\n res = min(res, dp(status, t, a))\n for i,v in enumerate(bin_take):\n if v == '1':\n for j in edges[i]: \n indegree[j] += 1\n return 1 + res\n\n self.counts = 0\n edges = defaultdict(list)\n indegree = Counter()\n for i,j in dependencies:\n edges[i].append(j)\n indegree[j] += 1\n\n courses = set(range(1, n+1))\n start = courses - indegree.keys()\n target = 2**(n+1) - 1\n avaliable = 0\n for i in start:\n avaliable |= (1 << i)\n\n return dp(1, 0, avaliable) - 1# first dp not take courses",
"def gap_length(L_t=79.6, p=75, fmax=1e12, p1=database['K+'],\r\n p2=database['pi+'], p3=database['p+'], l=2.74,\r\n E=1e6, plot=True, nf=200, delta_p=1.6e-2, n=100,\r\n just_pi=False, set_freq=5.7e9):\r\n gap_length = np.linspace(0, L_t-(2*l), n)[:-1]\r\n min_disp, freq = [], []\r\n for g in gap_length:\r\n if set_freq == None:\r\n f = freq_defl_comp(fmax, p, p1, p2, p3, L_t, l, E, plot=False,\r\n details=False, n=nf, delta_p=delta_p, fmin=0,\r\n just_pi=just_pi)\r\n else:\r\n f = set_freq\r\n freq.append(f)\r\n if just_pi == True:\r\n d2 = ang_deflection(p, f, p1, p2, L_t-g, l, E, delta_p=delta_p)\r\n min_disp.append(abs_deflection(d2, g))\r\n if just_pi == False: \r\n d2 = ang_deflection(p, f, p1, p2, L_t-g, l, E, delta_p=delta_p)\r\n d3 = ang_deflection(p, f, p1, p3, L_t-g, l, E, delta_p=delta_p)\r\n disp_2 = abs_deflection(d2, g)\r\n disp_3 = abs_deflection(d3, g)\r\n min_disp.append(np.min([disp_2, disp_3]))\r\n freq = np.array(freq)\r\n min_disp = np.array(min_disp)\r\n ratio = min_disp/freq\r\n ratio *= np.max(freq)/np.max(ratio)\r\n opt_freq_gap_index = np.argmax(ratio)\r\n opt_freq_gap_disp = [freq[opt_freq_gap_index], gap_length[opt_freq_gap_index], min_disp[opt_freq_gap_index]]\r\n if plot == True: \r\n fig = plt.figure(figsize=[9, 5])\r\n ax1 = fig.add_subplot(1, 1, 1)\r\n line1 = ax1.plot(gap_length, min_disp, 'r', alpha=0.5, label=f'minimum displacement')\r\n ax2 = ax1.twinx()\r\n line2 = ax2.plot(gap_length, freq, 'b', alpha=0.5, label=f'optimum frequency')\r\n line3 = ax2.plot(gap_length, ratio, 'g', alpha=0.5, label=f'ratio')\r\n ax1.set_xlabel('Gap Length / m', fontsize=20)\r\n ax1.set_xlim(0, L_t-(2*l))\r\n text = r'Minimum $D_{'\r\n text += p2.name[:-1] + '/' + p3.name[:-1]\r\n text += '}$ / mm'\r\n ax1.set_ylabel(text, fontsize=20, color='r')\r\n ax1.tick_params(axis='y', labelcolor='r')\r\n ax2.set_ylabel('Frequency / Hz', fontsize=20, color='b', alpha=1)\r\n ax2.tick_params(axis='y', labelcolor='b')\r\n ax1.set_ylim(0)\r\n ax2.set_ylim(0)\r\n leg = line1 + line2 + line3\r\n labs = [l.get_label() for l in leg]\r\n ax1.legend(leg, labs, loc=0, fontsize=12)\r\n fig.tight_layout()\r\n plt.show()\r\n return opt_freq_gap_disp",
"def get_interval_from_minflow(self, wide=False):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.arc_info[arc][\"start\"],\n # self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.arc_info[arc][\"destin\"],\n # self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # x,\n # self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.source(),\n # x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # self.sink(),\n # x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n # print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n # x,\n # self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".\n # format(s_prime, v, int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".\n # format(v, t_prime, int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n 
# add edge (s', x)\n start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n # s_prime,\n # x,\n # int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n # print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n # x,\n # t_prime,\n # int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i],\n capacities[i],\n unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n # print('Minimum cost:', min_cost_flow.OptimalCost())\n # print('')\n # print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n # cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n # print('%1s -> %1s %3s / %3s %3s' % (\n # min_cost_flow.Tail(i),\n # min_cost_flow.Head(i),\n # min_cost_flow.Flow(i),\n # min_cost_flow.Capacity(i),\n # cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n # print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n # print(\"Has become ({}, {}) with sup {}\".format(start,\n # destin,\n # sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in\n self.arc_info[arc].keys()):\n # print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n bound_1 = old_flow + sup_flow\n bound_2 = old_flow - sup_flow\n new_lb = max(0, int(min(bound_1, bound_2)))\n new_ub = int(max(bound_1, bound_2))\n if wide:\n if new_lb == new_ub:\n # print(\"We had a zero interval\")\n new_lb = int(new_lb*0.8)\n new_ub = int(new_ub*1.2)\n if new_lb == 0:\n # print(\"We got a zero lower bound\")\n new_ub = 5\n # print(\"But now we're doing {} {}\".\n # format(new_lb, new_ub))\n\n self.arc_info[arc][\"lower_bound\"] = new_lb\n self.arc_info[arc][\"upper_bound\"] = new_ub\n # print(\"Edge ({},{}) bounds are [{},{}]\".format(\n # start,\n # destin,\n # self.arc_info[arc][\"lower_bound\"],\n # self.arc_info[arc][\"upper_bound\"]))\n # print(self.arc_info[arc])\n else:\n print('There was an issue with the min cost flow input.')\n # self.check_conservation_of_flow() # check that solution is valid",
"def min_number_of(g, elt, n_fuel):\n if elt == 'FUEL':\n return n_fuel\n t = sum([g.edges[elt, s]['cost'] * min_number_of(g, s, n_fuel) for s in\n g.successors(elt)])\n return divup(t, g.nodes[elt]['batch_size'])",
"def metis(W, levels, rid=None):\n # Function written by M. Defferrard, taken verbatim, from \n # https://github.com/mdeff/cnn_graph/blob/master/lib/coarsening.py#L34\n\n N, N = W.shape\n if rid is None:\n rid = np.random.permutation(range(N))\n parents = []\n degree = W.sum(axis=0) - W.diagonal()\n graphs = []\n graphs.append(W)\n #supernode_size = np.ones(N)\n #nd_sz = [supernode_size]\n #count = 0\n\n #while N > maxsize:\n for _ in range(levels):\n\n #count += 1\n\n # CHOOSE THE WEIGHTS FOR THE PAIRING\n # weights = ones(N,1) # metis weights\n weights = degree # graclus weights\n # weights = supernode_size # other possibility\n weights = np.array(weights).squeeze()\n\n # PAIR THE VERTICES AND CONSTRUCT THE ROOT VECTOR\n idx_row, idx_col, val = scipy.sparse.find(W)\n perm = np.argsort(idx_row)\n rr = idx_row[perm]\n cc = idx_col[perm]\n vv = val[perm]\n cluster_id = metis_one_level(rr,cc,vv,rid,weights) # rr is ordered\n parents.append(cluster_id)\n\n # TO DO\n # COMPUTE THE SIZE OF THE SUPERNODES AND THEIR DEGREE \n #supernode_size = full( sparse(cluster_id, ones(N,1) ,\n #\tsupernode_size ) )\n #print(cluster_id)\n #print(supernode_size)\n #nd_sz{count+1}=supernode_size;\n\n # COMPUTE THE EDGES WEIGHTS FOR THE NEW GRAPH\n nrr = cluster_id[rr]\n ncc = cluster_id[cc]\n nvv = vv\n Nnew = cluster_id.max() + 1\n # CSR is more appropriate: row,val pairs appear multiple times\n W = scipy.sparse.csr_matrix((nvv,(nrr,ncc)), shape=(Nnew,Nnew))\n W.eliminate_zeros()\n # Add new graph to the list of all coarsened graphs\n graphs.append(W)\n N, N = W.shape\n\n # COMPUTE THE DEGREE (OMIT OR NOT SELF LOOPS)\n degree = W.sum(axis=0)\n #degree = W.sum(axis=0) - W.diagonal()\n\n # CHOOSE THE ORDER IN WHICH VERTICES WILL BE VISTED AT THE NEXT PASS\n #[~, rid]=sort(ss); # arthur strategy\n #[~, rid]=sort(supernode_size); # thomas strategy\n #rid=randperm(N); # metis/graclus strategy\n ss = np.array(W.sum(axis=0)).squeeze()\n rid = np.argsort(ss)\n\n return graphs, parents"
] | [
"0.6027979",
"0.5936431",
"0.5802892",
"0.5774201",
"0.57534164",
"0.5735415",
"0.56888187",
"0.56885964",
"0.56682485",
"0.56297076",
"0.55605185",
"0.55485064",
"0.5500358",
"0.54803574",
"0.54629415",
"0.54441243",
"0.5442056",
"0.5441907",
"0.54413617",
"0.54407454",
"0.54405135",
"0.5432442",
"0.5423952",
"0.5423032",
"0.53739434",
"0.5369961",
"0.5362276",
"0.53617376",
"0.53605866",
"0.5358188"
] | 0.622067 | 0 |
The `is_mersenne` method for the beginner challenge. | def is_mersenne(num: int) -> bool:
s = 4
m = (2 ** num) - 1
for i in range(0, num - 2):
s = ((s**2) - 2) % m
return s == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_MT19937Cipher():\n plaintext = \"h#llo PRNG stream cipher w@rld!!!!\"\n cipher = MT19937Cipher(0xdeadbeef)\n ciphertext = cipher.encrypt(plaintext)\n decrypted_plaintext = cipher.decrypt(ciphertext)\n is_match = plaintext.__eq__(decrypted_plaintext)\n assert(is_match == True)",
"def is_seed_valid(seed):\n if seed == \"0\":\n return True\n\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True",
"def test_is_wieferich(self):\n self.assertEqual(MathFunctions.is_wieferich(2), False)\n self.assertEqual(MathFunctions.is_wieferich(3), False)\n self.assertEqual(MathFunctions.is_wieferich(7), False)\n self.assertEqual(MathFunctions.is_wieferich(1093), True)\n self.assertEqual(MathFunctions.is_wieferich(3511), True)\n self.assertEqual(MathFunctions.is_wieferich(1012393), False)",
"def test_task559_mersen_number(number, expected_value):\r\n assert algo.Task559.mersen_numbers(number) == expected_value",
"def check_fermat(a, b, c, n):\n if n > 2 and ((a**n + b**n) == c**n):\n print('Holy smokes, Fermat was wrong!')\n else:\n print('No, that doesn’t work.')",
"def answer(self) -> bool:",
"def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True",
"def is_seed_valid(seed):\n for ch in seed:\n if not ch in all_chars_set:\n return False\n return True",
"def test_ok_mm_primer(self):\r\n primers = ['AAAA', 'GGGG']\r\n self.assertEqual(ok_mm_primer('AAAA', primers, 0), True)\r\n self.assertEqual(ok_mm_primer('AAAA', primers, 3), True)\r\n self.assertEqual(ok_mm_primer('CCCC', primers, 0), False)\r\n self.assertEqual(ok_mm_primer('CCCA', primers, 3), True)\r\n self.assertEqual(ok_mm_primer('CCCA', primers, 2), False)\r\n self.assertEqual(ok_mm_primer('CCGG', primers, 2), True)\r\n self.assertEqual(ok_mm_primer('CCGA', primers, 2), False)",
"def is_amnicable(num):\n\n # Because d(m) = d(n) = s(m) + s(n)\n # so d(s(m) - m) = d(n)\n result = divisor(1, num)\n\n # s(n) and n is supposed to be\n # different numbers so not amnicable\n if 2 * num == result:\n return False\n\n result2 = divisor(1, result - num)\n return result == result2",
"def mersenne(p):\n return 2 ** p -1",
"def is_armstrong_number(armstrong_candidate: int) -> bool:\n\n if armstrong_candidate < 0:\n return False\n\n nr_digits = nr_digits_number(armstrong_candidate)\n a_sum = armstrong_sum(armstrong_candidate, nr_digits)\n\n return True if a_sum == armstrong_candidate else False",
"def confused(self, rand):\n return rand > 0",
"def test_fasle_is_armstrongs(number: int):\n assert is_armstrong(number) is False",
"def fermat(n, k=10):\n if n != 2 and n % 2 == 0:\n return False\n\n for i in range(k):\n a = random.randint(2, n - 2)\n result = pow(a, n-1, n)\n if result != 1:\n return False\n\n return True",
"def have_mister(self):\n return bool(self.mister)",
"def isthmus1D(cube):\n \n return countComponents26(cube) >= 2;",
"def prime():\n number = random.randint(1, 100)\n if len(primfacs(number)) == 1:\n return number, 'yes'\n return number, 'no'",
"def is_harshad(n):\n return n % euler.sum_digits(n) == 0",
"def can_generate_ransom_note(self):\n if self.ransom_text == '' or self.ransom_text == ' ':\n return True\n ransom_text_words = self.ransom_text.split(' ')\n magazine_text_words = self.magazine_text.split(' ')\n # counting the occurrences of words in the ransom and magazine texts.\n ransom_count = self._count_words_in_string(ransom_text_words)\n magazine_count = self._count_words_in_string(magazine_text_words)\n result = False\n for i in ransom_text_words:\n # if magazine_count hashmap doesn't have word\n if magazine_count.get(i) is None:\n result = False\n break\n # if ransom_count hashmap have less word occurances than magazine count.\n if ransom_count.get(i) <= magazine_count.get(i):\n result = True\n else:\n result = False\n break\n return result",
"def is_armstrong_number(number: int) -> bool:\n result = 0\n num_str = str(number)\n for i in num_str:\n result += int(i) ** len(num_str)\n return result == number",
"def oracle(ct: int) -> bool:\n return rsa.dec(ct) & 1 == 0",
"def test_task559_eratosthenes(number, expected_value):\r\n assert algo.Task559.eratosthenes(number) == expected_value",
"def fermat_strong_test(n, a):\n if n == 2:\n return True\n # n - 1 = d * 2 ^ s\n d, s = factor_twos(n - 1)\n\n # by Fermat theorem, if n is prime then\n # (a^d - 1)(a^d + 1)(a^2d + 1)(a^4d + 1)...(a^2^(s-1)d + 1) = 0 (mod n)\n a = powmod(a, d, n)\n if a == 1 or a == n - 1:\n return True\n for _ in range(s):\n a = a * a % n\n if a == n - 1:\n return True\n return False",
"def _check_random_state(seed):\n return check_random_state(seed)",
"def check_seed():\n np.random.seed(1000)\n standard = [\n {0: -3.0, 1: -5.0, 'index': 0},\n {0: -6.0, 1: -8.0, 'index': 1},\n {0: 5.0, 1: -1.0, 'index': 2},\n {0: 1.0, 1: -7.0, 'index': 3},\n {0: -2.0, 1: -3.0, 'index': 4},\n {0: 7.0, 1: 3.0, 'index': 5},\n {0: -4.0, 1: -2.0, 'index': 6},\n {0: 2.0, 1: 6.0, 'index': 7}\n ]\n\n this_machine = create_points(8)\n\n flag = True\n for i in range(8) :\n flag &= this_machine[i][0] == standard[i][0] \n flag &= this_machine[i][1] == standard[i][1] \n flag &= this_machine[i][\"index\"] == i\n \n if not flag :\n print(\"\"\"\n The Python installation on this machine is odd: it appears to\n use a non-standard random number generator -- run \n this script on the machines in the Otter lab instead.\n If that fails too, send an email to [email protected].\n \"\"\")\n print (\"You got these test points:\", this_machine)\n print (\"You should have got:\", standard)\n exit(-1)\n else :\n print (\"Check passed\")",
"def test_encoder():\r\n #Check edge cases first\r\n assert encode_morse(123) == \"Plaintext is not a string!\", \"Test 1 failed, input integer 123\"\r\n assert encode_morse(\"\") == \"\", \"Test 2 failed, input ''\"\r\n assert encode_morse(\"^\") == \"ERROR: You can't encode the following character: ^\", \"Test 3 failed, input '^'\"\r\n assert encode_morse(\" e e \") == \". / .\", \"Test 4 failed, input ' e e '\"\r\n assert encode_morse(\"AbCd\") == \".- -... -.-. -..\", \"Test 5 failed, input 'AbCd'\"\r\n \r\n #Now we run possible plaintexts and check their corresponding ciphertexts\r\n assert encode_morse(\"the quick brown fox jumps over the lazy dog\") == \"- .... . / --.- ..- .. -.-. -.- / -... .-. --- .-- -. / ..-. --- -..- / .--- ..- -- .--. ... / --- ...- . .-. / - .... . / .-.. .- --.. -.-- / -.. --- --.\", \"Test 6 failed, input 'the quick brown fox jumps over the lazy dog'\"\r\n assert encode_morse(\"H1er0ph@nT + '\") == \".... .---- . .-. ----- .--. .... .--.-. -. - / .-.-. / .----.\", \"Test 7 failed, input 'H1er0ph@nT + ''\"\r\n assert encode_morse('\"' + \"'\") == \".-..-. .----.\", \"Test 8 failed, input ''(double apostrophe)' + '(single apostrophe)'\"\r\n \r\n #Check that input not mutated\r\n test_plaintext_9 = \"test\"\r\n encode_morse(test_plaintext_9)\r\n assert test_plaintext_9 == \"test\", \"Test 9 failed, input 'test' mutated\"\r\n \r\n #If all tests passed\r\n print (\"Congratulations! 9/9 tests passed!\")",
"def test_true_is_armstrong(number: int):\n assert is_armstrong(number)",
"def is_armstrong_number(number: int) -> bool:\n\n str_number = f\"{number}\"\n return sum(pow(int(x), len(str_number)) for x in str_number) == number",
"def McNuggets(n):\n a=0\n b=0\n c=0\n result=0\n while result <= n:\n result = 6*a + 9*b + 20*c\n if result > n:\n return False\n elif result == n:\n return True\n else:\n a+=1\n ..."
] | [
"0.59019184",
"0.57263315",
"0.5583907",
"0.5496015",
"0.54939693",
"0.5433102",
"0.53913236",
"0.53913236",
"0.53301436",
"0.5304294",
"0.5301869",
"0.52900606",
"0.52485555",
"0.5239065",
"0.5235083",
"0.5229403",
"0.5112689",
"0.5111179",
"0.51060545",
"0.50973195",
"0.5096979",
"0.5096555",
"0.5089493",
"0.5062336",
"0.50600374",
"0.5036897",
"0.5018408",
"0.5014187",
"0.50126696",
"0.49986455"
] | 0.8502041 | 0 |
The `swap_nodes` method can be used as a helper for `invert_tree`. | def swap_nodes(tree) -> None:
if tree is None:
raise ValueError('Empty tree')
tmp = tree.left
tree.left = tree.right
tree.right = tmp | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def swap(self, subtree_a, subtree_b):\n\n temp1 = subtree_a.parent\n temp2 = subtree_b.parent\n\n temp1.children[temp1.children.index(subtree_a)] = subtree_b\n temp2.children[temp2.children.index(subtree_b)] = subtree_a\n \n subtree_a.parent = temp2\n subtree_b.parent = temp1\n\n self.propogate_subtree(subtree_a)\n self.propogate_subtree(subtree_b)",
"def Rearrange(self, node):\n nnode = Node(node, \"%si\" % node.tag);\n nnode.children = node.children[1:];\n node.children[1:] = [nnode];",
"def swapChildren(self, *args):\n return _libsbml.ASTNode_swapChildren(self, *args)",
"def _reorder_nodes(orient, nodes, flip_matrix, unflip=False):\n # reorder nodes (Code adapted from\n # meshmode.mesh.processing.flip_simplex_element_group)\n\n # ( round to int bc applying on integers)\n flip_mat = np.rint(flip_matrix)\n if unflip:\n flip_mat = flip_mat.T\n\n # flipping twice should be identity\n assert la.norm(\n np.dot(flip_mat, flip_mat)\n - np.eye(len(flip_mat))) < 1e-13\n\n # flip nodes that need to be flipped\n flipped_nodes = np.copy(nodes)\n flipped_nodes[orient < 0] = np.einsum(\n \"ij,ej->ei\",\n flip_mat, nodes[orient < 0])\n\n return flipped_nodes",
"def _swap(self, node1, node2):\n arr = self._array\n arr[node1._index], arr[node2._index] = arr[node2._index], \\\n arr[node1._index]\n # Swap indices stored in nodes as well\n node1._index, node2._index = node2._index, node1._index",
"def _switch_nodes(self, walker):\n walker.prev.next = walker.next \n walker.next = walker.next.next \n walker.next.prev = walker\n walker.prev.next.prev = walker.prev\n walker.prev.next.next = walker \n walker.prev = walker.prev.next",
"def swap_nodes(self, a, b):\n if a == b:\n return\n if len(self) < 2:\n return\n\n nodeA = nodeB = None\n curr_node = self._header\n\n while curr_node is not None and not (nodeA and nodeB):\n if curr_node._element == a and not nodeA:\n nodeA = curr_node\n elif curr_node._element == b and not nodeB:\n nodeB = curr_node\n curr_node = curr_node._next\n\n if curr_node is None:\n raise Empty(\"Not in list\")\n\n precessorA = nodeA._prev\n successorA = nodeA._next\n precessorB = nodeB._prev\n successorB = nodeB._next\n\n precessorA._next = successorA._prev = nodeB\n precessorB._next = successorB._prev = nodeA\n\n nodeA._prev, nodeB._prev = nodeB._prev, nodeA._prev\n nodeA._next, nodeB._next = nodeB._next, nodeA._next",
"def sort_nodes(self):\n nodes = self._chain.root_node.ordered_subnodes_hierarchy()\n self._chain.nodes = nodes",
"def recoverTree(root):\r\n \r\n #inorder dfs solution; we will exploit the fact that the an INORDER dfs traversal for a binary tree will always return\r\n #the node values in increasing order due to its nature\r\n #thus, given the two nodes we will need to swap, \r\n #[1] the first node will always be greater then its next node, \r\n #and [2] the second node will always be smaller than its previous node. \r\n #Otherwise, there will be more than one swap required - which is impossible.\r\n \r\n node1 = None\r\n node2 = None\r\n prev = TreeNode(float(-inf))\r\n \r\n def dfs(node):\r\n \r\n nonlocal node1, node2, prev\r\n \r\n if node:\r\n dfs(node.left)\r\n \r\n if not node1 and prev.val >= node.val: #[1]\r\n node1 = prev\r\n \r\n if prev.val >= node.val: #[2]\r\n node2 = node\r\n \r\n prev = node\r\n \r\n dfs(node.right)\r\n return\r\n \r\n dfs(root)\r\n node1.val,node2.val = node2.val,node1.val\r\n return",
"def _replace_node(self, nxt, node):\n nxt.left = node.left\n nxt.right = node.right\n nxt.parent = node.parent\n if node is self.root:\n self.root = nxt\n if nxt.left:\n nxt.left.parent = nxt\n if nxt.right:\n nxt.right.parent = nxt\n if nxt.parent:\n if nxt.parent.right is node:\n nxt.parent.right = nxt\n else:\n nxt.parent.left = nxt",
"def invert_tree(tree: TreeNode) -> TreeNode:\n if tree is None:\n raise ValueError('Empty treee')\n swap_nodes(tree)\n invert_tree(tree.left)\n invert_tree(tree.right)\n return tree",
"def remap_nodes(self, new_node_mapping):\n # because all nodes are SchemaNodeIDs (i.e. objects), we only need to reassign nodes one way\n # changes propagate to chains, chain root_nodes, and parents automatically\n for chain in self.chains:\n for edge in chain:\n head, tail = edge\n if head in new_node_mapping.keys():\n head.value = new_node_mapping[head]\n if tail in new_node_mapping.keys():\n tail.value = new_node_mapping[tail]",
"def alter_tree(node):\n if not node.input:\n return _alter_node(node)\n\n converted_children = []\n for input_op in node.input:\n converted_children.append(alter_tree(input_op))\n node.input = converted_children\n return _alter_node(node)",
"def __swap_kv(self, node1, node2):\r\n node1.key, node2.key = node2.key, node1.key\r\n node1.value, node2.value = node2.value, node1.value",
"def recoverTree(self, root: TreeNode) -> None:\n # base case\n if not root:\n return\n # a list to store node to be exchange\n change = []\n lst = self.inorder(root)\n for i in range(len(lst)-1):\n if lst[i+1].val < lst[i].val:\n # If we already found the first one i, the seconde one would be i+1\n # you can find that in the second example given by Leetcode\n if change:\n change.append(i+1)\n else:\n change.append(i)\n # exchange elements\n if len(change) == 1:\n lst[change[0]].val, lst[change[0]+1].val = lst[change[0]+1].val, lst[change[0]].val\n else:\n lst[change[0]].val, lst[change[1]].val = lst[change[1]].val, lst[change[0]].val",
"def merge_nodes(node_ids, tree):\n # Copy the tree so we keep the original intact\n temp = deepcopy(tree)\n # Don't merge nodes if they are already merged\n if node_ids in temp or tuple(reversed(node_ids)) in temp:\n return temp\n # Get all the children of each node we need to merge (except if that child is the other node)\n children_1 = filter(lambda x: x != node_ids[1], temp[node_ids[0]])\n children_2 = filter(lambda x: x != node_ids[0], temp[node_ids[1]])\n merged_children = children_1 + children_2\n # Remove the original nodes in the tree\n temp.pop(node_ids[0], None)\n temp.pop(node_ids[1], None)\n # Add a new node\n temp[node_ids] = merged_children\n \n # Update references to the old node with references to the new node\n for k, v in temp.iteritems():\n if node_ids[0] in v:\n idx = v.index(node_ids[0])\n v.remove(node_ids[0])\n v.insert(idx, node_ids)\n if node_ids[1] in v:\n idx = v.index(node_ids[1])\n v.remove(node_ids[1])\n v.insert(idx, node_ids)\n temp[k] = dedupe(v)\n return temp",
"def vertex_swap(d, n, l, i1, i2, j1, j2):\n if i1 == i2 and j1 == j2:\n return l\n if i1 == j1:\n # (i1,i1) -> (i2,i2)\n assert i2 == j2\n def swap(v):\n swap2(d, n, v, i1, i2)\n elif i1 == i2:\n # (i,j1) -> (i,j2)\n def swap(v):\n swap2(d, n, v, j1, j2)\n elif j1 == j2:\n # (i1,j) -> (i2,j)\n def swap(v):\n swap2(d, n, v, i1, i2)\n elif i1 == j2 and i2 == j1:\n # (i1,j1) -> (j1,i1)\n def swap(v):\n swap2(d, n, v, i1, j1)\n elif i1 == j2:\n # (i1,j1) -> (i2,i1)\n def swap(v):\n swap3(d, n, v, j1, i1, i2)\n elif i2 == j1:\n # (i1,j1) -> (j1,j2)\n def swap(v):\n swap3(d, n, v, i1, j1, j2)\n else:\n # (i1,j1) -> (i2,j2)\n def swap(v):\n swap2(d, n, v, i1, i2)\n swap2(d, n, v, j1, j2)\n ll = []\n for v in l:\n v = v.__copy__()\n swap(v)\n v.set_immutable()\n ll.append(v)\n ll.sort()\n return tuple(ll)",
"def swap_node(\n tree: Optional[GPTree],\n tree_data: Union[str, Callable],\n target_data: Union[str, Callable],\n) -> bool:\n if tree is None:\n return False\n\n if tree.data == tree_data:\n tree.data = target_data\n return True\n\n if not swap_node(tree.left, tree_data, target_data):\n return swap_node(tree.right, tree_data, target_data)\n return True",
"def swap(self, *args):\n return _osgAnimation.VertexList_swap(self, *args)",
"def update_nodes(nodes, bb):\n \n for node in nodes:\n node.set(\"label\", update_bb_string(node.get_attributes()[\"label\"], bb))\n node.set_name(update_node_name(node.get_name(), bb))",
"def move_container_nodes(nodes, container):\n for c in container:\n parents = [x for x in nodes if x.has_child(c) and x != c]\n children = [ x for x in c.get_children() if x != c]\n\n for content in container[c]:\n if content == c:\n continue\n for child in children:\n content.add_child(child)\n for parent in parents:\n parent.add_child(content)\n\n for parent in parents:\n parent.remove_child(c)\n return nodes",
"def order_nodes(tree, increase=True):\n res = tree.copy()\n for node in res.postorder():\n if node.is_tip():\n node.n = 1\n else:\n node.n = sum(x.n for x in node.children)\n for node in res.postorder():\n if not node.is_tip():\n children = node.children\n node.children = []\n for child in sorted(children, key=lambda x: x.n, reverse=increase):\n node.append(child)\n for node in res.postorder():\n delattr(node, 'n')\n return res",
"def rearange_nodes_links_old(idx, nodes, links):\n nodes = nodes[idx,:]\n for i in range(0, len(links)):\n links[i, 0] = idx.index(links[i, 0])\n links[i, 1] = idx.index(links[i, 1])\n for i in range (0, len(links)):\n links[i] = sorted(links[i])\n \n # Sort links according to the source.\n links = links[links[:,0].argsort()]\n idx = update_idx_links(links[:,0], links[:,1])\n links = links[idx]\n return nodes, links",
"def substituteNodes(node, replacements):\n p = node.parentNode\n for r in replacements:\n p.insertBefore(r, node)\n p.removeChild(node)",
"def _redirect(self, node1, node2):\n if node1.parent.right is node1:\n node1.parent.right = node2\n else:\n node1.parent.left = node2",
"def invert(tree: nx.DiGraph) -> nx.DiGraph:\n new_tree = tree.copy()\n for node in new_tree.nodes:\n swap = np.empty(new_tree.nodes[node]['colinear_segments'].shape)\n swap[:, 1, :] = new_tree.nodes[node]['colinear_segments'][:, 0, :]\n swap[:, 0, :] = new_tree.nodes[node]['colinear_segments'][:, 1, :]\n new_tree.nodes[node]['colinear_segments'] = swap\n\n line_swap = np.empty((2, 2))\n old_line = new_tree.nodes[node]['line']\n line_swap[0] = old_line[1]\n line_swap[1] = old_line[0]\n new_tree.nodes[node]['line'] = line_swap\n\n for edge in new_tree.edges:\n new_tree.edges[edge]['position'] *= -1\n\n return new_tree",
"def _replace(self, x, y):\n y.parent = x.parent\n if x is self.root:\n self.root = y\n return\n elif x is x.parent.left:\n x.parent.left = y\n else:\n x.parent.right = y\n\n self.update(y, -1)",
"def swap(root, k):\n q = deque([(root, 1)])\n while q:\n node, level = q.popleft()\n if node is None:\n continue\n if level % k == 0:\n node.left, node.right = node.right, node.left\n q.append((node.left, level+1))\n q.append((node.right, level+1))",
"def reorder_nodes(g, new_node_ids):\n assert (\n len(new_node_ids) == g.num_nodes()\n ), \"The number of new node ids must match #nodes in the graph.\"\n new_node_ids = utils.toindex(new_node_ids)\n sorted_ids, idx = F.sort_1d(new_node_ids.tousertensor())\n assert (\n F.asnumpy(sorted_ids[0]) == 0\n and F.asnumpy(sorted_ids[-1]) == g.num_nodes() - 1\n ), \"The new node IDs are incorrect.\"\n new_gidx = _CAPI_DGLReorderGraph_Hetero(\n g._graph, new_node_ids.todgltensor()\n )\n new_g = DGLGraph(gidx=new_gidx, ntypes=[\"_N\"], etypes=[\"_E\"])\n new_g.ndata[\"orig_id\"] = idx\n return new_g",
"def swap_vertices(self, i, j):\r\n store_vertex_i = self.vertices[i]\r\n store_vertex_j = self.vertices[j]\r\n self.vertices[j] = store_vertex_i\r\n self.vertices[i] = store_vertex_j\r\n for k in range(len(self.vertices)):\r\n for swap_list in [self.vertices[k].children, self.vertices[k].parents]:\r\n if i in swap_list:\r\n swap_list[swap_list.index(i)] = -1\r\n if j in swap_list:\r\n swap_list[swap_list.index(j)] = i\r\n if -1 in swap_list:\r\n swap_list[swap_list.index(-1)] = j"
] | [
"0.68996036",
"0.66603166",
"0.66058844",
"0.65468365",
"0.647947",
"0.6389255",
"0.63157016",
"0.60198313",
"0.5989695",
"0.59827334",
"0.5894863",
"0.5893917",
"0.5848373",
"0.58432406",
"0.5836522",
"0.5817962",
"0.5729272",
"0.57066095",
"0.56845456",
"0.5676853",
"0.56750584",
"0.565925",
"0.5651929",
"0.56477153",
"0.5634925",
"0.5607992",
"0.5592078",
"0.5590798",
"0.5580672",
"0.5569982"
] | 0.80218107 | 0 |
The `invert_tree` method for the proficient challenges. | def invert_tree(tree: TreeNode) -> TreeNode:
if tree is None:
        raise ValueError('Empty tree')
swap_nodes(tree)
invert_tree(tree.left)
invert_tree(tree.right)
return tree | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_inverted_tree(tree: NodeTree) -> NodeTree:\n inverted_tree = NodeTree(tree.value)\n\n if tree.left is not None:\n inverted_tree.right = create_inverted_tree(tree.left)\n\n if tree.right is not None:\n inverted_tree.left = create_inverted_tree(tree.right)\n\n return inverted_tree",
"def invert(tree: nx.DiGraph) -> nx.DiGraph:\n new_tree = tree.copy()\n for node in new_tree.nodes:\n swap = np.empty(new_tree.nodes[node]['colinear_segments'].shape)\n swap[:, 1, :] = new_tree.nodes[node]['colinear_segments'][:, 0, :]\n swap[:, 0, :] = new_tree.nodes[node]['colinear_segments'][:, 1, :]\n new_tree.nodes[node]['colinear_segments'] = swap\n\n line_swap = np.empty((2, 2))\n old_line = new_tree.nodes[node]['line']\n line_swap[0] = old_line[1]\n line_swap[1] = old_line[0]\n new_tree.nodes[node]['line'] = line_swap\n\n for edge in new_tree.edges:\n new_tree.edges[edge]['position'] *= -1\n\n return new_tree",
"def reverse_other(t):\n\n ###############\n # My Solution #\n ###############\n\n def deep(t, depth):\n depth+=1\n\n if t.is_leaf():\n return\n \n branches = []\n for b in t.branches:\n branches.append(b.label)\n deep(b, depth)\n\n branches = branches[::-1]\n\n if depth % 2 != 0:\n i = 0\n for b in t.branches:\n b.label = branches[i]\n i+=1\n\n return deep(t, 0)",
"def invert_binary_tree(root):\n if root is None:\n return None\n left = invert_binary_tree(root.left)\n right = invert_binary_tree(root.right)\n root.left = right\n root.right = left\n return root",
"def __invert__(self):\n try:\n B = ~(self.matrix())\n except ZeroDivisionError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")\n try:\n return self.parent().reversed()(B)\n except TypeError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")",
"def __invert__(self):\n return self.inverse()",
"def change_directions(tree):\n tmp = [] # holds the nodes that have edges pointing to\n new_tree = []\n for e in tree:\n try:\n if tmp.index(e[1])>=0:\n new_tree.append(e[::-1])\n tmp.append(e[0])\n except ValueError:\n new_tree.append(e)\n tmp.append(e[1])\n return new_tree",
"def __invert__(self):\n return self.fam.c_unop('invert', self)",
"def get_original_tree(self, tree):\n if not tree:\n return\n tree = copy.deepcopy(tree)\n PCFG.__revert_step_4(tree.root)\n PCFG.__revert_step_2(tree.root)\n # Get rid of step 1, namely get rid of S_0 -> S\n new_root = tree.root.children[0]\n new_tree = ParseTree(new_root, tree.probability)\n return new_tree",
"def invert(self, solution, inverse_data):\n status = solution['status']\n\n primal_vars = None\n dual_vars = None\n if status in s.SOLUTION_PRESENT:\n opt_val = solution['value'] + inverse_data[s.OFFSET]\n primal_vars = {inverse_data[self.VAR_ID]: solution['primal']}\n return Solution(status, opt_val, primal_vars, dual_vars, {})\n else:\n return failure_solution(status)",
"def invert(self):\n raise NotImplementedError()",
"def __invert(self, args):",
"def __invert__(self):\n return self.negated()",
"def invert(f, g):\n lev, dom, per, F, G = f.unify(g)\n return per(dmp_invert(F, G, lev, dom))",
"def predict_decision_tree(input_data=rand_input, tree=dtr_full):\n return y_scaler.inverse_transform(tree.predict(input_data))",
"def invert(self, results, inverse_data):\n import mosek\n # Status map is taken from:\n # https://docs.mosek.com/8.1/pythonapi/constants.html?highlight=solsta#mosek.solsta\n STATUS_MAP = {mosek.solsta.optimal: s.OPTIMAL,\n mosek.solsta.integer_optimal: s.OPTIMAL,\n mosek.solsta.prim_feas: s.OPTIMAL_INACCURATE, # for integer problems\n mosek.solsta.prim_infeas_cer: s.INFEASIBLE,\n mosek.solsta.dual_infeas_cer: s.UNBOUNDED}\n # \"Near\" statuses only up to Mosek 8.1\n if hasattr(mosek.solsta, 'near_optimal'):\n STATUS_MAP_INACCURATE = {mosek.solsta.near_optimal: s.OPTIMAL_INACCURATE,\n mosek.solsta.near_integer_optimal: s.OPTIMAL_INACCURATE,\n mosek.solsta.near_prim_infeas_cer: s.INFEASIBLE_INACCURATE,\n mosek.solsta.near_dual_infeas_cer: s.UNBOUNDED_INACCURATE}\n STATUS_MAP.update(STATUS_MAP_INACCURATE)\n STATUS_MAP = defaultdict(lambda: s.SOLVER_ERROR, STATUS_MAP)\n\n env = results['env']\n task = results['task']\n solver_opts = results['solver_options']\n\n if inverse_data['integer_variables']:\n sol = mosek.soltype.itg\n elif 'bfs' in solver_opts and solver_opts['bfs'] and inverse_data['is_LP']:\n sol = mosek.soltype.bas # the basic feasible solution\n else:\n sol = mosek.soltype.itr # the solution found via interior point method\n\n problem_status = task.getprosta(sol)\n solution_status = task.getsolsta(sol)\n\n status = STATUS_MAP[solution_status]\n\n # For integer problems, problem status determines infeasibility (no solution)\n if sol == mosek.soltype.itg and problem_status == mosek.prosta.prim_infeas:\n status = s.INFEASIBLE\n\n if status in s.SOLUTION_PRESENT:\n # get objective value\n opt_val = task.getprimalobj(sol) + inverse_data[s.OBJ_OFFSET]\n # recover the cvxpy standard-form primal variable\n z = [0.] * inverse_data['n0']\n task.getxxslice(sol, 0, len(z), z)\n primal_vars = {inverse_data[self.VAR_ID]: z}\n # recover the cvxpy standard-form dual variables\n if sol == mosek.soltype.itg:\n dual_vars = None\n else:\n dual_vars = MOSEK.recover_dual_variables(task, sol, inverse_data)\n else:\n if status == s.INFEASIBLE:\n opt_val = np.inf\n elif status == s.UNBOUNDED:\n opt_val = -np.inf\n else:\n opt_val = None\n primal_vars = None\n dual_vars = None\n\n # Store computation time\n attr = {s.SOLVE_TIME: task.getdouinf(mosek.dinfitem.optimizer_time)}\n\n # Delete the mosek Task and Environment\n task.__exit__(None, None, None)\n env.__exit__(None, None, None)\n\n return Solution(status, opt_val, primal_vars, dual_vars, attr)",
"def __invert__(self):\n return self.reverse()",
"def swap_tree_test():\r\n par_rates = [0.01,0.02,0.025,0.028,0.036]\r\n maturities = [1,2,3,4,5]\r\n sig,FV,c = 0.1743,100,0.04\r\n fwd_tree,swap_values,CVA,DVA,EpEs,EnEs = swap_tree(par_rates,maturities,sig,FV,c,0.5,\\\r\n 0.4,0.025,0.01)\r\n print(\"Credit and debit adjusted value of swap\",swap_values[-1][0]+DVA-CVA)\r\n print(\"The values of CVA and DVA respectively are \",CVA,DVA)\r\n print(\"Expected positive exposures\",EpEs)\r\n print(\"Expected negative exposures\",EnEs)\r\n r\"\"\"\r\n Now we check manually that the swap value has been computed correctly, independent \r\n of the model used. \r\n \"\"\"\r\n import numpy.linalg as la\r\n import numpy as np\r\n A = np.array([[1.01,0,0,0,0],[0.02,1.02,0,0,0],[0.025,0.025,1.025,0,0],\\\r\n [0.028,0.028,0.028,1.028,0],[0.036,0.036,0.036,0.036,1.036]])\r\n b = np.array([1,1,1,1,1]).T\r\n discounts = np.dot(la.inv(A),b)\r\n b_fix = np.dot(discounts,[4,4,4,4,104])\r\n v_swap = b_fix - 100\r\n print(\"Model independent value of swap:\",v_swap)\r\n print(\"Binomial value of the swap\",swap_values[-1][0])",
"def _correct_tree(self, current_element: Node):\r\n while True:\r\n if current_element == None or current_element.parent() == None:\r\n return None\r\n current_element = current_element.parent()\r\n b1 = current_element.balance()\r\n\r\n try:\r\n b2 = current_element.right_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.right_son().left_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if b1 in (-1, 0, 1):\r\n if current_element.parent() == None:\r\n break\r\n else:\r\n continue\r\n elif ((b1 == -2 and b2 == 1 and b3 == -1) or\r\n (b1 == -2 and b2 == 1 and b3 == 0 ) or\r\n (b1 == -2 and b2 == 1 and b3 == 1)):\r\n current_element.reset(*self._right_left(current_element))\r\n elif b1 == -2:\r\n current_element.reset(*self._right_right(current_element))\r\n break\r\n\r\n try:\r\n b2 = current_element.left_son().balance()\r\n except AttributeError:\r\n b2 = 0\r\n try:\r\n b3 = current_element.left_son().right_son().balance()\r\n except AttributeError:\r\n b3 = 0\r\n\r\n if ((b1 == 2 and b2 == 2 and b3 == 2) or\r\n (b1 == -1 and b2 == -1 and b3 == -1) or\r\n (b1 == -1 and b2 == 0 and b3 == 1) or\r\n (b1 == 2 and b2 == -1 and b3 == 0)):\r\n current_element.reset(*self._left_right(current_element))\r\n elif b1 == 2:\r\n current_element.reset(*self._left_left(current_element))\r\n break\r\n \r\n if current_element.parent() == None:\r\n break",
"def copyAndCleanTree (self):\n\t\t# TODO: Need to do several things here:\n\t\t# - NoNames\n\t\t# - copy support scores to internal branch names\n\n\t\t## Main:\n\t\t# Copy the tree so as not to damage original\n\t\tete_tree = deepcopy (self.data)\n\n\t\t# set root branch to zero, make change later\n\t\tete_tree.dist = 0.0\n\n\t\t# find max / min branchlength for diagnostic purposes\n\t\t# doesn't use negative or zero branch lengths\n\t\t# Also clean names\n\t\tmax_bl = None\n\t\tmin_bl = None\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (0.0 < n.dist):\n\t\t\t\tif (max_bl is None) or (max_bl < n.dist):\n\t\t\t\t\tmax_bl = n.dist\n\t\t\t\tif (min_bl is None) or (n.dist < min_bl):\n\t\t\t\t\tmin_bl = n.dist\n\t\t\tclean_name = n.name.strip()\n\t\t\tif (clean_name[0] == \"'\") and (clean_name[-1] == \"'\"):\n\t\t\t\tclean_name = clean_name[1:-1]\n\t\t\tn.name = clean_name\n\n\t\t# set all branches to be at least 1/100 of the largest or 1/10 the\n\t\t# smallest, whichever is larger\n\t\tdefault_bl = max (max_bl / 100, min_bl/10)\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\tif (n.dist <= 0.0):\n\t\t\t\tn.dist = default_bl\n\n\t\t# get support values on tree by setting supprt as name\n\t\tfor n in ete_tree.traverse (\"postorder\"):\n\t\t\t# if an internal node\n\t\t\tif (not n.is_leaf()):\n\t\t\t\tn.name = config.SUPPORT_FMT % n.support\t\n\n\t\t# very hacky - calc appropriate scale bar size and stick on root\n\t\tmagn = int (floor (log10 (max_bl)))\n\t\tscale_size = 10**magn\n\t\tete_tree.scale_size = scale_size\n\n\t\t## Postcondtions & return:int ( floor ( log10 (x)))\n\t\treturn ete_tree",
"def __invert__(self) -> BooleanExpression:",
"def _invert(G):\n return Surreal.from_value(1 / G._n)",
"def __invert__(self):\n return BitBoard(~self.num)",
"def __invert__(self):\n return self.__neg__()",
"def convertTreeToCoveringTree( self, tree ):\n\n self.debug( \"convertTreeToCoveringTree: tree at start\" )\n if E.getLogLevel() >= 2: self.printTree( tree )\n \n ntree = self.addChildren( tree )\n \n #######\n # descend tree and add new domains\n # if domain has only a single child: delete the child and\n # rewire\n for t in ntree:\n info, children = t\n \n if info:\n node, parent, level, ranges = info\n \n if len(children) == 1:\n ntree[children[0]][0] = None\n ntree[node][1] = ntree[children[0]][1]\n \n #######\n # build new tree with new node identifiers\n current_node = 0\n covering_tree = []\n \n levels = map( lambda x: [], [0] * len(tree))\n \n for t in ntree:\n info, children = t\n \n if not info: continue\n node, parent, level, ranges = info\n \n if len(children) == 2:\n \n # add new node to tree, rename parent in children and\n # set borders\n leftchild = children[0]\n rightchild = children[1] \n \n # change left child\n lnode, lparent, llevel, lranges = ntree[leftchild][0]\n rnode, rparent, rlevel, rranges = ntree[rightchild][0] \n \n if ranges:\n lranges, rranges = self.getCoveringRanges( lranges, rranges, ranges )\n else:\n continue\n \n # change left child\n ntree[leftchild][0]= (None, current_node, level + 1, lranges) \n \n # change right child \n # cnode, cparent, clevel, cranges = ntree[rightchild][0]\n ntree[rightchild][0]= (None, current_node, level + 1, rranges )\n \n covering_tree.append( [level, parent, 0, 0, ranges] )\n levels[level].append( current_node )\n \n current_node += 1\n \n max_range = covering_tree[0][4][0][1]\n \n self.debug( \"convertTreeToCoveringTree: tree before removing small domains\" )\n if E.getLogLevel() >= 2: self.printTree( covering_tree )\n \n ###################################\n ## remove small fragments\n ## has to be done per level in order to be consistent\n ## done here and not during matrix decomposition, so that\n ## matrix needs not to be permuted more than once.\n for l in range(0, len(levels)):\n if len(levels[l]) == 0: break\n # collect all domains per level in a list of the form\n # (from, to, node)\n ranges = []\n for node in levels[l]:\n ranges += map(lambda x: (x[0], x[1], node), covering_tree[node][4])\n covering_tree[node][4] = []\n \n # and remove small fragments\n new_ranges = self.removeSmallRanges( ranges )\n \n # and put back into tree if there is more than one range\n for (xfrom, xto, node) in new_ranges:\n covering_tree[node][4].append( (xfrom, xto) )\n \n ###################################\n ## delete nodes with empty ranges or only a single child.\n ## renumber nodes so that there are no gaps\n\n self.debug( \"convertTreeToCoveringTree: after removing small domains\" )\n if E.getLogLevel() >= 2: self.printTree( covering_tree )\n \n return self.collapseTree( covering_tree )",
"def invert(self):\n return self._invert",
"def __invert__(self) -> Operators:\n return self.operate(inv)",
"def __invert__(cls):\n try:\n return cls.__inverse__\n except:\n # TODO: more descriptive\n raise err.VinoError('no inverse class was set')",
"def revert(self, o):\r\n # Check for empty or incorrect type input\r\n if (not o or\r\n not isinstance(o, TreeNode)):\r\n return list()\r\n\r\n a = list()\r\n q = deque([o])\r\n\r\n i = 0\r\n while q:\r\n p = q.popleft()\r\n a.append(p.v)\r\n if p.cl is not None:\r\n q.append(p.cl)\r\n if p.cr is not None:\r\n q.append(p.cr)\r\n i += 1\r\n return a",
"def invert(self):\n tmp = self.pvt\n self.pvt = self.nvt\n self.nvt = tmp\n tmp = self.pFace\n self.pFace = self.nFace\n self.nFace = tmp"
] | [
"0.6759059",
"0.6705779",
"0.58604103",
"0.57773787",
"0.57395315",
"0.57179874",
"0.5696101",
"0.5665392",
"0.5645661",
"0.5622745",
"0.56212926",
"0.5608631",
"0.5527884",
"0.550309",
"0.5494076",
"0.54709524",
"0.54265046",
"0.53877723",
"0.53698325",
"0.5364037",
"0.5298445",
"0.5279437",
"0.5265863",
"0.523913",
"0.5230423",
"0.522078",
"0.52080595",
"0.52079034",
"0.5195149",
"0.51921123"
] | 0.7347783 | 0 |
Calculate the cost J = sum(sum(r[k,n] * squared_distance(m[k], x[n]))) | def cost(X, R, M):
cost = 0
for k in range(len(M)):
for n in range(len(X)):
cost += R[n,k] * d(M[k], X[n])
return cost | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def total_cost(self):\n return np.einsum('i->', self.c[self.s])",
"def cost_matrix(x, y, p=2):\n xc = tf.expand_dims(x, 1)\n yr = tf.expand_dims(y, 0)\n d = tf.math.pow(tf.abs(xc - yr), p)\n return tf.reduce_sum(d, axis=-1)",
"def calculate_cost(self, medoids, clusters):\n cost = 0.0\n for i in range(0, len(medoids)):\n for j in range(0, len(clusters[i])):\n cost += distance.sqeuclidean(medoids[i], clusters[i][j])\n return cost\n pass",
"def final_cost(self, x):\n return self.x_M_x(x[-1,:,:],self.R)",
"def compute_cost(x, y, theta=[[0], [0]]):\n m = y.size\n h = x.dot(theta)\n j = 1 / (2 * m) * np.sum(np.square(h - y))\n return j",
"def euclidean_cost_hessian(self, J: dict, K: dict, r: dict):\n H = 0\n for e in J.keys():\n J_e = J[e]\n N = J_e.shape[1]\n H += J_e.T @ J_e\n # TODO: Try with einsum for speed, maybe?\n for idx in range(N):\n for jdx in range(idx, N):\n dH = K[e][:, idx, jdx].T @ r[e]\n H[idx, jdx] -= dH\n if idx != jdx:\n H[jdx, idx] -= dH\n return H",
"def dist_cost(s_vr, failed_vr, neighbor_vr, dist_matrix, w_a1, w_a2):\n #print s_vr, failed_vr, neighbor_vr\n dist_i_f = dist_matrix[s_vr][failed_vr + 1]\n dist_i_k = dist_matrix[s_vr][neighbor_vr + 1]\n dist = w_a1 * float(dist_i_f) + w_a2 * float(dist_i_k)\n #print \"d_i_f: \", dist_i_f, \", dist_i_k: \", dist_i_k\n return dist",
"def calculate_costs(self):\n cost_matrix = self.make_cost_matrix()\n \n if self.greedy:\n # Riesen et al., \"Greedy Graph Edit Distance\"\n costs = []\n psi = []\n \n for row in range(self.N):\n phi = self.M\n row_min = sys.maxint\n for column in range(self.N+self.M):\n if column not in psi:\n if cost_matrix[row, column] < row_min:\n row_min = cost_matrix[row, column]\n phi = column\n \n costs.append(row_min)\n if phi < self.M:\n psi.append(phi)\n \n for row in range(self.N, self.N+self.M):\n if (row - self.N) not in psi:\n costs.append(cost_matrix[row, row - self.N])\n else:\n # Riesen & Bunke, \"Approximate graph edit distance computation by means of bipartite graph matching\"\n row_ind, col_ind = optimize.linear_sum_assignment(cost_matrix)\n \n if self.verbose:\n for row, column in (row_ind, col_ind):\n value = cost_matrix[row, column]\n print '%d, %d, %.4f' % (row, column, value)\n \n return row_ind, col_ind, cost_matrix[row_ind, col_ind]",
"def cost_(theta, X, Y):\n if X.shape[1] + 1 != theta.size or X.shape[0] != Y.size:\n print(\"Inc dim\")\n return\n c = cost_elem_(theta, X, Y)\n s = 0\n for i in c:\n s = s + i\n return(s)",
"def costFun(self, S, x):",
"def _cost_function(self, y_pred, y, m):\n sumatory = 0\n for x in range(m):\n sumatory += (y_pred[0][x] -y[0][x])**2\n\n cost = 1/(2*m) * sumatory\n return cost",
"def costFunction(R, W):\n costFunc = 0\n for i in range(0, len(R)):\n for j in range(i, len(R)):\n costFunc += costBetweenNodes(R, W, i, j)\n return costFunc",
"def costMatrix(row_feats, col_feats, row_labels, col_labels, metric=\"Pearson\"):\n\n # Get unique label values in non-moving and moving brain\n row_labs = np.asarray(list(set(row_labels).difference({-1, 0})))\n col_labs = np.asarray(list(set(col_labels).difference({-1, 0})))\n\n # Initialize cost matrix\n costMatrix = np.zeros((len(row_labs), len(col_labs)))\n print(costMatrix.shape)\n\n # Compute pairwise costs between all label sets\n for i, r in enumerate(row_labs):\n indr = np.where(row_labels == r)[0]\n lr = len(indr)\n\n if metric in [\"Spearman\",\"Euclidean\",\"Pearson\"]:\n featr = row_feats[indr, :]\n\n for j, c in enumerate(col_labs):\n indc = np.where(col_labels == c)[0]\n \n if metric in [\"Spearman\",\"Euclidean\",\"Pearson\"]:\n featc = col_feats[indc, :]\n\n if metric == \"Spearman\":\n [rVal, _] = spearmanr(featr, featc, axis=1)\n rVal = 1-rVal[lr:, 0:lr]\n\n elif metric == \"Pearson\":\n rVal = cdist(featr, featc, metric='Correlation').mean()\n\n elif metric == \"Euclidean\":\n rVal = cdist(featr, featc).mean()\n\n elif metric == \"Dice\":\n rVal = 1-hmg.dice(indr, indc)\n\n elif metric == \"EMD\":\n rmu = row_feats[indr, :].mean(0)\n rmu = rmu/rmu.sum()\n\n cmu = col_feats[indc, :].mean(0)\n cmu = cmu/cmu.sum()\n\n rVal = emd(rmu, cmu)\n\n\n costMatrix[i, j] = rVal\n\n return [row_labs, col_labs, costMatrix]",
"def __collision_cost(self, x0, x1):\n d = np.linalg.norm(x0 - x1)\n cost = self.qc / (1 + np.exp(self.kappa * (d - 2 * self.radius)))\n return cost",
"def cost(self, X, y) :\n ### ========== TODO : START ========== ###\n # part d: compute J(theta)\n #we know for linear/polynomial regression, the cost is the square of the errors\n X = self.generate_polynomial_features(X)\n y_pred_vector = np.dot(X, self.coef_)\n cost = np.dot((y-y_pred_vector).transpose(),(y-y_pred_vector))#write in the matrix form\n ### ========== TODO : END ========== ###\n return cost",
"def calculate_cost(data, centers, clusters):\n total = 0\n for i in range(len(centers)):\n total = total + np.sum(data[centers[i]][clusters[i]]) \n return total",
"def ComputeCostMatrix( self, A, B ):\n #return [[EuclideanDistance(a,b) for b in B] for a in A]\n return [[self.EuclideanDistanceSq(a,b) for b in B] for a in A] # skip the sqrt operation until later",
"def cost(self) -> float:",
"def total_cost_2D(self, final_list):\n total_cost = 0\n for i in range(len(final_list) - 1):\n temp = self.pairwise_distance(final_list[i], final_list[i + 1])\n total_cost = total_cost + temp\n print(\"Total distance: \" + str(total_cost))",
"def compute_cost(self, del_u : list, u : list):\n print(\"ym: \", self.ym, \"yn: \", self.yn)\n self.cost = 0.0\n\n self.ym = self.d_model.ym\n self.yn = self.d_model.yn\n\n # FIXME : this is supposed to be from N1 to N2\n self.cost+= (self.ym[0] - self.yn[0])\n angle_diff = (self.ym[1] - self.yn[1])\n if angle_diff > np.pi:\n angle_diff -= 2*np.pi\n if angle_diff < -np.pi:\n angle_diff += 2*np.pi\n self.cost += angle_diff\n\n for j in range(self.Nu):\n self.cost += (self.ym[j] - self.yn[j])**2\n\n for j in range(self.Nu):\n self.cost += self.lambd[j]*(del_u[j])**2\n\n for j in range(self.Nu):\n self.cost += self.s / (u[j] + self.r / 2.0 - self.b) + self.s / (self.r/2.0 + self.b - u[j]) - 4.0 / self.r\n\n return self.cost",
"def compute_objective(X, C):\n # similar as mse\n\n acc = 0\n for x in range(len(X)):\n acc += (nearest_distance(X.iloc[x], C)) ** 2\n\n return acc",
"def compute_cost(features, values, theta):\r\n m = len(values)\r\n sum_of_square_errors = numpy.square(numpy.dot(features, theta) - values).sum()\r\n cost = sum_of_square_errors / (2*m)\r\n\r\n return cost",
"def cost(self,x):\n Mx = self.generate_vector()\n self.prior.M.mult(x,Mx)\n return .5*Mx.inner(x)",
"def UpdateCostMatrix( self, extraXs ):\n for x in extraXs:\n newRow = [ self.EuclideanDistanceSq(x,y) for y in self.Y ]\n self.C.append(newRow)",
"def cost_fun(x, problem):\n j = 0\n if problem['use_log_bar']:\n c = ineqconstr(x, problem)\n j += np.sum(logbarrierfunc(0.1, c, problem['use_sigma']))\n\n x, t_final = matrify(x, problem)\n if problem['T']!=0:\n j += np.sum([problem['cost_fun_single'](x[:, :, i], t_final, problem) for i in range(problem['Nv'])])\n else:\n j = t_final\n return j",
"def compute_cost(features, values, theta):\n m = len(values)\n sum_of_square_errors = np.square(np.dot(features, theta) - values).sum()\n cost = sum_of_square_errors / (2 * m)\n\n return cost",
"def cost_func(plist):\n\t\tgamma, alpha = plist\n\t\tk = ac.Moffat2DKernel(gamma, alpha, x_size=nx, y_size=ny)\n\n\t\tarr_out_predict = ac.convolve(arr_in, k)\n\n\t\tarr_out_fit, arr_out_predict_fit = match_dimension(arr_out, arr_out_predict)\n\t\tdiff = (arr_out_fit - arr_out_predict_fit)*scale_factor\n\n\t\treturn np.sum(diff**2)/diff.size",
"def compute_cost(self, r):\n self.r_max = 1\n return np.exp(1 / (np.power(r, 2) - np.power(self.r_max, 2))) if r < self.r_max else 0",
"def compute_cost_matrix(self):\n\n if rank == 0:\n #do random sampling of a parameters\n if self.sampling == \"LHS\":\n lhs = Lhs(lhs_type=\"classic\", criterion=None)\n param_samples = lhs.generate(self.sample_space, self.niters)\n elif self.sampling == \"rsampling\":\n param_samples = self.sample_space.rvs(self.niters)\n elif self.sampling == \"Sobol\":\n sobol = Sobol()\n param_samples = sobol.generate(self.sample_space.dimensions, self.niters)\n \n # generate param samples split\n niters_rank0 = self.niters//size + self.niters % size\n niters_rank = self.niters//size\n count_scatter = [niters_rank0]\n count_scatter.extend((size-2)*[niters_rank])\n count_scatter = np.cumsum(count_scatter)\n\n param_samples_split = np.split(param_samples,count_scatter)\n else:\n param_samples_split = None\n \n #scatter parameter samples data\n param_samps = comm.scatter(param_samples_split,root=0)\n\n # initialize data\n param_samples_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n jac_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n qoi_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n\n \n\n # evaluate QoI at random sampling\n for sample in param_samps: \n qoi_sample, jac_sample = self.jac(sample).values()\n # store output\n for qoi_name in self.funcnames:\n if not (jac_sample[qoi_name] is None):\n param_samples_dict_rank[qoi_name].append(jac_sample[qoi_name])\n jac_dict_rank[qoi_name].append(jac_sample[qoi_name])\n qoi_dict_rank[qoi_name].append(qoi_sample[qoi_name])\n else:\n param_samples_diff_dict_rank[qoi_name].append(sample)\n\n # gather data\n param_samples = None\n param_samples_diff_int = None\n jac_dict = None\n qoi_dict= None\n\n param_samples_dict = comm.gather(param_samples_dict_rank, root=0)\n params_samples_diff_dict = comm.gather(param_samples_diff_dict_rank, root=0)\n jac_dict = comm.gather(jac_dict_rank, root=0)\n qoi_dict = comm.gather(qoi_dict_rank, root=0)\n\n # format gathered data\n if rank == 0:\n #flatten data\n param_samples_dict_flattened = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n jac_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n qoi_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n\n for cpurank in range(size):\n for qoi_name in self.funcnames:\n param_samples_dict_flattened[qoi_name].extend(param_samples_dict[cpurank][qoi_name]) \n param_samples_diff_dict_flattened[qoi_name].extend(params_samples_diff_dict[cpurank][qoi_name])\n jac_dict_flattened[qoi_name].extend(jac_dict[cpurank][qoi_name])\n qoi_dict_flattened[qoi_name].extend(qoi_dict[cpurank][qoi_name])\n\n #compute outer product\n jac_outer_dict = {qoi_name: [] for qoi_name in self.funcnames}\n nfuncs_dict = {qoi_name: 0 for qoi_name in self.funcnames}\n\n for qoi_name in self.funcnames:\n for i in range(len(jac_dict_flattened[qoi_name])):\n jac_sample = jac_dict_flattened[qoi_name][i]\n jac_outer_dict[qoi_name].append(np.outer(jac_sample,jac_sample))\n nfuncs_dict[qoi_name] += 1\n\n # compute cost matrix and norm convergence\n cost_matrix_dict = {}\n cost_matrix_cumul_dict = {}\n norm_convergence_dict = {}\n\n for qoi_name in self.funcnames:\n cost_cumsum = np.cumsum(jac_outer_dict[qoi_name],axis=0)/np.arange(1,nfuncs_dict[qoi_name]+1)[:,None,None]\n cost_matrix_cumul_dict[qoi_name] = cost_cumsum\n cost_matrix_dict[qoi_name] = cost_cumsum[-1,:,:]\n norm_convergence_dict[qoi_name] = 
np.linalg.norm(cost_cumsum,ord='fro',axis=(1,2))\n\n # compute variance matrix\n variance_matrix_dict = {}\n for qoi_name in self.funcnames:\n variance_mat = np.sum((jac_outer_dict[qoi_name]-cost_matrix_dict[qoi_name])**2/(nfuncs_dict[qoi_name]-1),axis=0) \n variance_matrix_dict[qoi_name] = variance_mat\n\n param_results = {\"PARAM_SAMPLES\": param_samples_dict_flattened,\n \"DIFFICULT_PARAM_SAMPLES\": param_samples_diff_dict_flattened}\n\n fun_results = {\"NUMBER_OF_FUNCTION_SUCCESS\": nfuncs_dict,\n \"NORM_OF_SEQ_OF_CUMUL_SUMS\": norm_convergence_dict,\n \"SEQ_OF_CUMUL_SUMS\": cost_matrix_cumul_dict, \n \"VARIANCE_OF_ENTRIES\": variance_matrix_dict,\n \"FINAL_COST_MATRIX\":cost_matrix_dict}\n\n return {'PARAMETER_RESULTS': param_results, 'FUNCTION_RESULTS': fun_results}",
"def compute_cost(X, groups, K_clusters):\n m = X.shape[0]\n dis = np.empty(m)\n for i in range(m):\n dis[i] = compute_distance(X[i,:].reshape(1,X.shape[1]), K_clusters[groups[i],:].reshape(1,X.shape[1]))\n cost = (1/m)*np.sum(dis)\n return cost"
] | [
"0.6906183",
"0.67312104",
"0.67167264",
"0.6657094",
"0.6641777",
"0.66405445",
"0.65683174",
"0.65535605",
"0.6470182",
"0.64275986",
"0.6414472",
"0.63875943",
"0.6365623",
"0.63528895",
"0.63380504",
"0.6333762",
"0.6333615",
"0.62924534",
"0.62811",
"0.6254352",
"0.6241441",
"0.6239056",
"0.6235081",
"0.62274355",
"0.62005293",
"0.61562866",
"0.61494166",
"0.61214155",
"0.61033136",
"0.60939777"
] | 0.765551 | 0 |
Find a valid path from options, which is a list of 2-tuples of (name, path). Return the first pair whose path is not None. If no valid path is found, return ('<unknown>', None). | def _find_valid_path(options):
for by, data in options:
if data is not None:
return by, data
else:
return '<unknown>', None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def option_qualified_name(self, opt):\n if not opt in self.results:\n return ('unknown', opt)\n elif not self.results[opt][1] in self._is_funny:\n return ('trac.ini', self.results[opt][1])\n else:\n return (self.results[opt][1], opt)",
"def find_path_options(self, name, path_context):\n # If the name doesn't have the extension then we need to try\n # both with and without the extension\n extension = os.path.splitext(path_context)[1]\n if not name.endswith(extension):\n with_extension = lambda path: [path, path + extension]\n else:\n with_extension = lambda path: [path]\n # Absolute paths are straightforward\n if os.path.isabs(name):\n return with_extension(name)\n # Relative paths get resolved with respect to path_context\n if name.startswith('./') or name.startswith('../'):\n dir_context = os.path.dirname(path_context)\n path = os.path.normpath(os.path.join(dir_context, name))\n return with_extension(path)\n # Otherwise we walk over the load_paths\n paths = []\n for load_path in self.load_paths:\n path = os.path.join(load_path, name)\n paths.extend(with_extension(path))\n return paths",
"def spath_stripoptions(spath):\n l = [comp.split(\"?\", 1)[0] for comp in spath.split(\"/\")]\n return \"/\".join(l)",
"def check_and_resolve_path(key, parameter):\n if 'paths' in key:\n return [resolve_relative_path(p) for p in parameter]\n if 'path' in key:\n return resolve_relative_path(parameter)\n return parameter",
"def south_path(self):\n answer = prompt(\"That is a very dangerous path. Are you sure \\\nyou want to proceed? (y/n) \").lower()\n if answer == \"y\":\n return (paths.south, wrathful)\n elif answer == \"n\":\n return None\n else:\n return None",
"def __convert_to_path_exit(self, options):\n options[options == 0] = 255\n options[self.currY, self.currX] = 0\n cur = (self.exitY, self.exitX)\n dirs = []\n while True:\n for dir in (0, 1, 2, 3):\n if options[cur[0], cur[1]] - 1 \\\n == options[cur[0] + self.directions[dir][0], cur[1] + self.directions[dir][1]]:\n cur = (cur[0] + self.directions[dir][0], cur[1] + self.directions[dir][1])\n dirs.append(dir)\n break\n if (cur[0] == self.currY) and (cur[1] == self.currX):\n moves = []\n while len(dirs) > 0:\n moves += self.__convert_to_turn(self.dirs[dirs.pop() + 2]) + ['F']\n return moves",
"def parse_path(path):\n if path == '/':\n return None, None, None\n paths = path[1:].split('/', 1)\n\n #Filter Empty strings\n paths = [p for p in paths if p != '']\n if len(paths) == 1:\n return paths[0], None, None\n else:\n file_hash, rest = paths\n paths = rest.split('/', 1)\n #Filter Empty strings\n paths = [p for p in paths if p != '']\n if len(paths) == 1:\n return file_hash, paths[0], None\n else:\n action, rest = paths\n return file_hash, action, rest",
"def check_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"check_path\")",
"def check_path(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"check_path\")",
"def parse_optional_file_list_from_args(args_list: Any, append_error_func: Callable[[str], None]) -> List[str]:\n results = [] # type: List[str]\n if args_list is None:\n # No arguments\n pass\n elif isinstance(args_list, List):\n for c in args_list:\n if not os.path.exists(c):\n append_error_func(\"Given path %s does not exist!\" % c)\n results = list(args_list)\n else:\n append_error_func(\"Argument was not a list?\")\n return results",
"def ex_path(path):\n if path is None:\n return []\n elif isinstance(path, str):\n return path.split(\",\")\n elif isinstance(path, list):\n return path\n return []",
"def _get_dircomp(self, opt):\n ## version\n if self.parser_type == 'optparse':\n if '--version' == opt:\n return \":\"\n else: # argparse\n if '-v' == opt or '--version' == opt:\n return \":\"\n ## help\n if '-h' == opt or '--help' == opt:\n return \":\"\n ## user define options\n if self.parser_type == 'optparse': # TODO: now, only optparse module\n opt_obj = self.parser._short_opt.get(opt)\n if opt_obj and opt_obj.action in ('store_true', 'store_false'):\n return \"\"\n else:\n opt_obj = self.parser._long_opt.get(opt)\n if opt_obj and opt_obj.action in ('store_true', 'store_false'):\n return \"\"\n return \"\"",
"def thereis_short_opt(options, opt_char):\n if len(opt_char) != 1:\n raise RuntimeError(\n \"This function is for one character options, opt_char>%s<.\" %\n opt_char)\n off = string.find(options, opt_char)\n if off >= 0:\n return (off < len(options)) and (options[off+1] == ':')\n else:\n return None",
"def handle_dotted_path(\n value: str, author: str\n) -> Tuple[List[str], Path, ConfigLoader, Optional[ComponentId]]:\n parts = value.split(\".\")\n\n root = parts[0]\n if root not in ALLOWED_PATH_ROOTS:\n raise AEAException(\n \"The root of the dotted path must be one of: {}\".format(ALLOWED_PATH_ROOTS)\n )\n\n if (\n len(parts) < 2\n or parts[0] == AGENT\n and len(parts) < 2\n or parts[0] == VENDOR\n and len(parts) < 5\n or parts[0] != AGENT\n and len(parts) < 3\n ):\n raise AEAException(\n \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n # if the root is 'agent', stop.\n if root == AGENT:\n resource_type_plural = AGENTS\n path_to_resource_configuration = Path(DEFAULT_AEA_CONFIG_FILE)\n json_path = parts[1:]\n component_id = None\n elif root == VENDOR:\n # parse json path\n resource_author = parts[1]\n resource_type_plural = parts[2]\n resource_name = parts[3]\n\n # extract component id\n resource_type_singular = resource_type_plural[:-1]\n try:\n component_type = ComponentType(resource_type_singular)\n except ValueError as e:\n raise AEAException(\n f\"'{resource_type_plural}' is not a valid component type. Please use one of {ComponentType.plurals()}.\"\n ) from e\n component_id = ComponentId(\n component_type, PublicId(resource_author, resource_name)\n )\n\n # find path to the resource directory\n path_to_resource_directory = (\n Path(\".\") / VENDOR / resource_author / resource_type_plural / resource_name\n )\n path_to_resource_configuration = (\n path_to_resource_directory\n / RESOURCE_TYPE_TO_CONFIG_FILE[resource_type_plural]\n )\n json_path = parts[4:]\n if not path_to_resource_directory.exists():\n raise AEAException( # pragma: nocover\n \"Resource vendor/{}/{}/{} does not exist.\".format(\n resource_author, resource_type_plural, resource_name\n )\n )\n else:\n # navigate the resources of the agent to reach the target configuration file.\n resource_type_plural = root\n resource_name = parts[1]\n\n # extract component id\n resource_type_singular = resource_type_plural[:-1]\n component_type = ComponentType(resource_type_singular)\n resource_author = author\n component_id = ComponentId(\n component_type, PublicId(resource_author, resource_name)\n )\n\n # find path to the resource directory\n path_to_resource_directory = Path(\".\") / resource_type_plural / resource_name\n path_to_resource_configuration = (\n path_to_resource_directory\n / RESOURCE_TYPE_TO_CONFIG_FILE[resource_type_plural]\n )\n json_path = parts[2:]\n if not path_to_resource_directory.exists():\n raise AEAException(\n \"Resource {}/{} does not exist.\".format(\n resource_type_plural, resource_name\n )\n )\n\n config_loader = ConfigLoader.from_configuration_type(resource_type_plural[:-1])\n return json_path, path_to_resource_configuration, config_loader, component_id",
"def extract_option(prefix, args):\n if prefix in ('#',):\n unique = False\n else:\n unique = True\n value = [a for a in args if a.startswith(prefix)]\n if len(value) == 1:\n value = value[0]\n args.remove(value)\n value = value[1:]\n if not unique:\n return [value]\n return value\n elif len(value) > 1 and unique:\n print('More than one %s found in args' % prefix)\n sys.exit(1)\n elif len(value) > 1 and not unique:\n for v in value:\n if v in args:\n args.remove(v)\n return [v[1:] for v in value]\n return None",
"def _get_lsp_config_path_select_secondary(self):\n return self.__lsp_config_path_select_secondary",
"def homedir(options=['/home/jennifer/', '/home/jwalker/',\n 'C:/Users/jenfl/']):\n\n home = None\n for h in options:\n if os.path.isdir(h):\n home = h\n if home is None:\n raise ValueError('Home directory not found in list of options.')\n return home",
"def search_paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"search_paths\")",
"def search_paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"search_paths\")",
"def search_paths(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"search_paths\")",
"def __parse_location_and_options(cls, loc_opt_str):\n # type: (str) -> List[str]\n split_location_options = loc_opt_str.split('?') # type: List(str)\n location = split_location_options[0] # type: str\n options = {} # type: Dict\n\n if len(split_location_options) == 2:\n options = cls.__parse_options_dict(\n split_location_options[1]\n ) # type: Dict[str, str]\n\n return [location, options]",
"def valid_path(s):\n if len(s) > 0:\n return s\n else:\n raise argparse.ArgumentTypeError('path cannot be empty')",
"def _find_config_file(self) -> str or None:\n import os\n\n for path in self.paths:\n path = os.path.expanduser(path)\n for extension in self.file_extensions:\n for file_name in self.file_names:\n file_path = os.path.join(path, \"{}.{}\".format(file_name, extension))\n if os.path.isfile(file_path):\n return file_path\n\n return None",
"def processOption (self, line) :\n ll = line.split ('=')\n if len (ll) < 2:\n print \"Cannot parse option \" , line\n sys.exit()\n result = (ll[0].strip() , ll[1].strip())\n return result",
"def findPath(enviroment: Environment, position: tuple) -> list:",
"def findPath(enviroment: Environment, position: tuple) -> list:",
"def find_paths(documents: List[str], question: str, candidate: str,\n style='wikihop') -> Optional[List]:\n sentlimit = 1\n nearest_only = False\n d = process_data(documents, question, candidate)\n\n doc_ners = d['docners']\n doc_postags = d['docpostags']\n doc_sents = d['docsents']\n\n qpos = d[\"qpos\"]\n qner = d[\"qner\"]\n qlemma = d['qlemma']\n rel = qlemma[0]\n entity = ' '.join(qlemma[1:]).lower()\n candidates = []\n orig_candidates = [d['candidate']]\n for ctoks in orig_candidates:\n sctoks = [stemmer.stem(ca) for ca in ctoks]\n if sctoks in candidates:\n candidates.append(ctoks)\n else:\n candidates.append(sctoks)\n candidates = [' '.join(cand) for cand in candidates]\n candpos = [d['cpos']]\n candner = [d['cner']]\n\n doc_sents_lemma = lemmatize_docsents(doc_sents, stem)\n\n if style.strip().lower() == \"wikihop\":\n pf = PathFinder(\"qid\", doc_sents_lemma,\n entity, rel,\n candidates,\n answer=None,\n sentlimit=sentlimit,\n nearest_only=nearest_only)\n else:\n pf = ObqaPathFinder(\"qid\", doc_sents_lemma,\n qlemma, qpos, qner,\n candidates, candpos, candner,\n answer=None, sentlimit=sentlimit,\n nearest_only=nearest_only)\n\n paths = pf.get_paths(doc_ners, doc_postags)\n if len(paths) == 0:\n print(\"No Paths Found !!\")\n return None\n # pathdict = {\"id\": \"qid\", \"pathlist\": paths[list(paths.keys())[0]]}\n return paths[list(paths.keys())[0]]",
"def parse_path(path):\n assert path is not None and len(path) > 0, \"Invalid path: %s.\" % str(path)\n if not isinstance(path, tuple):\n path = str(path).split('.')\n return path",
"def __check_path__(path):\n\n def seq_iter(iterable):\n result = []\n for p in iterable:\n if isinstance(p, Iterable) and \\\n not isinstance(p, (basestring, tuple)):\n result += seq_iter(p)\n else:\n result.append(p)\n\n return result\n\n if isinstance(path, (basestring, int, float, complex, NoneType)):\n return path,\n else:\n return tuple(seq_iter(path))",
"def FindBinary( binary, user_options ):\n\n def _FindPath():\n key = '{0}_binary_path'.format( binary )\n if user_options.get( key ):\n return user_options[ key ]\n return GO_BINARIES.get( binary )\n\n binary_path = _FindPath()\n if os.path.isfile( binary_path ):\n return binary_path\n return None"
] | [
"0.5819407",
"0.5445068",
"0.5414672",
"0.541425",
"0.54042286",
"0.5392301",
"0.53444403",
"0.5334321",
"0.5334321",
"0.5205876",
"0.5203414",
"0.51825213",
"0.5180476",
"0.51746553",
"0.5171607",
"0.51599324",
"0.51492083",
"0.5144711",
"0.5144711",
"0.5144711",
"0.514106",
"0.5129904",
"0.5119722",
"0.5098138",
"0.5090607",
"0.5090607",
"0.50572544",
"0.5053794",
"0.505074",
"0.50477153"
] | 0.85310924 | 0 |
Return the path to the system-wide cudatoolkit, or None if it doesn't exist. | def get_system_ctk(*subdirs):
# Linux?
if sys.platform.startswith('linux'):
# Is cuda alias to /usr/local/cuda?
# We are intentionally not getting versioned cuda installation.
base = '/usr/local/cuda'
if os.path.exists(base):
return os.path.join(base, *subdirs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_conda_ctk():\n is_conda_env = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))\n if not is_conda_env:\n return\n # Assume the existence of NVVM to imply cudatoolkit installed\n paths = find_lib('nvvm')\n if not paths:\n return\n # Use the directory name of the max path\n return os.path.dirname(max(paths))",
"def get_nvidia_cudalib_ctk():\n nvvm_ctk = get_nvidia_nvvm_ctk()\n if not nvvm_ctk:\n return\n env_dir = os.path.dirname(os.path.dirname(nvvm_ctk))\n subdir = 'bin' if IS_WIN32 else 'lib'\n return os.path.join(env_dir, subdir)",
"def get_nvidia_static_cudalib_ctk():\n nvvm_ctk = get_nvidia_nvvm_ctk()\n if not nvvm_ctk:\n return\n env_dir = os.path.dirname(os.path.dirname(nvvm_ctk))\n dirs = ('Lib', 'x64') if IS_WIN32 else ('lib',)\n return os.path.join(env_dir, *dirs)",
"def __find_tool_path(self):\n tool_path = Path(os.path.dirname(os.path.realpath(__file__)))\n # We asume the installion path is relative to our installation path\n tool_path = tool_path / '../../../bin'\n if os.name == 'posix':\n ret = tool_path / 'fast-discovery-server'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n elif os.name == 'nt':\n ret = tool_path / 'fast-discovery-server.exe'\n if not os.path.exists(ret):\n ret = tool_path / 'fast-discovery-server.bat'\n if not os.path.exists(ret):\n print('fast-discovery-server tool not installed')\n sys.exit(1)\n else:\n print(f'{os.name} not supported')\n sys.exit(1)\n\n return ret",
"def get_debian_pkg_libdevice():\n pkg_libdevice_location = '/usr/lib/nvidia-cuda-toolkit/libdevice'\n if not os.path.exists(pkg_libdevice_location):\n return None\n return pkg_libdevice_location",
"def get_nvidia_libdevice_ctk():\n nvvm_ctk = get_nvidia_nvvm_ctk()\n if not nvvm_ctk:\n return\n nvvm_dir = os.path.dirname(nvvm_ctk)\n return os.path.join(nvvm_dir, 'libdevice')",
"def get_diagtool_bin():\n context = analyzer_context.get_context()\n clang_bin = context.analyzer_binaries.get(ClangSA.ANALYZER_NAME)\n\n if not clang_bin:\n return None\n\n # Resolve symlink.\n clang_bin = os.path.realpath(clang_bin)\n\n # Find diagtool next to the clang binary.\n diagtool_bin = os.path.join(os.path.dirname(clang_bin), 'diagtool')\n if os.path.exists(diagtool_bin):\n return diagtool_bin\n\n LOG.debug(\"'diagtool' can not be found next to the clang binary (%s)!\",\n clang_bin)",
"def _detect(env):\n try:\n return env['KCC']\n except KeyError:\n pass\n\n kcc = env.WhereIs('kcc', env['KCC_DIR'])\n if kcc:\n return kcc\n\n raise SCons.Errors.StopError(\n KccNotFound,\n \"Could not find Kalimba C compiler (kcc.exe)\")",
"def locate_data():\n # Locate by using the environment variable\n if \"TESSDATA_PREFIX\" in os.environ:\n data_prefix = os.environ[\"TESSDATA_PREFIX\"]\n\n if os.path.isdir(data_prefix):\n return data_prefix\n\n # Locate by using the command directory\n cmd_path = os.path.dirname(_config.command)\n\n if cmd_path:\n cmd_data_path = os.path.join(cmd_path, \"tessdata\")\n\n if os.path.isdir(cmd_data_path):\n return cmd_data_path\n\n return None",
"def get_output_path():\n\n path = rs.DocumentPath()\n name = rs.DocumentName()\n \n if gc.operating_system == \"mac\":\n\n path = path[:-len(name)] + \"_system.dat\"\n\n elif gc.operating_system == \"win\":\n\n i = path.rfind(\"\\\\\")\n\n path = path[:i] + \"/_system.dat\" \n\n return path",
"def get_nvidia_nvvm_ctk():\n is_conda_env = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))\n if not is_conda_env:\n return\n # Assume the existence of NVVM to imply cudatoolkit installed\n libdir = os.path.join(sys.prefix, 'nvvm', _cudalib_path())\n if not os.path.exists(libdir) or not os.path.isdir(libdir):\n return\n paths = find_lib('nvvm', libdir=libdir)\n if not paths:\n return\n # Use the directory name of the max path\n return os.path.dirname(max(paths))",
"def _GetSystemPath():\n return encoding_util.GetEncodedValue(os.environ, \"PATH\")",
"def locate_nuget():\n if NuGetRunner.valid_nuget_executable(\"nuget\"):\n return \"nuget\"\n return None",
"def _get_default_path(self):\n return os.path.join(cfg.ROOT_DIR, 'data', 'KITTI')",
"def _client(self):\n return self.m.cipd.ensure_tool('infra/tools/luci/isolated/${platform}',\n self._version)",
"def systemdir():\n if platform == 'windows':\n return os.path.join(os.environ['ProgramFiles'], 'automaton')\n else:\n return \"/etc/automaton/\"",
"def get_tool_chain_path(tool_chain_path):\n if not tool_chain_path:\n if 'ISSM_TOOLCHAIN' in os.environ:\n tool_chain_path = os.environ['ISSM_TOOLCHAIN']\n elif 'IAMCU_TOOLCHAIN_DIR' in os.environ:\n tool_chain_path = os.environ['IAMCU_TOOLCHAIN_DIR']\n tool_chain_path = os.path.join(tool_chain_path,\n '..', '..', '..', '..', '..')\n tool_chain_path = os.path.normpath(tool_chain_path)\n else:\n print('Tool chain path not found: please provide it as input.')\n exit(1)\n # Path validation.\n tool_chain_path = is_valid_path(tool_chain_path)\n else:\n # Toolchain path given as input by\n # the user and already validated.\n pass\n\n return tool_chain_path",
"def _locate_bootloader():\n pkg_path = os.path.dirname(__file__)\n blpath = os.path.abspath(os.path.join(pkg_path, 'bootloader'))\n if not os.path.isfile(blpath):\n raise InternalError(\"bootloader not found at {}\".format(blpath))\n return blpath",
"def get_toolkit(tool_list):\n best_choice = None \n for exe in tool_list:\n if which(exe):\n best_choice = exe\n break\n \n # Did not find any tools\n # to potentially use\n if not best_choice:\n err(\n 'Error: Did not find any tools to get job information!'\n )\n fatal(\n 'Expected one of the following tools to be in $PATH:'\n '\\t{0}'.format(tool_list)\n )\n \n return best_choice",
"def get_helper_path(tool):\n return os.path.join(TOOLS_DIR, tool)",
"def find_tool():\n return shutil.which('nm')",
"def set_syspath(self, hasal_dir):\n library_path = os.path.join(hasal_dir, \"lib\", \"sikuli\")\n sys.path.append(library_path)\n return library_path",
"def find_teradata_home():\n if platform.system() == 'Windows':\n # The default installation path for Windows is split between the\n # Windows directories for 32-bit/64-bit applications. It is\n # worth noting that Teradata archiecture installed should match\n # the architecture of the Python architecture being used (i.e.\n # TTU 32-bit is required /w Python 32-bit and TTU 64-bit is\n # required for Python 64-bit).\n if is_64bit():\n return latest_teradata_version(\"C:/Program Files/Teradata/Client\")\n else:\n return latest_teradata_version(\"C:/Program Files (x86)/Teradata/Client\")\n elif platform.system() == 'Linux':\n return latest_teradata_version(\"/opt/teradata/client\")\n elif platform.system() == 'Darwin':\n return latest_teradata_version(\"/Library/Application Support/teradata/client\")\n else:\n # In the case nothing is found, the default for Linux is\n # attempted as a last effort to find the correct install\n # directory.\n return latest_teradata_version(\"/opt/teradata/client\")",
"def lib_tds_path():\n if sys.platform == 'darwin':\n return '/usr/local/lib/libtdsodbc.so'\n return 'FreeTDS'",
"def get_system():\n if 'google.colab' in sys.modules:\n return Constant.SYS_GOOGLE_COLAB\n if os.name == 'posix':\n return Constant.SYS_LINUX\n if os.name == 'nt':\n return Constant.SYS_WINDOWS\n\n raise EnvironmentError('Unsupported environment')",
"def locate_cuda():\n # adapted from\n # https://stackoverflow.com/questions/10034325/can-python-distutils-compile-cuda-code\n nvcc = None\n envs = ['CUDA_HOME', 'CUDA_ROOT', 'CUDAHOME', 'CUDAROOT']\n for env in envs:\n if env in os.environ:\n nvcc = os.path.join(os.environ[env], 'bin', 'nvcc')\n break\n else:\n # otherwise, search PATH for NVCC\n nvcc = find_in_path(['nvcc'])\n if nvcc is None:\n raise EnvironmentError(\n 'The nvcc executable could not be found. ' +\n 'Add it to $PATH or set one of the environment variables ' +\n ', '.join(envs))\n home = os.path.dirname(os.path.dirname(nvcc))\n\n cudaconfig = {}\n cudaconfig['home'] = home\n cudaconfig['nvcc'] = nvcc\n cudaconfig['include'] = os.path.join(home, 'include')\n # on Linux, CUDA has the libraries in lib64\n lib_dir = os.path.join(home, 'lib64')\n if not os.path.isdir(lib_dir):\n # on the MAC they are in lib\n lib_dir = os.path.join(home, 'lib')\n cudaconfig['lib'] = lib_dir\n\n for k, v in cudaconfig.items():\n if not os.path.exists(v):\n raise EnvironmentError(\n 'The CUDA %s path could not be located in %s' % (k, v))\n # print \"CUDA installation detected: \" + home\n return cudaconfig",
"def findLibraryPath():\n path = os.path.split(os.path.abspath(__file__))[0]\n\n if os.path.exists(os.path.join(path, 'lib/header_primaryHDU.txt')):\n return os.path.join(path, 'lib')\n elif os.path.exists(os.path.join(path, 'header_primaryHDU.txt')):\n return path\n elif os.path.exists('header_primaryHDU.txt'):\n return './'\n else:\n raise IOError(\"Cannot find header files. Called from findLibraryPath() in sdfits.py\")",
"def _get_pdfkit_config(self):\n if platform.system() == 'Windows':\n return pdfkit.configuration(wkhtmltopdf=os.environ.get('WKHTMLTOPDF_BINARY',\n 'C:\\\\Program Files\\\\wkhtmltopdf\\\\bin\\\\wkhtmltopdf.exe'))\n else:\n WKHTMLTOPDF_CMD = subprocess.Popen(['which', os.environ.get('WKHTMLTOPDF_BINARY', 'wkhtmltopdf')],\n stdout=subprocess.PIPE).communicate()[0].strip()\n return pdfkit.configuration(wkhtmltopdf=WKHTMLTOPDF_CMD)",
"def find_toolkit(self) -> str:\n raise NotImplementedError",
"def _getNETSDKPath():\r\n try:\r\n dotNETSDK_root_key = win32api.RegOpenKeyEx(win32con.HKEY_LOCAL_MACHINE, 'SOFTWARE\\\\Microsoft\\\\Microsoft SDKs\\\\.NETFramework\\\\v2.0', 0, win32con.KEY_READ)\r\n found = False\r\n i = 0\r\n try:\r\n try:\r\n while not found:\r\n name, obj, ntype = win32api.RegEnumValue(dotNETSDK_root_key, i)\r\n i = i + 1\r\n if name=='InstallationFolder':\r\n return obj\r\n found = True\r\n except:\r\n win32api.RegCloseKey(dotNETSDK_root_key)\r\n return ''\r\n finally:\r\n win32api.RegCloseKey(dotNETSDK_root_key)\r\n except:\r\n return ''"
] | [
"0.6586595",
"0.6505726",
"0.62108535",
"0.61482155",
"0.6069431",
"0.59912306",
"0.5942864",
"0.5693473",
"0.56767124",
"0.5654563",
"0.55638784",
"0.551995",
"0.55120856",
"0.5467015",
"0.54590136",
"0.5451404",
"0.54293686",
"0.54079705",
"0.537745",
"0.53771037",
"0.53624934",
"0.5354249",
"0.5327427",
"0.53243816",
"0.53117925",
"0.530393",
"0.52819467",
"0.5276852",
"0.52647257",
"0.5261144"
] | 0.672219 | 0 |
Return path to directory containing the shared libraries of cudatoolkit. | def get_nvidia_cudalib_ctk():
nvvm_ctk = get_nvidia_nvvm_ctk()
if not nvvm_ctk:
return
env_dir = os.path.dirname(os.path.dirname(nvvm_ctk))
subdir = 'bin' if IS_WIN32 else 'lib'
return os.path.join(env_dir, subdir) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_library_dir():\n return os.path.join(get_script_path(), 'library')",
"def get_nvidia_static_cudalib_ctk():\n nvvm_ctk = get_nvidia_nvvm_ctk()\n if not nvvm_ctk:\n return\n env_dir = os.path.dirname(os.path.dirname(nvvm_ctk))\n dirs = ('Lib', 'x64') if IS_WIN32 else ('lib',)\n return os.path.join(env_dir, *dirs)",
"def get_library_dirs():\n if DAALTK_HOME_ENV_VAR not in os.environ:\n raise Exception(\"Required environment variable %s not set\" % DAALTK_HOME_ENV_VAR)\n\n daaltk_home = os.environ[DAALTK_HOME_ENV_VAR]\n return [daaltk_home, os.path.join(daaltk_home, LIB_DIR)]",
"def get_data_dir():\n rootdir = os.path.dirname(__file__)\n libdir = rootdir + os.sep + \"data\"\n return libdir",
"def lib_name_path(interface, simulator):\n library_name_path = os.path.join(libs_dir, lib_name(interface, simulator))\n\n # On Windows use mixed mode \"c:/a/b/c\" as this work in all cases\n if os.name == \"nt\":\n return library_name_path.replace(\"\\\\\", \"/\")\n\n return library_name_path",
"def lib_dir(self):\n if not self._lib_dir:\n lib_files = glob.glob(\"/usr/lib/*/librpm.so*\")\n if not lib_files:\n raise InstallError(\"Can not find lib directory.\")\n self._lib_dir = os.path.dirname(lib_files[0])\n return self._lib_dir",
"def get_nvidia_libdevice_ctk():\n nvvm_ctk = get_nvidia_nvvm_ctk()\n if not nvvm_ctk:\n return\n nvvm_dir = os.path.dirname(nvvm_ctk)\n return os.path.join(nvvm_dir, 'libdevice')",
"def library_dirs(self):",
"def get_conda_ctk():\n is_conda_env = os.path.exists(os.path.join(sys.prefix, 'conda-meta'))\n if not is_conda_env:\n return\n # Assume the existence of NVVM to imply cudatoolkit installed\n paths = find_lib('nvvm')\n if not paths:\n return\n # Use the directory name of the max path\n return os.path.dirname(max(paths))",
"def lib_dir(self):\n raise NotImplementedError('Implement this property.')",
"def set_syspath(self, hasal_dir):\n library_path = os.path.join(hasal_dir, \"lib\", \"sikuli\")\n sys.path.append(library_path)\n return library_path",
"def findLibraryPath():\n path = os.path.split(os.path.abspath(__file__))[0]\n\n if os.path.exists(os.path.join(path, 'lib/header_primaryHDU.txt')):\n return os.path.join(path, 'lib')\n elif os.path.exists(os.path.join(path, 'header_primaryHDU.txt')):\n return path\n elif os.path.exists('header_primaryHDU.txt'):\n return './'\n else:\n raise IOError(\"Cannot find header files. Called from findLibraryPath() in sdfits.py\")",
"def getmodulepath(modulename):\n return USERLIBDIR + '\\\\' + modulename + '.sikuli\\\\' + modulename + '.py'",
"def lib_tds_path():\n if sys.platform == 'darwin':\n return '/usr/local/lib/libtdsodbc.so'\n return 'FreeTDS'",
"def linking_library_dirs(self):",
"def get_system_ctk(*subdirs):\n # Linux?\n if sys.platform.startswith('linux'):\n # Is cuda alias to /usr/local/cuda?\n # We are intentionally not getting versioned cuda installation.\n base = '/usr/local/cuda'\n if os.path.exists(base):\n return os.path.join(base, *subdirs)",
"def lib_dir(self):\n if not self._lib_dir:\n rpm_lib_dir = None\n cmd = '{rpm_path} -ql {rpm_lib}'.format(\n rpm_path=self.rpm_path, rpm_lib=self.rpm_lib_pkg_name\n )\n out = Cmd.sh_e_out(cmd)\n lines = out.split('\\n')\n for line in lines:\n if 'librpm.so' in line:\n rpm_lib_dir = os.path.dirname(line)\n break\n self._lib_dir = rpm_lib_dir\n return self._lib_dir",
"def _ask_ctypes(self):\n if os.name == 'nt':\n libpath = find_library(self._libname)\n libpath = libpath if libpath is not None else find_library(self._libname + '.dll')\n else:\n libpath = find_library(self._libname)\n return str(libpath) if libpath is not None else libpath",
"def find_lib_path():\n curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\n # make pythonpack hack: copy this directory one level upper for setup.py\n dll_path = [curr_path, os.path.join(curr_path, '../../lib/'),\n os.path.join(curr_path, './lib/'),\n os.path.join(sys.prefix, 'xlearn')]\n if sys.platform == 'win32':\n if platform.architecture()[0] == '64bit':\n dll_path.append(os.path.join(curr_path, '../../windows/x64/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/x64/Release/'))\n else:\n dll_path.append(os.path.join(curr_path, '../../windows/Release/'))\n # hack for pip installation when copy all parent source directory here\n dll_path.append(os.path.join(curr_path, './windows/Release/'))\n dll_path = [os.path.join(p, 'xlearn_api.dll') for p in dll_path]\n elif sys.platform.startswith('linux'):\n dll_path = [os.path.join(p, 'libxlearn_api.so') for p in dll_path]\n elif sys.platform == 'darwin':\n dll_path = [os.path.join(p, 'libxlearn_api.dylib') for p in dll_path]\n\n lib_path = [p for p in dll_path if os.path.exists(p) and os.path.isfile(p)]\n\n # From github issues, most of installation errors come from machines w/o compilers\n if not lib_path:\n raise XLearnLibraryNotFound(\n 'Cannot find xlearn Library in the candidate path'\n )\n return lib_path",
"def get_services_dir():\n return bytestostr(libruss.russ_get_services_dir())",
"def python_lib_arch_dir(self):\n return get_python_lib(plat_specific=True)",
"def initLibPath():\n libHash = {\n 'Framework': 1,\n 'UserControlleLib': 1,\n 'CaseLib': 1\n }\n\n binPath = os.path.split(os.path.realpath(__file__))[0]\n\n for key in libHash:\n sys.path.append(os.path.join(__getLibAbsPath(binPath, libHash[key]), key))",
"def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'",
"def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__",
"def library_search_path(self, pedantic=False):\n return []",
"def path(self):\n return self.lib.path",
"def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath",
"def CoreDirectory():\n thisDir=WindowsPath(__file__).parent.resolve()\n # print(f\"this dir {thisDir}\")\n coreDir=thisDir.parent/\"MacroSystem/core\"\n return coreDir",
"def datadir():\n return '../data/'",
"def find_lib_directory(self):\n lib_directory = None\n if self.lib_micro_version in self.lib_directories:\n lib_directory = self.lib_micro_version\n elif self.lib_minor_version in self.lib_directories:\n lib_directory = self.lib_minor_version\n elif self.lib_major_version in self.lib_directories:\n lib_directory = self.lib_major_version\n else:\n for lv in [self.lib_micro_version, self.lib_minor_version, self.lib_major_version]:\n for d in self.lib_directories:\n if lv in d:\n lib_directory = d\n break\n else:\n continue\n break\n return lib_directory"
] | [
"0.71016824",
"0.6860122",
"0.65566653",
"0.6523251",
"0.6495012",
"0.63525116",
"0.63236153",
"0.63159585",
"0.6251684",
"0.6166421",
"0.6151194",
"0.6136339",
"0.6069506",
"0.6065325",
"0.59846205",
"0.5933944",
"0.5912517",
"0.5902956",
"0.5875484",
"0.587457",
"0.58695763",
"0.58613664",
"0.584653",
"0.5840799",
"0.58028346",
"0.5797718",
"0.57826746",
"0.5773946",
"0.5770634",
"0.5750831"
] | 0.6947953 | 1